
github.com/nodejs/node.git
author     Michaël Zasso <targos@protonmail.com>  2021-06-10 15:24:30 +0300
committer  Michaël Zasso <targos@protonmail.com>  2021-06-14 09:08:56 +0300
commit     e82ef4148e43beec1db89fc18fad55a8dae01b16 (patch)
tree       bc99922d3391a63e21d533bec1c4873f00e690c9 /deps
parent     ed91379186cfe407a880124514a6f264583f3e33 (diff)
deps: update V8 to 9.1.269.36
PR-URL: https://github.com/nodejs/node/pull/38273
Backport-PR-URL: https://github.com/nodejs/node/pull/38991
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Antoine du Hamel <duhamelantoine1995@gmail.com>
Reviewed-By: Michael Dawson <midawson@redhat.com>
Reviewed-By: Mary Marchini <oss@mmarchini.me>
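A quick way to confirm which V8 version the tree now carries is to read the version macros from deps/v8/include/v8-version.h, one of the headers touched in the diffstat below. The following is a minimal sketch, not part of the commit; it assumes deps/v8/include is on the compiler's include path.

// Sketch: print the V8 version reported by the bundled headers.
// V8_MAJOR_VERSION, V8_MINOR_VERSION, V8_BUILD_NUMBER and V8_PATCH_LEVEL
// are defined in deps/v8/include/v8-version.h.
#include <cstdio>

#include "v8-version.h"  // assumes deps/v8/include is on the include path

int main() {
  std::printf("V8 %d.%d.%d.%d\n", V8_MAJOR_VERSION, V8_MINOR_VERSION,
              V8_BUILD_NUMBER, V8_PATCH_LEVEL);
  return 0;
}

After this update the program should print 9.1.269.36, matching the commit subject.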
Diffstat (limited to 'deps')
-rw-r--r--deps/v8/AUTHORS4
-rw-r--r--deps/v8/BUILD.gn2680
-rw-r--r--deps/v8/COMMON_OWNERS6
-rw-r--r--deps/v8/DEPS48
-rw-r--r--deps/v8/ENG_REVIEW_OWNERS1
-rw-r--r--deps/v8/base/trace_event/common/trace_event_common.h6
-rw-r--r--deps/v8/gni/snapshot_toolchain.gni4
-rw-r--r--deps/v8/gni/v8.cmx44
-rw-r--r--deps/v8/include/OWNERS14
-rw-r--r--deps/v8/include/cppgc/allocation.h53
-rw-r--r--deps/v8/include/cppgc/cross-thread-persistent.h67
-rw-r--r--deps/v8/include/cppgc/explicit-management.h73
-rw-r--r--deps/v8/include/cppgc/garbage-collected.h3
-rw-r--r--deps/v8/include/cppgc/heap-state.h11
-rw-r--r--deps/v8/include/cppgc/internal/gc-info.h56
-rw-r--r--deps/v8/include/cppgc/internal/persistent-node.h40
-rw-r--r--deps/v8/include/cppgc/internal/pointer-policies.h7
-rw-r--r--deps/v8/include/cppgc/testing.h49
-rw-r--r--deps/v8/include/cppgc/visitor.h79
-rw-r--r--deps/v8/include/v8-cppgc.h15
-rw-r--r--deps/v8/include/v8-fast-api-calls.h346
-rw-r--r--deps/v8/include/v8-internal.h18
-rw-r--r--deps/v8/include/v8-platform.h19
-rw-r--r--deps/v8/include/v8-version.h6
-rw-r--r--deps/v8/include/v8.h334
-rw-r--r--deps/v8/infra/mb/mb_config.pyl1
-rw-r--r--deps/v8/infra/testing/builders.pyl93
-rw-r--r--deps/v8/src/DEPS7
-rw-r--r--deps/v8/src/api/api-inl.h1
-rw-r--r--deps/v8/src/api/api-natives.cc6
-rw-r--r--deps/v8/src/api/api.cc882
-rw-r--r--deps/v8/src/api/api.h2
-rw-r--r--deps/v8/src/asmjs/OWNERS1
-rw-r--r--deps/v8/src/asmjs/asm-js.cc2
-rw-r--r--deps/v8/src/ast/OWNERS3
-rw-r--r--deps/v8/src/ast/ast.cc36
-rw-r--r--deps/v8/src/ast/ast.h111
-rw-r--r--deps/v8/src/ast/scopes.cc13
-rw-r--r--deps/v8/src/ast/scopes.h8
-rw-r--r--deps/v8/src/base/cpu.cc62
-rw-r--r--deps/v8/src/base/cpu.h44
-rw-r--r--deps/v8/src/base/immediate-crash.h162
-rw-r--r--deps/v8/src/base/logging.cc9
-rw-r--r--deps/v8/src/base/logging.h95
-rw-r--r--deps/v8/src/base/macros.h15
-rw-r--r--deps/v8/src/base/overflowing-math.h7
-rw-r--r--deps/v8/src/base/platform/OWNERS1
-rw-r--r--deps/v8/src/base/platform/platform-fuchsia.cc2
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc2
-rw-r--r--deps/v8/src/base/platform/platform-win32.cc2
-rw-r--r--deps/v8/src/base/template-utils.h8
-rw-r--r--deps/v8/src/base/vlq.h85
-rw-r--r--deps/v8/src/baseline/OWNERS1
-rw-r--r--deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h483
-rw-r--r--deps/v8/src/baseline/arm/baseline-compiler-arm-inl.h94
-rw-r--r--deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h52
-rw-r--r--deps/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h7
-rw-r--r--deps/v8/src/baseline/baseline-assembler-inl.h9
-rw-r--r--deps/v8/src/baseline/baseline-assembler.h14
-rw-r--r--deps/v8/src/baseline/baseline-compiler.cc463
-rw-r--r--deps/v8/src/baseline/baseline-compiler.h39
-rw-r--r--deps/v8/src/baseline/baseline.cc19
-rw-r--r--deps/v8/src/baseline/baseline.h4
-rw-r--r--deps/v8/src/baseline/bytecode-offset-iterator.cc65
-rw-r--r--deps/v8/src/baseline/bytecode-offset-iterator.h98
-rw-r--r--deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h445
-rw-r--r--deps/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h93
-rw-r--r--deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h33
-rw-r--r--deps/v8/src/baseline/x64/baseline-compiler-x64-inl.h5
-rw-r--r--deps/v8/src/bigint/DEPS13
-rw-r--r--deps/v8/src/bigint/OWNERS2
-rw-r--r--deps/v8/src/bigint/bigint.h131
-rw-r--r--deps/v8/src/bigint/vector-arithmetic.cc22
-rw-r--r--deps/v8/src/builtins/accessors.cc9
-rw-r--r--deps/v8/src/builtins/arm/builtins-arm.cc463
-rw-r--r--deps/v8/src/builtins/arm64/builtins-arm64.cc313
-rw-r--r--deps/v8/src/builtins/array-join.tq10
-rw-r--r--deps/v8/src/builtins/base.tq79
-rw-r--r--deps/v8/src/builtins/builtins-array-gen.cc3
-rw-r--r--deps/v8/src/builtins/builtins-array.cc24
-rw-r--r--deps/v8/src/builtins/builtins-async-function-gen.cc4
-rw-r--r--deps/v8/src/builtins/builtins-async-gen.cc64
-rw-r--r--deps/v8/src/builtins/builtins-async-gen.h6
-rw-r--r--deps/v8/src/builtins/builtins-async-generator-gen.cc4
-rw-r--r--deps/v8/src/builtins/builtins-callsite.cc2
-rw-r--r--deps/v8/src/builtins/builtins-collections-gen.cc7
-rw-r--r--deps/v8/src/builtins/builtins-constructor-gen.cc24
-rw-r--r--deps/v8/src/builtins/builtins-definitions.h23
-rw-r--r--deps/v8/src/builtins/builtins-function.cc9
-rw-r--r--deps/v8/src/builtins/builtins-generator-gen.cc109
-rw-r--r--deps/v8/src/builtins/builtins-internal-gen.cc108
-rw-r--r--deps/v8/src/builtins/builtins-intl.cc24
-rw-r--r--deps/v8/src/builtins/builtins-iterator-gen.cc10
-rw-r--r--deps/v8/src/builtins/builtins-iterator-gen.h2
-rw-r--r--deps/v8/src/builtins/builtins-lazy-gen.cc14
-rw-r--r--deps/v8/src/builtins/builtins-microtask-queue-gen.cc62
-rw-r--r--deps/v8/src/builtins/builtins-object-gen.cc45
-rw-r--r--deps/v8/src/builtins/builtins-proxy-gen.cc4
-rw-r--r--deps/v8/src/builtins/builtins-proxy-gen.h1
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.cc31
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.cc2
-rw-r--r--deps/v8/src/builtins/builtins-typed-array-gen.cc72
-rw-r--r--deps/v8/src/builtins/builtins.cc10
-rw-r--r--deps/v8/src/builtins/builtins.h7
-rw-r--r--deps/v8/src/builtins/cast.tq6
-rw-r--r--deps/v8/src/builtins/constructor.tq5
-rw-r--r--deps/v8/src/builtins/convert.tq24
-rw-r--r--deps/v8/src/builtins/ia32/builtins-ia32.cc452
-rw-r--r--deps/v8/src/builtins/iterator.tq1
-rw-r--r--deps/v8/src/builtins/mips/builtins-mips.cc34
-rw-r--r--deps/v8/src/builtins/mips64/builtins-mips64.cc34
-rw-r--r--deps/v8/src/builtins/object.tq6
-rw-r--r--deps/v8/src/builtins/ppc/builtins-ppc.cc35
-rw-r--r--deps/v8/src/builtins/promise-abstract-operations.tq15
-rw-r--r--deps/v8/src/builtins/promise-all-element-closure.tq2
-rw-r--r--deps/v8/src/builtins/promise-all.tq3
-rw-r--r--deps/v8/src/builtins/promise-constructor.tq7
-rw-r--r--deps/v8/src/builtins/promise-jobs.tq3
-rw-r--r--deps/v8/src/builtins/promise-misc.tq121
-rw-r--r--deps/v8/src/builtins/promise-resolve.tq16
-rw-r--r--deps/v8/src/builtins/riscv64/builtins-riscv64.cc4
-rw-r--r--deps/v8/src/builtins/s390/builtins-s390.cc35
-rw-r--r--deps/v8/src/builtins/torque-internal.tq9
-rw-r--r--deps/v8/src/builtins/wasm.tq47
-rw-r--r--deps/v8/src/builtins/x64/builtins-x64.cc670
-rw-r--r--deps/v8/src/codegen/OWNERS4
-rw-r--r--deps/v8/src/codegen/aligned-slot-allocator.cc125
-rw-r--r--deps/v8/src/codegen/aligned-slot-allocator.h71
-rw-r--r--deps/v8/src/codegen/arm/assembler-arm-inl.h2
-rw-r--r--deps/v8/src/codegen/arm/assembler-arm.cc17
-rw-r--r--deps/v8/src/codegen/arm/assembler-arm.h15
-rw-r--r--deps/v8/src/codegen/arm/interface-descriptors-arm.cc24
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.cc88
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.h21
-rw-r--r--deps/v8/src/codegen/arm/register-arm.h7
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64-inl.h2
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64.cc145
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64.h16
-rw-r--r--deps/v8/src/codegen/arm64/constants-arm64.h25
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h5
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.cc121
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.h22
-rw-r--r--deps/v8/src/codegen/arm64/register-arm64.h31
-rw-r--r--deps/v8/src/codegen/assembler.cc6
-rw-r--r--deps/v8/src/codegen/assembler.h5
-rw-r--r--deps/v8/src/codegen/code-factory.cc7
-rw-r--r--deps/v8/src/codegen/code-factory.h1
-rw-r--r--deps/v8/src/codegen/code-reference.cc18
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.cc1815
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.h274
-rw-r--r--deps/v8/src/codegen/compilation-cache.cc16
-rw-r--r--deps/v8/src/codegen/compilation-cache.h9
-rw-r--r--deps/v8/src/codegen/compiler.cc122
-rw-r--r--deps/v8/src/codegen/cpu-features.h5
-rw-r--r--deps/v8/src/codegen/external-reference.cc162
-rw-r--r--deps/v8/src/codegen/external-reference.h103
-rw-r--r--deps/v8/src/codegen/handler-table.cc5
-rw-r--r--deps/v8/src/codegen/handler-table.h2
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32-inl.h6
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32.cc32
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32.h6
-rw-r--r--deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc16
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.cc637
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.h238
-rw-r--r--deps/v8/src/codegen/ia32/register-ia32.h7
-rw-r--r--deps/v8/src/codegen/interface-descriptors.cc31
-rw-r--r--deps/v8/src/codegen/interface-descriptors.h47
-rw-r--r--deps/v8/src/codegen/machine-type.h24
-rw-r--r--deps/v8/src/codegen/mips/assembler-mips-inl.h2
-rw-r--r--deps/v8/src/codegen/mips/assembler-mips.cc2
-rw-r--r--deps/v8/src/codegen/mips/macro-assembler-mips.cc10
-rw-r--r--deps/v8/src/codegen/mips/macro-assembler-mips.h11
-rw-r--r--deps/v8/src/codegen/mips/register-mips.h7
-rw-r--r--deps/v8/src/codegen/mips64/assembler-mips64-inl.h2
-rw-r--r--deps/v8/src/codegen/mips64/assembler-mips64.cc2
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.cc10
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.h12
-rw-r--r--deps/v8/src/codegen/mips64/register-mips64.h7
-rw-r--r--deps/v8/src/codegen/optimized-compilation-info.cc7
-rw-r--r--deps/v8/src/codegen/optimized-compilation-info.h6
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc-inl.h2
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc.cc50
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc.h15
-rw-r--r--deps/v8/src/codegen/ppc/constants-ppc.h201
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.cc10
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.h10
-rw-r--r--deps/v8/src/codegen/ppc/register-ppc.h30
-rw-r--r--deps/v8/src/codegen/register-arch.h14
-rw-r--r--deps/v8/src/codegen/register.cc16
-rw-r--r--deps/v8/src/codegen/register.h3
-rw-r--r--deps/v8/src/codegen/reloc-info.cc7
-rw-r--r--deps/v8/src/codegen/reloc-info.h4
-rw-r--r--deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h2
-rw-r--r--deps/v8/src/codegen/riscv64/assembler-riscv64.cc2
-rw-r--r--deps/v8/src/codegen/riscv64/interface-descriptors-riscv64.cc12
-rw-r--r--deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc5
-rw-r--r--deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h15
-rw-r--r--deps/v8/src/codegen/riscv64/register-riscv64.h6
-rw-r--r--deps/v8/src/codegen/s390/assembler-s390-inl.h4
-rw-r--r--deps/v8/src/codegen/s390/assembler-s390.cc4
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.cc224
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.h26
-rw-r--r--deps/v8/src/codegen/s390/register-s390.h7
-rw-r--r--deps/v8/src/codegen/safepoint-table.cc31
-rw-r--r--deps/v8/src/codegen/safepoint-table.h43
-rw-r--r--deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc403
-rw-r--r--deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h189
-rw-r--r--deps/v8/src/codegen/signature.h54
-rw-r--r--deps/v8/src/codegen/tnode.h31
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64-inl.h8
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.cc60
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.h2
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.cc456
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.h187
-rw-r--r--deps/v8/src/codegen/x64/register-x64.h47
-rw-r--r--deps/v8/src/common/external-pointer-inl.h12
-rw-r--r--deps/v8/src/common/external-pointer.h4
-rw-r--r--deps/v8/src/common/globals.h37
-rw-r--r--deps/v8/src/common/message-template.h1
-rw-r--r--deps/v8/src/common/ptr-compr-inl.h64
-rw-r--r--deps/v8/src/common/ptr-compr.h4
-rw-r--r--deps/v8/src/compiler-dispatcher/OWNERS1
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc94
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h14
-rw-r--r--deps/v8/src/compiler/OWNERS11
-rw-r--r--deps/v8/src/compiler/access-info.cc369
-rw-r--r--deps/v8/src/compiler/access-info.h127
-rw-r--r--deps/v8/src/compiler/backend/arm/code-generator-arm.cc222
-rw-r--r--deps/v8/src/compiler/backend/arm/instruction-codes-arm.h13
-rw-r--r--deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc13
-rw-r--r--deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc40
-rw-r--r--deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc198
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h18
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc18
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc192
-rw-r--r--deps/v8/src/compiler/backend/code-generator.cc74
-rw-r--r--deps/v8/src/compiler/backend/code-generator.h3
-rw-r--r--deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc440
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h21
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc21
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc147
-rw-r--r--deps/v8/src/compiler/backend/instruction-codes.h7
-rw-r--r--deps/v8/src/compiler/backend/instruction-scheduler.cc4
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector.cc208
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector.h66
-rw-r--r--deps/v8/src/compiler/backend/instruction.cc29
-rw-r--r--deps/v8/src/compiler/backend/instruction.h118
-rw-r--r--deps/v8/src/compiler/backend/jump-threading.cc15
-rw-r--r--deps/v8/src/compiler/backend/mid-tier-register-allocator.cc521
-rw-r--r--deps/v8/src/compiler/backend/mid-tier-register-allocator.h1
-rw-r--r--deps/v8/src/compiler/backend/mips/code-generator-mips.cc149
-rw-r--r--deps/v8/src/compiler/backend/mips/instruction-codes-mips.h12
-rw-r--r--deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc18
-rw-r--r--deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc20
-rw-r--r--deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc149
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h12
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc16
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc23
-rw-r--r--deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc399
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h41
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc37
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc134
-rw-r--r--deps/v8/src/compiler/backend/register-allocator-verifier.cc20
-rw-r--r--deps/v8/src/compiler/backend/register-allocator.cc63
-rw-r--r--deps/v8/src/compiler/backend/register-allocator.h5
-rw-r--r--deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc33
-rw-r--r--deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h15
-rw-r--r--deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc15
-rw-r--r--deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc33
-rw-r--r--deps/v8/src/compiler/backend/s390/code-generator-s390.cc512
-rw-r--r--deps/v8/src/compiler/backend/s390/instruction-codes-s390.h16
-rw-r--r--deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc18
-rw-r--r--deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc34
-rw-r--r--deps/v8/src/compiler/backend/x64/code-generator-x64.cc481
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-codes-x64.h18
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc18
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc226
-rw-r--r--deps/v8/src/compiler/branch-elimination.cc62
-rw-r--r--deps/v8/src/compiler/branch-elimination.h2
-rw-r--r--deps/v8/src/compiler/bytecode-analysis.cc58
-rw-r--r--deps/v8/src/compiler/code-assembler.cc46
-rw-r--r--deps/v8/src/compiler/code-assembler.h215
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.cc27
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.h1
-rw-r--r--deps/v8/src/compiler/common-operator.cc2
-rw-r--r--deps/v8/src/compiler/common-operator.h2
-rw-r--r--deps/v8/src/compiler/compilation-dependencies.cc138
-rw-r--r--deps/v8/src/compiler/compilation-dependencies.h14
-rw-r--r--deps/v8/src/compiler/csa-load-elimination.cc119
-rw-r--r--deps/v8/src/compiler/csa-load-elimination.h3
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.cc31
-rw-r--r--deps/v8/src/compiler/frame-states.cc20
-rw-r--r--deps/v8/src/compiler/frame-states.h11
-rw-r--r--deps/v8/src/compiler/frame.cc31
-rw-r--r--deps/v8/src/compiler/frame.h99
-rw-r--r--deps/v8/src/compiler/graph-visualizer.cc11
-rw-r--r--deps/v8/src/compiler/heap-refs.h61
-rw-r--r--deps/v8/src/compiler/int64-lowering.cc235
-rw-r--r--deps/v8/src/compiler/int64-lowering.h11
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc25
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc4
-rw-r--r--deps/v8/src/compiler/js-heap-broker.cc464
-rw-r--r--deps/v8/src/compiler/js-heap-broker.h10
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.cc21
-rw-r--r--deps/v8/src/compiler/js-inlining.cc24
-rw-r--r--deps/v8/src/compiler/js-inlining.h4
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc76
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.h1
-rw-r--r--deps/v8/src/compiler/js-operator.cc4
-rw-r--r--deps/v8/src/compiler/js-operator.h13
-rw-r--r--deps/v8/src/compiler/linkage.cc87
-rw-r--r--deps/v8/src/compiler/linkage.h44
-rw-r--r--deps/v8/src/compiler/loop-analysis.cc80
-rw-r--r--deps/v8/src/compiler/loop-analysis.h40
-rw-r--r--deps/v8/src/compiler/loop-peeling.cc6
-rw-r--r--deps/v8/src/compiler/loop-peeling.h1
-rw-r--r--deps/v8/src/compiler/loop-unrolling.cc220
-rw-r--r--deps/v8/src/compiler/loop-unrolling.h44
-rw-r--r--deps/v8/src/compiler/machine-graph-verifier.cc24
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.cc30
-rw-r--r--deps/v8/src/compiler/machine-operator.cc74
-rw-r--r--deps/v8/src/compiler/machine-operator.h32
-rw-r--r--deps/v8/src/compiler/memory-lowering.cc18
-rw-r--r--deps/v8/src/compiler/memory-optimizer.cc5
-rw-r--r--deps/v8/src/compiler/node-matchers.h4
-rw-r--r--deps/v8/src/compiler/node-properties.cc30
-rw-r--r--deps/v8/src/compiler/node-properties.h4
-rw-r--r--deps/v8/src/compiler/node.cc7
-rw-r--r--deps/v8/src/compiler/node.h8
-rw-r--r--deps/v8/src/compiler/opcodes.h25
-rw-r--r--deps/v8/src/compiler/operator-properties.cc2
-rw-r--r--deps/v8/src/compiler/pipeline.cc590
-rw-r--r--deps/v8/src/compiler/pipeline.h3
-rw-r--r--deps/v8/src/compiler/property-access-builder.cc43
-rw-r--r--deps/v8/src/compiler/property-access-builder.h10
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.cc2
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h23
-rw-r--r--deps/v8/src/compiler/schedule.cc2
-rw-r--r--deps/v8/src/compiler/serializer-for-background-compilation.cc61
-rw-r--r--deps/v8/src/compiler/simd-scalar-lowering.cc67
-rw-r--r--deps/v8/src/compiler/simd-scalar-lowering.h4
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc7
-rw-r--r--deps/v8/src/compiler/typer.cc2
-rw-r--r--deps/v8/src/compiler/types.cc4
-rw-r--r--deps/v8/src/compiler/verifier.cc5
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc1691
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h185
-rw-r--r--deps/v8/src/d8/OWNERS3
-rw-r--r--deps/v8/src/d8/d8-test.cc230
-rw-r--r--deps/v8/src/d8/d8.cc253
-rw-r--r--deps/v8/src/d8/d8.h11
-rw-r--r--deps/v8/src/debug/OWNERS4
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc47
-rw-r--r--deps/v8/src/debug/debug-frames.cc7
-rw-r--r--deps/v8/src/debug/debug-frames.h2
-rw-r--r--deps/v8/src/debug/debug-interface.cc62
-rw-r--r--deps/v8/src/debug/debug-interface.h26
-rw-r--r--deps/v8/src/debug/debug-scopes.cc5
-rw-r--r--deps/v8/src/debug/debug-stack-trace-iterator.cc16
-rw-r--r--deps/v8/src/debug/debug-wasm-objects-inl.h1
-rw-r--r--deps/v8/src/debug/debug-wasm-objects.cc343
-rw-r--r--deps/v8/src/debug/debug-wasm-objects.h16
-rw-r--r--deps/v8/src/debug/debug.cc353
-rw-r--r--deps/v8/src/debug/debug.h16
-rw-r--r--deps/v8/src/debug/liveedit.cc7
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc2
-rw-r--r--deps/v8/src/deoptimizer/OWNERS1
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer-cfi-builtins.cc4
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer.cc145
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer.h10
-rw-r--r--deps/v8/src/deoptimizer/frame-description.h4
-rw-r--r--deps/v8/src/deoptimizer/translated-state.cc45
-rw-r--r--deps/v8/src/deoptimizer/translated-state.h21
-rw-r--r--deps/v8/src/deoptimizer/translation-array.cc44
-rw-r--r--deps/v8/src/deoptimizer/translation-array.h9
-rw-r--r--deps/v8/src/deoptimizer/translation-opcode.h4
-rw-r--r--deps/v8/src/diagnostics/arm64/disasm-arm64.cc15
-rw-r--r--deps/v8/src/diagnostics/disassembler.cc16
-rw-r--r--deps/v8/src/diagnostics/objects-debug.cc72
-rw-r--r--deps/v8/src/diagnostics/objects-printer.cc130
-rw-r--r--deps/v8/src/diagnostics/perf-jit.cc14
-rw-r--r--deps/v8/src/diagnostics/perf-jit.h4
-rw-r--r--deps/v8/src/diagnostics/ppc/disasm-ppc.cc31
-rw-r--r--deps/v8/src/diagnostics/system-jit-metadata-win.h243
-rw-r--r--deps/v8/src/diagnostics/system-jit-win.cc108
-rw-r--r--deps/v8/src/diagnostics/system-jit-win.h20
-rw-r--r--deps/v8/src/diagnostics/unwinding-info-win64.cc31
-rw-r--r--deps/v8/src/execution/OWNERS4
-rw-r--r--deps/v8/src/execution/arm/frame-constants-arm.h20
-rw-r--r--deps/v8/src/execution/arm/simulator-arm.cc18
-rw-r--r--deps/v8/src/execution/arm64/frame-constants-arm64.h23
-rw-r--r--deps/v8/src/execution/arm64/simulator-arm64.cc14
-rw-r--r--deps/v8/src/execution/execution.cc8
-rw-r--r--deps/v8/src/execution/execution.h2
-rw-r--r--deps/v8/src/execution/frame-constants.h2
-rw-r--r--deps/v8/src/execution/frames-inl.h15
-rw-r--r--deps/v8/src/execution/frames.cc323
-rw-r--r--deps/v8/src/execution/frames.h35
-rw-r--r--deps/v8/src/execution/futex-emulation.cc173
-rw-r--r--deps/v8/src/execution/ia32/frame-constants-ia32.h12
-rw-r--r--deps/v8/src/execution/isolate-inl.h1
-rw-r--r--deps/v8/src/execution/isolate-utils-inl.h32
-rw-r--r--deps/v8/src/execution/isolate-utils.h11
-rw-r--r--deps/v8/src/execution/isolate.cc201
-rw-r--r--deps/v8/src/execution/isolate.h121
-rw-r--r--deps/v8/src/execution/local-isolate.cc6
-rw-r--r--deps/v8/src/execution/local-isolate.h11
-rw-r--r--deps/v8/src/execution/messages.cc4
-rw-r--r--deps/v8/src/execution/ppc/frame-constants-ppc.h10
-rw-r--r--deps/v8/src/execution/ppc/simulator-ppc.cc1203
-rw-r--r--deps/v8/src/execution/ppc/simulator-ppc.h107
-rw-r--r--deps/v8/src/execution/s390/frame-constants-s390.h10
-rw-r--r--deps/v8/src/execution/s390/simulator-s390.cc2
-rw-r--r--deps/v8/src/execution/s390/simulator-s390.h8
-rw-r--r--deps/v8/src/execution/stack-guard.cc35
-rw-r--r--deps/v8/src/execution/stack-guard.h5
-rw-r--r--deps/v8/src/execution/x64/frame-constants-x64.h10
-rw-r--r--deps/v8/src/extensions/statistics-extension.cc19
-rw-r--r--deps/v8/src/flags/flag-definitions.h178
-rw-r--r--deps/v8/src/flags/flags.cc3
-rw-r--r--deps/v8/src/flags/flags.h1
-rw-r--r--deps/v8/src/handles/global-handles.cc4
-rw-r--r--deps/v8/src/heap/base/asm/arm64/push_registers_asm.cc16
-rw-r--r--deps/v8/src/heap/basic-memory-chunk.cc9
-rw-r--r--deps/v8/src/heap/basic-memory-chunk.h15
-rw-r--r--deps/v8/src/heap/collection-barrier.cc76
-rw-r--r--deps/v8/src/heap/collection-barrier.h68
-rw-r--r--deps/v8/src/heap/concurrent-allocator.cc50
-rw-r--r--deps/v8/src/heap/concurrent-marking.cc21
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-heap.cc101
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-heap.h11
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-snapshot.cc13
-rw-r--r--deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h12
-rw-r--r--deps/v8/src/heap/cppgc/compactor.cc12
-rw-r--r--deps/v8/src/heap/cppgc/compactor.h2
-rw-r--r--deps/v8/src/heap/cppgc/explicit-management.cc152
-rw-r--r--deps/v8/src/heap/cppgc/free-list.cc2
-rw-r--r--deps/v8/src/heap/cppgc/free-list.h4
-rw-r--r--deps/v8/src/heap/cppgc/gc-info-table.cc11
-rw-r--r--deps/v8/src/heap/cppgc/gc-info-table.h2
-rw-r--r--deps/v8/src/heap/cppgc/gc-info.cc14
-rw-r--r--deps/v8/src/heap/cppgc/globals.h7
-rw-r--r--deps/v8/src/heap/cppgc/heap-base.cc16
-rw-r--r--deps/v8/src/heap/cppgc/heap-base.h33
-rw-r--r--deps/v8/src/heap/cppgc/heap-object-header.cc14
-rw-r--r--deps/v8/src/heap/cppgc/heap-object-header.h42
-rw-r--r--deps/v8/src/heap/cppgc/heap-page.h1
-rw-r--r--deps/v8/src/heap/cppgc/heap-state.cc16
-rw-r--r--deps/v8/src/heap/cppgc/heap.cc23
-rw-r--r--deps/v8/src/heap/cppgc/heap.h3
-rw-r--r--deps/v8/src/heap/cppgc/marker.cc7
-rw-r--r--deps/v8/src/heap/cppgc/marking-state.h4
-rw-r--r--deps/v8/src/heap/cppgc/marking-verifier.cc2
-rw-r--r--deps/v8/src/heap/cppgc/object-allocator.cc2
-rw-r--r--deps/v8/src/heap/cppgc/object-allocator.h6
-rw-r--r--deps/v8/src/heap/cppgc/object-poisoner.h40
-rw-r--r--deps/v8/src/heap/cppgc/object-size-trait.cc6
-rw-r--r--deps/v8/src/heap/cppgc/persistent-node.cc21
-rw-r--r--deps/v8/src/heap/cppgc/pointer-policies.cc8
-rw-r--r--deps/v8/src/heap/cppgc/stats-collector.cc30
-rw-r--r--deps/v8/src/heap/cppgc/stats-collector.h9
-rw-r--r--deps/v8/src/heap/cppgc/sweeper.cc60
-rw-r--r--deps/v8/src/heap/cppgc/sweeper.h7
-rw-r--r--deps/v8/src/heap/cppgc/testing.cc35
-rw-r--r--deps/v8/src/heap/cppgc/trace-trait.cc5
-rw-r--r--deps/v8/src/heap/embedder-tracing.cc13
-rw-r--r--deps/v8/src/heap/embedder-tracing.h2
-rw-r--r--deps/v8/src/heap/factory-base.cc322
-rw-r--r--deps/v8/src/heap/factory-base.h4
-rw-r--r--deps/v8/src/heap/factory-inl.h4
-rw-r--r--deps/v8/src/heap/factory.cc1577
-rw-r--r--deps/v8/src/heap/factory.h48
-rw-r--r--deps/v8/src/heap/gc-idle-time-handler.cc18
-rw-r--r--deps/v8/src/heap/gc-idle-time-handler.h12
-rw-r--r--deps/v8/src/heap/gc-tracer.cc24
-rw-r--r--deps/v8/src/heap/gc-tracer.h9
-rw-r--r--deps/v8/src/heap/heap-write-barrier.cc5
-rw-r--r--deps/v8/src/heap/heap-write-barrier.h2
-rw-r--r--deps/v8/src/heap/heap.cc191
-rw-r--r--deps/v8/src/heap/heap.h15
-rw-r--r--deps/v8/src/heap/item-parallel-job.cc116
-rw-r--r--deps/v8/src/heap/item-parallel-job.h146
-rw-r--r--deps/v8/src/heap/large-spaces.cc2
-rw-r--r--deps/v8/src/heap/local-heap-inl.h4
-rw-r--r--deps/v8/src/heap/local-heap.cc138
-rw-r--r--deps/v8/src/heap/local-heap.h98
-rw-r--r--deps/v8/src/heap/mark-compact.cc105
-rw-r--r--deps/v8/src/heap/marking-barrier-inl.h1
-rw-r--r--deps/v8/src/heap/marking-barrier.cc10
-rw-r--r--deps/v8/src/heap/marking-barrier.h2
-rw-r--r--deps/v8/src/heap/memory-allocator.cc6
-rw-r--r--deps/v8/src/heap/memory-chunk.cc7
-rw-r--r--deps/v8/src/heap/memory-measurement.cc5
-rw-r--r--deps/v8/src/heap/object-stats.cc4
-rw-r--r--deps/v8/src/heap/objects-visiting-inl.h3
-rw-r--r--deps/v8/src/heap/objects-visiting.h84
-rw-r--r--deps/v8/src/heap/paged-spaces.cc24
-rw-r--r--deps/v8/src/heap/paged-spaces.h8
-rw-r--r--deps/v8/src/heap/read-only-heap-inl.h6
-rw-r--r--deps/v8/src/heap/read-only-heap.cc6
-rw-r--r--deps/v8/src/heap/read-only-heap.h4
-rw-r--r--deps/v8/src/heap/read-only-spaces.cc4
-rw-r--r--deps/v8/src/heap/read-only-spaces.h9
-rw-r--r--deps/v8/src/heap/safepoint.cc120
-rw-r--r--deps/v8/src/heap/safepoint.h25
-rw-r--r--deps/v8/src/heap/scavenger.cc2
-rw-r--r--deps/v8/src/heap/setup-heap-internal.cc16
-rw-r--r--deps/v8/src/heap/weak-object-worklists.cc24
-rw-r--r--deps/v8/src/ic/OWNERS3
-rw-r--r--deps/v8/src/ic/accessor-assembler.cc86
-rw-r--r--deps/v8/src/ic/accessor-assembler.h4
-rw-r--r--deps/v8/src/ic/call-optimization.cc14
-rw-r--r--deps/v8/src/ic/handler-configuration-inl.h4
-rw-r--r--deps/v8/src/ic/handler-configuration.cc9
-rw-r--r--deps/v8/src/ic/ic.cc55
-rw-r--r--deps/v8/src/ic/keyed-store-generic.cc24
-rw-r--r--deps/v8/src/init/OWNERS3
-rw-r--r--deps/v8/src/init/bootstrapper.cc169
-rw-r--r--deps/v8/src/init/heap-symbols.h2
-rw-r--r--deps/v8/src/init/isolate-allocator.cc10
-rw-r--r--deps/v8/src/init/v8.cc28
-rw-r--r--deps/v8/src/inspector/OWNERS7
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.cc44
-rw-r--r--deps/v8/src/inspector/v8-debugger-script.cc28
-rw-r--r--deps/v8/src/inspector/v8-debugger-script.h13
-rw-r--r--deps/v8/src/inspector/v8-debugger.cc4
-rw-r--r--deps/v8/src/inspector/value-mirror.cc16
-rw-r--r--deps/v8/src/interpreter/bytecode-array-accessor.cc367
-rw-r--r--deps/v8/src/interpreter/bytecode-array-accessor.h187
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h19
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.cc355
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.h167
-rw-r--r--deps/v8/src/interpreter/bytecode-array-random-iterator.cc2
-rw-r--r--deps/v8/src/interpreter/bytecode-array-random-iterator.h4
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc64
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h4
-rw-r--r--deps/v8/src/interpreter/bytecode-register.h2
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc28
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.h3
-rw-r--r--deps/v8/src/interpreter/interpreter-generator.cc6
-rw-r--r--deps/v8/src/interpreter/interpreter.cc5
-rw-r--r--deps/v8/src/json/json-parser.cc26
-rw-r--r--deps/v8/src/json/json-parser.h7
-rw-r--r--deps/v8/src/json/json-stringifier.cc4
-rw-r--r--deps/v8/src/libplatform/tracing/OWNERS2
-rw-r--r--deps/v8/src/libplatform/tracing/recorder-win.cc23
-rw-r--r--deps/v8/src/libplatform/tracing/recorder.h13
-rw-r--r--deps/v8/src/libsampler/OWNERS3
-rw-r--r--deps/v8/src/logging/code-events.h46
-rw-r--r--deps/v8/src/logging/counters-definitions.h2
-rw-r--r--deps/v8/src/logging/counters.h15
-rw-r--r--deps/v8/src/logging/log-utils.h3
-rw-r--r--deps/v8/src/logging/log.cc88
-rw-r--r--deps/v8/src/logging/log.h16
-rw-r--r--deps/v8/src/numbers/OWNERS1
-rw-r--r--deps/v8/src/objects/backing-store.cc102
-rw-r--r--deps/v8/src/objects/backing-store.h23
-rw-r--r--deps/v8/src/objects/bigint.cc47
-rw-r--r--deps/v8/src/objects/code-inl.h202
-rw-r--r--deps/v8/src/objects/code.cc72
-rw-r--r--deps/v8/src/objects/code.h91
-rw-r--r--deps/v8/src/objects/compilation-cache-table-inl.h24
-rw-r--r--deps/v8/src/objects/compilation-cache-table.cc53
-rw-r--r--deps/v8/src/objects/compilation-cache-table.h9
-rw-r--r--deps/v8/src/objects/compressed-slots-inl.h36
-rw-r--r--deps/v8/src/objects/compressed-slots.h16
-rw-r--r--deps/v8/src/objects/contexts-inl.h24
-rw-r--r--deps/v8/src/objects/contexts.cc48
-rw-r--r--deps/v8/src/objects/contexts.h22
-rw-r--r--deps/v8/src/objects/contexts.tq5
-rw-r--r--deps/v8/src/objects/descriptor-array-inl.h42
-rw-r--r--deps/v8/src/objects/descriptor-array.h11
-rw-r--r--deps/v8/src/objects/dictionary-inl.h45
-rw-r--r--deps/v8/src/objects/dictionary.h20
-rw-r--r--deps/v8/src/objects/elements.cc34
-rw-r--r--deps/v8/src/objects/embedder-data-slot-inl.h16
-rw-r--r--deps/v8/src/objects/embedder-data-slot.h5
-rw-r--r--deps/v8/src/objects/feedback-vector-inl.h5
-rw-r--r--deps/v8/src/objects/feedback-vector.h2
-rw-r--r--deps/v8/src/objects/field-index-inl.h8
-rw-r--r--deps/v8/src/objects/field-index.h2
-rw-r--r--deps/v8/src/objects/fixed-array-inl.h48
-rw-r--r--deps/v8/src/objects/fixed-array.h27
-rw-r--r--deps/v8/src/objects/foreign-inl.h2
-rw-r--r--deps/v8/src/objects/hash-table-inl.h20
-rw-r--r--deps/v8/src/objects/hash-table.h19
-rw-r--r--deps/v8/src/objects/heap-object.h8
-rw-r--r--deps/v8/src/objects/intl-objects.cc83
-rw-r--r--deps/v8/src/objects/intl-objects.h8
-rw-r--r--deps/v8/src/objects/js-array-buffer-inl.h11
-rw-r--r--deps/v8/src/objects/js-array-buffer.h2
-rw-r--r--deps/v8/src/objects/js-array-inl.h6
-rw-r--r--deps/v8/src/objects/js-array.h14
-rw-r--r--deps/v8/src/objects/js-collator.cc19
-rw-r--r--deps/v8/src/objects/js-date-time-format.cc86
-rw-r--r--deps/v8/src/objects/js-date-time-format.h2
-rw-r--r--deps/v8/src/objects/js-display-names.cc19
-rw-r--r--deps/v8/src/objects/js-function-inl.h61
-rw-r--r--deps/v8/src/objects/js-function.cc32
-rw-r--r--deps/v8/src/objects/js-function.h10
-rw-r--r--deps/v8/src/objects/js-list-format.cc25
-rw-r--r--deps/v8/src/objects/js-number-format.cc20
-rw-r--r--deps/v8/src/objects/js-objects-inl.h161
-rw-r--r--deps/v8/src/objects/js-objects.cc263
-rw-r--r--deps/v8/src/objects/js-objects.h28
-rw-r--r--deps/v8/src/objects/js-objects.tq11
-rw-r--r--deps/v8/src/objects/js-plural-rules.cc26
-rw-r--r--deps/v8/src/objects/js-regexp.cc14
-rw-r--r--deps/v8/src/objects/js-relative-time-format.cc30
-rw-r--r--deps/v8/src/objects/js-segmenter.cc19
-rw-r--r--deps/v8/src/objects/keys.cc43
-rw-r--r--deps/v8/src/objects/literal-objects-inl.h16
-rw-r--r--deps/v8/src/objects/literal-objects.cc49
-rw-r--r--deps/v8/src/objects/literal-objects.h4
-rw-r--r--deps/v8/src/objects/lookup-inl.h11
-rw-r--r--deps/v8/src/objects/lookup.cc191
-rw-r--r--deps/v8/src/objects/lookup.h16
-rw-r--r--deps/v8/src/objects/map-inl.h115
-rw-r--r--deps/v8/src/objects/map-updater.cc88
-rw-r--r--deps/v8/src/objects/map-updater.h40
-rw-r--r--deps/v8/src/objects/map.cc182
-rw-r--r--deps/v8/src/objects/map.h106
-rw-r--r--deps/v8/src/objects/maybe-object-inl.h5
-rw-r--r--deps/v8/src/objects/maybe-object.h2
-rw-r--r--deps/v8/src/objects/name-inl.h10
-rw-r--r--deps/v8/src/objects/object-list-macros.h22
-rw-r--r--deps/v8/src/objects/object-macros-undef.h18
-rw-r--r--deps/v8/src/objects/object-macros.h148
-rw-r--r--deps/v8/src/objects/objects-body-descriptors-inl.h182
-rw-r--r--deps/v8/src/objects/objects-definitions.h115
-rw-r--r--deps/v8/src/objects/objects-inl.h223
-rw-r--r--deps/v8/src/objects/objects.cc195
-rw-r--r--deps/v8/src/objects/objects.h11
-rw-r--r--deps/v8/src/objects/oddball-inl.h2
-rw-r--r--deps/v8/src/objects/ordered-hash-table.cc46
-rw-r--r--deps/v8/src/objects/ordered-hash-table.h6
-rw-r--r--deps/v8/src/objects/property-array-inl.h8
-rw-r--r--deps/v8/src/objects/property-array.h2
-rw-r--r--deps/v8/src/objects/property-descriptor.cc2
-rw-r--r--deps/v8/src/objects/property-details.h11
-rw-r--r--deps/v8/src/objects/property.cc4
-rw-r--r--deps/v8/src/objects/regexp-match-info.h6
-rw-r--r--deps/v8/src/objects/scope-info-inl.h44
-rw-r--r--deps/v8/src/objects/scope-info.cc146
-rw-r--r--deps/v8/src/objects/scope-info.h70
-rw-r--r--deps/v8/src/objects/scope-info.tq49
-rw-r--r--deps/v8/src/objects/script-inl.h45
-rw-r--r--deps/v8/src/objects/script.h24
-rw-r--r--deps/v8/src/objects/shared-function-info-inl.h79
-rw-r--r--deps/v8/src/objects/shared-function-info.cc49
-rw-r--r--deps/v8/src/objects/shared-function-info.h31
-rw-r--r--deps/v8/src/objects/slots-inl.h13
-rw-r--r--deps/v8/src/objects/slots.h12
-rw-r--r--deps/v8/src/objects/stack-frame-info-inl.h6
-rw-r--r--deps/v8/src/objects/stack-frame-info.cc156
-rw-r--r--deps/v8/src/objects/stack-frame-info.h9
-rw-r--r--deps/v8/src/objects/string-inl.h243
-rw-r--r--deps/v8/src/objects/string-table.cc35
-rw-r--r--deps/v8/src/objects/string-table.h4
-rw-r--r--deps/v8/src/objects/string.cc21
-rw-r--r--deps/v8/src/objects/string.h112
-rw-r--r--deps/v8/src/objects/swiss-hash-table-helpers.h44
-rw-r--r--deps/v8/src/objects/swiss-hash-table-helpers.tq174
-rw-r--r--deps/v8/src/objects/swiss-name-dictionary-inl.h107
-rw-r--r--deps/v8/src/objects/swiss-name-dictionary.cc289
-rw-r--r--deps/v8/src/objects/swiss-name-dictionary.h66
-rw-r--r--deps/v8/src/objects/swiss-name-dictionary.tq306
-rw-r--r--deps/v8/src/objects/tagged-field-inl.h14
-rw-r--r--deps/v8/src/objects/tagged-field.h9
-rw-r--r--deps/v8/src/objects/templates-inl.h27
-rw-r--r--deps/v8/src/objects/templates.h14
-rw-r--r--deps/v8/src/objects/templates.tq3
-rw-r--r--deps/v8/src/objects/transitions-inl.h2
-rw-r--r--deps/v8/src/objects/transitions.cc53
-rw-r--r--deps/v8/src/objects/transitions.h15
-rw-r--r--deps/v8/src/objects/value-serializer.cc42
-rw-r--r--deps/v8/src/objects/value-serializer.h9
-rw-r--r--deps/v8/src/objects/visitors.h2
-rw-r--r--deps/v8/src/parsing/OWNERS2
-rw-r--r--deps/v8/src/parsing/parse-info.cc5
-rw-r--r--deps/v8/src/parsing/parse-info.h9
-rw-r--r--deps/v8/src/parsing/parser-base.h24
-rw-r--r--deps/v8/src/parsing/parser.cc44
-rw-r--r--deps/v8/src/parsing/parser.h4
-rw-r--r--deps/v8/src/parsing/preparser.h12
-rw-r--r--deps/v8/src/parsing/scanner-character-streams.cc101
-rw-r--r--deps/v8/src/profiler/OWNERS5
-rw-r--r--deps/v8/src/profiler/cpu-profiler-inl.h11
-rw-r--r--deps/v8/src/profiler/cpu-profiler.cc19
-rw-r--r--deps/v8/src/profiler/cpu-profiler.h8
-rw-r--r--deps/v8/src/profiler/heap-profiler.cc1
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc33
-rw-r--r--deps/v8/src/profiler/profile-generator.cc49
-rw-r--r--deps/v8/src/profiler/profile-generator.h20
-rw-r--r--deps/v8/src/profiler/profiler-listener.cc54
-rw-r--r--deps/v8/src/profiler/profiler-listener.h15
-rw-r--r--deps/v8/src/profiler/weak-code-registry.cc62
-rw-r--r--deps/v8/src/profiler/weak-code-registry.h46
-rw-r--r--deps/v8/src/protobuf/OWNERS2
-rw-r--r--deps/v8/src/regexp/OWNERS2
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc13
-rw-r--r--deps/v8/src/regexp/regexp-dotprinter.cc6
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-tracer.cc7
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.h34
-rw-r--r--deps/v8/src/regexp/regexp-stack.cc5
-rw-r--r--deps/v8/src/regexp/regexp-stack.h3
-rw-r--r--deps/v8/src/regexp/regexp-utils.cc4
-rw-r--r--deps/v8/src/regexp/regexp.cc3
-rw-r--r--deps/v8/src/roots/OWNERS1
-rw-r--r--deps/v8/src/roots/roots.h2
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc17
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc4
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc57
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc24
-rw-r--r--deps/v8/src/runtime/runtime-literals.cc9
-rw-r--r--deps/v8/src/runtime/runtime-object.cc255
-rw-r--r--deps/v8/src/runtime/runtime-promise.cc8
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc102
-rw-r--r--deps/v8/src/runtime/runtime-test-wasm.cc488
-rw-r--r--deps/v8/src/runtime/runtime-test.cc475
-rw-r--r--deps/v8/src/runtime/runtime-trace.cc12
-rw-r--r--deps/v8/src/runtime/runtime-wasm.cc75
-rw-r--r--deps/v8/src/runtime/runtime.cc6
-rw-r--r--deps/v8/src/runtime/runtime.h298
-rw-r--r--deps/v8/src/snapshot/code-serializer.cc24
-rw-r--r--deps/v8/src/snapshot/context-deserializer.cc1
-rw-r--r--deps/v8/src/snapshot/context-serializer.cc3
-rw-r--r--deps/v8/src/snapshot/deserializer.cc2
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-data.cc71
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-data.h39
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-file-writer-interface.h56
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-file-writer.cc6
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-file-writer.h32
-rw-r--r--deps/v8/src/snapshot/object-deserializer.cc1
-rw-r--r--deps/v8/src/snapshot/serializer.cc7
-rw-r--r--deps/v8/src/snapshot/serializer.h15
-rw-r--r--deps/v8/src/snapshot/snapshot.cc5
-rw-r--r--deps/v8/src/strings/OWNERS1
-rw-r--r--deps/v8/src/strings/string-stream.cc2
-rw-r--r--deps/v8/src/third_party/siphash/OWNERS1
-rw-r--r--deps/v8/src/third_party/utf8-decoder/OWNERS1
-rw-r--r--deps/v8/src/torque/ast.h11
-rw-r--r--deps/v8/src/torque/cc-generator.cc55
-rw-r--r--deps/v8/src/torque/class-debug-reader-generator.cc25
-rw-r--r--deps/v8/src/torque/constants.h1
-rw-r--r--deps/v8/src/torque/csa-generator.cc9
-rw-r--r--deps/v8/src/torque/declaration-visitor.cc2
-rw-r--r--deps/v8/src/torque/global-context.cc1
-rw-r--r--deps/v8/src/torque/global-context.h3
-rw-r--r--deps/v8/src/torque/implementation-visitor.cc162
-rw-r--r--deps/v8/src/torque/implementation-visitor.h19
-rw-r--r--deps/v8/src/torque/instructions.cc106
-rw-r--r--deps/v8/src/torque/instructions.h141
-rw-r--r--deps/v8/src/torque/torque-code-generator.cc9
-rw-r--r--deps/v8/src/torque/torque-code-generator.h6
-rw-r--r--deps/v8/src/torque/torque-compiler.cc3
-rw-r--r--deps/v8/src/torque/torque-compiler.h3
-rw-r--r--deps/v8/src/torque/torque-parser.cc22
-rw-r--r--deps/v8/src/torque/torque.cc2
-rw-r--r--deps/v8/src/torque/type-oracle.h8
-rw-r--r--deps/v8/src/torque/type-visitor.cc5
-rw-r--r--deps/v8/src/torque/types.cc56
-rw-r--r--deps/v8/src/torque/types.h16
-rw-r--r--deps/v8/src/torque/utils.cc39
-rw-r--r--deps/v8/src/torque/utils.h2
-rw-r--r--deps/v8/src/tracing/OWNERS3
-rw-r--r--deps/v8/src/tracing/trace-categories.h2
-rw-r--r--deps/v8/src/trap-handler/OWNERS2
-rw-r--r--deps/v8/src/utils/utils.h7
-rw-r--r--deps/v8/src/wasm/OWNERS3
-rw-r--r--deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h103
-rw-r--r--deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h65
-rw-r--r--deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h180
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler-defs.h10
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.cc135
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.h119
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.cc1681
-rw-r--r--deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h54
-rw-r--r--deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h42
-rw-r--r--deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h30
-rw-r--r--deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h326
-rw-r--r--deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h1210
-rw-r--r--deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h212
-rw-r--r--deps/v8/src/wasm/c-api.cc9
-rw-r--r--deps/v8/src/wasm/c-api.h4
-rw-r--r--deps/v8/src/wasm/code-space-access.h4
-rw-r--r--deps/v8/src/wasm/compilation-environment.h12
-rw-r--r--deps/v8/src/wasm/decoder.h4
-rw-r--r--deps/v8/src/wasm/function-body-decoder-impl.h1301
-rw-r--r--deps/v8/src/wasm/function-body-decoder.cc42
-rw-r--r--deps/v8/src/wasm/function-body-decoder.h4
-rw-r--r--deps/v8/src/wasm/function-compiler.cc21
-rw-r--r--deps/v8/src/wasm/function-compiler.h4
-rw-r--r--deps/v8/src/wasm/graph-builder-interface.cc455
-rw-r--r--deps/v8/src/wasm/graph-builder-interface.h6
-rw-r--r--deps/v8/src/wasm/jump-table-assembler.h4
-rw-r--r--deps/v8/src/wasm/leb-helper.h4
-rw-r--r--deps/v8/src/wasm/local-decl-encoder.h4
-rw-r--r--deps/v8/src/wasm/memory-tracing.h4
-rw-r--r--deps/v8/src/wasm/module-compiler.cc13
-rw-r--r--deps/v8/src/wasm/module-compiler.h4
-rw-r--r--deps/v8/src/wasm/module-decoder.cc115
-rw-r--r--deps/v8/src/wasm/module-decoder.h85
-rw-r--r--deps/v8/src/wasm/module-instantiate.cc62
-rw-r--r--deps/v8/src/wasm/module-instantiate.h4
-rw-r--r--deps/v8/src/wasm/object-access.h4
-rw-r--r--deps/v8/src/wasm/signature-map.h4
-rw-r--r--deps/v8/src/wasm/simd-shuffle.cc8
-rw-r--r--deps/v8/src/wasm/simd-shuffle.h12
-rw-r--r--deps/v8/src/wasm/streaming-decoder.h4
-rw-r--r--deps/v8/src/wasm/struct-types.h5
-rw-r--r--deps/v8/src/wasm/value-type.h41
-rw-r--r--deps/v8/src/wasm/wasm-arguments.h4
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.cc235
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.h27
-rw-r--r--deps/v8/src/wasm/wasm-constants.h20
-rw-r--r--deps/v8/src/wasm/wasm-debug.cc202
-rw-r--r--deps/v8/src/wasm/wasm-debug.h21
-rw-r--r--deps/v8/src/wasm/wasm-engine.cc9
-rw-r--r--deps/v8/src/wasm/wasm-engine.h4
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.h4
-rw-r--r--deps/v8/src/wasm/wasm-feature-flags.h47
-rw-r--r--deps/v8/src/wasm/wasm-features.cc10
-rw-r--r--deps/v8/src/wasm/wasm-features.h9
-rw-r--r--deps/v8/src/wasm/wasm-import-wrapper-cache.h4
-rw-r--r--deps/v8/src/wasm/wasm-js.cc98
-rw-r--r--deps/v8/src/wasm/wasm-js.h11
-rw-r--r--deps/v8/src/wasm/wasm-limits.h4
-rw-r--r--deps/v8/src/wasm/wasm-linkage.h118
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.cc28
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.h7
-rw-r--r--deps/v8/src/wasm/wasm-module-sourcemap.h4
-rw-r--r--deps/v8/src/wasm/wasm-module.h9
-rw-r--r--deps/v8/src/wasm/wasm-objects-inl.h29
-rw-r--r--deps/v8/src/wasm/wasm-objects.cc145
-rw-r--r--deps/v8/src/wasm/wasm-objects.h8
-rw-r--r--deps/v8/src/wasm/wasm-opcodes-inl.h32
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.cc10
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.h157
-rw-r--r--deps/v8/src/wasm/wasm-result.h4
-rw-r--r--deps/v8/src/wasm/wasm-serialization.cc1
-rw-r--r--deps/v8/src/wasm/wasm-serialization.h4
-rw-r--r--deps/v8/src/wasm/wasm-subtyping.cc8
-rw-r--r--deps/v8/src/wasm/wasm-subtyping.h4
-rw-r--r--deps/v8/src/wasm/wasm-tier.h4
-rw-r--r--deps/v8/src/wasm/wasm-value.h75
-rw-r--r--deps/v8/src/web-snapshot/OWNERS4
-rw-r--r--deps/v8/src/web-snapshot/web-snapshot.cc845
-rw-r--r--deps/v8/src/web-snapshot/web-snapshot.h181
-rw-r--r--deps/v8/src/zone/OWNERS1
-rw-r--r--deps/v8/test/BUILD.gn46
-rw-r--r--deps/v8/test/cctest/BUILD.gn112
-rw-r--r--deps/v8/test/cctest/OWNERS2
-rw-r--r--deps/v8/test/cctest/cctest.cc2
-rw-r--r--deps/v8/test/cctest/cctest.h13
-rw-r--r--deps/v8/test/cctest/cctest.status12
-rw-r--r--deps/v8/test/cctest/compiler/node-observer-tester.h1
-rw-r--r--deps/v8/test/cctest/compiler/test-code-generator.cc27
-rw-r--r--deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc5
-rw-r--r--deps/v8/test/cctest/compiler/test-jump-threading.cc182
-rw-r--r--deps/v8/test/cctest/compiler/test-linkage.cc6
-rw-r--r--deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc7
-rw-r--r--deps/v8/test/cctest/compiler/test-run-machops.cc111
-rw-r--r--deps/v8/test/cctest/compiler/test-run-retpoline.cc33
-rw-r--r--deps/v8/test/cctest/compiler/test-run-tail-calls.cc31
-rw-r--r--deps/v8/test/cctest/compiler/test-sloppy-equality.cc1
-rw-r--r--deps/v8/test/cctest/heap/test-alloc.cc2
-rw-r--r--deps/v8/test/cctest/heap/test-compaction.cc8
-rw-r--r--deps/v8/test/cctest/heap/test-concurrent-allocation.cc109
-rw-r--r--deps/v8/test/cctest/heap/test-heap.cc16
-rw-r--r--deps/v8/test/cctest/heap/test-write-barrier.cc1
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden16
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden16
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden12
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden4
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden10
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden16
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden22
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden30
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden10
-rw-r--r--deps/v8/test/cctest/interpreter/test-bytecode-generator.cc1
-rw-r--r--deps/v8/test/cctest/interpreter/test-interpreter.cc6
-rw-r--r--deps/v8/test/cctest/test-accessors.cc38
-rw-r--r--deps/v8/test/cctest/test-api-array-buffer.cc190
-rw-r--r--deps/v8/test/cctest/test-api-interceptors.cc129
-rw-r--r--deps/v8/test/cctest/test-api-stack-traces.cc282
-rw-r--r--deps/v8/test/cctest/test-api-typed-array.cc4
-rw-r--r--deps/v8/test/cctest/test-api.cc290
-rw-r--r--deps/v8/test/cctest/test-assembler-arm64.cc57
-rw-r--r--deps/v8/test/cctest/test-code-pages.cc27
-rw-r--r--deps/v8/test/cctest/test-code-stub-assembler.cc184
-rw-r--r--deps/v8/test/cctest/test-compiler.cc7
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc76
-rw-r--r--deps/v8/test/cctest/test-debug-helper.cc25
-rw-r--r--deps/v8/test/cctest/test-debug.cc76
-rw-r--r--deps/v8/test/cctest/test-descriptor-array.cc14
-rw-r--r--deps/v8/test/cctest/test-disasm-arm64.cc18
-rw-r--r--deps/v8/test/cctest/test-field-type-tracking.cc206
-rw-r--r--deps/v8/test/cctest/test-flags.cc2
-rw-r--r--deps/v8/test/cctest/test-func-name-inference.cc3
-rw-r--r--deps/v8/test/cctest/test-hashcode.cc12
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc8
-rw-r--r--deps/v8/test/cctest/test-icache.cc7
-rw-r--r--deps/v8/test/cctest/test-js-to-wasm.cc40
-rw-r--r--deps/v8/test/cctest/test-js-weak-refs.cc19
-rw-r--r--deps/v8/test/cctest/test-log.cc140
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-x64.cc10
-rw-r--r--deps/v8/test/cctest/test-object.cc95
-rw-r--r--deps/v8/test/cctest/test-parsing.cc13
-rw-r--r--deps/v8/test/cctest/test-poison-disasm-arm64.cc4
-rw-r--r--deps/v8/test/cctest/test-profile-generator.cc61
-rw-r--r--deps/v8/test/cctest/test-serialize.cc36
-rw-r--r--deps/v8/test/cctest/test-strings.cc78
-rw-r--r--deps/v8/test/cctest/test-swiss-name-dictionary-csa.cc466
-rw-r--r--deps/v8/test/cctest/test-swiss-name-dictionary-infra.cc139
-rw-r--r--deps/v8/test/cctest/test-swiss-name-dictionary-infra.h321
-rw-r--r--deps/v8/test/cctest/test-swiss-name-dictionary-shared-tests.h942
-rw-r--r--deps/v8/test/cctest/test-swiss-name-dictionary.cc150
-rw-r--r--deps/v8/test/cctest/test-typedarrays.cc24
-rw-r--r--deps/v8/test/cctest/test-verifiers.cc4
-rw-r--r--deps/v8/test/cctest/test-web-snapshots.cc131
-rw-r--r--deps/v8/test/cctest/wasm/test-backing-store.cc (renamed from deps/v8/test/cctest/test-backing-store.cc)5
-rw-r--r--deps/v8/test/cctest/wasm/test-gc.cc167
-rw-r--r--deps/v8/test/cctest/wasm/test-grow-memory.cc2
-rw-r--r--deps/v8/test/cctest/wasm/test-liftoff-inspection.cc82
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-64.cc2
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc177
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc8
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-memory64.cc20
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-module.cc12
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-relaxed-simd.cc239
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-simd-scalar-lowering.cc6
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-simd.cc1461
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm.cc55
-rw-r--r--deps/v8/test/cctest/wasm/test-streaming-compilation.cc2
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc4
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-metrics.cc1
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-serialization.cc4
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-stack.cc19
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.cc19
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.h20
-rw-r--r--deps/v8/test/cctest/wasm/wasm-simd-utils.cc752
-rw-r--r--deps/v8/test/cctest/wasm/wasm-simd-utils.h177
-rw-r--r--deps/v8/test/common/wasm/test-signatures.h4
-rw-r--r--deps/v8/test/common/wasm/wasm-interpreter.cc194
-rw-r--r--deps/v8/test/common/wasm/wasm-macro-gen.h2
-rw-r--r--deps/v8/test/common/wasm/wasm-module-runner.cc7
-rw-r--r--deps/v8/test/debugger/debug/debug-break-class-fields.js100
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-step-into-class-extends.js2
-rw-r--r--deps/v8/test/debugger/debugger.status6
-rw-r--r--deps/v8/test/debugger/regress/regress-crbug-1199681.js52
-rw-r--r--deps/v8/test/debugging/wasm/gdb-server/test_files/test_memory.js4
-rw-r--r--deps/v8/test/fuzzer/BUILD.gn17
-rw-r--r--deps/v8/test/fuzzer/fuzzer-support.cc2
-rw-r--r--deps/v8/test/fuzzer/fuzzer.status6
-rw-r--r--deps/v8/test/fuzzer/inspector-fuzzer.cc4
-rw-r--r--deps/v8/test/fuzzer/wasm-async.cc14
-rw-r--r--deps/v8/test/fuzzer/wasm-compile.cc167
-rw-r--r--deps/v8/test/fuzzer/wasm-fuzzer-common.cc25
-rw-r--r--deps/v8/test/fuzzer/wasm-fuzzer-common.h2
-rw-r--r--deps/v8/test/fuzzer/wasm.cc14
-rw-r--r--deps/v8/test/fuzzer/wasm/regress-1191853.wasmbin0 -> 25 bytes
-rw-r--r--deps/v8/test/inspector/BUILD.gn1
-rw-r--r--deps/v8/test/inspector/debugger/break-locations-await-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/break-locations-var-init-expected.txt2
-rw-r--r--deps/v8/test/inspector/debugger/get-possible-breakpoints-after-gc-expected.txt7
-rw-r--r--deps/v8/test/inspector/debugger/get-possible-breakpoints-after-gc.js60
-rw-r--r--deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt6
-rw-r--r--deps/v8/test/inspector/debugger/regress-1190290-expected.txt10
-rw-r--r--deps/v8/test/inspector/debugger/regress-1190290.js42
-rw-r--r--deps/v8/test/inspector/debugger/regression-1185540-expected.txt2
-rw-r--r--deps/v8/test/inspector/debugger/regression-1185540.js34
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint-before-enabling-expected.txt14
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint-before-enabling.js9
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint-breaks-on-first-breakable-location-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint-expected.txt8
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint-in-class-initializer-expected.txt66
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint-in-class-initializer.js75
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint-inline-function-expected.txt11
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint-inline-function.js31
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint.js16
-rw-r--r--deps/v8/test/inspector/debugger/wasm-gc-breakpoints-expected.txt31
-rw-r--r--deps/v8/test/inspector/debugger/wasm-gc-breakpoints.js214
-rw-r--r--deps/v8/test/inspector/debugger/wasm-gc-in-debug-break-expected.txt10
-rw-r--r--deps/v8/test/inspector/debugger/wasm-gc-in-debug-break.js50
-rw-r--r--deps/v8/test/inspector/debugger/wasm-get-breakable-locations-byte-offsets.js4
-rw-r--r--deps/v8/test/inspector/debugger/wasm-inspect-many-registers.js9
-rw-r--r--deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint-expected.txt34
-rw-r--r--deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint.js59
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt37
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scripts.js2
-rw-r--r--deps/v8/test/inspector/debugger/wasm-set-breakpoint-breaks-on-first-breakable-location.js4
-rw-r--r--deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt53
-rw-r--r--deps/v8/test/inspector/debugger/wasm-set-breakpoint.js4
-rw-r--r--deps/v8/test/inspector/debugger/wasm-source.js2
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stack-check.js13
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stack.js2
-rw-r--r--deps/v8/test/inspector/debugger/wasm-step-from-non-breakable-position.js2
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-no-opcode-merging.js11
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-with-skiplist.js4
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js13
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping.js4
-rw-r--r--deps/v8/test/inspector/inspector-test.cc5
-rw-r--r--deps/v8/test/inspector/inspector.status4
-rw-r--r--deps/v8/test/inspector/isolate-data.cc9
-rw-r--r--deps/v8/test/inspector/isolate-data.h3
-rw-r--r--deps/v8/test/inspector/regress/regress-crbug-1183664-expected.txt19
-rw-r--r--deps/v8/test/inspector/regress/regress-crbug-1183664.js39
-rw-r--r--deps/v8/test/inspector/regress/regress-crbug-1199919-expected.txt9
-rw-r--r--deps/v8/test/inspector/regress/regress-crbug-1199919.js44
-rw-r--r--deps/v8/test/inspector/runtime/get-properties-expected.txt3
-rw-r--r--deps/v8/test/inspector/runtime/get-properties.js4
-rw-r--r--deps/v8/test/inspector/task-runner.cc2
-rw-r--r--deps/v8/test/inspector/wasm-inspector-test.js8
-rw-r--r--deps/v8/test/intl/displaynames/getoptionsobject.js20
-rw-r--r--deps/v8/test/intl/intl.status5
-rw-r--r--deps/v8/test/intl/list-format/getoptionsobject.js20
-rw-r--r--deps/v8/test/intl/regress-11595.js23
-rw-r--r--deps/v8/test/intl/segmenter/getoptionsobject.js20
-rw-r--r--deps/v8/test/js-perf-test/OWNERS2
-rw-r--r--deps/v8/test/message/fail/await-non-async.out4
-rw-r--r--deps/v8/test/message/fail/wasm-exception-rethrow.js2
-rw-r--r--deps/v8/test/message/fail/weak-refs-finalizationregistry1.js2
-rw-r--r--deps/v8/test/message/fail/weak-refs-finalizationregistry1.out4
-rw-r--r--deps/v8/test/message/fail/weak-refs-finalizationregistry2.js2
-rw-r--r--deps/v8/test/message/fail/weak-refs-finalizationregistry2.out4
-rw-r--r--deps/v8/test/message/fail/weak-refs-register1.js2
-rw-r--r--deps/v8/test/message/fail/weak-refs-register1.out4
-rw-r--r--deps/v8/test/message/fail/weak-refs-register2.js2
-rw-r--r--deps/v8/test/message/fail/weak-refs-register2.out4
-rw-r--r--deps/v8/test/message/fail/weak-refs-unregister.js2
-rw-r--r--deps/v8/test/message/fail/weak-refs-unregister.out4
-rw-r--r--deps/v8/test/message/message.status9
-rw-r--r--deps/v8/test/message/weakref-finalizationregistry-error.js2
-rw-r--r--deps/v8/test/mjsunit/array-bounds-check-removal.js6
-rw-r--r--deps/v8/test/mjsunit/array-sort.js16
-rw-r--r--deps/v8/test/mjsunit/array-store-and-grow.js12
-rw-r--r--deps/v8/test/mjsunit/baseline/cross-realm.js55
-rw-r--r--deps/v8/test/mjsunit/baseline/test-baseline-module.mjs2
-rw-r--r--deps/v8/test/mjsunit/baseline/test-baseline.js32
-rw-r--r--deps/v8/test/mjsunit/baseline/verify-bytecode-offsets.js37
-rw-r--r--deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js9
-rw-r--r--deps/v8/test/mjsunit/compiler/fast-api-calls.js148
-rw-r--r--deps/v8/test/mjsunit/compiler/load-elimination-const-field.js18
-rw-r--r--deps/v8/test/mjsunit/compiler/monomorphic-named-load-with-no-map.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js14
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1215514.js7
-rw-r--r--deps/v8/test/mjsunit/compiler/serializer-accessors.js7
-rw-r--r--deps/v8/test/mjsunit/compiler/serializer-dead-after-jump.js7
-rw-r--r--deps/v8/test/mjsunit/compiler/serializer-dead-after-return.js7
-rw-r--r--deps/v8/test/mjsunit/compiler/serializer-transition-propagation.js7
-rw-r--r--deps/v8/test/mjsunit/const-dict-tracking.js472
-rw-r--r--deps/v8/test/mjsunit/const-field-tracking-2.js3
-rw-r--r--deps/v8/test/mjsunit/const-field-tracking.js3
-rw-r--r--deps/v8/test/mjsunit/constant-folding-2.js2
-rw-r--r--deps/v8/test/mjsunit/ensure-growing-store-learns.js6
-rw-r--r--deps/v8/test/mjsunit/es6/collections-constructor-custom-iterator.js2
-rw-r--r--deps/v8/test/mjsunit/es6/collections-constructor-with-modified-array-prototype.js2
-rw-r--r--deps/v8/test/mjsunit/es6/collections-constructor-with-modified-protoype.js2
-rw-r--r--deps/v8/test/mjsunit/es6/super-ic-opt-no-turboprop.js7
-rw-r--r--deps/v8/test/mjsunit/es6/super-ic-opt.js22
-rw-r--r--deps/v8/test/mjsunit/field-type-tracking.js7
-rw-r--r--deps/v8/test/mjsunit/harmony/import-from-evaluation-errored.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-15.mjs19
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/basics.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-optional.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/clearkeptobjects-on-quit.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-and-weakref.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-keeps-holdings-alive.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/reentrant-gc-from-cleanup.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/two-weakrefs.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/weakref-creation-keeps-alive.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/weakref-deref-keeps-alive.js2
-rw-r--r--deps/v8/test/mjsunit/mjsunit.js10
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status73
-rw-r--r--deps/v8/test/mjsunit/promise-hooks.js275
-rw-r--r--deps/v8/test/mjsunit/proto-accessor-not-accessible.js43
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-673297.js (renamed from deps/v8/test/mjsunit/regress/regress-673297.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-743622.js (renamed from deps/v8/test/mjsunit/regress/regress-743622.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1067270.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1146880.js26
-rw-r--r--deps/v8/test/mjsunit/regress/regress-11491.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-11519.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1181240.js46
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1185072.js26
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1187170.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1193903.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-673241.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-7115.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-923723.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-992389.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-chromium-1194026.js69
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1161847-2.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1161847-3.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1191886.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1195331.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-9534.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/condition-change-during-branch-elimination.js49
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1027410.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1034394.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1074586.js16
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1075953.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-10831.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-10898.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1101304.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1145135.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1146861.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1153442.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1161654.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1179182.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1184964.js11
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1185464.js38
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1187831.js30
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1188825.js28
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1188975.js21
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1189454.js218
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1197393.js35
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1201340.js13
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-5800.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7353.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7366.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-782280.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-791810.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-793551.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-842501.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-8533.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-854050.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-905815.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-913804.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-917412.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-917588b.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-919533.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-922933.js12
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-924843.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-968078.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-9759.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-9832.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-crbug-1168612.js32
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress1192313.js30
-rw-r--r--deps/v8/test/mjsunit/shared-function-tier-up-turbo.js2
-rw-r--r--deps/v8/test/mjsunit/tools/foozzie.js9
-rw-r--r--deps/v8/test/mjsunit/wasm/atomics-stress.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/atomics.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/atomics64-stress.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/compare-exchange-stress.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/compare-exchange64-stress.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-hints-streaming-compilation.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/compiled-module-serialization.js14
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-rethrow.js14
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-shared.js8
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-simd.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions.js119
-rw-r--r--deps/v8/test/mjsunit/wasm/externref.js25
-rw-r--r--deps/v8/test/mjsunit/wasm/globals.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory-in-branch.js16
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory-in-call.js16
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory-in-loop.js16
-rw-r--r--deps/v8/test/mjsunit/wasm/loop-rotation.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/loop-unrolling.js49
-rw-r--r--deps/v8/test/mjsunit/wasm/memory64.js25
-rw-r--r--deps/v8/test/mjsunit/wasm/module-memory.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/multiple-code-spaces.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/reference-tables.js157
-rw-r--r--deps/v8/test/mjsunit/wasm/simd-i64x2-mul.js39
-rw-r--r--deps/v8/test/mjsunit/wasm/stack.js46
-rw-r--r--deps/v8/test/mjsunit/wasm/streaming-error-position.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/table-access.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/trap-location.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/unreachable-validation.js14
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-gc-js-roundtrip.js149
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-module-builder.js433
-rw-r--r--deps/v8/test/mkgrokdump/BUILD.gn1
-rw-r--r--deps/v8/test/test262/test262.status17
-rw-r--r--deps/v8/test/test262/testcfg.py3
-rw-r--r--deps/v8/test/unittests/BUILD.gn59
-rw-r--r--deps/v8/test/unittests/api/access-check-unittest.cc1
-rw-r--r--deps/v8/test/unittests/base/logging-unittest.cc25
-rw-r--r--deps/v8/test/unittests/base/vlq-unittest.cc123
-rw-r--r--deps/v8/test/unittests/codegen/aligned-slot-allocator-unittest.cc175
-rw-r--r--deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc1
-rw-r--r--deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc266
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.h6
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/csa-load-elimination-unittest.cc155
-rw-r--r--deps/v8/test/unittests/compiler/frame-unittest.cc242
-rw-r--r--deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc44
-rw-r--r--deps/v8/test/unittests/compiler/int64-lowering-unittest.cc110
-rw-r--r--deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc16
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc105
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h11
-rw-r--r--deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc288
-rw-r--r--deps/v8/test/unittests/execution/microtask-queue-unittest.cc1
-rw-r--r--deps/v8/test/unittests/heap/cppgc/compactor-unittest.cc12
-rw-r--r--deps/v8/test/unittests/heap/cppgc/concurrent-marking-unittest.cc11
-rw-r--r--deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc73
-rw-r--r--deps/v8/test/unittests/heap/cppgc/explicit-management-unittest.cc194
-rw-r--r--deps/v8/test/unittests/heap/cppgc/free-list-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/cppgc/gc-info-unittest.cc184
-rw-r--r--deps/v8/test/unittests/heap/cppgc/heap-object-header-unittest.cc8
-rw-r--r--deps/v8/test/unittests/heap/cppgc/heap-page-unittest.cc8
-rw-r--r--deps/v8/test/unittests/heap/cppgc/marker-unittest.cc79
-rw-r--r--deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc60
-rw-r--r--deps/v8/test/unittests/heap/cppgc/persistent-family-unittest.cc16
-rw-r--r--deps/v8/test/unittests/heap/cppgc/stats-collector-unittest.cc8
-rw-r--r--deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc4
-rw-r--r--deps/v8/test/unittests/heap/cppgc/testing-unittest.cc8
-rw-r--r--deps/v8/test/unittests/heap/cppgc/tests.h2
-rw-r--r--deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc76
-rw-r--r--deps/v8/test/unittests/heap/item-parallel-job-unittest.cc306
-rw-r--r--deps/v8/test/unittests/heap/local-heap-unittest.cc7
-rw-r--r--deps/v8/test/unittests/heap/unified-heap-snapshot-unittest.cc9
-rw-r--r--deps/v8/test/unittests/heap/unified-heap-unittest.cc38
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc6
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc4
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc138
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h3
-rw-r--r--deps/v8/test/unittests/numbers/conversions-unittest.cc5
-rw-r--r--deps/v8/test/unittests/objects/object-unittest.cc6
-rw-r--r--deps/v8/test/unittests/objects/value-serializer-unittest.cc33
-rw-r--r--deps/v8/test/unittests/objects/wasm-backing-store-unittest.cc (renamed from deps/v8/test/unittests/objects/backing-store-unittest.cc)3
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc109
-rw-r--r--deps/v8/test/unittests/wasm/liftoff-register-unittests.cc41
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-memory64-unittest.cc1
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc77
-rw-r--r--deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc44
-rw-r--r--deps/v8/test/wasm-api-tests/BUILD.gn6
-rw-r--r--deps/v8/test/wasm-api-tests/wasm-api-tests.status6
-rw-r--r--deps/v8/test/wasm-js/testcfg.py11
-rw-r--r--deps/v8/test/wasm-js/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/wasm-js/wasm-js.status15
-rw-r--r--deps/v8/test/wasm-spec-tests/testcfg.py12
-rw-r--r--deps/v8/test/wasm-spec-tests/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/wasm-spec-tests/wasm-spec-tests.status16
-rw-r--r--deps/v8/third_party/v8/builtins/array-sort.tq4
-rw-r--r--deps/v8/third_party/zlib/google/zip_reader.cc2
-rw-r--r--deps/v8/tools/arguments.mjs6
-rwxr-xr-xdeps/v8/tools/bash-completion.sh113
-rw-r--r--deps/v8/tools/callstats.html1126
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/exceptions.js1
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/mutators/function_call_mutator.js26
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/resources/differential_fuzz_v8.js2
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_function_calls.js16
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_function_call_baseline_expected.js16
-rw-r--r--deps/v8/tools/clusterfuzz/v8_foozzie_harness_adjust.js4
-rw-r--r--deps/v8/tools/clusterfuzz/v8_fuzz_flags.json3
-rw-r--r--deps/v8/tools/debug_helper/BUILD.gn1
-rw-r--r--deps/v8/tools/debug_helper/debug-helper-internal.cc2
-rw-r--r--deps/v8/tools/debug_helper/get-object-properties.cc104
-rwxr-xr-xdeps/v8/tools/dev/gm.py100
-rw-r--r--deps/v8/tools/dumpcpp.mjs4
-rwxr-xr-xdeps/v8/tools/find-builtin24
-rw-r--r--deps/v8/tools/gcmole/gcmole.py3
-rw-r--r--deps/v8/tools/ic-processor-driver.mjs8
-rw-r--r--deps/v8/tools/index.html13
-rw-r--r--deps/v8/tools/ninja/ninja_output.py44
-rw-r--r--deps/v8/tools/profview/profile-utils.js6
-rw-r--r--deps/v8/tools/profview/profview.js4
-rwxr-xr-xdeps/v8/tools/release/auto_roll.py3
-rw-r--r--deps/v8/tools/release/git_recipes.py7
-rwxr-xr-xdeps/v8/tools/release/test_scripts.py4
-rw-r--r--deps/v8/tools/system-analyzer/index.css2
-rw-r--r--deps/v8/tools/testrunner/base_runner.py9
-rw-r--r--deps/v8/tools/testrunner/local/junit_output.py49
-rw-r--r--deps/v8/tools/testrunner/local/variants.py47
-rw-r--r--deps/v8/tools/testrunner/testproc/progress.py40
-rw-r--r--deps/v8/tools/tickprocessor.mjs2
-rw-r--r--deps/v8/tools/v8heapconst.py418
-rw-r--r--deps/v8/tools/v8windbg/BUILD.gn2
-rw-r--r--deps/v8/tools/v8windbg/README.md4
-rw-r--r--deps/v8/tools/v8windbg/src/cur-isolate.h2
-rw-r--r--deps/v8/tools/v8windbg/src/js-stack.cc229
-rw-r--r--deps/v8/tools/v8windbg/src/js-stack.h98
-rw-r--r--deps/v8/tools/v8windbg/src/v8windbg-extension.cc4
-rw-r--r--deps/v8/tools/v8windbg/test/v8windbg-test.cc26
-rw-r--r--deps/v8/tools/vim/ninja-build.vim14
-rw-r--r--deps/v8/tools/vim/ninja_output.py72
-rwxr-xr-xdeps/v8/tools/wasm/update-wasm-spec-tests.sh2
-rw-r--r--deps/v8/tools/whitespace.txt4
1300 files changed, 51965 insertions, 25892 deletions
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index a27cf5ef0a1..07644af9d18 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -90,6 +90,7 @@ David Manouchehri <david@davidmanouchehri.com>
Deepak Mohan <hop2deep@gmail.com>
Deon Dior <diaoyuanjie@gmail.com>
Derek Tu <derek.t@rioslab.org>
+Dominic Chen <d.c.ddcc@gmail.com>
Dominic Farolini <domfarolino@gmail.com>
Douglas Crosher <dtc-v8@scieneer.com>
Dusan Milosavljevic <dusan.m.milosavljevic@gmail.com>
@@ -168,6 +169,7 @@ Milton Chiang <milton.chiang@mediatek.com>
Mu Tao <pamilty@gmail.com>
Myeong-bo Shim <m0609.shim@samsung.com>
Nicolas Antonius Ernst Leopold Maria Kaiser <nikai@nikai.net>
+Niek van der Maas <mail@niekvandermaas.nl>
Niklas Hambüchen <mail@nh2.me>
Noj Vek <nojvek@gmail.com>
Oleksandr Chekhovskyi <oleksandr.chekhovskyi@gmail.com>
@@ -209,7 +211,6 @@ Seo Sanghyeon <sanxiyn@gmail.com>
Shawn Anastasio <shawnanastasio@gmail.com>
Shawn Presser <shawnpresser@gmail.com>
Stefan Penner <stefan.penner@gmail.com>
-Stephen Belanger <stephen.belanger@datadoghq.com>
Sylvestre Ledru <sledru@mozilla.com>
Taketoshi Aono <brn@b6n.ch>
Tao Liqiang <taolq@outlook.com>
@@ -237,6 +238,7 @@ Yi Wang <wangyi8848@gmail.com>
Yong Wang <ccyongwang@tencent.com>
Youfeng Hao <ajihyf@gmail.com>
Yu Yin <xwafish@gmail.com>
+Yusif Khudhur <yusif.khudhur@gmail.com>
Zac Hansen <xaxxon@gmail.com>
Zeynep Cankara <zeynepcankara402@gmail.com>
Zhao Jiazhong <kyslie3100@gmail.com>
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index a9ab6783fa6..d2bfb6129dc 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -41,7 +41,7 @@ declare_args() {
  v8_enable_future = false

  # Sets -DSYSTEM_INSTRUMENTATION. Enables OS-dependent event tracing
-  v8_enable_system_instrumentation = false
+  v8_enable_system_instrumentation = true

  # Sets the GUID for the ETW provider
v8_etw_guid = ""
@@ -108,6 +108,7 @@ declare_args() {
# Enable pointer compression (sets -dV8_COMPRESS_POINTERS).
v8_enable_pointer_compression = ""
+ v8_enable_pointer_compression_shared_cage = ""
v8_enable_31bit_smis_on_64bit_arch = false
# Sets -dOBJECT_PRINT.
@@ -168,6 +169,10 @@ declare_args() {
# Enables various testing features.
  v8_enable_test_features = ""

+  # Enable short builtins call instruction sequences by un-embedding builtins.
+ # Sets -dV8_SHORT_BUILTIN_CALLS
+ v8_enable_short_builtin_calls = ""
+
# With post mortem support enabled, metadata is embedded into libv8 that
# describes various parameters of the VM for use by debuggers. See
# tools/gen-postmortem-metadata.py for details.
@@ -251,6 +256,9 @@ declare_args() {
# file generation
v8_verify_torque_generation_invariance = false
+ # Generate comments describing the Torque intermediate representation.
+ v8_annotate_torque_ir = false
+
# Disable all snapshot compression.
v8_enable_snapshot_compression = true
@@ -279,9 +287,9 @@ declare_args() {
# Requires use_rtti = true
v8_enable_precise_zone_stats = false
- # Experimental feature for always keeping prototypes in dict/"slow" mode
- # Sets -DV8_DICT_MODE_PROTOTYPES
- v8_dict_mode_prototypes = false
+ # Experimental feature that uses SwissNameDictionary instead of NameDictionary
+ # as the backing store for all dictionary mode objects.
+ v8_enable_swiss_name_dictionary = false
# If enabled then macro definitions that are used in externally visible
# header files are placed in a separate header file v8-gn.h.
@@ -324,6 +332,9 @@ if (v8_enable_pointer_compression == "") {
v8_enable_pointer_compression =
v8_current_cpu == "arm64" || v8_current_cpu == "x64"
}
+if (v8_enable_pointer_compression_shared_cage == "") {
+ v8_enable_pointer_compression_shared_cage = false
+}
if (v8_enable_fast_torque == "") {
v8_enable_fast_torque = v8_enable_fast_mksnapshot
}
@@ -333,6 +344,10 @@ if (v8_enable_zone_compression == "") {
if (v8_enable_heap_sandbox == "") {
v8_enable_heap_sandbox = false
}
+if (v8_enable_short_builtin_calls == "") {
+ v8_enable_short_builtin_calls =
+ v8_current_cpu == "x64" || (!is_android && v8_current_cpu == "arm64")
+}
if (v8_enable_single_generation == "") {
v8_enable_single_generation = v8_disable_write_barriers
}
@@ -362,6 +377,13 @@ if (v8_multi_arch_build &&
rebase_path(get_label_info(":d8", "root_out_dir"), root_build_dir) ==
"clang_x64_pointer_compression") {
v8_enable_pointer_compression = !v8_enable_pointer_compression
+ v8_enable_pointer_compression_shared_cage = v8_enable_pointer_compression
+}
+if (v8_enable_short_builtin_calls &&
+ (!v8_enable_pointer_compression || v8_control_flow_integrity)) {
+ # Disable short calls when pointer compression is not enabled.
+ # Or when CFI is enabled (until the CFI-related issues are fixed).
+ v8_enable_short_builtin_calls = false
}
if (v8_enable_shared_ro_heap == "") {
v8_enable_shared_ro_heap = !v8_enable_pointer_compression
@@ -382,12 +404,20 @@ if (v8_enable_shared_ro_heap && v8_enable_pointer_compression) {
"Sharing read-only heap with pointer compression is only supported on Linux or Android")
}
+assert(
+ !v8_enable_pointer_compression_shared_cage || !v8_enable_shared_ro_heap,
+ "Sharing read-only heap is not yet supported when sharing a pointer compression cage")
+
assert(!v8_use_multi_snapshots || !v8_control_flow_integrity,
"Control-flow integrity does not support multisnapshots")
assert(!v8_enable_heap_sandbox || v8_enable_pointer_compression,
"V8 Heap Sandbox requires pointer compression")
+assert(
+ !v8_enable_pointer_compression_shared_cage || v8_enable_pointer_compression,
+ "Can't share a pointer compression cage if pointers aren't compressed")
+
assert(!v8_enable_unconditional_write_barriers || !v8_disable_write_barriers,
"Write barriers can't be both enabled and disabled")
@@ -489,11 +519,6 @@ config("cppgc_base_config") {
}
}
-# This config should be applied to code using the libsampler.
-config("libsampler_config") {
- include_dirs = [ "include" ]
-}
-
# This config is only applied to v8_headers and is the basis for external_config
# but without setting the USING_V8_SHARED define, which means v8_headers can be
# used inside v8 itself.
@@ -532,6 +557,8 @@ config("external_startup_data") {
external_v8_defines = [
"V8_ENABLE_CHECKS",
"V8_COMPRESS_POINTERS",
+ "V8_COMPRESS_POINTERS_IN_SHARED_CAGE",
+ "V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE",
"V8_31BIT_SMIS_ON_64BIT_ARCH",
"V8_COMPRESS_ZONES",
"V8_HEAP_SANDBOX",
@@ -549,6 +576,11 @@ if (v8_enable_v8_checks) {
if (v8_enable_pointer_compression) {
enabled_external_v8_defines += [ "V8_COMPRESS_POINTERS" ]
}
+if (v8_enable_pointer_compression_shared_cage) {
+ enabled_external_v8_defines += [ "V8_COMPRESS_POINTERS_IN_SHARED_CAGE" ]
+} else if (v8_enable_pointer_compression) {
+ enabled_external_v8_defines += [ "V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE" ]
+}
if (v8_enable_pointer_compression || v8_enable_31bit_smis_on_64bit_arch) {
enabled_external_v8_defines += [ "V8_31BIT_SMIS_ON_64BIT_ARCH" ]
}
@@ -757,8 +789,11 @@ config("features") {
if (v8_fuzzilli) {
defines += [ "V8_FUZZILLI" ]
}
- if (v8_dict_mode_prototypes) {
- defines += [ "V8_DICT_MODE_PROTOTYPES" ]
+ if (v8_enable_short_builtin_calls) {
+ defines += [ "V8_SHORT_BUILTIN_CALLS" ]
+ }
+ if (v8_enable_swiss_name_dictionary) {
+ defines += [ "V8_ENABLE_SWISS_NAME_DICTIONARY" ]
}
if (v8_enable_system_instrumentation) {
defines += [ "V8_ENABLE_SYSTEM_INSTRUMENTATION" ]
@@ -1363,9 +1398,7 @@ torque_files = [
"src/builtins/typed-array-subarray.tq",
"src/builtins/typed-array-values.tq",
"src/builtins/typed-array.tq",
- "src/builtins/wasm.tq",
"src/builtins/weak-ref.tq",
- "src/debug/debug-wasm-objects.tq",
"src/ic/handler-configuration.tq",
"src/objects/allocation-site.tq",
"src/objects/api-callbacks.tq",
@@ -1418,12 +1451,12 @@ torque_files = [
"src/objects/stack-frame-info.tq",
"src/objects/string.tq",
"src/objects/struct.tq",
+ "src/objects/swiss-hash-table-helpers.tq",
"src/objects/swiss-name-dictionary.tq",
"src/objects/synthetic-module.tq",
"src/objects/template-objects.tq",
"src/objects/templates.tq",
"src/objects/torque-defined-classes.tq",
- "src/wasm/wasm-objects.tq",
"test/torque/test-torque.tq",
"third_party/v8/builtins/array-sort.tq",
]
@@ -1446,6 +1479,14 @@ if (v8_enable_i18n_support) {
]
}
+if (v8_enable_webassembly) {
+ torque_files += [
+ "src/builtins/wasm.tq",
+ "src/debug/debug-wasm-objects.tq",
+ "src/wasm/wasm-objects.tq",
+ ]
+}
+
# Template for running torque
# When building with v8_verify_torque_generation_invariance=true we need
# to be able to run torque for both 32 and 64 bits in the same build
@@ -1524,6 +1565,9 @@ template("run_torque") {
"-v8-root",
rebase_path(".", root_build_dir),
]
+ if (v8_annotate_torque_ir) {
+ args += [ "-annotate-ir" ]
+ }
if (defined(invoker.args)) {
args += invoker.args
}
@@ -1568,23 +1612,34 @@ group("v8_maybe_icu") {
}
}
+v8_header_set("torque_runtime_support") {
+ visibility = [ ":*" ]
+
+ sources = [ "src/torque/runtime-support.h" ]
+
+ configs = [ ":internal_config" ]
+}
+
v8_source_set("torque_generated_initializers") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [
":generate_bytecode_builtins_list",
":run_torque",
+ ":v8_base_without_compiler",
":v8_tracing",
]
- public_deps = [ ":v8_maybe_icu" ]
+ public_deps = [
+ ":torque_runtime_support",
+ ":v8_maybe_icu",
+ ]
sources = [
"$target_gen_dir/torque-generated/csa-types.h",
"$target_gen_dir/torque-generated/enum-verifiers.cc",
"$target_gen_dir/torque-generated/exported-macros-assembler.cc",
"$target_gen_dir/torque-generated/exported-macros-assembler.h",
- "src/torque/runtime-support.h",
]
foreach(file, torque_files) {
filetq = string_replace(file, ".tq", "-tq")
@@ -1603,6 +1658,8 @@ v8_source_set("torque_generated_definitions") {
deps = [
":generate_bytecode_builtins_list",
":run_torque",
+ ":v8_internal_headers",
+ ":v8_libbase",
":v8_tracing",
]
@@ -1914,6 +1971,8 @@ v8_source_set("v8_initializers") {
"test/cctest:*",
]
+ allow_circular_includes_from = [ ":torque_generated_initializers" ]
+
deps = [
":torque_generated_initializers",
":v8_base_without_compiler",
@@ -1968,8 +2027,6 @@ v8_source_set("v8_initializers") {
"src/builtins/builtins-typed-array-gen.cc",
"src/builtins/builtins-typed-array-gen.h",
"src/builtins/builtins-utils-gen.h",
- "src/builtins/builtins-wasm-gen.cc",
- "src/builtins/builtins-wasm-gen.h",
"src/builtins/growable-fixed-array-gen.cc",
"src/builtins/growable-fixed-array-gen.h",
"src/builtins/profile-data-reader.cc",
@@ -1995,6 +2052,13 @@ v8_source_set("v8_initializers") {
"src/interpreter/interpreter-intrinsics-generator.h",
]
+ if (v8_enable_webassembly) {
+ sources += [
+ "src/builtins/builtins-wasm-gen.cc",
+ "src/builtins/builtins-wasm-gen.h",
+ ]
+ }
+
if (v8_current_cpu == "x86") {
sources += [
### gcmole(arch:ia32) ###
@@ -2126,7 +2190,10 @@ v8_header_set("v8_headers") {
public_deps = [ ":v8_config_headers" ]
- deps = [ ":v8_version" ]
+ deps = [
+ ":cppgc_headers",
+ ":v8_version",
+ ]
}
if (v8_generate_external_defines_header) {
@@ -2155,12 +2222,6 @@ if (v8_generate_external_defines_header) {
}
}
-v8_header_set("v8_wrappers") {
- configs = [ ":internal_config" ]
-
- sources = [ "src/base/platform/wrappers.h" ]
-}
-
# This is split out to share basic headers with Torque and everything else:(
v8_header_set("v8_shared_internal_headers") {
visibility = [
@@ -2171,7 +2232,11 @@ v8_header_set("v8_shared_internal_headers") {
]
configs = [ ":internal_config" ]
- sources = [ "src/common/globals.h" ]
+ sources = [
+ "src/common/globals.h",
+ "src/wasm/wasm-constants.h",
+ "src/wasm/wasm-limits.h",
+ ]
deps = [
":v8_headers",
@@ -2179,333 +2244,26 @@ v8_header_set("v8_shared_internal_headers") {
]
}
-v8_compiler_sources = [
- ### gcmole(all) ###
- "src/builtins/profile-data-reader.h",
- "src/compiler/access-builder.cc",
- "src/compiler/access-builder.h",
- "src/compiler/access-info.cc",
- "src/compiler/access-info.h",
- "src/compiler/add-type-assertions-reducer.cc",
- "src/compiler/add-type-assertions-reducer.h",
- "src/compiler/all-nodes.cc",
- "src/compiler/all-nodes.h",
- "src/compiler/allocation-builder-inl.h",
- "src/compiler/allocation-builder.h",
- "src/compiler/backend/code-generator-impl.h",
- "src/compiler/backend/code-generator.cc",
- "src/compiler/backend/code-generator.h",
- "src/compiler/backend/frame-elider.cc",
- "src/compiler/backend/frame-elider.h",
- "src/compiler/backend/gap-resolver.cc",
- "src/compiler/backend/gap-resolver.h",
- "src/compiler/backend/instruction-codes.h",
- "src/compiler/backend/instruction-scheduler.cc",
- "src/compiler/backend/instruction-scheduler.h",
- "src/compiler/backend/instruction-selector-impl.h",
- "src/compiler/backend/instruction-selector.cc",
- "src/compiler/backend/instruction-selector.h",
- "src/compiler/backend/instruction.cc",
- "src/compiler/backend/instruction.h",
- "src/compiler/backend/jump-threading.cc",
- "src/compiler/backend/jump-threading.h",
- "src/compiler/backend/mid-tier-register-allocator.cc",
- "src/compiler/backend/mid-tier-register-allocator.h",
- "src/compiler/backend/move-optimizer.cc",
- "src/compiler/backend/move-optimizer.h",
- "src/compiler/backend/register-allocation.h",
- "src/compiler/backend/register-allocator-verifier.cc",
- "src/compiler/backend/register-allocator-verifier.h",
- "src/compiler/backend/register-allocator.cc",
- "src/compiler/backend/register-allocator.h",
- "src/compiler/backend/spill-placer.cc",
- "src/compiler/backend/spill-placer.h",
- "src/compiler/backend/unwinding-info-writer.h",
- "src/compiler/basic-block-instrumentor.cc",
- "src/compiler/basic-block-instrumentor.h",
- "src/compiler/branch-elimination.cc",
- "src/compiler/branch-elimination.h",
- "src/compiler/bytecode-analysis.cc",
- "src/compiler/bytecode-analysis.h",
- "src/compiler/bytecode-graph-builder.cc",
- "src/compiler/bytecode-graph-builder.h",
- "src/compiler/bytecode-liveness-map.cc",
- "src/compiler/bytecode-liveness-map.h",
- "src/compiler/c-linkage.cc",
- "src/compiler/checkpoint-elimination.cc",
- "src/compiler/checkpoint-elimination.h",
- "src/compiler/code-assembler.cc",
- "src/compiler/code-assembler.h",
- "src/compiler/common-node-cache.cc",
- "src/compiler/common-node-cache.h",
- "src/compiler/common-operator-reducer.cc",
- "src/compiler/common-operator-reducer.h",
- "src/compiler/common-operator.cc",
- "src/compiler/common-operator.h",
- "src/compiler/compilation-dependencies.cc",
- "src/compiler/compilation-dependencies.h",
- "src/compiler/compiler-source-position-table.cc",
- "src/compiler/compiler-source-position-table.h",
- "src/compiler/constant-folding-reducer.cc",
- "src/compiler/constant-folding-reducer.h",
- "src/compiler/control-equivalence.cc",
- "src/compiler/control-equivalence.h",
- "src/compiler/control-flow-optimizer.cc",
- "src/compiler/control-flow-optimizer.h",
- "src/compiler/csa-load-elimination.cc",
- "src/compiler/csa-load-elimination.h",
- "src/compiler/dead-code-elimination.cc",
- "src/compiler/dead-code-elimination.h",
- "src/compiler/decompression-optimizer.cc",
- "src/compiler/decompression-optimizer.h",
- "src/compiler/diamond.h",
- "src/compiler/effect-control-linearizer.cc",
- "src/compiler/effect-control-linearizer.h",
- "src/compiler/escape-analysis-reducer.cc",
- "src/compiler/escape-analysis-reducer.h",
- "src/compiler/escape-analysis.cc",
- "src/compiler/escape-analysis.h",
- "src/compiler/feedback-source.cc",
- "src/compiler/feedback-source.h",
- "src/compiler/frame-states.cc",
- "src/compiler/frame-states.h",
- "src/compiler/frame.cc",
- "src/compiler/frame.h",
- "src/compiler/functional-list.h",
- "src/compiler/globals.h",
- "src/compiler/graph-assembler.cc",
- "src/compiler/graph-assembler.h",
- "src/compiler/graph-reducer.cc",
- "src/compiler/graph-reducer.h",
- "src/compiler/graph-trimmer.cc",
- "src/compiler/graph-trimmer.h",
- "src/compiler/graph-visualizer.cc",
- "src/compiler/graph-visualizer.h",
- "src/compiler/graph-zone-traits.h",
- "src/compiler/graph.cc",
- "src/compiler/graph.h",
- "src/compiler/int64-lowering.cc",
- "src/compiler/int64-lowering.h",
- "src/compiler/js-call-reducer.cc",
- "src/compiler/js-call-reducer.h",
- "src/compiler/js-context-specialization.cc",
- "src/compiler/js-context-specialization.h",
- "src/compiler/js-create-lowering.cc",
- "src/compiler/js-create-lowering.h",
- "src/compiler/js-generic-lowering.cc",
- "src/compiler/js-generic-lowering.h",
- "src/compiler/js-graph.cc",
- "src/compiler/js-graph.h",
- "src/compiler/js-heap-broker.cc",
- "src/compiler/js-heap-broker.h",
- "src/compiler/js-heap-copy-reducer.cc",
- "src/compiler/js-heap-copy-reducer.h",
- "src/compiler/js-inlining-heuristic.cc",
- "src/compiler/js-inlining-heuristic.h",
- "src/compiler/js-inlining.cc",
- "src/compiler/js-inlining.h",
- "src/compiler/js-intrinsic-lowering.cc",
- "src/compiler/js-intrinsic-lowering.h",
- "src/compiler/js-native-context-specialization.cc",
- "src/compiler/js-native-context-specialization.h",
- "src/compiler/js-operator.cc",
- "src/compiler/js-operator.h",
- "src/compiler/js-type-hint-lowering.cc",
- "src/compiler/js-type-hint-lowering.h",
- "src/compiler/js-typed-lowering.cc",
- "src/compiler/js-typed-lowering.h",
- "src/compiler/linkage.cc",
- "src/compiler/linkage.h",
- "src/compiler/load-elimination.cc",
- "src/compiler/load-elimination.h",
- "src/compiler/loop-analysis.cc",
- "src/compiler/loop-analysis.h",
- "src/compiler/loop-peeling.cc",
- "src/compiler/loop-peeling.h",
- "src/compiler/loop-variable-optimizer.cc",
- "src/compiler/loop-variable-optimizer.h",
- "src/compiler/machine-graph-verifier.cc",
- "src/compiler/machine-graph-verifier.h",
- "src/compiler/machine-graph.cc",
- "src/compiler/machine-graph.h",
- "src/compiler/machine-operator-reducer.cc",
- "src/compiler/machine-operator-reducer.h",
- "src/compiler/machine-operator.cc",
- "src/compiler/machine-operator.h",
- "src/compiler/map-inference.cc",
- "src/compiler/map-inference.h",
- "src/compiler/memory-lowering.cc",
- "src/compiler/memory-lowering.h",
- "src/compiler/memory-optimizer.cc",
- "src/compiler/memory-optimizer.h",
- "src/compiler/node-aux-data.h",
- "src/compiler/node-cache.h",
- "src/compiler/node-marker.cc",
- "src/compiler/node-marker.h",
- "src/compiler/node-matchers.cc",
- "src/compiler/node-matchers.h",
- "src/compiler/node-observer.cc",
- "src/compiler/node-observer.h",
- "src/compiler/node-origin-table.cc",
- "src/compiler/node-origin-table.h",
- "src/compiler/node-properties.cc",
- "src/compiler/node-properties.h",
- "src/compiler/node.cc",
- "src/compiler/node.h",
- "src/compiler/opcodes.cc",
- "src/compiler/opcodes.h",
- "src/compiler/operation-typer.cc",
- "src/compiler/operation-typer.h",
- "src/compiler/operator-properties.cc",
- "src/compiler/operator-properties.h",
- "src/compiler/operator.cc",
- "src/compiler/operator.h",
- "src/compiler/osr.cc",
- "src/compiler/osr.h",
- "src/compiler/per-isolate-compiler-cache.h",
- "src/compiler/persistent-map.h",
- "src/compiler/pipeline-statistics.cc",
- "src/compiler/pipeline-statistics.h",
- "src/compiler/pipeline.cc",
- "src/compiler/pipeline.h",
- "src/compiler/property-access-builder.cc",
- "src/compiler/property-access-builder.h",
- "src/compiler/raw-machine-assembler.cc",
- "src/compiler/raw-machine-assembler.h",
- "src/compiler/redundancy-elimination.cc",
- "src/compiler/redundancy-elimination.h",
- "src/compiler/refs-map.cc",
- "src/compiler/refs-map.h",
- "src/compiler/representation-change.cc",
- "src/compiler/representation-change.h",
- "src/compiler/schedule.cc",
- "src/compiler/schedule.h",
- "src/compiler/scheduled-machine-lowering.cc",
- "src/compiler/scheduled-machine-lowering.h",
- "src/compiler/scheduler.cc",
- "src/compiler/scheduler.h",
- "src/compiler/select-lowering.cc",
- "src/compiler/select-lowering.h",
- "src/compiler/serializer-for-background-compilation.cc",
- "src/compiler/serializer-for-background-compilation.h",
- "src/compiler/serializer-hints.h",
- "src/compiler/simd-scalar-lowering.cc",
- "src/compiler/simd-scalar-lowering.h",
- "src/compiler/simplified-lowering.cc",
- "src/compiler/simplified-lowering.h",
- "src/compiler/simplified-operator-reducer.cc",
- "src/compiler/simplified-operator-reducer.h",
- "src/compiler/simplified-operator.cc",
- "src/compiler/simplified-operator.h",
- "src/compiler/state-values-utils.cc",
- "src/compiler/state-values-utils.h",
- "src/compiler/store-store-elimination.cc",
- "src/compiler/store-store-elimination.h",
- "src/compiler/type-cache.cc",
- "src/compiler/type-cache.h",
- "src/compiler/type-narrowing-reducer.cc",
- "src/compiler/type-narrowing-reducer.h",
- "src/compiler/typed-optimization.cc",
- "src/compiler/typed-optimization.h",
- "src/compiler/typer.cc",
- "src/compiler/typer.h",
- "src/compiler/types.cc",
- "src/compiler/types.h",
- "src/compiler/value-numbering-reducer.cc",
- "src/compiler/value-numbering-reducer.h",
- "src/compiler/verifier.cc",
- "src/compiler/verifier.h",
- "src/compiler/wasm-compiler.cc",
- "src/compiler/wasm-compiler.h",
- "src/compiler/write-barrier-kind.h",
- "src/compiler/zone-stats.cc",
- "src/compiler/zone-stats.h",
-]
-
-# The src/compiler files with optimizations.
-v8_source_set("v8_compiler_opt") {
- visibility = [ ":*" ] # Only targets in this file can depend on this.
-
- sources = v8_compiler_sources
+v8_header_set("v8_flags") {
+ visibility = [ ":*" ]
- public_deps = [
- ":generate_bytecode_builtins_list",
- ":run_torque",
- ":v8_maybe_icu",
- ":v8_tracing",
- ]
+ configs = [ ":internal_config" ]
- deps = [
- ":v8_base_without_compiler",
- ":v8_libbase",
- ":v8_shared_internal_headers",
+ sources = [
+ "src/flags/flag-definitions.h",
+ "src/flags/flags.h",
]
- if (is_debug && !v8_optimized_debug && v8_enable_fast_mksnapshot) {
- # The :no_optimize config is added to v8_add_configs in v8.gni.
- remove_configs = [ "//build/config/compiler:no_optimize" ]
- configs = [ ":always_optimize" ]
- } else {
- # Without this else branch, gn fails to generate build files for non-debug
- # builds (because we try to remove a config that is not present).
- # So we include it, even if this config is not used outside of debug builds.
- configs = [ ":internal_config" ]
- }
+ deps = [ ":v8_shared_internal_headers" ]
}
-# The src/compiler files with default optimization behavior.
-v8_source_set("v8_compiler") {
- visibility = [ ":*" ] # Only targets in this file can depend on this.
-
- sources = v8_compiler_sources
-
- public_deps = [
- ":generate_bytecode_builtins_list",
- ":run_torque",
- ":v8_maybe_icu",
- ":v8_tracing",
- ]
-
- deps = [
- ":v8_base_without_compiler",
- ":v8_libbase",
- ":v8_shared_internal_headers",
- ]
-
+v8_header_set("v8_internal_headers") {
configs = [ ":internal_config" ]
-}
-
-group("v8_compiler_for_mksnapshot") {
- if (is_debug && !v8_optimized_debug && v8_enable_fast_mksnapshot) {
- deps = [ ":v8_compiler_opt" ]
- } else {
- deps = [ ":v8_compiler" ]
- }
-}
-
-# Any target using trace events must directly or indirectly depend on
-# v8_tracing.
-group("v8_tracing") {
- if (v8_use_perfetto) {
- if (build_with_chromium) {
- public_deps = [ "//third_party/perfetto:libperfetto" ]
- } else {
- public_deps = [ ":v8_libperfetto" ]
- }
- }
-}
-
-v8_source_set("v8_base_without_compiler") {
- visibility = [ ":*" ] # Only targets in this file can depend on this.
-
- # Split static libraries on windows into two.
- split_count = 2
sources = [
- "//base/trace_event/common/trace_event_common.h",
-
### gcmole(all) ###
"$target_gen_dir/builtins-generated/bytecodes-builtins-list.h",
+ "//base/trace_event/common/trace_event_common.h",
"include/cppgc/common.h",
"include/v8-inspector-protocol.h",
"include/v8-inspector.h",
@@ -2513,147 +2271,77 @@ v8_source_set("v8_base_without_compiler") {
"include/v8-unwinder-state.h",
"include/v8-wasm-trap-handler-posix.h",
"src/api/api-arguments-inl.h",
- "src/api/api-arguments.cc",
"src/api/api-arguments.h",
"src/api/api-inl.h",
"src/api/api-macros.h",
- "src/api/api-natives.cc",
"src/api/api-natives.h",
- "src/api/api.cc",
"src/api/api.h",
- "src/ast/ast-function-literal-id-reindexer.cc",
"src/ast/ast-function-literal-id-reindexer.h",
"src/ast/ast-source-ranges.h",
"src/ast/ast-traversal-visitor.h",
- "src/ast/ast-value-factory.cc",
"src/ast/ast-value-factory.h",
- "src/ast/ast.cc",
"src/ast/ast.h",
- "src/ast/modules.cc",
"src/ast/modules.h",
- "src/ast/prettyprinter.cc",
"src/ast/prettyprinter.h",
- "src/ast/scopes.cc",
"src/ast/scopes.h",
- "src/ast/source-range-ast-visitor.cc",
"src/ast/source-range-ast-visitor.h",
- "src/ast/variables.cc",
"src/ast/variables.h",
"src/baseline/baseline-assembler-inl.h",
"src/baseline/baseline-assembler.h",
- "src/baseline/baseline-compiler.cc",
"src/baseline/baseline-compiler.h",
- "src/baseline/baseline.cc",
"src/baseline/baseline.h",
- "src/builtins/accessors.cc",
+ "src/baseline/bytecode-offset-iterator.h",
"src/builtins/accessors.h",
- "src/builtins/builtins-api.cc",
- "src/builtins/builtins-array.cc",
- "src/builtins/builtins-arraybuffer.cc",
- "src/builtins/builtins-async-module.cc",
- "src/builtins/builtins-bigint.cc",
- "src/builtins/builtins-callsite.cc",
- "src/builtins/builtins-collections.cc",
- "src/builtins/builtins-console.cc",
"src/builtins/builtins-constructor.h",
- "src/builtins/builtins-dataview.cc",
- "src/builtins/builtins-date.cc",
"src/builtins/builtins-definitions.h",
"src/builtins/builtins-descriptors.h",
- "src/builtins/builtins-error.cc",
- "src/builtins/builtins-function.cc",
- "src/builtins/builtins-global.cc",
- "src/builtins/builtins-internal.cc",
- "src/builtins/builtins-intl.cc",
- "src/builtins/builtins-json.cc",
- "src/builtins/builtins-number.cc",
- "src/builtins/builtins-object.cc",
"src/builtins/builtins-promise.h",
- "src/builtins/builtins-reflect.cc",
- "src/builtins/builtins-regexp.cc",
- "src/builtins/builtins-sharedarraybuffer.cc",
- "src/builtins/builtins-string.cc",
- "src/builtins/builtins-symbol.cc",
- "src/builtins/builtins-trace.cc",
- "src/builtins/builtins-typed-array.cc",
"src/builtins/builtins-utils-inl.h",
"src/builtins/builtins-utils.h",
- "src/builtins/builtins-weak-refs.cc",
- "src/builtins/builtins.cc",
"src/builtins/builtins.h",
- "src/builtins/constants-table-builder.cc",
"src/builtins/constants-table-builder.h",
"src/builtins/profile-data-reader.h",
+ "src/codegen/aligned-slot-allocator.h",
"src/codegen/assembler-arch.h",
"src/codegen/assembler-inl.h",
- "src/codegen/assembler.cc",
"src/codegen/assembler.h",
- "src/codegen/bailout-reason.cc",
"src/codegen/bailout-reason.h",
"src/codegen/callable.h",
- "src/codegen/code-comments.cc",
"src/codegen/code-comments.h",
- "src/codegen/code-desc.cc",
"src/codegen/code-desc.h",
- "src/codegen/code-factory.cc",
"src/codegen/code-factory.h",
- "src/codegen/code-reference.cc",
"src/codegen/code-reference.h",
- "src/codegen/compilation-cache.cc",
"src/codegen/compilation-cache.h",
- "src/codegen/compiler.cc",
"src/codegen/compiler.h",
- "src/codegen/constant-pool.cc",
"src/codegen/constant-pool.h",
"src/codegen/constants-arch.h",
"src/codegen/cpu-features.h",
- "src/codegen/external-reference-encoder.cc",
"src/codegen/external-reference-encoder.h",
- "src/codegen/external-reference-table.cc",
"src/codegen/external-reference-table.h",
- "src/codegen/external-reference.cc",
"src/codegen/external-reference.h",
- "src/codegen/flush-instruction-cache.cc",
"src/codegen/flush-instruction-cache.h",
- "src/codegen/handler-table.cc",
"src/codegen/handler-table.h",
- "src/codegen/interface-descriptors.cc",
"src/codegen/interface-descriptors.h",
"src/codegen/label.h",
- "src/codegen/machine-type.cc",
"src/codegen/machine-type.h",
"src/codegen/macro-assembler-inl.h",
"src/codegen/macro-assembler.h",
- "src/codegen/optimized-compilation-info.cc",
"src/codegen/optimized-compilation-info.h",
- "src/codegen/pending-optimization-table.cc",
"src/codegen/pending-optimization-table.h",
"src/codegen/register-arch.h",
- "src/codegen/register-configuration.cc",
"src/codegen/register-configuration.h",
- "src/codegen/register.cc",
"src/codegen/register.h",
"src/codegen/reglist.h",
- "src/codegen/reloc-info.cc",
"src/codegen/reloc-info.h",
- "src/codegen/safepoint-table.cc",
"src/codegen/safepoint-table.h",
"src/codegen/signature.h",
- "src/codegen/source-position-table.cc",
"src/codegen/source-position-table.h",
- "src/codegen/source-position.cc",
"src/codegen/source-position.h",
- "src/codegen/string-constants.cc",
"src/codegen/string-constants.h",
- "src/codegen/tick-counter.cc",
"src/codegen/tick-counter.h",
- "src/codegen/tnode.cc",
"src/codegen/tnode.h",
- "src/codegen/turbo-assembler.cc",
"src/codegen/turbo-assembler.h",
- "src/codegen/unoptimized-compilation-info.cc",
"src/codegen/unoptimized-compilation-info.h",
- "src/common/assert-scope.cc",
"src/common/assert-scope.h",
"src/common/checks.h",
"src/common/external-pointer-inl.h",
@@ -2661,401 +2349,356 @@ v8_source_set("v8_base_without_compiler") {
"src/common/message-template.h",
"src/common/ptr-compr-inl.h",
"src/common/ptr-compr.h",
- "src/compiler-dispatcher/compiler-dispatcher.cc",
"src/compiler-dispatcher/compiler-dispatcher.h",
- "src/compiler-dispatcher/optimizing-compile-dispatcher.cc",
"src/compiler-dispatcher/optimizing-compile-dispatcher.h",
- "src/date/date.cc",
+ "src/compiler/all-nodes.h",
+ "src/compiler/allocation-builder-inl.h",
+ "src/compiler/allocation-builder.h",
+ "src/compiler/backend/code-generator-impl.h",
+ "src/compiler/backend/code-generator.h",
+ "src/compiler/backend/frame-elider.h",
+ "src/compiler/backend/gap-resolver.h",
+ "src/compiler/backend/instruction-codes.h",
+ "src/compiler/backend/instruction-scheduler.h",
+ "src/compiler/backend/instruction-selector-impl.h",
+ "src/compiler/backend/instruction-selector.h",
+ "src/compiler/backend/instruction.h",
+ "src/compiler/backend/jump-threading.h",
+ "src/compiler/backend/mid-tier-register-allocator.h",
+ "src/compiler/backend/move-optimizer.h",
+ "src/compiler/backend/register-allocation.h",
+ "src/compiler/backend/register-allocator-verifier.h",
+ "src/compiler/backend/register-allocator.h",
+ "src/compiler/backend/spill-placer.h",
+ "src/compiler/backend/unwinding-info-writer.h",
+ "src/compiler/basic-block-instrumentor.h",
+ "src/compiler/branch-elimination.h",
+ "src/compiler/bytecode-analysis.h",
+ "src/compiler/bytecode-graph-builder.h",
+ "src/compiler/bytecode-liveness-map.h",
+ "src/compiler/checkpoint-elimination.h",
+ "src/compiler/code-assembler.h",
+ "src/compiler/common-node-cache.h",
+ "src/compiler/common-operator-reducer.h",
+ "src/compiler/common-operator.h",
+ "src/compiler/compilation-dependencies.h",
+ "src/compiler/compiler-source-position-table.h",
+ "src/compiler/constant-folding-reducer.h",
+ "src/compiler/control-equivalence.h",
+ "src/compiler/control-flow-optimizer.h",
+ "src/compiler/csa-load-elimination.h",
+ "src/compiler/dead-code-elimination.h",
+ "src/compiler/decompression-optimizer.h",
+ "src/compiler/diamond.h",
+ "src/compiler/effect-control-linearizer.h",
+ "src/compiler/escape-analysis-reducer.h",
+ "src/compiler/escape-analysis.h",
+ "src/compiler/feedback-source.h",
+ "src/compiler/frame-states.h",
+ "src/compiler/frame.h",
+ "src/compiler/functional-list.h",
+ "src/compiler/globals.h",
+ "src/compiler/graph-assembler.h",
+ "src/compiler/graph-reducer.h",
+ "src/compiler/graph-trimmer.h",
+ "src/compiler/graph-visualizer.h",
+ "src/compiler/graph-zone-traits.h",
+ "src/compiler/graph.h",
+ "src/compiler/js-call-reducer.h",
+ "src/compiler/js-context-specialization.h",
+ "src/compiler/js-create-lowering.h",
+ "src/compiler/js-generic-lowering.h",
+ "src/compiler/js-graph.h",
+ "src/compiler/js-heap-broker.h",
+ "src/compiler/js-heap-copy-reducer.h",
+ "src/compiler/js-inlining-heuristic.h",
+ "src/compiler/js-inlining.h",
+ "src/compiler/js-intrinsic-lowering.h",
+ "src/compiler/js-native-context-specialization.h",
+ "src/compiler/js-operator.h",
+ "src/compiler/js-type-hint-lowering.h",
+ "src/compiler/js-typed-lowering.h",
+ "src/compiler/linkage.h",
+ "src/compiler/load-elimination.h",
+ "src/compiler/loop-analysis.h",
+ "src/compiler/loop-peeling.h",
+ "src/compiler/loop-unrolling.h",
+ "src/compiler/loop-variable-optimizer.h",
+ "src/compiler/machine-graph-verifier.h",
+ "src/compiler/machine-graph.h",
+ "src/compiler/machine-operator-reducer.h",
+ "src/compiler/machine-operator.h",
+ "src/compiler/map-inference.h",
+ "src/compiler/memory-lowering.h",
+ "src/compiler/memory-optimizer.h",
+ "src/compiler/node-aux-data.h",
+ "src/compiler/node-cache.h",
+ "src/compiler/node-marker.h",
+ "src/compiler/node-matchers.h",
+ "src/compiler/node-observer.h",
+ "src/compiler/node-origin-table.h",
+ "src/compiler/node-properties.h",
+ "src/compiler/node.h",
+ "src/compiler/opcodes.h",
+ "src/compiler/operation-typer.h",
+ "src/compiler/operator-properties.h",
+ "src/compiler/operator.h",
+ "src/compiler/osr.h",
+ "src/compiler/per-isolate-compiler-cache.h",
+ "src/compiler/persistent-map.h",
+ "src/compiler/pipeline-statistics.h",
+ "src/compiler/pipeline.h",
+ "src/compiler/property-access-builder.h",
+ "src/compiler/raw-machine-assembler.h",
+ "src/compiler/redundancy-elimination.h",
+ "src/compiler/refs-map.h",
+ "src/compiler/representation-change.h",
+ "src/compiler/schedule.h",
+ "src/compiler/scheduled-machine-lowering.h",
+ "src/compiler/scheduler.h",
+ "src/compiler/select-lowering.h",
+ "src/compiler/serializer-for-background-compilation.h",
+ "src/compiler/serializer-hints.h",
+ "src/compiler/simd-scalar-lowering.h",
+ "src/compiler/simplified-lowering.h",
+ "src/compiler/simplified-operator-reducer.h",
+ "src/compiler/simplified-operator.h",
+ "src/compiler/state-values-utils.h",
+ "src/compiler/store-store-elimination.h",
+ "src/compiler/type-cache.h",
+ "src/compiler/type-narrowing-reducer.h",
+ "src/compiler/typed-optimization.h",
+ "src/compiler/typer.h",
+ "src/compiler/types.h",
+ "src/compiler/value-numbering-reducer.h",
+ "src/compiler/verifier.h",
+ "src/compiler/write-barrier-kind.h",
+ "src/compiler/zone-stats.h",
"src/date/date.h",
"src/date/dateparser-inl.h",
- "src/date/dateparser.cc",
"src/date/dateparser.h",
- "src/debug/debug-coverage.cc",
"src/debug/debug-coverage.h",
- "src/debug/debug-evaluate.cc",
"src/debug/debug-evaluate.h",
- "src/debug/debug-frames.cc",
"src/debug/debug-frames.h",
- "src/debug/debug-interface.cc",
"src/debug/debug-interface.h",
- "src/debug/debug-property-iterator.cc",
"src/debug/debug-property-iterator.h",
- "src/debug/debug-scope-iterator.cc",
"src/debug/debug-scope-iterator.h",
- "src/debug/debug-scopes.cc",
"src/debug/debug-scopes.h",
- "src/debug/debug-stack-trace-iterator.cc",
"src/debug/debug-stack-trace-iterator.h",
- "src/debug/debug-type-profile.cc",
"src/debug/debug-type-profile.h",
- "src/debug/debug-wasm-objects-inl.h",
- "src/debug/debug-wasm-objects.cc",
- "src/debug/debug-wasm-objects.h",
- "src/debug/debug.cc",
"src/debug/debug.h",
"src/debug/interface-types.h",
- "src/debug/liveedit.cc",
"src/debug/liveedit.h",
- "src/deoptimizer/deoptimize-reason.cc",
"src/deoptimizer/deoptimize-reason.h",
- "src/deoptimizer/deoptimized-frame-info.cc",
"src/deoptimizer/deoptimized-frame-info.h",
- "src/deoptimizer/deoptimizer.cc",
"src/deoptimizer/deoptimizer.h",
"src/deoptimizer/frame-description.h",
- "src/deoptimizer/materialized-object-store.cc",
"src/deoptimizer/materialized-object-store.h",
- "src/deoptimizer/translated-state.cc",
"src/deoptimizer/translated-state.h",
- "src/deoptimizer/translation-array.cc",
"src/deoptimizer/translation-array.h",
"src/deoptimizer/translation-opcode.h",
- "src/diagnostics/basic-block-profiler.cc",
"src/diagnostics/basic-block-profiler.h",
"src/diagnostics/code-tracer.h",
- "src/diagnostics/compilation-statistics.cc",
"src/diagnostics/compilation-statistics.h",
"src/diagnostics/disasm.h",
- "src/diagnostics/disassembler.cc",
"src/diagnostics/disassembler.h",
- "src/diagnostics/eh-frame.cc",
"src/diagnostics/eh-frame.h",
- "src/diagnostics/gdb-jit.cc",
"src/diagnostics/gdb-jit.h",
- "src/diagnostics/objects-debug.cc",
- "src/diagnostics/objects-printer.cc",
- "src/diagnostics/perf-jit.cc",
"src/diagnostics/perf-jit.h",
- "src/diagnostics/unwinder.cc",
"src/diagnostics/unwinder.h",
"src/execution/arguments-inl.h",
- "src/execution/arguments.cc",
"src/execution/arguments.h",
- "src/execution/execution.cc",
"src/execution/execution.h",
- "src/execution/external-pointer-table.cc",
"src/execution/external-pointer-table.h",
"src/execution/frame-constants.h",
"src/execution/frames-inl.h",
- "src/execution/frames.cc",
"src/execution/frames.h",
- "src/execution/futex-emulation.cc",
"src/execution/futex-emulation.h",
- "src/execution/interrupts-scope.cc",
"src/execution/interrupts-scope.h",
"src/execution/isolate-data.h",
"src/execution/isolate-inl.h",
"src/execution/isolate-utils.h",
- "src/execution/isolate.cc",
"src/execution/isolate.h",
"src/execution/local-isolate-inl.h",
- "src/execution/local-isolate.cc",
"src/execution/local-isolate.h",
- "src/execution/messages.cc",
"src/execution/messages.h",
- "src/execution/microtask-queue.cc",
"src/execution/microtask-queue.h",
"src/execution/pointer-authentication.h",
"src/execution/protectors-inl.h",
- "src/execution/protectors.cc",
"src/execution/protectors.h",
- "src/execution/runtime-profiler.cc",
"src/execution/runtime-profiler.h",
"src/execution/shared-mutex-guard-if-off-thread.h",
- "src/execution/simulator-base.cc",
"src/execution/simulator-base.h",
"src/execution/simulator.h",
- "src/execution/stack-guard.cc",
"src/execution/stack-guard.h",
- "src/execution/thread-id.cc",
"src/execution/thread-id.h",
- "src/execution/thread-local-top.cc",
"src/execution/thread-local-top.h",
- "src/execution/v8threads.cc",
"src/execution/v8threads.h",
"src/execution/vm-state-inl.h",
"src/execution/vm-state.h",
- "src/extensions/cputracemark-extension.cc",
"src/extensions/cputracemark-extension.h",
- "src/extensions/externalize-string-extension.cc",
"src/extensions/externalize-string-extension.h",
- "src/extensions/gc-extension.cc",
"src/extensions/gc-extension.h",
- "src/extensions/ignition-statistics-extension.cc",
"src/extensions/ignition-statistics-extension.h",
- "src/extensions/statistics-extension.cc",
"src/extensions/statistics-extension.h",
- "src/extensions/trigger-failure-extension.cc",
"src/extensions/trigger-failure-extension.h",
- "src/flags/flag-definitions.h",
- "src/flags/flags.cc",
- "src/flags/flags.h",
- "src/handles/global-handles.cc",
"src/handles/global-handles.h",
"src/handles/handles-inl.h",
- "src/handles/handles.cc",
"src/handles/handles.h",
"src/handles/local-handles-inl.h",
- "src/handles/local-handles.cc",
"src/handles/local-handles.h",
"src/handles/maybe-handles-inl.h",
"src/handles/maybe-handles.h",
- "src/handles/persistent-handles.cc",
"src/handles/persistent-handles.h",
- "src/heap/allocation-observer.cc",
"src/heap/allocation-observer.h",
"src/heap/allocation-stats.h",
- "src/heap/array-buffer-sweeper.cc",
"src/heap/array-buffer-sweeper.h",
"src/heap/barrier.h",
- "src/heap/base-space.cc",
"src/heap/base-space.h",
- "src/heap/basic-memory-chunk.cc",
"src/heap/basic-memory-chunk.h",
- "src/heap/code-object-registry.cc",
"src/heap/code-object-registry.h",
- "src/heap/code-stats.cc",
"src/heap/code-stats.h",
- "src/heap/collection-barrier.cc",
"src/heap/collection-barrier.h",
- "src/heap/combined-heap.cc",
"src/heap/combined-heap.h",
"src/heap/concurrent-allocator-inl.h",
- "src/heap/concurrent-allocator.cc",
"src/heap/concurrent-allocator.h",
- "src/heap/concurrent-marking.cc",
"src/heap/concurrent-marking.h",
- "src/heap/cppgc-js/cpp-heap.cc",
"src/heap/cppgc-js/cpp-heap.h",
- "src/heap/cppgc-js/cpp-snapshot.cc",
"src/heap/cppgc-js/cpp-snapshot.h",
"src/heap/cppgc-js/unified-heap-marking-state.h",
- "src/heap/cppgc-js/unified-heap-marking-verifier.cc",
"src/heap/cppgc-js/unified-heap-marking-verifier.h",
- "src/heap/cppgc-js/unified-heap-marking-visitor.cc",
"src/heap/cppgc-js/unified-heap-marking-visitor.h",
- "src/heap/embedder-tracing.cc",
"src/heap/embedder-tracing.h",
- "src/heap/factory-base.cc",
"src/heap/factory-base.h",
"src/heap/factory-inl.h",
- "src/heap/factory.cc",
"src/heap/factory.h",
- "src/heap/finalization-registry-cleanup-task.cc",
"src/heap/finalization-registry-cleanup-task.h",
"src/heap/free-list-inl.h",
- "src/heap/free-list.cc",
"src/heap/free-list.h",
- "src/heap/gc-idle-time-handler.cc",
"src/heap/gc-idle-time-handler.h",
- "src/heap/gc-tracer.cc",
"src/heap/gc-tracer.h",
- "src/heap/heap-controller.cc",
"src/heap/heap-controller.h",
"src/heap/heap-inl.h",
"src/heap/heap-write-barrier-inl.h",
- "src/heap/heap-write-barrier.cc",
"src/heap/heap-write-barrier.h",
- "src/heap/heap.cc",
"src/heap/heap.h",
"src/heap/incremental-marking-inl.h",
- "src/heap/incremental-marking-job.cc",
"src/heap/incremental-marking-job.h",
- "src/heap/incremental-marking.cc",
"src/heap/incremental-marking.h",
- "src/heap/index-generator.cc",
"src/heap/index-generator.h",
"src/heap/invalidated-slots-inl.h",
- "src/heap/invalidated-slots.cc",
"src/heap/invalidated-slots.h",
- "src/heap/item-parallel-job.cc",
- "src/heap/item-parallel-job.h",
- "src/heap/large-spaces.cc",
"src/heap/large-spaces.h",
"src/heap/list.h",
"src/heap/local-allocator-inl.h",
"src/heap/local-allocator.h",
- "src/heap/local-factory.cc",
"src/heap/local-factory.h",
"src/heap/local-heap-inl.h",
- "src/heap/local-heap.cc",
"src/heap/local-heap.h",
"src/heap/mark-compact-inl.h",
- "src/heap/mark-compact.cc",
"src/heap/mark-compact.h",
- "src/heap/marking-barrier.cc",
"src/heap/marking-barrier.h",
"src/heap/marking-visitor-inl.h",
"src/heap/marking-visitor.h",
"src/heap/marking-worklist-inl.h",
- "src/heap/marking-worklist.cc",
"src/heap/marking-worklist.h",
- "src/heap/marking.cc",
"src/heap/marking.h",
- "src/heap/memory-allocator.cc",
"src/heap/memory-allocator.h",
"src/heap/memory-chunk-inl.h",
- "src/heap/memory-chunk-layout.cc",
"src/heap/memory-chunk-layout.h",
- "src/heap/memory-chunk.cc",
"src/heap/memory-chunk.h",
"src/heap/memory-measurement-inl.h",
- "src/heap/memory-measurement.cc",
"src/heap/memory-measurement.h",
- "src/heap/memory-reducer.cc",
"src/heap/memory-reducer.h",
"src/heap/new-spaces-inl.h",
- "src/heap/new-spaces.cc",
"src/heap/new-spaces.h",
- "src/heap/object-stats.cc",
"src/heap/object-stats.h",
"src/heap/objects-visiting-inl.h",
- "src/heap/objects-visiting.cc",
"src/heap/objects-visiting.h",
"src/heap/paged-spaces-inl.h",
- "src/heap/paged-spaces.cc",
"src/heap/paged-spaces.h",
"src/heap/parallel-work-item.h",
"src/heap/parked-scope.h",
"src/heap/read-only-heap-inl.h",
- "src/heap/read-only-heap.cc",
"src/heap/read-only-heap.h",
- "src/heap/read-only-spaces.cc",
"src/heap/read-only-spaces.h",
"src/heap/remembered-set-inl.h",
"src/heap/remembered-set.h",
- "src/heap/safepoint.cc",
"src/heap/safepoint.h",
- "src/heap/scavenge-job.cc",
"src/heap/scavenge-job.h",
"src/heap/scavenger-inl.h",
- "src/heap/scavenger.cc",
"src/heap/scavenger.h",
- "src/heap/slot-set.cc",
"src/heap/slot-set.h",
"src/heap/spaces-inl.h",
- "src/heap/spaces.cc",
"src/heap/spaces.h",
- "src/heap/stress-marking-observer.cc",
"src/heap/stress-marking-observer.h",
- "src/heap/stress-scavenge-observer.cc",
"src/heap/stress-scavenge-observer.h",
- "src/heap/sweeper.cc",
"src/heap/sweeper.h",
- "src/heap/weak-object-worklists.cc",
"src/heap/weak-object-worklists.h",
"src/heap/worklist.h",
- "src/ic/call-optimization.cc",
"src/ic/call-optimization.h",
"src/ic/handler-configuration-inl.h",
- "src/ic/handler-configuration.cc",
"src/ic/handler-configuration.h",
"src/ic/ic-inl.h",
- "src/ic/ic-stats.cc",
"src/ic/ic-stats.h",
- "src/ic/ic.cc",
"src/ic/ic.h",
- "src/ic/stub-cache.cc",
"src/ic/stub-cache.h",
- "src/init/bootstrapper.cc",
"src/init/bootstrapper.h",
"src/init/heap-symbols.h",
- "src/init/icu_util.cc",
"src/init/icu_util.h",
- "src/init/isolate-allocator.cc",
"src/init/isolate-allocator.h",
"src/init/setup-isolate.h",
- "src/init/startup-data-util.cc",
"src/init/startup-data-util.h",
- "src/init/v8.cc",
"src/init/v8.h",
"src/interpreter/block-coverage-builder.h",
- "src/interpreter/bytecode-array-accessor.cc",
- "src/interpreter/bytecode-array-accessor.h",
- "src/interpreter/bytecode-array-builder.cc",
"src/interpreter/bytecode-array-builder.h",
- "src/interpreter/bytecode-array-iterator.cc",
"src/interpreter/bytecode-array-iterator.h",
- "src/interpreter/bytecode-array-random-iterator.cc",
"src/interpreter/bytecode-array-random-iterator.h",
- "src/interpreter/bytecode-array-writer.cc",
"src/interpreter/bytecode-array-writer.h",
- "src/interpreter/bytecode-decoder.cc",
"src/interpreter/bytecode-decoder.h",
- "src/interpreter/bytecode-flags.cc",
"src/interpreter/bytecode-flags.h",
- "src/interpreter/bytecode-generator.cc",
"src/interpreter/bytecode-generator.h",
"src/interpreter/bytecode-jump-table.h",
- "src/interpreter/bytecode-label.cc",
"src/interpreter/bytecode-label.h",
- "src/interpreter/bytecode-node.cc",
"src/interpreter/bytecode-node.h",
- "src/interpreter/bytecode-operands.cc",
"src/interpreter/bytecode-operands.h",
"src/interpreter/bytecode-register-allocator.h",
- "src/interpreter/bytecode-register-optimizer.cc",
"src/interpreter/bytecode-register-optimizer.h",
- "src/interpreter/bytecode-register.cc",
"src/interpreter/bytecode-register.h",
- "src/interpreter/bytecode-source-info.cc",
"src/interpreter/bytecode-source-info.h",
"src/interpreter/bytecode-traits.h",
- "src/interpreter/bytecodes.cc",
"src/interpreter/bytecodes.h",
- "src/interpreter/constant-array-builder.cc",
"src/interpreter/constant-array-builder.h",
- "src/interpreter/control-flow-builders.cc",
"src/interpreter/control-flow-builders.h",
- "src/interpreter/handler-table-builder.cc",
"src/interpreter/handler-table-builder.h",
"src/interpreter/interpreter-generator.h",
- "src/interpreter/interpreter-intrinsics.cc",
"src/interpreter/interpreter-intrinsics.h",
- "src/interpreter/interpreter.cc",
"src/interpreter/interpreter.h",
- "src/json/json-parser.cc",
"src/json/json-parser.h",
- "src/json/json-stringifier.cc",
"src/json/json-stringifier.h",
+ "src/libsampler/sampler.h",
"src/logging/code-events.h",
"src/logging/counters-definitions.h",
"src/logging/counters-inl.h",
- "src/logging/counters.cc",
"src/logging/counters.h",
- "src/logging/local-logger.cc",
"src/logging/local-logger.h",
"src/logging/log-inl.h",
- "src/logging/log-utils.cc",
"src/logging/log-utils.h",
- "src/logging/log.cc",
"src/logging/log.h",
- "src/logging/metrics.cc",
"src/logging/metrics.h",
- "src/logging/tracing-flags.cc",
"src/logging/tracing-flags.h",
- "src/numbers/bignum-dtoa.cc",
"src/numbers/bignum-dtoa.h",
- "src/numbers/bignum.cc",
"src/numbers/bignum.h",
- "src/numbers/cached-powers.cc",
"src/numbers/cached-powers.h",
"src/numbers/conversions-inl.h",
- "src/numbers/conversions.cc",
"src/numbers/conversions.h",
- "src/numbers/diy-fp.cc",
"src/numbers/diy-fp.h",
"src/numbers/double.h",
- "src/numbers/dtoa.cc",
"src/numbers/dtoa.h",
- "src/numbers/fast-dtoa.cc",
"src/numbers/fast-dtoa.h",
- "src/numbers/fixed-dtoa.cc",
"src/numbers/fixed-dtoa.h",
"src/numbers/hash-seed-inl.h",
- "src/numbers/math-random.cc",
"src/numbers/math-random.h",
- "src/numbers/strtod.cc",
"src/numbers/strtod.h",
"src/objects/all-objects-inl.h",
"src/objects/allocation-site-inl.h",
@@ -3066,53 +2709,41 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/api-callbacks.h",
"src/objects/arguments-inl.h",
"src/objects/arguments.h",
- "src/objects/backing-store.cc",
"src/objects/backing-store.h",
"src/objects/bigint-inl.h",
- "src/objects/bigint.cc",
"src/objects/bigint.h",
"src/objects/cell-inl.h",
"src/objects/cell.h",
"src/objects/code-inl.h",
- "src/objects/code-kind.cc",
"src/objects/code-kind.h",
- "src/objects/code.cc",
"src/objects/code.h",
"src/objects/compilation-cache-table-inl.h",
- "src/objects/compilation-cache-table.cc",
"src/objects/compilation-cache-table.h",
"src/objects/compressed-slots-inl.h",
"src/objects/compressed-slots.h",
"src/objects/contexts-inl.h",
- "src/objects/contexts.cc",
"src/objects/contexts.h",
"src/objects/data-handler-inl.h",
"src/objects/data-handler.h",
"src/objects/debug-objects-inl.h",
- "src/objects/debug-objects.cc",
"src/objects/debug-objects.h",
"src/objects/descriptor-array-inl.h",
"src/objects/descriptor-array.h",
"src/objects/dictionary-inl.h",
"src/objects/dictionary.h",
"src/objects/elements-inl.h",
- "src/objects/elements-kind.cc",
"src/objects/elements-kind.h",
- "src/objects/elements.cc",
"src/objects/elements.h",
"src/objects/embedder-data-array-inl.h",
- "src/objects/embedder-data-array.cc",
"src/objects/embedder-data-array.h",
"src/objects/embedder-data-slot-inl.h",
"src/objects/embedder-data-slot.h",
"src/objects/feedback-cell-inl.h",
"src/objects/feedback-cell.h",
"src/objects/feedback-vector-inl.h",
- "src/objects/feedback-vector.cc",
"src/objects/feedback-vector.h",
"src/objects/field-index-inl.h",
"src/objects/field-index.h",
- "src/objects/field-type.cc",
"src/objects/field-type.h",
"src/objects/fixed-array-inl.h",
"src/objects/fixed-array.h",
@@ -3130,48 +2761,19 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/instance-type-inl.h",
"src/objects/instance-type.h",
"src/objects/internal-index.h",
- "src/objects/intl-objects.cc",
- "src/objects/intl-objects.h",
"src/objects/js-array-buffer-inl.h",
- "src/objects/js-array-buffer.cc",
"src/objects/js-array-buffer.h",
"src/objects/js-array-inl.h",
"src/objects/js-array.h",
- "src/objects/js-break-iterator-inl.h",
- "src/objects/js-break-iterator.cc",
- "src/objects/js-break-iterator.h",
- "src/objects/js-collator-inl.h",
- "src/objects/js-collator.cc",
- "src/objects/js-collator.h",
"src/objects/js-collection-inl.h",
"src/objects/js-collection-iterator.h",
"src/objects/js-collection.h",
- "src/objects/js-date-time-format-inl.h",
- "src/objects/js-date-time-format.cc",
- "src/objects/js-date-time-format.h",
- "src/objects/js-display-names-inl.h",
- "src/objects/js-display-names.cc",
- "src/objects/js-display-names.h",
"src/objects/js-function-inl.h",
- "src/objects/js-function.cc",
"src/objects/js-function.h",
"src/objects/js-generator-inl.h",
"src/objects/js-generator.h",
- "src/objects/js-list-format-inl.h",
- "src/objects/js-list-format.cc",
- "src/objects/js-list-format.h",
- "src/objects/js-locale-inl.h",
- "src/objects/js-locale.cc",
- "src/objects/js-locale.h",
- "src/objects/js-number-format-inl.h",
- "src/objects/js-number-format.cc",
- "src/objects/js-number-format.h",
"src/objects/js-objects-inl.h",
- "src/objects/js-objects.cc",
"src/objects/js-objects.h",
- "src/objects/js-plural-rules-inl.h",
- "src/objects/js-plural-rules.cc",
- "src/objects/js-plural-rules.h",
"src/objects/js-promise-inl.h",
"src/objects/js-promise.h",
"src/objects/js-proxy-inl.h",
@@ -3179,46 +2781,25 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/js-regexp-inl.h",
"src/objects/js-regexp-string-iterator-inl.h",
"src/objects/js-regexp-string-iterator.h",
- "src/objects/js-regexp.cc",
"src/objects/js-regexp.h",
- "src/objects/js-relative-time-format-inl.h",
- "src/objects/js-relative-time-format.cc",
- "src/objects/js-relative-time-format.h",
- "src/objects/js-segment-iterator-inl.h",
- "src/objects/js-segment-iterator.cc",
- "src/objects/js-segment-iterator.h",
- "src/objects/js-segmenter-inl.h",
- "src/objects/js-segmenter.cc",
- "src/objects/js-segmenter.h",
- "src/objects/js-segments-inl.h",
- "src/objects/js-segments.cc",
- "src/objects/js-segments.h",
"src/objects/js-weak-refs-inl.h",
"src/objects/js-weak-refs.h",
- "src/objects/keys.cc",
"src/objects/keys.h",
"src/objects/literal-objects-inl.h",
- "src/objects/literal-objects.cc",
"src/objects/literal-objects.h",
"src/objects/lookup-cache-inl.h",
- "src/objects/lookup-cache.cc",
"src/objects/lookup-cache.h",
"src/objects/lookup-inl.h",
- "src/objects/lookup.cc",
"src/objects/lookup.h",
- "src/objects/managed.cc",
"src/objects/managed.h",
"src/objects/map-inl.h",
- "src/objects/map-updater.cc",
"src/objects/map-updater.h",
- "src/objects/map.cc",
"src/objects/map.h",
"src/objects/maybe-object-inl.h",
"src/objects/maybe-object.h",
"src/objects/microtask-inl.h",
"src/objects/microtask.h",
"src/objects/module-inl.h",
- "src/objects/module.cc",
"src/objects/module.h",
"src/objects/name-inl.h",
"src/objects/name.h",
@@ -3228,15 +2809,12 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/objects-body-descriptors-inl.h",
"src/objects/objects-body-descriptors.h",
"src/objects/objects-inl.h",
- "src/objects/objects.cc",
"src/objects/objects.h",
"src/objects/oddball-inl.h",
"src/objects/oddball.h",
"src/objects/ordered-hash-table-inl.h",
- "src/objects/ordered-hash-table.cc",
"src/objects/ordered-hash-table.h",
"src/objects/osr-optimized-code-cache-inl.h",
- "src/objects/osr-optimized-code-cache.cc",
"src/objects/osr-optimized-code-cache.h",
"src/objects/primitive-heap-object-inl.h",
"src/objects/primitive-heap-object.h",
@@ -3248,399 +2826,183 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/property-cell.h",
"src/objects/property-descriptor-object-inl.h",
"src/objects/property-descriptor-object.h",
- "src/objects/property-descriptor.cc",
"src/objects/property-descriptor.h",
"src/objects/property-details.h",
- "src/objects/property.cc",
"src/objects/property.h",
"src/objects/prototype-info-inl.h",
"src/objects/prototype-info.h",
"src/objects/prototype.h",
"src/objects/regexp-match-info.h",
"src/objects/scope-info-inl.h",
- "src/objects/scope-info.cc",
"src/objects/scope-info.h",
"src/objects/script-inl.h",
"src/objects/script.h",
"src/objects/shared-function-info-inl.h",
- "src/objects/shared-function-info.cc",
"src/objects/shared-function-info.h",
"src/objects/slots-atomic-inl.h",
"src/objects/slots-inl.h",
"src/objects/slots.h",
- "src/objects/source-text-module.cc",
"src/objects/source-text-module.h",
"src/objects/stack-frame-info-inl.h",
- "src/objects/stack-frame-info.cc",
"src/objects/stack-frame-info.h",
- "src/objects/string-comparator.cc",
"src/objects/string-comparator.h",
"src/objects/string-inl.h",
"src/objects/string-set-inl.h",
"src/objects/string-set.h",
"src/objects/string-table-inl.h",
- "src/objects/string-table.cc",
"src/objects/string-table.h",
- "src/objects/string.cc",
"src/objects/string.h",
"src/objects/struct-inl.h",
"src/objects/struct.h",
"src/objects/swiss-hash-table-helpers.h",
"src/objects/swiss-name-dictionary-inl.h",
- "src/objects/swiss-name-dictionary.cc",
"src/objects/swiss-name-dictionary.h",
"src/objects/synthetic-module-inl.h",
- "src/objects/synthetic-module.cc",
"src/objects/synthetic-module.h",
"src/objects/tagged-field-inl.h",
"src/objects/tagged-field.h",
"src/objects/tagged-impl-inl.h",
- "src/objects/tagged-impl.cc",
"src/objects/tagged-impl.h",
"src/objects/tagged-index.h",
"src/objects/tagged-value-inl.h",
"src/objects/tagged-value.h",
"src/objects/template-objects-inl.h",
- "src/objects/template-objects.cc",
"src/objects/template-objects.h",
"src/objects/templates-inl.h",
"src/objects/templates.h",
"src/objects/torque-defined-classes-inl.h",
"src/objects/torque-defined-classes.h",
"src/objects/transitions-inl.h",
- "src/objects/transitions.cc",
"src/objects/transitions.h",
- "src/objects/type-hints.cc",
"src/objects/type-hints.h",
- "src/objects/value-serializer.cc",
"src/objects/value-serializer.h",
- "src/objects/visitors.cc",
"src/objects/visitors.h",
"src/parsing/expression-scope.h",
- "src/parsing/func-name-inferrer.cc",
"src/parsing/func-name-inferrer.h",
- "src/parsing/import-assertions.cc",
"src/parsing/import-assertions.h",
- "src/parsing/literal-buffer.cc",
"src/parsing/literal-buffer.h",
- "src/parsing/parse-info.cc",
"src/parsing/parse-info.h",
"src/parsing/parser-base.h",
- "src/parsing/parser.cc",
"src/parsing/parser.h",
- "src/parsing/parsing.cc",
"src/parsing/parsing.h",
- "src/parsing/pending-compilation-error-handler.cc",
"src/parsing/pending-compilation-error-handler.h",
"src/parsing/preparse-data-impl.h",
- "src/parsing/preparse-data.cc",
"src/parsing/preparse-data.h",
"src/parsing/preparser-logger.h",
- "src/parsing/preparser.cc",
"src/parsing/preparser.h",
- "src/parsing/rewriter.cc",
"src/parsing/rewriter.h",
- "src/parsing/scanner-character-streams.cc",
"src/parsing/scanner-character-streams.h",
- "src/parsing/scanner.cc",
"src/parsing/scanner.h",
- "src/parsing/token.cc",
"src/parsing/token.h",
- "src/profiler/allocation-tracker.cc",
"src/profiler/allocation-tracker.h",
"src/profiler/circular-queue-inl.h",
"src/profiler/circular-queue.h",
"src/profiler/cpu-profiler-inl.h",
- "src/profiler/cpu-profiler.cc",
"src/profiler/cpu-profiler.h",
- "src/profiler/heap-profiler.cc",
"src/profiler/heap-profiler.h",
"src/profiler/heap-snapshot-generator-inl.h",
- "src/profiler/heap-snapshot-generator.cc",
"src/profiler/heap-snapshot-generator.h",
"src/profiler/profile-generator-inl.h",
- "src/profiler/profile-generator.cc",
"src/profiler/profile-generator.h",
- "src/profiler/profiler-listener.cc",
"src/profiler/profiler-listener.h",
- "src/profiler/profiler-stats.cc",
"src/profiler/profiler-stats.h",
- "src/profiler/sampling-heap-profiler.cc",
"src/profiler/sampling-heap-profiler.h",
- "src/profiler/strings-storage.cc",
"src/profiler/strings-storage.h",
- "src/profiler/symbolizer.cc",
"src/profiler/symbolizer.h",
- "src/profiler/tick-sample.cc",
"src/profiler/tick-sample.h",
- "src/profiler/tracing-cpu-profiler.cc",
"src/profiler/tracing-cpu-profiler.h",
- "src/regexp/experimental/experimental-bytecode.cc",
+ "src/profiler/weak-code-registry.h",
"src/regexp/experimental/experimental-bytecode.h",
- "src/regexp/experimental/experimental-compiler.cc",
"src/regexp/experimental/experimental-compiler.h",
- "src/regexp/experimental/experimental-interpreter.cc",
"src/regexp/experimental/experimental-interpreter.h",
- "src/regexp/experimental/experimental.cc",
"src/regexp/experimental/experimental.h",
- "src/regexp/property-sequences.cc",
"src/regexp/property-sequences.h",
- "src/regexp/regexp-ast.cc",
"src/regexp/regexp-ast.h",
"src/regexp/regexp-bytecode-generator-inl.h",
- "src/regexp/regexp-bytecode-generator.cc",
"src/regexp/regexp-bytecode-generator.h",
- "src/regexp/regexp-bytecode-peephole.cc",
"src/regexp/regexp-bytecode-peephole.h",
- "src/regexp/regexp-bytecodes.cc",
"src/regexp/regexp-bytecodes.h",
- "src/regexp/regexp-compiler-tonode.cc",
- "src/regexp/regexp-compiler.cc",
"src/regexp/regexp-compiler.h",
- "src/regexp/regexp-dotprinter.cc",
"src/regexp/regexp-dotprinter.h",
- "src/regexp/regexp-error.cc",
"src/regexp/regexp-error.h",
- "src/regexp/regexp-interpreter.cc",
"src/regexp/regexp-interpreter.h",
"src/regexp/regexp-macro-assembler-arch.h",
- "src/regexp/regexp-macro-assembler-tracer.cc",
"src/regexp/regexp-macro-assembler-tracer.h",
- "src/regexp/regexp-macro-assembler.cc",
"src/regexp/regexp-macro-assembler.h",
"src/regexp/regexp-nodes.h",
- "src/regexp/regexp-parser.cc",
"src/regexp/regexp-parser.h",
- "src/regexp/regexp-stack.cc",
"src/regexp/regexp-stack.h",
- "src/regexp/regexp-utils.cc",
"src/regexp/regexp-utils.h",
- "src/regexp/regexp.cc",
"src/regexp/regexp.h",
"src/regexp/special-case.h",
"src/roots/roots-inl.h",
- "src/roots/roots.cc",
"src/roots/roots.h",
- "src/runtime/runtime-array.cc",
- "src/runtime/runtime-atomics.cc",
- "src/runtime/runtime-bigint.cc",
- "src/runtime/runtime-classes.cc",
- "src/runtime/runtime-collections.cc",
- "src/runtime/runtime-compiler.cc",
- "src/runtime/runtime-date.cc",
- "src/runtime/runtime-debug.cc",
- "src/runtime/runtime-forin.cc",
- "src/runtime/runtime-function.cc",
- "src/runtime/runtime-futex.cc",
- "src/runtime/runtime-generator.cc",
- "src/runtime/runtime-internal.cc",
- "src/runtime/runtime-intl.cc",
- "src/runtime/runtime-literals.cc",
- "src/runtime/runtime-module.cc",
- "src/runtime/runtime-numbers.cc",
- "src/runtime/runtime-object.cc",
- "src/runtime/runtime-operators.cc",
- "src/runtime/runtime-promise.cc",
- "src/runtime/runtime-proxy.cc",
- "src/runtime/runtime-regexp.cc",
- "src/runtime/runtime-scopes.cc",
- "src/runtime/runtime-strings.cc",
- "src/runtime/runtime-symbol.cc",
- "src/runtime/runtime-test.cc",
- "src/runtime/runtime-trace.cc",
- "src/runtime/runtime-typedarray.cc",
"src/runtime/runtime-utils.h",
- "src/runtime/runtime-wasm.cc",
- "src/runtime/runtime-weak-refs.cc",
- "src/runtime/runtime.cc",
"src/runtime/runtime.h",
"src/sanitizer/asan.h",
- "src/sanitizer/lsan-page-allocator.cc",
"src/sanitizer/lsan-page-allocator.h",
"src/sanitizer/msan.h",
"src/sanitizer/tsan.h",
- "src/snapshot/code-serializer.cc",
"src/snapshot/code-serializer.h",
- "src/snapshot/context-deserializer.cc",
"src/snapshot/context-deserializer.h",
- "src/snapshot/context-serializer.cc",
"src/snapshot/context-serializer.h",
- "src/snapshot/deserializer.cc",
"src/snapshot/deserializer.h",
- "src/snapshot/embedded/embedded-data.cc",
"src/snapshot/embedded/embedded-data.h",
- "src/snapshot/object-deserializer.cc",
+ "src/snapshot/embedded/embedded-file-writer-interface.h",
"src/snapshot/object-deserializer.h",
- "src/snapshot/read-only-deserializer.cc",
"src/snapshot/read-only-deserializer.h",
- "src/snapshot/read-only-serializer.cc",
"src/snapshot/read-only-serializer.h",
"src/snapshot/references.h",
- "src/snapshot/roots-serializer.cc",
"src/snapshot/roots-serializer.h",
- "src/snapshot/serializer-deserializer.cc",
"src/snapshot/serializer-deserializer.h",
- "src/snapshot/serializer.cc",
"src/snapshot/serializer.h",
- "src/snapshot/snapshot-compression.cc",
"src/snapshot/snapshot-compression.h",
- "src/snapshot/snapshot-data.cc",
"src/snapshot/snapshot-data.h",
- "src/snapshot/snapshot-source-sink.cc",
"src/snapshot/snapshot-source-sink.h",
- "src/snapshot/snapshot-utils.cc",
"src/snapshot/snapshot-utils.h",
- "src/snapshot/snapshot.cc",
"src/snapshot/snapshot.h",
- "src/snapshot/startup-deserializer.cc",
"src/snapshot/startup-deserializer.h",
- "src/snapshot/startup-serializer.cc",
"src/snapshot/startup-serializer.h",
"src/strings/char-predicates-inl.h",
- "src/strings/char-predicates.cc",
"src/strings/char-predicates.h",
"src/strings/string-builder-inl.h",
- "src/strings/string-builder.cc",
- "src/strings/string-case.cc",
"src/strings/string-case.h",
"src/strings/string-hasher-inl.h",
"src/strings/string-hasher.h",
"src/strings/string-search.h",
- "src/strings/string-stream.cc",
"src/strings/string-stream.h",
- "src/strings/unicode-decoder.cc",
"src/strings/unicode-decoder.h",
"src/strings/unicode-inl.h",
- "src/strings/unicode.cc",
"src/strings/unicode.h",
- "src/strings/uri.cc",
"src/strings/uri.h",
- "src/tasks/cancelable-task.cc",
"src/tasks/cancelable-task.h",
- "src/tasks/operations-barrier.cc",
"src/tasks/operations-barrier.h",
- "src/tasks/task-utils.cc",
"src/tasks/task-utils.h",
- "src/third_party/siphash/halfsiphash.cc",
"src/third_party/siphash/halfsiphash.h",
"src/third_party/utf8-decoder/utf8-decoder.h",
- "src/tracing/trace-event.cc",
"src/tracing/trace-event.h",
- "src/tracing/traced-value.cc",
"src/tracing/traced-value.h",
- "src/tracing/tracing-category-observer.cc",
"src/tracing/tracing-category-observer.h",
- "src/trap-handler/handler-inside.cc",
- "src/trap-handler/handler-outside.cc",
- "src/trap-handler/handler-shared.cc",
"src/trap-handler/trap-handler-internal.h",
"src/trap-handler/trap-handler.h",
- "src/utils/address-map.cc",
"src/utils/address-map.h",
- "src/utils/allocation.cc",
"src/utils/allocation.h",
- "src/utils/bit-vector.cc",
"src/utils/bit-vector.h",
"src/utils/boxed-float.h",
- "src/utils/detachable-vector.cc",
"src/utils/detachable-vector.h",
- "src/utils/identity-map.cc",
"src/utils/identity-map.h",
"src/utils/locked-queue-inl.h",
"src/utils/locked-queue.h",
- "src/utils/memcopy.cc",
"src/utils/memcopy.h",
- "src/utils/ostreams.cc",
"src/utils/ostreams.h",
"src/utils/pointer-with-payload.h",
"src/utils/scoped-list.h",
"src/utils/utils-inl.h",
- "src/utils/utils.cc",
"src/utils/utils.h",
"src/utils/vector.h",
- "src/utils/version.cc",
"src/utils/version.h",
- "src/wasm/baseline/liftoff-assembler-defs.h",
- "src/wasm/baseline/liftoff-assembler.cc",
- "src/wasm/baseline/liftoff-assembler.h",
- "src/wasm/baseline/liftoff-compiler.cc",
- "src/wasm/baseline/liftoff-compiler.h",
- "src/wasm/baseline/liftoff-register.h",
- "src/wasm/code-space-access.h",
- "src/wasm/compilation-environment.h",
- "src/wasm/decoder.h",
- "src/wasm/function-body-decoder-impl.h",
- "src/wasm/function-body-decoder.cc",
- "src/wasm/function-body-decoder.h",
- "src/wasm/function-compiler.cc",
- "src/wasm/function-compiler.h",
- "src/wasm/graph-builder-interface.cc",
- "src/wasm/graph-builder-interface.h",
- "src/wasm/jump-table-assembler.cc",
- "src/wasm/jump-table-assembler.h",
- "src/wasm/leb-helper.h",
- "src/wasm/local-decl-encoder.cc",
- "src/wasm/local-decl-encoder.h",
- "src/wasm/memory-tracing.cc",
- "src/wasm/memory-tracing.h",
- "src/wasm/module-compiler.cc",
- "src/wasm/module-compiler.h",
- "src/wasm/module-decoder.cc",
- "src/wasm/module-decoder.h",
- "src/wasm/module-instantiate.cc",
- "src/wasm/module-instantiate.h",
- "src/wasm/object-access.h",
- "src/wasm/signature-map.cc",
- "src/wasm/signature-map.h",
- "src/wasm/simd-shuffle.cc",
- "src/wasm/simd-shuffle.h",
- "src/wasm/streaming-decoder.cc",
- "src/wasm/streaming-decoder.h",
- "src/wasm/struct-types.h",
- "src/wasm/sync-streaming-decoder.cc",
- "src/wasm/value-type.cc",
- "src/wasm/value-type.h",
- "src/wasm/wasm-arguments.h",
- "src/wasm/wasm-code-manager.cc",
- "src/wasm/wasm-code-manager.h",
- "src/wasm/wasm-constants.h",
- "src/wasm/wasm-debug.cc",
- "src/wasm/wasm-engine.cc",
- "src/wasm/wasm-engine.h",
- "src/wasm/wasm-external-refs.cc",
- "src/wasm/wasm-external-refs.h",
- "src/wasm/wasm-feature-flags.h",
- "src/wasm/wasm-features.cc",
- "src/wasm/wasm-features.h",
- "src/wasm/wasm-import-wrapper-cache.cc",
- "src/wasm/wasm-import-wrapper-cache.h",
- "src/wasm/wasm-js.cc",
- "src/wasm/wasm-js.h",
- "src/wasm/wasm-limits.h",
- "src/wasm/wasm-linkage.h",
- "src/wasm/wasm-module-builder.cc",
- "src/wasm/wasm-module-builder.h",
- "src/wasm/wasm-module-sourcemap.cc",
- "src/wasm/wasm-module-sourcemap.h",
- "src/wasm/wasm-module.cc",
- "src/wasm/wasm-module.h",
- "src/wasm/wasm-objects-inl.h",
- "src/wasm/wasm-objects.cc",
- "src/wasm/wasm-objects.h",
- "src/wasm/wasm-opcodes.cc",
- "src/wasm/wasm-opcodes.h",
- "src/wasm/wasm-result.cc",
- "src/wasm/wasm-result.h",
- "src/wasm/wasm-serialization.cc",
- "src/wasm/wasm-serialization.h",
- "src/wasm/wasm-subtyping.cc",
- "src/wasm/wasm-subtyping.h",
- "src/wasm/wasm-tier.h",
- "src/wasm/wasm-value.h",
- "src/zone/accounting-allocator.cc",
"src/zone/accounting-allocator.h",
"src/zone/compressed-zone-ptr.h",
- "src/zone/type-stats.cc",
"src/zone/type-stats.h",
"src/zone/zone-allocator.h",
"src/zone/zone-chunk-list.h",
@@ -3650,25 +3012,100 @@ v8_source_set("v8_base_without_compiler") {
"src/zone/zone-hashmap.h",
"src/zone/zone-list-inl.h",
"src/zone/zone-list.h",
- "src/zone/zone-segment.cc",
"src/zone/zone-segment.h",
"src/zone/zone-type-traits.h",
"src/zone/zone-utils.h",
- "src/zone/zone.cc",
"src/zone/zone.h",
]
+ if (v8_use_perfetto) {
+ sources -= [ "//base/trace_event/common/trace_event_common.h" ]
+ }
+
if (v8_enable_webassembly) {
sources += [
- "src/asmjs/asm-js.cc",
"src/asmjs/asm-js.h",
- "src/asmjs/asm-names.h",
- "src/asmjs/asm-parser.cc",
"src/asmjs/asm-parser.h",
- "src/asmjs/asm-scanner.cc",
"src/asmjs/asm-scanner.h",
- "src/asmjs/asm-types.cc",
"src/asmjs/asm-types.h",
+ "src/compiler/int64-lowering.h",
+ "src/compiler/wasm-compiler.h",
+ "src/debug/debug-wasm-objects-inl.h",
+ "src/debug/debug-wasm-objects.h",
+ "src/wasm/baseline/liftoff-assembler-defs.h",
+ "src/wasm/baseline/liftoff-assembler.h",
+ "src/wasm/baseline/liftoff-compiler.h",
+ "src/wasm/baseline/liftoff-register.h",
+ "src/wasm/code-space-access.h",
+ "src/wasm/compilation-environment.h",
+ "src/wasm/decoder.h",
+ "src/wasm/function-body-decoder-impl.h",
+ "src/wasm/function-body-decoder.h",
+ "src/wasm/function-compiler.h",
+ "src/wasm/graph-builder-interface.h",
+ "src/wasm/jump-table-assembler.h",
+ "src/wasm/leb-helper.h",
+ "src/wasm/local-decl-encoder.h",
+ "src/wasm/memory-tracing.h",
+ "src/wasm/module-compiler.h",
+ "src/wasm/module-decoder.h",
+ "src/wasm/module-instantiate.h",
+ "src/wasm/object-access.h",
+ "src/wasm/signature-map.h",
+ "src/wasm/simd-shuffle.h",
+ "src/wasm/streaming-decoder.h",
+ "src/wasm/struct-types.h",
+ "src/wasm/value-type.h",
+ "src/wasm/wasm-arguments.h",
+ "src/wasm/wasm-code-manager.h",
+ "src/wasm/wasm-engine.h",
+ "src/wasm/wasm-external-refs.h",
+ "src/wasm/wasm-feature-flags.h",
+ "src/wasm/wasm-features.h",
+ "src/wasm/wasm-import-wrapper-cache.h",
+ "src/wasm/wasm-js.h",
+ "src/wasm/wasm-linkage.h",
+ "src/wasm/wasm-module-builder.h",
+ "src/wasm/wasm-module-sourcemap.h",
+ "src/wasm/wasm-module.h",
+ "src/wasm/wasm-objects-inl.h",
+ "src/wasm/wasm-objects.h",
+ "src/wasm/wasm-opcodes.h",
+ "src/wasm/wasm-result.h",
+ "src/wasm/wasm-serialization.h",
+ "src/wasm/wasm-subtyping.h",
+ "src/wasm/wasm-tier.h",
+ "src/wasm/wasm-value.h",
+ ]
+ }
+
+ if (v8_enable_i18n_support) {
+ sources += [
+ "src/objects/intl-objects.h",
+ "src/objects/js-break-iterator-inl.h",
+ "src/objects/js-break-iterator.h",
+ "src/objects/js-collator-inl.h",
+ "src/objects/js-collator.h",
+ "src/objects/js-date-time-format-inl.h",
+ "src/objects/js-date-time-format.h",
+ "src/objects/js-display-names-inl.h",
+ "src/objects/js-display-names.h",
+ "src/objects/js-list-format-inl.h",
+ "src/objects/js-list-format.h",
+ "src/objects/js-locale-inl.h",
+ "src/objects/js-locale.h",
+ "src/objects/js-number-format-inl.h",
+ "src/objects/js-number-format.h",
+ "src/objects/js-plural-rules-inl.h",
+ "src/objects/js-plural-rules.h",
+ "src/objects/js-relative-time-format-inl.h",
+ "src/objects/js-relative-time-format.h",
+ "src/objects/js-segment-iterator-inl.h",
+ "src/objects/js-segment-iterator.h",
+ "src/objects/js-segmenter-inl.h",
+ "src/objects/js-segmenter.h",
+ "src/objects/js-segments-inl.h",
+ "src/objects/js-segments.h",
]
}
@@ -3676,6 +3113,897 @@ v8_source_set("v8_base_without_compiler") {
sources += [ "src/execution/pointer-authentication-dummy.h" ]
}
+ if (v8_enable_conservative_stack_scanning) {
+ sources += [
+ "src/heap/conservative-stack-visitor.h",
+ "src/heap/object-start-bitmap.h",
+ ]
+ }
+
+ if (v8_enable_wasm_gdb_remote_debugging) {
+ sources += [
+ "src/debug/wasm/gdb-server/gdb-remote-util.h",
+ "src/debug/wasm/gdb-server/gdb-server-thread.h",
+ "src/debug/wasm/gdb-server/gdb-server.h",
+ "src/debug/wasm/gdb-server/packet.h",
+ "src/debug/wasm/gdb-server/session.h",
+ "src/debug/wasm/gdb-server/target.h",
+ "src/debug/wasm/gdb-server/transport.h",
+ "src/debug/wasm/gdb-server/wasm-module-debug.h",
+ ]
+ }
+
+ if (v8_current_cpu == "x86") {
+ sources += [ ### gcmole(arch:ia32) ###
+ "src/baseline/ia32/baseline-assembler-ia32-inl.h",
+ "src/baseline/ia32/baseline-compiler-ia32-inl.h",
+ "src/codegen/ia32/assembler-ia32-inl.h",
+ "src/codegen/ia32/assembler-ia32.h",
+ "src/codegen/ia32/constants-ia32.h",
+ "src/codegen/ia32/macro-assembler-ia32.h",
+ "src/codegen/ia32/register-ia32.h",
+ "src/codegen/ia32/sse-instr.h",
+ "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h",
+ "src/compiler/backend/ia32/instruction-codes-ia32.h",
+ "src/execution/ia32/frame-constants-ia32.h",
+ "src/regexp/ia32/regexp-macro-assembler-ia32.h",
+ "src/wasm/baseline/ia32/liftoff-assembler-ia32.h",
+ ]
+ } else if (v8_current_cpu == "x64") {
+ sources += [ ### gcmole(arch:x64) ###
+ "src/baseline/x64/baseline-assembler-x64-inl.h",
+ "src/baseline/x64/baseline-compiler-x64-inl.h",
+ "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h",
+ "src/codegen/x64/assembler-x64-inl.h",
+ "src/codegen/x64/assembler-x64.h",
+ "src/codegen/x64/constants-x64.h",
+ "src/codegen/x64/fma-instr.h",
+ "src/codegen/x64/macro-assembler-x64.h",
+ "src/codegen/x64/register-x64.h",
+ "src/codegen/x64/sse-instr.h",
+ "src/compiler/backend/x64/instruction-codes-x64.h",
+ "src/compiler/backend/x64/unwinding-info-writer-x64.h",
+ "src/execution/x64/frame-constants-x64.h",
+ "src/regexp/x64/regexp-macro-assembler-x64.h",
+ "src/third_party/valgrind/valgrind.h",
+ "src/wasm/baseline/x64/liftoff-assembler-x64.h",
+ ]
+
+ # iOS Xcode simulator builds run on an x64 target. iOS and macOS are both
+ # based on Darwin and thus POSIX-compliant to a similar degree.
+ if (is_linux || is_chromeos || is_mac || is_ios || target_os == "freebsd") {
+ sources += [ "src/trap-handler/handler-inside-posix.h" ]
+ }
+ if (is_win) {
+ sources += [
+ "src/diagnostics/unwinding-info-win64.h",
+ "src/trap-handler/handler-inside-win.h",
+ ]
+ }
+ } else if (v8_current_cpu == "arm") {
+ sources += [ ### gcmole(arch:arm) ###
+ "src/baseline/arm/baseline-assembler-arm-inl.h",
+ "src/baseline/arm/baseline-compiler-arm-inl.h",
+ "src/codegen/arm/assembler-arm-inl.h",
+ "src/codegen/arm/assembler-arm.h",
+ "src/codegen/arm/constants-arm.h",
+ "src/codegen/arm/macro-assembler-arm.h",
+ "src/codegen/arm/register-arm.h",
+ "src/compiler/backend/arm/instruction-codes-arm.h",
+ "src/compiler/backend/arm/unwinding-info-writer-arm.h",
+ "src/execution/arm/frame-constants-arm.h",
+ "src/execution/arm/simulator-arm.h",
+ "src/regexp/arm/regexp-macro-assembler-arm.h",
+ "src/wasm/baseline/arm/liftoff-assembler-arm.h",
+ ]
+ } else if (v8_current_cpu == "arm64") {
+ sources += [ ### gcmole(arch:arm64) ###
+ "src/baseline/arm64/baseline-assembler-arm64-inl.h",
+ "src/baseline/arm64/baseline-compiler-arm64-inl.h",
+ "src/codegen/arm64/assembler-arm64-inl.h",
+ "src/codegen/arm64/assembler-arm64.h",
+ "src/codegen/arm64/constants-arm64.h",
+ "src/codegen/arm64/decoder-arm64-inl.h",
+ "src/codegen/arm64/decoder-arm64.h",
+ "src/codegen/arm64/instructions-arm64.h",
+ "src/codegen/arm64/macro-assembler-arm64-inl.h",
+ "src/codegen/arm64/macro-assembler-arm64.h",
+ "src/codegen/arm64/register-arm64.h",
+ "src/codegen/arm64/utils-arm64.h",
+ "src/compiler/backend/arm64/instruction-codes-arm64.h",
+ "src/compiler/backend/arm64/unwinding-info-writer-arm64.h",
+ "src/diagnostics/arm64/disasm-arm64.h",
+ "src/execution/arm64/frame-constants-arm64.h",
+ "src/execution/arm64/simulator-arm64.h",
+ "src/regexp/arm64/regexp-macro-assembler-arm64.h",
+ "src/wasm/baseline/arm64/liftoff-assembler-arm64.h",
+ ]
+ if (v8_control_flow_integrity) {
+ sources += [ "src/execution/arm64/pointer-authentication-arm64.h" ]
+ }
+ if (current_cpu == "arm64" && is_mac) {
+ sources += [ "src/trap-handler/handler-inside-posix.h" ]
+ }
+ if (is_win) {
+ sources += [ "src/diagnostics/unwinding-info-win64.h" ]
+ }
+ } else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
+ sources += [ ### gcmole(arch:mipsel) ###
+ "src/baseline/mips/baseline-assembler-mips-inl.h",
+ "src/baseline/mips/baseline-compiler-mips-inl.h",
+ "src/codegen/mips/assembler-mips-inl.h",
+ "src/codegen/mips/assembler-mips.h",
+ "src/codegen/mips/constants-mips.h",
+ "src/codegen/mips/macro-assembler-mips.h",
+ "src/codegen/mips/register-mips.h",
+ "src/compiler/backend/mips/instruction-codes-mips.h",
+ "src/execution/mips/frame-constants-mips.h",
+ "src/execution/mips/simulator-mips.h",
+ "src/regexp/mips/regexp-macro-assembler-mips.h",
+ "src/wasm/baseline/mips/liftoff-assembler-mips.h",
+ ]
+ } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
+ sources += [ ### gcmole(arch:mips64el) ###
+ "src/baseline/mips64/baseline-assembler-mips64-inl.h",
+ "src/baseline/mips64/baseline-compiler-mips64-inl.h",
+ "src/codegen/mips64/assembler-mips64-inl.h",
+ "src/codegen/mips64/assembler-mips64.h",
+ "src/codegen/mips64/constants-mips64.h",
+ "src/codegen/mips64/macro-assembler-mips64.h",
+ "src/codegen/mips64/register-mips64.h",
+ "src/compiler/backend/mips64/instruction-codes-mips64.h",
+ "src/execution/mips64/frame-constants-mips64.h",
+ "src/execution/mips64/simulator-mips64.h",
+ "src/regexp/mips64/regexp-macro-assembler-mips64.h",
+ "src/wasm/baseline/mips64/liftoff-assembler-mips64.h",
+ ]
+ } else if (v8_current_cpu == "ppc") {
+ sources += [ ### gcmole(arch:ppc) ###
+ "src/baseline/ppc/baseline-assembler-ppc-inl.h",
+ "src/baseline/ppc/baseline-compiler-ppc-inl.h",
+ "src/codegen/ppc/assembler-ppc-inl.h",
+ "src/codegen/ppc/assembler-ppc.h",
+ "src/codegen/ppc/constants-ppc.h",
+ "src/codegen/ppc/macro-assembler-ppc.h",
+ "src/codegen/ppc/register-ppc.h",
+ "src/compiler/backend/ppc/instruction-codes-ppc.h",
+ "src/compiler/backend/ppc/unwinding-info-writer-ppc.h",
+ "src/execution/ppc/frame-constants-ppc.h",
+ "src/execution/ppc/simulator-ppc.h",
+ "src/regexp/ppc/regexp-macro-assembler-ppc.h",
+ "src/wasm/baseline/ppc/liftoff-assembler-ppc.h",
+ ]
+ } else if (v8_current_cpu == "ppc64") {
+ sources += [ ### gcmole(arch:ppc64) ###
+ "src/baseline/ppc/baseline-assembler-ppc-inl.h",
+ "src/baseline/ppc/baseline-compiler-ppc-inl.h",
+ "src/codegen/ppc/assembler-ppc-inl.h",
+ "src/codegen/ppc/assembler-ppc.h",
+ "src/codegen/ppc/constants-ppc.h",
+ "src/codegen/ppc/macro-assembler-ppc.h",
+ "src/codegen/ppc/register-ppc.h",
+ "src/compiler/backend/ppc/instruction-codes-ppc.h",
+ "src/compiler/backend/ppc/unwinding-info-writer-ppc.h",
+ "src/execution/ppc/frame-constants-ppc.h",
+ "src/execution/ppc/simulator-ppc.h",
+ "src/regexp/ppc/regexp-macro-assembler-ppc.h",
+ "src/wasm/baseline/ppc/liftoff-assembler-ppc.h",
+ ]
+ } else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
+ sources += [ ### gcmole(arch:s390) ###
+ "src/baseline/s390/baseline-assembler-s390-inl.h",
+ "src/baseline/s390/baseline-compiler-s390-inl.h",
+ "src/codegen/s390/assembler-s390-inl.h",
+ "src/codegen/s390/assembler-s390.h",
+ "src/codegen/s390/constants-s390.h",
+ "src/codegen/s390/macro-assembler-s390.h",
+ "src/codegen/s390/register-s390.h",
+ "src/compiler/backend/s390/instruction-codes-s390.h",
+ "src/compiler/backend/s390/unwinding-info-writer-s390.h",
+ "src/execution/s390/frame-constants-s390.h",
+ "src/execution/s390/simulator-s390.h",
+ "src/regexp/s390/regexp-macro-assembler-s390.h",
+ "src/wasm/baseline/s390/liftoff-assembler-s390.h",
+ ]
+ } else if (v8_current_cpu == "riscv64") {
+ sources += [ ### gcmole(arch:riscv64) ###
+ "src/codegen/riscv64/assembler-riscv64-inl.h",
+ "src/codegen/riscv64/assembler-riscv64.h",
+ "src/codegen/riscv64/constants-riscv64.h",
+ "src/codegen/riscv64/macro-assembler-riscv64.h",
+ "src/codegen/riscv64/register-riscv64.h",
+ "src/compiler/backend/riscv64/instruction-codes-riscv64.h",
+ "src/execution/riscv64/frame-constants-riscv64.h",
+ "src/execution/riscv64/simulator-riscv64.h",
+ "src/regexp/riscv64/regexp-macro-assembler-riscv64.h",
+ "src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h",
+ ]
+ }
+
+ public_deps = [
+ ":torque_runtime_support",
+ ":v8_flags",
+ ":v8_headers",
+ ":v8_maybe_icu",
+ ":v8_shared_internal_headers",
+ ]
+
+ deps = [
+ ":cppgc_headers",
+ ":generate_bytecode_builtins_list",
+ ":run_torque",
+ ":v8_libbase",
+ ]
+}
+
+v8_compiler_sources = [
+ ### gcmole(all) ###
+ "src/compiler/access-builder.cc",
+ "src/compiler/access-info.cc",
+ "src/compiler/add-type-assertions-reducer.cc",
+ "src/compiler/all-nodes.cc",
+ "src/compiler/backend/code-generator.cc",
+ "src/compiler/backend/frame-elider.cc",
+ "src/compiler/backend/gap-resolver.cc",
+ "src/compiler/backend/instruction-scheduler.cc",
+ "src/compiler/backend/instruction-selector.cc",
+ "src/compiler/backend/instruction.cc",
+ "src/compiler/backend/jump-threading.cc",
+ "src/compiler/backend/mid-tier-register-allocator.cc",
+ "src/compiler/backend/move-optimizer.cc",
+ "src/compiler/backend/register-allocator-verifier.cc",
+ "src/compiler/backend/register-allocator.cc",
+ "src/compiler/backend/spill-placer.cc",
+ "src/compiler/basic-block-instrumentor.cc",
+ "src/compiler/branch-elimination.cc",
+ "src/compiler/bytecode-analysis.cc",
+ "src/compiler/bytecode-graph-builder.cc",
+ "src/compiler/bytecode-liveness-map.cc",
+ "src/compiler/c-linkage.cc",
+ "src/compiler/checkpoint-elimination.cc",
+ "src/compiler/code-assembler.cc",
+ "src/compiler/common-node-cache.cc",
+ "src/compiler/common-operator-reducer.cc",
+ "src/compiler/common-operator.cc",
+ "src/compiler/compilation-dependencies.cc",
+ "src/compiler/compiler-source-position-table.cc",
+ "src/compiler/constant-folding-reducer.cc",
+ "src/compiler/control-equivalence.cc",
+ "src/compiler/control-flow-optimizer.cc",
+ "src/compiler/csa-load-elimination.cc",
+ "src/compiler/dead-code-elimination.cc",
+ "src/compiler/decompression-optimizer.cc",
+ "src/compiler/effect-control-linearizer.cc",
+ "src/compiler/escape-analysis-reducer.cc",
+ "src/compiler/escape-analysis.cc",
+ "src/compiler/feedback-source.cc",
+ "src/compiler/frame-states.cc",
+ "src/compiler/frame.cc",
+ "src/compiler/graph-assembler.cc",
+ "src/compiler/graph-reducer.cc",
+ "src/compiler/graph-trimmer.cc",
+ "src/compiler/graph-visualizer.cc",
+ "src/compiler/graph.cc",
+ "src/compiler/js-call-reducer.cc",
+ "src/compiler/js-context-specialization.cc",
+ "src/compiler/js-create-lowering.cc",
+ "src/compiler/js-generic-lowering.cc",
+ "src/compiler/js-graph.cc",
+ "src/compiler/js-heap-broker.cc",
+ "src/compiler/js-heap-copy-reducer.cc",
+ "src/compiler/js-inlining-heuristic.cc",
+ "src/compiler/js-inlining.cc",
+ "src/compiler/js-intrinsic-lowering.cc",
+ "src/compiler/js-native-context-specialization.cc",
+ "src/compiler/js-operator.cc",
+ "src/compiler/js-type-hint-lowering.cc",
+ "src/compiler/js-typed-lowering.cc",
+ "src/compiler/linkage.cc",
+ "src/compiler/load-elimination.cc",
+ "src/compiler/loop-analysis.cc",
+ "src/compiler/loop-peeling.cc",
+ "src/compiler/loop-unrolling.cc",
+ "src/compiler/loop-variable-optimizer.cc",
+ "src/compiler/machine-graph-verifier.cc",
+ "src/compiler/machine-graph.cc",
+ "src/compiler/machine-operator-reducer.cc",
+ "src/compiler/machine-operator.cc",
+ "src/compiler/map-inference.cc",
+ "src/compiler/memory-lowering.cc",
+ "src/compiler/memory-optimizer.cc",
+ "src/compiler/node-marker.cc",
+ "src/compiler/node-matchers.cc",
+ "src/compiler/node-observer.cc",
+ "src/compiler/node-origin-table.cc",
+ "src/compiler/node-properties.cc",
+ "src/compiler/node.cc",
+ "src/compiler/opcodes.cc",
+ "src/compiler/operation-typer.cc",
+ "src/compiler/operator-properties.cc",
+ "src/compiler/operator.cc",
+ "src/compiler/osr.cc",
+ "src/compiler/pipeline-statistics.cc",
+ "src/compiler/pipeline.cc",
+ "src/compiler/property-access-builder.cc",
+ "src/compiler/raw-machine-assembler.cc",
+ "src/compiler/redundancy-elimination.cc",
+ "src/compiler/refs-map.cc",
+ "src/compiler/representation-change.cc",
+ "src/compiler/schedule.cc",
+ "src/compiler/scheduled-machine-lowering.cc",
+ "src/compiler/scheduler.cc",
+ "src/compiler/select-lowering.cc",
+ "src/compiler/serializer-for-background-compilation.cc",
+ "src/compiler/simplified-lowering.cc",
+ "src/compiler/simplified-operator-reducer.cc",
+ "src/compiler/simplified-operator.cc",
+ "src/compiler/state-values-utils.cc",
+ "src/compiler/store-store-elimination.cc",
+ "src/compiler/type-cache.cc",
+ "src/compiler/type-narrowing-reducer.cc",
+ "src/compiler/typed-optimization.cc",
+ "src/compiler/typer.cc",
+ "src/compiler/types.cc",
+ "src/compiler/value-numbering-reducer.cc",
+ "src/compiler/verifier.cc",
+ "src/compiler/zone-stats.cc",
+]
+
+if (v8_enable_webassembly) {
+ v8_compiler_sources += [
+ "src/compiler/int64-lowering.cc",
+ "src/compiler/simd-scalar-lowering.cc",
+ "src/compiler/wasm-compiler.cc",
+ ]
+}
+
+# The src/compiler files with optimizations.
+v8_source_set("v8_compiler_opt") {
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+
+ sources = v8_compiler_sources
+
+ public_deps = [
+ ":generate_bytecode_builtins_list",
+ ":run_torque",
+ ":v8_maybe_icu",
+ ":v8_tracing",
+ ]
+
+ deps = [
+ ":v8_base_without_compiler",
+ ":v8_internal_headers",
+ ":v8_libbase",
+ ":v8_shared_internal_headers",
+ ]
+
+ if (is_debug && !v8_optimized_debug && v8_enable_fast_mksnapshot) {
+ # The :no_optimize config is added to v8_add_configs in v8.gni.
+ remove_configs = [ "//build/config/compiler:no_optimize" ]
+ configs = [ ":always_optimize" ]
+ } else {
+ # Without this else branch, gn fails to generate build files for non-debug
+ # builds (because we try to remove a config that is not present).
+ # So we include it, even if this config is not used outside of debug builds.
+ configs = [ ":internal_config" ]
+ }
+}
+
+# The src/compiler files with default optimization behavior.
+v8_source_set("v8_compiler") {
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+
+ sources = v8_compiler_sources
+
+ public_deps = [
+ ":generate_bytecode_builtins_list",
+ ":run_torque",
+ ":v8_internal_headers",
+ ":v8_maybe_icu",
+ ":v8_tracing",
+ ]
+
+ deps = [
+ ":v8_base_without_compiler",
+ ":v8_libbase",
+ ":v8_shared_internal_headers",
+ ]
+
+ configs = [ ":internal_config" ]
+}
+
+group("v8_compiler_for_mksnapshot") {
+ if (is_debug && !v8_optimized_debug && v8_enable_fast_mksnapshot) {
+ deps = [ ":v8_compiler_opt" ]
+ } else {
+ deps = [ ":v8_compiler" ]
+ }
+}
+
+# Any target using trace events must directly or indirectly depend on
+# v8_tracing.
+group("v8_tracing") {
+ if (v8_use_perfetto) {
+ if (build_with_chromium) {
+ public_deps = [ "//third_party/perfetto:libperfetto" ]
+ } else {
+ public_deps = [ ":v8_libperfetto" ]
+ }
+ }
+}
+
+v8_source_set("v8_base_without_compiler") {
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+
+ # Split static libraries on windows into two.
+ split_count = 2
+
+ sources = [
+ ### gcmole(all) ###
+ "src/api/api-arguments.cc",
+ "src/api/api-natives.cc",
+ "src/api/api.cc",
+ "src/ast/ast-function-literal-id-reindexer.cc",
+ "src/ast/ast-value-factory.cc",
+ "src/ast/ast.cc",
+ "src/ast/modules.cc",
+ "src/ast/prettyprinter.cc",
+ "src/ast/scopes.cc",
+ "src/ast/source-range-ast-visitor.cc",
+ "src/ast/variables.cc",
+ "src/baseline/baseline-compiler.cc",
+ "src/baseline/baseline.cc",
+ "src/baseline/bytecode-offset-iterator.cc",
+ "src/builtins/accessors.cc",
+ "src/builtins/builtins-api.cc",
+ "src/builtins/builtins-array.cc",
+ "src/builtins/builtins-arraybuffer.cc",
+ "src/builtins/builtins-async-module.cc",
+ "src/builtins/builtins-bigint.cc",
+ "src/builtins/builtins-callsite.cc",
+ "src/builtins/builtins-collections.cc",
+ "src/builtins/builtins-console.cc",
+ "src/builtins/builtins-dataview.cc",
+ "src/builtins/builtins-date.cc",
+ "src/builtins/builtins-error.cc",
+ "src/builtins/builtins-function.cc",
+ "src/builtins/builtins-global.cc",
+ "src/builtins/builtins-internal.cc",
+ "src/builtins/builtins-intl.cc",
+ "src/builtins/builtins-json.cc",
+ "src/builtins/builtins-number.cc",
+ "src/builtins/builtins-object.cc",
+ "src/builtins/builtins-reflect.cc",
+ "src/builtins/builtins-regexp.cc",
+ "src/builtins/builtins-sharedarraybuffer.cc",
+ "src/builtins/builtins-string.cc",
+ "src/builtins/builtins-symbol.cc",
+ "src/builtins/builtins-trace.cc",
+ "src/builtins/builtins-typed-array.cc",
+ "src/builtins/builtins-weak-refs.cc",
+ "src/builtins/builtins.cc",
+ "src/builtins/constants-table-builder.cc",
+ "src/codegen/aligned-slot-allocator.cc",
+ "src/codegen/assembler.cc",
+ "src/codegen/bailout-reason.cc",
+ "src/codegen/code-comments.cc",
+ "src/codegen/code-desc.cc",
+ "src/codegen/code-factory.cc",
+ "src/codegen/code-reference.cc",
+ "src/codegen/compilation-cache.cc",
+ "src/codegen/compiler.cc",
+ "src/codegen/constant-pool.cc",
+ "src/codegen/external-reference-encoder.cc",
+ "src/codegen/external-reference-table.cc",
+ "src/codegen/external-reference.cc",
+ "src/codegen/flush-instruction-cache.cc",
+ "src/codegen/handler-table.cc",
+ "src/codegen/interface-descriptors.cc",
+ "src/codegen/machine-type.cc",
+ "src/codegen/optimized-compilation-info.cc",
+ "src/codegen/pending-optimization-table.cc",
+ "src/codegen/register-configuration.cc",
+ "src/codegen/reloc-info.cc",
+ "src/codegen/safepoint-table.cc",
+ "src/codegen/source-position-table.cc",
+ "src/codegen/source-position.cc",
+ "src/codegen/string-constants.cc",
+ "src/codegen/tick-counter.cc",
+ "src/codegen/tnode.cc",
+ "src/codegen/turbo-assembler.cc",
+ "src/codegen/unoptimized-compilation-info.cc",
+ "src/common/assert-scope.cc",
+ "src/compiler-dispatcher/compiler-dispatcher.cc",
+ "src/compiler-dispatcher/optimizing-compile-dispatcher.cc",
+ "src/date/date.cc",
+ "src/date/dateparser.cc",
+ "src/debug/debug-coverage.cc",
+ "src/debug/debug-evaluate.cc",
+ "src/debug/debug-frames.cc",
+ "src/debug/debug-interface.cc",
+ "src/debug/debug-property-iterator.cc",
+ "src/debug/debug-scope-iterator.cc",
+ "src/debug/debug-scopes.cc",
+ "src/debug/debug-stack-trace-iterator.cc",
+ "src/debug/debug-type-profile.cc",
+ "src/debug/debug.cc",
+ "src/debug/liveedit.cc",
+ "src/deoptimizer/deoptimize-reason.cc",
+ "src/deoptimizer/deoptimized-frame-info.cc",
+ "src/deoptimizer/deoptimizer.cc",
+ "src/deoptimizer/materialized-object-store.cc",
+ "src/deoptimizer/translated-state.cc",
+ "src/deoptimizer/translation-array.cc",
+ "src/diagnostics/basic-block-profiler.cc",
+ "src/diagnostics/compilation-statistics.cc",
+ "src/diagnostics/disassembler.cc",
+ "src/diagnostics/eh-frame.cc",
+ "src/diagnostics/gdb-jit.cc",
+ "src/diagnostics/objects-debug.cc",
+ "src/diagnostics/objects-printer.cc",
+ "src/diagnostics/perf-jit.cc",
+ "src/diagnostics/unwinder.cc",
+ "src/execution/arguments.cc",
+ "src/execution/execution.cc",
+ "src/execution/external-pointer-table.cc",
+ "src/execution/frames.cc",
+ "src/execution/futex-emulation.cc",
+ "src/execution/interrupts-scope.cc",
+ "src/execution/isolate.cc",
+ "src/execution/local-isolate.cc",
+ "src/execution/messages.cc",
+ "src/execution/microtask-queue.cc",
+ "src/execution/protectors.cc",
+ "src/execution/runtime-profiler.cc",
+ "src/execution/simulator-base.cc",
+ "src/execution/stack-guard.cc",
+ "src/execution/thread-id.cc",
+ "src/execution/thread-local-top.cc",
+ "src/execution/v8threads.cc",
+ "src/extensions/cputracemark-extension.cc",
+ "src/extensions/externalize-string-extension.cc",
+ "src/extensions/gc-extension.cc",
+ "src/extensions/ignition-statistics-extension.cc",
+ "src/extensions/statistics-extension.cc",
+ "src/extensions/trigger-failure-extension.cc",
+ "src/flags/flags.cc",
+ "src/handles/global-handles.cc",
+ "src/handles/handles.cc",
+ "src/handles/local-handles.cc",
+ "src/handles/persistent-handles.cc",
+ "src/heap/allocation-observer.cc",
+ "src/heap/array-buffer-sweeper.cc",
+ "src/heap/base-space.cc",
+ "src/heap/basic-memory-chunk.cc",
+ "src/heap/code-object-registry.cc",
+ "src/heap/code-stats.cc",
+ "src/heap/collection-barrier.cc",
+ "src/heap/combined-heap.cc",
+ "src/heap/concurrent-allocator.cc",
+ "src/heap/concurrent-marking.cc",
+ "src/heap/cppgc-js/cpp-heap.cc",
+ "src/heap/cppgc-js/cpp-snapshot.cc",
+ "src/heap/cppgc-js/unified-heap-marking-verifier.cc",
+ "src/heap/cppgc-js/unified-heap-marking-visitor.cc",
+ "src/heap/embedder-tracing.cc",
+ "src/heap/factory-base.cc",
+ "src/heap/factory.cc",
+ "src/heap/finalization-registry-cleanup-task.cc",
+ "src/heap/free-list.cc",
+ "src/heap/gc-idle-time-handler.cc",
+ "src/heap/gc-tracer.cc",
+ "src/heap/heap-controller.cc",
+ "src/heap/heap-write-barrier.cc",
+ "src/heap/heap.cc",
+ "src/heap/incremental-marking-job.cc",
+ "src/heap/incremental-marking.cc",
+ "src/heap/index-generator.cc",
+ "src/heap/invalidated-slots.cc",
+ "src/heap/large-spaces.cc",
+ "src/heap/local-factory.cc",
+ "src/heap/local-heap.cc",
+ "src/heap/mark-compact.cc",
+ "src/heap/marking-barrier.cc",
+ "src/heap/marking-worklist.cc",
+ "src/heap/marking.cc",
+ "src/heap/memory-allocator.cc",
+ "src/heap/memory-chunk-layout.cc",
+ "src/heap/memory-chunk.cc",
+ "src/heap/memory-measurement.cc",
+ "src/heap/memory-reducer.cc",
+ "src/heap/new-spaces.cc",
+ "src/heap/object-stats.cc",
+ "src/heap/objects-visiting.cc",
+ "src/heap/paged-spaces.cc",
+ "src/heap/read-only-heap.cc",
+ "src/heap/read-only-spaces.cc",
+ "src/heap/safepoint.cc",
+ "src/heap/scavenge-job.cc",
+ "src/heap/scavenger.cc",
+ "src/heap/slot-set.cc",
+ "src/heap/spaces.cc",
+ "src/heap/stress-marking-observer.cc",
+ "src/heap/stress-scavenge-observer.cc",
+ "src/heap/sweeper.cc",
+ "src/heap/weak-object-worklists.cc",
+ "src/ic/call-optimization.cc",
+ "src/ic/handler-configuration.cc",
+ "src/ic/ic-stats.cc",
+ "src/ic/ic.cc",
+ "src/ic/stub-cache.cc",
+ "src/init/bootstrapper.cc",
+ "src/init/icu_util.cc",
+ "src/init/isolate-allocator.cc",
+ "src/init/startup-data-util.cc",
+ "src/init/v8.cc",
+ "src/interpreter/bytecode-array-builder.cc",
+ "src/interpreter/bytecode-array-iterator.cc",
+ "src/interpreter/bytecode-array-random-iterator.cc",
+ "src/interpreter/bytecode-array-writer.cc",
+ "src/interpreter/bytecode-decoder.cc",
+ "src/interpreter/bytecode-flags.cc",
+ "src/interpreter/bytecode-generator.cc",
+ "src/interpreter/bytecode-label.cc",
+ "src/interpreter/bytecode-node.cc",
+ "src/interpreter/bytecode-operands.cc",
+ "src/interpreter/bytecode-register-optimizer.cc",
+ "src/interpreter/bytecode-register.cc",
+ "src/interpreter/bytecode-source-info.cc",
+ "src/interpreter/bytecodes.cc",
+ "src/interpreter/constant-array-builder.cc",
+ "src/interpreter/control-flow-builders.cc",
+ "src/interpreter/handler-table-builder.cc",
+ "src/interpreter/interpreter-intrinsics.cc",
+ "src/interpreter/interpreter.cc",
+ "src/json/json-parser.cc",
+ "src/json/json-stringifier.cc",
+ "src/libsampler/sampler.cc",
+ "src/logging/counters.cc",
+ "src/logging/local-logger.cc",
+ "src/logging/log-utils.cc",
+ "src/logging/log.cc",
+ "src/logging/metrics.cc",
+ "src/logging/tracing-flags.cc",
+ "src/numbers/bignum-dtoa.cc",
+ "src/numbers/bignum.cc",
+ "src/numbers/cached-powers.cc",
+ "src/numbers/conversions.cc",
+ "src/numbers/diy-fp.cc",
+ "src/numbers/dtoa.cc",
+ "src/numbers/fast-dtoa.cc",
+ "src/numbers/fixed-dtoa.cc",
+ "src/numbers/math-random.cc",
+ "src/numbers/strtod.cc",
+ "src/objects/backing-store.cc",
+ "src/objects/bigint.cc",
+ "src/objects/code-kind.cc",
+ "src/objects/code.cc",
+ "src/objects/compilation-cache-table.cc",
+ "src/objects/contexts.cc",
+ "src/objects/debug-objects.cc",
+ "src/objects/elements-kind.cc",
+ "src/objects/elements.cc",
+ "src/objects/embedder-data-array.cc",
+ "src/objects/feedback-vector.cc",
+ "src/objects/field-type.cc",
+ "src/objects/intl-objects.cc",
+ "src/objects/js-array-buffer.cc",
+ "src/objects/js-break-iterator.cc",
+ "src/objects/js-collator.cc",
+ "src/objects/js-date-time-format.cc",
+ "src/objects/js-display-names.cc",
+ "src/objects/js-function.cc",
+ "src/objects/js-list-format.cc",
+ "src/objects/js-locale.cc",
+ "src/objects/js-number-format.cc",
+ "src/objects/js-objects.cc",
+ "src/objects/js-plural-rules.cc",
+ "src/objects/js-regexp.cc",
+ "src/objects/js-relative-time-format.cc",
+ "src/objects/js-segment-iterator.cc",
+ "src/objects/js-segmenter.cc",
+ "src/objects/js-segments.cc",
+ "src/objects/keys.cc",
+ "src/objects/literal-objects.cc",
+ "src/objects/lookup-cache.cc",
+ "src/objects/lookup.cc",
+ "src/objects/managed.cc",
+ "src/objects/map-updater.cc",
+ "src/objects/map.cc",
+ "src/objects/module.cc",
+ "src/objects/objects.cc",
+ "src/objects/ordered-hash-table.cc",
+ "src/objects/osr-optimized-code-cache.cc",
+ "src/objects/property-descriptor.cc",
+ "src/objects/property.cc",
+ "src/objects/scope-info.cc",
+ "src/objects/shared-function-info.cc",
+ "src/objects/source-text-module.cc",
+ "src/objects/stack-frame-info.cc",
+ "src/objects/string-comparator.cc",
+ "src/objects/string-table.cc",
+ "src/objects/string.cc",
+ "src/objects/swiss-name-dictionary.cc",
+ "src/objects/synthetic-module.cc",
+ "src/objects/tagged-impl.cc",
+ "src/objects/template-objects.cc",
+ "src/objects/transitions.cc",
+ "src/objects/type-hints.cc",
+ "src/objects/value-serializer.cc",
+ "src/objects/visitors.cc",
+ "src/parsing/func-name-inferrer.cc",
+ "src/parsing/import-assertions.cc",
+ "src/parsing/literal-buffer.cc",
+ "src/parsing/parse-info.cc",
+ "src/parsing/parser.cc",
+ "src/parsing/parsing.cc",
+ "src/parsing/pending-compilation-error-handler.cc",
+ "src/parsing/preparse-data.cc",
+ "src/parsing/preparser.cc",
+ "src/parsing/rewriter.cc",
+ "src/parsing/scanner-character-streams.cc",
+ "src/parsing/scanner.cc",
+ "src/parsing/token.cc",
+ "src/profiler/allocation-tracker.cc",
+ "src/profiler/cpu-profiler.cc",
+ "src/profiler/heap-profiler.cc",
+ "src/profiler/heap-snapshot-generator.cc",
+ "src/profiler/profile-generator.cc",
+ "src/profiler/profiler-listener.cc",
+ "src/profiler/profiler-stats.cc",
+ "src/profiler/sampling-heap-profiler.cc",
+ "src/profiler/strings-storage.cc",
+ "src/profiler/symbolizer.cc",
+ "src/profiler/tick-sample.cc",
+ "src/profiler/tracing-cpu-profiler.cc",
+ "src/profiler/weak-code-registry.cc",
+ "src/regexp/experimental/experimental-bytecode.cc",
+ "src/regexp/experimental/experimental-compiler.cc",
+ "src/regexp/experimental/experimental-interpreter.cc",
+ "src/regexp/experimental/experimental.cc",
+ "src/regexp/property-sequences.cc",
+ "src/regexp/regexp-ast.cc",
+ "src/regexp/regexp-bytecode-generator.cc",
+ "src/regexp/regexp-bytecode-peephole.cc",
+ "src/regexp/regexp-bytecodes.cc",
+ "src/regexp/regexp-compiler-tonode.cc",
+ "src/regexp/regexp-compiler.cc",
+ "src/regexp/regexp-dotprinter.cc",
+ "src/regexp/regexp-error.cc",
+ "src/regexp/regexp-interpreter.cc",
+ "src/regexp/regexp-macro-assembler-tracer.cc",
+ "src/regexp/regexp-macro-assembler.cc",
+ "src/regexp/regexp-parser.cc",
+ "src/regexp/regexp-stack.cc",
+ "src/regexp/regexp-utils.cc",
+ "src/regexp/regexp.cc",
+ "src/roots/roots.cc",
+ "src/runtime/runtime-array.cc",
+ "src/runtime/runtime-atomics.cc",
+ "src/runtime/runtime-bigint.cc",
+ "src/runtime/runtime-classes.cc",
+ "src/runtime/runtime-collections.cc",
+ "src/runtime/runtime-compiler.cc",
+ "src/runtime/runtime-date.cc",
+ "src/runtime/runtime-debug.cc",
+ "src/runtime/runtime-forin.cc",
+ "src/runtime/runtime-function.cc",
+ "src/runtime/runtime-futex.cc",
+ "src/runtime/runtime-generator.cc",
+ "src/runtime/runtime-internal.cc",
+ "src/runtime/runtime-intl.cc",
+ "src/runtime/runtime-literals.cc",
+ "src/runtime/runtime-module.cc",
+ "src/runtime/runtime-numbers.cc",
+ "src/runtime/runtime-object.cc",
+ "src/runtime/runtime-operators.cc",
+ "src/runtime/runtime-promise.cc",
+ "src/runtime/runtime-proxy.cc",
+ "src/runtime/runtime-regexp.cc",
+ "src/runtime/runtime-scopes.cc",
+ "src/runtime/runtime-strings.cc",
+ "src/runtime/runtime-symbol.cc",
+ "src/runtime/runtime-test.cc",
+ "src/runtime/runtime-trace.cc",
+ "src/runtime/runtime-typedarray.cc",
+ "src/runtime/runtime-weak-refs.cc",
+ "src/runtime/runtime.cc",
+ "src/sanitizer/lsan-page-allocator.cc",
+ "src/snapshot/code-serializer.cc",
+ "src/snapshot/context-deserializer.cc",
+ "src/snapshot/context-serializer.cc",
+ "src/snapshot/deserializer.cc",
+ "src/snapshot/embedded/embedded-data.cc",
+ "src/snapshot/object-deserializer.cc",
+ "src/snapshot/read-only-deserializer.cc",
+ "src/snapshot/read-only-serializer.cc",
+ "src/snapshot/roots-serializer.cc",
+ "src/snapshot/serializer-deserializer.cc",
+ "src/snapshot/serializer.cc",
+ "src/snapshot/snapshot-compression.cc",
+ "src/snapshot/snapshot-data.cc",
+ "src/snapshot/snapshot-source-sink.cc",
+ "src/snapshot/snapshot-utils.cc",
+ "src/snapshot/snapshot.cc",
+ "src/snapshot/startup-deserializer.cc",
+ "src/snapshot/startup-serializer.cc",
+ "src/strings/char-predicates.cc",
+ "src/strings/string-builder.cc",
+ "src/strings/string-case.cc",
+ "src/strings/string-stream.cc",
+ "src/strings/unicode-decoder.cc",
+ "src/strings/unicode.cc",
+ "src/strings/uri.cc",
+ "src/tasks/cancelable-task.cc",
+ "src/tasks/operations-barrier.cc",
+ "src/tasks/task-utils.cc",
+ "src/third_party/siphash/halfsiphash.cc",
+ "src/tracing/trace-event.cc",
+ "src/tracing/traced-value.cc",
+ "src/tracing/tracing-category-observer.cc",
+ "src/trap-handler/handler-inside.cc",
+ "src/trap-handler/handler-outside.cc",
+ "src/trap-handler/handler-shared.cc",
+ "src/utils/address-map.cc",
+ "src/utils/allocation.cc",
+ "src/utils/bit-vector.cc",
+ "src/utils/detachable-vector.cc",
+ "src/utils/identity-map.cc",
+ "src/utils/memcopy.cc",
+ "src/utils/ostreams.cc",
+ "src/utils/utils.cc",
+ "src/utils/version.cc",
+ "src/web-snapshot/web-snapshot.cc",
+ "src/web-snapshot/web-snapshot.h",
+ "src/zone/accounting-allocator.cc",
+ "src/zone/type-stats.cc",
+ "src/zone/zone-segment.cc",
+ "src/zone/zone.cc",
+ ]
+
+ if (v8_enable_webassembly) {
+ sources += [
+ "src/asmjs/asm-js.cc",
+ "src/asmjs/asm-parser.cc",
+ "src/asmjs/asm-scanner.cc",
+ "src/asmjs/asm-types.cc",
+ "src/debug/debug-wasm-objects.cc",
+ "src/runtime/runtime-test-wasm.cc",
+ "src/runtime/runtime-wasm.cc",
+ "src/wasm/baseline/liftoff-assembler.cc",
+ "src/wasm/baseline/liftoff-compiler.cc",
+ "src/wasm/function-body-decoder.cc",
+ "src/wasm/function-compiler.cc",
+ "src/wasm/graph-builder-interface.cc",
+ "src/wasm/jump-table-assembler.cc",
+ "src/wasm/local-decl-encoder.cc",
+ "src/wasm/memory-tracing.cc",
+ "src/wasm/module-compiler.cc",
+ "src/wasm/module-decoder.cc",
+ "src/wasm/module-instantiate.cc",
+ "src/wasm/signature-map.cc",
+ "src/wasm/simd-shuffle.cc",
+ "src/wasm/streaming-decoder.cc",
+ "src/wasm/sync-streaming-decoder.cc",
+ "src/wasm/value-type.cc",
+ "src/wasm/wasm-code-manager.cc",
+ "src/wasm/wasm-debug.cc",
+ "src/wasm/wasm-engine.cc",
+ "src/wasm/wasm-external-refs.cc",
+ "src/wasm/wasm-features.cc",
+ "src/wasm/wasm-import-wrapper-cache.cc",
+ "src/wasm/wasm-js.cc",
+ "src/wasm/wasm-module-builder.cc",
+ "src/wasm/wasm-module-sourcemap.cc",
+ "src/wasm/wasm-module.cc",
+ "src/wasm/wasm-objects.cc",
+ "src/wasm/wasm-opcodes.cc",
+ "src/wasm/wasm-result.cc",
+ "src/wasm/wasm-serialization.cc",
+ "src/wasm/wasm-subtyping.cc",
+ ]
+ }
+
if (v8_enable_third_party_heap) {
sources += v8_third_party_heap_files
} else {
@@ -3683,31 +4011,19 @@ v8_source_set("v8_base_without_compiler") {
}
if (v8_enable_conservative_stack_scanning) {
- sources += [
- "src/heap/conservative-stack-visitor.cc",
- "src/heap/conservative-stack-visitor.h",
- "src/heap/object-start-bitmap.h",
- ]
+ sources += [ "src/heap/conservative-stack-visitor.cc" ]
}
if (v8_enable_wasm_gdb_remote_debugging) {
sources += [
"src/debug/wasm/gdb-server/gdb-remote-util.cc",
- "src/debug/wasm/gdb-server/gdb-remote-util.h",
"src/debug/wasm/gdb-server/gdb-server-thread.cc",
- "src/debug/wasm/gdb-server/gdb-server-thread.h",
"src/debug/wasm/gdb-server/gdb-server.cc",
- "src/debug/wasm/gdb-server/gdb-server.h",
"src/debug/wasm/gdb-server/packet.cc",
- "src/debug/wasm/gdb-server/packet.h",
"src/debug/wasm/gdb-server/session.cc",
- "src/debug/wasm/gdb-server/session.h",
"src/debug/wasm/gdb-server/target.cc",
- "src/debug/wasm/gdb-server/target.h",
"src/debug/wasm/gdb-server/transport.cc",
- "src/debug/wasm/gdb-server/transport.h",
"src/debug/wasm/gdb-server/wasm-module-debug.cc",
- "src/debug/wasm/gdb-server/wasm-module-debug.h",
]
}
@@ -3720,20 +4036,12 @@ v8_source_set("v8_base_without_compiler") {
if (v8_current_cpu == "x86") {
sources += [ ### gcmole(arch:ia32) ###
- "src/baseline/ia32/baseline-assembler-ia32-inl.h",
- "src/baseline/ia32/baseline-compiler-ia32-inl.h",
- "src/codegen/ia32/assembler-ia32-inl.h",
"src/codegen/ia32/assembler-ia32.cc",
- "src/codegen/ia32/assembler-ia32.h",
- "src/codegen/ia32/constants-ia32.h",
"src/codegen/ia32/cpu-ia32.cc",
"src/codegen/ia32/interface-descriptors-ia32.cc",
"src/codegen/ia32/macro-assembler-ia32.cc",
- "src/codegen/ia32/macro-assembler-ia32.h",
- "src/codegen/ia32/register-ia32.h",
- "src/codegen/ia32/sse-instr.h",
+ "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc",
"src/compiler/backend/ia32/code-generator-ia32.cc",
- "src/compiler/backend/ia32/instruction-codes-ia32.h",
"src/compiler/backend/ia32/instruction-scheduler-ia32.cc",
"src/compiler/backend/ia32/instruction-selector-ia32.cc",
"src/debug/ia32/debug-ia32.cc",
@@ -3741,43 +4049,26 @@ v8_source_set("v8_base_without_compiler") {
"src/diagnostics/ia32/disasm-ia32.cc",
"src/diagnostics/ia32/unwinder-ia32.cc",
"src/execution/ia32/frame-constants-ia32.cc",
- "src/execution/ia32/frame-constants-ia32.h",
"src/regexp/ia32/regexp-macro-assembler-ia32.cc",
- "src/regexp/ia32/regexp-macro-assembler-ia32.h",
- "src/wasm/baseline/ia32/liftoff-assembler-ia32.h",
]
} else if (v8_current_cpu == "x64") {
sources += [ ### gcmole(arch:x64) ###
- "src/baseline/x64/baseline-assembler-x64-inl.h",
- "src/baseline/x64/baseline-compiler-x64-inl.h",
- "src/codegen/x64/assembler-x64-inl.h",
+ "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc",
"src/codegen/x64/assembler-x64.cc",
- "src/codegen/x64/assembler-x64.h",
- "src/codegen/x64/constants-x64.h",
"src/codegen/x64/cpu-x64.cc",
- "src/codegen/x64/fma-instr.h",
"src/codegen/x64/interface-descriptors-x64.cc",
"src/codegen/x64/macro-assembler-x64.cc",
- "src/codegen/x64/macro-assembler-x64.h",
- "src/codegen/x64/register-x64.h",
- "src/codegen/x64/sse-instr.h",
"src/compiler/backend/x64/code-generator-x64.cc",
- "src/compiler/backend/x64/instruction-codes-x64.h",
"src/compiler/backend/x64/instruction-scheduler-x64.cc",
"src/compiler/backend/x64/instruction-selector-x64.cc",
"src/compiler/backend/x64/unwinding-info-writer-x64.cc",
- "src/compiler/backend/x64/unwinding-info-writer-x64.h",
"src/debug/x64/debug-x64.cc",
"src/deoptimizer/x64/deoptimizer-x64.cc",
"src/diagnostics/x64/disasm-x64.cc",
"src/diagnostics/x64/eh-frame-x64.cc",
"src/diagnostics/x64/unwinder-x64.cc",
"src/execution/x64/frame-constants-x64.cc",
- "src/execution/x64/frame-constants-x64.h",
"src/regexp/x64/regexp-macro-assembler-x64.cc",
- "src/regexp/x64/regexp-macro-assembler-x64.h",
- "src/third_party/valgrind/valgrind.h",
- "src/wasm/baseline/x64/liftoff-assembler-x64.h",
]
# iOS Xcode simulator builds run on an x64 target. iOS and macOS are both
@@ -3785,129 +4076,79 @@ v8_source_set("v8_base_without_compiler") {
if (is_linux || is_chromeos || is_mac || is_ios || target_os == "freebsd") {
sources += [
"src/trap-handler/handler-inside-posix.cc",
- "src/trap-handler/handler-inside-posix.h",
"src/trap-handler/handler-outside-posix.cc",
]
}
if (is_win) {
sources += [
"src/diagnostics/unwinding-info-win64.cc",
- "src/diagnostics/unwinding-info-win64.h",
"src/trap-handler/handler-inside-win.cc",
- "src/trap-handler/handler-inside-win.h",
"src/trap-handler/handler-outside-win.cc",
]
}
} else if (v8_current_cpu == "arm") {
sources += [ ### gcmole(arch:arm) ###
- "src/baseline/arm/baseline-assembler-arm-inl.h",
- "src/baseline/arm/baseline-compiler-arm-inl.h",
- "src/codegen/arm/assembler-arm-inl.h",
"src/codegen/arm/assembler-arm.cc",
- "src/codegen/arm/assembler-arm.h",
"src/codegen/arm/constants-arm.cc",
- "src/codegen/arm/constants-arm.h",
"src/codegen/arm/cpu-arm.cc",
"src/codegen/arm/interface-descriptors-arm.cc",
"src/codegen/arm/macro-assembler-arm.cc",
- "src/codegen/arm/macro-assembler-arm.h",
- "src/codegen/arm/register-arm.h",
"src/compiler/backend/arm/code-generator-arm.cc",
- "src/compiler/backend/arm/instruction-codes-arm.h",
"src/compiler/backend/arm/instruction-scheduler-arm.cc",
"src/compiler/backend/arm/instruction-selector-arm.cc",
"src/compiler/backend/arm/unwinding-info-writer-arm.cc",
- "src/compiler/backend/arm/unwinding-info-writer-arm.h",
"src/debug/arm/debug-arm.cc",
"src/deoptimizer/arm/deoptimizer-arm.cc",
"src/diagnostics/arm/disasm-arm.cc",
"src/diagnostics/arm/eh-frame-arm.cc",
"src/diagnostics/arm/unwinder-arm.cc",
"src/execution/arm/frame-constants-arm.cc",
- "src/execution/arm/frame-constants-arm.h",
"src/execution/arm/simulator-arm.cc",
- "src/execution/arm/simulator-arm.h",
"src/regexp/arm/regexp-macro-assembler-arm.cc",
- "src/regexp/arm/regexp-macro-assembler-arm.h",
- "src/wasm/baseline/arm/liftoff-assembler-arm.h",
]
} else if (v8_current_cpu == "arm64") {
sources += [ ### gcmole(arch:arm64) ###
- "src/baseline/arm64/baseline-assembler-arm64-inl.h",
- "src/baseline/arm64/baseline-compiler-arm64-inl.h",
- "src/codegen/arm64/assembler-arm64-inl.h",
"src/codegen/arm64/assembler-arm64.cc",
- "src/codegen/arm64/assembler-arm64.h",
- "src/codegen/arm64/constants-arm64.h",
"src/codegen/arm64/cpu-arm64.cc",
- "src/codegen/arm64/decoder-arm64-inl.h",
"src/codegen/arm64/decoder-arm64.cc",
- "src/codegen/arm64/decoder-arm64.h",
"src/codegen/arm64/instructions-arm64-constants.cc",
"src/codegen/arm64/instructions-arm64.cc",
- "src/codegen/arm64/instructions-arm64.h",
"src/codegen/arm64/interface-descriptors-arm64.cc",
- "src/codegen/arm64/macro-assembler-arm64-inl.h",
"src/codegen/arm64/macro-assembler-arm64.cc",
- "src/codegen/arm64/macro-assembler-arm64.h",
"src/codegen/arm64/register-arm64.cc",
- "src/codegen/arm64/register-arm64.h",
"src/codegen/arm64/utils-arm64.cc",
- "src/codegen/arm64/utils-arm64.h",
"src/compiler/backend/arm64/code-generator-arm64.cc",
- "src/compiler/backend/arm64/instruction-codes-arm64.h",
"src/compiler/backend/arm64/instruction-scheduler-arm64.cc",
"src/compiler/backend/arm64/instruction-selector-arm64.cc",
"src/compiler/backend/arm64/unwinding-info-writer-arm64.cc",
- "src/compiler/backend/arm64/unwinding-info-writer-arm64.h",
"src/debug/arm64/debug-arm64.cc",
"src/deoptimizer/arm64/deoptimizer-arm64.cc",
"src/diagnostics/arm64/disasm-arm64.cc",
- "src/diagnostics/arm64/disasm-arm64.h",
"src/diagnostics/arm64/eh-frame-arm64.cc",
"src/diagnostics/arm64/unwinder-arm64.cc",
"src/execution/arm64/frame-constants-arm64.cc",
- "src/execution/arm64/frame-constants-arm64.h",
"src/execution/arm64/pointer-auth-arm64.cc",
"src/execution/arm64/simulator-arm64.cc",
- "src/execution/arm64/simulator-arm64.h",
"src/execution/arm64/simulator-logic-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.cc",
- "src/regexp/arm64/regexp-macro-assembler-arm64.h",
- "src/wasm/baseline/arm64/liftoff-assembler-arm64.h",
]
- if (v8_control_flow_integrity) {
- sources += [ "src/execution/arm64/pointer-authentication-arm64.h" ]
- }
if (current_cpu == "arm64" && is_mac) {
sources += [
"src/trap-handler/handler-inside-posix.cc",
- "src/trap-handler/handler-inside-posix.h",
"src/trap-handler/handler-outside-posix.cc",
]
}
if (is_win) {
- sources += [
- "src/diagnostics/unwinding-info-win64.cc",
- "src/diagnostics/unwinding-info-win64.h",
- ]
+ sources += [ "src/diagnostics/unwinding-info-win64.cc" ]
}
} else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
sources += [ ### gcmole(arch:mipsel) ###
- "src/baseline/mips/baseline-assembler-mips-inl.h",
- "src/baseline/mips/baseline-compiler-mips-inl.h",
- "src/codegen/mips/assembler-mips-inl.h",
"src/codegen/mips/assembler-mips.cc",
- "src/codegen/mips/assembler-mips.h",
"src/codegen/mips/constants-mips.cc",
- "src/codegen/mips/constants-mips.h",
"src/codegen/mips/cpu-mips.cc",
"src/codegen/mips/interface-descriptors-mips.cc",
"src/codegen/mips/macro-assembler-mips.cc",
- "src/codegen/mips/macro-assembler-mips.h",
- "src/codegen/mips/register-mips.h",
"src/compiler/backend/mips/code-generator-mips.cc",
- "src/compiler/backend/mips/instruction-codes-mips.h",
"src/compiler/backend/mips/instruction-scheduler-mips.cc",
"src/compiler/backend/mips/instruction-selector-mips.cc",
"src/debug/mips/debug-mips.cc",
@@ -3915,29 +4156,17 @@ v8_source_set("v8_base_without_compiler") {
"src/diagnostics/mips/disasm-mips.cc",
"src/diagnostics/mips/unwinder-mips.cc",
"src/execution/mips/frame-constants-mips.cc",
- "src/execution/mips/frame-constants-mips.h",
"src/execution/mips/simulator-mips.cc",
- "src/execution/mips/simulator-mips.h",
"src/regexp/mips/regexp-macro-assembler-mips.cc",
- "src/regexp/mips/regexp-macro-assembler-mips.h",
- "src/wasm/baseline/mips/liftoff-assembler-mips.h",
]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
sources += [ ### gcmole(arch:mips64el) ###
- "src/baseline/mips64/baseline-assembler-mips64-inl.h",
- "src/baseline/mips64/baseline-compiler-mips64-inl.h",
- "src/codegen/mips64/assembler-mips64-inl.h",
"src/codegen/mips64/assembler-mips64.cc",
- "src/codegen/mips64/assembler-mips64.h",
"src/codegen/mips64/constants-mips64.cc",
- "src/codegen/mips64/constants-mips64.h",
"src/codegen/mips64/cpu-mips64.cc",
"src/codegen/mips64/interface-descriptors-mips64.cc",
"src/codegen/mips64/macro-assembler-mips64.cc",
- "src/codegen/mips64/macro-assembler-mips64.h",
- "src/codegen/mips64/register-mips64.h",
"src/compiler/backend/mips64/code-generator-mips64.cc",
- "src/compiler/backend/mips64/instruction-codes-mips64.h",
"src/compiler/backend/mips64/instruction-scheduler-mips64.cc",
"src/compiler/backend/mips64/instruction-selector-mips64.cc",
"src/debug/mips64/debug-mips64.cc",
@@ -3945,126 +4174,77 @@ v8_source_set("v8_base_without_compiler") {
"src/diagnostics/mips64/disasm-mips64.cc",
"src/diagnostics/mips64/unwinder-mips64.cc",
"src/execution/mips64/frame-constants-mips64.cc",
- "src/execution/mips64/frame-constants-mips64.h",
"src/execution/mips64/simulator-mips64.cc",
- "src/execution/mips64/simulator-mips64.h",
"src/regexp/mips64/regexp-macro-assembler-mips64.cc",
- "src/regexp/mips64/regexp-macro-assembler-mips64.h",
- "src/wasm/baseline/mips64/liftoff-assembler-mips64.h",
]
} else if (v8_current_cpu == "ppc") {
sources += [ ### gcmole(arch:ppc) ###
- "src/baseline/ppc/baseline-assembler-ppc-inl.h",
- "src/baseline/ppc/baseline-compiler-ppc-inl.h",
- "src/codegen/ppc/assembler-ppc-inl.h",
"src/codegen/ppc/assembler-ppc.cc",
- "src/codegen/ppc/assembler-ppc.h",
"src/codegen/ppc/constants-ppc.cc",
- "src/codegen/ppc/constants-ppc.h",
"src/codegen/ppc/cpu-ppc.cc",
"src/codegen/ppc/interface-descriptors-ppc.cc",
"src/codegen/ppc/macro-assembler-ppc.cc",
- "src/codegen/ppc/macro-assembler-ppc.h",
- "src/codegen/ppc/register-ppc.h",
"src/compiler/backend/ppc/code-generator-ppc.cc",
- "src/compiler/backend/ppc/instruction-codes-ppc.h",
"src/compiler/backend/ppc/instruction-scheduler-ppc.cc",
"src/compiler/backend/ppc/instruction-selector-ppc.cc",
"src/compiler/backend/ppc/unwinding-info-writer-ppc.cc",
- "src/compiler/backend/ppc/unwinding-info-writer-ppc.h",
"src/debug/ppc/debug-ppc.cc",
"src/deoptimizer/ppc/deoptimizer-ppc.cc",
"src/diagnostics/ppc/disasm-ppc.cc",
"src/diagnostics/ppc/eh-frame-ppc.cc",
"src/diagnostics/ppc/unwinder-ppc.cc",
"src/execution/ppc/frame-constants-ppc.cc",
- "src/execution/ppc/frame-constants-ppc.h",
"src/execution/ppc/simulator-ppc.cc",
- "src/execution/ppc/simulator-ppc.h",
"src/regexp/ppc/regexp-macro-assembler-ppc.cc",
- "src/regexp/ppc/regexp-macro-assembler-ppc.h",
- "src/wasm/baseline/ppc/liftoff-assembler-ppc.h",
]
} else if (v8_current_cpu == "ppc64") {
sources += [ ### gcmole(arch:ppc64) ###
- "src/baseline/ppc/baseline-assembler-ppc-inl.h",
- "src/baseline/ppc/baseline-compiler-ppc-inl.h",
- "src/codegen/ppc/assembler-ppc-inl.h",
"src/codegen/ppc/assembler-ppc.cc",
- "src/codegen/ppc/assembler-ppc.h",
"src/codegen/ppc/constants-ppc.cc",
- "src/codegen/ppc/constants-ppc.h",
"src/codegen/ppc/cpu-ppc.cc",
"src/codegen/ppc/interface-descriptors-ppc.cc",
"src/codegen/ppc/macro-assembler-ppc.cc",
- "src/codegen/ppc/macro-assembler-ppc.h",
- "src/codegen/ppc/register-ppc.h",
"src/compiler/backend/ppc/code-generator-ppc.cc",
- "src/compiler/backend/ppc/instruction-codes-ppc.h",
"src/compiler/backend/ppc/instruction-scheduler-ppc.cc",
"src/compiler/backend/ppc/instruction-selector-ppc.cc",
"src/compiler/backend/ppc/unwinding-info-writer-ppc.cc",
- "src/compiler/backend/ppc/unwinding-info-writer-ppc.h",
"src/debug/ppc/debug-ppc.cc",
"src/deoptimizer/ppc/deoptimizer-ppc.cc",
"src/diagnostics/ppc/disasm-ppc.cc",
"src/diagnostics/ppc/eh-frame-ppc.cc",
"src/diagnostics/ppc/unwinder-ppc.cc",
"src/execution/ppc/frame-constants-ppc.cc",
- "src/execution/ppc/frame-constants-ppc.h",
"src/execution/ppc/simulator-ppc.cc",
- "src/execution/ppc/simulator-ppc.h",
"src/regexp/ppc/regexp-macro-assembler-ppc.cc",
- "src/regexp/ppc/regexp-macro-assembler-ppc.h",
- "src/wasm/baseline/ppc/liftoff-assembler-ppc.h",
]
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
sources += [ ### gcmole(arch:s390) ###
- "src/baseline/s390/baseline-assembler-s390-inl.h",
- "src/baseline/s390/baseline-compiler-s390-inl.h",
- "src/codegen/s390/assembler-s390-inl.h",
"src/codegen/s390/assembler-s390.cc",
- "src/codegen/s390/assembler-s390.h",
"src/codegen/s390/constants-s390.cc",
- "src/codegen/s390/constants-s390.h",
"src/codegen/s390/cpu-s390.cc",
"src/codegen/s390/interface-descriptors-s390.cc",
"src/codegen/s390/macro-assembler-s390.cc",
- "src/codegen/s390/macro-assembler-s390.h",
- "src/codegen/s390/register-s390.h",
"src/compiler/backend/s390/code-generator-s390.cc",
- "src/compiler/backend/s390/instruction-codes-s390.h",
"src/compiler/backend/s390/instruction-scheduler-s390.cc",
"src/compiler/backend/s390/instruction-selector-s390.cc",
"src/compiler/backend/s390/unwinding-info-writer-s390.cc",
- "src/compiler/backend/s390/unwinding-info-writer-s390.h",
"src/debug/s390/debug-s390.cc",
"src/deoptimizer/s390/deoptimizer-s390.cc",
"src/diagnostics/s390/disasm-s390.cc",
"src/diagnostics/s390/eh-frame-s390.cc",
"src/diagnostics/s390/unwinder-s390.cc",
"src/execution/s390/frame-constants-s390.cc",
- "src/execution/s390/frame-constants-s390.h",
"src/execution/s390/simulator-s390.cc",
- "src/execution/s390/simulator-s390.h",
"src/regexp/s390/regexp-macro-assembler-s390.cc",
- "src/regexp/s390/regexp-macro-assembler-s390.h",
- "src/wasm/baseline/s390/liftoff-assembler-s390.h",
]
} else if (v8_current_cpu == "riscv64") {
sources += [ ### gcmole(arch:riscv64) ###
- "src/codegen/riscv64/assembler-riscv64-inl.h",
"src/codegen/riscv64/assembler-riscv64.cc",
- "src/codegen/riscv64/assembler-riscv64.h",
"src/codegen/riscv64/constants-riscv64.cc",
- "src/codegen/riscv64/constants-riscv64.h",
"src/codegen/riscv64/cpu-riscv64.cc",
"src/codegen/riscv64/interface-descriptors-riscv64.cc",
"src/codegen/riscv64/macro-assembler-riscv64.cc",
- "src/codegen/riscv64/macro-assembler-riscv64.h",
- "src/codegen/riscv64/register-riscv64.h",
"src/compiler/backend/riscv64/code-generator-riscv64.cc",
- "src/compiler/backend/riscv64/instruction-codes-riscv64.h",
"src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc",
"src/compiler/backend/riscv64/instruction-selector-riscv64.cc",
"src/debug/riscv64/debug-riscv64.cc",
@@ -4072,15 +4252,22 @@ v8_source_set("v8_base_without_compiler") {
"src/diagnostics/riscv64/disasm-riscv64.cc",
"src/diagnostics/riscv64/unwinder-riscv64.cc",
"src/execution/riscv64/frame-constants-riscv64.cc",
- "src/execution/riscv64/frame-constants-riscv64.h",
"src/execution/riscv64/simulator-riscv64.cc",
- "src/execution/riscv64/simulator-riscv64.h",
"src/regexp/riscv64/regexp-macro-assembler-riscv64.cc",
- "src/regexp/riscv64/regexp-macro-assembler-riscv64.h",
- "src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h",
]
}
+ # Architecture independent but platform-specific sources
+ if (is_win) {
+ if (v8_enable_system_instrumentation) {
+ sources += [
+ "src/diagnostics/system-jit-metadata-win.h",
+ "src/diagnostics/system-jit-win.cc",
+ "src/diagnostics/system-jit-win.h",
+ ]
+ }
+ }
+
configs = [
":internal_config",
":cppgc_base_config",
@@ -4088,14 +4275,13 @@ v8_source_set("v8_base_without_compiler") {
deps = [
":torque_generated_definitions",
+ ":v8_bigint",
":v8_cppgc_shared",
":v8_headers",
":v8_libbase",
- ":v8_libsampler",
":v8_shared_internal_headers",
":v8_tracing",
":v8_version",
- ":v8_wrappers",
"src/inspector:inspector",
]
@@ -4104,6 +4290,7 @@ v8_source_set("v8_base_without_compiler") {
":generate_bytecode_builtins_list",
":run_torque",
":v8_headers",
+ ":v8_internal_headers",
":v8_maybe_icu",
]
@@ -4117,43 +4304,18 @@ v8_source_set("v8_base_without_compiler") {
sources -= [
"src/builtins/builtins-intl.cc",
"src/objects/intl-objects.cc",
- "src/objects/intl-objects.h",
- "src/objects/js-break-iterator-inl.h",
"src/objects/js-break-iterator.cc",
- "src/objects/js-break-iterator.h",
- "src/objects/js-collator-inl.h",
"src/objects/js-collator.cc",
- "src/objects/js-collator.h",
- "src/objects/js-date-time-format-inl.h",
"src/objects/js-date-time-format.cc",
- "src/objects/js-date-time-format.h",
- "src/objects/js-display-names-inl.h",
"src/objects/js-display-names.cc",
- "src/objects/js-display-names.h",
- "src/objects/js-list-format-inl.h",
"src/objects/js-list-format.cc",
- "src/objects/js-list-format.h",
- "src/objects/js-locale-inl.h",
"src/objects/js-locale.cc",
- "src/objects/js-locale.h",
- "src/objects/js-number-format-inl.h",
"src/objects/js-number-format.cc",
- "src/objects/js-number-format.h",
- "src/objects/js-plural-rules-inl.h",
"src/objects/js-plural-rules.cc",
- "src/objects/js-plural-rules.h",
- "src/objects/js-relative-time-format-inl.h",
"src/objects/js-relative-time-format.cc",
- "src/objects/js-relative-time-format.h",
- "src/objects/js-segment-iterator-inl.h",
"src/objects/js-segment-iterator.cc",
- "src/objects/js-segment-iterator.h",
- "src/objects/js-segmenter-inl.h",
"src/objects/js-segmenter.cc",
- "src/objects/js-segmenter.h",
- "src/objects/js-segments-inl.h",
"src/objects/js-segments.cc",
- "src/objects/js-segments.h",
"src/runtime/runtime-intl.cc",
"src/strings/char-predicates.cc",
]
@@ -4194,7 +4356,6 @@ v8_source_set("v8_base_without_compiler") {
}
if (v8_use_perfetto) {
- sources -= [ "//base/trace_event/common/trace_event_common.h" ]
sources += [
"src/tracing/trace-categories.cc",
"src/tracing/trace-categories.h",
@@ -4261,13 +4422,13 @@ v8_source_set("torque_base") {
"src/torque/utils.h",
]
- deps = [ ":v8_shared_internal_headers" ]
-
- public_deps = [
- ":v8_libbase",
- ":v8_wrappers",
+ deps = [
+ ":v8_flags",
+ ":v8_shared_internal_headers",
]
+ public_deps = [ ":v8_libbase" ]
+
# The use of exceptions for Torque in violation of the Chromium style-guide
# is justified by the fact that it is only used from the non-essential
# language server and can be removed anytime if it causes problems.
@@ -4366,6 +4527,7 @@ v8_component("v8_libbase") {
"src/base/hashmap.h",
"src/base/ieee754.cc",
"src/base/ieee754.h",
+ "src/base/immediate-crash.h",
"src/base/iterator.h",
"src/base/lazy-instance.h",
"src/base/logging.cc",
@@ -4389,6 +4551,7 @@ v8_component("v8_libbase") {
"src/base/platform/semaphore.h",
"src/base/platform/time.cc",
"src/base/platform/time.h",
+ "src/base/platform/wrappers.h",
"src/base/region-allocator.cc",
"src/base/region-allocator.h",
"src/base/ring-buffer.h",
@@ -4405,15 +4568,14 @@ v8_component("v8_libbase") {
"src/base/utils/random-number-generator.h",
"src/base/vlq-base64.cc",
"src/base/vlq-base64.h",
+ "src/base/vlq.h",
]
configs = [ ":internal_config_base" ]
public_configs = [ ":libbase_config" ]
- deps = [ ":v8_headers" ]
-
- public_deps = [ ":v8_wrappers" ]
+ deps = [ ":v8_config_headers" ]
data = []
@@ -4582,16 +4744,16 @@ v8_component("v8_libplatform") {
public_deps = []
deps = [
- ":v8_headers",
+ ":v8_config_headers",
":v8_libbase",
":v8_tracing",
- ":v8_wrappers",
]
if (v8_use_perfetto) {
sources -= [
"//base/trace_event/common/trace_event_common.h",
"src/libplatform/tracing/recorder-default.cc",
+ "src/libplatform/tracing/recorder.h",
"src/libplatform/tracing/trace-buffer.cc",
"src/libplatform/tracing/trace-buffer.h",
"src/libplatform/tracing/trace-object.cc",
@@ -4612,19 +4774,6 @@ v8_component("v8_libplatform") {
}
}
-v8_source_set("v8_libsampler") {
- sources = [
- "src/libsampler/sampler.cc",
- "src/libsampler/sampler.h",
- ]
-
- configs = [ ":internal_config" ]
-
- public_configs = [ ":libsampler_config" ]
-
- deps = [ ":v8_libbase" ]
-}
-
v8_source_set("fuzzer_support") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
@@ -4643,6 +4792,15 @@ v8_source_set("fuzzer_support") {
]
}
+v8_source_set("v8_bigint") {
+ sources = [
+ "src/bigint/bigint.h",
+ "src/bigint/vector-arithmetic.cc",
+ ]
+
+ configs = [ ":internal_config" ]
+}
+
v8_source_set("v8_cppgc_shared") {
sources = [
"src/heap/base/stack.cc",
@@ -4697,26 +4855,12 @@ v8_header_set("cppgc_headers") {
]
sources = [
- "include/cppgc/garbage-collected.h",
- "include/cppgc/member.h",
- "include/cppgc/persistent.h",
- "include/cppgc/type-traits.h",
- "include/cppgc/visitor.h",
- ]
-
- deps = [ ":cppgc_base" ]
- public_deps = [ ":v8_headers" ]
-}
-
-v8_source_set("cppgc_base") {
- visibility = [ ":*" ]
-
- sources = [
"include/cppgc/allocation.h",
"include/cppgc/common.h",
"include/cppgc/custom-space.h",
"include/cppgc/default-platform.h",
"include/cppgc/ephemeron-pair.h",
+ "include/cppgc/explicit-management.h",
"include/cppgc/garbage-collected.h",
"include/cppgc/heap-consistency.h",
"include/cppgc/heap-state.h",
@@ -4746,6 +4890,21 @@ v8_source_set("cppgc_base") {
"include/cppgc/trace-trait.h",
"include/cppgc/type-traits.h",
"include/cppgc/visitor.h",
+ ]
+
+ if (cppgc_enable_caged_heap) {
+ sources += [ "include/cppgc/internal/caged-heap-local-data.h" ]
+ }
+
+ deps = [ ":v8_libplatform" ]
+
+ public_deps = [ ":v8_config_headers" ]
+}
+
+v8_source_set("cppgc_base") {
+ visibility = [ ":*" ]
+
+ sources = [
"src/heap/cppgc/allocation.cc",
"src/heap/cppgc/compaction-worklists.cc",
"src/heap/cppgc/compaction-worklists.h",
@@ -4754,6 +4913,7 @@ v8_source_set("cppgc_base") {
"src/heap/cppgc/concurrent-marker.cc",
"src/heap/cppgc/concurrent-marker.h",
"src/heap/cppgc/default-platform.cc",
+ "src/heap/cppgc/explicit-management.cc",
"src/heap/cppgc/free-list.cc",
"src/heap/cppgc/free-list.h",
"src/heap/cppgc/garbage-collector.h",
@@ -4798,6 +4958,7 @@ v8_source_set("cppgc_base") {
"src/heap/cppgc/name-trait.cc",
"src/heap/cppgc/object-allocator.cc",
"src/heap/cppgc/object-allocator.h",
+ "src/heap/cppgc/object-poisoner.h",
"src/heap/cppgc/object-size-trait.cc",
"src/heap/cppgc/object-start-bitmap.h",
"src/heap/cppgc/page-memory.cc",
@@ -4828,15 +4989,8 @@ v8_source_set("cppgc_base") {
"src/heap/cppgc/write-barrier.h",
]
- if (cppgc_is_standalone) {
- sources += [ "//base/trace_event/common/trace_event_common.h" ]
- } else {
- deps = [ ":v8_tracing" ]
- }
-
if (cppgc_enable_caged_heap) {
sources += [
- "include/cppgc/internal/caged-heap-local-data.h",
"src/heap/cppgc/caged-heap-local-data.cc",
"src/heap/cppgc/caged-heap.cc",
"src/heap/cppgc/caged-heap.h",
@@ -4849,11 +5003,17 @@ v8_source_set("cppgc_base") {
]
public_deps = [
- ":v8_config_headers",
+ ":cppgc_headers",
":v8_cppgc_shared",
":v8_libbase",
":v8_libplatform",
]
+
+ if (cppgc_is_standalone && !v8_use_perfetto) {
+ sources += [ "//base/trace_event/common/trace_event_common.h" ]
+ } else {
+ public_deps += [ ":v8_tracing" ]
+ }
}
v8_source_set("cppgc_base_for_testing") {
@@ -4887,7 +5047,6 @@ if (v8_monolithic) {
":v8",
":v8_libbase",
":v8_libplatform",
- ":v8_libsampler",
"//build/win:default_exe_manifest",
]
@@ -4895,28 +5054,29 @@ if (v8_monolithic) {
}
}
-v8_static_library("wee8") {
- deps = [
- ":v8_base",
- ":v8_libbase",
- ":v8_libplatform",
- ":v8_libsampler",
- ":v8_shared_internal_headers",
- ":v8_snapshot",
- "//build/win:default_exe_manifest",
- ]
+if (v8_enable_webassembly) {
+ v8_static_library("wee8") {
+ deps = [
+ ":v8_base",
+ ":v8_libbase",
+ ":v8_libplatform",
+ ":v8_shared_internal_headers",
+ ":v8_snapshot",
+ "//build/win:default_exe_manifest",
+ ]
- # TODO: v8dll-main.cc equivalent for shared library builds
+ # TODO: v8dll-main.cc equivalent for shared library builds
- configs = [ ":internal_config" ]
+ configs = [ ":internal_config" ]
- sources = [
- ### gcmole(all) ###
- "src/wasm/c-api.cc",
- "src/wasm/c-api.h",
- "third_party/wasm-api/wasm.h",
- "third_party/wasm-api/wasm.hh",
- ]
+ sources = [
+ ### gcmole(all) ###
+ "src/wasm/c-api.cc",
+ "src/wasm/c-api.h",
+ "third_party/wasm-api/wasm.h",
+ "third_party/wasm-api/wasm.hh",
+ ]
+ }
}
###############################################################################
@@ -4985,7 +5145,6 @@ if (current_toolchain == v8_snapshot_toolchain) {
":v8_maybe_icu",
":v8_shared_internal_headers",
":v8_tracing",
- ":v8_wrappers",
"//build/win:default_exe_manifest",
]
}
@@ -5169,7 +5328,7 @@ if (is_fuchsia && !build_with_chromium) {
cr_fuchsia_package("d8_fuchsia_pkg") {
testonly = true
binary = ":d8"
- manifest = "//build/config/fuchsia/tests-with-exec.cmx"
+ manifest = "gni/v8.cmx"
package_name_override = "d8"
}
@@ -5185,15 +5344,20 @@ group("v8_fuzzers") {
data_deps = [
":v8_simple_inspector_fuzzer",
":v8_simple_json_fuzzer",
- ":v8_simple_multi_return_fuzzer",
":v8_simple_parser_fuzzer",
":v8_simple_regexp_builtins_fuzzer",
":v8_simple_regexp_fuzzer",
- ":v8_simple_wasm_async_fuzzer",
- ":v8_simple_wasm_code_fuzzer",
- ":v8_simple_wasm_compile_fuzzer",
- ":v8_simple_wasm_fuzzer",
]
+
+ if (v8_enable_webassembly) {
+ data_deps += [
+ ":v8_simple_multi_return_fuzzer",
+ ":v8_simple_wasm_async_fuzzer",
+ ":v8_simple_wasm_code_fuzzer",
+ ":v8_simple_wasm_compile_fuzzer",
+ ":v8_simple_wasm_fuzzer",
+ ]
+ }
}
if (is_component_build) {
@@ -5329,6 +5493,7 @@ v8_executable("d8") {
"src/d8/d8-js.cc",
"src/d8/d8-platforms.cc",
"src/d8/d8-platforms.h",
+ "src/d8/d8-test.cc",
"src/d8/d8.cc",
"src/d8/d8.h",
]
@@ -5353,7 +5518,6 @@ v8_executable("d8") {
":v8_libbase",
":v8_libplatform",
":v8_tracing",
- ":v8_wrappers",
"//build/win:default_exe_manifest",
]
@@ -5483,30 +5647,10 @@ v8_source_set("json_fuzzer") {
v8_fuzzer("json_fuzzer") {
}
-v8_source_set("multi_return_fuzzer") {
- sources = [ "test/fuzzer/multi-return.cc" ]
-
- deps = [
- ":fuzzer_support",
- ":v8_wrappers",
- ]
-
- configs = [
- ":external_config",
- ":internal_config_base",
- ]
-}
-
-v8_fuzzer("multi_return_fuzzer") {
-}
-
v8_source_set("parser_fuzzer") {
sources = [ "test/fuzzer/parser.cc" ]
- deps = [
- ":fuzzer_support",
- ":v8_wrappers",
- ]
+ deps = [ ":fuzzer_support" ]
configs = [
":external_config",
@@ -5523,10 +5667,7 @@ v8_source_set("regexp_builtins_fuzzer") {
"test/fuzzer/regexp_builtins/mjsunit.js.h",
]
- deps = [
- ":fuzzer_support",
- ":v8_wrappers",
- ]
+ deps = [ ":fuzzer_support" ]
configs = [
":external_config",
@@ -5540,10 +5681,7 @@ v8_fuzzer("regexp_builtins_fuzzer") {
v8_source_set("regexp_fuzzer") {
sources = [ "test/fuzzer/regexp.cc" ]
- deps = [
- ":fuzzer_support",
- ":v8_wrappers",
- ]
+ deps = [ ":fuzzer_support" ]
configs = [
":external_config",
@@ -5554,131 +5692,148 @@ v8_source_set("regexp_fuzzer") {
v8_fuzzer("regexp_fuzzer") {
}
-v8_source_set("wasm_test_common") {
- sources = [
- "test/common/wasm/wasm-interpreter.cc",
- "test/common/wasm/wasm-interpreter.h",
- "test/common/wasm/wasm-module-runner.cc",
- "test/common/wasm/wasm-module-runner.h",
- ]
+if (v8_enable_webassembly) {
+ v8_source_set("multi_return_fuzzer") {
+ sources = [ "test/fuzzer/multi-return.cc" ]
- deps = [
- ":generate_bytecode_builtins_list",
- ":run_torque",
- ":v8_libbase",
- ":v8_shared_internal_headers",
- ":v8_tracing",
- ]
+ deps = [ ":fuzzer_support" ]
- public_deps = [ ":v8_maybe_icu" ]
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
+ }
- configs = [
- ":external_config",
- ":internal_config_base",
- ]
-}
+ v8_fuzzer("multi_return_fuzzer") {
+ }
-v8_source_set("wasm_fuzzer") {
- sources = [ "test/fuzzer/wasm.cc" ]
+ v8_source_set("wasm_test_common") {
+ sources = [
+ "test/common/flag-utils.h",
+ "test/common/wasm/flag-utils.h",
+ "test/common/wasm/wasm-interpreter.cc",
+ "test/common/wasm/wasm-interpreter.h",
+ "test/common/wasm/wasm-module-runner.cc",
+ "test/common/wasm/wasm-module-runner.h",
+ ]
- deps = [
- ":fuzzer_support",
- ":lib_wasm_fuzzer_common",
- ":v8_wrappers",
- ":wasm_test_common",
- ]
+ deps = [
+ ":generate_bytecode_builtins_list",
+ ":run_torque",
+ ":v8_internal_headers",
+ ":v8_libbase",
+ ":v8_shared_internal_headers",
+ ":v8_tracing",
+ ]
- configs = [
- ":external_config",
- ":internal_config_base",
- ]
-}
+ public_deps = [ ":v8_maybe_icu" ]
-v8_fuzzer("wasm_fuzzer") {
-}
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
+ }
-v8_source_set("wasm_async_fuzzer") {
- sources = [ "test/fuzzer/wasm-async.cc" ]
+ v8_source_set("wasm_fuzzer") {
+ sources = [ "test/fuzzer/wasm.cc" ]
- deps = [
- ":fuzzer_support",
- ":lib_wasm_fuzzer_common",
- ":v8_wrappers",
- ":wasm_test_common",
- ]
+ deps = [
+ ":fuzzer_support",
+ ":lib_wasm_fuzzer_common",
+ ":wasm_test_common",
+ ]
- configs = [
- ":external_config",
- ":internal_config_base",
- ]
-}
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
+ }
-v8_fuzzer("wasm_async_fuzzer") {
-}
+ v8_fuzzer("wasm_fuzzer") {
+ }
-v8_source_set("wasm_code_fuzzer") {
- sources = [
- "test/common/wasm/test-signatures.h",
- "test/fuzzer/wasm-code.cc",
- ]
+ v8_source_set("wasm_async_fuzzer") {
+ sources = [ "test/fuzzer/wasm-async.cc" ]
- deps = [
- ":fuzzer_support",
- ":lib_wasm_fuzzer_common",
- ":v8_wrappers",
- ":wasm_test_common",
- ]
+ deps = [
+ ":fuzzer_support",
+ ":lib_wasm_fuzzer_common",
+ ":wasm_test_common",
+ ]
- configs = [
- ":external_config",
- ":internal_config_base",
- ]
-}
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
+ }
-v8_fuzzer("wasm_code_fuzzer") {
-}
+ v8_fuzzer("wasm_async_fuzzer") {
+ }
-v8_source_set("lib_wasm_fuzzer_common") {
- sources = [
- "test/fuzzer/wasm-fuzzer-common.cc",
- "test/fuzzer/wasm-fuzzer-common.h",
- ]
+ v8_source_set("wasm_code_fuzzer") {
+ sources = [
+ "test/common/wasm/test-signatures.h",
+ "test/fuzzer/wasm-code.cc",
+ ]
- deps = [
- ":generate_bytecode_builtins_list",
- ":run_torque",
- ":v8_tracing",
- ":wasm_test_common",
- ]
+ deps = [
+ ":fuzzer_support",
+ ":lib_wasm_fuzzer_common",
+ ":wasm_test_common",
+ ]
- public_deps = [ ":v8_maybe_icu" ]
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
+ }
- configs = [
- ":external_config",
- ":internal_config_base",
- ]
-}
+ v8_fuzzer("wasm_code_fuzzer") {
+ }
-v8_source_set("wasm_compile_fuzzer") {
- sources = [
- "test/common/wasm/test-signatures.h",
- "test/fuzzer/wasm-compile.cc",
- ]
+ v8_source_set("lib_wasm_fuzzer_common") {
+ sources = [
+ "test/fuzzer/wasm-fuzzer-common.cc",
+ "test/fuzzer/wasm-fuzzer-common.h",
+ ]
- deps = [
- ":fuzzer_support",
- ":lib_wasm_fuzzer_common",
- ":v8_wrappers",
- ":wasm_test_common",
- ]
+ deps = [
+ ":fuzzer_support",
+ ":generate_bytecode_builtins_list",
+ ":run_torque",
+ ":v8_internal_headers",
+ ":v8_tracing",
+ ":wasm_test_common",
+ ]
- configs = [
- ":external_config",
- ":internal_config_base",
- ]
-}
+ public_deps = [ ":v8_maybe_icu" ]
+
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
+ }
+
+ v8_source_set("wasm_compile_fuzzer") {
+ sources = [
+ "test/common/wasm/test-signatures.h",
+ "test/fuzzer/wasm-compile.cc",
+ ]
-v8_fuzzer("wasm_compile_fuzzer") {
+ deps = [
+ ":fuzzer_support",
+ ":lib_wasm_fuzzer_common",
+ ":wasm_test_common",
+ ]
+
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
+ }
+
+ v8_fuzzer("wasm_compile_fuzzer") {
+ }
}
v8_source_set("inspector_fuzzer") {
@@ -5686,7 +5841,6 @@ v8_source_set("inspector_fuzzer") {
deps = [
":fuzzer_support",
- ":v8_wrappers",
"test/inspector:inspector_test",
]
diff --git a/deps/v8/COMMON_OWNERS b/deps/v8/COMMON_OWNERS
index a6aff240988..8072df037bf 100644
--- a/deps/v8/COMMON_OWNERS
+++ b/deps/v8/COMMON_OWNERS
@@ -1,7 +1,6 @@
adamk@chromium.org
ahaas@chromium.org
bbudge@chromium.org
-binji@chromium.org
bikineev@chromium.org
bmeurer@chromium.org
cbruni@chromium.org
@@ -15,11 +14,11 @@ gsathya@chromium.org
hablich@chromium.org
hpayer@chromium.org
ishell@chromium.org
-jarin@chromium.org
jgruber@chromium.org
jkummerow@chromium.org
leszeks@chromium.org
machenbach@chromium.org
+manoskouk@chromium.org
mathias@chromium.org
marja@chromium.org
mlippautz@chromium.org
@@ -27,8 +26,9 @@ mslekova@chromium.org
mvstanton@chromium.org
mythria@chromium.org
neis@chromium.org
+nicohartmann@chromium.org
omerkatz@chromium.org
-petermarshall@chromium.org
+pthier@chromium.org
rmcilroy@chromium.org
sigurds@chromium.org
solanes@chromium.org
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index 48ddbad6af9..b27a4e8e8fa 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -47,10 +47,10 @@ vars = {
'checkout_google_benchmark' : False,
# GN CIPD package version.
- 'gn_version': 'git_revision:dfcbc6fed0a8352696f92d67ccad54048ad182b3',
+ 'gn_version': 'git_revision:dba01723a441c358d843a575cb7720d54ddcdf92',
# luci-go CIPD package version.
- 'luci_go': 'git_revision:fd10124659e991321df2f8a5d3749687b54ceb0a',
+ 'luci_go': 'git_revision:d6d24b11ecded4d89f3dfd1b2e5a0072a3d4ab15',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_build-tools_version
@@ -88,15 +88,15 @@ vars = {
deps = {
'build':
- Var('chromium_url') + '/chromium/src/build.git' + '@' + '446bf3e5a00bfe4fd99d91cb76ec3b3a7b34d226',
+ Var('chromium_url') + '/chromium/src/build.git' + '@' + '77edba11e25386aa719d4f08c3ce2d8c4f868c15',
'third_party/depot_tools':
- Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '5fe664f150beaf71104ce7787560fabdb55ebf5b',
+ Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '98a52e2e312dd10d7fcf281e322039a6b706b86b',
'third_party/icu':
- Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'e05b663d1c50b4e9ecc3ff9325f5158f1d071471',
+ Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '81d656878ec611cb0b42d52c82e9dae93920d9ba',
'third_party/instrumented_libraries':
- Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '0964a78c832d1d0f2669b020b073c38f67509cf2',
+ Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '084aee04777db574038af9e9d33ca5caed577462',
'buildtools':
- Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '4c78ef9c38b683c5c5cbac70445378c2362cebfc',
+ Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '5dbd89c9d9c0b0ff47cefdc2bc421b8c9a1c5a21',
'buildtools/clang_format/script':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + '99803d74e35962f63a775f29477882afd4d57d94',
'buildtools/linux64': {
@@ -122,9 +122,9 @@ deps = {
'buildtools/third_party/libc++/trunk':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '8fa87946779682841e21e2da977eccfb6cb3bded',
'buildtools/third_party/libc++abi/trunk':
- Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '196ba1aaa8ac285d94f4ea8d9836390a45360533',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'd0f33885a2ffa7d5af74af6065b60eb48e3c70f5',
'buildtools/third_party/libunwind/trunk':
- Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'a2cc4f8c554dedcb0c64cac5511b19c43f1f3d32',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '08f35c8514a74817103121def05351186830d4b7',
'buildtools/win': {
'packages': [
{
@@ -136,7 +136,7 @@ deps = {
'condition': 'host_os == "win"',
},
'base/trace_event/common':
- Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '7af6071eddf11ad91fbd5df54138f9d3c6d980d5',
+ Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + 'cab90cbdaaf4444d67aef6ce3cef09fc5fdeb560',
'third_party/android_ndk': {
'url': Var('chromium_url') + '/android_ndk.git' + '@' + '401019bf85744311b26c88ced255cd53401af8b7',
'condition': 'checkout_android',
@@ -184,7 +184,7 @@ deps = {
'dep_type': 'cipd',
},
'third_party/catapult': {
- 'url': Var('chromium_url') + '/catapult.git' + '@' + '81c9d30d7f1b3c1ab0f1856761f738cc81741322',
+ 'url': Var('chromium_url') + '/catapult.git' + '@' + '41a5e5e465ad93d6e08224613d3544334a6278bc',
'condition': 'checkout_android',
},
'third_party/colorama/src': {
@@ -196,7 +196,7 @@ deps = {
'condition': 'checkout_fuchsia',
},
'third_party/googletest/src':
- Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '1e315c5b1a62707fac9b8f1d4e03180ee7507f98',
+ Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '07f4869221012b16b7f9ee685d94856e1fc9f361',
'third_party/google_benchmark/src': {
'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + '7f27afe83b82f3a98baf58ef595814b9d42a5b2b',
'condition': 'checkout_google_benchmark',
@@ -212,7 +212,7 @@ deps = {
'test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'test/test262/data':
- Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'f6034ebe9fb92d4d3dea644b9225bdc18b44a7ab',
+ Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '31126581e7290f9233c29cefd93f66c6ac78f1c9',
'test/test262/harness':
Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '278bcfaed0dcaa13936831fb1769d15e7c1e3b2b',
'third_party/qemu-linux-x64': {
@@ -239,7 +239,7 @@ deps = {
'packages': [
{
'package': 'fuchsia/third_party/aemu/linux-amd64',
- 'version': 'qI8e328VwkWv64EapCvG3Xj9_hDpKQFuJWeVdUHz7W0C'
+ 'version': 'SeLS6a0f6IL-PCOUKbMTN5LYgjjJbDSnb3DGf5q9pwsC'
},
],
'condition': 'host_os == "linux" and checkout_fuchsia',
@@ -256,7 +256,7 @@ deps = {
'dep_type': 'cipd',
},
'tools/clang':
- Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'cfd0f628093b7382ac054fb33e23fa9d9a278bc3',
+ Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'a387faa2a6741f565e45d78804a49a0e55de5909',
'tools/luci-go': {
'packages': [
{
@@ -290,7 +290,7 @@ deps = {
'third_party/protobuf':
Var('chromium_url') + '/external/github.com/google/protobuf'+ '@' + '6a59a2ad1f61d9696092f79b6d74368b4d7970a3',
'third_party/zlib':
- Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '348acca950b1d6de784a954f4fda0952046c652c',
+ Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '09490503d0f201b81e03f5ca0ab8ba8ee76d4a8e',
'third_party/jsoncpp/source':
Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '9059f5cad030ba11d37818847443a53918c327b1',
'third_party/ittapi': {
@@ -300,7 +300,7 @@ deps = {
'condition': "checkout_ittapi or check_v8_header_includes",
},
'third_party/requests': {
- 'url': Var('chromium_url') + '/external/github.com/kennethreitz/requests.git' + '@' + 'bfb93d4b7d269a8735f1b216093e7e9a9fdc4517',
+ 'url': Var('chromium_url') + '/external/github.com/kennethreitz/requests.git' + '@' + '2c2138e811487b13020eb331482fb991fd399d4e',
'condition': 'checkout_android',
},
}
@@ -476,11 +476,23 @@ hooks = [
],
},
{
+ # Case-insensitivity for the Win SDK. Must run before win_toolchain below.
+ 'name': 'ciopfs_linux',
+ 'pattern': '.',
+ 'condition': 'checkout_win and host_os == "linux"',
+ 'action': [ 'download_from_google_storage',
+ '--no_resume',
+ '--no_auth',
+ '--bucket', 'chromium-browser-clang/ciopfs',
+ '-s', 'build/ciopfs.sha1',
+ ]
+ },
+ {
# Update the Windows toolchain if necessary.
'name': 'win_toolchain',
'pattern': '.',
'condition': 'checkout_win',
- 'action': ['python', 'build/vs_toolchain.py', 'update'],
+ 'action': ['python', 'build/vs_toolchain.py', 'update', '--force'],
},
{
# Update the Mac toolchain if necessary.
diff --git a/deps/v8/ENG_REVIEW_OWNERS b/deps/v8/ENG_REVIEW_OWNERS
index 6b189307ad7..173f6d6aeee 100644
--- a/deps/v8/ENG_REVIEW_OWNERS
+++ b/deps/v8/ENG_REVIEW_OWNERS
@@ -6,4 +6,3 @@ adamk@chromium.org
danno@chromium.org
hpayer@chromium.org
rmcilroy@chromium.org
-yangguo@chromium.org
diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h
index 9b6783bb353..dcbb09bb663 100644
--- a/deps/v8/base/trace_event/common/trace_event_common.h
+++ b/deps/v8/base/trace_event/common/trace_event_common.h
@@ -264,8 +264,10 @@ bool BASE_EXPORT ConvertThreadId(const ::base::PlatformThreadId& thread,
} // namespace legacy
template <>
-BASE_EXPORT TraceTimestamp
-ConvertTimestampToTraceTimeNs(const ::base::TimeTicks& ticks);
+struct BASE_EXPORT TraceTimestampTraits<::base::TimeTicks> {
+ static TraceTimestamp ConvertTimestampToTraceTimeNs(
+ const ::base::TimeTicks& ticks);
+};
} // namespace perfetto
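
The hunk above replaces a bare function specialization with a specialization of the `TraceTimestampTraits` struct for converting `::base::TimeTicks` into trace time. A minimal, self-contained sketch of that traits pattern follows; `TraceTimestamp` and `MyTicks` are simplified stand-ins for illustration only, not the real perfetto or embedder declarations.

    // Sketch of the traits-based timestamp conversion shown above.
    // TraceTimestamp and MyTicks are stand-ins, not real perfetto types.
    #include <cstdint>

    namespace perfetto {
    struct TraceTimestamp {
      uint32_t clock_id;
      uint64_t value;
    };
    template <typename T>
    struct TraceTimestampTraits;  // primary template; specialized per clock type
    }  // namespace perfetto

    struct MyTicks {
      uint64_t nanos;  // hypothetical embedder clock value
    };

    namespace perfetto {
    template <>
    struct TraceTimestampTraits<MyTicks> {
      static TraceTimestamp ConvertTimestampToTraceTimeNs(const MyTicks& ticks) {
        return {/*clock_id=*/1, ticks.nanos};  // clock id is arbitrary in this sketch
      }
    };
    }  // namespace perfetto
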
diff --git a/deps/v8/gni/snapshot_toolchain.gni b/deps/v8/gni/snapshot_toolchain.gni
index 53963a048bf..e855b88e430 100644
--- a/deps/v8/gni/snapshot_toolchain.gni
+++ b/deps/v8/gni/snapshot_toolchain.gni
@@ -60,6 +60,10 @@ if (v8_snapshot_toolchain == "") {
# binaries built for the same OS, so build the snapshot with the current
# toolchain here, too.
v8_snapshot_toolchain = current_toolchain
+ } else if (current_os == host_os && host_cpu == "arm64" &&
+ current_cpu == "arm") {
+ # Trying to compile 32-bit arm on arm64. Good luck!
+ v8_snapshot_toolchain = current_toolchain
} else if (host_cpu == "x64" &&
(v8_current_cpu == "mips" || v8_current_cpu == "mips64")) {
# We don't support snapshot generation for big-endian targets,
diff --git a/deps/v8/gni/v8.cmx b/deps/v8/gni/v8.cmx
new file mode 100644
index 00000000000..8cd8b75fdfe
--- /dev/null
+++ b/deps/v8/gni/v8.cmx
@@ -0,0 +1,44 @@
+{
+ "sandbox": {
+ "dev": [
+ "null",
+ "zero"
+ ],
+ "features": [
+ "deprecated-ambient-replace-as-executable",
+ "isolated-cache-storage",
+ "isolated-persistent-storage",
+ "isolated-temp",
+ "root-ssl-certificates",
+ "vulkan"
+ ],
+ "services": [
+ "fuchsia.accessibility.semantics.SemanticsManager",
+ "fuchsia.camera3.DeviceWatcher",
+ "fuchsia.device.NameProvider",
+ "fuchsia.fonts.Provider",
+ "fuchsia.intl.PropertyProvider",
+ "fuchsia.logger.Log",
+ "fuchsia.logger.LogSink",
+ "fuchsia.media.Audio",
+ "fuchsia.media.SessionAudioConsumerFactory",
+ "fuchsia.media.drm.Widevine",
+ "fuchsia.mediacodec.CodecFactory",
+ "fuchsia.memorypressure.Provider",
+ "fuchsia.net.NameLookup",
+ "fuchsia.net.interfaces.State",
+ "fuchsia.posix.socket.Provider",
+ "fuchsia.process.Launcher",
+ "fuchsia.sys.Environment",
+ "fuchsia.sys.Launcher",
+ "fuchsia.sys.Loader",
+ "fuchsia.sysmem.Allocator",
+ "fuchsia.ui.input.ImeService",
+ "fuchsia.ui.input.ImeVisibilityService",
+ "fuchsia.ui.scenic.Scenic",
+ "fuchsia.ui.policy.Presenter",
+ "fuchsia.vulkan.loader.Loader",
+ "fuchsia.web.ContextProvider"
+ ]
+ }
+}
diff --git a/deps/v8/include/OWNERS b/deps/v8/include/OWNERS
index cd5fd0535e4..7d538da1aa6 100644
--- a/deps/v8/include/OWNERS
+++ b/deps/v8/include/OWNERS
@@ -1,6 +1,6 @@
adamk@chromium.org
cbruni@chromium.org
-danno@chromium.org
+leszeks@chromium.org
mlippautz@chromium.org
ulan@chromium.org
verwaest@chromium.org
@@ -8,15 +8,9 @@ yangguo@chromium.org
per-file *DEPS=file:../COMMON_OWNERS
per-file v8-internal.h=file:../COMMON_OWNERS
-per-file v8-inspector.h=dgozman@chromium.org
-per-file v8-inspector.h=pfeldman@chromium.org
-per-file v8-inspector.h=kozyatinskiy@chromium.org
-per-file v8-inspector-protocol.h=dgozman@chromium.org
-per-file v8-inspector-protocol.h=pfeldman@chromium.org
-per-file v8-inspector-protocol.h=kozyatinskiy@chromium.org
-per-file js_protocol.pdl=dgozman@chromium.org
-per-file js_protocol.pdl=pfeldman@chromium.org
-per-file js_protocol.pdl=bmeurer@chromium.org
+per-file v8-inspector.h=file:../src/inspector/OWNERS
+per-file v8-inspector-protocol.h=file:../src/inspector/OWNERS
+per-file js_protocol.pdl=file:../src/inspector/OWNERS
# For branch updates:
per-file v8-version.h=file:../INFRA_OWNERS
diff --git a/deps/v8/include/cppgc/allocation.h b/deps/v8/include/cppgc/allocation.h
index b6f9d3902ba..f4f0e72bd51 100644
--- a/deps/v8/include/cppgc/allocation.h
+++ b/deps/v8/include/cppgc/allocation.h
@@ -43,6 +43,28 @@ class V8_EXPORT MakeGarbageCollectedTraitInternal {
std::memory_order_release);
}
+ template <typename U, typename CustomSpace>
+ struct SpacePolicy {
+ static void* Allocate(AllocationHandle& handle, size_t size) {
+ // Custom space.
+ static_assert(std::is_base_of<CustomSpaceBase, CustomSpace>::value,
+ "Custom space must inherit from CustomSpaceBase.");
+ return MakeGarbageCollectedTraitInternal::Allocate(
+ handle, size, internal::GCInfoTrait<U>::Index(),
+ CustomSpace::kSpaceIndex);
+ }
+ };
+
+ template <typename U>
+ struct SpacePolicy<U, void> {
+ static void* Allocate(AllocationHandle& handle, size_t size) {
+ // Default space.
+ return MakeGarbageCollectedTraitInternal::Allocate(
+ handle, size, internal::GCInfoTrait<U>::Index());
+ }
+ };
+
+ private:
static void* Allocate(cppgc::AllocationHandle& handle, size_t size,
GCInfoIndex index);
static void* Allocate(cppgc::AllocationHandle& handle, size_t size,
@@ -71,27 +93,6 @@ class MakeGarbageCollectedTraitBase
internal::api_constants::kLargeObjectSizeThreshold,
"GarbageCollectedMixin may not be a large object");
- template <typename U, typename CustomSpace>
- struct SpacePolicy {
- static void* Allocate(AllocationHandle& handle, size_t size) {
- // Custom space.
- static_assert(std::is_base_of<CustomSpaceBase, CustomSpace>::value,
- "Custom space must inherit from CustomSpaceBase.");
- return internal::MakeGarbageCollectedTraitInternal::Allocate(
- handle, size, internal::GCInfoTrait<T>::Index(),
- CustomSpace::kSpaceIndex);
- }
- };
-
- template <typename U>
- struct SpacePolicy<U, void> {
- static void* Allocate(AllocationHandle& handle, size_t size) {
- // Default space.
- return internal::MakeGarbageCollectedTraitInternal::Allocate(
- handle, size, internal::GCInfoTrait<T>::Index());
- }
- };
-
protected:
/**
* Allocates memory for an object of type T.
@@ -101,9 +102,11 @@ class MakeGarbageCollectedTraitBase
* \param size The size that should be reserved for the object.
* \returns the memory to construct an object of type T on.
*/
- static void* Allocate(AllocationHandle& handle, size_t size) {
- return SpacePolicy<T, typename SpaceTrait<T>::Space>::Allocate(handle,
- size);
+ V8_INLINE static void* Allocate(AllocationHandle& handle, size_t size) {
+ return SpacePolicy<
+ typename internal::GCInfoFolding<
+ T, typename T::ParentMostGarbageCollectedType>::ResultType,
+ typename SpaceTrait<T>::Space>::Allocate(handle, size);
}
/**
@@ -112,7 +115,7 @@ class MakeGarbageCollectedTraitBase
*
* \param payload The base pointer the object is allocated at.
*/
- static void MarkObjectAsFullyConstructed(const void* payload) {
+ V8_INLINE static void MarkObjectAsFullyConstructed(const void* payload) {
internal::MakeGarbageCollectedTraitInternal::MarkObjectAsFullyConstructed(
payload);
}
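
With this change, `SpacePolicy` lives in the internal trait and `Allocate()` folds the allocated type through `GCInfoFolding` before consulting `SpaceTrait`. A short sketch of how an embedder type ends up in a custom space versus the default space, assuming the public cppgc headers; `CompactableSpace` and `Rectangle` are illustrative names, not part of this change.

    // Sketch, assuming the public cppgc embedder API.
    #include "cppgc/allocation.h"
    #include "cppgc/custom-space.h"
    #include "cppgc/garbage-collected.h"
    #include "cppgc/visitor.h"

    class CompactableSpace : public cppgc::CustomSpace<CompactableSpace> {
     public:
      static constexpr cppgc::CustomSpaceIndex kSpaceIndex = 0;
    };

    class Rectangle final : public cppgc::GarbageCollected<Rectangle> {
     public:
      void Trace(cppgc::Visitor*) const {}
    };

    // Pin Rectangle to the custom space. Without this specialization, the
    // SpacePolicy<U, void> fallback above routes the allocation to the
    // default space; with it, CompactableSpace::kSpaceIndex is used.
    namespace cppgc {
    template <>
    struct SpaceTrait<Rectangle> {
      using Space = CompactableSpace;
    };
    }  // namespace cppgc

    // Allocation itself is unchanged from the embedder's point of view:
    //   Rectangle* r = cppgc::MakeGarbageCollected<Rectangle>(allocation_handle);
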
diff --git a/deps/v8/include/cppgc/cross-thread-persistent.h b/deps/v8/include/cppgc/cross-thread-persistent.h
index 1f509d4b007..9cfcd23fdf8 100644
--- a/deps/v8/include/cppgc/cross-thread-persistent.h
+++ b/deps/v8/include/cppgc/cross-thread-persistent.h
@@ -44,7 +44,26 @@ class BasicCrossThreadPersistent final : public PersistentBase,
T* raw, const SourceLocation& loc = SourceLocation::Current())
: PersistentBase(raw), LocationPolicy(loc) {
if (!IsValid(raw)) return;
- PersistentRegion& region = this->GetPersistentRegion(raw);
+ PersistentRegionLock guard;
+ CrossThreadPersistentRegion& region = this->GetPersistentRegion(raw);
+ SetNode(region.AllocateNode(this, &Trace));
+ this->CheckPointer(raw);
+ }
+
+ class UnsafeCtorTag {
+ private:
+ UnsafeCtorTag() = default;
+ template <typename U, typename OtherWeaknessPolicy,
+ typename OtherLocationPolicy, typename OtherCheckingPolicy>
+ friend class BasicCrossThreadPersistent;
+ };
+
+ BasicCrossThreadPersistent( // NOLINT
+ UnsafeCtorTag, T* raw,
+ const SourceLocation& loc = SourceLocation::Current())
+ : PersistentBase(raw), LocationPolicy(loc) {
+ if (!IsValid(raw)) return;
+ CrossThreadPersistentRegion& region = this->GetPersistentRegion(raw);
SetNode(region.AllocateNode(this, &Trace));
this->CheckPointer(raw);
}
@@ -173,9 +192,17 @@ class BasicCrossThreadPersistent final : public PersistentBase,
const void* old_value = GetValue();
if (IsValid(old_value)) {
PersistentRegionLock guard;
- PersistentRegion& region = this->GetPersistentRegion(old_value);
- region.FreeNode(GetNode());
- SetNode(nullptr);
+ old_value = GetValue();
+ // The fast path check (IsValid()) does not acquire the lock. Reload
+ // the value to ensure the reference has not been cleared.
+ if (IsValid(old_value)) {
+ CrossThreadPersistentRegion& region =
+ this->GetPersistentRegion(old_value);
+ region.FreeNode(GetNode());
+ SetNode(nullptr);
+ } else {
+ CPPGC_DCHECK(!GetNode());
+ }
}
SetValue(nullptr);
}
@@ -225,9 +252,12 @@ class BasicCrossThreadPersistent final : public PersistentBase,
BasicCrossThreadPersistent<U, OtherWeaknessPolicy, OtherLocationPolicy,
OtherCheckingPolicy>
To() const {
+ using OtherBasicCrossThreadPersistent =
+ BasicCrossThreadPersistent<U, OtherWeaknessPolicy, OtherLocationPolicy,
+ OtherCheckingPolicy>;
PersistentRegionLock guard;
- return BasicCrossThreadPersistent<U, OtherWeaknessPolicy,
- OtherLocationPolicy, OtherCheckingPolicy>(
+ return OtherBasicCrossThreadPersistent(
+ typename OtherBasicCrossThreadPersistent::UnsafeCtorTag(),
static_cast<U*>(Get()));
}
@@ -254,14 +284,22 @@ class BasicCrossThreadPersistent final : public PersistentBase,
const void* old_value = GetValue();
if (IsValid(old_value)) {
PersistentRegionLock guard;
- PersistentRegion& region = this->GetPersistentRegion(old_value);
- if (IsValid(ptr) && (&region == &this->GetPersistentRegion(ptr))) {
- SetValue(ptr);
- this->CheckPointer(ptr);
- return;
+ old_value = GetValue();
+ // The fast path check (IsValid()) does not acquire the lock. Reload
+ // the value to ensure the reference has not been cleared.
+ if (IsValid(old_value)) {
+ CrossThreadPersistentRegion& region =
+ this->GetPersistentRegion(old_value);
+ if (IsValid(ptr) && (&region == &this->GetPersistentRegion(ptr))) {
+ SetValue(ptr);
+ this->CheckPointer(ptr);
+ return;
+ }
+ region.FreeNode(GetNode());
+ SetNode(nullptr);
+ } else {
+ CPPGC_DCHECK(!GetNode());
}
- region.FreeNode(GetNode());
- SetNode(nullptr);
}
SetValue(ptr);
if (!IsValid(ptr)) return;
@@ -274,7 +312,8 @@ class BasicCrossThreadPersistent final : public PersistentBase,
PersistentRegionLock::AssertLocked();
const void* old_value = GetValue();
if (IsValid(old_value)) {
- PersistentRegion& region = this->GetPersistentRegion(old_value);
+ CrossThreadPersistentRegion& region =
+ this->GetPersistentRegion(old_value);
if (IsValid(ptr) && (&region == &this->GetPersistentRegion(ptr))) {
SetValue(ptr);
this->CheckPointer(ptr);
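
The clearing and assignment paths above now reload the stored value after taking `PersistentRegionLock`, because the unlocked fast-path validity check can race with a concurrent clear. A generic sketch of that re-check pattern, using only the C++ standard library and not the cppgc implementation itself:

    // Double-checked clear: fast path without the lock, re-check under it.
    #include <atomic>
    #include <mutex>

    struct Slot {
      std::atomic<void*> value{nullptr};
      std::mutex lock;

      void Clear() {
        if (value.load(std::memory_order_relaxed) == nullptr) return;  // fast path
        std::lock_guard<std::mutex> guard(lock);
        // Reload under the lock: another thread may have cleared the slot
        // between the fast-path check and acquiring the lock.
        if (value.load(std::memory_order_relaxed) == nullptr) return;
        // ... release any bookkeeping node here ...
        value.store(nullptr, std::memory_order_relaxed);
      }
    };
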
diff --git a/deps/v8/include/cppgc/explicit-management.h b/deps/v8/include/cppgc/explicit-management.h
new file mode 100644
index 00000000000..8fb321c08ca
--- /dev/null
+++ b/deps/v8/include/cppgc/explicit-management.h
@@ -0,0 +1,73 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_EXPLICIT_MANAGEMENT_H_
+#define INCLUDE_CPPGC_EXPLICIT_MANAGEMENT_H_
+
+#include <cstddef>
+
+#include "cppgc/allocation.h"
+#include "cppgc/internal/logging.h"
+#include "cppgc/type-traits.h"
+
+namespace cppgc {
+namespace internal {
+
+V8_EXPORT void FreeUnreferencedObject(void*);
+V8_EXPORT bool Resize(void*, size_t);
+
+} // namespace internal
+
+namespace subtle {
+
+/**
+ * Informs the garbage collector that `object` can be immediately reclaimed. The
+ * destructor may not be invoked immediately but only during the next garbage
+ * collection.
+ *
+ * It is up to the embedder to guarantee that no other object holds a reference
+ * to `object` after calling `FreeUnreferencedObject()`. In case such a
+ * reference exists, its use results in a use-after-free.
+ *
+ * \param object Reference to an object that is of type `GarbageCollected` and
+ * should be immediately reclaimed.
+ */
+template <typename T>
+void FreeUnreferencedObject(T* object) {
+ static_assert(IsGarbageCollectedTypeV<T>,
+ "Object must be of type GarbageCollected.");
+ if (!object) return;
+ internal::FreeUnreferencedObject(object);
+}
+
+/**
+ * Tries to resize `object` of type `T` with additional bytes on top of
+ * sizeof(T). Resizing is only useful with trailing inlined storage, see e.g.
+ * `MakeGarbageCollected(AllocationHandle&, AdditionalBytes)`.
+ *
+ * `Resize()` performs growing or shrinking as needed and may skip the operation
+ * for internal reasons; see the return value.
+ *
+ * It is up to the embedder to guarantee that in case of shrinking a larger
+ * object down, the reclaimed area is not used anymore. Any subsequent use
+ * results in a use-after-free.
+ *
+ * \param object Reference to an object that is of type `GarbageCollected` and
+ * should be resized.
+ * \param additional_bytes Bytes in addition to sizeof(T) that the object should
+ * provide.
+ * \returns true when the operation was successful and the result can be relied
+ * on, and false otherwise.
+ */
+template <typename T>
+bool Resize(T& object, AdditionalBytes additional_bytes) {
+ static_assert(IsGarbageCollectedTypeV<T>,
+ "Object must be of type GarbageCollected.");
+ return internal::Resize(&object, sizeof(T) + additional_bytes.value);
+}
+
+} // namespace subtle
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_EXPLICIT_MANAGEMENT_H_
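
A hedged usage sketch for the new header: Buffer is a hypothetical embedder type, and the AllocationHandle is assumed to come from an existing cppgc heap.

#include "cppgc/allocation.h"
#include "cppgc/explicit-management.h"
#include "cppgc/garbage-collected.h"
#include "cppgc/visitor.h"

class Buffer final : public cppgc::GarbageCollected<Buffer> {
 public:
  void Trace(cppgc::Visitor*) const {}
};

void Example(cppgc::AllocationHandle& handle) {
  // Allocate with 64 bytes of trailing inline storage.
  Buffer* buffer =
      cppgc::MakeGarbageCollected<Buffer>(handle, cppgc::AdditionalBytes(64));

  // Try to shrink the trailing storage; the call may be skipped internally,
  // so only rely on the smaller size when it returns true.
  bool shrunk = cppgc::subtle::Resize(*buffer, cppgc::AdditionalBytes(16));
  (void)shrunk;

  // Once no other reference exists, the object can be reclaimed eagerly; the
  // destructor may still run only during the next garbage collection.
  cppgc::subtle::FreeUnreferencedObject(buffer);
}
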
diff --git a/deps/v8/include/cppgc/garbage-collected.h b/deps/v8/include/cppgc/garbage-collected.h
index d28a39074ae..a3839e1baa5 100644
--- a/deps/v8/include/cppgc/garbage-collected.h
+++ b/deps/v8/include/cppgc/garbage-collected.h
@@ -73,10 +73,11 @@ class GarbageCollectedBase {
* };
* \endcode
*/
-template <typename>
+template <typename T>
class GarbageCollected : public internal::GarbageCollectedBase {
public:
using IsGarbageCollectedTypeMarker = void;
+ using ParentMostGarbageCollectedType = T;
protected:
GarbageCollected() = default;
diff --git a/deps/v8/include/cppgc/heap-state.h b/deps/v8/include/cppgc/heap-state.h
index 0157282a560..3fd6b54a8a2 100644
--- a/deps/v8/include/cppgc/heap-state.h
+++ b/deps/v8/include/cppgc/heap-state.h
@@ -49,6 +49,17 @@ class V8_EXPORT HeapState final {
*/
static bool IsInAtomicPause(const HeapHandle& heap_handle);
+ /**
+ * Returns whether the last garbage collection was finalized conservatively
+ * (i.e., with a non-empty stack). This API is experimental and is expected to
+ * be removed in the future.
+ *
+ * \param heap_handle The corresponding heap.
+ * \returns true if the last garbage collection was finalized conservatively,
+ * and false otherwise.
+ */
+ static bool PreviousGCWasConservative(const HeapHandle& heap_handle);
+
private:
HeapState() = delete;
};
diff --git a/deps/v8/include/cppgc/internal/gc-info.h b/deps/v8/include/cppgc/internal/gc-info.h
index 9c26d6aa5b4..b9074b1ad5d 100644
--- a/deps/v8/include/cppgc/internal/gc-info.h
+++ b/deps/v8/include/cppgc/internal/gc-info.h
@@ -5,7 +5,8 @@
#ifndef INCLUDE_CPPGC_INTERNAL_GC_INFO_H_
#define INCLUDE_CPPGC_INTERNAL_GC_INFO_H_
-#include <stdint.h>
+#include <atomic>
+#include <cstdint>
#include "cppgc/internal/finalizer-trait.h"
#include "cppgc/internal/name-trait.h"
@@ -17,27 +18,54 @@ namespace internal {
using GCInfoIndex = uint16_t;
-class V8_EXPORT RegisteredGCInfoIndex final {
- public:
- RegisteredGCInfoIndex(FinalizationCallback finalization_callback,
- TraceCallback trace_callback,
- NameCallback name_callback, bool has_v_table);
- GCInfoIndex GetIndex() const { return index_; }
+// Acquires a new GC info object and returns the index. In addition, also
+// updates `registered_index` atomically.
+V8_EXPORT GCInfoIndex
+EnsureGCInfoIndex(std::atomic<GCInfoIndex>& registered_index,
+ FinalizationCallback, TraceCallback, NameCallback, bool);
- private:
- const GCInfoIndex index_;
+// Fold types based on finalizer behavior. Note that finalizer characteristics
+// align with trace behavior, i.e., destructors are virtual when trace methods
+// are and vice versa.
+template <typename T, typename ParentMostGarbageCollectedType>
+struct GCInfoFolding {
+ static constexpr bool kHasVirtualDestructorAtBase =
+ std::has_virtual_destructor<ParentMostGarbageCollectedType>::value;
+ static constexpr bool kBothTypesAreTriviallyDestructible =
+ std::is_trivially_destructible<ParentMostGarbageCollectedType>::value &&
+ std::is_trivially_destructible<T>::value;
+ static constexpr bool kHasCustomFinalizerDispatchAtBase =
+ internal::HasFinalizeGarbageCollectedObject<
+ ParentMostGarbageCollectedType>::value;
+#ifdef CPPGC_SUPPORTS_OBJECT_NAMES
+ static constexpr bool kWantsDetailedObjectNames = true;
+#else // !CPPGC_SUPPORTS_OBJECT_NAMES
+ static constexpr bool kWantsDetailedObjectNames = false;
+#endif // !CPPGC_SUPPORTS_OBJECT_NAMES
+
+ // Folding would regress name resolution when deriving names from C++
+ // class names as it would just fold a name to the base class name.
+ using ResultType = std::conditional_t<(kHasVirtualDestructorAtBase ||
+ kBothTypesAreTriviallyDestructible ||
+ kHasCustomFinalizerDispatchAtBase) &&
+ !kWantsDetailedObjectNames,
+ ParentMostGarbageCollectedType, T>;
};
// Trait determines how the garbage collector treats objects wrt. to traversing,
// finalization, and naming.
template <typename T>
-struct GCInfoTrait {
+struct GCInfoTrait final {
static GCInfoIndex Index() {
static_assert(sizeof(T), "T must be fully defined");
- static const RegisteredGCInfoIndex registered_index(
- FinalizerTrait<T>::kCallback, TraceTrait<T>::Trace,
- NameTrait<T>::GetName, std::is_polymorphic<T>::value);
- return registered_index.GetIndex();
+ static std::atomic<GCInfoIndex>
+ registered_index; // Uses zero initialization.
+ const GCInfoIndex index = registered_index.load(std::memory_order_acquire);
+ return index ? index
+ : EnsureGCInfoIndex(
+ registered_index, FinalizerTrait<T>::kCallback,
+ TraceTrait<T>::Trace, NameTrait<T>::GetName,
+ std::is_polymorphic<T>::value);
}
};
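
GCInfoFolding is internal machinery, but its effect can be illustrated with a hypothetical hierarchy: with a virtual destructor on the parent-most GarbageCollected base (and detailed object names disabled), derived types are folded onto the base's GCInfo entry instead of registering their own. A sketch, assuming the rules shown above:

#include "cppgc/garbage-collected.h"
#include "cppgc/visitor.h"

class Base : public cppgc::GarbageCollected<Base> {
 public:
  virtual ~Base() = default;
  virtual void Trace(cppgc::Visitor*) const {}
};

class Derived final : public Base {
 public:
  void Trace(cppgc::Visitor* visitor) const override { Base::Trace(visitor); }
};

// Derived::ParentMostGarbageCollectedType is Base, and Base has a virtual
// destructor, so the internal trait GCInfoFolding<Derived, Base>::ResultType
// resolves to Base: both classes share a single registered GCInfo index.
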
diff --git a/deps/v8/include/cppgc/internal/persistent-node.h b/deps/v8/include/cppgc/internal/persistent-node.h
index 6524f326a56..5626b17820b 100644
--- a/deps/v8/include/cppgc/internal/persistent-node.h
+++ b/deps/v8/include/cppgc/internal/persistent-node.h
@@ -19,6 +19,8 @@ class Visitor;
namespace internal {
+class CrossThreadPersistentRegion;
+
// PersistentNode represents a variant of two states:
// 1) traceable node with a back pointer to the Persistent object;
// 2) freelist entry.
@@ -30,6 +32,7 @@ class PersistentNode final {
PersistentNode& operator=(const PersistentNode&) = delete;
void InitializeAsUsedNode(void* owner, TraceCallback trace) {
+ CPPGC_DCHECK(trace);
owner_ = owner;
trace_ = trace;
}
@@ -89,12 +92,15 @@ class V8_EXPORT PersistentRegion final {
}
PersistentNode* node = free_list_head_;
free_list_head_ = free_list_head_->FreeListNext();
+ CPPGC_DCHECK(!node->IsUsed());
node->InitializeAsUsedNode(owner, trace);
nodes_in_use_++;
return node;
}
void FreeNode(PersistentNode* node) {
+ CPPGC_DCHECK(node);
+ CPPGC_DCHECK(node->IsUsed());
node->InitializeAsFreeNode(free_list_head_);
free_list_head_ = node;
CPPGC_DCHECK(nodes_in_use_ > 0);
@@ -113,6 +119,8 @@ class V8_EXPORT PersistentRegion final {
std::vector<std::unique_ptr<PersistentNodeSlots>> nodes_;
PersistentNode* free_list_head_ = nullptr;
size_t nodes_in_use_ = 0;
+
+ friend class CrossThreadPersistentRegion;
};
// CrossThreadPersistent uses PersistentRegion but protects it using this lock
@@ -125,6 +133,38 @@ class V8_EXPORT PersistentRegionLock final {
static void AssertLocked();
};
+// Variant of PersistentRegion that checks whether the PersistentRegionLock is
+// locked.
+class V8_EXPORT CrossThreadPersistentRegion final {
+ public:
+ CrossThreadPersistentRegion() = default;
+ // Clears Persistent fields to avoid stale pointers after heap teardown.
+ ~CrossThreadPersistentRegion();
+
+ CrossThreadPersistentRegion(const CrossThreadPersistentRegion&) = delete;
+ CrossThreadPersistentRegion& operator=(const CrossThreadPersistentRegion&) =
+ delete;
+
+ V8_INLINE PersistentNode* AllocateNode(void* owner, TraceCallback trace) {
+ PersistentRegionLock::AssertLocked();
+ return persistent_region_.AllocateNode(owner, trace);
+ }
+
+ V8_INLINE void FreeNode(PersistentNode* node) {
+ PersistentRegionLock::AssertLocked();
+ persistent_region_.FreeNode(node);
+ }
+
+ void Trace(Visitor*);
+
+ size_t NodesInUse() const;
+
+ void ClearAllUsedNodes();
+
+ private:
+ PersistentRegion persistent_region_;
+};
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/include/cppgc/internal/pointer-policies.h b/deps/v8/include/cppgc/internal/pointer-policies.h
index ea86a0a7057..ceb002f02d5 100644
--- a/deps/v8/include/cppgc/internal/pointer-policies.h
+++ b/deps/v8/include/cppgc/internal/pointer-policies.h
@@ -16,6 +16,7 @@ namespace cppgc {
namespace internal {
class PersistentRegion;
+class CrossThreadPersistentRegion;
// Tags to distinguish between strong and weak member types.
class StrongMemberTag;
@@ -115,12 +116,14 @@ struct WeakPersistentPolicy {
struct StrongCrossThreadPersistentPolicy {
using IsStrongPersistent = std::true_type;
- static V8_EXPORT PersistentRegion& GetPersistentRegion(const void* object);
+ static V8_EXPORT CrossThreadPersistentRegion& GetPersistentRegion(
+ const void* object);
};
struct WeakCrossThreadPersistentPolicy {
using IsStrongPersistent = std::false_type;
- static V8_EXPORT PersistentRegion& GetPersistentRegion(const void* object);
+ static V8_EXPORT CrossThreadPersistentRegion& GetPersistentRegion(
+ const void* object);
};
// Forward declarations setting up the default policies.
diff --git a/deps/v8/include/cppgc/testing.h b/deps/v8/include/cppgc/testing.h
index f93897a9aaf..229ce140f94 100644
--- a/deps/v8/include/cppgc/testing.h
+++ b/deps/v8/include/cppgc/testing.h
@@ -44,6 +44,55 @@ class V8_EXPORT V8_NODISCARD OverrideEmbedderStackStateScope final {
HeapHandle& heap_handle_;
};
+/**
+ * Testing interface for managed heaps that allows for controlling garbage
+ * collection timings. Embedders should use this class when testing the
+ * interaction of their code with incremental/concurrent garbage collection.
+ */
+class V8_EXPORT StandaloneTestingHeap final {
+ public:
+ explicit StandaloneTestingHeap(HeapHandle&);
+
+ /**
+ * Start an incremental garbage collection.
+ */
+ void StartGarbageCollection();
+
+ /**
+ * Perform an incremental step. This will also schedule concurrent steps if
+ * needed.
+ *
+ * \param stack_state The state of the stack during the step.
+ */
+ bool PerformMarkingStep(EmbedderStackState stack_state);
+
+ /**
+ * Finalize the current garbage collection cycle atomically.
+ * Assumes that garbage collection is in progress.
+ *
+ * \param stack_state The state of the stack for finalizing the garbage
+ * collection cycle.
+ */
+ void FinalizeGarbageCollection(EmbedderStackState stack_state);
+
+ /**
+ * Toggle main thread marking on/off. Allows stressing concurrent marking
+ * (e.g. to better detect data races).
+ *
+ * \param should_mark Denotes whether the main thread should contribute to
+ * marking. Defaults to true.
+ */
+ void ToggleMainThreadMarking(bool should_mark);
+
+ /**
+ * Force enable compaction for the next garbage collection cycle.
+ */
+ void ForceCompactionForNextGarbageCollection();
+
+ private:
+ HeapHandle& heap_handle_;
+};
+
} // namespace testing
} // namespace cppgc
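
A hedged sketch of driving a garbage collection cycle with the new testing heap; the HeapHandle is assumed to come from an embedder-created cppgc heap set up for testing.

#include "cppgc/common.h"
#include "cppgc/testing.h"

void DriveIncrementalGC(cppgc::HeapHandle& handle) {
  cppgc::testing::StandaloneTestingHeap heap(handle);
  heap.StartGarbageCollection();
  // Perform a few main-thread steps; concurrent steps may be scheduled in
  // between as needed.
  for (int i = 0; i < 3; ++i) {
    heap.PerformMarkingStep(cppgc::EmbedderStackState::kNoHeapPointers);
  }
  heap.FinalizeGarbageCollection(cppgc::EmbedderStackState::kNoHeapPointers);
}
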
diff --git a/deps/v8/include/cppgc/visitor.h b/deps/v8/include/cppgc/visitor.h
index 95fd5fc842c..98de9957bd6 100644
--- a/deps/v8/include/cppgc/visitor.h
+++ b/deps/v8/include/cppgc/visitor.h
@@ -158,22 +158,67 @@ class V8_EXPORT Visitor {
}
/**
- * Trace method for ephemerons. Used for tracing raw ephemeron in which the
- * key and value are kept separately.
+ * Trace method for a single ephemeron. Used for tracing a raw ephemeron in
+ * which the `key` and `value` are kept separately.
*
- * \param key WeakMember reference weakly retaining a key object.
- * \param value Member reference weakly retaining a value object.
+ * \param weak_member_key WeakMember reference weakly retaining a key object.
+ * \param member_value Member reference with ephemeron semantics.
*/
- template <typename K, typename V>
- void TraceEphemeron(const WeakMember<K>& key, const V* value) {
- const K* k = key.GetRawAtomic();
- if (!k) return;
- TraceDescriptor value_desc = TraceTrait<V>::GetTraceDescriptor(value);
- // `value` must always be non-null. `value_desc.base_object_payload` may be
- // null in the case that value is not a garbage-collected object but only
- // traceable.
+ template <typename KeyType, typename ValueType>
+ void TraceEphemeron(const WeakMember<KeyType>& weak_member_key,
+ const Member<ValueType>* member_value) {
+ const KeyType* key = weak_member_key.GetRawAtomic();
+ if (!key) return;
+
+ // `value` must always be non-null.
+ CPPGC_DCHECK(member_value);
+ const ValueType* value = member_value->GetRawAtomic();
+ if (!value) return;
+
+ // KeyType and ValueType may refer to GarbageCollectedMixin.
+ TraceDescriptor value_desc =
+ TraceTrait<ValueType>::GetTraceDescriptor(value);
+ CPPGC_DCHECK(value_desc.base_object_payload);
+ const void* key_base_object_payload =
+ TraceTrait<KeyType>::GetTraceDescriptor(key).base_object_payload;
+ CPPGC_DCHECK(key_base_object_payload);
+
+ VisitEphemeron(key_base_object_payload, value, value_desc);
+ }
+
+ /**
+ * Trace method for a single ephemeron. Used for tracing a raw ephemeron in
+ * which the `key` and `value` are kept separately. Note that this overload
+ * is for non-GarbageCollected `value`s that are nevertheless traceable.
+ *
+ * \param key `WeakMember` reference weakly retaining a key object.
+ * \param value Reference weakly retaining a value object. Note that
+ * `ValueType` here should not be `Member`. It is expected that
+ * `TraceTrait<ValueType>::GetTraceDescriptor(value)` returns a
+ * `TraceDescriptor` with a null base pointer but a valid trace method.
+ */
+ template <typename KeyType, typename ValueType>
+ void TraceEphemeron(const WeakMember<KeyType>& weak_member_key,
+ const ValueType* value) {
+ static_assert(!IsGarbageCollectedOrMixinTypeV<ValueType>,
+ "garbage-collected types must use WeakMember and Member");
+ const KeyType* key = weak_member_key.GetRawAtomic();
+ if (!key) return;
+
+ // `value` must always be non-null.
CPPGC_DCHECK(value);
- VisitEphemeron(key, value, value_desc);
+ TraceDescriptor value_desc =
+ TraceTrait<ValueType>::GetTraceDescriptor(value);
+ // `value_desc.base_object_payload` must be null as this overload is only
+ // taken for non-garbage-collected values.
+ CPPGC_DCHECK(!value_desc.base_object_payload);
+
+ // KeyType might be a GarbageCollectedMixin.
+ const void* key_base_object_payload =
+ TraceTrait<KeyType>::GetTraceDescriptor(key).base_object_payload;
+ CPPGC_DCHECK(key_base_object_payload);
+
+ VisitEphemeron(key_base_object_payload, value, value_desc);
}
/**
@@ -327,14 +372,6 @@ class V8_EXPORT Visitor {
friend class internal::VisitorBase;
};
-template <typename T>
-struct TraceTrait<Member<T>> {
- static TraceDescriptor GetTraceDescriptor(const void* self) {
- return TraceTrait<T>::GetTraceDescriptor(
- static_cast<const Member<T>*>(self)->GetRawAtomic());
- }
-};
-
} // namespace cppgc
#endif // INCLUDE_CPPGC_VISITOR_H_
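
A hedged sketch of the Member-based TraceEphemeron overload in an embedder Trace method; Key and Value are hypothetical garbage-collected types.

#include "cppgc/garbage-collected.h"
#include "cppgc/member.h"
#include "cppgc/visitor.h"

class Key final : public cppgc::GarbageCollected<Key> {
 public:
  void Trace(cppgc::Visitor*) const {}
};

class Value final : public cppgc::GarbageCollected<Value> {
 public:
  void Trace(cppgc::Visitor*) const {}
};

class EphemeronPair final : public cppgc::GarbageCollected<EphemeronPair> {
 public:
  void Trace(cppgc::Visitor* visitor) const {
    // value_ is only kept alive as long as key_ is alive.
    visitor->TraceEphemeron(key_, &value_);
  }

 private:
  cppgc::WeakMember<Key> key_;
  cppgc::Member<Value> value_;
};
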
diff --git a/deps/v8/include/v8-cppgc.h b/deps/v8/include/v8-cppgc.h
index 2c22193046e..fba35f71c9a 100644
--- a/deps/v8/include/v8-cppgc.h
+++ b/deps/v8/include/v8-cppgc.h
@@ -9,6 +9,7 @@
#include <memory>
#include <vector>
+#include "cppgc/common.h"
#include "cppgc/custom-space.h"
#include "cppgc/heap-statistics.h"
#include "cppgc/internal/write-barrier.h"
@@ -118,6 +119,20 @@ class V8_EXPORT CppHeap {
cppgc::HeapStatistics CollectStatistics(
cppgc::HeapStatistics::DetailLevel detail_level);
+ /**
+ * Enables a detached mode that allows testing garbage collection using
+ * `cppgc::testing` APIs. Once used, the heap cannot be attached to an
+ * `Isolate` anymore.
+ */
+ void EnableDetachedGarbageCollectionsForTesting();
+
+ /**
+ * Performs a stop-the-world garbage collection for testing purposes.
+ *
+ * \param stack_state The stack state to assume for the garbage collection.
+ */
+ void CollectGarbageForTesting(cppgc::EmbedderStackState stack_state);
+
private:
CppHeap() = default;
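
A hedged sketch of the new testing hooks, assuming a v8::CppHeap instance that has never been attached to an Isolate:

#include "cppgc/common.h"
#include "v8-cppgc.h"

void CollectInTest(v8::CppHeap& cpp_heap) {
  // Must be called before the heap is attached to an Isolate; afterwards the
  // heap can only be used in this detached testing mode.
  cpp_heap.EnableDetachedGarbageCollectionsForTesting();
  cpp_heap.CollectGarbageForTesting(
      cppgc::EmbedderStackState::kNoHeapPointers);
}
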
diff --git a/deps/v8/include/v8-fast-api-calls.h b/deps/v8/include/v8-fast-api-calls.h
index ca5fc764a3d..f8b5acb0934 100644
--- a/deps/v8/include/v8-fast-api-calls.h
+++ b/deps/v8/include/v8-fast-api-calls.h
@@ -187,6 +187,9 @@
#include <stddef.h>
#include <stdint.h>
+#include <tuple>
+#include <type_traits>
+
#include "v8config.h" // NOLINT(build/include_directory)
namespace v8 {
@@ -205,39 +208,106 @@ class CTypeInfo {
kV8Value,
};
- // kCallbackOptionsType and kInvalidType are not part of the Type enum
- // because they are only used internally. Use values 255 and 254 that
- // are larger than any valid Type enum.
+ // kCallbackOptionsType is not part of the Type enum
+ // because it is only used internally. Use value 255 that is larger
+ // than any valid Type enum.
static constexpr Type kCallbackOptionsType = Type(255);
- static constexpr Type kInvalidType = Type(254);
- enum class ArgFlags : uint8_t {
+ enum class Flags : uint8_t {
kNone = 0,
};
- explicit constexpr CTypeInfo(Type type, ArgFlags flags = ArgFlags::kNone)
+ explicit constexpr CTypeInfo(Type type, Flags flags = Flags::kNone)
: type_(type), flags_(flags) {}
constexpr Type GetType() const { return type_; }
- constexpr ArgFlags GetFlags() const { return flags_; }
+ constexpr Flags GetFlags() const { return flags_; }
+
+ private:
+ Type type_;
+ Flags flags_;
+};
+
+class V8_EXPORT CFunctionInfo {
+ public:
+ // Construct a struct to hold a CFunction's type information.
+ // |return_info| describes the function's return type.
+ // |arg_info| is an array of |arg_count| CTypeInfos describing the
+ // arguments. Only the last argument may be of the special type
+ // CTypeInfo::kCallbackOptionsType.
+ CFunctionInfo(const CTypeInfo& return_info, unsigned int arg_count,
+ const CTypeInfo* arg_info);
+
+ const CTypeInfo& ReturnInfo() const { return return_info_; }
+
+ // The argument count, not including the v8::FastApiCallbackOptions
+ // if present.
+ unsigned int ArgumentCount() const {
+ return HasOptions() ? arg_count_ - 1 : arg_count_;
+ }
+
+ // |index| must be less than ArgumentCount().
+ // Note: if the last argument passed on construction of CFunctionInfo
+ // has type CTypeInfo::kCallbackOptionsType, it is not included in
+ // ArgumentCount().
+ const CTypeInfo& ArgumentInfo(unsigned int index) const;
- static const CTypeInfo& Invalid() {
- static CTypeInfo invalid = CTypeInfo(kInvalidType);
- return invalid;
+ bool HasOptions() const {
+ // The options arg is always the last one.
+ return arg_count_ > 0 && arg_info_[arg_count_ - 1].GetType() ==
+ CTypeInfo::kCallbackOptionsType;
}
private:
- Type type_;
- ArgFlags flags_;
+ const CTypeInfo return_info_;
+ const unsigned int arg_count_;
+ const CTypeInfo* arg_info_;
};
-class CFunctionInfo {
+class V8_EXPORT CFunction {
public:
- virtual const CTypeInfo& ReturnInfo() const = 0;
- virtual unsigned int ArgumentCount() const = 0;
- virtual const CTypeInfo& ArgumentInfo(unsigned int index) const = 0;
- virtual bool HasOptions() const = 0;
+ constexpr CFunction() : address_(nullptr), type_info_(nullptr) {}
+
+ const CTypeInfo& ReturnInfo() const { return type_info_->ReturnInfo(); }
+
+ const CTypeInfo& ArgumentInfo(unsigned int index) const {
+ return type_info_->ArgumentInfo(index);
+ }
+
+ unsigned int ArgumentCount() const { return type_info_->ArgumentCount(); }
+
+ const void* GetAddress() const { return address_; }
+ const CFunctionInfo* GetTypeInfo() const { return type_info_; }
+
+ template <typename F>
+ static CFunction Make(F* func) {
+ return ArgUnwrap<F*>::Make(func);
+ }
+
+ template <typename F>
+ V8_DEPRECATED("Use CFunctionBuilder instead.")
+ static CFunction MakeWithFallbackSupport(F* func) {
+ return ArgUnwrap<F*>::Make(func);
+ }
+
+ CFunction(const void* address, const CFunctionInfo* type_info);
+
+ private:
+ const void* address_;
+ const CFunctionInfo* type_info_;
+
+ template <typename F>
+ class ArgUnwrap {
+ static_assert(sizeof(F) != sizeof(F),
+ "CFunction must be created from a function pointer.");
+ };
+
+ template <typename R, typename... Args>
+ class ArgUnwrap<R (*)(Args...)> {
+ public:
+ static CFunction Make(R (*func)(Args...));
+ };
};
struct ApiObject {
@@ -272,37 +342,6 @@ struct FastApiCallbackOptions {
namespace internal {
-template <typename T>
-struct GetCType;
-
-#define SPECIALIZE_GET_C_TYPE_FOR(ctype, ctypeinfo) \
- template <> \
- struct GetCType<ctype> { \
- static constexpr CTypeInfo Get() { \
- return CTypeInfo(CTypeInfo::Type::ctypeinfo); \
- } \
- };
-
-#define SUPPORTED_C_TYPES(V) \
- V(void, kVoid) \
- V(bool, kBool) \
- V(int32_t, kInt32) \
- V(uint32_t, kUint32) \
- V(int64_t, kInt64) \
- V(uint64_t, kUint64) \
- V(float, kFloat32) \
- V(double, kFloat64) \
- V(ApiObject, kV8Value)
-
-SUPPORTED_C_TYPES(SPECIALIZE_GET_C_TYPE_FOR)
-
-template <>
-struct GetCType<FastApiCallbackOptions&> {
- static constexpr CTypeInfo Get() {
- return CTypeInfo(CTypeInfo::kCallbackOptionsType);
- }
-};
-
// Helper to count the number of occurrences of `T` in `List`
template <typename T, typename... List>
struct count : std::integral_constant<int, 0> {};
@@ -312,108 +351,179 @@ struct count<T, T, Args...>
template <typename T, typename U, typename... Args>
struct count<T, U, Args...> : count<T, Args...> {};
-template <typename R, typename... Args>
+template <typename RetBuilder, typename... ArgBuilders>
class CFunctionInfoImpl : public CFunctionInfo {
- public:
static constexpr int kOptionsArgCount =
- count<FastApiCallbackOptions&, Args...>();
+ count<FastApiCallbackOptions&, ArgBuilders...>();
static constexpr int kReceiverCount = 1;
- CFunctionInfoImpl()
- : return_info_(internal::GetCType<R>::Get()),
- arg_count_(sizeof...(Args) - kOptionsArgCount),
- arg_info_{internal::GetCType<Args>::Get()...} {
- static_assert(kOptionsArgCount == 0 || kOptionsArgCount == 1,
- "Only one options parameter is supported.");
- static_assert(sizeof...(Args) >= kOptionsArgCount + kReceiverCount,
- "The receiver or the fallback argument is missing.");
- constexpr CTypeInfo::Type type = internal::GetCType<R>::Get().GetType();
- static_assert(type == CTypeInfo::Type::kVoid ||
- type == CTypeInfo::Type::kBool ||
- type == CTypeInfo::Type::kInt32 ||
- type == CTypeInfo::Type::kUint32 ||
- type == CTypeInfo::Type::kFloat32 ||
- type == CTypeInfo::Type::kFloat64,
+
+ static_assert(kOptionsArgCount == 0 || kOptionsArgCount == 1,
+ "Only one options parameter is supported.");
+
+ static_assert(sizeof...(ArgBuilders) >= kOptionsArgCount + kReceiverCount,
+ "The receiver or the options argument is missing.");
+
+ public:
+ constexpr CFunctionInfoImpl()
+ : CFunctionInfo(RetBuilder::Build(), sizeof...(ArgBuilders),
+ arg_info_storage_),
+ arg_info_storage_{ArgBuilders::Build()...} {
+ constexpr CTypeInfo::Type kReturnType = RetBuilder::Build().GetType();
+ static_assert(kReturnType == CTypeInfo::Type::kVoid ||
+ kReturnType == CTypeInfo::Type::kBool ||
+ kReturnType == CTypeInfo::Type::kInt32 ||
+ kReturnType == CTypeInfo::Type::kUint32 ||
+ kReturnType == CTypeInfo::Type::kFloat32 ||
+ kReturnType == CTypeInfo::Type::kFloat64,
"64-bit int and api object values are not currently "
"supported return types.");
}
- const CTypeInfo& ReturnInfo() const override { return return_info_; }
- unsigned int ArgumentCount() const override { return arg_count_; }
- const CTypeInfo& ArgumentInfo(unsigned int index) const override {
- if (index >= ArgumentCount()) {
- return CTypeInfo::Invalid();
- }
- return arg_info_[index];
- }
- bool HasOptions() const override { return kOptionsArgCount == 1; }
-
private:
- const CTypeInfo return_info_;
- const unsigned int arg_count_;
- const CTypeInfo arg_info_[sizeof...(Args)];
+ const CTypeInfo arg_info_storage_[sizeof...(ArgBuilders)];
};
-} // namespace internal
+template <typename T>
+struct TypeInfoHelper {
+ static_assert(sizeof(T) != sizeof(T), "This type is not supported");
+};
-class V8_EXPORT CFunction {
- public:
- constexpr CFunction() : address_(nullptr), type_info_(nullptr) {}
+#define SPECIALIZE_GET_TYPE_INFO_HELPER_FOR(T, Enum) \
+ template <> \
+ struct TypeInfoHelper<T> { \
+ static constexpr CTypeInfo::Flags Flags() { \
+ return CTypeInfo::Flags::kNone; \
+ } \
+ \
+ static constexpr CTypeInfo::Type Type() { return CTypeInfo::Type::Enum; } \
+ };
- const CTypeInfo& ReturnInfo() const { return type_info_->ReturnInfo(); }
+#define BASIC_C_TYPES(V) \
+ V(void, kVoid) \
+ V(bool, kBool) \
+ V(int32_t, kInt32) \
+ V(uint32_t, kUint32) \
+ V(int64_t, kInt64) \
+ V(uint64_t, kUint64) \
+ V(float, kFloat32) \
+ V(double, kFloat64) \
+ V(ApiObject, kV8Value)
- const CTypeInfo& ArgumentInfo(unsigned int index) const {
- return type_info_->ArgumentInfo(index);
+BASIC_C_TYPES(SPECIALIZE_GET_TYPE_INFO_HELPER_FOR)
+
+#undef BASIC_C_TYPES
+
+template <>
+struct TypeInfoHelper<FastApiCallbackOptions&> {
+ static constexpr CTypeInfo::Flags Flags() { return CTypeInfo::Flags::kNone; }
+
+ static constexpr CTypeInfo::Type Type() {
+ return CTypeInfo::kCallbackOptionsType;
}
+};
- unsigned int ArgumentCount() const { return type_info_->ArgumentCount(); }
+template <typename T, CTypeInfo::Flags... Flags>
+class CTypeInfoBuilder {
+ public:
+ using BaseType = T;
+
+ static constexpr CTypeInfo Build() {
+ // Get the flags and merge in any additional flags.
+ uint8_t flags = uint8_t(TypeInfoHelper<T>::Flags());
+ int unused[] = {0, (flags |= uint8_t(Flags), 0)...};
+ // With C++17, we could use a "..." fold expression over a parameter pack.
+ // Since we're still using C++14, we have to evaluate an OR expresion while
+ // constructing an unused list of 0's. This applies the binary operator
+ // for each value in Flags.
+ (void)unused;
+
+ // Return the same type with the merged flags.
+ return CTypeInfo(TypeInfoHelper<T>::Type(), CTypeInfo::Flags(flags));
+ }
+};
- const void* GetAddress() const { return address_; }
- const CFunctionInfo* GetTypeInfo() const { return type_info_; }
+template <typename RetBuilder, typename... ArgBuilders>
+class CFunctionBuilderWithFunction {
+ public:
+ explicit constexpr CFunctionBuilderWithFunction(const void* fn) : fn_(fn) {}
- template <typename F>
- static CFunction Make(F* func) {
- return ArgUnwrap<F*>::Make(func);
+ template <CTypeInfo::Flags... Flags>
+ constexpr auto Ret() {
+ return CFunctionBuilderWithFunction<
+ CTypeInfoBuilder<typename RetBuilder::BaseType, Flags...>,
+ ArgBuilders...>(fn_);
}
- template <typename F>
- V8_DEPRECATED("Use CFunction::Make instead.")
- static CFunction MakeWithFallbackSupport(F* func) {
- return ArgUnwrap<F*>::Make(func);
+ template <unsigned int N, CTypeInfo::Flags... Flags>
+ constexpr auto Arg() {
+ // Return a copy of the builder with the Nth arg builder merged with
+ // template parameter pack Flags.
+ return ArgImpl<N, Flags...>(
+ std::make_index_sequence<sizeof...(ArgBuilders)>());
}
- template <typename F>
- static CFunction Make(F* func, const CFunctionInfo* type_info) {
- return CFunction(reinterpret_cast<const void*>(func), type_info);
+ auto Build() {
+ static CFunctionInfoImpl<RetBuilder, ArgBuilders...> instance;
+ return CFunction(fn_, &instance);
}
private:
- const void* address_;
- const CFunctionInfo* type_info_;
+ template <bool Merge, unsigned int N, CTypeInfo::Flags... Flags>
+ struct GetArgBuilder;
+
+ // Returns the same ArgBuilder as the one at index N, including its flags.
+ // Flags in the template parameter pack are ignored.
+ template <unsigned int N, CTypeInfo::Flags... Flags>
+ struct GetArgBuilder<false, N, Flags...> {
+ using type =
+ typename std::tuple_element<N, std::tuple<ArgBuilders...>>::type;
+ };
- CFunction(const void* address, const CFunctionInfo* type_info);
+ // Returns an ArgBuilder with the same base type as the one at index N,
+ // but merges the flags with the flags in the template parameter pack.
+ template <unsigned int N, CTypeInfo::Flags... Flags>
+ struct GetArgBuilder<true, N, Flags...> {
+ using type = CTypeInfoBuilder<
+ typename std::tuple_element<N,
+ std::tuple<ArgBuilders...>>::type::BaseType,
+ std::tuple_element<N, std::tuple<ArgBuilders...>>::type::Build()
+ .GetFlags(),
+ Flags...>;
+ };
- template <typename R, typename... Args>
- static CFunctionInfo* GetCFunctionInfo() {
- static internal::CFunctionInfoImpl<R, Args...> instance;
- return &instance;
+ // Return a copy of the CFunctionBuilder, but merge the Flags on ArgBuilder
+ // index N with the new Flags passed in the template parameter pack.
+ template <unsigned int N, CTypeInfo::Flags... Flags, size_t... I>
+ constexpr auto ArgImpl(std::index_sequence<I...>) {
+ return CFunctionBuilderWithFunction<
+ RetBuilder, typename GetArgBuilder<N == I, I, Flags...>::type...>(fn_);
}
- template <typename F>
- class ArgUnwrap {
- static_assert(sizeof(F) != sizeof(F),
- "CFunction must be created from a function pointer.");
- };
+ const void* fn_;
+};
+
+class CFunctionBuilder {
+ public:
+ constexpr CFunctionBuilder() {}
template <typename R, typename... Args>
- class ArgUnwrap<R (*)(Args...)> {
- public:
- static CFunction Make(R (*func)(Args...)) {
- return CFunction(reinterpret_cast<const void*>(func),
- GetCFunctionInfo<R, Args...>());
- }
- };
+ constexpr auto Fn(R (*fn)(Args...)) {
+ return CFunctionBuilderWithFunction<CTypeInfoBuilder<R>,
+ CTypeInfoBuilder<Args>...>(
+ reinterpret_cast<const void*>(fn));
+ }
};
+} // namespace internal
+
+// static
+template <typename R, typename... Args>
+CFunction CFunction::ArgUnwrap<R (*)(Args...)>::Make(R (*func)(Args...)) {
+ return internal::CFunctionBuilder().Fn(func).Build();
+}
+
+using CFunctionBuilder = internal::CFunctionBuilder;
+
} // namespace v8
#endif // INCLUDE_V8_FAST_API_CALLS_H_
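
A hedged sketch of the new builder-based construction path that replaces the removed GetCType machinery; FastAdd is an illustrative fast-call target whose first parameter is the receiver.

#include <cstdint>
#include "v8-fast-api-calls.h"

static int32_t FastAdd(v8::ApiObject receiver, int32_t a, int32_t b) {
  return a + b;
}

// Equivalent to v8::CFunction::Make(&FastAdd); the builder additionally
// allows tweaking return/argument CTypeInfo flags via Ret<>() and Arg<N>().
v8::CFunction MakeFastAdd() {
  return v8::CFunctionBuilder().Fn(&FastAdd).Build();
}
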
diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h
index 8abbcfb416b..eb18f76504d 100644
--- a/deps/v8/include/v8-internal.h
+++ b/deps/v8/include/v8-internal.h
@@ -358,8 +358,9 @@ class Internals {
internal::Address heap_object_ptr, int offset) {
#ifdef V8_COMPRESS_POINTERS
uint32_t value = ReadRawField<uint32_t>(heap_object_ptr, offset);
- internal::Address root = GetRootFromOnHeapAddress(heap_object_ptr);
- return root + static_cast<internal::Address>(static_cast<uintptr_t>(value));
+ internal::Address base =
+ GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr);
+ return base + static_cast<internal::Address>(static_cast<uintptr_t>(value));
#else
return ReadRawField<internal::Address>(heap_object_ptr, offset);
#endif
@@ -411,18 +412,19 @@ class Internals {
#ifdef V8_COMPRESS_POINTERS
// See v8:7703 or src/ptr-compr.* for details about pointer compression.
- static constexpr size_t kPtrComprHeapReservationSize = size_t{1} << 32;
- static constexpr size_t kPtrComprIsolateRootAlignment = size_t{1} << 32;
+ static constexpr size_t kPtrComprCageReservationSize = size_t{1} << 32;
+ static constexpr size_t kPtrComprCageBaseAlignment = size_t{1} << 32;
- V8_INLINE static internal::Address GetRootFromOnHeapAddress(
+ V8_INLINE static internal::Address GetPtrComprCageBaseFromOnHeapAddress(
internal::Address addr) {
- return addr & -static_cast<intptr_t>(kPtrComprIsolateRootAlignment);
+ return addr & -static_cast<intptr_t>(kPtrComprCageBaseAlignment);
}
V8_INLINE static internal::Address DecompressTaggedAnyField(
internal::Address heap_object_ptr, uint32_t value) {
- internal::Address root = GetRootFromOnHeapAddress(heap_object_ptr);
- return root + static_cast<internal::Address>(static_cast<uintptr_t>(value));
+ internal::Address base =
+ GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr);
+ return base + static_cast<internal::Address>(static_cast<uintptr_t>(value));
}
#endif // V8_COMPRESS_POINTERS
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index e27d26cb692..fc9a357feb6 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -181,9 +181,8 @@ class JobDelegate {
/**
* Returns true if the current task is called from the thread currently
* running JobHandle::Join().
- * TODO(etiennep): Make pure virtual once custom embedders implement it.
*/
- virtual bool IsJoiningThread() const { return false; }
+ virtual bool IsJoiningThread() const = 0;
};
/**
@@ -220,19 +219,14 @@ class JobHandle {
* Forces all existing workers to yield ASAP but doesn’t wait for them.
* Warning, this is dangerous if the Job's callback is bound to or has access
* to state which may be deleted after this call.
- * TODO(etiennep): Cleanup once implemented by all embedders.
*/
- virtual void CancelAndDetach() { Cancel(); }
+ virtual void CancelAndDetach() = 0;
/**
* Returns true if there's any work pending or any worker running.
*/
virtual bool IsActive() = 0;
- // TODO(etiennep): Clean up once all overrides are removed.
- V8_DEPRECATED("Use !IsActive() instead.")
- virtual bool IsCompleted() { return !IsActive(); }
-
/**
* Returns true if associated with a Job and other methods may be called.
* Returns false after Join() or Cancel() was called. This may return true
@@ -240,10 +234,6 @@ class JobHandle {
*/
virtual bool IsValid() = 0;
- // TODO(etiennep): Clean up once all overrides are removed.
- V8_DEPRECATED("Use IsValid() instead.")
- virtual bool IsRunning() { return IsValid(); }
-
/**
* Returns true if job priority can be changed.
*/
@@ -272,10 +262,6 @@ class JobTask {
* it must not call back any JobHandle methods.
*/
virtual size_t GetMaxConcurrency(size_t worker_count) const = 0;
-
- // TODO(1114823): Clean up once all overrides are removed.
- V8_DEPRECATED("Use the version that takes |worker_count|.")
- virtual size_t GetMaxConcurrency() const { return 0; }
};
/**
@@ -408,7 +394,6 @@ class PageAllocator {
kNoAccess,
kRead,
kReadWrite,
- // TODO(hpayer): Remove this flag. Memory should never be rwx.
kReadWriteExecute,
kReadExecute,
// Set this when reserving memory that will later require kReadWriteExecute
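
With the deprecated parameterless overloads removed in the v8-platform.h changes above, embedder JobTask implementations must provide the worker_count-aware GetMaxConcurrency(), and platform JobHandle/JobDelegate implementations must now implement CancelAndDetach() and IsJoiningThread(). A hedged sketch of a conforming JobTask (the work representation is illustrative only):

#include <atomic>
#include <cstddef>
#include "v8-platform.h"

class ParallelWork final : public v8::JobTask {
 public:
  explicit ParallelWork(std::atomic<size_t>* remaining_items)
      : remaining_items_(remaining_items) {}

  void Run(v8::JobDelegate* delegate) override {
    while (remaining_items_->load(std::memory_order_relaxed) > 0 &&
           !delegate->ShouldYield()) {
      // ... process one item ...
      remaining_items_->fetch_sub(1, std::memory_order_relaxed);
    }
  }

  size_t GetMaxConcurrency(size_t worker_count) const override {
    // Items are only decremented after they are processed, so the remaining
    // count alone bounds useful concurrency here; worker_count would be added
    // instead if items were claimed before being processed.
    return remaining_items_->load(std::memory_order_relaxed);
  }

 private:
  std::atomic<size_t>* remaining_items_;
};
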
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 0e562ccd6f4..747b33f6da1 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 9
-#define V8_MINOR_VERSION 0
-#define V8_BUILD_NUMBER 257
-#define V8_PATCH_LEVEL 25
+#define V8_MINOR_VERSION 1
+#define V8_BUILD_NUMBER 269
+#define V8_PATCH_LEVEL 36
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index e4448db1910..6b672ca750c 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -1427,9 +1427,7 @@ class ScriptOriginOptions {
*/
class ScriptOrigin {
public:
-#if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */
V8_DEPRECATE_SOON("Use constructor with primitive C++ types")
-#endif
V8_INLINE explicit ScriptOrigin(
Local<Value> resource_name, Local<Integer> resource_line_offset,
Local<Integer> resource_column_offset,
@@ -1440,9 +1438,7 @@ class ScriptOrigin {
Local<Boolean> is_wasm = Local<Boolean>(),
Local<Boolean> is_module = Local<Boolean>(),
Local<PrimitiveArray> host_defined_options = Local<PrimitiveArray>());
-#if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */
V8_DEPRECATE_SOON("Use constructor that takes an isolate")
-#endif
V8_INLINE explicit ScriptOrigin(
Local<Value> resource_name, int resource_line_offset = 0,
int resource_column_offset = 0,
@@ -1495,7 +1491,7 @@ class V8_EXPORT UnboundScript {
*/
Local<Script> BindToCurrentContext();
- int GetId();
+ int GetId() const;
Local<Value> GetScriptName();
/**
@@ -1653,7 +1649,7 @@ class V8_EXPORT Module : public Data {
*/
int GetIdentityHash() const;
- using ResolveCallback =
+ using ResolveCallback V8_DEPRECATE_SOON("Use ResolveModuleCallback") =
MaybeLocal<Module> (*)(Local<Context> context, Local<String> specifier,
Local<Module> referrer);
using ResolveModuleCallback = MaybeLocal<Module> (*)(
@@ -1705,7 +1701,7 @@ class V8_EXPORT Module : public Data {
*
* The module must be a SourceTextModule and must not have a kErrored status.
*/
- int ScriptId();
+ int ScriptId() const;
/**
* Returns whether this module or any of its requested modules is async,
@@ -1940,13 +1936,11 @@ class V8_EXPORT ScriptCompiler {
*/
class V8_EXPORT StreamedSource {
public:
- enum Encoding { ONE_BYTE, TWO_BYTE, UTF8 };
+ enum Encoding { ONE_BYTE, TWO_BYTE, UTF8, WINDOWS_1252 };
-#if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */
V8_DEPRECATED(
"This class takes ownership of source_stream, so use the constructor "
"taking a unique_ptr to make these semantics clearer")
-#endif
StreamedSource(ExternalSourceStream* source_stream, Encoding encoding);
StreamedSource(std::unique_ptr<ExternalSourceStream> source_stream,
Encoding encoding);
@@ -2013,7 +2007,8 @@ class V8_EXPORT ScriptCompiler {
*
* Note that when producing cached data, the source must point to NULL for
* cached data. When consuming cached data, the cached data must have been
- * produced by the same version of V8.
+ * produced by the same version of V8, and the embedder needs to ensure the
+ * cached data is the correct one for the given script.
*
* \param source Script source code.
* \return Compiled script object (context independent; for running it must be
@@ -2173,6 +2168,8 @@ class V8_EXPORT Message {
*/
Isolate* GetIsolate() const;
+ V8_WARN_UNUSED_RESULT MaybeLocal<String> GetSource(
+ Local<Context> context) const;
V8_WARN_UNUSED_RESULT MaybeLocal<String> GetSourceLine(
Local<Context> context) const;
@@ -2348,6 +2345,17 @@ class V8_EXPORT StackFrame {
Local<String> GetScriptNameOrSourceURL() const;
/**
+ * Returns the source of the script for the function for this StackFrame.
+ */
+ Local<String> GetScriptSource() const;
+
+ /**
+ * Returns the source mapping URL (if one is present) of the script for
+ * the function for this StackFrame.
+ */
+ Local<String> GetScriptSourceMappingURL() const;
+
+ /**
* Returns the name of the function associated with this stack frame.
*/
Local<String> GetFunctionName() const;
@@ -3254,6 +3262,11 @@ class V8_EXPORT String : public Name {
V8_INLINE static Local<String> Empty(Isolate* isolate);
/**
+ * Returns true if the string is external.
+ */
+ bool IsExternal() const;
+
+ /**
* Returns true if the string is both external and two-byte.
*/
bool IsExternalTwoByte() const;
@@ -3528,12 +3541,12 @@ class V8_EXPORT String : public Name {
/**
* Returns true if this string can be made external.
*/
- bool CanMakeExternal();
+ bool CanMakeExternal() const;
/**
* Returns true if the strings values are equal. Same as JS ==/===.
*/
- bool StringEquals(Local<String> str);
+ bool StringEquals(Local<String> str) const;
/**
* Converts an object to a UTF-8-encoded character array. Useful if
@@ -4143,7 +4156,7 @@ class V8_EXPORT Object : public Value {
Maybe<bool> SetIntegrityLevel(Local<Context> context, IntegrityLevel level);
/** Gets the number of internal fields for this Object. */
- int InternalFieldCount();
+ int InternalFieldCount() const;
/** Same as above, but works for PersistentBase. */
V8_INLINE static int InternalFieldCount(
@@ -4253,10 +4266,10 @@ class V8_EXPORT Object : public Value {
Local<Context> context, Local<Name> key);
/** Tests for a named lookup interceptor.*/
- bool HasNamedLookupInterceptor();
+ bool HasNamedLookupInterceptor() const;
/** Tests for an index lookup interceptor.*/
- bool HasIndexedLookupInterceptor();
+ bool HasIndexedLookupInterceptor() const;
/**
* Returns the identity hash for this object. The current implementation
@@ -4296,12 +4309,12 @@ class V8_EXPORT Object : public Value {
* ObjectTemplate::SetCallAsFunctionHandler method.
* When an Object is callable this method returns true.
*/
- bool IsCallable();
+ bool IsCallable() const;
/**
* True if this object is a constructor.
*/
- bool IsConstructor();
+ bool IsConstructor() const;
/**
* True if this object can carry information relevant to the embedder in its
@@ -4310,14 +4323,14 @@ class V8_EXPORT Object : public Value {
* V8 automatically adds internal fields at compile time, such as e.g.
* v8::ArrayBuffer.
*/
- bool IsApiWrapper();
+ bool IsApiWrapper() const;
/**
* True if this object was created from an object template which was marked
* as undetectable. See v8::ObjectTemplate::MarkAsUndetectable for more
* information.
*/
- bool IsUndetectable();
+ bool IsUndetectable() const;
/**
* Call an Object as a function if a callback is set by the
@@ -4376,7 +4389,7 @@ class V8_EXPORT Object : public Value {
*
* See also: v8::ObjectTemplate::SetCodeLike
*/
- bool IsCodeLike(Isolate* isolate);
+ bool IsCodeLike(Isolate* isolate) const;
private:
Object();
@@ -4874,7 +4887,7 @@ class V8_EXPORT Promise : public Object {
* Returns true if the promise has at least one derived promise, and
* therefore resolve/reject handlers (including default handler).
*/
- bool HasHandler();
+ bool HasHandler() const;
/**
* Returns the content of the [[PromiseResult]] field. The Promise must not
@@ -4982,7 +4995,7 @@ class V8_EXPORT Proxy : public Object {
public:
Local<Value> GetTarget();
Local<Value> GetHandler();
- bool IsRevoked();
+ bool IsRevoked() const;
void Revoke();
/**
@@ -5398,57 +5411,6 @@ class V8_EXPORT ArrayBuffer : public Object {
};
/**
- * The contents of an |ArrayBuffer|. Externalization of |ArrayBuffer|
- * returns an instance of this class, populated, with a pointer to data
- * and byte length.
- *
- * The Data pointer of ArrayBuffer::Contents must be freed using the provided
- * deleter, which will call ArrayBuffer::Allocator::Free if the buffer
- * was allocated with ArraryBuffer::Allocator::Allocate.
- */
- class V8_EXPORT Contents { // NOLINT
- public:
- using DeleterCallback = void (*)(void* buffer, size_t length, void* info);
-
- Contents()
- : data_(nullptr),
- byte_length_(0),
- allocation_base_(nullptr),
- allocation_length_(0),
- allocation_mode_(Allocator::AllocationMode::kNormal),
- deleter_(nullptr),
- deleter_data_(nullptr) {}
-
- void* AllocationBase() const { return allocation_base_; }
- size_t AllocationLength() const { return allocation_length_; }
- Allocator::AllocationMode AllocationMode() const {
- return allocation_mode_;
- }
-
- void* Data() const { return data_; }
- size_t ByteLength() const { return byte_length_; }
- DeleterCallback Deleter() const { return deleter_; }
- void* DeleterData() const { return deleter_data_; }
-
- private:
- Contents(void* data, size_t byte_length, void* allocation_base,
- size_t allocation_length,
- Allocator::AllocationMode allocation_mode, DeleterCallback deleter,
- void* deleter_data);
-
- void* data_;
- size_t byte_length_;
- void* allocation_base_;
- size_t allocation_length_;
- Allocator::AllocationMode allocation_mode_;
- DeleterCallback deleter_;
- void* deleter_data_;
-
- friend class ArrayBuffer;
- };
-
-
- /**
* Data length in bytes.
*/
size_t ByteLength() const;
@@ -5462,22 +5424,6 @@ class V8_EXPORT ArrayBuffer : public Object {
static Local<ArrayBuffer> New(Isolate* isolate, size_t byte_length);
/**
- * Create a new ArrayBuffer over an existing memory block.
- * The created array buffer is by default immediately in externalized state.
- * In externalized state, the memory block will not be reclaimed when a
- * created ArrayBuffer is garbage-collected.
- * In internalized state, the memory block will be released using
- * |Allocator::Free| once all ArrayBuffers referencing it are collected by
- * the garbage collector.
- */
- V8_DEPRECATED(
- "Use the version that takes a BackingStore. "
- "See http://crbug.com/v8/9908.")
- static Local<ArrayBuffer> New(
- Isolate* isolate, void* data, size_t byte_length,
- ArrayBufferCreationMode mode = ArrayBufferCreationMode::kExternalized);
-
- /**
* Create a new ArrayBuffer with an existing backing store.
* The created array keeps a reference to the backing store until the array
* is garbage collected. Note that the IsExternal bit does not affect this
@@ -5516,15 +5462,6 @@ class V8_EXPORT ArrayBuffer : public Object {
void* deleter_data);
/**
- * Returns true if ArrayBuffer is externalized, that is, does not
- * own its memory block.
- */
- V8_DEPRECATED(
- "With v8::BackingStore externalized ArrayBuffers are "
- "the same as ordinary ArrayBuffers. See http://crbug.com/v8/9908.")
- bool IsExternal() const;
-
- /**
* Returns true if this ArrayBuffer may be detached.
*/
bool IsDetachable() const;
@@ -5538,46 +5475,10 @@ class V8_EXPORT ArrayBuffer : public Object {
void Detach();
/**
- * Make this ArrayBuffer external. The pointer to underlying memory block
- * and byte length are returned as |Contents| structure. After ArrayBuffer
- * had been externalized, it does no longer own the memory block. The caller
- * should take steps to free memory when it is no longer needed.
- *
- * The Data pointer of ArrayBuffer::Contents must be freed using the provided
- * deleter, which will call ArrayBuffer::Allocator::Free if the buffer
- * was allocated with ArrayBuffer::Allocator::Allocate.
- */
- V8_DEPRECATED("Use GetBackingStore or Detach. See http://crbug.com/v8/9908.")
- Contents Externalize();
-
- /**
- * Marks this ArrayBuffer external given a witness that the embedder
- * has fetched the backing store using the new GetBackingStore() function.
- *
- * With the new lifetime management of backing stores there is no need for
- * externalizing, so this function exists only to make the transition easier.
- */
- V8_DEPRECATED("This will be removed together with IsExternal.")
- void Externalize(const std::shared_ptr<BackingStore>& backing_store);
-
- /**
- * Get a pointer to the ArrayBuffer's underlying memory block without
- * externalizing it. If the ArrayBuffer is not externalized, this pointer
- * will become invalid as soon as the ArrayBuffer gets garbage collected.
- *
- * The embedder should make sure to hold a strong reference to the
- * ArrayBuffer while accessing this pointer.
- */
- V8_DEPRECATED("Use GetBackingStore. See http://crbug.com/v8/9908.")
- Contents GetContents();
-
- /**
* Get a shared pointer to the backing store of this array buffer. This
* pointer coordinates the lifetime management of the internal storage
* with any live ArrayBuffers on the heap, even across isolates. The embedder
* should not attempt to manage lifetime of the storage through other means.
- *
- * This function replaces both Externalize() and GetContents().
*/
std::shared_ptr<BackingStore> GetBackingStore();
@@ -5589,7 +5490,6 @@ class V8_EXPORT ArrayBuffer : public Object {
private:
ArrayBuffer();
static void CheckCast(Value* obj);
- Contents GetContents(bool externalize);
};
@@ -5883,57 +5783,6 @@ class V8_EXPORT DataView : public ArrayBufferView {
class V8_EXPORT SharedArrayBuffer : public Object {
public:
/**
- * The contents of an |SharedArrayBuffer|. Externalization of
- * |SharedArrayBuffer| returns an instance of this class, populated, with a
- * pointer to data and byte length.
- *
- * The Data pointer of ArrayBuffer::Contents must be freed using the provided
- * deleter, which will call ArrayBuffer::Allocator::Free if the buffer
- * was allocated with ArraryBuffer::Allocator::Allocate.
- */
- class V8_EXPORT Contents { // NOLINT
- public:
- using Allocator = v8::ArrayBuffer::Allocator;
- using DeleterCallback = void (*)(void* buffer, size_t length, void* info);
-
- Contents()
- : data_(nullptr),
- byte_length_(0),
- allocation_base_(nullptr),
- allocation_length_(0),
- allocation_mode_(Allocator::AllocationMode::kNormal),
- deleter_(nullptr),
- deleter_data_(nullptr) {}
-
- void* AllocationBase() const { return allocation_base_; }
- size_t AllocationLength() const { return allocation_length_; }
- Allocator::AllocationMode AllocationMode() const {
- return allocation_mode_;
- }
-
- void* Data() const { return data_; }
- size_t ByteLength() const { return byte_length_; }
- DeleterCallback Deleter() const { return deleter_; }
- void* DeleterData() const { return deleter_data_; }
-
- private:
- Contents(void* data, size_t byte_length, void* allocation_base,
- size_t allocation_length,
- Allocator::AllocationMode allocation_mode, DeleterCallback deleter,
- void* deleter_data);
-
- void* data_;
- size_t byte_length_;
- void* allocation_base_;
- size_t allocation_length_;
- Allocator::AllocationMode allocation_mode_;
- DeleterCallback deleter_;
- void* deleter_data_;
-
- friend class SharedArrayBuffer;
- };
-
- /**
* Data length in bytes.
*/
size_t ByteLength() const;
@@ -5947,19 +5796,6 @@ class V8_EXPORT SharedArrayBuffer : public Object {
static Local<SharedArrayBuffer> New(Isolate* isolate, size_t byte_length);
/**
- * Create a new SharedArrayBuffer over an existing memory block. The created
- * array buffer is immediately in externalized state unless otherwise
- * specified. The memory block will not be reclaimed when a created
- * SharedArrayBuffer is garbage-collected.
- */
- V8_DEPRECATED(
- "Use the version that takes a BackingStore. "
- "See http://crbug.com/v8/9908.")
- static Local<SharedArrayBuffer> New(
- Isolate* isolate, void* data, size_t byte_length,
- ArrayBufferCreationMode mode = ArrayBufferCreationMode::kExternalized);
-
- /**
* Create a new SharedArrayBuffer with an existing backing store.
* The created array keeps a reference to the backing store until the array
* is garbage collected. Note that the IsExternal bit does not affect this
@@ -5998,72 +5834,10 @@ class V8_EXPORT SharedArrayBuffer : public Object {
void* deleter_data);
/**
- * Create a new SharedArrayBuffer over an existing memory block. Propagate
- * flags to indicate whether the underlying buffer can be grown.
- */
- V8_DEPRECATED(
- "Use the version that takes a BackingStore. "
- "See http://crbug.com/v8/9908.")
- static Local<SharedArrayBuffer> New(
- Isolate* isolate, const SharedArrayBuffer::Contents&,
- ArrayBufferCreationMode mode = ArrayBufferCreationMode::kExternalized);
-
- /**
- * Returns true if SharedArrayBuffer is externalized, that is, does not
- * own its memory block.
- */
- V8_DEPRECATED(
- "With v8::BackingStore externalized SharedArrayBuffers are the same "
- "as ordinary SharedArrayBuffers. See http://crbug.com/v8/9908.")
- bool IsExternal() const;
-
- /**
- * Make this SharedArrayBuffer external. The pointer to underlying memory
- * block and byte length are returned as |Contents| structure. After
- * SharedArrayBuffer had been externalized, it does no longer own the memory
- * block. The caller should take steps to free memory when it is no longer
- * needed.
- *
- * The memory block is guaranteed to be allocated with |Allocator::Allocate|
- * by the allocator specified in
- * v8::Isolate::CreateParams::array_buffer_allocator.
- *
- */
- V8_DEPRECATED("Use GetBackingStore or Detach. See http://crbug.com/v8/9908.")
- Contents Externalize();
-
- /**
- * Marks this SharedArrayBuffer external given a witness that the embedder
- * has fetched the backing store using the new GetBackingStore() function.
- *
- * With the new lifetime management of backing stores there is no need for
- * externalizing, so this function exists only to make the transition easier.
- */
- V8_DEPRECATED("This will be removed together with IsExternal.")
- void Externalize(const std::shared_ptr<BackingStore>& backing_store);
-
- /**
- * Get a pointer to the ArrayBuffer's underlying memory block without
- * externalizing it. If the ArrayBuffer is not externalized, this pointer
- * will become invalid as soon as the ArrayBuffer became garbage collected.
- *
- * The embedder should make sure to hold a strong reference to the
- * ArrayBuffer while accessing this pointer.
- *
- * The memory block is guaranteed to be allocated with |Allocator::Allocate|
- * by the allocator specified in
- * v8::Isolate::CreateParams::array_buffer_allocator.
- */
- V8_DEPRECATED("Use GetBackingStore. See http://crbug.com/v8/9908.")
- Contents GetContents();
-
- /**
* Get a shared pointer to the backing store of this array buffer. This
* pointer coordinates the lifetime management of the internal storage
* with any live ArrayBuffers on the heap, even across isolates. The embedder
* should not attempt to manage lifetime of the storage through other means.
- *
- * This function replaces both Externalize() and GetContents().
*/
std::shared_ptr<BackingStore> GetBackingStore();
@@ -6074,7 +5848,6 @@ class V8_EXPORT SharedArrayBuffer : public Object {
private:
SharedArrayBuffer();
static void CheckCast(Value* obj);
- Contents GetContents(bool externalize);
};
@@ -7174,7 +6947,7 @@ class V8_EXPORT ObjectTemplate : public Template {
* Gets the number of internal fields for objects generated from
* this template.
*/
- int InternalFieldCount();
+ int InternalFieldCount() const;
/**
* Sets the number of internal fields for objects generated from
@@ -7185,7 +6958,7 @@ class V8_EXPORT ObjectTemplate : public Template {
/**
* Returns true if the object will be an immutable prototype exotic object.
*/
- bool IsImmutableProto();
+ bool IsImmutableProto() const;
/**
* Makes the ObjectTemplate for an immutable prototype exotic object, with an
@@ -7203,7 +6976,7 @@ class V8_EXPORT ObjectTemplate : public Template {
* Reference: https://github.com/tc39/proposal-dynamic-code-brand-checks
*/
void SetCodeLike();
- bool IsCodeLike();
+ bool IsCodeLike() const;
V8_INLINE static ObjectTemplate* Cast(Data* data);
@@ -7508,7 +7281,8 @@ using CallCompletedCallback = void (*)(Isolate*);
* fails (e.g. due to stack overflow), the embedder must propagate
* that exception by returning an empty MaybeLocal.
*/
-using HostImportModuleDynamicallyCallback =
+using HostImportModuleDynamicallyCallback V8_DEPRECATE_SOON(
+ "Use HostImportModuleDynamicallyWithImportAssertionsCallback instead") =
MaybeLocal<Promise> (*)(Local<Context> context,
Local<ScriptOrModule> referrer,
Local<String> specifier);
@@ -9551,6 +9325,11 @@ class V8_EXPORT Isolate {
void SetRAILMode(RAILMode rail_mode);
/**
+ * Update the load start time of the RAIL mode.
+ */
+ void UpdateLoadStartTime();
+
+ /**
* Optional notification to tell V8 the current isolate is used for debugging
* and requires higher heap limit.
*/
@@ -9716,6 +9495,13 @@ class V8_EXPORT Isolate {
void SetWasmExceptionsEnabledCallback(WasmExceptionsEnabledCallback callback);
/**
+ * This function can be called by the embedder to signal V8 that the dynamic
+ * enabling of features has finished. V8 can now set up dynamically added
+ * features.
+ */
+ void InstallConditionalFeatures(Local<Context> context);
+
+ /**
* Check if V8 is dead and therefore unusable. This is the case after
* fatal errors such as out-of-memory situations.
*/
@@ -10756,7 +10542,7 @@ class V8_EXPORT Context : public Data {
* Returns true if code generation from strings is allowed for the context.
* For more details see AllowCodeGenerationFromStrings(bool) documentation.
*/
- bool IsCodeGenerationFromStringsAllowed();
+ bool IsCodeGenerationFromStringsAllowed() const;
/**
* Sets the error description for the exception that is thrown when
@@ -10795,18 +10581,6 @@ class V8_EXPORT Context : public Data {
void SetContinuationPreservedEmbedderData(Local<Value> context);
/**
- * Set or clear hooks to be invoked for promise lifecycle operations.
- * To clear a hook, set it to an empty v8::Function. Each function will
- * receive the observed promise as the first argument. If a chaining
- * operation is used on a promise, the init will additionally receive
- * the parent promise as the second argument.
- */
- void SetPromiseHooks(Local<Function> init_hook,
- Local<Function> before_hook,
- Local<Function> after_hook,
- Local<Function> resolve_hook);
-
- /**
* Stack-allocated class which sets the execution context for all
* operations executed within a local scope.
*/
diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl
index 2e66b1c99ea..c87192896ce 100644
--- a/deps/v8/infra/mb/mb_config.pyl
+++ b/deps/v8/infra/mb/mb_config.pyl
@@ -87,6 +87,7 @@
'V8 Mac64 GC Stress': 'debug_x64',
'V8 Mac64 ASAN': 'release_x64_asan_no_lsan',
'V8 Mac - arm64 - release builder': 'release_arm64',
+ 'V8 Mac - arm64 - debug builder': 'debug_arm64',
'V8 Mac - arm64 - sim - debug builder': 'debug_simulate_arm64',
'V8 Mac - arm64 - sim - release builder': 'release_simulate_arm64',
# Sanitizers.
diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl
index 7617d885de2..fc0d1c55b11 100644
--- a/deps/v8/infra/testing/builders.pyl
+++ b/deps/v8/infra/testing/builders.pyl
@@ -201,6 +201,33 @@
],
'shards': 3,
},
+ # No SSSE3.
+ {
+ 'name': 'mozilla',
+ 'suffix': 'nossse3',
+ 'test_args': [
+ '--extra-flags',
+ '--noenable-ssse3 --noenable-sse4-1 --noenable-avx',
+ ],
+ },
+ {
+ 'name': 'test262',
+ 'suffix': 'nossse3',
+ 'variant': 'default',
+ 'test_args': [
+ '--extra-flags',
+ '--noenable-ssse3 --noenable-sse4-1 --noenable-avx',
+ ],
+ },
+ {
+ 'name': 'v8testing',
+ 'suffix': 'nossse3',
+ 'test_args': [
+ '--extra-flags',
+ '--noenable-ssse3 --noenable-sse4-1 --noenable-avx',
+ ],
+ 'shards': 3,
+ },
# No SSE4.
{
'name': 'mozilla',
@@ -372,6 +399,8 @@
{'name': 'mjsunit', 'variant': 'stress_snapshot'},
# Experimental regexp engine.
{'name': 'mjsunit', 'variant': 'experimental_regexp'},
+ # Concurrent inlining.
+ {'name': 'mjsunit', 'variant': 'concurrent_inlining'},
],
},
'v8_linux64_msan_rel_ng_triggered': {
@@ -550,7 +579,7 @@
'os': 'Ubuntu-16.04',
},
'tests': [
- {'name': 'v8testing', 'shards': 12},
+ {'name': 'v8testing', 'variant': 'default', 'shards': 4},
],
},
##############################################################################
@@ -696,7 +725,7 @@
'v8_mac_arm64_rel_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'arm64',
- 'os': 'Mac-11.0',
+ 'os': 'Mac-11',
'pool': 'chromium.tests.mac-arm64',
},
'tests': [
@@ -706,7 +735,7 @@
'v8_mac_arm64_dbg_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'arm64',
- 'os': 'Mac-11.0',
+ 'os': 'Mac-11',
'pool': 'chromium.tests.mac-arm64',
},
'tests': [
@@ -716,7 +745,7 @@
'v8_mac_arm64_full_dbg_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'arm64',
- 'os': 'Mac-11.0',
+ 'os': 'Mac-11',
'pool': 'chromium.tests.mac-arm64',
},
'tests': [
@@ -800,6 +829,17 @@
'suffix': 'nosse3',
'test_args': ['--extra-flags', '--noenable-sse3 --noenable-ssse3 --noenable-sse4-1 --noenable-avx']
},
+ # Nossse3.
+ {
+ 'name': 'mozilla',
+ 'suffix': 'nossse3',
+ 'test_args': ['--extra-flags', '--noenable-ssse3 --noenable-sse4-1 --noenable-avx']
+ },
+ {
+ 'name': 'v8testing',
+ 'suffix': 'nossse3',
+ 'test_args': ['--extra-flags', '--noenable-ssse3 --noenable-sse4-1 --noenable-avx']
+ },
# Nosse4.
{
'name': 'mozilla',
@@ -893,6 +933,24 @@
'test_args': ['--extra-flags', '--noenable-sse3 --noenable-ssse3 --noenable-sse4-1 --noenable-avx'],
'shards': 3
},
+ # Nossse3.
+ {
+ 'name': 'mozilla',
+ 'suffix': 'nossse3',
+ 'test_args': ['--extra-flags', '--noenable-ssse3 --noenable-sse4-1 --noenable-avx']
+ },
+ {
+ 'name': 'test262',
+ 'suffix': 'nossse3',
+ 'variant': 'default',
+ 'test_args': ['--extra-flags', '--noenable-ssse3 --noenable-sse4-1 --noenable-avx']
+ },
+ {
+ 'name': 'v8testing',
+ 'suffix': 'nossse3',
+ 'test_args': ['--extra-flags', '--noenable-ssse3 --noenable-sse4-1 --noenable-avx'],
+ 'shards': 3
+ },
# Nosse4.
{
'name': 'mozilla',
@@ -935,11 +993,6 @@
'swarming_dimensions': {
'os': 'Ubuntu-16.04',
},
- 'swarming_task_attrs': {
- 'expiration': 14400,
- 'hard_timeout': 3600,
- 'priority': 35,
- },
'tests': [
{'name': 'v8testing', 'variant': 'default', 'shards': 4},
],
@@ -1138,6 +1191,8 @@
{'name': 'mjsunit', 'variant': 'stress_snapshot'},
# Experimental regexp engine.
{'name': 'mjsunit', 'variant': 'experimental_regexp'},
+ # Concurrent inlining.
+ {'name': 'mjsunit', 'variant': 'concurrent_inlining'},
],
},
'V8 Linux64 - debug - perfetto': {
@@ -1167,6 +1222,8 @@
{'name': 'mjsunit', 'variant': 'stress_snapshot'},
# Experimental regexp engine.
{'name': 'mjsunit', 'variant': 'experimental_regexp'},
+ # Concurrent inlining.
+ {'name': 'mjsunit', 'variant': 'concurrent_inlining'},
],
},
'V8 Linux64 - gcov coverage': {
@@ -1360,7 +1417,7 @@
},
'V8 Mac - arm64 - release': {
'swarming_dimensions' : {
- 'os': 'Mac-11.0',
+ 'os': 'Mac-11',
'cpu': 'arm64',
'pool': 'chromium.tests.mac-arm64',
},
@@ -1374,6 +1431,22 @@
{'name': 'd8testing', 'variant': 'extra'},
],
},
+ 'V8 Mac - arm64 - debug': {
+ 'swarming_dimensions' : {
+ 'os': 'Mac-11',
+ 'cpu': 'arm64',
+ 'pool': 'chromium.tests.mac-arm64',
+ },
+ 'swarming_task_attrs': {
+ 'expiration': 14400,
+ 'hard_timeout': 3600,
+ 'priority': 35,
+ },
+ 'tests': [
+ {'name': 'd8testing', 'shards': 2},
+ {'name': 'd8testing', 'variant': 'extra', 'shards': 2},
+ ],
+ },
'V8 Mac - arm64 - sim - debug': {
'swarming_dimensions' : {
'cpu': 'x86-64',
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index 2bdba94b46a..c3394e4b7a8 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -5,6 +5,9 @@ include_rules = [
"+src/asmjs/asm-js.h",
"-src/baseline",
"+src/baseline/baseline.h",
+ "+src/baseline/bytecode-offset-iterator.h",
+ "-src/bigint",
+ "+src/bigint/bigint.h",
"-src/compiler",
"+src/compiler/pipeline.h",
"+src/compiler/code-assembler.h",
@@ -36,7 +39,6 @@ include_rules = [
"+src/heap/conservative-stack-visitor.h",
"-src/inspector",
"-src/interpreter",
- "+src/interpreter/bytecode-array-accessor.h",
"+src/interpreter/bytecode-array-iterator.h",
"+src/interpreter/bytecode-array-random-iterator.h",
"+src/interpreter/bytecode-decoder.h",
@@ -71,4 +73,7 @@ specific_include_rules = {
"builtins-trace\.cc": [
"+protos/perfetto",
],
+ "system-jit-win\.cc": [
+ "+src/libplatform/tracing/recorder.h",
+ ],
}
diff --git a/deps/v8/src/api/api-inl.h b/deps/v8/src/api/api-inl.h
index 10c8fb064dd..849364e655c 100644
--- a/deps/v8/src/api/api-inl.h
+++ b/deps/v8/src/api/api-inl.h
@@ -6,6 +6,7 @@
#define V8_API_API_INL_H_
#include "src/api/api.h"
+#include "src/execution/interrupts-scope.h"
#include "src/execution/microtask-queue.h"
#include "src/handles/handles-inl.h"
#include "src/objects/foreign-inl.h"
diff --git a/deps/v8/src/api/api-natives.cc b/deps/v8/src/api/api-natives.cc
index 584d326aaf5..56bf5bd47c4 100644
--- a/deps/v8/src/api/api-natives.cc
+++ b/deps/v8/src/api/api-natives.cc
@@ -531,6 +531,7 @@ MaybeHandle<JSFunction> InstantiateFunction(
}
return MaybeHandle<JSFunction>();
}
+ data->set_published(true);
return function;
}
@@ -626,6 +627,8 @@ void ApiNatives::AddAccessorProperty(Isolate* isolate,
Handle<FunctionTemplateInfo> getter,
Handle<FunctionTemplateInfo> setter,
PropertyAttributes attributes) {
+ if (!getter.is_null()) getter->set_published(true);
+ if (!setter.is_null()) setter->set_published(true);
PropertyDetails details(kAccessor, attributes, PropertyConstness::kMutable);
auto details_handle = handle(details.AsSmi(), isolate);
Handle<Object> data[] = {name, details_handle, getter, setter};
@@ -701,7 +704,6 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
Handle<Map> map = isolate->factory()->NewMap(type, instance_size,
TERMINAL_FAST_ELEMENTS_KIND);
- JSFunction::SetInitialMap(result, map, Handle<JSObject>::cast(prototype));
// Mark as undetectable if needed.
if (obj->undetectable()) {
@@ -737,6 +739,8 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
if (immutable_proto) map->set_is_immutable_proto(true);
+ JSFunction::SetInitialMap(isolate, result, map,
+ Handle<JSObject>::cast(prototype));
return result;
}
diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc
index ca73b58a60b..a5c658a7992 100644
--- a/deps/v8/src/api/api.cc
+++ b/deps/v8/src/api/api.cc
@@ -66,6 +66,7 @@
#include "src/objects/embedder-data-slot-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/heap-object.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/js-promise-inl.h"
@@ -109,12 +110,16 @@
#include "src/trap-handler/trap-handler.h"
#include "src/utils/detachable-vector.h"
#include "src/utils/version.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/value-type.h"
#include "src/wasm/wasm-engine.h"
+#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
#include "src/wasm/wasm-serialization.h"
+#endif // V8_ENABLE_WEBASSEMBLY
#if V8_OS_LINUX || V8_OS_MACOSX || V8_OS_FREEBSD
#include <signal.h>
@@ -125,12 +130,16 @@
#if V8_OS_WIN
#include <versionhelpers.h>
#include <windows.h>
+
#include "include/v8-wasm-trap-handler-win.h"
#include "src/trap-handler/handler-inside-win.h"
#if defined(V8_OS_WIN64)
#include "src/base/platform/wrappers.h"
#include "src/diagnostics/unwinding-info-win64.h"
#endif // V8_OS_WIN64
+#if defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
+#include "src/diagnostics/system-jit-win.h"
+#endif
#endif // V8_OS_WIN
// Has to be the last include (doesn't have include guards):
@@ -150,13 +159,16 @@ static ScriptOrigin GetScriptOriginForScript(i::Isolate* isolate,
i::Handle<i::FixedArray> host_defined_options(script->host_defined_options(),
isolate);
ScriptOriginOptions options(script->origin_options());
+ bool is_wasm = false;
+#if V8_ENABLE_WEBASSEMBLY
+ is_wasm = script->type() == i::Script::TYPE_WASM;
+#endif // V8_ENABLE_WEBASSEMBLY
v8::ScriptOrigin origin(
reinterpret_cast<v8::Isolate*>(isolate), Utils::ToLocal(scriptName),
script->line_offset(), script->column_offset(),
options.IsSharedCrossOrigin(), script->id(),
- Utils::ToLocal(source_map_url), options.IsOpaque(),
- script->type() == i::Script::TYPE_WASM, options.IsModule(),
- Utils::PrimitiveArrayToLocal(host_defined_options));
+ Utils::ToLocal(source_map_url), options.IsOpaque(), is_wasm,
+ options.IsModule(), Utils::PrimitiveArrayToLocal(host_defined_options));
return origin;
}
@@ -925,11 +937,9 @@ bool Data::IsModule() const { return Utils::OpenHandle(this)->IsModule(); }
bool Data::IsValue() const {
i::DisallowGarbageCollection no_gc;
- i::Handle<i::Object> self = Utils::OpenHandle(this);
- if (self->IsSmi()) {
- return true;
- }
- i::HeapObject heap_object = i::HeapObject::cast(*self);
+ i::Object self = *Utils::OpenHandle(this);
+ if (self.IsSmi()) return true;
+ i::HeapObject heap_object = i::HeapObject::cast(self);
DCHECK(!heap_object.IsTheHole());
if (heap_object.IsSymbol()) {
return !i::Symbol::cast(heap_object).is_private();
@@ -1095,7 +1105,9 @@ void Template::Set(v8::Local<Name> name, v8::Local<Data> value,
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
auto value_obj = Utils::OpenHandle(*value);
- CHECK(!value_obj->IsJSReceiver() || value_obj->IsTemplateInfo());
+ Utils::ApiCheck(!value_obj->IsJSReceiver() || value_obj->IsTemplateInfo(),
+ "v8::Template::Set",
+ "Invalid value, must be a primitive or a Template");
if (value_obj->IsObjectTemplateInfo()) {
templ->set_serial_number(0);
if (templ->IsFunctionTemplateInfo()) {
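The CHECK-to-ApiCheck conversions in this and the following hunks change how API misuse is reported: instead of aborting inside V8, the failure is routed through the embedder-visible fatal error handling with a location and message. Conceptually, the contract these call sites assume looks roughly like this (a simplified sketch, not the exact implementation):

    // Simplified sketch of the Utils::ApiCheck contract assumed by these call sites.
    bool ApiCheck(bool condition, const char* location, const char* message) {
      if (!condition) ReportApiFailure(location, message);  // fatal error handler path
      return condition;
    }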
@@ -1163,24 +1175,31 @@ void FunctionTemplate::SetPrototypeProviderTemplate(
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::FunctionTemplateInfo> result =
Utils::OpenHandle(*prototype_provider);
- CHECK(self->GetPrototypeTemplate().IsUndefined(i_isolate));
- CHECK(self->GetParentTemplate().IsUndefined(i_isolate));
+ Utils::ApiCheck(self->GetPrototypeTemplate().IsUndefined(i_isolate),
+ "v8::FunctionTemplate::SetPrototypeProviderTemplate",
+ "Protoype must be undefiend");
+ Utils::ApiCheck(self->GetParentTemplate().IsUndefined(i_isolate),
+ "v8::FunctionTemplate::SetPrototypeProviderTemplate",
+ "Prototype provider must be empty");
i::FunctionTemplateInfo::SetPrototypeProviderTemplate(i_isolate, self,
result);
}
-static void EnsureNotInstantiated(i::Handle<i::FunctionTemplateInfo> info,
- const char* func) {
- Utils::ApiCheck(!info->instantiated(), func,
+static void EnsureNotPublished(i::Handle<i::FunctionTemplateInfo> info,
+ const char* func) {
+ DCHECK_IMPLIES(info->instantiated(), info->published());
+ Utils::ApiCheck(!info->published(), func,
"FunctionTemplate already instantiated");
}
void FunctionTemplate::Inherit(v8::Local<FunctionTemplate> value) {
auto info = Utils::OpenHandle(this);
- EnsureNotInstantiated(info, "v8::FunctionTemplate::Inherit");
+ EnsureNotPublished(info, "v8::FunctionTemplate::Inherit");
i::Isolate* i_isolate = info->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- CHECK(info->GetPrototypeProviderTemplate().IsUndefined(i_isolate));
+ Utils::ApiCheck(info->GetPrototypeProviderTemplate().IsUndefined(i_isolate),
+ "v8::FunctionTemplate::Inherit",
+ "Protoype provider must be empty");
i::FunctionTemplateInfo::SetParentTemplate(i_isolate, info,
Utils::OpenHandle(*value));
}
@@ -1275,7 +1294,7 @@ void FunctionTemplate::SetCallHandler(FunctionCallback callback,
SideEffectType side_effect_type,
const CFunction* c_function) {
auto info = Utils::OpenHandle(this);
- EnsureNotInstantiated(info, "v8::FunctionTemplate::SetCallHandler");
+ EnsureNotPublished(info, "v8::FunctionTemplate::SetCallHandler");
i::Isolate* isolate = info->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
@@ -1365,7 +1384,7 @@ Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
void FunctionTemplate::SetLength(int length) {
auto info = Utils::OpenHandle(this);
- EnsureNotInstantiated(info, "v8::FunctionTemplate::SetLength");
+ EnsureNotPublished(info, "v8::FunctionTemplate::SetLength");
auto isolate = info->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
info->set_length(length);
@@ -1373,7 +1392,7 @@ void FunctionTemplate::SetLength(int length) {
void FunctionTemplate::SetClassName(Local<String> name) {
auto info = Utils::OpenHandle(this);
- EnsureNotInstantiated(info, "v8::FunctionTemplate::SetClassName");
+ EnsureNotPublished(info, "v8::FunctionTemplate::SetClassName");
auto isolate = info->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
info->set_class_name(*Utils::OpenHandle(*name));
@@ -1381,7 +1400,7 @@ void FunctionTemplate::SetClassName(Local<String> name) {
void FunctionTemplate::SetAcceptAnyReceiver(bool value) {
auto info = Utils::OpenHandle(this);
- EnsureNotInstantiated(info, "v8::FunctionTemplate::SetAcceptAnyReceiver");
+ EnsureNotPublished(info, "v8::FunctionTemplate::SetAcceptAnyReceiver");
auto isolate = info->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
info->set_accept_any_receiver(value);
@@ -1389,7 +1408,7 @@ void FunctionTemplate::SetAcceptAnyReceiver(bool value) {
void FunctionTemplate::ReadOnlyPrototype() {
auto info = Utils::OpenHandle(this);
- EnsureNotInstantiated(info, "v8::FunctionTemplate::ReadOnlyPrototype");
+ EnsureNotPublished(info, "v8::FunctionTemplate::ReadOnlyPrototype");
auto isolate = info->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
info->set_read_only_prototype(true);
@@ -1397,7 +1416,7 @@ void FunctionTemplate::ReadOnlyPrototype() {
void FunctionTemplate::RemovePrototype() {
auto info = Utils::OpenHandle(this);
- EnsureNotInstantiated(info, "v8::FunctionTemplate::RemovePrototype");
+ EnsureNotPublished(info, "v8::FunctionTemplate::RemovePrototype");
auto isolate = info->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
info->set_remove_prototype(true);
@@ -1625,7 +1644,7 @@ static void ObjectTemplateSetNamedPropertyHandler(
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
auto cons = EnsureConstructor(isolate, templ);
- EnsureNotInstantiated(cons, "ObjectTemplateSetNamedPropertyHandler");
+ EnsureNotPublished(cons, "ObjectTemplateSetNamedPropertyHandler");
auto obj =
CreateNamedInterceptorInfo(isolate, getter, setter, query, descriptor,
remover, enumerator, definer, data, flags);
@@ -1645,7 +1664,7 @@ void ObjectTemplate::MarkAsUndetectable() {
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
auto cons = EnsureConstructor(isolate, this);
- EnsureNotInstantiated(cons, "v8::ObjectTemplate::MarkAsUndetectable");
+ EnsureNotPublished(cons, "v8::ObjectTemplate::MarkAsUndetectable");
cons->set_undetectable(true);
}
@@ -1655,7 +1674,7 @@ void ObjectTemplate::SetAccessCheckCallback(AccessCheckCallback callback,
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
auto cons = EnsureConstructor(isolate, this);
- EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetAccessCheckCallback");
+ EnsureNotPublished(cons, "v8::ObjectTemplate::SetAccessCheckCallback");
i::Handle<i::Struct> struct_info = isolate->factory()->NewStruct(
i::ACCESS_CHECK_INFO_TYPE, i::AllocationType::kOld);
@@ -1684,8 +1703,8 @@ void ObjectTemplate::SetAccessCheckCallbackAndHandler(
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
auto cons = EnsureConstructor(isolate, this);
- EnsureNotInstantiated(
- cons, "v8::ObjectTemplate::SetAccessCheckCallbackWithHandler");
+ EnsureNotPublished(cons,
+ "v8::ObjectTemplate::SetAccessCheckCallbackWithHandler");
i::Handle<i::Struct> struct_info = isolate->factory()->NewStruct(
i::ACCESS_CHECK_INFO_TYPE, i::AllocationType::kOld);
@@ -1720,7 +1739,7 @@ void ObjectTemplate::SetHandler(
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
auto cons = EnsureConstructor(isolate, this);
- EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetHandler");
+ EnsureNotPublished(cons, "v8::ObjectTemplate::SetHandler");
auto obj = CreateIndexedInterceptorInfo(
isolate, config.getter, config.setter, config.query, config.descriptor,
config.deleter, config.enumerator, config.definer, config.data,
@@ -1734,7 +1753,7 @@ void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
auto cons = EnsureConstructor(isolate, this);
- EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetCallAsFunctionHandler");
+ EnsureNotPublished(cons, "v8::ObjectTemplate::SetCallAsFunctionHandler");
i::Handle<i::CallHandlerInfo> obj = isolate->factory()->NewCallHandlerInfo();
SET_FIELD_WRAPPED(isolate, obj, set_callback, callback);
SET_FIELD_WRAPPED(isolate, obj, set_js_callback, obj->redirected_callback());
@@ -1745,7 +1764,7 @@ void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
i::FunctionTemplateInfo::SetInstanceCallHandler(isolate, cons, obj);
}
-int ObjectTemplate::InternalFieldCount() {
+int ObjectTemplate::InternalFieldCount() const {
return Utils::OpenHandle(this)->embedder_field_count();
}
@@ -1766,7 +1785,7 @@ void ObjectTemplate::SetInternalFieldCount(int value) {
Utils::OpenHandle(this)->set_embedder_field_count(value);
}
-bool ObjectTemplate::IsImmutableProto() {
+bool ObjectTemplate::IsImmutableProto() const {
return Utils::OpenHandle(this)->immutable_proto();
}
@@ -1777,7 +1796,7 @@ void ObjectTemplate::SetImmutableProto() {
self->set_immutable_proto(true);
}
-bool ObjectTemplate::IsCodeLike() {
+bool ObjectTemplate::IsCodeLike() const {
return Utils::OpenHandle(this)->code_like();
}
@@ -1832,15 +1851,11 @@ Local<Script> UnboundScript::BindToCurrentContext() {
return ToApiHandle<Script>(function);
}
-int UnboundScript::GetId() {
- auto function_info =
- i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
- i::Isolate* isolate = function_info->GetIsolate();
+int UnboundScript::GetId() const {
+ auto function_info = i::SharedFunctionInfo::cast(*Utils::OpenHandle(this));
+ i::Isolate* isolate = function_info.GetIsolate();
LOG_API(isolate, UnboundScript, GetId);
- i::HandleScope scope(isolate);
- i::Handle<i::Script> script(i::Script::cast(function_info->script()),
- isolate);
- return script->id();
+ return i::Script::cast(function_info.script()).id();
}
int UnboundScript::GetLineNumber(int code_pos) {
@@ -1909,6 +1924,17 @@ MaybeLocal<Value> Script::Run(Local<Context> context) {
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
auto fun = i::Handle<i::JSFunction>::cast(Utils::OpenHandle(this));
+ // TODO(crbug.com/1193459): remove once ablation study is completed
+ if (i::FLAG_script_run_delay) {
+ v8::base::OS::Sleep(
+ v8::base::TimeDelta::FromMilliseconds(i::FLAG_script_run_delay));
+ }
+ if (i::FLAG_script_run_delay_once && !isolate->did_run_script_delay()) {
+ v8::base::OS::Sleep(
+ v8::base::TimeDelta::FromMilliseconds(i::FLAG_script_run_delay_once));
+ isolate->set_did_run_script_delay(true);
+ }
+
i::Handle<i::Object> receiver = isolate->global_proxy();
Local<Value> result;
has_pending_exception = !ToLocal<Value>(
@@ -2040,33 +2066,33 @@ Local<Value> Module::GetException() const {
}
int Module::GetModuleRequestsLength() const {
- i::Handle<i::Module> self = Utils::OpenHandle(this);
- if (self->IsSyntheticModule()) return 0;
- ASSERT_NO_SCRIPT_NO_EXCEPTION(self->GetIsolate());
- return i::Handle<i::SourceTextModule>::cast(self)
- ->info()
- .module_requests()
- .length();
+ i::Module self = *Utils::OpenHandle(this);
+ if (self.IsSyntheticModule()) return 0;
+ ASSERT_NO_SCRIPT_NO_EXCEPTION(self.GetIsolate());
+ return i::SourceTextModule::cast(self).info().module_requests().length();
}
Local<String> Module::GetModuleRequest(int i) const {
- CHECK_GE(i, 0);
+ Utils::ApiCheck(i >= 0, "v8::Module::GetModuleRequest",
+ "index must be positive");
i::Handle<i::Module> self = Utils::OpenHandle(this);
- Utils::ApiCheck(self->IsSourceTextModule(), "Module::GetModuleRequest",
+ Utils::ApiCheck(self->IsSourceTextModule(), "v8::Module::GetModuleRequest",
"Expected SourceTextModule");
i::Isolate* isolate = self->GetIsolate();
ASSERT_NO_SCRIPT_NO_EXCEPTION(isolate);
i::Handle<i::FixedArray> module_requests(
i::Handle<i::SourceTextModule>::cast(self)->info().module_requests(),
isolate);
- CHECK_LT(i, module_requests->length());
+ Utils::ApiCheck(i < module_requests->length(), "v8::Module::GetModuleRequest",
+ "index is out of bounds");
i::Handle<i::ModuleRequest> module_request(
i::ModuleRequest::cast(module_requests->get(i)), isolate);
return ToApiHandle<String>(i::handle(module_request->specifier(), isolate));
}
Location Module::GetModuleRequestLocation(int i) const {
- CHECK_GE(i, 0);
+ Utils::ApiCheck(i >= 0, "v8::Module::GetModuleRequest",
+ "index must be positive");
i::Handle<i::Module> self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
ASSERT_NO_SCRIPT_NO_EXCEPTION(isolate);
@@ -2077,7 +2103,8 @@ Location Module::GetModuleRequestLocation(int i) const {
i::Handle<i::FixedArray> module_requests(
i::Handle<i::SourceTextModule>::cast(self)->info().module_requests(),
isolate);
- CHECK_LT(i, module_requests->length());
+ Utils::ApiCheck(i < module_requests->length(), "v8::Module::GetModuleRequest",
+ "index is out of bounds");
i::Handle<i::ModuleRequest> module_request(
i::ModuleRequest::cast(module_requests->get(i)), isolate);
int position = module_request->position();
@@ -2143,22 +2170,22 @@ Local<UnboundModuleScript> Module::GetUnboundModuleScript() {
isolate));
}
-int Module::ScriptId() {
- i::Handle<i::Module> self = Utils::OpenHandle(this);
- Utils::ApiCheck(self->IsSourceTextModule(), "v8::Module::ScriptId",
+int Module::ScriptId() const {
+ i::Module self = *Utils::OpenHandle(this);
+ Utils::ApiCheck(self.IsSourceTextModule(), "v8::Module::ScriptId",
"v8::Module::ScriptId must be used on an SourceTextModule");
- ASSERT_NO_SCRIPT_NO_EXCEPTION(self->GetIsolate());
- return i::Handle<i::SourceTextModule>::cast(self)->GetScript().id();
+ ASSERT_NO_SCRIPT_NO_EXCEPTION(self.GetIsolate());
+ return i::SourceTextModule::cast(self).GetScript().id();
}
bool Module::IsGraphAsync() const {
Utils::ApiCheck(
GetStatus() >= kInstantiated, "v8::Module::IsGraphAsync",
"v8::Module::IsGraphAsync must be used on an instantiated module");
- i::Handle<i::Module> self = Utils::OpenHandle(this);
- auto isolate = self->GetIsolate();
+ i::Module self = *Utils::OpenHandle(this);
+ auto isolate = self.GetIsolate();
ASSERT_NO_SCRIPT_NO_EXCEPTION(isolate);
- return self->IsGraphAsync(isolate);
+ return self.IsGraphAsync(isolate);
}
bool Module::IsSourceTextModule() const {
@@ -2205,8 +2232,8 @@ MaybeLocal<Value> Module::Evaluate(Local<Context> context) {
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
i::Handle<i::Module> self = Utils::OpenHandle(this);
- // It's an API error to call Evaluate before Instantiate.
- CHECK_GE(self->status(), i::Module::kInstantiated);
+ Utils::ApiCheck(self->status() >= i::Module::kInstantiated,
+ "Module::Evaluate", "Expected instantiated module");
Local<Value> result;
has_pending_exception = !ToLocal(i::Module::Evaluate(isolate, self), &result);
@@ -2360,8 +2387,9 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
MaybeLocal<Module> ScriptCompiler::CompileModule(
Isolate* isolate, Source* source, CompileOptions options,
NoCacheReason no_cache_reason) {
- CHECK(options == kNoCompileOptions || options == kConsumeCodeCache);
-
+ Utils::ApiCheck(options == kNoCompileOptions || options == kConsumeCodeCache,
+ "v8::ScriptCompiler::CompileModule",
+ "Invalid CompileOptions");
Utils::ApiCheck(source->GetResourceOptions().IsModule(),
"v8::ScriptCompiler::CompileModule",
"Invalid ScriptOrigin: is_module must be true");
@@ -2592,7 +2620,9 @@ ScriptCompiler::CachedData* ScriptCompiler::CreateCodeCacheForFunction(
i::Handle<i::SharedFunctionInfo> shared(js_function->shared(),
js_function->GetIsolate());
ASSERT_NO_SCRIPT_NO_EXCEPTION(shared->GetIsolate());
- CHECK(shared->is_wrapped());
+ Utils::ApiCheck(shared->is_wrapped(),
+ "v8::ScriptCompiler::CreateCodeCacheForFunction",
+ "Expected SharedFunctionInfo with wrapped source code.");
return i::CodeSerializer::Serialize(shared);
}
@@ -2822,6 +2852,7 @@ int Message::GetStartColumn() const {
}
int Message::GetWasmFunctionIndex() const {
+#if V8_ENABLE_WEBASSEMBLY
auto self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
@@ -2839,6 +2870,9 @@ int Message::GetWasmFunctionIndex() const {
auto debug_script = ToApiHandle<debug::Script>(script);
return Local<debug::WasmScript>::Cast(debug_script)
->GetContainingFunction(start_position);
+#else
+ return Message::kNoWasmFunctionIndexInfo;
+#endif // V8_ENABLE_WEBASSEMBLY
}
Maybe<int> Message::GetStartColumn(Local<Context> context) const {
@@ -2876,6 +2910,15 @@ bool Message::IsOpaque() const {
return self->script().origin_options().IsOpaque();
}
+MaybeLocal<String> Message::GetSource(Local<Context> context) const {
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+ EscapableHandleScope handle_scope(reinterpret_cast<Isolate*>(isolate));
+ i::Handle<i::String> source(self->GetSource(), isolate);
+ RETURN_ESCAPED(Utils::ToLocal(source));
+}
+
MaybeLocal<String> Message::GetSourceLine(Local<Context> context) const {
auto self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
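Message::GetSource() added above returns the full script source as a MaybeLocal<String>; a hedged usage sketch, assuming a caught `message` and an entered `context`:

    // Sketch: retrieving the whole source of the script that produced |message|.
    v8::Local<v8::String> source;
    if (message->GetSource(context).ToLocal(&source)) {
      v8::String::Utf8Value utf8(isolate, source);
      // *utf8 now holds the script text (may be large).
    }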
@@ -2945,6 +2988,23 @@ Local<String> StackFrame::GetScriptNameOrSourceURL() const {
return Local<String>::Cast(Utils::ToLocal(name_or_url));
}
+Local<String> StackFrame::GetScriptSource() const {
+ auto self = Utils::OpenHandle(this);
+ auto isolate = self->GetIsolate();
+ i::Handle<i::Object> source(self->GetScriptSource(), isolate);
+ if (!source->IsString()) return {};
+ return Local<String>::Cast(Utils::ToLocal(source));
+}
+
+Local<String> StackFrame::GetScriptSourceMappingURL() const {
+ auto self = Utils::OpenHandle(this);
+ auto isolate = self->GetIsolate();
+ i::Handle<i::Object> sourceMappingURL(self->GetScriptSourceMappingURL(),
+ isolate);
+ if (!sourceMappingURL->IsString()) return {};
+ return Local<String>::Cast(Utils::ToLocal(sourceMappingURL));
+}
+
Local<String> StackFrame::GetFunctionName() const {
auto self = Utils::OpenHandle(this);
auto name = i::StackFrameInfo::GetFunctionName(self);
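Both new StackFrame accessors return an empty handle when the underlying script has no string source or no source mapping URL; a defensive usage sketch (the frame acquisition is an illustrative assumption):

    // Sketch: the accessors may return empty handles, so check before use.
    v8::Local<v8::StackFrame> frame = stack_trace->GetFrame(isolate, 0);
    v8::Local<v8::String> script_source = frame->GetScriptSource();
    v8::Local<v8::String> source_map_url = frame->GetScriptSourceMappingURL();
    if (!script_source.IsEmpty()) { /* ... */ }
    if (!source_map_url.IsEmpty()) { /* ... */ }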
@@ -2958,7 +3018,13 @@ bool StackFrame::IsConstructor() const {
return Utils::OpenHandle(this)->IsConstructor();
}
-bool StackFrame::IsWasm() const { return Utils::OpenHandle(this)->IsWasm(); }
+bool StackFrame::IsWasm() const {
+#if V8_ENABLE_WEBASSEMBLY
+ return Utils::OpenHandle(this)->IsWasm();
+#else
+ return false;
+#endif // V8_ENABLE_WEBASSEMBLY
+}
bool StackFrame::IsUserJavaScript() const {
return Utils::OpenHandle(this)->IsUserJavaScript();
@@ -3257,15 +3323,15 @@ bool Value::FullIsNull() const {
}
bool Value::IsTrue() const {
- i::Handle<i::Object> object = Utils::OpenHandle(this);
- if (object->IsSmi()) return false;
- return object->IsTrue();
+ i::Object object = *Utils::OpenHandle(this);
+ if (object.IsSmi()) return false;
+ return object.IsTrue();
}
bool Value::IsFalse() const {
- i::Handle<i::Object> object = Utils::OpenHandle(this);
- if (object->IsSmi()) return false;
- return object->IsFalse();
+ i::Object object = *Utils::OpenHandle(this);
+ if (object.IsSmi()) return false;
+ return object.IsFalse();
}
bool Value::IsFunction() const { return Utils::OpenHandle(this)->IsCallable(); }
@@ -3285,8 +3351,9 @@ bool Value::IsSymbol() const {
bool Value::IsArray() const { return Utils::OpenHandle(this)->IsJSArray(); }
bool Value::IsArrayBuffer() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->IsJSArrayBuffer() && !i::JSArrayBuffer::cast(*obj).is_shared();
+ i::Object obj = *Utils::OpenHandle(this);
+ if (!obj.IsJSArrayBuffer()) return false;
+ return !i::JSArrayBuffer::cast(obj).is_shared();
}
bool Value::IsArrayBufferView() const {
@@ -3313,8 +3380,9 @@ bool Value::IsDataView() const {
}
bool Value::IsSharedArrayBuffer() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->IsJSArrayBuffer() && i::JSArrayBuffer::cast(*obj).is_shared();
+ i::Object obj = *Utils::OpenHandle(this);
+ if (!obj.IsJSArrayBuffer()) return false;
+ return i::JSArrayBuffer::cast(obj).is_shared();
}
bool Value::IsObject() const { return Utils::OpenHandle(this)->IsJSReceiver(); }
@@ -3340,8 +3408,13 @@ VALUE_IS_SPECIFIC_TYPE(SymbolObject, SymbolWrapper)
VALUE_IS_SPECIFIC_TYPE(Date, JSDate)
VALUE_IS_SPECIFIC_TYPE(Map, JSMap)
VALUE_IS_SPECIFIC_TYPE(Set, JSSet)
+#if V8_ENABLE_WEBASSEMBLY
VALUE_IS_SPECIFIC_TYPE(WasmMemoryObject, WasmMemoryObject)
VALUE_IS_SPECIFIC_TYPE(WasmModuleObject, WasmModuleObject)
+#else
+bool Value::IsWasmMemoryObject() const { return false; }
+bool Value::IsWasmModuleObject() const { return false; }
+#endif // V8_ENABLE_WEBASSEMBLY
VALUE_IS_SPECIFIC_TYPE(WeakMap, JSWeakMap)
VALUE_IS_SPECIFIC_TYPE(WeakSet, JSWeakSet)
@@ -3350,23 +3423,23 @@ VALUE_IS_SPECIFIC_TYPE(WeakSet, JSWeakSet)
bool Value::IsBoolean() const { return Utils::OpenHandle(this)->IsBoolean(); }
bool Value::IsExternal() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (!obj->IsHeapObject()) return false;
- i::Handle<i::HeapObject> heap_obj = i::Handle<i::HeapObject>::cast(obj);
+ i::Object obj = *Utils::OpenHandle(this);
+ if (!obj.IsHeapObject()) return false;
+ i::HeapObject heap_obj = i::HeapObject::cast(obj);
// Check the instance type is JS_OBJECT (instance type of Externals) before
// attempting to get the Isolate since that guarantees the object is writable
// and GetIsolate will work.
- if (heap_obj->map().instance_type() != i::JS_OBJECT_TYPE) return false;
- i::Isolate* isolate = i::JSObject::cast(*heap_obj).GetIsolate();
+ if (heap_obj.map().instance_type() != i::JS_OBJECT_TYPE) return false;
+ i::Isolate* isolate = i::JSObject::cast(heap_obj).GetIsolate();
ASSERT_NO_SCRIPT_NO_EXCEPTION(isolate);
- return heap_obj->IsExternal(isolate);
+ return heap_obj.IsExternal(isolate);
}
bool Value::IsInt32() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsSmi()) return true;
- if (obj->IsNumber()) {
- return i::IsInt32Double(obj->Number());
+ i::Object obj = *Utils::OpenHandle(this);
+ if (obj.IsSmi()) return true;
+ if (obj.IsNumber()) {
+ return i::IsInt32Double(obj.Number());
}
return false;
}
@@ -3392,18 +3465,18 @@ bool Value::IsRegExp() const {
}
bool Value::IsAsyncFunction() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (!obj->IsJSFunction()) return false;
- i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(obj);
- return i::IsAsyncFunction(func->shared().kind());
+ i::Object obj = *Utils::OpenHandle(this);
+ if (!obj.IsJSFunction()) return false;
+ i::JSFunction func = i::JSFunction::cast(obj);
+ return i::IsAsyncFunction(func.shared().kind());
}
bool Value::IsGeneratorFunction() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (!obj->IsJSFunction()) return false;
- i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(obj);
- ASSERT_NO_SCRIPT_NO_EXCEPTION(func->GetIsolate());
- return i::IsGeneratorFunction(func->shared().kind());
+ i::Object obj = *Utils::OpenHandle(this);
+ if (!obj.IsJSFunction()) return false;
+ i::JSFunction func = i::JSFunction::cast(obj);
+ ASSERT_NO_SCRIPT_NO_EXCEPTION(func.GetIsolate());
+ return i::IsGeneratorFunction(func.shared().kind());
}
bool Value::IsGeneratorObject() const {
@@ -3700,7 +3773,8 @@ std::unique_ptr<v8::BackingStore> v8::BackingStore::Reallocate(
size_t byte_length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, ArrayBuffer, BackingStore_Reallocate);
- CHECK_LE(byte_length, i::JSArrayBuffer::kMaxByteLength);
+ Utils::ApiCheck(byte_length <= i::JSArrayBuffer::kMaxByteLength,
+ "v8::BackingStore::Reallocate", "byte_lenght is too large");
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::BackingStore* i_backing_store =
reinterpret_cast<i::BackingStore*>(backing_store.get());
@@ -3723,7 +3797,6 @@ std::shared_ptr<v8::BackingStore> v8::ArrayBuffer::GetBackingStore() {
backing_store =
i::BackingStore::EmptyBackingStore(i::SharedFlag::kNotShared);
}
- i::GlobalBackingStoreRegistry::Register(backing_store);
std::shared_ptr<i::BackingStoreBase> bs_base = backing_store;
return std::static_pointer_cast<v8::BackingStore>(bs_base);
}
@@ -3734,7 +3807,6 @@ std::shared_ptr<v8::BackingStore> v8::SharedArrayBuffer::GetBackingStore() {
if (!backing_store) {
backing_store = i::BackingStore::EmptyBackingStore(i::SharedFlag::kShared);
}
- i::GlobalBackingStoreRegistry::Register(backing_store);
std::shared_ptr<i::BackingStoreBase> bs_base = backing_store;
return std::static_pointer_cast<v8::BackingStore>(bs_base);
}
@@ -4311,7 +4383,7 @@ MaybeLocal<Array> v8::Object::GetPropertyNames(
accumulator.GetKeys(static_cast<i::GetKeysConversion>(key_conversion));
DCHECK(self->map().EnumLength() == i::kInvalidEnumCacheSentinel ||
self->map().EnumLength() == 0 ||
- self->map().instance_descriptors(kRelaxedLoad).enum_cache().keys() !=
+ self->map().instance_descriptors(isolate).enum_cache().keys() !=
*value);
auto result = isolate->factory()->NewJSArrayWithElements(value);
RETURN_ESCAPED(Utils::ToLocal(result));
@@ -4606,16 +4678,16 @@ Maybe<bool> v8::Object::HasRealNamedCallbackProperty(Local<Context> context,
return result;
}
-bool v8::Object::HasNamedLookupInterceptor() {
- auto self = Utils::OpenHandle(this);
- return self->IsJSObject() &&
- i::Handle<i::JSObject>::cast(self)->HasNamedInterceptor();
+bool v8::Object::HasNamedLookupInterceptor() const {
+ auto self = *Utils::OpenHandle(this);
+ if (!self.IsJSObject()) return false;
+ return i::JSObject::cast(self).HasNamedInterceptor();
}
-bool v8::Object::HasIndexedLookupInterceptor() {
- auto self = Utils::OpenHandle(this);
- return self->IsJSObject() &&
- i::Handle<i::JSObject>::cast(self)->HasIndexedInterceptor();
+bool v8::Object::HasIndexedLookupInterceptor() const {
+ auto self = *Utils::OpenHandle(this);
+ if (!self.IsJSObject()) return false;
+ return i::JSObject::cast(self).HasIndexedInterceptor();
}
MaybeLocal<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
@@ -4705,8 +4777,7 @@ Local<v8::Object> v8::Object::Clone() {
auto self = i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
auto isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- auto result = isolate->factory()->CopyJSObject(self);
- CHECK(!result.is_null());
+ i::Handle<i::JSObject> result = isolate->factory()->CopyJSObject(self);
return Utils::ToLocal(result);
}
@@ -4751,22 +4822,22 @@ int v8::Object::GetIdentityHash() {
return self->GetOrCreateIdentityHash(isolate).value();
}
-bool v8::Object::IsCallable() {
+bool v8::Object::IsCallable() const {
auto self = Utils::OpenHandle(this);
return self->IsCallable();
}
-bool v8::Object::IsConstructor() {
+bool v8::Object::IsConstructor() const {
auto self = Utils::OpenHandle(this);
return self->IsConstructor();
}
-bool v8::Object::IsApiWrapper() {
+bool v8::Object::IsApiWrapper() const {
auto self = i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
return self->IsApiWrapper();
}
-bool v8::Object::IsUndetectable() {
+bool v8::Object::IsUndetectable() const {
auto self = i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
return self->IsUndetectable();
}
@@ -5007,17 +5078,11 @@ int Function::GetScriptColumnNumber() const {
}
int Function::ScriptId() const {
- auto self = Utils::OpenHandle(this);
- if (!self->IsJSFunction()) {
- return v8::UnboundScript::kNoScriptId;
- }
- auto func = i::Handle<i::JSFunction>::cast(self);
- if (!func->shared().script().IsScript()) {
- return v8::UnboundScript::kNoScriptId;
- }
- i::Handle<i::Script> script(i::Script::cast(func->shared().script()),
- func->GetIsolate());
- return script->id();
+ i::JSReceiver self = *Utils::OpenHandle(this);
+ if (!self.IsJSFunction()) return v8::UnboundScript::kNoScriptId;
+ auto func = i::JSFunction::cast(self);
+ if (!func.shared().script().IsScript()) return v8::UnboundScript::kNoScriptId;
+ return i::Script::cast(func.shared().script()).id();
}
Local<v8::Value> Function::GetBoundFunction() const {
@@ -5377,6 +5442,11 @@ int String::Write(Isolate* isolate, uint16_t* buffer, int start, int length,
start, length, options);
}
+bool v8::String::IsExternal() const {
+ i::Handle<i::String> str = Utils::OpenHandle(this);
+ return i::StringShape(*str).IsExternal();
+}
+
bool v8::String::IsExternalTwoByte() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
return i::StringShape(*str).IsExternalTwoByte();
@@ -5546,36 +5616,36 @@ bool Boolean::Value() const {
}
int64_t Integer::Value() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsSmi()) {
- return i::Smi::ToInt(*obj);
+ i::Object obj = *Utils::OpenHandle(this);
+ if (obj.IsSmi()) {
+ return i::Smi::ToInt(obj);
} else {
- return static_cast<int64_t>(obj->Number());
+ return static_cast<int64_t>(obj.Number());
}
}
int32_t Int32::Value() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsSmi()) {
- return i::Smi::ToInt(*obj);
+ i::Object obj = *Utils::OpenHandle(this);
+ if (obj.IsSmi()) {
+ return i::Smi::ToInt(obj);
} else {
- return static_cast<int32_t>(obj->Number());
+ return static_cast<int32_t>(obj.Number());
}
}
uint32_t Uint32::Value() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsSmi()) {
- return i::Smi::ToInt(*obj);
+ i::Object obj = *Utils::OpenHandle(this);
+ if (obj.IsSmi()) {
+ return i::Smi::ToInt(obj);
} else {
- return static_cast<uint32_t>(obj->Number());
+ return static_cast<uint32_t>(obj.Number());
}
}
-int v8::Object::InternalFieldCount() {
- i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
- if (!self->IsJSObject()) return 0;
- return i::Handle<i::JSObject>::cast(self)->GetEmbedderFieldCount();
+int v8::Object::InternalFieldCount() const {
+ i::JSReceiver self = *Utils::OpenHandle(this);
+ if (!self.IsJSObject()) return 0;
+ return i::JSObject::cast(self).GetEmbedderFieldCount();
}
static bool InternalFieldOK(i::Handle<i::JSReceiver> obj, int index,
@@ -5818,7 +5888,9 @@ void V8::GetSharedMemoryStatistics(SharedMemoryStatistics* statistics) {
void V8::SetIsCrossOriginIsolated() {
i::FLAG_harmony_sharedarraybuffer = true;
+#if V8_ENABLE_WEBASSEMBLY
i::FLAG_experimental_wasm_threads = true;
+#endif // V8_ENABLE_WEBASSEMBLY
}
template <typename ObjectType>
@@ -6065,7 +6137,8 @@ v8::Isolate* Context::GetIsolate() {
v8::MicrotaskQueue* Context::GetMicrotaskQueue() {
i::Handle<i::Context> env = Utils::OpenHandle(this);
- CHECK(env->IsNativeContext());
+ Utils::ApiCheck(env->IsNativeContext(), "v8::Context::GetMicrotaskQueue",
+ "Must be calld on a native context");
return i::Handle<i::NativeContext>::cast(env)->microtask_queue();
}
@@ -6105,9 +6178,9 @@ void Context::AllowCodeGenerationFromStrings(bool allow) {
: i::ReadOnlyRoots(isolate).false_value());
}
-bool Context::IsCodeGenerationFromStringsAllowed() {
- i::Handle<i::Context> context = Utils::OpenHandle(this);
- return !context->allow_code_gen_from_strings().IsFalse(context->GetIsolate());
+bool Context::IsCodeGenerationFromStringsAllowed() const {
+ i::Context context = *Utils::OpenHandle(this);
+ return !context.allow_code_gen_from_strings().IsFalse(context.GetIsolate());
}
void Context::SetErrorMessageForCodeGenerationFromStrings(Local<String> error) {
@@ -6147,45 +6220,6 @@ void Context::SetContinuationPreservedEmbedderData(Local<Value> data) {
*i::Handle<i::HeapObject>::cast(Utils::OpenHandle(*data)));
}
-void v8::Context::SetPromiseHooks(Local<Function> init_hook,
- Local<Function> before_hook,
- Local<Function> after_hook,
- Local<Function> resolve_hook) {
- i::Handle<i::Context> context = Utils::OpenHandle(this);
- i::Isolate* isolate = context->GetIsolate();
-
- i::Handle<i::Object> init = isolate->factory()->undefined_value();
- i::Handle<i::Object> before = isolate->factory()->undefined_value();
- i::Handle<i::Object> after = isolate->factory()->undefined_value();
- i::Handle<i::Object> resolve = isolate->factory()->undefined_value();
-
- bool has_hook = false;
-
- if (!init_hook.IsEmpty()) {
- init = Utils::OpenHandle(*init_hook);
- has_hook = true;
- }
- if (!before_hook.IsEmpty()) {
- before = Utils::OpenHandle(*before_hook);
- has_hook = true;
- }
- if (!after_hook.IsEmpty()) {
- after = Utils::OpenHandle(*after_hook);
- has_hook = true;
- }
- if (!resolve_hook.IsEmpty()) {
- resolve = Utils::OpenHandle(*resolve_hook);
- has_hook = true;
- }
-
- isolate->SetHasContextPromiseHooks(has_hook);
-
- context->native_context().set_promise_hook_init_function(*init);
- context->native_context().set_promise_hook_before_function(*before);
- context->native_context().set_promise_hook_after_function(*after);
- context->native_context().set_promise_hook_resolve_function(*resolve);
-}
-
MaybeLocal<Context> metrics::Recorder::GetContext(
Isolate* isolate, metrics::Recorder::ContextId id) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -6551,8 +6585,7 @@ bool v8::String::MakeExternal(
return result;
}
-bool v8::String::CanMakeExternal() {
- i::DisallowGarbageCollection no_gc;
+bool v8::String::CanMakeExternal() const {
i::String obj = *Utils::OpenHandle(this);
if (obj.IsThinString()) {
@@ -6567,7 +6600,7 @@ bool v8::String::CanMakeExternal() {
return !i::Heap::InYoungGeneration(obj);
}
-bool v8::String::StringEquals(Local<String> that) {
+bool v8::String::StringEquals(Local<String> that) const {
auto self = Utils::OpenHandle(this);
auto other = Utils::OpenHandle(*that);
return self->Equals(*other);
@@ -6664,10 +6697,9 @@ Local<v8::Object> v8::Object::New(Isolate* isolate,
// properties, and so we default to creating a properties backing store
// large enough to hold all of them, while we start with no elements
// (see http://bit.ly/v8-fast-object-create-cpp for the motivation).
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- i::Handle<i::OrderedNameDictionary> properties =
- i::OrderedNameDictionary::Allocate(i_isolate, static_cast<int>(length))
- .ToHandleChecked();
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ i::Handle<i::SwissNameDictionary> properties =
+ i_isolate->factory()->NewSwissNameDictionary(static_cast<int>(length));
AddPropertiesAndElementsToObject(i_isolate, properties, elements, names,
values, length);
i::Handle<i::JSObject> obj =
@@ -6739,12 +6771,11 @@ Local<v8::Value> v8::BooleanObject::New(Isolate* isolate, bool value) {
}
bool v8::BooleanObject::ValueOf() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::JSPrimitiveWrapper> js_primitive_wrapper =
- i::Handle<i::JSPrimitiveWrapper>::cast(obj);
- i::Isolate* isolate = js_primitive_wrapper->GetIsolate();
+ i::Object obj = *Utils::OpenHandle(this);
+ i::JSPrimitiveWrapper js_primitive_wrapper = i::JSPrimitiveWrapper::cast(obj);
+ i::Isolate* isolate = js_primitive_wrapper.GetIsolate();
LOG_API(isolate, BooleanObject, BooleanValue);
- return js_primitive_wrapper->value().IsTrue(isolate);
+ return js_primitive_wrapper.value().IsTrue(isolate);
}
Local<v8::Value> v8::StringObject::New(Isolate* v8_isolate,
@@ -6835,8 +6866,12 @@ MaybeLocal<v8::RegExp> v8::RegExp::New(Local<Context> context,
MaybeLocal<v8::RegExp> v8::RegExp::NewWithBacktrackLimit(
Local<Context> context, Local<String> pattern, Flags flags,
uint32_t backtrack_limit) {
- CHECK(i::Smi::IsValid(backtrack_limit));
- CHECK_NE(backtrack_limit, i::JSRegExp::kNoBacktrackLimit);
+ Utils::ApiCheck(i::Smi::IsValid(backtrack_limit),
+ "v8::RegExp::NewWithBacktrackLimit",
+ "backtrack_limit is too large or too small.");
+ Utils::ApiCheck(backtrack_limit != i::JSRegExp::kNoBacktrackLimit,
+ "v8::RegExp::NewWithBacktrackLimit",
+ "Must set backtrack_limit");
PREPARE_FOR_EXECUTION(context, RegExp, New, RegExp);
Local<v8::RegExp> result;
has_pending_exception = !ToLocal<RegExp>(
@@ -7267,16 +7302,13 @@ MaybeLocal<Promise> Promise::Then(Local<Context> context,
RETURN_ESCAPED(Local<Promise>::Cast(Utils::ToLocal(result)));
}
-bool Promise::HasHandler() {
- i::Handle<i::JSReceiver> promise = Utils::OpenHandle(this);
- i::Isolate* isolate = promise->GetIsolate();
+bool Promise::HasHandler() const {
+ i::JSReceiver promise = *Utils::OpenHandle(this);
+ i::Isolate* isolate = promise.GetIsolate();
LOG_API(isolate, Promise, HasRejectHandler);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- if (promise->IsJSPromise()) {
- i::Handle<i::JSPromise> js_promise = i::Handle<i::JSPromise>::cast(promise);
- return js_promise->has_handler();
- }
- return false;
+ if (!promise.IsJSPromise()) return false;
+ return i::JSPromise::cast(promise).has_handler();
}
Local<Value> Promise::Result() {
@@ -7315,7 +7347,7 @@ Local<Value> Proxy::GetHandler() {
return Utils::ToLocal(handler);
}
-bool Proxy::IsRevoked() {
+bool Proxy::IsRevoked() const {
i::Handle<i::JSProxy> self = Utils::OpenHandle(this);
return self->IsRevoked();
}
@@ -7346,6 +7378,7 @@ CompiledWasmModule::CompiledWasmModule(
}
OwnedBuffer CompiledWasmModule::Serialize() {
+#if V8_ENABLE_WEBASSEMBLY
TRACE_EVENT0("v8.wasm", "wasm.SerializeModule");
i::wasm::WasmSerializer wasm_serializer(native_module_.get());
size_t buffer_size = wasm_serializer.GetSerializedNativeModuleSize();
@@ -7353,33 +7386,48 @@ OwnedBuffer CompiledWasmModule::Serialize() {
if (!wasm_serializer.SerializeNativeModule({buffer.get(), buffer_size}))
return {};
return {std::move(buffer), buffer_size};
+#else
+ UNREACHABLE();
+#endif // V8_ENABLE_WEBASSEMBLY
}
MemorySpan<const uint8_t> CompiledWasmModule::GetWireBytesRef() {
+#if V8_ENABLE_WEBASSEMBLY
i::Vector<const uint8_t> bytes_vec = native_module_->wire_bytes();
return {bytes_vec.begin(), bytes_vec.size()};
+#else
+ UNREACHABLE();
+#endif // V8_ENABLE_WEBASSEMBLY
}
Local<ArrayBuffer> v8::WasmMemoryObject::Buffer() {
+#if V8_ENABLE_WEBASSEMBLY
i::Handle<i::WasmMemoryObject> obj = Utils::OpenHandle(this);
i::Handle<i::JSArrayBuffer> buffer(obj->array_buffer(), obj->GetIsolate());
return Utils::ToLocal(buffer);
+#else
+ UNREACHABLE();
+#endif // V8_ENABLE_WEBASSEMBLY
}
CompiledWasmModule WasmModuleObject::GetCompiledModule() {
- i::Handle<i::WasmModuleObject> obj =
- i::Handle<i::WasmModuleObject>::cast(Utils::OpenHandle(this));
- auto source_url = i::String::cast(obj->script().source_url());
+#if V8_ENABLE_WEBASSEMBLY
+ auto obj = i::Handle<i::WasmModuleObject>::cast(Utils::OpenHandle(this));
+ auto url =
+ i::handle(i::String::cast(obj->script().name()), obj->GetIsolate());
int length;
- std::unique_ptr<char[]> cstring = source_url.ToCString(
- i::DISALLOW_NULLS, i::FAST_STRING_TRAVERSAL, &length);
- i::Handle<i::String> url(source_url, obj->GetIsolate());
+ std::unique_ptr<char[]> cstring =
+ url->ToCString(i::DISALLOW_NULLS, i::FAST_STRING_TRAVERSAL, &length);
return CompiledWasmModule(std::move(obj->shared_native_module()),
cstring.get(), length);
+#else
+ UNREACHABLE();
+#endif // V8_ENABLE_WEBASSEMBLY
}
MaybeLocal<WasmModuleObject> WasmModuleObject::FromCompiledModule(
Isolate* isolate, const CompiledWasmModule& compiled_module) {
+#if V8_ENABLE_WEBASSEMBLY
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::WasmModuleObject> module_object =
i_isolate->wasm_engine()->ImportNativeModule(
@@ -7387,6 +7435,9 @@ MaybeLocal<WasmModuleObject> WasmModuleObject::FromCompiledModule(
i::VectorOf(compiled_module.source_url()));
return Local<WasmModuleObject>::Cast(
Utils::ToLocal(i::Handle<i::JSObject>::cast(module_object)));
+#else
+ UNREACHABLE();
+#endif // V8_ENABLE_WEBASSEMBLY
}
WasmModuleObjectBuilderStreaming::WasmModuleObjectBuilderStreaming(
@@ -7423,169 +7474,17 @@ v8::ArrayBuffer::Allocator* v8::ArrayBuffer::Allocator::NewDefaultAllocator() {
return new ArrayBufferAllocator();
}
-bool v8::ArrayBuffer::IsExternal() const {
- return Utils::OpenHandle(this)->is_external();
-}
-
bool v8::ArrayBuffer::IsDetachable() const {
return Utils::OpenHandle(this)->is_detachable();
}
namespace {
-// The backing store deleter just deletes the indirection, which downrefs
-// the shared pointer. It will get collected normally.
-void BackingStoreDeleter(void* buffer, size_t length, void* info) {
- std::shared_ptr<i::BackingStore>* bs_indirection =
- reinterpret_cast<std::shared_ptr<i::BackingStore>*>(info);
- if (bs_indirection) {
- i::BackingStore* backing_store = bs_indirection->get();
- TRACE_BS("API:delete bs=%p mem=%p (length=%zu)\n", backing_store,
- backing_store->buffer_start(), backing_store->byte_length());
- USE(backing_store);
- }
- delete bs_indirection;
-}
-
-void* MakeDeleterData(std::shared_ptr<i::BackingStore> backing_store) {
- if (!backing_store) return nullptr;
- TRACE_BS("API:extern bs=%p mem=%p (length=%zu)\n", backing_store.get(),
- backing_store->buffer_start(), backing_store->byte_length());
- return new std::shared_ptr<i::BackingStore>(backing_store);
-}
-
-std::shared_ptr<i::BackingStore> LookupOrCreateBackingStore(
- i::Isolate* i_isolate, void* data, size_t byte_length, i::SharedFlag shared,
- ArrayBufferCreationMode mode) {
- // "internalized" means that the storage was allocated by the
- // ArrayBufferAllocator and thus should be freed upon destruction.
- bool free_on_destruct = mode == ArrayBufferCreationMode::kInternalized;
-
- // Try to lookup a previously-registered backing store in the global
- // registry. If found, use that instead of wrapping an embedder allocation.
- std::shared_ptr<i::BackingStore> backing_store =
- i::GlobalBackingStoreRegistry::Lookup(data, byte_length);
-
- if (backing_store) {
- // Check invariants for a previously-found backing store.
-
- // 1. We cannot allow an embedder to first allocate a backing store that
- // should not be freed upon destruct, and then allocate an alias that should
- // destruct it. The other order is fine.
- bool changing_destruct_mode =
- free_on_destruct && !backing_store->free_on_destruct();
- Utils::ApiCheck(
- !changing_destruct_mode, "v8_[Shared]ArrayBuffer_New",
- "previous backing store found that should not be freed on destruct");
-
- // 2. We cannot allow embedders to use the same backing store for both
- // SharedArrayBuffers and regular ArrayBuffers.
- bool changing_shared_flag =
- (shared == i::SharedFlag::kShared) != backing_store->is_shared();
- Utils::ApiCheck(
- !changing_shared_flag, "v8_[Shared]ArrayBuffer_New",
- "previous backing store found that does not match shared flag");
- } else {
- // No previous backing store found.
- backing_store = i::BackingStore::WrapAllocation(
- i_isolate, data, byte_length, shared, free_on_destruct);
-
- // The embedder already has a direct pointer to the buffer start, so
- // globally register the backing store in case they come back with the
- // same buffer start and the backing store is marked as free_on_destruct.
- i::GlobalBackingStoreRegistry::Register(backing_store);
- }
- return backing_store;
-}
-
std::shared_ptr<i::BackingStore> ToInternal(
std::shared_ptr<i::BackingStoreBase> backing_store) {
return std::static_pointer_cast<i::BackingStore>(backing_store);
}
} // namespace
-v8::ArrayBuffer::Contents::Contents(void* data, size_t byte_length,
- void* allocation_base,
- size_t allocation_length,
- Allocator::AllocationMode allocation_mode,
- DeleterCallback deleter, void* deleter_data)
- : data_(data),
- byte_length_(byte_length),
- allocation_base_(allocation_base),
- allocation_length_(allocation_length),
- allocation_mode_(allocation_mode),
- deleter_(deleter),
- deleter_data_(deleter_data) {
- DCHECK_LE(allocation_base_, data_);
- DCHECK_LE(byte_length_, allocation_length_);
-}
-
-v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
- return GetContents(true);
-}
-
-void v8::ArrayBuffer::Externalize(
- const std::shared_ptr<BackingStore>& backing_store) {
- i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
- Utils::ApiCheck(!self->is_external(), "v8_ArrayBuffer_Externalize",
- "ArrayBuffer already externalized");
- self->set_is_external(true);
- DCHECK_EQ(self->backing_store(), backing_store->Data());
-}
-
-v8::ArrayBuffer::Contents v8::ArrayBuffer::GetContents() {
- return GetContents(false);
-}
-
-v8::ArrayBuffer::Contents v8::ArrayBuffer::GetContents(bool externalize) {
- // TODO(titzer): reduce duplication between shared/unshared GetContents()
- using BufferType = v8::ArrayBuffer;
-
- i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
-
- std::shared_ptr<i::BackingStore> backing_store = self->GetBackingStore();
-
- void* deleter_data = nullptr;
- if (externalize) {
- Utils::ApiCheck(!self->is_external(), "v8_ArrayBuffer_Externalize",
- "ArrayBuffer already externalized");
- self->set_is_external(true);
- // When externalizing, upref the shared pointer to the backing store
- // and store that as the deleter data. When the embedder calls the deleter
- // callback, we will delete the additional (on-heap) shared_ptr.
- deleter_data = MakeDeleterData(backing_store);
- }
-
- if (!backing_store) {
- // If the array buffer has zero length or was detached, return empty
- // contents.
- DCHECK_EQ(0, self->byte_length());
- BufferType::Contents contents(
- nullptr, 0, nullptr, 0,
- v8::ArrayBuffer::Allocator::AllocationMode::kNormal,
- BackingStoreDeleter, deleter_data);
- return contents;
- }
-
- // Backing stores that given to the embedder might be passed back through
- // the API using only the start of the buffer. We need to find such
- // backing stores using global registration until the API is changed.
- i::GlobalBackingStoreRegistry::Register(backing_store);
-
- auto allocation_mode =
- backing_store->is_wasm_memory()
- ? v8::ArrayBuffer::Allocator::AllocationMode::kReservation
- : v8::ArrayBuffer::Allocator::AllocationMode::kNormal;
-
- BufferType::Contents contents(backing_store->buffer_start(), // --
- backing_store->byte_length(), // --
- backing_store->buffer_start(), // --
- backing_store->byte_length(), // --
- allocation_mode, // --
- BackingStoreDeleter, // --
- deleter_data);
- return contents;
-}
-
void v8::ArrayBuffer::Detach() {
i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
@@ -7619,27 +7518,6 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, size_t byte_length) {
return Utils::ToLocal(array_buffer);
}
-Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, void* data,
- size_t byte_length,
- ArrayBufferCreationMode mode) {
- // Embedders must guarantee that the external backing store is valid.
- CHECK_IMPLIES(byte_length != 0, data != nullptr);
- CHECK_LE(byte_length, i::JSArrayBuffer::kMaxByteLength);
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, ArrayBuffer, New);
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
-
- std::shared_ptr<i::BackingStore> backing_store = LookupOrCreateBackingStore(
- i_isolate, data, byte_length, i::SharedFlag::kNotShared, mode);
-
- i::Handle<i::JSArrayBuffer> obj =
- i_isolate->factory()->NewJSArrayBuffer(std::move(backing_store));
- if (mode == ArrayBufferCreationMode::kExternalized) {
- obj->set_is_external(true);
- }
- return Utils::ToLocal(obj);
-}
-
Local<ArrayBuffer> v8::ArrayBuffer::New(
Isolate* isolate, std::shared_ptr<BackingStore> backing_store) {
CHECK_IMPLIES(backing_store->ByteLength() != 0,
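With the data-pointer overload of ArrayBuffer::New removed, embedder-owned memory is wrapped by creating a backing store explicitly and passing it to the remaining overload; a migration sketch, assuming NewBackingStore with a custom deleter is available in this API version (names are illustrative):

    // Sketch: wrap embedder-owned memory without handing ownership to V8.
    void NoopDeleter(void* /*data*/, size_t /*length*/, void* /*deleter_data*/) {}

    std::shared_ptr<v8::BackingStore> store = v8::ArrayBuffer::NewBackingStore(
        data, byte_length, NoopDeleter, /*deleter_data=*/nullptr);
    v8::Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, store);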
@@ -7813,119 +7691,6 @@ Local<DataView> DataView::New(Local<SharedArrayBuffer> shared_array_buffer,
return Utils::ToLocal(obj);
}
-namespace {
-i::Handle<i::JSArrayBuffer> SetupSharedArrayBuffer(
- Isolate* isolate, void* data, size_t byte_length,
- ArrayBufferCreationMode mode) {
- CHECK(i::FLAG_harmony_sharedarraybuffer);
- // Embedders must guarantee that the external backing store is valid.
- CHECK(byte_length == 0 || data != nullptr);
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, SharedArrayBuffer, New);
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
-
- std::shared_ptr<i::BackingStore> backing_store = LookupOrCreateBackingStore(
- i_isolate, data, byte_length, i::SharedFlag::kShared, mode);
-
- i::Handle<i::JSArrayBuffer> obj =
- i_isolate->factory()->NewJSSharedArrayBuffer(std::move(backing_store));
-
- if (mode == ArrayBufferCreationMode::kExternalized) {
- obj->set_is_external(true);
- }
- return obj;
-}
-
-} // namespace
-
-bool v8::SharedArrayBuffer::IsExternal() const {
- return Utils::OpenHandle(this)->is_external();
-}
-
-v8::SharedArrayBuffer::Contents::Contents(
- void* data, size_t byte_length, void* allocation_base,
- size_t allocation_length, Allocator::AllocationMode allocation_mode,
- DeleterCallback deleter, void* deleter_data)
- : data_(data),
- byte_length_(byte_length),
- allocation_base_(allocation_base),
- allocation_length_(allocation_length),
- allocation_mode_(allocation_mode),
- deleter_(deleter),
- deleter_data_(deleter_data) {
- DCHECK_LE(allocation_base_, data_);
- DCHECK_LE(byte_length_, allocation_length_);
-}
-
-v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::Externalize() {
- return GetContents(true);
-}
-
-void v8::SharedArrayBuffer::Externalize(
- const std::shared_ptr<BackingStore>& backing_store) {
- i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
- Utils::ApiCheck(!self->is_external(), "v8_SharedArrayBuffer_Externalize",
- "SharedArrayBuffer already externalized");
- self->set_is_external(true);
-
- DCHECK_EQ(self->backing_store(), backing_store->Data());
-}
-
-v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents() {
- return GetContents(false);
-}
-
-v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents(
- bool externalize) {
- // TODO(titzer): reduce duplication between shared/unshared GetContents()
- using BufferType = v8::SharedArrayBuffer;
-
- i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
-
- std::shared_ptr<i::BackingStore> backing_store = self->GetBackingStore();
-
- void* deleter_data = nullptr;
- if (externalize) {
- Utils::ApiCheck(!self->is_external(), "v8_SharedArrayBuffer_Externalize",
- "SharedArrayBuffer already externalized");
- self->set_is_external(true);
- // When externalizing, upref the shared pointer to the backing store
- // and store that as the deleter data. When the embedder calls the deleter
- // callback, we will delete the additional (on-heap) shared_ptr.
- deleter_data = MakeDeleterData(backing_store);
- }
-
- if (!backing_store) {
- // If the array buffer has zero length or was detached, return empty
- // contents.
- DCHECK_EQ(0, self->byte_length());
- BufferType::Contents contents(
- nullptr, 0, nullptr, 0,
- v8::ArrayBuffer::Allocator::AllocationMode::kNormal,
- BackingStoreDeleter, deleter_data);
- return contents;
- }
-
- // Backing stores that given to the embedder might be passed back through
- // the API using only the start of the buffer. We need to find such
- // backing stores using global registration until the API is changed.
- i::GlobalBackingStoreRegistry::Register(backing_store);
-
- auto allocation_mode =
- backing_store->is_wasm_memory()
- ? v8::ArrayBuffer::Allocator::AllocationMode::kReservation
- : v8::ArrayBuffer::Allocator::AllocationMode::kNormal;
-
- BufferType::Contents contents(backing_store->buffer_start(), // --
- backing_store->byte_length(), // --
- backing_store->buffer_start(), // --
- backing_store->byte_length(), // --
- allocation_mode, // --
- BackingStoreDeleter, // --
- deleter_data);
- return contents;
-}
-
size_t v8::SharedArrayBuffer::ByteLength() const {
i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
return obj->byte_length();
@@ -7954,14 +7719,6 @@ Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(Isolate* isolate,
}
Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(
- Isolate* isolate, void* data, size_t byte_length,
- ArrayBufferCreationMode mode) {
- i::Handle<i::JSArrayBuffer> buffer =
- SetupSharedArrayBuffer(isolate, data, byte_length, mode);
- return Utils::ToLocalShared(buffer);
-}
-
-Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(
Isolate* isolate, std::shared_ptr<BackingStore> backing_store) {
CHECK(i::FLAG_harmony_sharedarraybuffer);
CHECK_IMPLIES(backing_store->ByteLength() != 0,
@@ -7978,14 +7735,6 @@ Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(
return Utils::ToLocalShared(obj);
}
-Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(
- Isolate* isolate, const SharedArrayBuffer::Contents& contents,
- ArrayBufferCreationMode mode) {
- i::Handle<i::JSArrayBuffer> buffer = SetupSharedArrayBuffer(
- isolate, contents.Data(), contents.ByteLength(), mode);
- return Utils::ToLocalShared(buffer);
-}
-
std::unique_ptr<v8::BackingStore> v8::SharedArrayBuffer::NewBackingStore(
Isolate* isolate, size_t byte_length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -8331,12 +8080,18 @@ void Isolate::RequestInterrupt(InterruptCallback callback, void* data) {
}
bool Isolate::HasPendingBackgroundTasks() {
+#if V8_ENABLE_WEBASSEMBLY
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
return isolate->wasm_engine()->HasRunningCompileJob(isolate);
+#else
+ return false;
+#endif // V8_ENABLE_WEBASSEMBLY
}
void Isolate::RequestGarbageCollectionForTesting(GarbageCollectionType type) {
- CHECK(i::FLAG_expose_gc);
+ Utils::ApiCheck(i::FLAG_expose_gc,
+ "v8::Isolate::RequestGarbageCollectionForTesting",
+ "Must use --expose-gc");
if (type == kMinorGarbageCollection) {
reinterpret_cast<i::Isolate*>(this)->heap()->CollectGarbage(
i::NEW_SPACE, i::GarbageCollectionReason::kTesting,
@@ -8388,6 +8143,12 @@ void Isolate::Initialize(Isolate* isolate,
code_event_handler = i::GDBJITInterface::EventHandler;
}
#endif // ENABLE_GDB_JIT_INTERFACE
+#if defined(V8_OS_WIN) && defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
+ if (code_event_handler == nullptr && i::FLAG_enable_system_instrumentation) {
+ code_event_handler = i::ETWJITInterface::EventHandler;
+ }
+#endif // defined(V8_OS_WIN)
+
if (code_event_handler) {
i_isolate->InitializeLoggingAndCounters();
i_isolate->logger()->SetCodeEventHandler(kJitCodeEventDefault,
@@ -8645,16 +8406,21 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
// now we just add the values, thereby over-approximating the peak slightly.
heap_statistics->malloced_memory_ =
isolate->allocator()->GetCurrentMemoryUsage() +
- isolate->wasm_engine()->allocator()->GetCurrentMemoryUsage() +
isolate->string_table()->GetCurrentMemoryUsage();
heap_statistics->external_memory_ = isolate->heap()->backing_store_bytes();
heap_statistics->peak_malloced_memory_ =
- isolate->allocator()->GetMaxMemoryUsage() +
- isolate->wasm_engine()->allocator()->GetMaxMemoryUsage();
+ isolate->allocator()->GetMaxMemoryUsage();
heap_statistics->number_of_native_contexts_ = heap->NumberOfNativeContexts();
heap_statistics->number_of_detached_contexts_ =
heap->NumberOfDetachedContexts();
heap_statistics->does_zap_garbage_ = heap->ShouldZapGarbage();
+
+#if V8_ENABLE_WEBASSEMBLY
+ heap_statistics->malloced_memory_ +=
+ isolate->wasm_engine()->allocator()->GetCurrentMemoryUsage();
+ heap_statistics->peak_malloced_memory_ +=
+ isolate->wasm_engine()->allocator()->GetMaxMemoryUsage();
+#endif // V8_ENABLE_WEBASSEMBLY
}
size_t Isolate::NumberOfHeapSpaces() {
@@ -8959,6 +8725,7 @@ void Isolate::LowMemoryNotification() {
int Isolate::ContextDisposedNotification(bool dependant_context) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+#if V8_ENABLE_WEBASSEMBLY
if (!dependant_context) {
if (!isolate->context().is_null()) {
// We left the current context, we can abort all WebAssembly compilations
@@ -8969,6 +8736,7 @@ int Isolate::ContextDisposedNotification(bool dependant_context) {
isolate->native_context());
}
}
+#endif // V8_ENABLE_WEBASSEMBLY
// TODO(ahaas): move other non-heap activity out of the heap call.
return isolate->heap()->NotifyContextDisposed(dependant_context);
}
@@ -9007,6 +8775,11 @@ void Isolate::SetRAILMode(RAILMode rail_mode) {
return isolate->SetRAILMode(rail_mode);
}
+void Isolate::UpdateLoadStartTime() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->UpdateLoadStartTime();
+}
+
void Isolate::IncreaseHeapLimitForDebugging() {
// No-op.
}
@@ -9041,8 +8814,8 @@ void Isolate::GetCodeRange(void** start, size_t* length_in_bytes) {
void Isolate::GetEmbeddedCodeRange(const void** start,
size_t* length_in_bytes) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- i::EmbeddedData d = i::EmbeddedData::FromBlob(isolate);
+ // Note: we should return the embedded code range from the .text section here.
+ i::EmbeddedData d = i::EmbeddedData::FromBlob();
*start = reinterpret_cast<const void*>(d.code());
*length_in_bytes = d.code_size();
}
@@ -9118,6 +8891,15 @@ CALLBACK_SETTER(WasmSimdEnabledCallback, WasmSimdEnabledCallback,
CALLBACK_SETTER(WasmExceptionsEnabledCallback, WasmExceptionsEnabledCallback,
wasm_exceptions_enabled_callback)
+void Isolate::InstallConditionalFeatures(Local<Context> context) {
+#if V8_ENABLE_WEBASSEMBLY
+ v8::HandleScope handle_scope(this);
+ v8::Context::Scope context_scope(context);
+ i::WasmJs::InstallConditionalFeatures(reinterpret_cast<i::Isolate*>(this),
+ Utils::OpenHandle(*context));
+#endif // V8_ENABLE_WEBASSEMBLY
+}
+
void Isolate::AddNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
void* data) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -9249,7 +9031,7 @@ void v8::Isolate::LocaleConfigurationChangeNotification() {
#endif // V8_INTL_SUPPORT
}
-bool v8::Object::IsCodeLike(v8::Isolate* isolate) {
+bool v8::Object::IsCodeLike(v8::Isolate* isolate) const {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, Object, IsCodeLike);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
@@ -9300,18 +9082,21 @@ MicrotasksScope::~MicrotasksScope() {
#endif
}
+// static
void MicrotasksScope::PerformCheckpoint(Isolate* v8_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
auto* microtask_queue = isolate->default_microtask_queue();
microtask_queue->PerformCheckpoint(v8_isolate);
}
+// static
int MicrotasksScope::GetCurrentDepth(Isolate* v8_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
auto* microtask_queue = isolate->default_microtask_queue();
return microtask_queue->GetMicrotasksScopeDepth();
}
+// static
bool MicrotasksScope::IsRunningMicrotasks(Isolate* v8_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
auto* microtask_queue = isolate->default_microtask_queue();
@@ -9959,7 +9744,9 @@ void EmbedderHeapTracer::FinalizeTracing() {
void EmbedderHeapTracer::GarbageCollectionForTesting(
EmbedderStackState stack_state) {
CHECK(isolate_);
- CHECK(i::FLAG_expose_gc);
+ Utils::ApiCheck(i::FLAG_expose_gc,
+ "v8::EmbedderHeapTracer::GarbageCollectionForTesting",
+ "Must use --expose-gc");
i::Heap* const heap = reinterpret_cast<i::Isolate*>(isolate_)->heap();
heap->SetEmbedderStackStateForNextFinalization(stack_state);
heap->PreciseCollectAllGarbage(i::Heap::kNoGCFlags,
@@ -10026,6 +9813,21 @@ CFunction::CFunction(const void* address, const CFunctionInfo* type_info)
CHECK_NOT_NULL(type_info_);
}
+CFunctionInfo::CFunctionInfo(const CTypeInfo& return_info,
+ unsigned int arg_count, const CTypeInfo* arg_info)
+ : return_info_(return_info), arg_count_(arg_count), arg_info_(arg_info) {
+ if (arg_count_ > 0) {
+ for (unsigned int i = 0; i < arg_count_ - 1; ++i) {
+ DCHECK(arg_info_[i].GetType() != CTypeInfo::kCallbackOptionsType);
+ }
+ }
+}
+
+const CTypeInfo& CFunctionInfo::ArgumentInfo(unsigned int index) const {
+ DCHECK_LT(index, ArgumentCount());
+ return arg_info_[index];
+}
+
RegisterState::RegisterState()
: pc(nullptr), sp(nullptr), fp(nullptr), lr(nullptr) {}
RegisterState::~RegisterState() = default;
@@ -10050,6 +9852,42 @@ RegisterState& RegisterState::operator=(const RegisterState& other) {
return *this;
}
+#if !V8_ENABLE_WEBASSEMBLY
+// If WebAssembly is disabled, we still need to provide an implementation of the
+// WasmStreaming API. Since {WasmStreaming::Unpack} will always fail, all
+// methods are unreachable.
+
+class WasmStreaming::WasmStreamingImpl {};
+
+WasmStreaming::WasmStreaming(std::unique_ptr<WasmStreamingImpl>) {
+ UNREACHABLE();
+}
+
+WasmStreaming::~WasmStreaming() = default;
+
+void WasmStreaming::OnBytesReceived(const uint8_t* bytes, size_t size) {
+ UNREACHABLE();
+}
+
+void WasmStreaming::Finish() { UNREACHABLE(); }
+
+void WasmStreaming::Abort(MaybeLocal<Value> exception) { UNREACHABLE(); }
+
+bool WasmStreaming::SetCompiledModuleBytes(const uint8_t* bytes, size_t size) {
+ UNREACHABLE();
+}
+
+void WasmStreaming::SetClient(std::shared_ptr<Client> client) { UNREACHABLE(); }
+
+void WasmStreaming::SetUrl(const char* url, size_t length) { UNREACHABLE(); }
+
+// static
+std::shared_ptr<WasmStreaming> WasmStreaming::Unpack(Isolate* isolate,
+ Local<Value> value) {
+ FATAL("WebAssembly is disabled");
+}
+#endif // !V8_ENABLE_WEBASSEMBLY
+
namespace internal {
const size_t HandleScopeImplementer::kEnteredContextsOffset =
diff --git a/deps/v8/src/api/api.h b/deps/v8/src/api/api.h
index b323f71bd8c..efb25c0e01b 100644
--- a/deps/v8/src/api/api.h
+++ b/deps/v8/src/api/api.h
@@ -135,7 +135,7 @@ class RegisteredExtension {
V(ScriptOrModule, Script) \
V(FixedArray, FixedArray) \
V(ModuleRequest, ModuleRequest) \
- V(WasmMemoryObject, WasmMemoryObject)
+ IF_WASM(V, WasmMemoryObject, WasmMemoryObject)
class Utils {
public:
diff --git a/deps/v8/src/asmjs/OWNERS b/deps/v8/src/asmjs/OWNERS
index c400f97de06..a8dae561423 100644
--- a/deps/v8/src/asmjs/OWNERS
+++ b/deps/v8/src/asmjs/OWNERS
@@ -1,3 +1,2 @@
ahaas@chromium.org
clemensb@chromium.org
-titzer@chromium.org
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
index 6467c40834b..2aecd86b692 100644
--- a/deps/v8/src/asmjs/asm-js.cc
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -223,6 +223,8 @@ class AsmJsCompilationJob final : public UnoptimizedCompilationJob {
};
UnoptimizedCompilationJob::Status AsmJsCompilationJob::ExecuteJobImpl() {
+ DisallowHeapAccess no_heap_access;
+
// Step 1: Translate asm.js module to WebAssembly module.
Zone* compile_zone = &zone_;
Zone translate_zone(allocator_, ZONE_NAME);
diff --git a/deps/v8/src/ast/OWNERS b/deps/v8/src/ast/OWNERS
index 1da57bd30d0..a0077986c61 100644
--- a/deps/v8/src/ast/OWNERS
+++ b/deps/v8/src/ast/OWNERS
@@ -1,8 +1,5 @@
-adamk@chromium.org
-bmeurer@chromium.org
gsathya@chromium.org
leszeks@chromium.org
-littledan@chromium.org
marja@chromium.org
neis@chromium.org
verwaest@chromium.org
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index fcc60f5f133..5515a4a3fcc 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -56,7 +56,6 @@ static const char* NameForNativeContextIntrinsicIndex(uint32_t idx) {
}
void AstNode::Print(Isolate* isolate) {
- AllowHandleDereference allow_deref;
AstPrinter::PrintOut(isolate, this);
}
@@ -243,7 +242,6 @@ std::unique_ptr<char[]> FunctionLiteral::GetDebugName() const {
} else if (raw_inferred_name_ != nullptr && !raw_inferred_name_->IsEmpty()) {
cons_string = raw_inferred_name_;
} else if (!inferred_name_.is_null()) {
- AllowHandleDereference allow_deref;
return inferred_name_->ToCString();
} else {
char* empty_str = new char[1];
@@ -663,7 +661,7 @@ void ArrayLiteral::BuildBoilerplateDescription(LocalIsolate* isolate) {
boilerplate_descriptor_kind(),
GetMoreGeneralElementsKind(boilerplate_descriptor_kind(),
boilerplate_value.OptimalElementsKind(
- GetIsolateForPtrCompr(*elements))));
+ GetPtrComprCageBase(*elements))));
FixedArray::cast(*elements).set(array_index, boilerplate_value);
}
@@ -891,6 +889,22 @@ bool CompareOperation::IsLiteralCompareNull(Expression** expr) {
MatchLiteralCompareNull(right_, op(), left_, expr);
}
+void CallBase::ComputeSpreadPosition() {
+ int arguments_length = arguments_.length();
+ int first_spread_index = 0;
+ for (; first_spread_index < arguments_length; first_spread_index++) {
+ if (arguments_.at(first_spread_index)->IsSpread()) break;
+ }
+ SpreadPosition position;
+ if (first_spread_index == arguments_length - 1) {
+ position = kHasFinalSpread;
+ } else {
+ DCHECK_LT(first_spread_index, arguments_length - 1);
+ position = kHasNonFinalSpread;
+ }
+ bit_field_ |= SpreadPositionField::encode(position);
+}
+
Call::CallType Call::GetCallType() const {
VariableProxy* proxy = expression()->AsVariableProxy();
if (proxy != nullptr) {
@@ -935,22 +949,6 @@ Call::CallType Call::GetCallType() const {
return OTHER_CALL;
}
-void Call::ComputeSpreadPosition() {
- int arguments_length = arguments_.length();
- int first_spread_index = 0;
- for (; first_spread_index < arguments_length; first_spread_index++) {
- if (arguments_.at(first_spread_index)->IsSpread()) break;
- }
- SpreadPosition position;
- if (first_spread_index == arguments_length - 1) {
- position = kHasFinalSpread;
- } else {
- DCHECK_LT(first_spread_index, arguments_length - 1);
- position = kHasNonFinalSpread;
- }
- bit_field_ |= SpreadPositionField::encode(position);
-}
-
CaseClause::CaseClause(Zone* zone, Expression* label,
const ScopedPtrList<Statement>& statements)
: label_(label), statements_(statements.ToConstVector(), zone) {}
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index e9744471acd..e11e6c458ff 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -629,6 +629,11 @@ class ReturnStatement final : public JumpStatement {
return type() == kSyntheticAsyncReturn;
}
+ // This constant is used to indicate that the return position
+ // from the FunctionLiteral should be used when emitting code.
+ static constexpr int kFunctionLiteralReturnPosition = -2;
+ STATIC_ASSERT(kFunctionLiteralReturnPosition == kNoSourcePosition - 1);
+
int end_position() const { return end_position_; }
private:
@@ -1618,11 +1623,44 @@ class Property final : public Expression {
Expression* key_;
};
-class Call final : public Expression {
+class CallBase : public Expression {
public:
Expression* expression() const { return expression_; }
const ZonePtrList<Expression>* arguments() const { return &arguments_; }
+ enum SpreadPosition { kNoSpread, kHasFinalSpread, kHasNonFinalSpread };
+ SpreadPosition spread_position() const {
+ return SpreadPositionField::decode(bit_field_);
+ }
+
+ protected:
+ CallBase(Zone* zone, NodeType type, Expression* expression,
+ const ScopedPtrList<Expression>& arguments, int pos, bool has_spread)
+ : Expression(pos, type),
+ expression_(expression),
+ arguments_(arguments.ToConstVector(), zone) {
+ DCHECK(type == kCall || type == kCallNew);
+ if (has_spread) {
+ ComputeSpreadPosition();
+ } else {
+ bit_field_ |= SpreadPositionField::encode(kNoSpread);
+ }
+ }
+
+ // Only valid to be called if there is a spread in arguments_.
+ void ComputeSpreadPosition();
+
+ using SpreadPositionField = Expression::NextBitField<SpreadPosition, 2>;
+
+ template <class T, int size>
+ using NextBitField = SpreadPositionField::Next<T, size>;
+
+ Expression* expression_;
+ ZonePtrList<Expression> arguments_;
+};
+
+class Call final : public CallBase {
+ public:
bool is_possibly_eval() const {
return IsPossiblyEvalField::decode(bit_field_);
}
@@ -1635,16 +1673,6 @@ class Call final : public Expression {
return IsOptionalChainLinkField::decode(bit_field_);
}
- enum SpreadPosition { kNoSpread, kHasFinalSpread, kHasNonFinalSpread };
- SpreadPosition spread_position() const {
- return SpreadPositionField::decode(bit_field_);
- }
-
- // TODO(syg): Remove this and its users.
- bool only_last_arg_is_spread() {
- return !arguments_.is_empty() && arguments_.last()->IsSpread();
- }
-
enum CallType {
GLOBAL_CALL,
WITH_CALL,
@@ -1677,63 +1705,35 @@ class Call final : public Expression {
Call(Zone* zone, Expression* expression,
const ScopedPtrList<Expression>& arguments, int pos, bool has_spread,
PossiblyEval possibly_eval, bool optional_chain)
- : Expression(pos, kCall),
- expression_(expression),
- arguments_(arguments.ToConstVector(), zone) {
+ : CallBase(zone, kCall, expression, arguments, pos, has_spread) {
bit_field_ |=
IsPossiblyEvalField::encode(possibly_eval == IS_POSSIBLY_EVAL) |
IsTaggedTemplateField::encode(false) |
- IsOptionalChainLinkField::encode(optional_chain) |
- SpreadPositionField::encode(kNoSpread);
- if (has_spread) ComputeSpreadPosition();
+ IsOptionalChainLinkField::encode(optional_chain);
}
Call(Zone* zone, Expression* expression,
const ScopedPtrList<Expression>& arguments, int pos,
TaggedTemplateTag tag)
- : Expression(pos, kCall),
- expression_(expression),
- arguments_(arguments.ToConstVector(), zone) {
+ : CallBase(zone, kCall, expression, arguments, pos, false) {
bit_field_ |= IsPossiblyEvalField::encode(false) |
IsTaggedTemplateField::encode(true) |
- IsOptionalChainLinkField::encode(false) |
- SpreadPositionField::encode(kNoSpread);
+ IsOptionalChainLinkField::encode(false);
}
- // Only valid to be called if there is a spread in arguments_.
- void ComputeSpreadPosition();
-
- using IsPossiblyEvalField = Expression::NextBitField<bool, 1>;
+ using IsPossiblyEvalField = CallBase::NextBitField<bool, 1>;
using IsTaggedTemplateField = IsPossiblyEvalField::Next<bool, 1>;
using IsOptionalChainLinkField = IsTaggedTemplateField::Next<bool, 1>;
- using SpreadPositionField = IsOptionalChainLinkField::Next<SpreadPosition, 2>;
-
- Expression* expression_;
- ZonePtrList<Expression> arguments_;
};
-
-class CallNew final : public Expression {
- public:
- Expression* expression() const { return expression_; }
- const ZonePtrList<Expression>* arguments() const { return &arguments_; }
-
- bool only_last_arg_is_spread() {
- return !arguments_.is_empty() && arguments_.last()->IsSpread();
- }
-
+class CallNew final : public CallBase {
private:
friend class AstNodeFactory;
friend Zone;
CallNew(Zone* zone, Expression* expression,
- const ScopedPtrList<Expression>& arguments, int pos)
- : Expression(pos, kCallNew),
- expression_(expression),
- arguments_(arguments.ToConstVector(), zone) {}
-
- Expression* expression_;
- ZonePtrList<Expression> arguments_;
+ const ScopedPtrList<Expression>& arguments, int pos, bool has_spread)
+ : CallBase(zone, kCallNew, expression, arguments, pos, has_spread) {}
};
// The CallRuntime class does not represent any official JavaScript
@@ -2864,20 +2864,22 @@ class AstNodeFactory final {
return zone_->New<BreakStatement>(target, pos);
}
- ReturnStatement* NewReturnStatement(Expression* expression, int pos,
- int end_position = kNoSourcePosition) {
+ ReturnStatement* NewReturnStatement(
+ Expression* expression, int pos,
+ int end_position = ReturnStatement::kFunctionLiteralReturnPosition) {
return zone_->New<ReturnStatement>(expression, ReturnStatement::kNormal,
pos, end_position);
}
- ReturnStatement* NewAsyncReturnStatement(
- Expression* expression, int pos, int end_position = kNoSourcePosition) {
+ ReturnStatement* NewAsyncReturnStatement(Expression* expression, int pos,
+ int end_position) {
return zone_->New<ReturnStatement>(
expression, ReturnStatement::kAsyncReturn, pos, end_position);
}
ReturnStatement* NewSyntheticAsyncReturnStatement(
- Expression* expression, int pos, int end_position = kNoSourcePosition) {
+ Expression* expression, int pos,
+ int end_position = ReturnStatement::kFunctionLiteralReturnPosition) {
return zone_->New<ReturnStatement>(
expression, ReturnStatement::kSyntheticAsyncReturn, pos, end_position);
}
@@ -3092,8 +3094,9 @@ class AstNodeFactory final {
}
CallNew* NewCallNew(Expression* expression,
- const ScopedPtrList<Expression>& arguments, int pos) {
- return zone_->New<CallNew>(zone_, expression, arguments, pos);
+ const ScopedPtrList<Expression>& arguments, int pos,
+ bool has_spread) {
+ return zone_->New<CallNew>(zone_, expression, arguments, pos, has_spread);
}
CallRuntime* NewCallRuntime(Runtime::FunctionId id,
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index 4e396c457f7..de9b25a5c59 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -296,7 +296,9 @@ Scope::Scope(Zone* zone, const AstRawString* catch_variable_name,
void DeclarationScope::SetDefaults() {
is_declaration_scope_ = true;
has_simple_parameters_ = true;
+#if V8_ENABLE_WEBASSEMBLY
is_asm_module_ = false;
+#endif // V8_ENABLE_WEBASSEMBLY
force_eager_compilation_ = false;
has_arguments_parameter_ = false;
uses_super_property_ = false;
@@ -373,6 +375,7 @@ void DeclarationScope::set_should_eager_compile() {
should_eager_compile_ = !was_lazily_parsed_;
}
+#if V8_ENABLE_WEBASSEMBLY
void DeclarationScope::set_is_asm_module() { is_asm_module_ = true; }
bool Scope::IsAsmModule() const {
@@ -393,6 +396,7 @@ bool Scope::ContainsAsmModule() const {
return false;
}
+#endif // V8_ENABLE_WEBASSEMBLY
Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
ScopeInfo scope_info,
@@ -430,9 +434,11 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
} else if (scope_info.scope_type() == FUNCTION_SCOPE) {
outer_scope = zone->New<DeclarationScope>(
zone, FUNCTION_SCOPE, ast_value_factory, handle(scope_info, isolate));
+#if V8_ENABLE_WEBASSEMBLY
if (scope_info.IsAsmModule()) {
outer_scope->AsDeclarationScope()->set_is_asm_module();
}
+#endif // V8_ENABLE_WEBASSEMBLY
} else if (scope_info.scope_type() == EVAL_SCOPE) {
outer_scope = zone->New<DeclarationScope>(
zone, EVAL_SCOPE, ast_value_factory, handle(scope_info, isolate));
@@ -1850,7 +1856,9 @@ void Scope::Print(int n) {
if (is_strict(language_mode())) {
Indent(n1, "// strict mode scope\n");
}
+#if V8_ENABLE_WEBASSEMBLY
if (IsAsmModule()) Indent(n1, "// scope is an asm module\n");
+#endif // V8_ENABLE_WEBASSEMBLY
if (is_declaration_scope() &&
AsDeclarationScope()->sloppy_eval_can_extend_vars()) {
Indent(n1, "// scope calls sloppy 'eval'\n");
@@ -2501,7 +2509,10 @@ void Scope::AllocateVariablesRecursively() {
// scope.
bool must_have_context =
scope->is_with_scope() || scope->is_module_scope() ||
- scope->IsAsmModule() || scope->ForceContextForLanguageMode() ||
+#if V8_ENABLE_WEBASSEMBLY
+ scope->IsAsmModule() ||
+#endif // V8_ENABLE_WEBASSEMBLY
+ scope->ForceContextForLanguageMode() ||
(scope->is_function_scope() &&
scope->AsDeclarationScope()->sloppy_eval_can_extend_vars()) ||
(scope->is_block_scope() && scope->is_declaration_scope() &&
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index eb97c95b328..717c797383b 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -389,10 +389,14 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
bool private_name_lookup_skips_outer_class() const {
return private_name_lookup_skips_outer_class_;
}
+
+#if V8_ENABLE_WEBASSEMBLY
bool IsAsmModule() const;
// Returns true if this scope or any inner scopes that might be eagerly
// compiled are asm modules.
bool ContainsAsmModule() const;
+#endif // V8_ENABLE_WEBASSEMBLY
+
// Does this scope have the potential to execute declarations non-linearly?
bool is_nonlinear() const { return scope_nonlinear_; }
// Returns if we need to force a context because the current scope is stricter
@@ -972,8 +976,10 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
scope_info_ = scope_info;
}
+#if V8_ENABLE_WEBASSEMBLY
bool is_asm_module() const { return is_asm_module_; }
void set_is_asm_module();
+#endif // V8_ENABLE_WEBASSEMBLY
bool should_ban_arguments() const {
return IsClassMembersInitializerFunction(function_kind());
@@ -1242,8 +1248,10 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
void RecalcPrivateNameContextChain();
bool has_simple_parameters_ : 1;
+#if V8_ENABLE_WEBASSEMBLY
// This scope contains a "use asm" annotation.
bool is_asm_module_ : 1;
+#endif // V8_ENABLE_WEBASSEMBLY
bool force_eager_compilation_ : 1;
// This function scope has a rest parameter.
bool has_rest_ : 1;
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc
index 36f2e5f5e3b..17ef42a299a 100644
--- a/deps/v8/src/base/cpu.cc
+++ b/deps/v8/src/base/cpu.cc
@@ -416,8 +416,8 @@ CPU::CPU()
architecture_(0),
variant_(-1),
part_(0),
- icache_line_size_(UNKNOWN_CACHE_LINE_SIZE),
- dcache_line_size_(UNKNOWN_CACHE_LINE_SIZE),
+ icache_line_size_(kUnknownCacheLineSize),
+ dcache_line_size_(kUnknownCacheLineSize),
has_fpu_(false),
has_cmov_(false),
has_sahf_(false),
@@ -446,6 +446,7 @@ CPU::CPU()
has_jscvt_(false),
is_fp64_mode_(false),
has_non_stop_time_stamp_counter_(false),
+ is_running_in_vm_(false),
has_msa_(false) {
base::Memcpy(vendor_, "Unknown", 8);
@@ -500,6 +501,12 @@ CPU::CPU()
has_avx_ = (cpu_info[2] & 0x10000000) != 0;
has_avx2_ = (cpu_info7[1] & 0x00000020) != 0;
has_fma3_ = (cpu_info[2] & 0x00001000) != 0;
+ // "Hypervisor Present Bit: Bit 31 of ECX of CPUID leaf 0x1."
+ // See https://lwn.net/Articles/301888/
+ // This checks for any hypervisor. Hypervisors may choose not to announce
+ // themselves. Hypervisors trap CPUID and sometimes return results that
+ // differ from the underlying hardware.
+ is_running_in_vm_ = (cpu_info[2] & 0x80000000) != 0;
if (family_ == 0x6) {
switch (model_) {
@@ -544,6 +551,23 @@ CPU::CPU()
has_non_stop_time_stamp_counter_ = (cpu_info[3] & (1 << 8)) != 0;
}
+ // This logic is replicated from cpu.cc in Chromium.
+ if (!has_non_stop_time_stamp_counter_ && is_running_in_vm_) {
+ int cpu_info_hv[4] = {};
+ __cpuid(cpu_info_hv, 0x40000000);
+ if (cpu_info_hv[1] == 0x7263694D && // Micr
+ cpu_info_hv[2] == 0x666F736F && // osof
+ cpu_info_hv[3] == 0x76482074) { // t Hv
+ // If CPUID says we have a variant TSC and a hypervisor has identified
+ // itself and the hypervisor says it is Microsoft Hyper-V, then treat
+ // TSC as invariant.
+ //
+ // Microsoft Hyper-V hypervisor reports variant TSC as there are some
+ // scenarios (e.g. VM live migration) where the TSC is variant, but for
+ // our purposes we can treat it as invariant.
+ has_non_stop_time_stamp_counter_ = true;
+ }
+ }
#elif V8_HOST_ARCH_ARM
#if V8_OS_LINUX
@@ -784,45 +808,45 @@ CPU::CPU()
part_ = -1;
if (auxv_cpu_type) {
if (strcmp(auxv_cpu_type, "power10") == 0) {
- part_ = PPC_POWER10;
- }
- else if (strcmp(auxv_cpu_type, "power9") == 0) {
- part_ = PPC_POWER9;
+ part_ = kPPCPower10;
+ } else if (strcmp(auxv_cpu_type, "power9") == 0) {
+ part_ = kPPCPower9;
} else if (strcmp(auxv_cpu_type, "power8") == 0) {
- part_ = PPC_POWER8;
+ part_ = kPPCPower8;
} else if (strcmp(auxv_cpu_type, "power7") == 0) {
- part_ = PPC_POWER7;
+ part_ = kPPCPower7;
} else if (strcmp(auxv_cpu_type, "power6") == 0) {
- part_ = PPC_POWER6;
+ part_ = kPPCPower6;
} else if (strcmp(auxv_cpu_type, "power5") == 0) {
- part_ = PPC_POWER5;
+ part_ = kPPCPower5;
} else if (strcmp(auxv_cpu_type, "ppc970") == 0) {
- part_ = PPC_G5;
+ part_ = kPPCG5;
} else if (strcmp(auxv_cpu_type, "ppc7450") == 0) {
- part_ = PPC_G4;
+ part_ = kPPCG4;
} else if (strcmp(auxv_cpu_type, "pa6t") == 0) {
- part_ = PPC_PA6T;
+ part_ = kPPCPA6T;
}
}
#elif V8_OS_AIX
switch (_system_configuration.implementation) {
case POWER_10:
- part_ = PPC_POWER10;
+ part_ = kPPCPower10;
+ break;
case POWER_9:
- part_ = PPC_POWER9;
+ part_ = kPPCPower9;
break;
case POWER_8:
- part_ = PPC_POWER8;
+ part_ = kPPCPower8;
break;
case POWER_7:
- part_ = PPC_POWER7;
+ part_ = kPPCPower7;
break;
case POWER_6:
- part_ = PPC_POWER6;
+ part_ = kPPCPower6;
break;
case POWER_5:
- part_ = PPC_POWER5;
+ part_ = kPPCPower5;
break;
}
#endif // V8_OS_AIX
diff --git a/deps/v8/src/base/cpu.h b/deps/v8/src/base/cpu.h
index bfb7d7818a5..a2a4d38e1c1 100644
--- a/deps/v8/src/base/cpu.h
+++ b/deps/v8/src/base/cpu.h
@@ -44,43 +44,43 @@ class V8_BASE_EXPORT CPU final {
// arm implementer/part information
int implementer() const { return implementer_; }
- static const int ARM = 0x41;
- static const int NVIDIA = 0x4e;
- static const int QUALCOMM = 0x51;
+ static const int kArm = 0x41;
+ static const int kNvidia = 0x4e;
+ static const int kQualcomm = 0x51;
int architecture() const { return architecture_; }
int variant() const { return variant_; }
- static const int NVIDIA_DENVER = 0x0;
+ static const int kNvidiaDenver = 0x0;
int part() const { return part_; }
// ARM-specific part codes
- static const int ARM_CORTEX_A5 = 0xc05;
- static const int ARM_CORTEX_A7 = 0xc07;
- static const int ARM_CORTEX_A8 = 0xc08;
- static const int ARM_CORTEX_A9 = 0xc09;
- static const int ARM_CORTEX_A12 = 0xc0c;
- static const int ARM_CORTEX_A15 = 0xc0f;
+ static const int kArmCortexA5 = 0xc05;
+ static const int kArmCortexA7 = 0xc07;
+ static const int kArmCortexA8 = 0xc08;
+ static const int kArmCortexA9 = 0xc09;
+ static const int kArmCortexA12 = 0xc0c;
+ static const int kArmCortexA15 = 0xc0f;
// Denver-specific part code
- static const int NVIDIA_DENVER_V10 = 0x002;
+ static const int kNvidiaDenverV10 = 0x002;
// PPC-specific part codes
enum {
- PPC_POWER5,
- PPC_POWER6,
- PPC_POWER7,
- PPC_POWER8,
- PPC_POWER9,
- PPC_POWER10,
- PPC_G4,
- PPC_G5,
- PPC_PA6T
+ kPPCPower5,
+ kPPCPower6,
+ kPPCPower7,
+ kPPCPower8,
+ kPPCPower9,
+ kPPCPower10,
+ kPPCG4,
+ kPPCG5,
+ kPPCPA6T
};
// General features
bool has_fpu() const { return has_fpu_; }
int icache_line_size() const { return icache_line_size_; }
int dcache_line_size() const { return dcache_line_size_; }
- static const int UNKNOWN_CACHE_LINE_SIZE = 0;
+ static const int kUnknownCacheLineSize = 0;
// x86 features
bool has_cmov() const { return has_cmov_; }
@@ -104,6 +104,7 @@ class V8_BASE_EXPORT CPU final {
bool has_non_stop_time_stamp_counter() const {
return has_non_stop_time_stamp_counter_;
}
+ bool is_running_in_vm() const { return is_running_in_vm_; }
// arm features
bool has_idiva() const { return has_idiva_; }
@@ -160,6 +161,7 @@ class V8_BASE_EXPORT CPU final {
bool has_jscvt_;
bool is_fp64_mode_;
bool has_non_stop_time_stamp_counter_;
+ bool is_running_in_vm_;
bool has_msa_;
};
diff --git a/deps/v8/src/base/immediate-crash.h b/deps/v8/src/base/immediate-crash.h
new file mode 100644
index 00000000000..ef1f9223177
--- /dev/null
+++ b/deps/v8/src/base/immediate-crash.h
@@ -0,0 +1,162 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_IMMEDIATE_CRASH_H_
+#define V8_BASE_IMMEDIATE_CRASH_H_
+
+#include "include/v8config.h"
+#include "src/base/build_config.h"
+
+// Crashes in the fastest possible way with no attempt at logging.
+// There are several constraints; see http://crbug.com/664209 for more context.
+//
+// - TRAP_SEQUENCE_() must be fatal. It should not be possible to ignore the
+// resulting exception or simply hit 'continue' to skip over it in a debugger.
+// - Different instances of TRAP_SEQUENCE_() must not be folded together, to
+// ensure crash reports are debuggable. Unlike __builtin_trap(), asm volatile
+// blocks will not be folded together.
+// Note: TRAP_SEQUENCE_() previously required an instruction with a unique
+// nonce since unlike clang, GCC folds together identical asm volatile
+// blocks.
+// - TRAP_SEQUENCE_() must produce a signal that is distinct from an invalid
+// memory access.
+// - TRAP_SEQUENCE_() must be treated as a set of noreturn instructions.
+// __builtin_unreachable() is used to provide that hint here. clang also uses
+// this as a heuristic to pack the instructions in the function epilogue to
+// improve code density.
+//
+// Additional properties that are nice to have:
+// - TRAP_SEQUENCE_() should be as compact as possible.
+// - The first instruction of TRAP_SEQUENCE_() should not change, to avoid
+// shifting crash reporting clusters. As a consequence of this, explicit
+// assembly is preferred over intrinsics.
+// Note: this last bullet point may no longer be true, and may be removed in
+// the future.
+
+// Note: TRAP_SEQUENCE is currently split into two macro helpers because clang
+// emits an actual instruction for __builtin_unreachable() on certain
+// platforms (see https://crbug.com/958675). In addition, the int3/bkpt/brk will
+// be removed in followups, so splitting it up like this now makes it easy to
+// land the followups.
+
+#if V8_CC_GNU
+
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
+
+// TODO(https://crbug.com/958675): In theory, it should be possible to use just
+// int3. However, there are a number of crashes with SIGILL as the exception
+// code, so it seems likely that there's a signal handler that allows execution
+// to continue after SIGTRAP.
+#define TRAP_SEQUENCE1_() asm volatile("int3")
+
+#if V8_OS_MACOSX
+// Intentionally empty: __builtin_unreachable() is always part of the sequence
+// (see IMMEDIATE_CRASH below) and already emits a ud2 on Mac.
+#define TRAP_SEQUENCE2_() asm volatile("")
+#else
+#define TRAP_SEQUENCE2_() asm volatile("ud2")
+#endif // V8_OS_MACOSX
+
+#elif V8_HOST_ARCH_ARM
+
+// bkpt will generate a SIGBUS when running on armv7 and a SIGTRAP when running
+// as a 32 bit userspace app on arm64. There doesn't seem to be any way to
+// cause a SIGTRAP from userspace without using a syscall (which would be a
+// problem for sandboxing).
+// TODO(https://crbug.com/958675): Remove bkpt from this sequence.
+#define TRAP_SEQUENCE1_() asm volatile("bkpt #0")
+#define TRAP_SEQUENCE2_() asm volatile("udf #0")
+
+#elif V8_HOST_ARCH_ARM64
+
+// This will always generate a SIGTRAP on arm64.
+// TODO(https://crbug.com/958675): Remove brk from this sequence.
+#define TRAP_SEQUENCE1_() asm volatile("brk #0")
+#define TRAP_SEQUENCE2_() asm volatile("hlt #0")
+
+#else
+
+// Crash report accuracy will not be guaranteed on other architectures, but at
+// least this will crash as expected.
+#define TRAP_SEQUENCE1_() __builtin_trap()
+#define TRAP_SEQUENCE2_() asm volatile("")
+
+#endif // V8_HOST_ARCH_*
+
+#elif V8_CC_MSVC
+
+#if !defined(__clang__)
+
+// MSVC x64 doesn't support inline asm, so use the MSVC intrinsic.
+#define TRAP_SEQUENCE1_() __debugbreak()
+#define TRAP_SEQUENCE2_()
+
+#elif V8_HOST_ARCH_ARM64
+
+// Windows ARM64 uses "BRK #F000" as its breakpoint instruction, and
+// __debugbreak() generates that in both VC++ and clang.
+#define TRAP_SEQUENCE1_() __debugbreak()
+// Intentionally empty: __builtin_unreachable() is always part of the sequence
+// (see IMMEDIATE_CRASH below) and already emits a ud2 on Win64,
+// https://crbug.com/958373
+#define TRAP_SEQUENCE2_() __asm volatile("")
+
+#else
+
+#define TRAP_SEQUENCE1_() asm volatile("int3")
+#define TRAP_SEQUENCE2_() asm volatile("ud2")
+
+#endif // __clang__
+
+#else
+
+#error No supported trap sequence!
+
+#endif // V8_CC_GNU
+
+#define TRAP_SEQUENCE_() \
+ do { \
+ TRAP_SEQUENCE1_(); \
+ TRAP_SEQUENCE2_(); \
+ } while (false)
+
+// CHECK() and the trap sequence can be invoked from a constexpr function.
+// This could make compilation fail on GCC, as it forbids directly using inline
+// asm inside a constexpr function. However, it allows calling a lambda
+// expression including the same asm.
+// The side effect is that the top of the stacktrace will not point to the
+// calling function, but to this anonymous lambda. This is still useful as the
+// full name of the lambda will typically include the name of the function that
+// calls CHECK() and the debugger will still break at the right line of code.
+#if !V8_CC_GNU
+
+#define WRAPPED_TRAP_SEQUENCE_() TRAP_SEQUENCE_()
+
+#else
+
+#define WRAPPED_TRAP_SEQUENCE_() \
+ do { \
+ [] { TRAP_SEQUENCE_(); }(); \
+ } while (false)
+
+#endif // !V8_CC_GNU
+
+#if defined(__clang__) || V8_CC_GCC
+
+// __builtin_unreachable() hints to the compiler that this is noreturn and can
+// be packed in the function epilogue.
+#define IMMEDIATE_CRASH() \
+ ({ \
+ WRAPPED_TRAP_SEQUENCE_(); \
+ __builtin_unreachable(); \
+ })
+
+#else
+
+// This supports building with MSVC, where there is no __builtin_unreachable().
+#define IMMEDIATE_CRASH() WRAPPED_TRAP_SEQUENCE_()
+
+#endif // defined(__clang__) || V8_CC_GCC
+
+#endif // V8_BASE_IMMEDIATE_CRASH_H_
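For orientation, later hunks in this patch switch OS::Abort() and the official-build FATAL(msg) path over to this macro. A minimal call site, sketched here rather than taken from the diff, would look like:

#include <cstdlib>

#include "src/base/immediate-crash.h"

// Sketch mirroring the platform-posix.cc hunk below: trap hard when asked to,
// otherwise fall back to a normal abort().
void AbortExample(bool hard_abort) {
  if (hard_abort) {
    IMMEDIATE_CRASH();  // expands to the per-platform trap sequence above
  }
  std::abort();
}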
diff --git a/deps/v8/src/base/logging.cc b/deps/v8/src/base/logging.cc
index 9e1cf59f64b..a2766477de7 100644
--- a/deps/v8/src/base/logging.cc
+++ b/deps/v8/src/base/logging.cc
@@ -167,15 +167,6 @@ void V8_Fatal(const char* format, ...) {
v8::base::OS::Abort();
}
-#if !defined(DEBUG) && defined(OFFICIAL_BUILD)
-void V8_FatalNoContext() {
- v8::base::OS::PrintError("V8 CHECK or FATAL\n");
- if (v8::base::g_print_stack_trace) v8::base::g_print_stack_trace();
- fflush(stderr);
- v8::base::OS::Abort();
-}
-#endif
-
void V8_Dcheck(const char* file, int line, const char* message) {
v8::base::g_dcheck_function(file, line, message);
}
diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h
index fe39f988225..2c4c536cf32 100644
--- a/deps/v8/src/base/logging.h
+++ b/deps/v8/src/base/logging.h
@@ -12,6 +12,7 @@
#include "src/base/base-export.h"
#include "src/base/build_config.h"
#include "src/base/compiler-specific.h"
+#include "src/base/immediate-crash.h"
#include "src/base/template-utils.h"
V8_BASE_EXPORT V8_NOINLINE void V8_Dcheck(const char* file, int line,
@@ -24,28 +25,25 @@ V8_BASE_EXPORT V8_NOINLINE void V8_Dcheck(const char* file, int line,
void V8_Fatal(const char* file, int line, const char* format, ...);
#define FATAL(...) V8_Fatal(__FILE__, __LINE__, __VA_ARGS__)
-#elif !defined(OFFICIAL_BUILD)
+#else
+[[noreturn]] PRINTF_FORMAT(1, 2) V8_BASE_EXPORT V8_NOINLINE
+ void V8_Fatal(const char* format, ...);
+#if !defined(OFFICIAL_BUILD)
// In non-official release, include full error message, but drop file & line
// numbers. It saves binary size to drop the |file| & |line| as opposed to just
// passing in "", 0 for them.
-[[noreturn]] PRINTF_FORMAT(1, 2) V8_BASE_EXPORT V8_NOINLINE
- void V8_Fatal(const char* format, ...);
#define FATAL(...) V8_Fatal(__VA_ARGS__)
#else
-// In official builds, include only messages that contain parameters because
-// single-message errors can always be derived from stack traces.
-[[noreturn]] V8_BASE_EXPORT V8_NOINLINE void V8_FatalNoContext();
-[[noreturn]] PRINTF_FORMAT(1, 2) V8_BASE_EXPORT V8_NOINLINE
- void V8_Fatal(const char* format, ...);
-// FATAL(msg) -> V8_FatalNoContext()
-// FATAL(msg, ...) -> V8_Fatal()
+// FATAL(msg) -> IMMEDIATE_CRASH()
+// FATAL(msg, ...) -> V8_Fatal(msg, ...)
#define FATAL_HELPER(_7, _6, _5, _4, _3, _2, _1, _0, ...) _0
-#define FATAL_DISCARD_ARG(arg) V8_FatalNoContext()
+#define FATAL_DISCARD_ARG(arg) IMMEDIATE_CRASH()
#define FATAL(...) \
FATAL_HELPER(__VA_ARGS__, V8_Fatal, V8_Fatal, V8_Fatal, V8_Fatal, V8_Fatal, \
- V8_Fatal, V8_Fatal, FATAL_DISCARD_ARG) \
+ V8_Fatal, FATAL_DISCARD_ARG) \
(__VA_ARGS__)
-#endif
+#endif // !defined(OFFICIAL_BUILD)
+#endif // DEBUG
#define UNIMPLEMENTED() FATAL("unimplemented code")
#define UNREACHABLE() FATAL("unreachable code")
@@ -53,6 +51,8 @@ V8_BASE_EXPORT V8_NOINLINE void V8_Dcheck(const char* file, int line,
namespace v8 {
namespace base {
+class CheckMessageStream : public std::ostringstream {};
+
// Overwrite the default function that prints a stack trace.
V8_BASE_EXPORT void SetPrintStackTrace(void (*print_stack_trace_)());
@@ -140,16 +140,37 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
#define CONSTEXPR_DCHECK(cond)
#endif
+namespace detail {
+template <typename... Ts>
+std::string PrintToString(Ts&&... ts) {
+ CheckMessageStream oss;
+ int unused_results[]{((oss << std::forward<Ts>(ts)), 0)...};
+ (void)unused_results; // Avoid "unused variable" warning.
+ return oss.str();
+}
+
+template <typename T>
+auto GetUnderlyingEnumTypeForPrinting(T val) {
+ using underlying_t = typename std::underlying_type<T>::type;
+ // For single-byte enums, return a 16-bit integer to avoid printing the value
+ // as a character.
+ using int_t = typename std::conditional_t<
+ sizeof(underlying_t) != 1, underlying_t,
+ std::conditional_t<std::is_signed<underlying_t>::value, int16_t,
+ uint16_t> >;
+ return static_cast<int_t>(static_cast<underlying_t>(val));
+}
+} // namespace detail
+
// Define PrintCheckOperand<T> for each T which defines operator<< for ostream.
template <typename T>
typename std::enable_if<
!std::is_function<typename std::remove_pointer<T>::type>::value &&
- has_output_operator<T>::value,
+ !std::is_enum<T>::value &&
+ has_output_operator<T, CheckMessageStream>::value,
std::string>::type
PrintCheckOperand(T val) {
- std::ostringstream oss;
- oss << std::forward<T>(val);
- return oss.str();
+ return detail::PrintToString(std::forward<T>(val));
}
// Provide an overload for functions and function pointers. Function pointers
@@ -165,23 +186,39 @@ PrintCheckOperand(T val) {
return PrintCheckOperand(reinterpret_cast<const void*>(val));
}
-// Define PrintCheckOperand<T> for enums which have no operator<<.
+// Define PrintCheckOperand<T> for enums with an output operator.
template <typename T>
-typename std::enable_if<
- std::is_enum<T>::value && !has_output_operator<T>::value, std::string>::type
+typename std::enable_if<std::is_enum<T>::value &&
+ has_output_operator<T, CheckMessageStream>::value,
+ std::string>::type
PrintCheckOperand(T val) {
- using underlying_t = typename std::underlying_type<T>::type;
- // 8-bit types are not printed as number, so extend them to 16 bit.
- using int_t = typename std::conditional<
- std::is_same<underlying_t, uint8_t>::value, uint16_t,
- typename std::conditional<std::is_same<underlying_t, int8_t>::value,
- int16_t, underlying_t>::type>::type;
- return PrintCheckOperand(static_cast<int_t>(static_cast<underlying_t>(val)));
+ std::string val_str = detail::PrintToString(val);
+ std::string int_str =
+ detail::PrintToString(detail::GetUnderlyingEnumTypeForPrinting(val));
+ // Printing the original enum might have printed a single non-printable
+ // character. Ignore it in that case. Also ignore if it printed the same as
+ // the integral representation.
+ // TODO(clemensb): Can we somehow statically find out if the output operator
+ // is the default one, printing the integral value?
+ if ((val_str.length() == 1 && !std::isprint(val_str[0])) ||
+ val_str == int_str) {
+ return int_str;
+ }
+ return detail::PrintToString(val_str, " (", int_str, ")");
+}
+
+// Define PrintCheckOperand<T> for enums without an output operator.
+template <typename T>
+typename std::enable_if<std::is_enum<T>::value &&
+ !has_output_operator<T, CheckMessageStream>::value,
+ std::string>::type
+PrintCheckOperand(T val) {
+ return detail::PrintToString(detail::GetUnderlyingEnumTypeForPrinting(val));
}
// Define default PrintCheckOperand<T> for non-printable types.
template <typename T>
-typename std::enable_if<!has_output_operator<T>::value &&
+typename std::enable_if<!has_output_operator<T, CheckMessageStream>::value &&
!std::is_enum<T>::value,
std::string>::type
PrintCheckOperand(T val) {
@@ -210,7 +247,7 @@ template <typename Lhs, typename Rhs>
V8_NOINLINE std::string* MakeCheckOpString(Lhs lhs, Rhs rhs, char const* msg) {
std::string lhs_str = PrintCheckOperand<Lhs>(lhs);
std::string rhs_str = PrintCheckOperand<Rhs>(rhs);
- std::ostringstream ss;
+ CheckMessageStream ss;
ss << msg;
constexpr size_t kMaxInlineLength = 50;
if (lhs_str.size() <= kMaxInlineLength &&
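To make the new enum handling concrete, here is a hedged illustration (hypothetical enum, not part of this patch) of what PrintCheckOperand now produces for CHECK failure messages:

#include <cstdint>
#include <ostream>

// Hypothetical enum used only to illustrate the behavior added above.
enum class Phase : uint8_t { kParse = 1, kCompile = 2 };
std::ostream& operator<<(std::ostream& os, Phase p) {
  return os << (p == Phase::kParse ? "parse" : "compile");
}

// PrintCheckOperand(Phase::kParse) now yields "parse (1)": the streamed name
// plus the underlying integer, widened to 16 bits so single-byte enums are not
// rendered as characters. An enum without operator<< falls back to the integer
// alone.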
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index 9079d156628..248a23a1f42 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -183,12 +183,6 @@ V8_INLINE Dest bit_cast(Source const& source) {
#define DISABLE_CFI_ICALL V8_CLANG_NO_SANITIZE("cfi-icall")
#endif
-#if V8_CC_GNU
-#define V8_IMMEDIATE_CRASH() __builtin_trap()
-#else
-#define V8_IMMEDIATE_CRASH() ((void(*)())0)()
-#endif
-
// A convenience wrapper around static_assert without a string message argument.
// Once C++17 becomes the default, this macro can be removed in favor of the
// new static_assert(condition) overload.
@@ -417,4 +411,13 @@ bool is_inbounds(float_t v) {
#endif // V8_OS_WIN
+// Defines IF_WASM, to be used in macro lists for elements that should only be
+// there if WebAssembly is enabled.
+#if V8_ENABLE_WEBASSEMBLY
+// EXPAND is needed to work around MSVC's broken __VA_ARGS__ expansion.
+#define IF_WASM(V, ...) EXPAND(V(__VA_ARGS__))
+#else
+#define IF_WASM(V, ...)
+#endif // V8_ENABLE_WEBASSEMBLY
+
#endif // V8_BASE_MACROS_H_
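The api.h hunk earlier in this diff shows the intended use of IF_WASM; schematically (an illustrative list, not real V8 code):

// With V8_ENABLE_WEBASSEMBLY defined, IF_WASM(V, ...) expands to V(...);
// otherwise the entry drops out of the macro list entirely.
#define EXAMPLE_TYPE_LIST(V)  \
  V(FixedArray, FixedArray)   \
  IF_WASM(V, WasmMemoryObject, WasmMemoryObject)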
diff --git a/deps/v8/src/base/overflowing-math.h b/deps/v8/src/base/overflowing-math.h
index 14dcfb10de1..7ca58aefec4 100644
--- a/deps/v8/src/base/overflowing-math.h
+++ b/deps/v8/src/base/overflowing-math.h
@@ -83,13 +83,6 @@ inline float RecipSqrt(float a) {
return -std::numeric_limits<float>::infinity();
}
-template <typename T>
-inline T RoundingAverageUnsigned(T a, T b) {
- static_assert(std::is_unsigned<T>::value, "Only for unsiged types");
- static_assert(sizeof(T) < sizeof(uint64_t), "Must be smaller than uint64_t");
- return (static_cast<uint64_t>(a) + static_cast<uint64_t>(b) + 1) >> 1;
-}
-
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/OWNERS b/deps/v8/src/base/platform/OWNERS
index 782eb7c6844..80c7739180a 100644
--- a/deps/v8/src/base/platform/OWNERS
+++ b/deps/v8/src/base/platform/OWNERS
@@ -1,5 +1,6 @@
hpayer@chromium.org
mlippautz@chromium.org
ulan@chromium.org
+victorgomes@chromium.org
per-file platform-fuchsia.cc=wez@chromium.org
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index dd34d69f573..9538d81671c 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -121,7 +121,7 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
DCHECK_EQ(0, size % CommitPageSize());
uint32_t prot = GetProtectionFromMemoryPermission(access);
- return zx::vmar::root_self()->protect2(
+ return zx::vmar::root_self()->protect(
prot, reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
}
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 9e3a04579f5..ee787f7d9ab 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -499,7 +499,7 @@ void OS::Sleep(TimeDelta interval) {
void OS::Abort() {
if (g_hard_abort) {
- V8_IMMEDIATE_CRASH();
+ IMMEDIATE_CRASH();
}
// Redirect to std abort to signal abnormal program termination.
abort();
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index 7f6c0e97d2f..50da60c72f5 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -929,7 +929,7 @@ void OS::Abort() {
fflush(stderr);
if (g_hard_abort) {
- V8_IMMEDIATE_CRASH();
+ IMMEDIATE_CRASH();
}
// Make the MSVCRT do a silent abort.
raise(SIGABRT);
diff --git a/deps/v8/src/base/template-utils.h b/deps/v8/src/base/template-utils.h
index d6d4ca32d94..4f082845d95 100644
--- a/deps/v8/src/base/template-utils.h
+++ b/deps/v8/src/base/template-utils.h
@@ -53,11 +53,11 @@ struct pass_value_or_ref {
};
// Uses expression SFINAE to detect whether using operator<< would work.
-template <typename T, typename = void>
+template <typename T, typename TStream = std::ostream, typename = void>
struct has_output_operator : std::false_type {};
-template <typename T>
-struct has_output_operator<T, decltype(void(std::declval<std::ostream&>()
- << std::declval<T>()))>
+template <typename T, typename TStream>
+struct has_output_operator<
+ T, TStream, decltype(void(std::declval<TStream&>() << std::declval<T>()))>
: std::true_type {};
// Fold all arguments from left to right with a given function.
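A small sketch of what the new TStream parameter enables (hypothetical types, not part of this patch): the trait can now ask whether a type is streamable to a specific stream class, which logging.h uses with its CheckMessageStream.

#include <sstream>

#include "src/base/template-utils.h"

// Hypothetical stream and payload types, defined only to exercise the trait.
class MyStream : public std::ostringstream {};
struct OnlyMyStream {};
MyStream& operator<<(MyStream& os, OnlyMyStream) { return os; }

static_assert(v8::base::has_output_operator<OnlyMyStream, MyStream>::value,
              "streamable to MyStream");
static_assert(!v8::base::has_output_operator<OnlyMyStream>::value,
              "but not to the default std::ostream");
static_assert(v8::base::has_output_operator<int>::value,
              "built-in types still default to std::ostream");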
diff --git a/deps/v8/src/base/vlq.h b/deps/v8/src/base/vlq.h
new file mode 100644
index 00000000000..baeb5b9430d
--- /dev/null
+++ b/deps/v8/src/base/vlq.h
@@ -0,0 +1,85 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_VLQ_H_
+#define V8_BASE_VLQ_H_
+
+#include <limits>
+#include <vector>
+
+#include "src/base/memory.h"
+
+namespace v8 {
+namespace base {
+
+static constexpr uint32_t kContinueShift = 7;
+static constexpr uint32_t kContinueMask = 1 << kContinueShift;
+static constexpr uint32_t kDataMask = kContinueMask - 1;
+
+// Encodes an unsigned value using variable-length encoding and stores it using
+// the passed process_byte function.
+inline void VLQEncodeUnsigned(const std::function<void(byte)>& process_byte,
+ uint32_t value) {
+ bool has_next;
+ do {
+ byte cur_byte = value & kDataMask;
+ value >>= kContinueShift;
+ has_next = value != 0;
+ // The most significant bit is set when we are not done with the value yet.
+ cur_byte |= static_cast<uint32_t>(has_next) << kContinueShift;
+ process_byte(cur_byte);
+ } while (has_next);
+}
+
+// Encodes value using variable-length encoding and stores it using the passed
+// process_byte function.
+inline void VLQEncode(const std::function<void(byte)>& process_byte,
+ int32_t value) {
+ // This wouldn't handle kMinInt correctly if it ever encountered it.
+ DCHECK_NE(value, std::numeric_limits<int32_t>::min());
+ bool is_negative = value < 0;
+ // Encode sign in least significant bit.
+ uint32_t bits = static_cast<uint32_t>((is_negative ? -value : value) << 1) |
+ static_cast<uint32_t>(is_negative);
+ VLQEncodeUnsigned(process_byte, bits);
+}
+
+// Wrapper of VLQEncode for std::vector backed storage containers.
+template <typename A>
+inline void VLQEncode(std::vector<byte, A>* data, int32_t value) {
+ VLQEncode([data](byte value) { data->push_back(value); }, value);
+}
+
+// Wrapper of VLQEncodeUnsigned for std::vector backed storage containers.
+template <typename A>
+inline void VLQEncodeUnsigned(std::vector<byte, A>* data, uint32_t value) {
+ VLQEncodeUnsigned([data](byte value) { data->push_back(value); }, value);
+}
+
+// Decodes a variable-length encoded unsigned value stored in contiguous memory
+// starting at data_start + index, updating index to where the next encoded
+// value starts.
+inline uint32_t VLQDecodeUnsigned(byte* data_start, int* index) {
+ uint32_t bits = 0;
+ for (int shift = 0; true; shift += kContinueShift) {
+ byte cur_byte = data_start[(*index)++];
+ bits += (cur_byte & kDataMask) << shift;
+ if ((cur_byte & kContinueMask) == 0) break;
+ }
+ return bits;
+}
+
+// Decodes a variable-length encoded value stored in contiguous memory starting
+// at data_start + index, updating index to where the next encoded value starts.
+inline int32_t VLQDecode(byte* data_start, int* index) {
+ uint32_t bits = VLQDecodeUnsigned(data_start, index);
+ bool is_negative = (bits & 1) == 1;
+ int32_t result = bits >> 1;
+ return is_negative ? -result : result;
+}
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_VLQ_H_
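As a quick round-trip sketch of the new helpers (assuming byte resolves to uint8_t via src/base/memory.h; this snippet is not part of the patch):

#include <cassert>
#include <cstdint>
#include <vector>

#include "src/base/vlq.h"

void VlqRoundTrip() {
  std::vector<uint8_t> buffer;
  v8::base::VLQEncode(&buffer, -12345);        // sign is stored in bit 0
  v8::base::VLQEncodeUnsigned(&buffer, 300u);  // 300 takes two bytes

  int index = 0;
  int32_t first = v8::base::VLQDecode(buffer.data(), &index);
  uint32_t second = v8::base::VLQDecodeUnsigned(buffer.data(), &index);
  assert(first == -12345);
  assert(second == 300u);
  assert(index == static_cast<int>(buffer.size()));
}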
diff --git a/deps/v8/src/baseline/OWNERS b/deps/v8/src/baseline/OWNERS
index f9e17a90b1a..6b48a30d8df 100644
--- a/deps/v8/src/baseline/OWNERS
+++ b/deps/v8/src/baseline/OWNERS
@@ -1,4 +1,5 @@
cbruni@chromium.org
+ishell@chromium.org
leszeks@chromium.org
marja@chromium.org
pthier@chromium.org
diff --git a/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h b/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h
new file mode 100644
index 00000000000..eca2b47cc0e
--- /dev/null
+++ b/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h
@@ -0,0 +1,483 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_ARM_BASELINE_ASSEMBLER_ARM_INL_H_
+#define V8_BASELINE_ARM_BASELINE_ASSEMBLER_ARM_INL_H_
+
+#include "src/baseline/baseline-assembler.h"
+#include "src/codegen/arm/assembler-arm-inl.h"
+#include "src/codegen/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+class BaselineAssembler::ScratchRegisterScope {
+ public:
+ explicit ScratchRegisterScope(BaselineAssembler* assembler)
+ : assembler_(assembler),
+ prev_scope_(assembler->scratch_register_scope_),
+ wrapped_scope_(assembler->masm()) {
+ if (!assembler_->scratch_register_scope_) {
+ // For the first scratch scope we open, add a couple of extra registers.
+ DCHECK(wrapped_scope_.CanAcquire());
+ wrapped_scope_.Include(r8, r9);
+ wrapped_scope_.Include(kInterpreterBytecodeOffsetRegister);
+ }
+ assembler_->scratch_register_scope_ = this;
+ }
+ ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }
+
+ Register AcquireScratch() { return wrapped_scope_.Acquire(); }
+
+ private:
+ BaselineAssembler* assembler_;
+ ScratchRegisterScope* prev_scope_;
+ UseScratchRegisterScope wrapped_scope_;
+};
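+
+// Usage sketch (illustrative; `basm` stands for any BaselineAssembler*):
+// scopes nest via prev_scope_, and only the outermost scope widens the pool
+// with r8, r9 and the bytecode offset register. Registers acquired by an
+// inner scope become available again when that scope is destroyed.
+//
+//   BaselineAssembler::ScratchRegisterScope outer(basm);
+//   Register a = outer.AcquireScratch();
+//   {
+//     BaselineAssembler::ScratchRegisterScope inner(basm);
+//     Register b = inner.AcquireScratch();
+//   }  // `b` is released here; `a` stays reserved until `outer` dies.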
+
+// TODO(v8:11429,leszeks): Unify condition names in the MacroAssembler.
+enum class Condition : uint32_t {
+ kEqual = static_cast<uint32_t>(eq),
+ kNotEqual = static_cast<uint32_t>(ne),
+
+ kLessThan = static_cast<uint32_t>(lt),
+ kGreaterThan = static_cast<uint32_t>(gt),
+ kLessThanEqual = static_cast<uint32_t>(le),
+ kGreaterThanEqual = static_cast<uint32_t>(ge),
+
+ kUnsignedLessThan = static_cast<uint32_t>(lo),
+ kUnsignedGreaterThan = static_cast<uint32_t>(hi),
+ kUnsignedLessThanEqual = static_cast<uint32_t>(ls),
+ kUnsignedGreaterThanEqual = static_cast<uint32_t>(hs),
+
+ kOverflow = static_cast<uint32_t>(vs),
+ kNoOverflow = static_cast<uint32_t>(vc),
+
+ kZero = static_cast<uint32_t>(eq),
+ kNotZero = static_cast<uint32_t>(ne),
+};
+
+inline internal::Condition AsMasmCondition(Condition cond) {
+ // This is important for arm, where each internal::Condition value
+ // represents an encoded bit field value.
+ STATIC_ASSERT(sizeof(internal::Condition) == sizeof(Condition));
+ return static_cast<internal::Condition>(cond);
+}
+
+namespace detail {
+
+#ifdef DEBUG
+inline bool Clobbers(Register target, MemOperand op) {
+ return op.rn() == target || op.rm() == target;
+}
+#endif
+
+} // namespace detail
+
+#define __ masm_->
+
+MemOperand BaselineAssembler::RegisterFrameOperand(
+ interpreter::Register interpreter_register) {
+ return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
+}
+MemOperand BaselineAssembler::FeedbackVectorOperand() {
+ return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
+}
+
+void BaselineAssembler::Bind(Label* label) { __ bind(label); }
+void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ bind(label); }
+
+void BaselineAssembler::JumpTarget() {
+ // NOP on arm.
+}
+
+void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
+ __ b(target);
+}
+void BaselineAssembler::JumpIf(Condition cc, Label* target, Label::Distance) {
+ __ b(AsMasmCondition(cc), target);
+}
+void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
+ Label* target, Label::Distance) {
+ __ JumpIfRoot(value, index, target);
+}
+void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
+ Label* target, Label::Distance) {
+ __ JumpIfNotRoot(value, index, target);
+}
+void BaselineAssembler::JumpIfSmi(Register value, Label* target,
+ Label::Distance) {
+ __ JumpIfSmi(value, target);
+}
+void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
+ Label::Distance) {
+ __ JumpIfNotSmi(value, target);
+}
+
+void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
+ // __ CallBuiltin(static_cast<int>(builtin));
+ __ RecordCommentForOffHeapTrampoline(builtin);
+ ScratchRegisterScope temps(this);
+ Register temp = temps.AcquireScratch();
+ __ LoadEntryFromBuiltinIndex(builtin, temp);
+ __ Call(temp);
+ if (FLAG_code_comments) __ RecordComment("]");
+}
+
+void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
+ __ RecordCommentForOffHeapTrampoline(builtin);
+ ScratchRegisterScope temps(this);
+ Register temp = temps.AcquireScratch();
+ __ LoadEntryFromBuiltinIndex(builtin, temp);
+ __ Jump(temp);
+ if (FLAG_code_comments) __ RecordComment("]");
+}
+
+void BaselineAssembler::Test(Register value, int mask) {
+ __ tst(value, Operand(mask));
+}
+
+void BaselineAssembler::CmpObjectType(Register object,
+ InstanceType instance_type,
+ Register map) {
+ ScratchRegisterScope temps(this);
+ Register type = temps.AcquireScratch();
+ __ CompareObjectType(object, map, type, instance_type);
+}
+void BaselineAssembler::CmpInstanceType(Register map,
+ InstanceType instance_type) {
+ ScratchRegisterScope temps(this);
+ Register type = temps.AcquireScratch();
+ if (emit_debug_code()) {
+ __ AssertNotSmi(map);
+ __ CompareObjectType(map, type, type, MAP_TYPE);
+ __ Assert(eq, AbortReason::kUnexpectedValue);
+ }
+ __ CompareInstanceType(map, type, instance_type);
+}
+void BaselineAssembler::Cmp(Register value, Smi smi) {
+ __ cmp(value, Operand(smi));
+}
+void BaselineAssembler::ComparePointer(Register value, MemOperand operand) {
+ ScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireScratch();
+ __ ldr(tmp, operand);
+ __ cmp(value, tmp);
+}
+void BaselineAssembler::SmiCompare(Register lhs, Register rhs) {
+ __ AssertSmi(lhs);
+ __ AssertSmi(rhs);
+ __ cmp(lhs, rhs);
+}
+void BaselineAssembler::CompareTagged(Register value, MemOperand operand) {
+ ScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireScratch();
+ __ ldr(tmp, operand);
+ __ cmp(value, tmp);
+}
+void BaselineAssembler::CompareTagged(MemOperand operand, Register value) {
+ ScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireScratch();
+ __ ldr(tmp, operand);
+ __ cmp(tmp, value);
+}
+void BaselineAssembler::CompareByte(Register value, int32_t byte) {
+ __ cmp(value, Operand(byte));
+}
+
+void BaselineAssembler::Move(interpreter::Register output, Register source) {
+ Move(RegisterFrameOperand(output), source);
+}
+void BaselineAssembler::Move(Register output, TaggedIndex value) {
+ __ mov(output, Operand(value.ptr()));
+}
+void BaselineAssembler::Move(MemOperand output, Register source) {
+ __ str(source, output);
+}
+void BaselineAssembler::Move(Register output, ExternalReference reference) {
+ __ mov(output, Operand(reference));
+}
+void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
+ __ mov(output, Operand(value));
+}
+void BaselineAssembler::Move(Register output, int32_t value) {
+ __ mov(output, Operand(value));
+}
+void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
+ __ mov(output, source);
+}
+void BaselineAssembler::MoveSmi(Register output, Register source) {
+ __ mov(output, source);
+}
+
+namespace detail {
+
+template <typename Arg>
+inline Register ToRegister(BaselineAssembler* basm,
+ BaselineAssembler::ScratchRegisterScope* scope,
+ Arg arg) {
+ Register reg = scope->AcquireScratch();
+ basm->Move(reg, arg);
+ return reg;
+}
+inline Register ToRegister(BaselineAssembler* basm,
+ BaselineAssembler::ScratchRegisterScope* scope,
+ Register reg) {
+ return reg;
+}
+
+template <typename... Args>
+struct PushAllHelper;
+template <>
+struct PushAllHelper<> {
+ static int Push(BaselineAssembler* basm) { return 0; }
+ static int PushReverse(BaselineAssembler* basm) { return 0; }
+};
+// TODO(ishell): try to pack sequence of pushes into one instruction by
+// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
+// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
+template <typename Arg>
+struct PushAllHelper<Arg> {
+ static int Push(BaselineAssembler* basm, Arg arg) {
+ BaselineAssembler::ScratchRegisterScope scope(basm);
+ basm->masm()->Push(ToRegister(basm, &scope, arg));
+ return 1;
+ }
+ static int PushReverse(BaselineAssembler* basm, Arg arg) {
+ return Push(basm, arg);
+ }
+};
+// TODO(ishell): try to pack sequence of pushes into one instruction by
+// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
+// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
+template <typename Arg, typename... Args>
+struct PushAllHelper<Arg, Args...> {
+ static int Push(BaselineAssembler* basm, Arg arg, Args... args) {
+ PushAllHelper<Arg>::Push(basm, arg);
+ return 1 + PushAllHelper<Args...>::Push(basm, args...);
+ }
+ static int PushReverse(BaselineAssembler* basm, Arg arg, Args... args) {
+ int nargs = PushAllHelper<Args...>::PushReverse(basm, args...);
+ PushAllHelper<Arg>::Push(basm, arg);
+ return nargs + 1;
+ }
+};
+template <>
+struct PushAllHelper<interpreter::RegisterList> {
+ static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
+ for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
+ PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
+ }
+ return list.register_count();
+ }
+ static int PushReverse(BaselineAssembler* basm,
+ interpreter::RegisterList list) {
+ for (int reg_index = list.register_count() - 1; reg_index >= 0;
+ --reg_index) {
+ PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
+ }
+ return list.register_count();
+ }
+};
+
+template <typename... T>
+struct PopAllHelper;
+template <>
+struct PopAllHelper<> {
+ static void Pop(BaselineAssembler* basm) {}
+};
+// TODO(ishell): try to pack sequence of pops into one instruction by
+// looking at register codes. For example, Pop(r1, r2, r5, r0, r3, r4)
+// could be generated as two pops: Pop(r1, r2, r5) and Pop(r0, r3, r4).
+template <>
+struct PopAllHelper<Register> {
+ static void Pop(BaselineAssembler* basm, Register reg) {
+ basm->masm()->Pop(reg);
+ }
+};
+template <typename... T>
+struct PopAllHelper<Register, T...> {
+ static void Pop(BaselineAssembler* basm, Register reg, T... tail) {
+ PopAllHelper<Register>::Pop(basm, reg);
+ PopAllHelper<T...>::Pop(basm, tail...);
+ }
+};
+
+} // namespace detail
+
+template <typename... T>
+int BaselineAssembler::Push(T... vals) {
+ return detail::PushAllHelper<T...>::Push(this, vals...);
+}
+
+template <typename... T>
+void BaselineAssembler::PushReverse(T... vals) {
+ detail::PushAllHelper<T...>::PushReverse(this, vals...);
+}
+
+template <typename... T>
+void BaselineAssembler::Pop(T... registers) {
+ detail::PopAllHelper<T...>::Pop(this, registers...);
+}
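+
+// Ordering note (illustrative): Push(a, b, c) pushes a first and c last, so c
+// ends up at the lowest address (top of stack), while PushReverse(a, b, c)
+// pushes c, then b, then a, leaving a on top; callers pick whichever stack
+// layout the callee expects.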
+
+void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+ int offset) {
+ __ ldr(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
+ int offset) {
+ __ ldr(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
+ int offset) {
+ __ ldr(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadByteField(Register output, Register source,
+ int offset) {
+ __ ldrb(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
+ Smi value) {
+ ScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireScratch();
+ __ mov(tmp, Operand(value));
+ __ str(tmp, FieldMemOperand(target, offset));
+}
+void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
+ int offset,
+ Register value) {
+ __ str(value, FieldMemOperand(target, offset));
+ __ RecordWriteField(target, offset, value, kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+}
+void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
+ int offset,
+ Register value) {
+ __ str(value, FieldMemOperand(target, offset));
+}
+
+void BaselineAssembler::AddToInterruptBudget(int32_t weight) {
+ ScratchRegisterScope scratch_scope(this);
+ Register feedback_cell = scratch_scope.AcquireScratch();
+ LoadFunction(feedback_cell);
+ LoadTaggedPointerField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
+
+ Register interrupt_budget = scratch_scope.AcquireScratch();
+ __ ldr(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ // Remember to set flags as part of the add!
+ __ add(interrupt_budget, interrupt_budget, Operand(weight), SetCC);
+ __ str(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+}
+
+void BaselineAssembler::AddToInterruptBudget(Register weight) {
+ ScratchRegisterScope scratch_scope(this);
+ Register feedback_cell = scratch_scope.AcquireScratch();
+ LoadFunction(feedback_cell);
+ LoadTaggedPointerField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
+
+ Register interrupt_budget = scratch_scope.AcquireScratch();
+ __ ldr(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ // Remember to set flags as part of the add!
+ __ add(interrupt_budget, interrupt_budget, weight, SetCC);
+ __ str(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+}
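+
+// Note on the SetCC adds above: callers rely on the flags they leave behind.
+// EmitReturn below and the interrupt budget checks in the baseline compiler
+// branch on Condition::kGreaterThanEqual right after AddToInterruptBudget to
+// skip the budget interrupt while the updated budget is still non-negative.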
+
+void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
+ __ add(lhs, lhs, Operand(rhs));
+}
+
+void BaselineAssembler::Switch(Register reg, int case_value_base,
+ Label** labels, int num_labels) {
+ Label fallthrough;
+ if (case_value_base > 0) {
+ __ sub(reg, reg, Operand(case_value_base));
+ }
+
+ // Mostly copied from code-generator-arm.cc
+ ScratchRegisterScope scope(this);
+ __ cmp(reg, Operand(num_labels));
+ JumpIf(Condition::kUnsignedGreaterThanEqual, &fallthrough);
+ // Ensure to emit the constant pool first if necessary.
+ __ CheckConstPool(true, true);
+ __ BlockConstPoolFor(num_labels);
+ int entry_size_log2 = 2;
+ __ add(pc, pc, Operand(reg, LSL, entry_size_log2), LeaveCC, lo);
+ __ b(&fallthrough);
+ for (int i = 0; i < num_labels; ++i) {
+ __ b(labels[i]);
+ }
+ __ bind(&fallthrough);
+}
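+
+// How the table above works (illustrative): on A32 a read of pc yields the
+// address of the current instruction plus 8, so `add pc, pc, reg, LSL #2`
+// lands directly on the branch for labels[reg], skipping the `b &fallthrough`
+// that follows it. The `lo` predicate on the add re-checks the bound from the
+// earlier cmp, and the constant pool is blocked so no literals are
+// interleaved with the branch table.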
+
+#undef __
+
+#define __ basm.
+
+void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
+ BaselineAssembler basm(masm);
+
+ Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
+ Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();
+
+ __ RecordComment("[ Update Interrupt Budget");
+ __ AddToInterruptBudget(weight);
+
+ // Use compare flags set by add
+ Label skip_interrupt_label;
+ __ JumpIf(Condition::kGreaterThanEqual, &skip_interrupt_label);
+ {
+ __ masm()->SmiTag(params_size);
+ __ Push(params_size, kInterpreterAccumulatorRegister);
+
+ __ LoadContext(kContextRegister);
+ __ LoadFunction(kJSFunctionRegister);
+ __ Push(kJSFunctionRegister);
+ __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+
+ __ Pop(kInterpreterAccumulatorRegister, params_size);
+ __ masm()->SmiUntag(params_size);
+ }
+ __ RecordComment("]");
+
+ __ Bind(&skip_interrupt_label);
+
+ BaselineAssembler::ScratchRegisterScope temps(&basm);
+ Register actual_params_size = temps.AcquireScratch();
+ // Compute the size of the actual parameters + receiver (in bytes).
+ __ Move(actual_params_size,
+ MemOperand(fp, StandardFrameConstants::kArgCOffset));
+
+ // If actual is bigger than formal, then we should use it to free up the stack
+ // arguments.
+ Label corrected_args_count;
+ __ masm()->cmp(params_size, actual_params_size);
+ __ JumpIf(Condition::kGreaterThanEqual, &corrected_args_count);
+ __ masm()->mov(params_size, actual_params_size);
+ __ Bind(&corrected_args_count);
+
+ // Leave the frame (also dropping the register file).
+ __ masm()->LeaveFrame(StackFrame::BASELINE);
+
+ // Drop receiver + arguments.
+ __ masm()->add(params_size, params_size,
+ Operand(1)); // Include the receiver.
+ __ masm()->Drop(params_size);
+ __ masm()->Ret();
+}
+
+#undef __
+
+} // namespace baseline
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BASELINE_ARM_BASELINE_ASSEMBLER_ARM_INL_H_
diff --git a/deps/v8/src/baseline/arm/baseline-compiler-arm-inl.h b/deps/v8/src/baseline/arm/baseline-compiler-arm-inl.h
new file mode 100644
index 00000000000..ff2b6d1a831
--- /dev/null
+++ b/deps/v8/src/baseline/arm/baseline-compiler-arm-inl.h
@@ -0,0 +1,94 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_ARM_BASELINE_COMPILER_ARM_INL_H_
+#define V8_BASELINE_ARM_BASELINE_COMPILER_ARM_INL_H_
+
+#include "src/base/logging.h"
+#include "src/baseline/baseline-compiler.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+#define __ basm_.
+
+void BaselineCompiler::Prologue() {
+ // Enter the frame here, since CallBuiltin will override lr.
+ __ masm()->EnterFrame(StackFrame::BASELINE);
+ DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
+ int max_frame_size = bytecode_->frame_size() + max_call_args_;
+ CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
+ kJSFunctionRegister, kJavaScriptCallArgCountRegister,
+ max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
+
+ PrologueFillFrame();
+}
+
+void BaselineCompiler::PrologueFillFrame() {
+ __ RecordComment("[ Fill frame");
+ // Inlined register frame fill
+ interpreter::Register new_target_or_generator_register =
+ bytecode_->incoming_new_target_or_generator_register();
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ int register_count = bytecode_->register_count();
+ // Magic value
+ const int kLoopUnrollSize = 8;
+ const int new_target_index = new_target_or_generator_register.index();
+ const bool has_new_target = new_target_index != kMaxInt;
+ if (has_new_target) {
+ DCHECK_LE(new_target_index, register_count);
+ for (int i = 0; i < new_target_index; i++) {
+ __ Push(kInterpreterAccumulatorRegister);
+ }
+ // Push new_target_or_generator.
+ __ Push(kJavaScriptCallNewTargetRegister);
+ register_count -= new_target_index + 1;
+ }
+ if (register_count < 2 * kLoopUnrollSize) {
+ // If the frame is small enough, just unroll the frame fill completely.
+ for (int i = 0; i < register_count; ++i) {
+ __ Push(kInterpreterAccumulatorRegister);
+ }
+
+ } else {
+ // Extract the first few registers to round to the unroll size.
+ int first_registers = register_count % kLoopUnrollSize;
+ for (int i = 0; i < first_registers; ++i) {
+ __ Push(kInterpreterAccumulatorRegister);
+ }
+ BaselineAssembler::ScratchRegisterScope temps(&basm_);
+ Register scratch = temps.AcquireScratch();
+
+ __ Move(scratch, register_count / kLoopUnrollSize);
+ // We enter the loop unconditionally, so make sure we need to loop at least
+ // once.
+ DCHECK_GT(register_count / kLoopUnrollSize, 0);
+ Label loop;
+ __ Bind(&loop);
+ for (int i = 0; i < kLoopUnrollSize; ++i) {
+ __ Push(kInterpreterAccumulatorRegister);
+ }
+ __ masm()->sub(scratch, scratch, Operand(1), SetCC);
+ __ JumpIf(Condition::kGreaterThan, &loop);
+ }
+ __ RecordComment("]");
+}
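+
+// Worked example (illustrative): with register_count = 19, no new_target and
+// kLoopUnrollSize = 8, the code emits 19 % 8 = 3 individual pushes, then the
+// loop runs 19 / 8 = 2 times pushing 8 registers per iteration, for a total
+// of 3 + 2 * 8 = 19 undefined values.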
+
+void BaselineCompiler::VerifyFrameSize() {
+ BaselineAssembler::ScratchRegisterScope temps(&basm_);
+ Register scratch = temps.AcquireScratch();
+
+ __ masm()->add(scratch, sp,
+ Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp +
+ bytecode_->frame_size()));
+ __ masm()->cmp(scratch, fp);
+ __ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer);
+}
+
+} // namespace baseline
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BASELINE_ARM_BASELINE_COMPILER_ARM_INL_H_
diff --git a/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h b/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
index 021df8d9cf1..27b7c2b2d8d 100644
--- a/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
+++ b/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
@@ -23,6 +23,7 @@ class BaselineAssembler::ScratchRegisterScope {
// If we haven't opened a scratch scope yet, for the first one add a
// couple of extra registers.
wrapped_scope_.Include(x14, x15);
+ wrapped_scope_.Include(x19);
}
assembler_->scratch_register_scope_ = this;
}
@@ -37,7 +38,7 @@ class BaselineAssembler::ScratchRegisterScope {
};
// TODO(v8:11461): Unify condition names in the MacroAssembler.
-enum class Condition : uint8_t {
+enum class Condition : uint32_t {
kEqual = eq,
kNotEqual = ne,
@@ -87,6 +88,10 @@ void BaselineAssembler::Bind(Label* label) {
__ BindJumpTarget(label);
}
+void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ Bind(label); }
+
+void BaselineAssembler::JumpTarget() { __ JumpTarget(); }
+
void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
__ B(target);
}
@@ -111,23 +116,40 @@ void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
}
void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
- ScratchRegisterScope temps(this);
- Register temp = temps.AcquireScratch();
- __ LoadEntryFromBuiltinIndex(builtin, temp);
- __ Call(temp);
+ if (masm()->options().short_builtin_calls) {
+ // Generate pc-relative call.
+ __ CallBuiltin(builtin);
+ } else {
+ ScratchRegisterScope temps(this);
+ Register temp = temps.AcquireScratch();
+ __ LoadEntryFromBuiltinIndex(builtin, temp);
+ __ Call(temp);
+ }
}
void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
- // x17 is used to allow using "Call" (i.e. `bti c`) rather than "Jump" (i.e.]
- // `bti j`) landing pads for the tail-called code.
- Register temp = x17;
-
- // Make sure we're don't use this register as a temporary.
- UseScratchRegisterScope temps(masm());
- temps.Exclude(temp);
-
- __ LoadEntryFromBuiltinIndex(builtin, temp);
- __ Jump(temp);
+ if (masm()->options().short_builtin_calls) {
+ // Generate pc-relative call.
+ __ TailCallBuiltin(builtin);
+ } else {
+ // The control flow integrity (CFI) feature allows us to "sign" code entry
+ // points as a target for calls, jumps or both. Arm64 has special
+ // instructions for this purpose, so-called "landing pads" (see
+ // TurboAssembler::CallTarget(), TurboAssembler::JumpTarget() and
+ // TurboAssembler::JumpOrCallTarget()). Currently, we generate "Call"
+ // landing pads for CPP builtins. In order to allow tail calling to those
+ // builtins we have to use a workaround.
+ // x17 is used to allow using "Call" (i.e. `bti c`) rather than "Jump" (i.e.
+ // `bti j`) landing pads for the tail-called code.
+ Register temp = x17;
+
+ // Make sure we don't use this register as a temporary.
+ UseScratchRegisterScope temps(masm());
+ temps.Exclude(temp);
+
+ __ LoadEntryFromBuiltinIndex(builtin, temp);
+ __ Jump(temp);
+ }
}
void BaselineAssembler::Test(Register value, int mask) {
diff --git a/deps/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h b/deps/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h
index 2ce652d1a0c..e567be41d24 100644
--- a/deps/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h
+++ b/deps/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h
@@ -14,14 +14,13 @@ namespace baseline {
#define __ basm_.
void BaselineCompiler::Prologue() {
- __ masm()->Mov(kInterpreterBytecodeArrayRegister, Operand(bytecode_));
- DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
// Enter the frame here, since CallBuiltin will override lr.
__ masm()->EnterFrame(StackFrame::BASELINE);
+ DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
+ int max_frame_size = bytecode_->frame_size() + max_call_args_;
CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
kJSFunctionRegister, kJavaScriptCallArgCountRegister,
- kInterpreterBytecodeArrayRegister,
- kJavaScriptCallNewTargetRegister);
+ max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
__ masm()->AssertSpAligned();
PrologueFillFrame();
diff --git a/deps/v8/src/baseline/baseline-assembler-inl.h b/deps/v8/src/baseline/baseline-assembler-inl.h
index d949425a19c..8fd54d63a2f 100644
--- a/deps/v8/src/baseline/baseline-assembler-inl.h
+++ b/deps/v8/src/baseline/baseline-assembler-inl.h
@@ -7,7 +7,8 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
+ V8_TARGET_ARCH_ARM
#include <type_traits>
#include <unordered_map>
@@ -22,6 +23,10 @@
#include "src/baseline/x64/baseline-assembler-x64-inl.h"
#elif V8_TARGET_ARCH_ARM64
#include "src/baseline/arm64/baseline-assembler-arm64-inl.h"
+#elif V8_TARGET_ARCH_IA32
+#include "src/baseline/ia32/baseline-assembler-ia32-inl.h"
+#elif V8_TARGET_ARCH_ARM
+#include "src/baseline/arm/baseline-assembler-arm-inl.h"
#else
#error Unsupported target architecture.
#endif
@@ -62,7 +67,7 @@ void BaselineAssembler::LoadRoot(Register output, RootIndex index) {
__ LoadRoot(output, index);
}
void BaselineAssembler::LoadNativeContextSlot(Register output, uint32_t index) {
- __ LoadNativeContextSlot(index, output);
+ __ LoadNativeContextSlot(output, index);
}
void BaselineAssembler::Move(Register output, interpreter::Register source) {
diff --git a/deps/v8/src/baseline/baseline-assembler.h b/deps/v8/src/baseline/baseline-assembler.h
index de6bd239116..38874d556f0 100644
--- a/deps/v8/src/baseline/baseline-assembler.h
+++ b/deps/v8/src/baseline/baseline-assembler.h
@@ -1,4 +1,3 @@
-
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -8,15 +7,17 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
+ V8_TARGET_ARCH_ARM
#include "src/codegen/macro-assembler.h"
+#include "src/objects/tagged-index.h"
namespace v8 {
namespace internal {
namespace baseline {
-enum class Condition : uint8_t;
+enum class Condition : uint32_t;
class BaselineAssembler {
public:
@@ -39,6 +40,13 @@ class BaselineAssembler {
inline void DebugBreak();
inline void Bind(Label* label);
+ // Binds the label without marking it as a valid jump target.
+ // This is only useful when the position is already marked as a valid jump
+ // target (i.e. at the beginning of the bytecode).
+ inline void BindWithoutJumpTarget(Label* label);
+ // Marks the current position as a valid jump target on CFI enabled
+ // architectures.
+ inline void JumpTarget();
inline void JumpIf(Condition cc, Label* target,
Label::Distance distance = Label::kFar);
inline void Jump(Label* target, Label::Distance distance = Label::kFar);
diff --git a/deps/v8/src/baseline/baseline-compiler.cc b/deps/v8/src/baseline/baseline-compiler.cc
index 60be8c8386b..3d599c11fd5 100644
--- a/deps/v8/src/baseline/baseline-compiler.cc
+++ b/deps/v8/src/baseline/baseline-compiler.cc
@@ -4,14 +4,16 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
+ V8_TARGET_ARCH_ARM
#include "src/baseline/baseline-compiler.h"
+#include <algorithm>
#include <type_traits>
-#include <unordered_map>
#include "src/baseline/baseline-assembler-inl.h"
+#include "src/baseline/baseline-assembler.h"
#include "src/builtins/builtins-constructor.h"
#include "src/builtins/builtins-descriptors.h"
#include "src/builtins/builtins.h"
@@ -22,7 +24,6 @@
#include "src/codegen/macro-assembler-inl.h"
#include "src/common/globals.h"
#include "src/execution/frame-constants.h"
-#include "src/interpreter/bytecode-array-accessor.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/objects/code.h"
@@ -35,6 +36,10 @@
#include "src/baseline/x64/baseline-compiler-x64-inl.h"
#elif V8_TARGET_ARCH_ARM64
#include "src/baseline/arm64/baseline-compiler-arm64-inl.h"
+#elif V8_TARGET_ARCH_IA32
+#include "src/baseline/ia32/baseline-compiler-ia32-inl.h"
+#elif V8_TARGET_ARCH_ARM
+#include "src/baseline/arm/baseline-compiler-arm-inl.h"
#else
#error Unsupported target architecture.
#endif
@@ -220,7 +225,6 @@ void MoveArgumentsForDescriptor(BaselineAssembler* masm,
} // namespace detail
-
BaselineCompiler::BaselineCompiler(
Isolate* isolate, Handle<SharedFunctionInfo> shared_function_info,
Handle<BytecodeArray> bytecode)
@@ -232,24 +236,13 @@ BaselineCompiler::BaselineCompiler(
basm_(&masm_),
iterator_(bytecode_),
zone_(isolate->allocator(), ZONE_NAME),
- labels_(zone_.NewArray<BaselineLabels*>(bytecode_->length())),
- handler_offsets_(&zone_) {
+ labels_(zone_.NewArray<BaselineLabels*>(bytecode_->length())) {
MemsetPointer(labels_, nullptr, bytecode_->length());
}
#define __ basm_.
void BaselineCompiler::GenerateCode() {
- HandlerTable table(*bytecode_);
- {
- RuntimeCallTimerScope runtimeTimer(
- stats_, RuntimeCallCounterId::kCompileBaselinePrepareHandlerOffsets);
- for (int i = 0; i < table.NumberOfRangeEntries(); ++i) {
- int handler_offset = table.GetRangeHandler(i);
- handler_offsets_.insert(handler_offset);
- }
- }
-
{
RuntimeCallTimerScope runtimeTimer(
stats_, RuntimeCallCounterId::kCompileBaselinePreVisit);
@@ -267,13 +260,15 @@ void BaselineCompiler::GenerateCode() {
RuntimeCallTimerScope runtimeTimer(
stats_, RuntimeCallCounterId::kCompileBaselineVisit);
Prologue();
+ AddPosition();
for (; !iterator_.done(); iterator_.Advance()) {
VisitSingleBytecode();
+ AddPosition();
}
}
}
-Handle<Code> BaselineCompiler::Build(Isolate* isolate) {
+MaybeHandle<Code> BaselineCompiler::Build(Isolate* isolate) {
CodeDesc desc;
__ GetCode(isolate, &desc);
// Allocate the bytecode offset table.
@@ -281,11 +276,11 @@ Handle<Code> BaselineCompiler::Build(Isolate* isolate) {
bytecode_offset_table_builder_.ToBytecodeOffsetTable(isolate);
return Factory::CodeBuilder(isolate, desc, CodeKind::BASELINE)
.set_bytecode_offset_table(bytecode_offset_table)
- .Build();
+ .TryBuild();
}
interpreter::Register BaselineCompiler::RegisterOperand(int operand_index) {
- return accessor().GetRegisterOperand(operand_index);
+ return iterator().GetRegisterOperand(operand_index);
}
void BaselineCompiler::LoadRegister(Register output, int operand_index) {
@@ -299,36 +294,36 @@ void BaselineCompiler::StoreRegister(int operand_index, Register value) {
void BaselineCompiler::StoreRegisterPair(int operand_index, Register val0,
Register val1) {
interpreter::Register reg0, reg1;
- std::tie(reg0, reg1) = accessor().GetRegisterPairOperand(operand_index);
+ std::tie(reg0, reg1) = iterator().GetRegisterPairOperand(operand_index);
__ StoreRegister(reg0, val0);
__ StoreRegister(reg1, val1);
}
template <typename Type>
Handle<Type> BaselineCompiler::Constant(int operand_index) {
return Handle<Type>::cast(
- accessor().GetConstantForIndexOperand(operand_index, isolate_));
+ iterator().GetConstantForIndexOperand(operand_index, isolate_));
}
Smi BaselineCompiler::ConstantSmi(int operand_index) {
- return accessor().GetConstantAtIndexAsSmi(operand_index);
+ return iterator().GetConstantAtIndexAsSmi(operand_index);
}
template <typename Type>
void BaselineCompiler::LoadConstant(Register output, int operand_index) {
__ Move(output, Constant<Type>(operand_index));
}
uint32_t BaselineCompiler::Uint(int operand_index) {
- return accessor().GetUnsignedImmediateOperand(operand_index);
+ return iterator().GetUnsignedImmediateOperand(operand_index);
}
int32_t BaselineCompiler::Int(int operand_index) {
- return accessor().GetImmediateOperand(operand_index);
+ return iterator().GetImmediateOperand(operand_index);
}
uint32_t BaselineCompiler::Index(int operand_index) {
- return accessor().GetIndexOperand(operand_index);
+ return iterator().GetIndexOperand(operand_index);
}
uint32_t BaselineCompiler::Flag(int operand_index) {
- return accessor().GetFlagOperand(operand_index);
+ return iterator().GetFlagOperand(operand_index);
}
uint32_t BaselineCompiler::RegisterCount(int operand_index) {
- return accessor().GetRegisterCountOperand(operand_index);
+ return iterator().GetRegisterCountOperand(operand_index);
}
TaggedIndex BaselineCompiler::IndexAsTagged(int operand_index) {
return TaggedIndex::FromIntptr(Index(operand_index));
@@ -374,41 +369,65 @@ void BaselineCompiler::SelectBooleanConstant(
}
void BaselineCompiler::AddPosition() {
- bytecode_offset_table_builder_.AddPosition(__ pc_offset(),
- accessor().current_offset());
+ bytecode_offset_table_builder_.AddPosition(__ pc_offset());
}
void BaselineCompiler::PreVisitSingleBytecode() {
- if (accessor().current_bytecode() == interpreter::Bytecode::kJumpLoop) {
- EnsureLabels(accessor().GetJumpTargetOffset());
+ switch (iterator().current_bytecode()) {
+ case interpreter::Bytecode::kJumpLoop:
+ EnsureLabels(iterator().GetJumpTargetOffset());
+ break;
+
+ // TODO(leszeks): Update the max_call_args as part of the main bytecode
+ // visit loop, by patching the value passed to the prologue.
+ case interpreter::Bytecode::kCallProperty:
+ case interpreter::Bytecode::kCallAnyReceiver:
+ case interpreter::Bytecode::kCallWithSpread:
+ case interpreter::Bytecode::kCallNoFeedback:
+ case interpreter::Bytecode::kConstruct:
+ case interpreter::Bytecode::kConstructWithSpread:
+ return UpdateMaxCallArgs(
+ iterator().GetRegisterListOperand(1).register_count());
+ case interpreter::Bytecode::kCallUndefinedReceiver:
+ return UpdateMaxCallArgs(
+ iterator().GetRegisterListOperand(1).register_count() + 1);
+ case interpreter::Bytecode::kCallProperty0:
+ case interpreter::Bytecode::kCallUndefinedReceiver0:
+ return UpdateMaxCallArgs(1);
+ case interpreter::Bytecode::kCallProperty1:
+ case interpreter::Bytecode::kCallUndefinedReceiver1:
+ return UpdateMaxCallArgs(2);
+ case interpreter::Bytecode::kCallProperty2:
+ case interpreter::Bytecode::kCallUndefinedReceiver2:
+ return UpdateMaxCallArgs(3);
+
+ default:
+ break;
}
}
void BaselineCompiler::VisitSingleBytecode() {
- int offset = accessor().current_offset();
+ int offset = iterator().current_offset();
if (labels_[offset]) {
// Bind labels for this offset that have already been linked to a
// jump (i.e. forward jumps, excluding jump tables).
for (auto&& label : labels_[offset]->linked) {
- __ Bind(&label->label);
+ __ BindWithoutJumpTarget(&label->label);
}
#ifdef DEBUG
labels_[offset]->linked.Clear();
#endif
- __ Bind(&labels_[offset]->unlinked);
+ __ BindWithoutJumpTarget(&labels_[offset]->unlinked);
}
- // Record positions of exception handlers.
- if (handler_offsets_.find(accessor().current_offset()) !=
- handler_offsets_.end()) {
- AddPosition();
- __ ExceptionHandler();
- }
+ // Mark position as valid jump target. This is required for the deoptimizer
+ // and exception handling, when CFI is enabled.
+ __ JumpTarget();
if (FLAG_code_comments) {
std::ostringstream str;
str << "[ ";
- accessor().PrintTo(str);
+ iterator().PrintTo(str);
__ RecordComment(str.str().c_str());
}
@@ -418,7 +437,7 @@ void BaselineCompiler::VisitSingleBytecode() {
TraceBytecode(Runtime::kTraceUnoptimizedBytecodeEntry);
#endif
- switch (accessor().current_bytecode()) {
+ switch (iterator().current_bytecode()) {
#define BYTECODE_CASE(name, ...) \
case interpreter::Bytecode::k##name: \
Visit##name(); \
@@ -469,7 +488,7 @@ void BaselineCompiler::TraceBytecode(Runtime::FunctionId function_id) {
SaveAccumulatorScope accumulator_scope(&basm_);
CallRuntime(function_id, bytecode_,
Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
- accessor().current_offset()),
+ iterator().current_offset()),
kInterpreterAccumulatorRegister);
__ RecordComment("]");
}
@@ -486,22 +505,25 @@ INTRINSICS_LIST(DECLARE_VISITOR)
void BaselineCompiler::UpdateInterruptBudgetAndJumpToLabel(
int weight, Label* label, Label* skip_interrupt_label) {
- __ RecordComment("[ Update Interrupt Budget");
- __ AddToInterruptBudget(weight);
-
- if (weight < 0) {
- // Use compare flags set by AddToInterruptBudget
- __ JumpIf(Condition::kGreaterThanEqual, skip_interrupt_label);
- SaveAccumulatorScope accumulator_scope(&basm_);
- CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode,
- __ FunctionOperand());
+ if (weight != 0) {
+ __ RecordComment("[ Update Interrupt Budget");
+ __ AddToInterruptBudget(weight);
+
+ if (weight < 0) {
+ // Use compare flags set by AddToInterruptBudget
+ __ JumpIf(Condition::kGreaterThanEqual, skip_interrupt_label);
+ SaveAccumulatorScope accumulator_scope(&basm_);
+ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode,
+ __ FunctionOperand());
+ }
}
if (label) __ Jump(label);
- __ RecordComment("]");
+ if (weight != 0) __ RecordComment("]");
}
void BaselineCompiler::UpdateInterruptBudgetAndDoInterpreterJump() {
- int weight = accessor().GetRelativeJumpTargetOffset();
+ int weight = iterator().GetRelativeJumpTargetOffset() -
+ iterator().current_bytecode_size_without_prefix();
UpdateInterruptBudgetAndJumpToLabel(weight, BuildForwardJumpLabel(), nullptr);
}
@@ -524,7 +546,7 @@ void BaselineCompiler::UpdateInterruptBudgetAndDoInterpreterJumpIfNotRoot(
}
Label* BaselineCompiler::BuildForwardJumpLabel() {
- int target_offset = accessor().GetJumpTargetOffset();
+ int target_offset = iterator().GetJumpTargetOffset();
ThreadedLabel* threaded_label = zone_.New<ThreadedLabel>();
EnsureLabels(target_offset)->linked.Add(threaded_label);
return &threaded_label->label;
@@ -540,7 +562,6 @@ void BaselineCompiler::CallBuiltin(Builtins::Name builtin, Args... args) {
__ LoadContext(descriptor.ContextRegister());
}
__ CallBuiltin(builtin);
- AddPosition();
__ RecordComment("]");
}
@@ -560,7 +581,6 @@ void BaselineCompiler::CallRuntime(Runtime::FunctionId function, Args... args) {
__ LoadContext(kContextRegister);
int nargs = __ Push(args...);
__ CallRuntime(function, nargs);
- AddPosition();
}
// Returns into kInterpreterAccumulatorRegister
@@ -592,7 +612,7 @@ void BaselineCompiler::VisitLdaZero() {
}
void BaselineCompiler::VisitLdaSmi() {
- Smi constant = Smi::FromInt(accessor().GetImmediateOperand(0));
+ Smi constant = Smi::FromInt(iterator().GetImmediateOperand(0));
__ Move(kInterpreterAccumulatorRegister, constant);
}
@@ -691,7 +711,7 @@ void BaselineCompiler::VisitStaContextSlot() {
Register value = scratch_scope.AcquireScratch();
__ Move(value, kInterpreterAccumulatorRegister);
__ StoreTaggedFieldWithWriteBarrier(
- context, Context::OffsetOfElementAt(accessor().GetIndexOperand(1)),
+ context, Context::OffsetOfElementAt(iterator().GetIndexOperand(1)),
value);
}
@@ -906,6 +926,7 @@ void BaselineCompiler::VisitStaDataPropertyInLiteral() {
}
void BaselineCompiler::VisitCollectTypeProfile() {
+ SaveAccumulatorScope accumulator_scope(&basm_);
CallRuntime(Runtime::kCollectTypeProfile,
IntAsSmi(0), // position
kInterpreterAccumulatorRegister, // value
@@ -1110,13 +1131,13 @@ void BaselineCompiler::BuildCall(ConvertReceiverMode mode, uint32_t slot,
}
void BaselineCompiler::VisitCallAnyReceiver() {
- interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+ interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
uint32_t arg_count = args.register_count() - 1; // Remove receiver.
BuildCall(ConvertReceiverMode::kAny, Index(3), arg_count, args);
}
void BaselineCompiler::VisitCallProperty() {
- interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+ interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
uint32_t arg_count = args.register_count() - 1; // Remove receiver.
BuildCall(ConvertReceiverMode::kNotNullOrUndefined, Index(3), arg_count,
args);
@@ -1138,7 +1159,7 @@ void BaselineCompiler::VisitCallProperty2() {
}
void BaselineCompiler::VisitCallUndefinedReceiver() {
- interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+ interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
uint32_t arg_count = args.register_count();
BuildCall(ConvertReceiverMode::kNullOrUndefined, Index(3), arg_count,
RootIndex::kUndefinedValue, args);
@@ -1160,7 +1181,7 @@ void BaselineCompiler::VisitCallUndefinedReceiver2() {
}
void BaselineCompiler::VisitCallNoFeedback() {
- interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+ interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
uint32_t arg_count = args.register_count();
CallBuiltin(Builtins::kCall_ReceiverIsAny,
RegisterOperand(0), // kFunction
@@ -1169,7 +1190,7 @@ void BaselineCompiler::VisitCallNoFeedback() {
}
void BaselineCompiler::VisitCallWithSpread() {
- interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+ interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
// Do not push the spread argument
interpreter::Register spread_register = args.last_register();
@@ -1186,24 +1207,25 @@ void BaselineCompiler::VisitCallWithSpread() {
}
void BaselineCompiler::VisitCallRuntime() {
- CallRuntime(accessor().GetRuntimeIdOperand(0),
- accessor().GetRegisterListOperand(1));
+ CallRuntime(iterator().GetRuntimeIdOperand(0),
+ iterator().GetRegisterListOperand(1));
}
void BaselineCompiler::VisitCallRuntimeForPair() {
- CallRuntime(accessor().GetRuntimeIdOperand(0),
- accessor().GetRegisterListOperand(1));
+ SaveAccumulatorScope accumulator_scope(&basm_);
+ CallRuntime(iterator().GetRuntimeIdOperand(0),
+ iterator().GetRegisterListOperand(1));
StoreRegisterPair(3, kReturnRegister0, kReturnRegister1);
}
void BaselineCompiler::VisitCallJSRuntime() {
- interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+ interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
uint32_t arg_count = args.register_count();
// Load context for LoadNativeContextSlot.
__ LoadContext(kContextRegister);
__ LoadNativeContextSlot(kJavaScriptCallTargetRegister,
- accessor().GetNativeContextIndexOperand(0));
+ iterator().GetNativeContextIndexOperand(0));
CallBuiltin(Builtins::kCall_ReceiverIsNullOrUndefined,
kJavaScriptCallTargetRegister, // kFunction
arg_count, // kActualArgumentsCount
@@ -1212,8 +1234,8 @@ void BaselineCompiler::VisitCallJSRuntime() {
}
void BaselineCompiler::VisitInvokeIntrinsic() {
- Runtime::FunctionId intrinsic_id = accessor().GetIntrinsicIdOperand(0);
- interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+ Runtime::FunctionId intrinsic_id = iterator().GetIntrinsicIdOperand(0);
+ interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
switch (intrinsic_id) {
#define CASE(Name, ...) \
case Runtime::kInline##Name: \
@@ -1402,7 +1424,7 @@ void BaselineCompiler::VisitIntrinsicAsyncGeneratorYield(
}
void BaselineCompiler::VisitConstruct() {
- interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+ interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
uint32_t arg_count = args.register_count();
CallBuiltin(Builtins::kConstruct_Baseline,
RegisterOperand(0), // kFunction
@@ -1414,7 +1436,7 @@ void BaselineCompiler::VisitConstruct() {
}
void BaselineCompiler::VisitConstructWithSpread() {
- interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+ interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
// Do not push the spread argument
interpreter::Register spread_register = args.last_register();
@@ -1494,23 +1516,24 @@ void BaselineCompiler::VisitTestIn() {
CallBuiltin(Builtins::kKeyedHasICBaseline,
kInterpreterAccumulatorRegister, // object
RegisterOperand(0), // name
- IndexAsSmi(1)); // slot
+ IndexAsTagged(1)); // slot
}
void BaselineCompiler::VisitTestUndetectable() {
- Label done, set_false;
- __ JumpIfSmi(kInterpreterAccumulatorRegister, &set_false, Label::kNear);
+ Label done, is_smi, not_undetectable;
+ __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
Register map_bit_field = kInterpreterAccumulatorRegister;
__ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
__ LoadByteField(map_bit_field, map_bit_field, Map::kBitFieldOffset);
__ Test(map_bit_field, Map::Bits1::IsUndetectableBit::kMask);
- __ JumpIf(Condition::kZero, &set_false, Label::kNear);
+ __ JumpIf(Condition::kZero, &not_undetectable, Label::kNear);
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
__ Jump(&done, Label::kNear);
- __ Bind(&set_false);
+ __ Bind(&is_smi);
+ __ Bind(&not_undetectable);
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
__ Bind(&done);
}
@@ -1534,36 +1557,172 @@ void BaselineCompiler::VisitTestUndefined() {
}
void BaselineCompiler::VisitTestTypeOf() {
- uint32_t literal_flag = Flag(0);
- CallBuiltin(Builtins::kTypeof, kInterpreterAccumulatorRegister);
-
-#define TYPEOF_FLAG_VALUE(type_name) \
- static_cast< \
- std::underlying_type<interpreter::TestTypeOfFlags::LiteralFlag>::type>( \
- interpreter::TestTypeOfFlags::LiteralFlag::k##type_name)
-#define TYPEOF_COMPARE(type_name) \
- SelectBooleanConstant(kInterpreterAccumulatorRegister, \
- [&](Label* is_true, Label::Distance distance) { \
- __ JumpIfRoot(kInterpreterAccumulatorRegister, \
- RootIndex::k##type_name##_string, \
- is_true, distance); \
- });
+ BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
-#define TYPEOF_CASE(type_upper, type_lower) \
- case TYPEOF_FLAG_VALUE(type_upper): \
- TYPEOF_COMPARE(type_lower); \
- break;
+ auto literal_flag =
+ static_cast<interpreter::TestTypeOfFlags::LiteralFlag>(Flag(0));
+ Label done;
switch (literal_flag) {
- default:
- __ Trap();
+ case interpreter::TestTypeOfFlags::LiteralFlag::kNumber: {
+ Label is_smi, is_heap_number;
+ __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
+ __ CmpObjectType(kInterpreterAccumulatorRegister, HEAP_NUMBER_TYPE,
+ scratch_scope.AcquireScratch());
+ __ JumpIf(Condition::kEqual, &is_heap_number, Label::kNear);
+
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
+ __ Jump(&done, Label::kNear);
+
+ __ Bind(&is_smi);
+ __ Bind(&is_heap_number);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
break;
- TYPEOF_LITERAL_LIST(TYPEOF_CASE)
- }
+ }
+ case interpreter::TestTypeOfFlags::LiteralFlag::kString: {
+ Label is_smi, bad_instance_type;
+ __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
+ STATIC_ASSERT(INTERNALIZED_STRING_TYPE == FIRST_TYPE);
+ __ CmpObjectType(kInterpreterAccumulatorRegister, FIRST_NONSTRING_TYPE,
+ scratch_scope.AcquireScratch());
+ __ JumpIf(Condition::kGreaterThanEqual, &bad_instance_type, Label::kNear);
+
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
+ __ Jump(&done, Label::kNear);
+
+ __ Bind(&is_smi);
+ __ Bind(&bad_instance_type);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
+ break;
+ }
+ case interpreter::TestTypeOfFlags::LiteralFlag::kSymbol: {
+ Label is_smi, bad_instance_type;
+ __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
+ __ CmpObjectType(kInterpreterAccumulatorRegister, SYMBOL_TYPE,
+ scratch_scope.AcquireScratch());
+ __ JumpIf(Condition::kNotEqual, &bad_instance_type, Label::kNear);
+
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
+ __ Jump(&done, Label::kNear);
+
+ __ Bind(&is_smi);
+ __ Bind(&bad_instance_type);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
+ break;
+ }
+ case interpreter::TestTypeOfFlags::LiteralFlag::kBoolean: {
+ Label is_true, is_false;
+ __ JumpIfRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue,
+ &is_true, Label::kNear);
+ __ JumpIfRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue,
+ &is_false, Label::kNear);
+
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
+ __ Jump(&done, Label::kNear);
+
+ __ Bind(&is_true);
+ __ Bind(&is_false);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
+ break;
+ }
+ case interpreter::TestTypeOfFlags::LiteralFlag::kBigInt: {
+ Label is_smi, bad_instance_type;
+ __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
+ __ CmpObjectType(kInterpreterAccumulatorRegister, BIGINT_TYPE,
+ scratch_scope.AcquireScratch());
+ __ JumpIf(Condition::kNotEqual, &bad_instance_type, Label::kNear);
+
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
+ __ Jump(&done, Label::kNear);
+
+ __ Bind(&is_smi);
+ __ Bind(&bad_instance_type);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
+ break;
+ }
+ case interpreter::TestTypeOfFlags::LiteralFlag::kUndefined: {
+ Label is_smi, is_null, not_undetectable;
+ __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
-#undef TYPEOF_COMPARE
-#undef TYPEOF_FLAG_VALUE
-#undef TYPEOF_CASE
+ // null is undetectable, so test it explicitly, and return false.
+ __ JumpIfRoot(kInterpreterAccumulatorRegister, RootIndex::kNullValue,
+ &is_null, Label::kNear);
+
+ // All other undetectable maps are typeof undefined.
+ Register map_bit_field = kInterpreterAccumulatorRegister;
+ __ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
+ __ LoadByteField(map_bit_field, map_bit_field, Map::kBitFieldOffset);
+ __ Test(map_bit_field, Map::Bits1::IsUndetectableBit::kMask);
+ __ JumpIf(Condition::kZero, &not_undetectable, Label::kNear);
+
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
+ __ Jump(&done, Label::kNear);
+
+ __ Bind(&is_smi);
+ __ Bind(&is_null);
+ __ Bind(&not_undetectable);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
+ break;
+ }
+ case interpreter::TestTypeOfFlags::LiteralFlag::kFunction: {
+ Label is_smi, not_callable, undetectable;
+ __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
+
+ // Check if the map is callable but not undetectable.
+ Register map_bit_field = kInterpreterAccumulatorRegister;
+ __ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
+ __ LoadByteField(map_bit_field, map_bit_field, Map::kBitFieldOffset);
+ __ Test(map_bit_field, Map::Bits1::IsCallableBit::kMask);
+ __ JumpIf(Condition::kZero, &not_callable, Label::kNear);
+ __ Test(map_bit_field, Map::Bits1::IsUndetectableBit::kMask);
+ __ JumpIf(Condition::kNotZero, &undetectable, Label::kNear);
+
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
+ __ Jump(&done, Label::kNear);
+
+ __ Bind(&is_smi);
+ __ Bind(&not_callable);
+ __ Bind(&undetectable);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
+ break;
+ }
+ case interpreter::TestTypeOfFlags::LiteralFlag::kObject: {
+ Label is_smi, is_null, bad_instance_type, undetectable_or_callable;
+ __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
+
+ // If the object is null, return true.
+ __ JumpIfRoot(kInterpreterAccumulatorRegister, RootIndex::kNullValue,
+ &is_null, Label::kNear);
+
+ // If the object's instance type isn't within the range, return false.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ Register map = scratch_scope.AcquireScratch();
+ __ CmpObjectType(kInterpreterAccumulatorRegister, FIRST_JS_RECEIVER_TYPE,
+ map);
+ __ JumpIf(Condition::kLessThan, &bad_instance_type, Label::kNear);
+
+ // If the map is undetectable or callable, return false.
+ Register map_bit_field = kInterpreterAccumulatorRegister;
+ __ LoadByteField(map_bit_field, map, Map::kBitFieldOffset);
+ __ Test(map_bit_field, Map::Bits1::IsUndetectableBit::kMask |
+ Map::Bits1::IsCallableBit::kMask);
+ __ JumpIf(Condition::kNotZero, &undetectable_or_callable, Label::kNear);
+
+ __ Bind(&is_null);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
+ __ Jump(&done, Label::kNear);
+
+ __ Bind(&is_smi);
+ __ Bind(&bad_instance_type);
+ __ Bind(&undetectable_or_callable);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
+ break;
+ }
+ case interpreter::TestTypeOfFlags::LiteralFlag::kOther:
+ default:
+ UNREACHABLE();
+ }
+ __ Bind(&done);
}
void BaselineCompiler::VisitToName() {
@@ -1602,15 +1761,16 @@ void BaselineCompiler::VisitCreateRegExpLiteral() {
void BaselineCompiler::VisitCreateArrayLiteral() {
uint32_t flags = Flag(2);
+ int32_t flags_raw = static_cast<int32_t>(
+ interpreter::CreateArrayLiteralFlags::FlagsBits::decode(flags));
if (flags &
interpreter::CreateArrayLiteralFlags::FastCloneSupportedBit::kMask) {
CallBuiltin(Builtins::kCreateShallowArrayLiteral,
FeedbackVector(), // feedback vector
IndexAsTagged(1), // slot
- Constant<HeapObject>(0)); // constant elements
+ Constant<HeapObject>(0), // constant elements
+ Smi::FromInt(flags_raw)); // flags
} else {
- int32_t flags_raw = static_cast<int32_t>(
- interpreter::CreateArrayLiteralFlags::FlagsBits::decode(flags));
CallRuntime(Runtime::kCreateArrayLiteral,
FeedbackVector(), // feedback vector
IndexAsTagged(1), // slot
@@ -1756,15 +1916,16 @@ void BaselineCompiler::VisitJumpLoop() {
Register osr_level = scratch;
__ LoadRegister(osr_level, interpreter::Register::bytecode_array());
__ LoadByteField(osr_level, osr_level, BytecodeArray::kOsrNestingLevelOffset);
- int loop_depth = accessor().GetImmediateOperand(1);
+ int loop_depth = iterator().GetImmediateOperand(1);
__ CompareByte(osr_level, loop_depth);
__ JumpIf(Condition::kUnsignedLessThanEqual, &osr_not_armed);
CallBuiltin(Builtins::kBaselineOnStackReplacement);
__ RecordComment("]");
__ Bind(&osr_not_armed);
- Label* label = &labels_[accessor().GetJumpTargetOffset()]->unlinked;
- int weight = accessor().GetRelativeJumpTargetOffset();
+ Label* label = &labels_[iterator().GetJumpTargetOffset()]->unlinked;
+ int weight = iterator().GetRelativeJumpTargetOffset() -
+ iterator().current_bytecode_size_without_prefix();
// We can pass in the same label twice since it's a back edge and thus already
// bound.
DCHECK(label->is_bound());
@@ -1879,7 +2040,7 @@ void BaselineCompiler::VisitJumpIfJSReceiver() {
void BaselineCompiler::VisitSwitchOnSmiNoFeedback() {
BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
interpreter::JumpTableTargetOffsets offsets =
- accessor().GetJumpTableTargetOffsets();
+ iterator().GetJumpTableTargetOffsets();
if (offsets.size() == 0) return;
@@ -1903,7 +2064,7 @@ void BaselineCompiler::VisitForInPrepare() {
StoreRegister(0, kInterpreterAccumulatorRegister);
CallBuiltin(Builtins::kForInPrepare, kInterpreterAccumulatorRegister,
IndexAsTagged(1), FeedbackVector());
- interpreter::Register first = accessor().GetRegisterOperand(0);
+ interpreter::Register first = iterator().GetRegisterOperand(0);
interpreter::Register second(first.index() + 1);
interpreter::Register third(first.index() + 2);
__ StoreRegister(second, kReturnRegister0);
@@ -1923,7 +2084,7 @@ void BaselineCompiler::VisitForInContinue() {
void BaselineCompiler::VisitForInNext() {
interpreter::Register cache_type, cache_array;
- std::tie(cache_type, cache_array) = accessor().GetRegisterPairOperand(2);
+ std::tie(cache_type, cache_array) = iterator().GetRegisterPairOperand(2);
CallBuiltin(Builtins::kForInNext,
Index(3), // vector slot
RegisterOperand(0), // object
@@ -1961,7 +2122,8 @@ void BaselineCompiler::VisitReThrow() {
void BaselineCompiler::VisitReturn() {
__ RecordComment("[ Return");
- int profiling_weight = accessor().current_offset();
+ int profiling_weight = iterator().current_offset() +
+ iterator().current_bytecode_size_without_prefix();
int parameter_count = bytecode_->parameter_count();
// We must pop all arguments from the stack (including the receiver). This
@@ -2043,7 +2205,7 @@ void BaselineCompiler::VisitSwitchOnGeneratorState() {
__ StoreContext(context);
interpreter::JumpTableTargetOffsets offsets =
- accessor().GetJumpTableTargetOffsets();
+ iterator().GetJumpTableTargetOffsets();
if (0 < offsets.size()) {
DCHECK_EQ(0, (*offsets.begin()).case_value);
@@ -2064,73 +2226,30 @@ void BaselineCompiler::VisitSwitchOnGeneratorState() {
}
void BaselineCompiler::VisitSuspendGenerator() {
- DCHECK_EQ(accessor().GetRegisterOperand(1), interpreter::Register(0));
- int register_count = RegisterCount(2);
- uint32_t suspend_id = Uint(3);
-
+ DCHECK_EQ(iterator().GetRegisterOperand(1), interpreter::Register(0));
BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
Register generator_object = scratch_scope.AcquireScratch();
- Register parameters_and_registers_array = scratch_scope.AcquireScratch();
- Register value = scratch_scope.AcquireScratch();
-
LoadRegister(generator_object, 0);
- __ LoadTaggedPointerField(parameters_and_registers_array, generator_object,
- JSGeneratorObject::kParametersAndRegistersOffset);
-
- int formal_parameter_count =
- shared_function_info_->internal_formal_parameter_count();
- for (int i = 0; i < formal_parameter_count; ++i) {
- __ LoadRegister(value, interpreter::Register::FromParameterIndex(
- i + 1, bytecode_->parameter_count()));
- __ StoreTaggedFieldWithWriteBarrier(parameters_and_registers_array,
- FixedArray::OffsetOfElementAt(i),
- value);
- }
- for (int i = 0; i < register_count; ++i) {
- __ LoadRegister(value, interpreter::Register(i));
- __ StoreTaggedFieldWithWriteBarrier(
- parameters_and_registers_array,
- FixedArray::OffsetOfElementAt(formal_parameter_count + i), value);
- }
-
- __ LoadContext(value);
- __ StoreTaggedFieldWithWriteBarrier(generator_object,
- JSGeneratorObject::kContextOffset, value);
-
- __ StoreTaggedSignedField(generator_object,
- JSGeneratorObject::kContinuationOffset,
- Smi::FromInt(suspend_id));
+ {
+ SaveAccumulatorScope accumulator_scope(&basm_);
- __ StoreTaggedSignedField(
- generator_object, JSGeneratorObject::kInputOrDebugPosOffset,
- Smi::FromInt(BytecodeArray::kHeaderSize + accessor().current_offset()));
+ int bytecode_offset =
+ BytecodeArray::kHeaderSize + iterator().current_offset();
+ CallBuiltin(Builtins::kSuspendGeneratorBaseline, generator_object,
+ static_cast<int>(Uint(3)), // suspend_id
+ bytecode_offset,
+ static_cast<int>(RegisterCount(2))); // register_count
+ }
VisitReturn();
}
void BaselineCompiler::VisitResumeGenerator() {
- DCHECK_EQ(accessor().GetRegisterOperand(1), interpreter::Register(0));
- int register_count = RegisterCount(2);
-
+ DCHECK_EQ(iterator().GetRegisterOperand(1), interpreter::Register(0));
BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
Register generator_object = scratch_scope.AcquireScratch();
- Register parameters_and_registers_array = scratch_scope.AcquireScratch();
- Register value = scratch_scope.AcquireScratch();
-
LoadRegister(generator_object, 0);
- __ LoadTaggedPointerField(parameters_and_registers_array, generator_object,
- JSGeneratorObject::kParametersAndRegistersOffset);
-
- int formal_parameter_count =
- shared_function_info_->internal_formal_parameter_count();
- for (int i = 0; i < register_count; ++i) {
- __ LoadTaggedAnyField(
- value, parameters_and_registers_array,
- FixedArray::OffsetOfElementAt(formal_parameter_count + i));
- __ StoreRegister(interpreter::Register(i), value);
- }
-
- __ LoadTaggedAnyField(kInterpreterAccumulatorRegister, generator_object,
- JSGeneratorObject::kInputOrDebugPosOffset);
+ CallBuiltin(Builtins::kResumeGeneratorBaseline, generator_object,
+ static_cast<int>(RegisterCount(2))); // register_count
}
void BaselineCompiler::VisitGetIterator() {
@@ -2141,10 +2260,12 @@ void BaselineCompiler::VisitGetIterator() {
}
void BaselineCompiler::VisitDebugger() {
+ SaveAccumulatorScope accumulator_scope(&basm_);
CallBuiltin(Builtins::kHandleDebuggerStatement);
}
void BaselineCompiler::VisitIncBlockCounter() {
+ SaveAccumulatorScope accumulator_scope(&basm_);
CallBuiltin(Builtins::kIncBlockCounter, __ FunctionOperand(),
IndexAsSmi(0)); // coverage array slot
}
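
Note: several visitors above now wrap their builtin calls in a SaveAccumulatorScope. Its definition is not part of this hunk; as a rough sketch under that assumption, it is an RAII helper that spills and restores the interpreter accumulator around a call that may clobber it:

// Hedged sketch only -- the real SaveAccumulatorScope lives elsewhere in the
// baseline compiler; this illustrates the assumed behaviour.
class SaveAccumulatorScope final {
 public:
  explicit SaveAccumulatorScope(BaselineAssembler* assembler)
      : assembler_(assembler) {
    // Preserve the accumulator across the upcoming builtin call.
    assembler_->Push(kInterpreterAccumulatorRegister);
  }
  ~SaveAccumulatorScope() {
    // Restore it once the call has returned.
    assembler_->Pop(kInterpreterAccumulatorRegister);
  }

 private:
  BaselineAssembler* assembler_;
};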
diff --git a/deps/v8/src/baseline/baseline-compiler.h b/deps/v8/src/baseline/baseline-compiler.h
index 2ddd8fdb16c..dbb2f64f6c5 100644
--- a/deps/v8/src/baseline/baseline-compiler.h
+++ b/deps/v8/src/baseline/baseline-compiler.h
@@ -7,12 +7,12 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
-
-#include <unordered_map>
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
+ V8_TARGET_ARCH_ARM
#include "src/base/logging.h"
#include "src/base/threaded-list.h"
+#include "src/base/vlq.h"
#include "src/baseline/baseline-assembler.h"
#include "src/handles/handles.h"
#include "src/interpreter/bytecode-array-iterator.h"
@@ -21,7 +21,6 @@
#include "src/logging/counters.h"
#include "src/objects/map.h"
#include "src/objects/tagged-index.h"
-#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -32,30 +31,19 @@ namespace baseline {
class BytecodeOffsetTableBuilder {
public:
- void AddPosition(size_t pc_offset, size_t bytecode_offset) {
- WriteUint(pc_offset - previous_pc_);
- WriteUint(bytecode_offset - previous_bytecode_);
+ void AddPosition(size_t pc_offset) {
+ size_t pc_diff = pc_offset - previous_pc_;
+ DCHECK_GE(pc_diff, 0);
+ DCHECK_LE(pc_diff, std::numeric_limits<uint32_t>::max());
+ base::VLQEncodeUnsigned(&bytes_, static_cast<uint32_t>(pc_diff));
previous_pc_ = pc_offset;
- previous_bytecode_ = bytecode_offset;
}
template <typename LocalIsolate>
Handle<ByteArray> ToBytecodeOffsetTable(LocalIsolate* isolate);
private:
- void WriteUint(size_t value) {
- bool has_next;
- do {
- uint8_t byte = value & ((1 << 7) - 1);
- value >>= 7;
- has_next = value != 0;
- byte |= (has_next << 7);
- bytes_.push_back(byte);
- } while (has_next);
- }
-
size_t previous_pc_ = 0;
- size_t previous_bytecode_ = 0;
std::vector<byte> bytes_;
};
@@ -66,7 +54,7 @@ class BaselineCompiler {
Handle<BytecodeArray> bytecode);
void GenerateCode();
- Handle<Code> Build(Isolate* isolate);
+ MaybeHandle<Code> Build(Isolate* isolate);
private:
void Prologue();
@@ -123,6 +111,10 @@ class BaselineCompiler {
// Misc. helpers.
+ void UpdateMaxCallArgs(int max_call_args) {
+ max_call_args_ = std::max(max_call_args_, max_call_args);
+ }
+
// Select the root boolean constant based on the jump in the given
// `jump_func` -- the function should jump to the given label if we want to
// select "true", otherwise it should fall through.
@@ -170,7 +162,7 @@ class BaselineCompiler {
INTRINSICS_LIST(DECLARE_VISITOR)
#undef DECLARE_VISITOR
- const interpreter::BytecodeArrayAccessor& accessor() { return iterator_; }
+ const interpreter::BytecodeArrayIterator& iterator() { return iterator_; }
Isolate* isolate_;
RuntimeCallStats* stats_;
@@ -182,6 +174,8 @@ class BaselineCompiler {
BytecodeOffsetTableBuilder bytecode_offset_table_builder_;
Zone zone_;
+ int max_call_args_ = 0;
+
struct ThreadedLabel {
Label label;
ThreadedLabel* ptr;
@@ -201,7 +195,6 @@ class BaselineCompiler {
}
BaselineLabels** labels_;
- ZoneSet<int> handler_offsets_;
};
} // namespace baseline
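
Note: the hand-rolled WriteUint loop removed from BytecodeOffsetTableBuilder above is replaced by base::VLQEncodeUnsigned from the newly included src/base/vlq.h. Assuming the helper keeps the same little-endian, 7-bits-per-byte format (the removed code is the reference here), the encode/decode pair looks roughly like this:

#include <cstdint>
#include <vector>

// Assumed VLQ byte format: low 7 bits of payload per byte, bit 7 set while
// more bytes follow (same scheme as the removed WriteUint).
inline void VLQEncodeUnsigned(std::vector<uint8_t>* out, uint32_t value) {
  bool has_next;
  do {
    uint8_t byte = value & 0x7f;
    value >>= 7;
    has_next = value != 0;
    byte |= static_cast<uint8_t>(has_next) << 7;
    out->push_back(byte);
  } while (has_next);
}

inline uint32_t VLQDecodeUnsigned(const uint8_t* data, int* index) {
  uint32_t result = 0;
  int shift = 0;
  uint8_t byte;
  do {
    byte = data[(*index)++];
    result |= static_cast<uint32_t>(byte & 0x7f) << shift;
    shift += 7;
  } while (byte & 0x80);
  return result;
}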
diff --git a/deps/v8/src/baseline/baseline.cc b/deps/v8/src/baseline/baseline.cc
index 3229c134f43..b5355660f94 100644
--- a/deps/v8/src/baseline/baseline.cc
+++ b/deps/v8/src/baseline/baseline.cc
@@ -4,9 +4,12 @@
#include "src/baseline/baseline.h"
+#include "src/handles/maybe-handles.h"
+
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
+ V8_TARGET_ARCH_ARM
#include "src/baseline/baseline-assembler-inl.h"
#include "src/baseline/baseline-compiler.h"
@@ -18,17 +21,17 @@
namespace v8 {
namespace internal {
-Handle<Code> GenerateBaselineCode(Isolate* isolate,
- Handle<SharedFunctionInfo> shared) {
+MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared) {
RuntimeCallTimerScope runtimeTimer(isolate,
RuntimeCallCounterId::kCompileBaseline);
baseline::BaselineCompiler compiler(
isolate, shared, handle(shared->GetBytecodeArray(isolate), isolate));
compiler.GenerateCode();
- Handle<Code> code = compiler.Build(isolate);
- if (FLAG_print_code) {
- code->Print();
+ MaybeHandle<Code> code = compiler.Build(isolate);
+ if (FLAG_print_code && !code.is_null()) {
+ code.ToHandleChecked()->Print();
}
return code;
}
@@ -45,8 +48,8 @@ void EmitReturnBaseline(MacroAssembler* masm) {
namespace v8 {
namespace internal {
-Handle<Code> GenerateBaselineCode(Isolate* isolate,
- Handle<SharedFunctionInfo> shared) {
+MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared) {
UNREACHABLE();
}
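
Note: GenerateBaselineCode and BaselineCompiler::Build now return MaybeHandle<Code> so baseline compilation can bail out gracefully. A caller-side sketch (hypothetical helper name, not taken from this patch) of how the empty case is expected to be handled:

// Returns false when baseline compilation bailed out; in that case the
// function simply keeps executing its bytecode in the interpreter.
bool TryGenerateBaselineCode(Isolate* isolate,
                             Handle<SharedFunctionInfo> shared,
                             Handle<Code>* out) {
  MaybeHandle<Code> maybe_code = GenerateBaselineCode(isolate, shared);
  return maybe_code.ToHandle(out);
}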
diff --git a/deps/v8/src/baseline/baseline.h b/deps/v8/src/baseline/baseline.h
index 071c0bdbfb4..2dba2d9674b 100644
--- a/deps/v8/src/baseline/baseline.h
+++ b/deps/v8/src/baseline/baseline.h
@@ -14,8 +14,8 @@ class Code;
class SharedFunctionInfo;
class MacroAssembler;
-Handle<Code> GenerateBaselineCode(Isolate* isolate,
- Handle<SharedFunctionInfo> shared);
+MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared);
void EmitReturnBaseline(MacroAssembler* masm);
diff --git a/deps/v8/src/baseline/bytecode-offset-iterator.cc b/deps/v8/src/baseline/bytecode-offset-iterator.cc
new file mode 100644
index 00000000000..bbedac8ef30
--- /dev/null
+++ b/deps/v8/src/baseline/bytecode-offset-iterator.cc
@@ -0,0 +1,65 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/baseline/bytecode-offset-iterator.h"
+
+#include "src/objects/code-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+BytecodeOffsetIterator::BytecodeOffsetIterator(Handle<ByteArray> mapping_table,
+ Handle<BytecodeArray> bytecodes)
+ : mapping_table_(mapping_table),
+ data_start_address_(mapping_table_->GetDataStartAddress()),
+ data_length_(mapping_table_->length()),
+ current_index_(0),
+ bytecode_iterator_(bytecodes),
+ local_heap_(LocalHeap::Current()
+ ? LocalHeap::Current()
+ : Isolate::Current()->main_thread_local_heap()) {
+ local_heap_->AddGCEpilogueCallback(UpdatePointersCallback, this);
+ Initialize();
+}
+
+BytecodeOffsetIterator::BytecodeOffsetIterator(ByteArray mapping_table,
+ BytecodeArray bytecodes)
+ : data_start_address_(mapping_table.GetDataStartAddress()),
+ data_length_(mapping_table.length()),
+ current_index_(0),
+ bytecode_handle_storage_(bytecodes),
+ // In the non-handlified version, no GC is allowed. We use a "dummy"
+ // handle to pass the BytecodeArray to the BytecodeArrayIterator, which
+ // is fine since no objects will be moved.
+ bytecode_iterator_(Handle<BytecodeArray>(
+ reinterpret_cast<Address*>(&bytecode_handle_storage_))),
+ local_heap_(nullptr) {
+ no_gc.emplace();
+ Initialize();
+}
+
+BytecodeOffsetIterator::~BytecodeOffsetIterator() {
+ if (local_heap_ != nullptr) {
+ local_heap_->RemoveGCEpilogueCallback(UpdatePointersCallback, this);
+ }
+}
+
+void BytecodeOffsetIterator::Initialize() {
+ // Initialize values for the prologue.
+ // The first recorded position is at the start of the first bytecode.
+ current_pc_start_offset_ = 0;
+ current_pc_end_offset_ = ReadPosition();
+ current_bytecode_offset_ = kFunctionEntryBytecodeOffset;
+}
+
+void BytecodeOffsetIterator::UpdatePointers() {
+ DisallowGarbageCollection no_gc;
+ DCHECK(!mapping_table_.is_null());
+ data_start_address_ = mapping_table_->GetDataStartAddress();
+}
+
+} // namespace baseline
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/baseline/bytecode-offset-iterator.h b/deps/v8/src/baseline/bytecode-offset-iterator.h
new file mode 100644
index 00000000000..6e78fba0614
--- /dev/null
+++ b/deps/v8/src/baseline/bytecode-offset-iterator.h
@@ -0,0 +1,98 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_BYTECODE_OFFSET_ITERATOR_H_
+#define V8_BASELINE_BYTECODE_OFFSET_ITERATOR_H_
+
+#include "src/base/vlq.h"
+#include "src/common/globals.h"
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/objects/code.h"
+#include "src/objects/fixed-array.h"
+
+namespace v8 {
+namespace internal {
+
+class BytecodeArray;
+
+namespace baseline {
+
+class V8_EXPORT_PRIVATE BytecodeOffsetIterator {
+ public:
+ explicit BytecodeOffsetIterator(Handle<ByteArray> mapping_table,
+ Handle<BytecodeArray> bytecodes);
+ // Non-handlified version for use when no GC can happen.
+ explicit BytecodeOffsetIterator(ByteArray mapping_table,
+ BytecodeArray bytecodes);
+ ~BytecodeOffsetIterator();
+
+ inline void Advance() {
+ DCHECK(!done());
+ current_pc_start_offset_ = current_pc_end_offset_;
+ current_pc_end_offset_ += ReadPosition();
+ current_bytecode_offset_ = bytecode_iterator_.current_offset();
+ bytecode_iterator_.Advance();
+ }
+
+ inline void AdvanceToBytecodeOffset(int bytecode_offset) {
+ while (current_bytecode_offset() < bytecode_offset) {
+ Advance();
+ }
+ DCHECK_EQ(bytecode_offset, current_bytecode_offset());
+ }
+
+ inline void AdvanceToPCOffset(Address pc_offset) {
+ while (current_pc_end_offset() < pc_offset) {
+ Advance();
+ }
+ DCHECK_GT(pc_offset, current_pc_start_offset());
+ DCHECK_LE(pc_offset, current_pc_end_offset());
+ }
+
+ // For this iterator, done() means that it is not safe to Advance().
+ // Values are cached, so reads are always allowed.
+ inline bool done() const { return current_index_ >= data_length_; }
+
+ inline Address current_pc_start_offset() const {
+ return current_pc_start_offset_;
+ }
+
+ inline Address current_pc_end_offset() const {
+ return current_pc_end_offset_;
+ }
+
+ inline int current_bytecode_offset() const {
+ return current_bytecode_offset_;
+ }
+
+ static void UpdatePointersCallback(void* iterator) {
+ reinterpret_cast<BytecodeOffsetIterator*>(iterator)->UpdatePointers();
+ }
+
+ void UpdatePointers();
+
+ private:
+ void Initialize();
+ inline int ReadPosition() {
+ return base::VLQDecodeUnsigned(data_start_address_, &current_index_);
+ }
+
+ Handle<ByteArray> mapping_table_;
+ byte* data_start_address_;
+ int data_length_;
+ int current_index_;
+ Address current_pc_start_offset_;
+ Address current_pc_end_offset_;
+ int current_bytecode_offset_;
+ BytecodeArray bytecode_handle_storage_;
+ interpreter::BytecodeArrayIterator bytecode_iterator_;
+ LocalHeap* local_heap_;
+ base::Optional<DisallowGarbageCollection> no_gc;
+};
+
+} // namespace baseline
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BASELINE_BYTECODE_OFFSET_ITERATOR_H_
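
Note: the new iterator walks the VLQ-encoded pc table in lockstep with a BytecodeArrayIterator, so each bytecode maps to a half-open pc range [current_pc_start_offset, current_pc_end_offset). A minimal usage sketch of the non-handlified variant (only valid while no GC can happen; assumes it lives in v8::internal with the header above included):

#include <cstdio>

// Sketch only: prints the baseline pc range emitted for a given bytecode
// offset. `mapping_table` and `bytecodes` are assumed to come from a
// baseline Code object and its BytecodeArray.
void PrintPcRangeForBytecode(ByteArray mapping_table, BytecodeArray bytecodes,
                             int bytecode_offset) {
  baseline::BytecodeOffsetIterator it(mapping_table, bytecodes);
  it.AdvanceToBytecodeOffset(bytecode_offset);
  std::printf("bytecode %d -> pc [%zu, %zu)\n", bytecode_offset,
              static_cast<size_t>(it.current_pc_start_offset()),
              static_cast<size_t>(it.current_pc_end_offset()));
}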
diff --git a/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h b/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
new file mode 100644
index 00000000000..2cd34aef710
--- /dev/null
+++ b/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
@@ -0,0 +1,445 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_IA32_BASELINE_ASSEMBLER_IA32_INL_H_
+#define V8_BASELINE_IA32_BASELINE_ASSEMBLER_IA32_INL_H_
+
+#include "src/baseline/baseline-assembler.h"
+#include "src/codegen/ia32/register-ia32.h"
+#include "src/codegen/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+namespace detail {
+
+static constexpr Register kScratchRegisters[] = {ecx, edx, esi, edi};
+static constexpr int kNumScratchRegisters = arraysize(kScratchRegisters);
+
+} // namespace detail
+
+class BaselineAssembler::ScratchRegisterScope {
+ public:
+ explicit ScratchRegisterScope(BaselineAssembler* assembler)
+ : assembler_(assembler),
+ prev_scope_(assembler->scratch_register_scope_),
+ registers_used_(prev_scope_ == nullptr ? 0
+ : prev_scope_->registers_used_) {
+ assembler_->scratch_register_scope_ = this;
+ }
+ ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }
+
+ Register AcquireScratch() {
+ DCHECK_LT(registers_used_, detail::kNumScratchRegisters);
+ return detail::kScratchRegisters[registers_used_++];
+ }
+
+ private:
+ BaselineAssembler* assembler_;
+ ScratchRegisterScope* prev_scope_;
+ int registers_used_;
+};
+
+// TODO(v8:11461): Unify condition names in the MacroAssembler.
+enum class Condition : uint32_t {
+ kEqual = equal,
+ kNotEqual = not_equal,
+
+ kLessThan = less,
+ kGreaterThan = greater,
+ kLessThanEqual = less_equal,
+ kGreaterThanEqual = greater_equal,
+
+ kUnsignedLessThan = below,
+ kUnsignedGreaterThan = above,
+ kUnsignedLessThanEqual = below_equal,
+ kUnsignedGreaterThanEqual = above_equal,
+
+ kOverflow = overflow,
+ kNoOverflow = no_overflow,
+
+ kZero = zero,
+ kNotZero = not_zero,
+};
+
+inline internal::Condition AsMasmCondition(Condition cond) {
+ return static_cast<internal::Condition>(cond);
+}
+
+namespace detail {
+
+#define __ masm_->
+
+#ifdef DEBUG
+inline bool Clobbers(Register target, MemOperand op) {
+ return op.is_reg(target);
+}
+#endif
+
+} // namespace detail
+
+MemOperand BaselineAssembler::RegisterFrameOperand(
+ interpreter::Register interpreter_register) {
+ return MemOperand(ebp, interpreter_register.ToOperand() * kSystemPointerSize);
+}
+MemOperand BaselineAssembler::FeedbackVectorOperand() {
+ return MemOperand(ebp, BaselineFrameConstants::kFeedbackVectorFromFp);
+}
+
+void BaselineAssembler::Bind(Label* label) { __ bind(label); }
+void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ bind(label); }
+
+void BaselineAssembler::JumpTarget() {
+ // NOP on ia32.
+}
+
+void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
+ __ jmp(target, distance);
+}
+void BaselineAssembler::JumpIf(Condition cc, Label* target,
+ Label::Distance distance) {
+ __ j(AsMasmCondition(cc), target, distance);
+}
+void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
+ Label* target, Label::Distance distance) {
+ __ JumpIfRoot(value, index, target, distance);
+}
+void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
+ Label* target, Label::Distance distance) {
+ __ JumpIfNotRoot(value, index, target, distance);
+}
+void BaselineAssembler::JumpIfSmi(Register value, Label* target,
+ Label::Distance distance) {
+ __ JumpIfSmi(value, target, distance);
+}
+void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
+ Label::Distance distance) {
+ __ JumpIfNotSmi(value, target, distance);
+}
+
+void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
+ __ RecordCommentForOffHeapTrampoline(builtin);
+ __ Call(__ EntryFromBuiltinIndexAsOperand(builtin));
+ if (FLAG_code_comments) __ RecordComment("]");
+}
+
+void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
+ __ RecordCommentForOffHeapTrampoline(builtin);
+ __ jmp(__ EntryFromBuiltinIndexAsOperand(builtin));
+ if (FLAG_code_comments) __ RecordComment("]");
+}
+
+void BaselineAssembler::Test(Register value, int mask) {
+ if ((mask & 0xff) == mask) {
+ __ test_b(value, Immediate(mask));
+ } else {
+ __ test(value, Immediate(mask));
+ }
+}
+
+void BaselineAssembler::CmpObjectType(Register object,
+ InstanceType instance_type,
+ Register map) {
+ __ AssertNotSmi(object);
+ __ CmpObjectType(object, instance_type, map);
+}
+void BaselineAssembler::CmpInstanceType(Register map,
+ InstanceType instance_type) {
+ if (emit_debug_code()) {
+ __ movd(xmm0, eax);
+ __ AssertNotSmi(map);
+ __ CmpObjectType(map, MAP_TYPE, eax);
+ __ Assert(equal, AbortReason::kUnexpectedValue);
+ __ movd(eax, xmm0);
+ }
+ __ CmpInstanceType(map, instance_type);
+}
+void BaselineAssembler::Cmp(Register value, Smi smi) {
+ if (smi.value() == 0) {
+ __ test(value, value);
+ } else {
+ __ cmp(value, Immediate(smi));
+ }
+}
+void BaselineAssembler::ComparePointer(Register value, MemOperand operand) {
+ __ cmp(value, operand);
+}
+void BaselineAssembler::SmiCompare(Register lhs, Register rhs) {
+ __ AssertSmi(lhs);
+ __ AssertSmi(rhs);
+ __ cmp(lhs, rhs);
+}
+void BaselineAssembler::CompareTagged(Register value, MemOperand operand) {
+ __ cmp(value, operand);
+}
+void BaselineAssembler::CompareTagged(MemOperand operand, Register value) {
+ __ cmp(operand, value);
+}
+void BaselineAssembler::CompareByte(Register value, int32_t byte) {
+ __ cmpb(value, Immediate(byte));
+}
+void BaselineAssembler::Move(interpreter::Register output, Register source) {
+ return __ mov(RegisterFrameOperand(output), source);
+}
+void BaselineAssembler::Move(Register output, TaggedIndex value) {
+ __ Move(output, Immediate(value.ptr()));
+}
+void BaselineAssembler::Move(MemOperand output, Register source) {
+ __ mov(output, source);
+}
+void BaselineAssembler::Move(Register output, ExternalReference reference) {
+ __ Move(output, Immediate(reference));
+}
+void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
+ __ Move(output, value);
+}
+void BaselineAssembler::Move(Register output, int32_t value) {
+ __ Move(output, Immediate(value));
+}
+void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
+ __ mov(output, source);
+}
+void BaselineAssembler::MoveSmi(Register output, Register source) {
+ __ mov(output, source);
+}
+
+namespace detail {
+inline void PushSingle(MacroAssembler* masm, RootIndex source) {
+ masm->PushRoot(source);
+}
+inline void PushSingle(MacroAssembler* masm, Register reg) { masm->Push(reg); }
+inline void PushSingle(MacroAssembler* masm, TaggedIndex value) {
+ masm->Push(Immediate(value.ptr()));
+}
+inline void PushSingle(MacroAssembler* masm, Smi value) { masm->Push(value); }
+inline void PushSingle(MacroAssembler* masm, Handle<HeapObject> object) {
+ masm->Push(object);
+}
+inline void PushSingle(MacroAssembler* masm, int32_t immediate) {
+ masm->Push(Immediate(immediate));
+}
+inline void PushSingle(MacroAssembler* masm, MemOperand operand) {
+ masm->Push(operand);
+}
+inline void PushSingle(MacroAssembler* masm, interpreter::Register source) {
+ return PushSingle(masm, BaselineAssembler::RegisterFrameOperand(source));
+}
+
+template <typename Arg>
+struct PushHelper {
+ static int Push(BaselineAssembler* basm, Arg arg) {
+ PushSingle(basm->masm(), arg);
+ return 1;
+ }
+ static int PushReverse(BaselineAssembler* basm, Arg arg) {
+ return Push(basm, arg);
+ }
+};
+
+template <>
+struct PushHelper<interpreter::RegisterList> {
+ static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
+ for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
+ PushSingle(basm->masm(), list[reg_index]);
+ }
+ return list.register_count();
+ }
+ static int PushReverse(BaselineAssembler* basm,
+ interpreter::RegisterList list) {
+ for (int reg_index = list.register_count() - 1; reg_index >= 0;
+ --reg_index) {
+ PushSingle(basm->masm(), list[reg_index]);
+ }
+ return list.register_count();
+ }
+};
+
+template <typename... Args>
+struct PushAllHelper;
+template <>
+struct PushAllHelper<> {
+ static int Push(BaselineAssembler* masm) { return 0; }
+ static int PushReverse(BaselineAssembler* masm) { return 0; }
+};
+template <typename Arg, typename... Args>
+struct PushAllHelper<Arg, Args...> {
+ static int Push(BaselineAssembler* masm, Arg arg, Args... args) {
+ int nargs = PushHelper<Arg>::Push(masm, arg);
+ return nargs + PushAllHelper<Args...>::Push(masm, args...);
+ }
+ static int PushReverse(BaselineAssembler* masm, Arg arg, Args... args) {
+ int nargs = PushAllHelper<Args...>::PushReverse(masm, args...);
+ return nargs + PushHelper<Arg>::PushReverse(masm, arg);
+ }
+};
+
+} // namespace detail
+
+template <typename... T>
+int BaselineAssembler::Push(T... vals) {
+ return detail::PushAllHelper<T...>::Push(this, vals...);
+}
+
+template <typename... T>
+void BaselineAssembler::PushReverse(T... vals) {
+ detail::PushAllHelper<T...>::PushReverse(this, vals...);
+}
+
+template <typename... T>
+void BaselineAssembler::Pop(T... registers) {
+ ITERATE_PACK(__ Pop(registers));
+}
+
+void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+ int offset) {
+ __ mov(output, FieldOperand(source, offset));
+}
+void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
+ int offset) {
+ __ mov(output, FieldOperand(source, offset));
+}
+void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
+ int offset) {
+ __ mov(output, FieldOperand(source, offset));
+}
+void BaselineAssembler::LoadByteField(Register output, Register source,
+ int offset) {
+ __ mov_b(output, FieldOperand(source, offset));
+}
+void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
+ Smi value) {
+ __ mov(FieldOperand(target, offset), Immediate(value));
+}
+void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
+                                                          int offset,
+                                                          Register value) {
+ BaselineAssembler::ScratchRegisterScope scratch_scope(this);
+ Register scratch = scratch_scope.AcquireScratch();
+ DCHECK(!AreAliased(scratch, target, value));
+ __ mov(FieldOperand(target, offset), value);
+ __ RecordWriteField(target, offset, value, scratch, kDontSaveFPRegs);
+}
+void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
+ int offset,
+ Register value) {
+ DCHECK(!AreAliased(target, value));
+ __ mov(FieldOperand(target, offset), value);
+}
+
+void BaselineAssembler::AddToInterruptBudget(int32_t weight) {
+ ScratchRegisterScope scratch_scope(this);
+ Register feedback_cell = scratch_scope.AcquireScratch();
+ LoadFunction(feedback_cell);
+ LoadTaggedPointerField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
+ __ add(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
+ Immediate(weight));
+}
+
+void BaselineAssembler::AddToInterruptBudget(Register weight) {
+ ScratchRegisterScope scratch_scope(this);
+ Register feedback_cell = scratch_scope.AcquireScratch();
+ DCHECK(!AreAliased(feedback_cell, weight));
+ LoadFunction(feedback_cell);
+ LoadTaggedPointerField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
+ __ add(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
+ weight);
+}
+
+void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
+ if (rhs.value() == 0) return;
+ __ add(lhs, Immediate(rhs));
+}
+
+void BaselineAssembler::Switch(Register reg, int case_value_base,
+ Label** labels, int num_labels) {
+ ScratchRegisterScope scope(this);
+ Register table = scope.AcquireScratch();
+ DCHECK(!AreAliased(reg, table));
+ Label fallthrough, jump_table;
+ if (case_value_base > 0) {
+ __ sub(reg, Immediate(case_value_base));
+ }
+ __ cmp(reg, Immediate(num_labels));
+ __ j(above_equal, &fallthrough);
+ __ lea(table, MemOperand(&jump_table));
+ __ jmp(Operand(table, reg, times_system_pointer_size, 0));
+ // Emit the jump table inline, under the assumption that it's not too big.
+ __ Align(kSystemPointerSize);
+ __ bind(&jump_table);
+ for (int i = 0; i < num_labels; ++i) {
+ __ dd(labels[i]);
+ }
+ __ bind(&fallthrough);
+}
+
+#undef __
+#define __ basm.
+
+void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
+ BaselineAssembler basm(masm);
+
+ Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
+ Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();
+
+ __ RecordComment("[ Update Interrupt Budget");
+ __ AddToInterruptBudget(weight);
+
+ // Use compare flags set by AddToInterruptBudget
+ Label skip_interrupt_label;
+ __ JumpIf(Condition::kGreaterThanEqual, &skip_interrupt_label);
+ {
+ __ masm()->SmiTag(params_size);
+ __ Push(params_size, kInterpreterAccumulatorRegister);
+
+ __ LoadContext(kContextRegister);
+ __ Push(MemOperand(ebp, InterpreterFrameConstants::kFunctionOffset));
+ __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+
+ __ Pop(kInterpreterAccumulatorRegister, params_size);
+ __ masm()->SmiUntag(params_size);
+ }
+ __ RecordComment("]");
+
+ __ Bind(&skip_interrupt_label);
+
+ BaselineAssembler::ScratchRegisterScope scope(&basm);
+ Register scratch = scope.AcquireScratch();
+ DCHECK(!AreAliased(weight, params_size, scratch));
+
+ Register actual_params_size = scratch;
+ // Compute the size of the actual parameters + receiver (in bytes).
+ __ masm()->mov(actual_params_size,
+ MemOperand(ebp, StandardFrameConstants::kArgCOffset));
+
+ // If actual is bigger than formal, then we should use it to free up the stack
+ // arguments.
+ Label corrected_args_count;
+ __ masm()->cmp(params_size, actual_params_size);
+ __ JumpIf(Condition::kGreaterThanEqual, &corrected_args_count, Label::kNear);
+ __ masm()->mov(params_size, actual_params_size);
+ __ Bind(&corrected_args_count);
+
+ // Leave the frame (also dropping the register file).
+ __ masm()->LeaveFrame(StackFrame::BASELINE);
+
+ // Drop receiver + arguments.
+ Register return_pc = scratch;
+ __ masm()->PopReturnAddressTo(return_pc);
+ __ masm()->lea(esp, MemOperand(esp, params_size, times_system_pointer_size,
+ kSystemPointerSize));
+ __ masm()->PushReturnAddressFrom(return_pc);
+ __ masm()->Ret();
+}
+
+#undef __
+
+} // namespace baseline
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BASELINE_IA32_BASELINE_ASSEMBLER_IA32_INL_H_
diff --git a/deps/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h b/deps/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h
new file mode 100644
index 00000000000..733c05fe185
--- /dev/null
+++ b/deps/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h
@@ -0,0 +1,93 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_IA32_BASELINE_COMPILER_IA32_INL_H_
+#define V8_BASELINE_IA32_BASELINE_COMPILER_IA32_INL_H_
+
+#include "src/base/macros.h"
+#include "src/baseline/baseline-compiler.h"
+#include "src/codegen/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+#define __ basm_.
+
+void BaselineCompiler::Prologue() {
+ DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
+ int max_frame_size = bytecode_->frame_size() + max_call_args_;
+ CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
+ kJSFunctionRegister, kJavaScriptCallArgCountRegister,
+ max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
+
+ PrologueFillFrame();
+}
+
+void BaselineCompiler::PrologueFillFrame() {
+ __ RecordComment("[ Fill frame");
+ // Inlined register frame fill
+ interpreter::Register new_target_or_generator_register =
+ bytecode_->incoming_new_target_or_generator_register();
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ int register_count = bytecode_->register_count();
+ // Magic value
+ const int kLoopUnrollSize = 8;
+ const int new_target_index = new_target_or_generator_register.index();
+ const bool has_new_target = new_target_index != kMaxInt;
+ if (has_new_target) {
+ DCHECK_LE(new_target_index, register_count);
+ for (int i = 0; i < new_target_index; i++) {
+ __ Push(kInterpreterAccumulatorRegister);
+ }
+ // Push new_target_or_generator.
+ __ Push(kJavaScriptCallNewTargetRegister);
+ register_count -= new_target_index + 1;
+ }
+ if (register_count < 2 * kLoopUnrollSize) {
+ // If the frame is small enough, just unroll the frame fill completely.
+ for (int i = 0; i < register_count; ++i) {
+ __ Push(kInterpreterAccumulatorRegister);
+ }
+ } else {
+ // Extract the first few registers to round to the unroll size.
+ int first_registers = register_count % kLoopUnrollSize;
+ for (int i = 0; i < first_registers; ++i) {
+ __ Push(kInterpreterAccumulatorRegister);
+ }
+ BaselineAssembler::ScratchRegisterScope scope(&basm_);
+ Register scratch = scope.AcquireScratch();
+ __ Move(scratch, register_count / kLoopUnrollSize);
+ // We enter the loop unconditionally, so make sure we need to loop at least
+ // once.
+ DCHECK_GT(register_count / kLoopUnrollSize, 0);
+ Label loop;
+ __ Bind(&loop);
+ for (int i = 0; i < kLoopUnrollSize; ++i) {
+ __ Push(kInterpreterAccumulatorRegister);
+ }
+ __ masm()->dec(scratch);
+ __ JumpIf(Condition::kGreaterThan, &loop);
+ }
+ __ RecordComment("]");
+}
+
+void BaselineCompiler::VerifyFrameSize() {
+ __ masm()->movd(xmm0, eax);
+ __ Move(eax, esp);
+ __ masm()->add(eax,
+ Immediate(InterpreterFrameConstants::kFixedFrameSizeFromFp +
+ bytecode_->frame_size()));
+ __ masm()->cmp(eax, ebp);
+ __ masm()->Assert(equal, AbortReason::kUnexpectedStackPointer);
+ __ masm()->movd(eax, xmm0);
+}
+
+#undef __
+
+} // namespace baseline
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BASELINE_IA32_BASELINE_COMPILER_IA32_INL_H_
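
Note: as a worked example of the fill-frame unrolling in PrologueFillFrame above (illustrative numbers only): with kLoopUnrollSize = 8 and register_count = 21, first_registers = 21 % 8 = 5, so five pushes are emitted individually, and the loop then runs 21 / 8 = 2 times at 8 pushes per iteration, giving the full 5 + 16 = 21 undefined register slots.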
diff --git a/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h b/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h
index 8fd564442ea..202f83c7615 100644
--- a/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h
+++ b/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h
@@ -5,6 +5,7 @@
#ifndef V8_BASELINE_X64_BASELINE_ASSEMBLER_X64_INL_H_
#define V8_BASELINE_X64_BASELINE_ASSEMBLER_X64_INL_H_
+#include "src/base/macros.h"
#include "src/baseline/baseline-assembler.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/x64/register-x64.h"
@@ -17,12 +18,11 @@ namespace detail {
// Avoid using kScratchRegister(==r10) since the macro-assembler doesn't use
// this scope and will conflict.
-static constexpr Register kScratchRegisters[] = {r8, r9, r11, r12, r14, r15};
+static constexpr Register kScratchRegisters[] = {r8, r9, r11, r12, r15};
static constexpr int kNumScratchRegisters = arraysize(kScratchRegisters);
} // namespace detail
-// TODO(v8:11429): Move BaselineAssembler to baseline-assembler-<arch>-inl.h
class BaselineAssembler::ScratchRegisterScope {
public:
explicit ScratchRegisterScope(BaselineAssembler* assembler)
@@ -46,7 +46,7 @@ class BaselineAssembler::ScratchRegisterScope {
};
// TODO(v8:11461): Unify condition names in the MacroAssembler.
-enum class Condition : uint8_t {
+enum class Condition : uint32_t {
kEqual = equal,
kNotEqual = not_equal,
@@ -92,6 +92,11 @@ MemOperand BaselineAssembler::FeedbackVectorOperand() {
}
void BaselineAssembler::Bind(Label* label) { __ bind(label); }
+void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ bind(label); }
+
+void BaselineAssembler::JumpTarget() {
+ // NOP on x64.
+}
void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
__ jmp(target, distance);
@@ -118,15 +123,25 @@ void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
}
void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
- __ RecordCommentForOffHeapTrampoline(builtin);
- __ Call(__ EntryFromBuiltinIndexAsOperand(builtin));
- if (FLAG_code_comments) __ RecordComment("]");
+ if (masm()->options().short_builtin_calls) {
+ // Generate pc-relative call.
+ __ CallBuiltin(builtin);
+ } else {
+ __ RecordCommentForOffHeapTrampoline(builtin);
+ __ Call(__ EntryFromBuiltinIndexAsOperand(builtin));
+ if (FLAG_code_comments) __ RecordComment("]");
+ }
}
void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
- __ RecordCommentForOffHeapTrampoline(builtin);
- __ Jump(__ EntryFromBuiltinIndexAsOperand(builtin));
- if (FLAG_code_comments) __ RecordComment("]");
+ if (masm()->options().short_builtin_calls) {
+ // Generate pc-relative jump.
+ __ TailCallBuiltin(builtin);
+ } else {
+ __ RecordCommentForOffHeapTrampoline(builtin);
+ __ Jump(__ EntryFromBuiltinIndexAsOperand(builtin));
+ if (FLAG_code_comments) __ RecordComment("]");
+ }
}
void BaselineAssembler::Test(Register value, int mask) {
diff --git a/deps/v8/src/baseline/x64/baseline-compiler-x64-inl.h b/deps/v8/src/baseline/x64/baseline-compiler-x64-inl.h
index e4f123e8e05..73b43770e56 100644
--- a/deps/v8/src/baseline/x64/baseline-compiler-x64-inl.h
+++ b/deps/v8/src/baseline/x64/baseline-compiler-x64-inl.h
@@ -16,12 +16,11 @@ namespace baseline {
#define __ basm_.
void BaselineCompiler::Prologue() {
- __ Move(kInterpreterBytecodeArrayRegister, bytecode_);
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
+ int max_frame_size = bytecode_->frame_size() + max_call_args_;
CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
kJSFunctionRegister, kJavaScriptCallArgCountRegister,
- kInterpreterBytecodeArrayRegister,
- kJavaScriptCallNewTargetRegister);
+ max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
PrologueFillFrame();
}
diff --git a/deps/v8/src/bigint/DEPS b/deps/v8/src/bigint/DEPS
new file mode 100644
index 00000000000..3700fc2b3bf
--- /dev/null
+++ b/deps/v8/src/bigint/DEPS
@@ -0,0 +1,13 @@
+include_rules = [
+ # Don't depend on the rest of V8.
+ "-include",
+ "-src",
+ "+src/bigint",
+]
+
+specific_include_rules = {
+ # The public interface should not depend on internals.
+ "bigint.h": [
+ "-src/bigint",
+ ],
+}
diff --git a/deps/v8/src/bigint/OWNERS b/deps/v8/src/bigint/OWNERS
new file mode 100644
index 00000000000..f8c2fcc47aa
--- /dev/null
+++ b/deps/v8/src/bigint/OWNERS
@@ -0,0 +1,2 @@
+jkummerow@chromium.org
+thibaudm@chromium.org
diff --git a/deps/v8/src/bigint/bigint.h b/deps/v8/src/bigint/bigint.h
new file mode 100644
index 00000000000..a87622b167a
--- /dev/null
+++ b/deps/v8/src/bigint/bigint.h
@@ -0,0 +1,131 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BIGINT_BIGINT_H_
+#define V8_BIGINT_BIGINT_H_
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <cstring>
+#include <iostream>
+
+namespace v8 {
+namespace bigint {
+
+// To play nice with embedders' macros, we define our own DCHECK here.
+// It's only used in this file, and undef'ed at the end.
+#ifdef DEBUG
+#define BIGINT_H_DCHECK(cond) \
+ if (!(cond)) { \
+ std::cerr << __FILE__ << ":" << __LINE__ << ": "; \
+ std::cerr << "Assertion failed: " #cond "\n"; \
+ abort(); \
+ }
+#else
+#define BIGINT_H_DCHECK(cond) (void(0))
+#endif
+
+// The type of a digit: a register-width unsigned integer.
+using digit_t = uintptr_t;
+using signed_digit_t = intptr_t;
+#if UINTPTR_MAX == 0xFFFFFFFF
+// 32-bit platform.
+using twodigit_t = uint64_t;
+#define HAVE_TWODIGIT_T 1
+static constexpr int kLog2DigitBits = 5;
+#elif UINTPTR_MAX == 0xFFFFFFFFFFFFFFFF
+// 64-bit platform.
+static constexpr int kLog2DigitBits = 6;
+#if defined(__SIZEOF_INT128__)
+using twodigit_t = __uint128_t;
+#define HAVE_TWODIGIT_T 1
+#endif // defined(__SIZEOF_INT128__)
+#else
+#error Unsupported platform.
+#endif
+static constexpr int kDigitBits = 1 << kLog2DigitBits;
+static_assert(kDigitBits == 8 * sizeof(digit_t), "inconsistent type sizes");
+
+// Describes an array of digits, also known as a BigInt. Unsigned.
+// Does not own the memory it points at, and only gives read-only access to it.
+// Digits are stored in little-endian order.
+class Digits {
+ public:
+ // This is the constructor intended for public consumption.
+ Digits(digit_t* mem, int len) : digits_(mem), len_(len) {
+ // Require 4-byte alignment (even on 64-bit platforms).
+ // TODO(jkummerow): See if we can tighten BigInt alignment in V8 to
+ // system pointer size, and raise this requirement to that.
+ BIGINT_H_DCHECK((reinterpret_cast<uintptr_t>(mem) & 3) == 0);
+ }
+ // Provides a "slice" view into another Digits object.
+ Digits(Digits src, int offset, int len)
+ : digits_(src.digits_ + offset),
+ len_(std::max(0, std::min(src.len_ - offset, len))) {
+ BIGINT_H_DCHECK(offset >= 0);
+ }
+ // Alternative way to get a "slice" view into another Digits object.
+ Digits operator+(int i) {
+ BIGINT_H_DCHECK(i >= 0 && i <= len_);
+ return Digits(digits_ + i, len_ - i);
+ }
+
+ // Provides access to individual digits.
+ digit_t operator[](int i) {
+ BIGINT_H_DCHECK(i >= 0 && i < len_);
+ return read_4byte_aligned(i);
+ }
+ // Convenience accessor for the most significant digit.
+ digit_t msd() {
+ BIGINT_H_DCHECK(len_ > 0);
+ return read_4byte_aligned(len_ - 1);
+ }
+ // Checks "pointer equality" (does not compare digits contents).
+ bool operator==(const Digits& other) const {
+ return digits_ == other.digits_ && len_ == other.len_;
+ }
+
+ // Decrements {len_} until there are no leading zero digits left.
+ void Normalize() {
+ while (len_ > 0 && msd() == 0) len_--;
+ }
+ // Unconditionally drops exactly one leading zero digit.
+ void TrimOne() {
+ BIGINT_H_DCHECK(len_ > 0 && msd() == 0);
+ len_--;
+ }
+
+ int len() { return len_; }
+ const digit_t* digits() const { return digits_; }
+
+ protected:
+ friend class TemporaryLeftShift;
+ digit_t* digits_;
+ int len_;
+
+ private:
+ // We require externally-provided digits arrays to be 4-byte aligned, but
+ // not necessarily 8-byte aligned; so on 64-bit platforms we use memcpy
+ // to allow unaligned reads.
+ digit_t read_4byte_aligned(int i) {
+ if (sizeof(digit_t) == 4) {
+ return digits_[i];
+ } else {
+ digit_t result;
+ memcpy(&result, digits_ + i, sizeof(result));
+ return result;
+ }
+ }
+};
+
+// Returns r such that r < 0 if A < B; r > 0 if A > B; r == 0 if A == B.
+int Compare(Digits A, Digits B);
+
+} // namespace bigint
+} // namespace v8
+
+#undef BIGINT_H_DCHECK
+
+#endif // V8_BIGINT_BIGINT_H_
diff --git a/deps/v8/src/bigint/vector-arithmetic.cc b/deps/v8/src/bigint/vector-arithmetic.cc
new file mode 100644
index 00000000000..9a28b168ba5
--- /dev/null
+++ b/deps/v8/src/bigint/vector-arithmetic.cc
@@ -0,0 +1,22 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/bigint/bigint.h"
+
+namespace v8 {
+namespace bigint {
+
+int Compare(Digits A, Digits B) {
+ A.Normalize();
+ B.Normalize();
+ int diff = A.len() - B.len();
+ if (diff != 0) return diff;
+ int i = A.len() - 1;
+ while (i >= 0 && A[i] == B[i]) i--;
+ if (i < 0) return 0;
+ return A[i] > B[i] ? 1 : -1;
+}
+
+} // namespace bigint
+} // namespace v8
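
Note: a minimal usage sketch of the new Digits/Compare API (not part of the patch). Digit arrays are little-endian and only need 4-byte alignment; the values below are arbitrary:

#include "src/bigint/bigint.h"

// Returns true: A = 1 * 2^kDigitBits + 5 is smaller than B = 2 * 2^kDigitBits.
bool FirstIsSmaller() {
  v8::bigint::digit_t a[] = {5, 1};  // least significant digit first
  v8::bigint::digit_t b[] = {0, 2};
  v8::bigint::Digits A(a, 2);
  v8::bigint::Digits B(b, 2);
  return v8::bigint::Compare(A, B) < 0;
}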
diff --git a/deps/v8/src/builtins/accessors.cc b/deps/v8/src/builtins/accessors.cc
index cf2a18a34d8..c255184caeb 100644
--- a/deps/v8/src/builtins/accessors.cc
+++ b/deps/v8/src/builtins/accessors.cc
@@ -5,6 +5,7 @@
#include "src/builtins/accessors.h"
#include "src/api/api-inl.h"
+#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/execution.h"
#include "src/execution/frames-inl.h"
@@ -66,6 +67,11 @@ static V8_INLINE bool CheckForName(Isolate* isolate, Handle<Name> name,
// If true, *object_offset contains offset of object field.
bool Accessors::IsJSObjectFieldAccessor(Isolate* isolate, Handle<Map> map,
Handle<Name> name, FieldIndex* index) {
+ if (map->is_dictionary_map()) {
+    // There are no descriptors in a dictionary mode map.
+ return false;
+ }
+
switch (map->instance_type()) {
case JS_ARRAY_TYPE:
return CheckForName(isolate, name, isolate->factory()->length_string(),
@@ -695,7 +701,8 @@ void Accessors::FunctionCallerGetter(
MaybeHandle<JSFunction> maybe_caller;
maybe_caller = FindCaller(isolate, function);
Handle<JSFunction> caller;
- if (maybe_caller.ToHandle(&caller)) {
+ // We don't support caller access with correctness fuzzing.
+ if (!FLAG_correctness_fuzzer_suppressions && maybe_caller.ToHandle(&caller)) {
result = caller;
} else {
result = isolate->factory()->null_value();
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 2762c61bde6..817d30fe26a 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -22,8 +22,11 @@
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -306,12 +309,15 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
-static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
- Register sfi_data,
- Register scratch1) {
+static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1,
+ Label* is_baseline) {
Label done;
- __ CompareObjectType(sfi_data, scratch1, scratch1, INTERPRETER_DATA_TYPE);
+ __ CompareObjectType(sfi_data, scratch1, scratch1, BASELINE_DATA_TYPE);
+ __ b(eq, is_baseline);
+ __ cmp(scratch1, Operand(INTERPRETER_DATA_TYPE));
__ b(ne, &done);
__ ldr(sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
@@ -401,11 +407,13 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
+ Label is_baseline;
__ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoBytecode(masm, r3, r0);
+ GetSharedFunctionInfoBytecodeOrBaseline(masm, r3, r0, &is_baseline);
__ CompareObjectType(r3, r3, r3, BYTECODE_ARRAY_TYPE);
__ Assert(eq, AbortReason::kMissingBytecodeArray);
+ __ bind(&is_baseline);
}
// Resume (Ignition/TurboFan) generator object.
@@ -989,6 +997,23 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ bind(&end);
}
+// Read off the optimization state in the feedback vector and check if there
+// is optimized code or an optimization marker that needs to be processed.
+static void LoadOptimizationStateAndJumpIfNeedsProcessing(
+ MacroAssembler* masm, Register optimization_state, Register feedback_vector,
+ Label* has_optimized_code_or_marker) {
+ __ RecordComment("[ Check optimization state");
+
+ __ ldr(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+ __ tst(
+ optimization_state,
+ Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
+ __ b(ne, has_optimized_code_or_marker);
+
+ __ RecordComment("]");
+}
+
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
MacroAssembler* masm, Register optimization_state,
Register feedback_vector) {
@@ -1011,6 +1036,159 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
TailCallOptimizedCodeSlot(masm, optimized_code_entry, r6);
}
+// static
+void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
+ UseScratchRegisterScope temps(masm);
+ // Need a few extra registers
+ temps.Include(r8, r9);
+
+ auto descriptor = Builtins::CallInterfaceDescriptorFor(
+ Builtins::kBaselineOutOfLinePrologue);
+ Register closure = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kClosure);
+ // Load the feedback vector from the closure.
+ Register feedback_vector = temps.Acquire();
+ __ ldr(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ if (__ emit_debug_code()) {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ CompareObjectType(feedback_vector, scratch, scratch,
+ FEEDBACK_VECTOR_TYPE);
+ __ Assert(eq, AbortReason::kExpectedFeedbackVector);
+ }
+
+ // Check for an optimization marker.
+ Label has_optimized_code_or_marker;
+ Register optimization_state = no_reg;
+ {
+ UseScratchRegisterScope temps(masm);
+ // optimization_state will be used only in |has_optimized_code_or_marker|
+ // and outside it can be reused.
+ optimization_state = temps.Acquire();
+ LoadOptimizationStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector,
+ &has_optimized_code_or_marker);
+ }
+
+ // Increment invocation count for the function.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register invocation_count = temps.Acquire();
+ __ ldr(invocation_count,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+ __ add(invocation_count, invocation_count, Operand(1));
+ __ str(invocation_count,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+ }
+
+ __ RecordComment("[ Frame Setup");
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ // Normally the first thing we'd do here is Push(lr, fp), but we already
+ // entered the frame in BaselineCompiler::Prologue, as we had to use the
+  // value of lr before the call to this BaselineOutOfLinePrologue builtin.
+
+ Register callee_context = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kCalleeContext);
+ Register callee_js_function = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kClosure);
+ __ Push(callee_context, callee_js_function);
+ DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
+ DCHECK_EQ(callee_js_function, kJSFunctionRegister);
+
+ Register argc = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
+ // We'll use the bytecode for both code age/OSR resetting, and pushing onto
+ // the frame, so load it into a register.
+ Register bytecodeArray = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
+
+ // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
+ // are 8-bit fields next to each other, so we could just optimize by writing
+  // a 16-bit value. These static asserts guard that our assumption is valid.
+ STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
+ BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ mov(scratch, Operand(0));
+ __ strh(scratch, FieldMemOperand(bytecodeArray,
+ BytecodeArray::kOsrNestingLevelOffset));
+ }
+
+ __ Push(argc, bytecodeArray);
+
+  // Baseline code frames store the feedback vector where the interpreter would
+ // store the bytecode offset.
+ if (__ emit_debug_code()) {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ CompareObjectType(feedback_vector, scratch, scratch,
+ FEEDBACK_VECTOR_TYPE);
+ __ Assert(eq, AbortReason::kExpectedFeedbackVector);
+ }
+ __ Push(feedback_vector);
+ __ RecordComment("]");
+
+ __ RecordComment("[ Stack/interrupt check");
+ Label call_stack_guard;
+ Register frame_size = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
+ {
+ // Stack check. This folds the checks for both the interrupt stack limit
+ // check and the real stack limit into one by just checking for the
+ // interrupt limit. The interrupt limit is either equal to the real stack
+ // limit or tighter. By ensuring we have space until that limit after
+ // building the frame we can quickly precheck both at once.
+ UseScratchRegisterScope temps(masm);
+
+ Register sp_minus_frame_size = temps.Acquire();
+ __ sub(sp_minus_frame_size, sp, frame_size);
+ Register interrupt_limit = temps.Acquire();
+ __ LoadStackLimit(interrupt_limit, StackLimitKind::kInterruptStackLimit);
+ __ cmp(sp_minus_frame_size, interrupt_limit);
+ __ b(&call_stack_guard, lo);
+ __ RecordComment("]");
+ }
+
+ // Do "fast" return to the caller pc in lr.
+ __ Ret();
+
+ __ bind(&has_optimized_code_or_marker);
+ {
+ UseScratchRegisterScope temps(masm);
+ // Ensure the optimization_state is not allocated again.
+ temps.Exclude(optimization_state);
+
+ __ RecordComment("[ Optimized marker check");
+ // Drop the frame created by the baseline call.
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+ feedback_vector);
+ __ Trap();
+ __ RecordComment("]");
+ }
+
+ __ bind(&call_stack_guard);
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ RecordComment("[ Stack/interrupt call");
+ // Save incoming new target or generator
+ __ Push(kJavaScriptCallNewTargetRegister);
+ __ SmiTag(frame_size);
+ __ Push(frame_size);
+ __ CallRuntime(Runtime::kStackGuardWithGap);
+ __ Pop(kJavaScriptCallNewTargetRegister);
+ __ RecordComment("]");
+ }
+
+ __ Ret();
+}
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
@@ -1035,7 +1213,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ ldr(r4, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r8);
+
+ Label is_baseline;
+ GetSharedFunctionInfoBytecodeOrBaseline(
+ masm, kInterpreterBytecodeArrayRegister, r8, &is_baseline);
// The bytecode array could have been flushed from the shared function info,
// if so, call into CompileLazy.
@@ -1058,17 +1239,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ b(ne, &push_stack_frame);
Register optimization_state = r4;
-
- // Read off the optimization state in the feedback vector.
- __ ldr(optimization_state,
- FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
-
- // Check if the optimized code slot is not empty or has a optimization marker.
Label has_optimized_code_or_marker;
- __ tst(
- optimization_state,
- Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
- __ b(ne, &has_optimized_code_or_marker);
+ LoadOptimizationStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector, &has_optimized_code_or_marker);
Label not_optimized;
__ bind(&not_optimized);
@@ -1217,6 +1390,38 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
feedback_vector);
+ __ bind(&is_baseline);
+ {
+ // Load the feedback vector from the closure.
+ __ ldr(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ ldr(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label install_baseline_code;
+ // Check if feedback vector is valid. If not, call prepare for baseline to
+ // allocate it.
+ __ ldr(r8, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ ldrh(r8, FieldMemOperand(r8, Map::kInstanceTypeOffset));
+ __ cmp(r8, Operand(FEEDBACK_VECTOR_TYPE));
+ __ b(ne, &install_baseline_code);
+
+ // Check for an optimization marker.
+ LoadOptimizationStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector,
+ &has_optimized_code_or_marker);
+
+ // Load the baseline code into the closure.
+ __ ldr(r2, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BaselineData::kBaselineCodeOffset));
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ ReplaceClosureCodeWithOptimizedCode(masm, r2, closure);
+ __ JumpCodeObject(r2);
+
+ __ bind(&install_baseline_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
+ }
+
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
@@ -1570,7 +1775,30 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
__ Ret();
}
-void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+void Builtins::Generate_TailCallOptimizedCodeSlot(MacroAssembler* masm) {
+ UseScratchRegisterScope temps(masm);
+ // Need a few extra registers
+ temps.Include(r8, r9);
+ Register optimized_code_entry = kJavaScriptCallCodeStartRegister;
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, temps.Acquire());
+}
+
+namespace {
+
+void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
+ Operand offset = Operand::Zero()) {
+ // Compute the target address = entry_address + offset
+ if (offset.IsImmediate() && offset.immediate() == 0) {
+ __ mov(lr, entry_address);
+ } else {
+ __ add(lr, entry_address, offset);
+ }
+
+ // "return" to the OSR entry point of the function.
+ __ Ret();
+}
+
+void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
@@ -1584,9 +1812,11 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
__ bind(&skip);
- // Drop the handler frame that is be sitting on top of the actual
- // JavaScript frame. This is the case then OSR is triggered from bytecode.
- __ LeaveFrame(StackFrame::STUB);
+ if (is_interpreter) {
+    // Drop the handler frame that is sitting on top of the actual
+    // JavaScript frame. This is the case when OSR is triggered from bytecode.
+ __ LeaveFrame(StackFrame::STUB);
+ }
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
@@ -1601,13 +1831,18 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
__ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(
DeoptimizationData::kOsrPcOffsetIndex)));
- // Compute the target address = code start + osr_offset
- __ add(lr, r0, Operand::SmiUntag(r1));
-
- // And "return" to the OSR entry point of the function.
- __ Ret();
+ Generate_OSREntry(masm, r0, Operand::SmiUntag(r1));
}
}
+} // namespace
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+ return OnStackReplacement(masm, true);
+}
+
+void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
+ return OnStackReplacement(masm, false);
+}
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
@@ -2187,7 +2422,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Overwrite the original receiver the (original) target.
__ str(r1, __ ReceiverOperand(r0));
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r1);
+ __ LoadNativeContextSlot(r1, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -2297,7 +2532,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Overwrite the original receiver with the (original) target.
__ str(r1, __ ReceiverOperand(r0));
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r1);
+ __ LoadNativeContextSlot(r1, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -2309,6 +2544,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+#if V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in a register by the jump table trampoline.
// Convert to Smi for the runtime call.
@@ -2367,12 +2603,21 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
+ STATIC_ASSERT(DwVfpRegister::kNumRegisters == 32);
+ constexpr uint32_t last =
+ 31 - base::bits::CountLeadingZeros32(
+ WasmDebugBreakFrameConstants::kPushedFpRegs);
+ constexpr uint32_t first = base::bits::CountTrailingZeros32(
+ WasmDebugBreakFrameConstants::kPushedFpRegs);
+ static_assert(
+ base::bits::CountPopulation(
+ WasmDebugBreakFrameConstants::kPushedFpRegs) == last - first + 1,
+ "All registers in the range from first to last have to be set");
+
// Save all parameter registers. They might hold live values, we restore
// them after the runtime call.
- constexpr DwVfpRegister lowest_fp_reg = DwVfpRegister::from_code(
- WasmDebugBreakFrameConstants::kFirstPushedFpReg);
- constexpr DwVfpRegister highest_fp_reg = DwVfpRegister::from_code(
- WasmDebugBreakFrameConstants::kLastPushedFpReg);
+ constexpr DwVfpRegister lowest_fp_reg = DwVfpRegister::from_code(first);
+ constexpr DwVfpRegister highest_fp_reg = DwVfpRegister::from_code(last);
// Store gp parameter registers.
__ stm(db_w, sp, WasmDebugBreakFrameConstants::kPushedGpRegs);
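
The static_assert block added in this hunk derives the lowest and highest pushed FP registers from the kPushedFpRegs bitmask and checks that the set bits form one contiguous run, which is what lets the builtin save the whole d<first>..d<last> range in one store-multiple. A minimal standalone sketch of the same arithmetic, using the C++20 <bit> helpers in place of base::bits and a made-up mask value:

    #include <bit>
    #include <cstdint>

    // Hypothetical mask: d1..d7 pushed. Not the real kPushedFpRegs value.
    constexpr uint32_t kMask = 0b0000'0000'1111'1110;
    constexpr uint32_t kFirst = std::countr_zero(kMask);       // 1
    constexpr uint32_t kLast = 31u - std::countl_zero(kMask);  // 7
    // Holds only if the set bits are one contiguous run, i.e. the registers
    // can be saved as the single range d<kFirst>..d<kLast>.
    static_assert(std::popcount(kMask) == kLast - kFirst + 1);
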
@@ -2391,6 +2636,12 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
__ Ret();
}
+void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
+ // TODO(v8:10701): Implement for this platform.
+ __ Trap();
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame) {
@@ -2639,11 +2890,6 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ Ret();
}
-void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
- // TODO(v8:10701): Implement for this platform.
- __ Trap();
-}
-
namespace {
int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
@@ -3226,6 +3472,151 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
+namespace {
+
+// Converts an interpreter frame into a baseline frame and continues execution
+// in baseline code (baseline code has to exist on the shared function info),
+// either at the current or next (in execution order) bytecode.
+void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
+ bool is_osr = false) {
+ __ Push(kInterpreterAccumulatorRegister);
+ Label start;
+ __ bind(&start);
+
+ // Get function from the frame.
+ Register closure = r1;
+ __ ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+
+ // Load the feedback vector.
+ Register feedback_vector = r2;
+ __ ldr(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label install_baseline_code;
+ // Check if feedback vector is valid. If not, call prepare for baseline to
+ // allocate it.
+ __ CompareObjectType(feedback_vector, r3, r3, FEEDBACK_VECTOR_TYPE);
+ __ b(ne, &install_baseline_code);
+
+ // Save BytecodeOffset from the stack frame.
+ __ ldr(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ // Replace BytecodeOffset with the feedback vector.
+ __ str(feedback_vector,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ feedback_vector = no_reg;
+
+ // Get the Code object from the shared function info.
+ Register code_obj = r4;
+ __ ldr(code_obj,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(code_obj,
+ FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+ __ ldr(code_obj,
+ FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
+
+ // Compute baseline pc for bytecode offset.
+ ExternalReference get_baseline_pc_extref;
+ if (next_bytecode || is_osr) {
+ get_baseline_pc_extref =
+ ExternalReference::baseline_pc_for_next_executed_bytecode();
+ } else {
+ get_baseline_pc_extref =
+ ExternalReference::baseline_pc_for_bytecode_offset();
+ }
+ Register get_baseline_pc = r3;
+ __ Move(get_baseline_pc, get_baseline_pc_extref);
+
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset.
+ // TODO(pthier): Investigate if it is feasible to handle this special case
+ // in TurboFan instead of here.
+ Label valid_bytecode_offset, function_entry_bytecode;
+ if (!is_osr) {
+ __ cmp(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ __ b(eq, &function_entry_bytecode);
+ }
+
+ __ sub(kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+ __ bind(&valid_bytecode_offset);
+ // Get bytecode array from the stack frame.
+ __ ldr(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ {
+ Register arg_reg_1 = r0;
+ Register arg_reg_2 = r1;
+ Register arg_reg_3 = r2;
+ __ mov(arg_reg_1, code_obj);
+ __ mov(arg_reg_2, kInterpreterBytecodeOffsetRegister);
+ __ mov(arg_reg_3, kInterpreterBytecodeArrayRegister);
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ PrepareCallCFunction(3, 0);
+ __ CallCFunction(get_baseline_pc, 3, 0);
+ }
+ __ add(code_obj, code_obj, kReturnRegister0);
+ __ Pop(kInterpreterAccumulatorRegister);
+
+ if (is_osr) {
+ // Reset the OSR loop nesting depth to disarm back edges.
+ // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
+ // Sparkplug here.
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ mov(scratch, Operand(0));
+ __ strh(scratch, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kOsrNestingLevelOffset));
+ Generate_OSREntry(masm, code_obj,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ } else {
+ __ add(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(code_obj);
+ }
+ __ Trap(); // Unreachable.
+
+ if (!is_osr) {
+ __ bind(&function_entry_bytecode);
+ // If the bytecode offset is kFunctionEntryBytecodeOffset, get the start
+ // the first bytecode.
+ __ mov(kInterpreterBytecodeOffsetRegister, Operand(0));
+ if (next_bytecode) {
+ __ Move(get_baseline_pc,
+ ExternalReference::baseline_pc_for_bytecode_offset());
+ }
+ __ b(&valid_bytecode_offset);
+ }
+
+ __ bind(&install_baseline_code);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(closure);
+ __ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ }
+ // Retry from the start after installing baseline code.
+ __ b(&start);
+}
+
+} // namespace
+
+void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
+ Generate_BaselineEntry(masm, false);
+}
+
+void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
+ Generate_BaselineEntry(masm, true);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
+ MacroAssembler* masm) {
+ Generate_BaselineEntry(masm, false, true);
+}
+
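
The helper above finishes by calling a C function to translate the interpreter's bytecode offset into an offset inside the baseline Code object, then jumps to that position. A hedged sketch of the resulting address computation; GetBaselinePcForOffset is an assumed stand-in for the baseline_pc_for_bytecode_offset / baseline_pc_for_next_executed_bytecode external references, and header_bias stands for Code::kHeaderSize - kHeapObjectTag:

    #include <cstdint>

    // Assumed declaration; the real helpers are reached through ExternalReference.
    extern "C" std::uintptr_t GetBaselinePcForOffset(std::uintptr_t code_obj,
                                                     std::intptr_t bytecode_offset,
                                                     std::uintptr_t bytecode_array);

    // Mirrors "add(code_obj, code_obj, kReturnRegister0)" followed by the
    // header bias applied right before the final Jump / OSR entry above.
    std::uintptr_t BaselineEntryAddress(std::uintptr_t code_obj,
                                        std::intptr_t bytecode_offset,
                                        std::uintptr_t bytecode_array,
                                        std::intptr_t header_bias) {
      std::uintptr_t pc_offset =
          GetBaselinePcForOffset(code_obj, bytecode_offset, bytecode_array);
      return code_obj + pc_offset + header_bias;
    }
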
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index f5a3cd98693..d095d60b302 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -23,8 +23,11 @@
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
#if defined(V8_OS_WIN)
#include "src/diagnostics/unwinding-info-win64.h"
@@ -406,6 +409,7 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
__ PushArgument(x1);
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
+ __ Unreachable();
}
// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
@@ -632,6 +636,11 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Initialize the root register.
// C calling convention. The first argument is passed in x0.
__ Mov(kRootRegister, x0);
+
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ // Initialize the pointer cage base register.
+ __ Mov(kPointerCageBaseRegister, x0);
+#endif
}
// Set up fp. It points to the {fp, lr} pair pushed as the last step in
@@ -910,10 +919,13 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Mov(x23, x19);
__ Mov(x24, x19);
__ Mov(x25, x19);
+#ifndef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
__ Mov(x28, x19);
+#endif
// Don't initialize the reserved registers.
// x26 : root register (kRootRegister).
// x27 : context pointer (cp).
+ // x28 : pointer cage base register (kPointerCageBaseRegister).
// x29 : frame pointer (fp).
Handle<Code> builtin = is_construct
@@ -1223,8 +1235,6 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ Assert(eq, AbortReason::kExpectedFeedbackVector);
}
- __ RecordComment("[ Check optimization state");
-
// Check for an optimization marker.
Label has_optimized_code_or_marker;
Register optimization_state = temps.AcquireW();
@@ -1248,7 +1258,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
// Normally the first thing we'd do here is Push(lr, fp), but we already
// entered the frame in BaselineCompiler::Prologue, as we had to use the
- // value lr had before the call to this BaselineOutOfLinePrologue builtin.
+ // value lr before the call to this BaselineOutOfLinePrologue builtin.
Register callee_context = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kCalleeContext);
@@ -1294,6 +1304,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ RecordComment("[ Stack/interrupt check");
Label call_stack_guard;
+ Register frame_size = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
{
// Stack check. This folds the checks for both the interrupt stack limit
// check and the real stack limit into one by just checking for the
@@ -1302,11 +1314,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// building the frame we can quickly precheck both at once.
UseScratchRegisterScope temps(masm);
- Register frame_size = temps.AcquireW();
- __ Ldr(frame_size,
- FieldMemOperand(bytecodeArray, BytecodeArray::kFrameSizeOffset));
- Register sp_minus_frame_size = frame_size.X();
- __ Sub(sp_minus_frame_size, sp, frame_size.X());
+ Register sp_minus_frame_size = temps.AcquireX();
+ __ Sub(sp_minus_frame_size, sp, frame_size);
Register interrupt_limit = temps.AcquireX();
__ LoadStackLimit(interrupt_limit, StackLimitKind::kInterruptStackLimit);
__ Cmp(sp_minus_frame_size, interrupt_limit);
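
This hunk takes the frame size from the descriptor instead of reloading it, but the folded limit check it feeds is unchanged: one unsigned comparison of sp minus the frame size against the interrupt stack limit covers both the interrupt request and the real overflow case. A scalar sketch of that precheck, with plain integer parameters standing in for the registers:

    #include <cstdint>

    // Stack grows downwards; the guard runtime call is needed whenever the frame
    // about to be built would dip below the interrupt limit, which also covers a
    // genuine overflow because that limit is at or above the real stack limit.
    bool NeedsStackGuard(std::uintptr_t sp, std::uintptr_t frame_size,
                         std::uintptr_t interrupt_limit) {
      return sp - frame_size < interrupt_limit;  // B(lo, &call_stack_guard)
    }
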
@@ -1338,7 +1347,9 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ RecordComment("[ Stack/interrupt call");
// Save incoming new target or generator
__ Push(padreg, new_target);
- __ CallRuntime(Runtime::kStackGuard);
+ __ SmiTag(frame_size);
+ __ PushArgument(frame_size);
+ __ CallRuntime(Runtime::kStackGuardWithGap);
__ Pop(new_target, padreg);
__ RecordComment("]");
}
@@ -1508,10 +1519,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Ldr(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, x1));
__ Call(kJavaScriptCallCodeStartRegister);
- masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
// or the interpreter tail calling a builtin and then a dispatch.
+ masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
+ __ JumpTarget();
// Get bytecode array and bytecode offset from the stack frame.
__ Ldr(kInterpreterBytecodeArrayRegister,
@@ -1584,19 +1596,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
masm, optimization_state, feedback_vector,
&has_optimized_code_or_marker);
- // Read off the optimization state in the feedback vector.
- // TODO(v8:11429): Is this worth doing here? Baseline code will check it
- // anyway...
- __ Ldr(optimization_state,
- FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
-
- // Check if there is optimized code or a optimization marker that needes to
- // be processed.
- __ TestAndBranchIfAnySet(
- optimization_state,
- FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask,
- &has_optimized_code_or_marker);
-
// Load the baseline code into the closure.
__ LoadTaggedPointerField(
x2, FieldMemOperand(kInterpreterBytecodeArrayRegister,
@@ -1770,41 +1769,6 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
}
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
- // Set the return address to the correct point in the interpreter entry
- // trampoline.
- Label builtin_trampoline, trampoline_loaded;
- Smi interpreter_entry_return_pc_offset(
- masm->isolate()->heap()->interpreter_entry_return_pc_offset());
- DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
-
- // If the SFI function_data is an InterpreterData, the function will have a
- // custom copy of the interpreter entry trampoline for profiling. If so,
- // get the custom trampoline, otherwise grab the entry address of the global
- // trampoline.
- __ Ldr(x1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ LoadTaggedPointerField(
- x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ LoadTaggedPointerField(
- x1, FieldMemOperand(x1, SharedFunctionInfo::kFunctionDataOffset));
- __ CompareObjectType(x1, kInterpreterDispatchTableRegister,
- kInterpreterDispatchTableRegister,
- INTERPRETER_DATA_TYPE);
- __ B(ne, &builtin_trampoline);
-
- __ LoadTaggedPointerField(
- x1, FieldMemOperand(x1, InterpreterData::kInterpreterTrampolineOffset));
- __ Add(x1, x1, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ B(&trampoline_loaded);
-
- __ Bind(&builtin_trampoline);
- __ Mov(x1, ExternalReference::
- address_of_interpreter_entry_trampoline_instruction_start(
- masm->isolate()));
- __ Ldr(x1, MemOperand(x1));
-
- __ Bind(&trampoline_loaded);
- __ Add(lr, x1, Operand(interpreter_entry_return_pc_offset.value()));
-
// Initialize the dispatch table register.
__ Mov(
kInterpreterDispatchTableRegister,
@@ -1838,6 +1802,11 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ bind(&okay);
}
+ // Set up LR to point to code below, so we return there after we're done
+ // executing the function.
+ Label return_from_bytecode_dispatch;
+ __ Adr(lr, &return_from_bytecode_dispatch);
+
// Dispatch to the target bytecode.
__ Ldrb(x23, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
@@ -1849,6 +1818,45 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
temps.Exclude(x17);
__ Mov(x17, kJavaScriptCallCodeStartRegister);
__ Jump(x17);
+
+ __ Bind(&return_from_bytecode_dispatch);
+
+ // We return here after having executed the function in the interpreter.
+ // Now jump to the correct point in the interpreter entry trampoline.
+ Label builtin_trampoline, trampoline_loaded;
+ Smi interpreter_entry_return_pc_offset(
+ masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
+
+ // If the SFI function_data is an InterpreterData, the function will have a
+ // custom copy of the interpreter entry trampoline for profiling. If so,
+ // get the custom trampoline, otherwise grab the entry address of the global
+ // trampoline.
+ __ Ldr(x1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ LoadTaggedPointerField(
+ x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ x1, FieldMemOperand(x1, SharedFunctionInfo::kFunctionDataOffset));
+ __ CompareObjectType(x1, kInterpreterDispatchTableRegister,
+ kInterpreterDispatchTableRegister,
+ INTERPRETER_DATA_TYPE);
+ __ B(ne, &builtin_trampoline);
+
+ __ LoadTaggedPointerField(
+ x1, FieldMemOperand(x1, InterpreterData::kInterpreterTrampolineOffset));
+ __ Add(x1, x1, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ B(&trampoline_loaded);
+
+ __ Bind(&builtin_trampoline);
+ __ Mov(x1, ExternalReference::
+ address_of_interpreter_entry_trampoline_instruction_start(
+ masm->isolate()));
+ __ Ldr(x1, MemOperand(x1));
+
+ __ Bind(&trampoline_loaded);
+
+ __ Add(x17, x1, Operand(interpreter_entry_return_pc_offset.value()));
+ __ Br(x17);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -2015,6 +2023,27 @@ void Builtins::Generate_TailCallOptimizedCodeSlot(MacroAssembler* masm) {
}
namespace {
+
+void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
+ Operand offset = Operand(0)) {
+ // Pop the return address to this function's caller from the return stack
+ // buffer, since we'll never return to it.
+ Label jump;
+ __ Adr(lr, &jump);
+ __ Ret();
+
+ __ Bind(&jump);
+
+ UseScratchRegisterScope temps(masm);
+ temps.Exclude(x17);
+ if (offset.IsZero()) {
+ __ Mov(x17, entry_address);
+ } else {
+ __ Add(x17, entry_address, offset);
+ }
+ __ Br(x17);
+}
+
void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -2048,11 +2077,9 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
__ Add(x0, x0, x1);
- __ Add(lr, x0, Code::kHeaderSize - kHeapObjectTag);
-
- // And "return" to the OSR entry point of the function.
- __ Ret();
+ Generate_OSREntry(masm, x0, Code::kHeaderSize - kHeapObjectTag);
}
+
} // namespace
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
@@ -2460,6 +2487,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ EnterFrame(StackFrame::INTERNAL);
__ PushArgument(x3);
__ CallRuntime(Runtime::kThrowNotConstructor);
+ __ Unreachable();
}
__ Bind(&new_target_constructor);
}
@@ -2598,6 +2626,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
FrameScope frame(masm, StackFrame::INTERNAL);
__ PushArgument(x1);
__ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ __ Unreachable();
}
}
@@ -2786,7 +2815,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ Poke(x1, __ ReceiverOperand(x0));
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, x1);
+ __ LoadNativeContextSlot(x1, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -2797,6 +2826,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
FrameScope scope(masm, StackFrame::INTERNAL);
__ PushArgument(x1);
__ CallRuntime(Runtime::kThrowCalledNonCallable);
+ __ Unreachable();
}
}
@@ -2904,7 +2934,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ Poke(x1, __ ReceiverOperand(x0));
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, x1);
+ __ LoadNativeContextSlot(x1, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -2916,6 +2946,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+#if V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in w8 by the jump table trampoline.
// Sign extend and convert to Smi for the runtime call.
@@ -3001,6 +3032,12 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
__ Ret();
}
+void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
+ // TODO(v8:10701): Implement for this platform.
+ __ Trap();
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame) {
@@ -3286,11 +3323,6 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ Ret();
}
-void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
- // TODO(v8:10701): Implement for this platform.
- __ Trap();
-}
-
namespace {
 // The number of registers that CallApiFunctionAndReturn will need to save on
@@ -3948,6 +3980,149 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
+namespace {
+
+// Converts an interpreter frame into a baseline frame and continues execution
+// in baseline code (baseline code has to exist on the shared function info),
+// either at the current or next (in execution order) bytecode.
+void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
+ bool is_osr = false) {
+ __ Push(padreg, kInterpreterAccumulatorRegister);
+ Label start;
+ __ bind(&start);
+
+ // Get function from the frame.
+ Register closure = x1;
+ __ Ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+
+ // Load the feedback vector.
+ Register feedback_vector = x2;
+ __ LoadTaggedPointerField(
+ feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedPointerField(
+ feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label install_baseline_code;
+ // Check if feedback vector is valid. If not, call prepare for baseline to
+ // allocate it.
+ __ CompareObjectType(feedback_vector, x3, x3, FEEDBACK_VECTOR_TYPE);
+ __ B(ne, &install_baseline_code);
+
+ // Save BytecodeOffset from the stack frame.
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ // Replace BytecodeOffset with the feedback vector.
+ __ Str(feedback_vector,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ feedback_vector = no_reg;
+
+ // Get the Code object from the shared function info.
+ Register code_obj = x22;
+ __ LoadTaggedPointerField(
+ code_obj,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ code_obj,
+ FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadTaggedPointerField(
+ code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
+
+ // Compute baseline pc for bytecode offset.
+ ExternalReference get_baseline_pc_extref;
+ if (next_bytecode || is_osr) {
+ get_baseline_pc_extref =
+ ExternalReference::baseline_pc_for_next_executed_bytecode();
+ } else {
+ get_baseline_pc_extref =
+ ExternalReference::baseline_pc_for_bytecode_offset();
+ }
+ Register get_baseline_pc = x3;
+ __ Mov(get_baseline_pc, get_baseline_pc_extref);
+
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset.
+ // TODO(pthier): Investigate if it is feasible to handle this special case
+ // in TurboFan instead of here.
+ Label valid_bytecode_offset, function_entry_bytecode;
+ if (!is_osr) {
+ __ cmp(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ __ B(eq, &function_entry_bytecode);
+ }
+
+ __ Sub(kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeOffsetRegister,
+ (BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+ __ bind(&valid_bytecode_offset);
+ // Get bytecode array from the stack frame.
+ __ ldr(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ {
+ Register arg_reg_1 = x0;
+ Register arg_reg_2 = x1;
+ Register arg_reg_3 = x2;
+ __ Mov(arg_reg_1, code_obj);
+ __ Mov(arg_reg_2, kInterpreterBytecodeOffsetRegister);
+ __ Mov(arg_reg_3, kInterpreterBytecodeArrayRegister);
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallCFunction(get_baseline_pc, 3, 0);
+ }
+ __ Add(code_obj, code_obj, kReturnRegister0);
+ __ Pop(kInterpreterAccumulatorRegister, padreg);
+
+ if (is_osr) {
+ // Reset the OSR loop nesting depth to disarm back edges.
+ // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
+ // Sparkplug here.
+ __ Strh(wzr, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kOsrNestingLevelOffset));
+ Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
+ } else {
+ __ Add(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(code_obj);
+ }
+ __ Trap(); // Unreachable.
+
+ if (!is_osr) {
+ __ bind(&function_entry_bytecode);
+ // If the bytecode offset is kFunctionEntryBytecodeOffset, get the start
+ // the first bytecode.
+ __ Mov(kInterpreterBytecodeOffsetRegister, Operand(0));
+ if (next_bytecode) {
+ __ Mov(get_baseline_pc,
+ ExternalReference::baseline_pc_for_bytecode_offset());
+ }
+ __ B(&valid_bytecode_offset);
+ }
+
+ __ bind(&install_baseline_code);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ PushArgument(closure);
+ __ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ }
+ // Retry from the start after installing baseline code.
+ __ B(&start);
+}
+
+} // namespace
+
+void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
+ Generate_BaselineEntry(masm, false);
+}
+
+void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
+ Generate_BaselineEntry(masm, true);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
+ MacroAssembler* masm) {
+ Generate_BaselineEntry(masm, false, true);
+}
+
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
diff --git a/deps/v8/src/builtins/array-join.tq b/deps/v8/src/builtins/array-join.tq
index 3d76ff851db..6448c958752 100644
--- a/deps/v8/src/builtins/array-join.tq
+++ b/deps/v8/src/builtins/array-join.tq
@@ -3,8 +3,12 @@
// found in the LICENSE file.
namespace array {
+
type LoadJoinElementFn = builtin(Context, JSReceiver, uintptr) => JSAny;
+const kMaxArrayLength:
+ constexpr uint32 generates 'JSArray::kMaxArrayLength';
+
// Fast C call to write a fixed array (see Buffer.fixedArray) to a single
// string.
extern macro
@@ -555,8 +559,9 @@ ArrayPrototypeJoin(
// Only handle valid array lengths. Although the spec allows larger
// values, this matches historical V8 behavior.
- if (len > kMaxArrayIndex + 1)
+ if (len > kMaxArrayLength) {
ThrowTypeError(MessageTemplate::kInvalidArrayLength);
+ }
return CycleProtectedArrayJoin<JSArray>(
false, o, len, separator, Undefined, Undefined);
@@ -576,8 +581,9 @@ transitioning javascript builtin ArrayPrototypeToLocaleString(
// Only handle valid array lengths. Although the spec allows larger
// values, this matches historical V8 behavior.
- if (len > kMaxArrayIndex + 1)
+ if (len > kMaxArrayLength) {
ThrowTypeError(MessageTemplate::kInvalidArrayLength);
+ }
return CycleProtectedArrayJoin<JSArray>(true, o, len, ',', locales, options);
}
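
The new kMaxArrayLength constant replaces the kMaxArrayIndex + 1 bound used before; JSArray::kMaxArrayLength is the largest legal JS array length (2^32 - 1, one above the largest array index), so the rewritten guard appears to keep the same cutoff while dropping the constexpr addition. A hedged C++ restatement of the check, assuming that value:

    #include <cstdint>

    // Hedged restatement of the Torque guard: a JS array length fits in a
    // uint32, so anything above 2^32 - 1 is rejected.
    constexpr double kMaxArrayLength = 4294967295.0;  // JSArray::kMaxArrayLength

    bool IsValidJoinLength(double len) {
      return len <= kMaxArrayLength;  // else ThrowTypeError(kInvalidArrayLength)
    }
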
diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq
index cfdc7cc98d8..08639c04daf 100644
--- a/deps/v8/src/builtins/base.tq
+++ b/deps/v8/src/builtins/base.tq
@@ -104,6 +104,7 @@ type uint8 extends uint16
type char8 extends uint8 constexpr 'char';
type char16 extends uint16 constexpr 'char16_t';
type int64 generates 'TNode<Int64T>' constexpr 'int64_t';
+type uint64 generates 'TNode<Uint64T>' constexpr 'uint64_t';
type intptr generates 'TNode<IntPtrT>' constexpr 'intptr_t';
type uintptr generates 'TNode<UintPtrT>' constexpr 'uintptr_t';
type float32 generates 'TNode<Float32T>' constexpr 'float';
@@ -112,6 +113,9 @@ type bool generates 'TNode<BoolT>' constexpr 'bool';
type bint generates 'TNode<BInt>' constexpr 'BInt';
type string constexpr 'const char*';
+type Simd128 generates 'TNode<Simd128T>';
+type I8X16 extends Simd128 generates 'TNode<I8x16T>';
+
// Represents a std::function which produces the generated TNode type of T.
// Useful for passing values to and from CSA code that uses LazyNode<T>, which
// is a typedef for std::function<TNode<T>()>. Can be created with %MakeLazy and
@@ -415,8 +419,6 @@ extern enum PropertyAttributes extends int31 {
...
}
-const kMaxArrayIndex:
- constexpr uint32 generates 'JSArray::kMaxArrayIndex';
const kArrayBufferMaxByteLength:
constexpr uintptr generates 'JSArrayBuffer::kMaxByteLength';
const kTypedArrayMaxLength:
@@ -456,16 +458,17 @@ extern enum PrimitiveType { kString, kBoolean, kSymbol, kNumber }
const kNameDictionaryInitialCapacity:
constexpr int32 generates 'NameDictionary::kInitialCapacity';
-const kOrderedNameDictionaryInitialCapacity:
- constexpr int32 generates 'OrderedNameDictionary::kInitialCapacity';
-const kSwissNameDictionaryGroupWidth:
- constexpr int32 generates 'SwissNameDictionary::kGroupWidth';
+const kSwissNameDictionaryInitialCapacity:
+ constexpr int32 generates 'SwissNameDictionary::kInitialCapacity';
const kWasmArrayHeaderSize:
constexpr int32 generates 'WasmArray::kHeaderSize';
+const kHeapObjectHeaderSize:
+ constexpr int32 generates 'HeapObject::kHeaderSize';
+
const kDictModePrototypes:
- constexpr bool generates 'V8_DICT_MODE_PROTOTYPES_BOOL';
+ constexpr bool generates 'V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL';
type TheHole extends Oddball;
type Null extends Oddball;
@@ -688,6 +691,8 @@ extern macro CodeStubAssembler::AllocateNameDictionary(constexpr int32):
NameDictionary;
extern macro CodeStubAssembler::AllocateOrderedNameDictionary(constexpr int32):
OrderedNameDictionary;
+extern macro CodeStubAssembler::AllocateSwissNameDictionary(constexpr int32):
+ SwissNameDictionary;
extern builtin ToObject(Context, JSAny): JSReceiver;
extern macro ToObject_Inline(Context, JSAny): JSReceiver;
@@ -804,6 +809,8 @@ extern operator '>=' macro IntPtrGreaterThanOrEqual(intptr, intptr): bool;
extern operator '>=' macro UintPtrGreaterThanOrEqual(uintptr, uintptr): bool;
extern operator '~' macro WordNot(intptr): intptr;
extern operator '~' macro WordNot(uintptr): uintptr;
+extern operator '~' macro Word64Not(uint64): uint64;
+extern operator '~' macro Word64Not(int64): int64;
extern operator '~' macro ConstexprWordNot(constexpr intptr): constexpr intptr;
extern operator '~' macro ConstexprWordNot(constexpr uintptr):
constexpr uintptr;
@@ -878,32 +885,44 @@ extern operator '+' macro ConstexprIntPtrAdd(
constexpr intptr, constexpr intptr): constexpr intptr;
extern operator '+' macro ConstexprUintPtrAdd(
constexpr uintptr, constexpr uintptr): constexpr intptr;
+extern operator '+' macro Int64Add(int64, int64): int64;
extern operator '-' macro IntPtrSub(intptr, intptr): intptr;
+extern operator '-' macro Int64Sub(int64, int64): int64;
extern operator '*' macro IntPtrMul(intptr, intptr): intptr;
+extern operator '*' macro Int64Mul(int64, int64): int64;
extern operator '/' macro IntPtrDiv(intptr, intptr): intptr;
+extern operator '/' macro Int64Div(int64, int64): int64;
extern operator '<<' macro WordShl(intptr, intptr): intptr;
extern operator '>>' macro WordSar(intptr, intptr): intptr;
extern operator '&' macro WordAnd(intptr, intptr): intptr;
extern operator '|' macro WordOr(intptr, intptr): intptr;
extern operator '+' macro UintPtrAdd(uintptr, uintptr): uintptr;
+extern operator '+' macro Uint64Add(uint64, uint64): uint64;
extern operator '-' macro UintPtrSub(uintptr, uintptr): uintptr;
+extern operator '-' macro Uint64Sub(uint64, uint64): uint64;
+extern operator '*' macro Uint64Mul(uint64, uint64): uint64;
extern operator '<<' macro WordShl(uintptr, uintptr): uintptr;
extern operator '>>>' macro WordShr(uintptr, uintptr): uintptr;
extern operator '&' macro WordAnd(uintptr, uintptr): uintptr;
extern operator '|' macro WordOr(uintptr, uintptr): uintptr;
extern operator '+' macro Int32Add(int32, int32): int32;
+extern operator '+' macro Uint32Add(uint32, uint32): uint32;
extern operator '+' macro ConstexprUint32Add(
constexpr uint32, constexpr int32): constexpr uint32;
extern operator '+' macro ConstexprInt31Add(
constexpr int31, constexpr int31): constexpr int31;
+extern operator '+' macro ConstexprInt32Add(
+ constexpr int32, constexpr int32): constexpr int32;
extern operator '*' macro ConstexprInt31Mul(
constexpr int31, constexpr int31): constexpr int31;
extern operator '-' macro Int32Sub(int16, int16): int32;
extern operator '-' macro Int32Sub(uint16, uint16): int32;
extern operator '-' macro Int32Sub(int32, int32): int32;
+extern operator '-' macro Uint32Sub(uint32, uint32): uint32;
extern operator '*' macro Int32Mul(int32, int32): int32;
+extern operator '*' macro Uint32Mul(uint32, uint32): uint32;
extern operator '/' macro Int32Div(int32, int32): int32;
extern operator '%' macro Int32Mod(int32, int32): int32;
extern operator '&' macro Word32And(int32, int32): int32;
@@ -940,6 +959,22 @@ extern operator '==' macro Word32Equal(bool, bool): bool;
extern operator '!=' macro Word32NotEqual(bool, bool): bool;
extern operator '|' macro ConstexprWord32Or(
constexpr int32, constexpr int32): constexpr int32;
+extern operator '^' macro Word32Xor(int32, int32): int32;
+extern operator '^' macro Word32Xor(uint32, uint32): uint32;
+
+extern operator '==' macro Word64Equal(int64, int64): bool;
+extern operator '==' macro Word64Equal(uint64, uint64): bool;
+extern operator '!=' macro Word64NotEqual(int64, int64): bool;
+extern operator '!=' macro Word64NotEqual(uint64, uint64): bool;
+extern operator '>>>' macro Word64Shr(uint64, uint64): uint64;
+extern operator '>>' macro Word64Sar(int64, int64): int64;
+extern operator '<<' macro Word64Shl(int64, int64): int64;
+extern operator '<<' macro Word64Shl(uint64, uint64): uint64;
+extern operator '|' macro Word64Or(int64, int64): int64;
+extern operator '|' macro Word64Or(uint64, uint64): uint64;
+extern operator '&' macro Word64And(uint64, uint64): uint64;
+extern operator '^' macro Word64Xor(int64, int64): int64;
+extern operator '^' macro Word64Xor(uint64, uint64): uint64;
extern operator '+' macro Float64Add(float64, float64): float64;
extern operator '-' macro Float64Sub(float64, float64): float64;
@@ -987,6 +1022,8 @@ ConstexprInt31Equal(
constexpr InstanceType, constexpr InstanceType): constexpr bool;
extern operator '-' macro ConstexprUint32Sub(
constexpr InstanceType, constexpr InstanceType): constexpr int32;
+extern operator '-' macro ConstexprInt32Sub(
+ constexpr int32, constexpr int32): constexpr int32;
extern operator '.instanceType' macro LoadInstanceType(HeapObject):
InstanceType;
@@ -1015,6 +1052,10 @@ operator '==' macro PromiseStateEquals(
return Word32Equal(s1, s2);
}
+extern macro CountLeadingZeros64(uint64): int64;
+extern macro CountTrailingZeros32(uint32): int32;
+extern macro CountTrailingZeros64(uint64): int64;
+
extern macro TaggedIsSmi(Object): bool;
extern macro TaggedIsNotSmi(Object): bool;
extern macro TaggedIsPositiveSmi(Object): bool;
@@ -1028,17 +1069,20 @@ extern macro ChangeInt32ToTagged(int32): Number;
extern macro ChangeUint32ToTagged(uint32): Number;
extern macro ChangeUintPtrToFloat64(uintptr): float64;
extern macro ChangeUintPtrToTagged(uintptr): Number;
+extern macro Unsigned(int64): uint64;
extern macro Unsigned(int32): uint32;
extern macro Unsigned(int16): uint16;
extern macro Unsigned(int8): uint8;
extern macro Unsigned(intptr): uintptr;
extern macro Unsigned(RawPtr): uintptr;
+extern macro Signed(uint64): int64;
extern macro Signed(uint32): int32;
extern macro Signed(uint16): int16;
extern macro Signed(uint8): int8;
extern macro Signed(uintptr): intptr;
extern macro Signed(RawPtr): intptr;
extern macro TruncateIntPtrToInt32(intptr): int32;
+extern macro TruncateInt64ToInt32(int64): int32;
extern macro SmiTag(intptr): Smi;
extern macro SmiFromInt32(int32): Smi;
extern macro SmiFromUint32(uint32): Smi;
@@ -1057,6 +1101,7 @@ extern macro IntPtrToTaggedIndex(intptr): TaggedIndex;
extern macro TaggedIndexToSmi(TaggedIndex): Smi;
extern macro SmiToTaggedIndex(Smi): TaggedIndex;
extern macro RoundIntPtrToFloat64(intptr): float64;
+extern macro IntPtrRoundUpToPowerOfTwo32(intptr): intptr;
extern macro ChangeFloat32ToFloat64(float32): float64;
extern macro ChangeNumberToFloat64(Number): float64;
extern macro ChangeNumberToUint32(Number): uint32;
@@ -1066,9 +1111,12 @@ extern macro ChangeTaggedToFloat64(implicit context: Context)(JSAny): float64;
extern macro ChangeFloat64ToTagged(float64): Number;
extern macro ChangeFloat64ToUintPtr(float64): uintptr;
extern macro ChangeFloat64ToIntPtr(float64): intptr;
+extern macro ChangeBoolToInt32(bool): int32;
extern macro ChangeInt32ToFloat64(int32): float64;
-extern macro ChangeInt32ToIntPtr(int32): intptr; // Sign-extends.
-extern macro ChangeUint32ToWord(uint32): uintptr; // Doesn't sign-extend.
+extern macro ChangeInt32ToIntPtr(int32): intptr; // Sign-extends.
+extern macro ChangeUint32ToWord(uint32): uintptr; // Doesn't sign-extend.
+extern macro ChangeInt32ToInt64(int32): intptr; // Sign-extends.
+extern macro ChangeUint32ToUint64(uint32): uint64; // Doesn't sign-extend.
extern macro LoadNativeContext(Context): NativeContext;
extern macro TruncateFloat64ToFloat32(float64): float32;
extern macro TruncateHeapNumberValueToWord32(HeapNumber): int32;
@@ -1082,6 +1130,8 @@ extern macro IntPtrConstant(constexpr int32): intptr;
extern macro Uint16Constant(constexpr uint16): uint16;
extern macro Int32Constant(constexpr int31): int31;
extern macro Int32Constant(constexpr int32): int32;
+extern macro Int64Constant(constexpr int64): int64;
+extern macro Uint64Constant(constexpr uint64): uint64;
extern macro Float64Constant(constexpr int31): float64;
extern macro Float64Constant(constexpr float64): float64;
extern macro SmiConstant(constexpr int31): Smi;
@@ -1101,7 +1151,7 @@ extern macro BitcastWordToTaggedSigned(intptr): Smi;
extern macro BitcastWordToTaggedSigned(uintptr): Smi;
extern macro BitcastWordToTagged(intptr): Object;
extern macro BitcastWordToTagged(uintptr): Object;
-extern macro BitcastTaggedToWord(Tagged): intptr;
+extern macro BitcastTaggedToWord(Object): intptr;
extern macro BitcastTaggedToWordForTagAndSmiBits(Tagged): intptr;
extern macro FixedArrayMapConstant(): Map;
@@ -1795,3 +1845,12 @@ extern macro FeedbackIteratorEntrySize(): intptr;
extern macro FeedbackIteratorHandlerOffset(): intptr;
extern operator '[]' macro LoadWeakFixedArrayElement(
WeakFixedArray, intptr): MaybeObject;
+
+const kNoHashSentinel:
+ constexpr int32 generates 'PropertyArray::kNoHashSentinel';
+extern macro LoadNameHash(Name): uint32;
+
+extern macro LoadSimd128(intptr): Simd128;
+extern macro I8x16BitMask(I8X16): int32;
+extern macro I8x16Eq(I8X16, I8X16): I8X16;
+extern macro I8x16Splat(int32): I8X16;
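
Among the base.tq additions above are 64-bit integer operators and a small SIMD surface (LoadSimd128, I8x16Splat, I8x16Eq, I8x16BitMask), presumably feeding the SwissNameDictionary group probing that other hunks in this patch reference. As a rough scalar model, I8x16BitMask gathers the sign bit of each of the 16 byte lanes into the low bits of an int32, in the spirit of SSE2's _mm_movemask_epi8; the sketch below assumes that reading:

    #include <cstdint>

    // Scalar model of I8x16BitMask: bit i of the result is the sign bit of lane i.
    int I8x16BitMaskScalar(const uint8_t lanes[16]) {
      int mask = 0;
      for (int i = 0; i < 16; ++i) mask |= ((lanes[i] >> 7) & 1) << i;
      return mask;
    }

    // Scalar model of I8x16Eq followed by I8x16BitMask: one result bit per
    // byte lane that matches the splatted needle value.
    int MatchBytes(const uint8_t lanes[16], uint8_t needle) {
      int mask = 0;
      for (int i = 0; i < 16; ++i) mask |= (lanes[i] == needle) << i;
      return mask;
    }
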
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index feaa733031d..6b522fda6c0 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -19,9 +19,6 @@
namespace v8 {
namespace internal {
-using Node = compiler::Node;
-using IteratorRecord = TorqueStructIteratorRecord;
-
ArrayBuiltinsAssembler::ArrayBuiltinsAssembler(
compiler::CodeAssemblerState* state)
: CodeStubAssembler(state),
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index ea21a19a86f..d3bbd980a55 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -15,6 +15,7 @@
#include "src/objects/contexts.h"
#include "src/objects/elements-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/lookup.h"
#include "src/objects/objects-inl.h"
@@ -333,7 +334,7 @@ V8_WARN_UNUSED_RESULT Object GenericArrayPush(Isolate* isolate,
Handle<Object> element = args->at(i + 1);
// b. Perform ? Set(O, ! ToString(len), E, true).
- if (length <= static_cast<double>(JSArray::kMaxArrayIndex)) {
+ if (length <= JSObject::kMaxElementIndex) {
RETURN_FAILURE_ON_EXCEPTION(
isolate, Object::SetElement(isolate, receiver, length, element,
ShouldThrow::kThrowOnError));
@@ -665,7 +666,10 @@ class ArrayConcatVisitor {
V8_WARN_UNUSED_RESULT bool visit(uint32_t i, Handle<Object> elm) {
uint32_t index = index_offset_ + i;
- if (i >= JSObject::kMaxElementCount - index_offset_) {
+ // Note we use >=kMaxArrayLength instead of the more appropriate
+ // >kMaxArrayIndex here due to overflowing arithmetic and
+ // increase_index_offset.
+ if (i >= JSArray::kMaxArrayLength - index_offset_) {
set_exceeds_array_limit(true);
// Exception hasn't been thrown at this point. Return true to
// break out, and caller will throw. !visit would imply that
@@ -710,8 +714,8 @@ class ArrayConcatVisitor {
uint32_t index_offset() const { return index_offset_; }
void increase_index_offset(uint32_t delta) {
- if (JSObject::kMaxElementCount - index_offset_ < delta) {
- index_offset_ = JSObject::kMaxElementCount;
+ if (JSArray::kMaxArrayLength - index_offset_ < delta) {
+ index_offset_ = JSArray::kMaxArrayLength;
} else {
index_offset_ += delta;
}
@@ -816,7 +820,7 @@ class ArrayConcatVisitor {
Isolate* isolate_;
Handle<Object> storage_; // Always a global handle.
// Index after last seen index. Always less than or equal to
- // JSObject::kMaxElementCount.
+ // JSArray::kMaxArrayLength.
uint32_t index_offset_;
uint32_t bit_field_;
};
@@ -1261,14 +1265,14 @@ Object Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
length_estimate = 1;
element_estimate = 1;
}
- // Avoid overflows by capping at kMaxElementCount.
- if (JSObject::kMaxElementCount - estimate_result_length < length_estimate) {
- estimate_result_length = JSObject::kMaxElementCount;
+ // Avoid overflows by capping at kMaxArrayLength.
+ if (JSArray::kMaxArrayLength - estimate_result_length < length_estimate) {
+ estimate_result_length = JSArray::kMaxArrayLength;
} else {
estimate_result_length += length_estimate;
}
- if (JSObject::kMaxElementCount - estimate_nof < element_estimate) {
- estimate_nof = JSObject::kMaxElementCount;
+ if (JSArray::kMaxArrayLength - estimate_nof < element_estimate) {
+ estimate_nof = JSArray::kMaxArrayLength;
} else {
estimate_nof += element_estimate;
}
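
The capping logic rewritten above follows one saturating pattern in both places: add the per-source estimate, but clamp at JSArray::kMaxArrayLength rather than letting the uint32 accumulator wrap. A minimal sketch of that pattern:

    #include <cstdint>

    // Clamp-on-overflow addition, mirroring
    // "if (kMaxArrayLength - acc < delta) acc = kMaxArrayLength; else acc += delta;".
    uint32_t CapAdd(uint32_t acc, uint32_t delta, uint32_t cap) {
      return (cap - acc < delta) ? cap : acc + delta;
    }

    // Example: CapAdd(4294967290u, 100u, 4294967295u) == 4294967295u.
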
diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc
index 1644997ed01..49b00caa048 100644
--- a/deps/v8/src/builtins/builtins-async-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-function-gen.cc
@@ -157,14 +157,12 @@ TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
StoreObjectFieldNoWriteBarrier(
async_function_object, JSAsyncFunctionObject::kPromiseOffset, promise);
- RunContextPromiseHookInit(context, promise, UndefinedConstant());
-
// Fire promise hooks if enabled and push the Promise under construction
// in an async function on the catch prediction stack to handle exceptions
// thrown before the first await.
Label if_instrumentation(this, Label::kDeferred),
if_instrumentation_done(this);
- Branch(IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(),
+ Branch(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(),
&if_instrumentation, &if_instrumentation_done);
BIND(&if_instrumentation);
{
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index 1a660abece1..9ee6037b2bd 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -13,8 +13,6 @@
namespace v8 {
namespace internal {
-using compiler::Node;
-
namespace {
// Describe fields of Context associated with the AsyncIterator unwrap closure.
class ValueUnwrapContext {
@@ -99,11 +97,18 @@ TNode<Object> AsyncBuiltinsAssembler::AwaitOld(
TVARIABLE(HeapObject, var_throwaway, UndefinedConstant());
- RunContextPromiseHookInit(context, promise, outer_promise);
-
- InitAwaitPromise(Runtime::kAwaitPromisesInitOld, context, value, promise,
- outer_promise, on_reject, is_predicted_as_caught,
- &var_throwaway);
+ // Deal with PromiseHooks and debug support in the runtime. This
+ // also allocates the throwaway promise, which is only needed in
+ // case of PromiseHooks or debugging.
+ Label if_debugging(this, Label::kDeferred), do_resolve_promise(this);
+ Branch(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(),
+ &if_debugging, &do_resolve_promise);
+ BIND(&if_debugging);
+ var_throwaway =
+ CAST(CallRuntime(Runtime::kAwaitPromisesInitOld, context, value, promise,
+ outer_promise, on_reject, is_predicted_as_caught));
+ Goto(&do_resolve_promise);
+ BIND(&do_resolve_promise);
// Perform ! Call(promiseCapability.[[Resolve]], undefined, « promise »).
CallBuiltin(Builtins::kResolvePromise, context, promise, value);
@@ -163,46 +168,21 @@ TNode<Object> AsyncBuiltinsAssembler::AwaitOptimized(
TVARIABLE(HeapObject, var_throwaway, UndefinedConstant());
- InitAwaitPromise(Runtime::kAwaitPromisesInit, context, promise, promise,
- outer_promise, on_reject, is_predicted_as_caught,
- &var_throwaway);
-
- return CallBuiltin(Builtins::kPerformPromiseThen, native_context, promise,
- on_resolve, on_reject, var_throwaway.value());
-}
-
-void AsyncBuiltinsAssembler::InitAwaitPromise(
- Runtime::FunctionId id, TNode<Context> context, TNode<Object> value,
- TNode<Object> promise, TNode<Object> outer_promise,
- TNode<HeapObject> on_reject, TNode<Oddball> is_predicted_as_caught,
- TVariable<HeapObject>* var_throwaway) {
// Deal with PromiseHooks and debug support in the runtime. This
// also allocates the throwaway promise, which is only needed in
// case of PromiseHooks or debugging.
- Label if_debugging(this, Label::kDeferred),
- if_promise_hook(this, Label::kDeferred),
- not_debugging(this),
- do_nothing(this);
- TNode<Uint32T> promiseHookFlags = PromiseHookFlags();
- Branch(IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
- promiseHookFlags), &if_debugging, &not_debugging);
+ Label if_debugging(this, Label::kDeferred), do_perform_promise_then(this);
+ Branch(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(),
+ &if_debugging, &do_perform_promise_then);
BIND(&if_debugging);
- *var_throwaway =
- CAST(CallRuntime(id, context, value, promise,
+ var_throwaway =
+ CAST(CallRuntime(Runtime::kAwaitPromisesInit, context, promise, promise,
outer_promise, on_reject, is_predicted_as_caught));
- Goto(&do_nothing);
- BIND(&not_debugging);
-
- // This call to NewJSPromise is to keep behaviour parity with what happens
- // in Runtime::kAwaitPromisesInit above if native hooks are set. It will
- // create a throwaway promise that will trigger an init event and will get
- // passed into Builtins::kPerformPromiseThen below.
- Branch(IsContextPromiseHookEnabled(promiseHookFlags), &if_promise_hook,
- &do_nothing);
- BIND(&if_promise_hook);
- *var_throwaway = NewJSPromise(context, promise);
- Goto(&do_nothing);
- BIND(&do_nothing);
+ Goto(&do_perform_promise_then);
+ BIND(&do_perform_promise_then);
+
+ return CallBuiltin(Builtins::kPerformPromiseThen, native_context, promise,
+ on_resolve, on_reject, var_throwaway.value());
}
TNode<Object> AsyncBuiltinsAssembler::Await(
diff --git a/deps/v8/src/builtins/builtins-async-gen.h b/deps/v8/src/builtins/builtins-async-gen.h
index 34b7a0ce1d6..833e78d45d5 100644
--- a/deps/v8/src/builtins/builtins-async-gen.h
+++ b/deps/v8/src/builtins/builtins-async-gen.h
@@ -62,12 +62,6 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
TNode<SharedFunctionInfo> on_resolve_sfi,
TNode<SharedFunctionInfo> on_reject_sfi,
TNode<Oddball> is_predicted_as_caught);
-
- void InitAwaitPromise(
- Runtime::FunctionId id, TNode<Context> context, TNode<Object> value,
- TNode<Object> promise, TNode<Object> outer_promise,
- TNode<HeapObject> on_reject, TNode<Oddball> is_predicted_as_caught,
- TVariable<HeapObject>* var_throwaway);
};
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index 5d053063ff9..03df9e307c7 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -14,8 +14,6 @@
namespace v8 {
namespace internal {
-using compiler::Node;
-
namespace {
class AsyncGeneratorBuiltinsAssembler : public AsyncBuiltinsAssembler {
@@ -520,7 +518,7 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
// the "promiseResolve" hook would not be fired otherwise.
Label if_fast(this), if_slow(this, Label::kDeferred), return_promise(this);
GotoIfForceSlowPath(&if_slow);
- GotoIf(IsIsolatePromiseHookEnabledOrHasAsyncEventDelegate(), &if_slow);
+ GotoIf(IsPromiseHookEnabled(), &if_slow);
Branch(IsPromiseThenProtectorCellInvalid(), &if_slow, &if_fast);
BIND(&if_fast);
diff --git a/deps/v8/src/builtins/builtins-callsite.cc b/deps/v8/src/builtins/builtins-callsite.cc
index 5c32e04f32f..7a1c65e4a19 100644
--- a/deps/v8/src/builtins/builtins-callsite.cc
+++ b/deps/v8/src/builtins/builtins-callsite.cc
@@ -121,9 +121,11 @@ BUILTIN(CallSitePrototypeGetThis) {
CHECK_CALLSITE(frame, "getThis");
if (frame->IsStrict()) return ReadOnlyRoots(isolate).undefined_value();
isolate->CountUsage(v8::Isolate::kCallSiteAPIGetThisSloppyCall);
+#if V8_ENABLE_WEBASSEMBLY
if (frame->IsAsmJsWasm()) {
return frame->GetWasmInstance().native_context().global_proxy();
}
+#endif // V8_ENABLE_WEBASSEMBLY
return frame->receiver_or_instance();
}
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index 0d548b48b9a..785a1af90a2 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -19,7 +19,6 @@
namespace v8 {
namespace internal {
-using compiler::Node;
template <class T>
using TVariable = compiler::TypedCodeAssemblerVariable<T>;
@@ -911,7 +910,7 @@ TNode<IntPtrT> CollectionsBuiltinsAssembler::GetHash(
BIND(&if_receiver);
{
- var_hash = LoadJSReceiverIdentityHash(key);
+ var_hash = LoadJSReceiverIdentityHash(CAST(key));
Goto(&done);
}
@@ -2723,7 +2722,7 @@ TF_BUILTIN(WeakMapLookupHashIndex, WeakCollectionsBuiltinsAssembler) {
GotoIfNotJSReceiver(key, &if_not_found);
- TNode<IntPtrT> hash = LoadJSReceiverIdentityHash(key, &if_not_found);
+ TNode<IntPtrT> hash = LoadJSReceiverIdentityHash(CAST(key), &if_not_found);
TNode<IntPtrT> capacity = LoadTableCapacity(table);
TNode<IntPtrT> key_index =
FindKeyIndexForKey(table, key, hash, EntryMask(capacity), &if_not_found);
@@ -2788,7 +2787,7 @@ TF_BUILTIN(WeakCollectionDelete, WeakCollectionsBuiltinsAssembler) {
GotoIfNotJSReceiver(key, &if_not_found);
- TNode<IntPtrT> hash = LoadJSReceiverIdentityHash(key, &if_not_found);
+ TNode<IntPtrT> hash = LoadJSReceiverIdentityHash(CAST(key), &if_not_found);
TNode<EphemeronHashTable> table = LoadTable(collection);
TNode<IntPtrT> capacity = LoadTableCapacity(table);
TNode<IntPtrT> key_index =
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index e268ac868b8..0212c2349c9 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -175,8 +175,6 @@ void CallOrConstructBuiltinsAssembler::BuildConstructWithSpread(
CallOrConstructWithSpread(target, new_target, spread, argc, eager_context);
}
-using Node = compiler::Node;
-
TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
auto shared_function_info =
Parameter<SharedFunctionInfo>(Descriptor::kSharedFunctionInfo);
@@ -329,9 +327,9 @@ TNode<JSObject> ConstructorBuiltinsAssembler::FastNewObject(
}
BIND(&allocate_properties);
{
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- properties = AllocateOrderedNameDictionary(
- OrderedNameDictionary::kInitialCapacity);
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ properties =
+ AllocateSwissNameDictionary(SwissNameDictionary::kInitialCapacity);
} else {
properties = AllocateNameDictionary(NameDictionary::kInitialCapacity);
}
@@ -539,7 +537,7 @@ TNode<HeapObject> ConstructorBuiltinsAssembler::CreateShallowObjectLiteral(
TNode<Map> boilerplate_map = LoadMap(boilerplate);
CSA_ASSERT(this, IsJSObjectMap(boilerplate_map));
- TVARIABLE(FixedArray, var_properties);
+ TVARIABLE(HeapObject, var_properties);
{
TNode<Uint32T> bit_field_3 = LoadMapBitField3(boilerplate_map);
GotoIf(IsSetWord32<Map::Bits3::IsDeprecatedBit>(bit_field_3), call_runtime);
@@ -549,14 +547,14 @@ TNode<HeapObject> ConstructorBuiltinsAssembler::CreateShallowObjectLiteral(
&if_dictionary, &if_fast);
BIND(&if_dictionary);
{
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- // TODO(v8:11167) remove once OrderedNameDictionary supported.
- GotoIf(Int32TrueConstant(), call_runtime);
- }
-
Comment("Copy dictionary properties");
- var_properties = CopyNameDictionary(CAST(LoadSlowProperties(boilerplate)),
- call_runtime);
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ var_properties =
+ CopySwissNameDictionary(CAST(LoadSlowProperties(boilerplate)));
+ } else {
+ var_properties = CopyNameDictionary(
+ CAST(LoadSlowProperties(boilerplate)), call_runtime);
+ }
// Slow objects have no in-object properties.
Goto(&done);
}
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index 3819c122feb..b0e608418eb 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -141,6 +141,9 @@ namespace internal {
ASM(BaselineOutOfLinePrologue, BaselineOutOfLinePrologue) \
ASM(BaselineOnStackReplacement, ContextOnly) \
ASM(BaselineLeaveFrame, BaselineLeaveFrame) \
+ ASM(BaselineEnterAtBytecode, Void) \
+ ASM(BaselineEnterAtNextBytecode, Void) \
+ ASM(InterpreterOnStackReplacement_ToBaseline, Void) \
\
/* Code life-cycle */ \
TFC(CompileLazy, JSTrampoline) \
@@ -530,6 +533,8 @@ namespace internal {
/* ES6 #sec-generator.prototype.throw */ \
TFJ(GeneratorPrototypeThrow, kDontAdaptArgumentsSentinel) \
CPP(AsyncFunctionConstructor) \
+ TFC(SuspendGeneratorBaseline, SuspendGeneratorBaseline) \
+ TFC(ResumeGeneratorBaseline, ResumeGeneratorBaseline) \
\
/* Iterator Protocol */ \
TFC(GetIteratorWithFeedbackLazyDeoptContinuation, GetIteratorStackParameter) \
@@ -600,7 +605,7 @@ namespace internal {
TFS(IterableToListWithSymbolLookup, kIterable) \
TFS(IterableToFixedArrayWithSymbolLookupSlow, kIterable) \
TFS(IterableToListMayPreserveHoles, kIterable, kIteratorFn) \
- TFS(IterableToFixedArrayForWasm, kIterable, kExpectedLength) \
+ IF_WASM(TFS, IterableToFixedArrayForWasm, kIterable, kExpectedLength) \
\
/* #sec-createstringlistfromiterable */ \
TFS(StringListFromIterable, kIterable) \
@@ -855,14 +860,14 @@ namespace internal {
TFJ(TypedArrayPrototypeMap, kDontAdaptArgumentsSentinel) \
\
/* Wasm */ \
- ASM(GenericJSToWasmWrapper, Dummy) \
- ASM(WasmCompileLazy, Dummy) \
- ASM(WasmDebugBreak, Dummy) \
- TFC(WasmFloat32ToNumber, WasmFloat32ToNumber) \
- TFC(WasmFloat64ToNumber, WasmFloat64ToNumber) \
- TFC(WasmI32AtomicWait32, WasmI32AtomicWait32) \
- TFC(WasmI64AtomicWait32, WasmI64AtomicWait32) \
- TFC(JSToWasmLazyDeoptContinuation, SingleParameterOnStack) \
+ IF_WASM(ASM, GenericJSToWasmWrapper, Dummy) \
+ IF_WASM(ASM, WasmCompileLazy, Dummy) \
+ IF_WASM(ASM, WasmDebugBreak, Dummy) \
+ IF_WASM(TFC, WasmFloat32ToNumber, WasmFloat32ToNumber) \
+ IF_WASM(TFC, WasmFloat64ToNumber, WasmFloat64ToNumber) \
+ IF_WASM(TFC, WasmI32AtomicWait32, WasmI32AtomicWait32) \
+ IF_WASM(TFC, WasmI64AtomicWait32, WasmI64AtomicWait32) \
+ IF_WASM(TFC, JSToWasmLazyDeoptContinuation, SingleParameterOnStack) \
\
/* WeakMap */ \
TFJ(WeakMapConstructor, kDontAdaptArgumentsSentinel) \
diff --git a/deps/v8/src/builtins/builtins-function.cc b/deps/v8/src/builtins/builtins-function.cc
index dfcfffa3829..08fdbe5aa05 100644
--- a/deps/v8/src/builtins/builtins-function.cc
+++ b/deps/v8/src/builtins/builtins-function.cc
@@ -48,7 +48,6 @@ MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate,
builder.AppendCharacter('(');
builder.AppendCString(token);
builder.AppendCString(" anonymous(");
- bool parenthesis_in_arg_string = false;
if (argc > 1) {
for (int i = 1; i < argc; ++i) {
if (i > 1) builder.AppendCharacter(',');
@@ -70,14 +69,6 @@ MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate,
}
builder.AppendCString("\n})");
ASSIGN_RETURN_ON_EXCEPTION(isolate, source, builder.Finish(), Object);
-
- // The SyntaxError must be thrown after all the (observable) ToString
- // conversions are done.
- if (parenthesis_in_arg_string) {
- THROW_NEW_ERROR(isolate,
- NewSyntaxError(MessageTemplate::kParenthesisInArgString),
- Object);
- }
}
bool is_code_like = true;
diff --git a/deps/v8/src/builtins/builtins-generator-gen.cc b/deps/v8/src/builtins/builtins-generator-gen.cc
index d93ab2e1035..2e9d7e24e4f 100644
--- a/deps/v8/src/builtins/builtins-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-generator-gen.cc
@@ -202,5 +202,114 @@ TF_BUILTIN(GeneratorPrototypeThrow, GeneratorBuiltinsAssembler) {
"[Generator].prototype.throw");
}
+// TODO(cbruni): Merge with corresponding bytecode handler.
+TF_BUILTIN(SuspendGeneratorBaseline, GeneratorBuiltinsAssembler) {
+ auto generator = Parameter<JSGeneratorObject>(Descriptor::kGeneratorObject);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ StoreJSGeneratorObjectContext(generator, context);
+ auto suspend_id = SmiTag(UncheckedParameter<IntPtrT>(Descriptor::kSuspendId));
+ StoreJSGeneratorObjectContinuation(generator, suspend_id);
+ // Store the bytecode offset in the [input_or_debug_pos] field, to be used by
+ // the inspector.
+ auto bytecode_offset =
+ SmiTag(UncheckedParameter<IntPtrT>(Descriptor::kBytecodeOffset));
+ // Avoid the write barrier by using the generic helper.
+ StoreObjectFieldNoWriteBarrier(
+ generator, JSGeneratorObject::kInputOrDebugPosOffset, bytecode_offset);
+
+ TNode<JSFunction> closure = LoadJSGeneratorObjectFunction(generator);
+ auto sfi = LoadJSFunctionSharedFunctionInfo(closure);
+ TNode<IntPtrT> formal_parameter_count = Signed(
+ ChangeUint32ToWord(LoadSharedFunctionInfoFormalParameterCount(sfi)));
+ CSA_ASSERT(this, Word32BinaryNot(IntPtrEqual(
+ formal_parameter_count,
+ IntPtrConstant(kDontAdaptArgumentsSentinel))));
+
+ TNode<FixedArray> parameters_and_registers =
+ LoadJSGeneratorObjectParametersAndRegisters(generator);
+ auto parameters_and_registers_length =
+ SmiUntag(LoadFixedArrayBaseLength(parameters_and_registers));
+
+ // Copy over the function parameters
+ auto parameter_base_index = IntPtrConstant(
+ interpreter::Register::FromParameterIndex(0, 1).ToOperand() + 1);
+ CSA_CHECK(this, UintPtrLessThan(formal_parameter_count,
+ parameters_and_registers_length));
+ auto parent_frame_pointer = LoadParentFramePointer();
+ BuildFastLoop<IntPtrT>(
+ IntPtrConstant(0), formal_parameter_count,
+ [=](TNode<IntPtrT> index) {
+ auto reg_index = IntPtrAdd(parameter_base_index, index);
+ TNode<Object> value = LoadFullTagged(parent_frame_pointer,
+ TimesSystemPointerSize(reg_index));
+ UnsafeStoreFixedArrayElement(parameters_and_registers, index, value);
+ },
+ 1, IndexAdvanceMode::kPost);
+
+ // Iterate over register file and write values into array.
+ // The mapping of register to array index must match that used in
+ // BytecodeGraphBuilder::VisitResumeGenerator.
+ auto register_base_index =
+ IntPtrAdd(formal_parameter_count,
+ IntPtrConstant(interpreter::Register(0).ToOperand()));
+ auto register_count = UncheckedParameter<IntPtrT>(Descriptor::kRegisterCount);
+ auto end_index = IntPtrAdd(formal_parameter_count, register_count);
+ CSA_CHECK(this, UintPtrLessThan(end_index, parameters_and_registers_length));
+ BuildFastLoop<IntPtrT>(
+ formal_parameter_count, end_index,
+ [=](TNode<IntPtrT> index) {
+ auto reg_index = IntPtrSub(register_base_index, index);
+ TNode<Object> value = LoadFullTagged(parent_frame_pointer,
+ TimesSystemPointerSize(reg_index));
+ UnsafeStoreFixedArrayElement(parameters_and_registers, index, value);
+ },
+ 1, IndexAdvanceMode::kPost);
+
+ // The return value is unused, defaulting to undefined.
+ Return(UndefinedConstant());
+}
+
+// TODO(cbruni): Merge with corresponding bytecode handler.
+TF_BUILTIN(ResumeGeneratorBaseline, GeneratorBuiltinsAssembler) {
+ auto generator = Parameter<JSGeneratorObject>(Descriptor::kGeneratorObject);
+ TNode<JSFunction> closure = LoadJSGeneratorObjectFunction(generator);
+ auto sfi = LoadJSFunctionSharedFunctionInfo(closure);
+ TNode<IntPtrT> formal_parameter_count = Signed(
+ ChangeUint32ToWord(LoadSharedFunctionInfoFormalParameterCount(sfi)));
+ CSA_ASSERT(this, Word32BinaryNot(IntPtrEqual(
+ formal_parameter_count,
+ IntPtrConstant(kDontAdaptArgumentsSentinel))));
+
+ TNode<FixedArray> parameters_and_registers =
+ LoadJSGeneratorObjectParametersAndRegisters(generator);
+
+ // Iterate over array and write values into register file. Also erase the
+ // array contents to not keep them alive artificially.
+ auto register_base_index =
+ IntPtrAdd(formal_parameter_count,
+ IntPtrConstant(interpreter::Register(0).ToOperand()));
+ auto register_count = UncheckedParameter<IntPtrT>(Descriptor::kRegisterCount);
+ auto end_index = IntPtrAdd(formal_parameter_count, register_count);
+ auto parameters_and_registers_length =
+ SmiUntag(LoadFixedArrayBaseLength(parameters_and_registers));
+ CSA_CHECK(this, UintPtrLessThan(end_index, parameters_and_registers_length));
+ auto parent_frame_pointer = LoadParentFramePointer();
+ BuildFastLoop<IntPtrT>(
+ formal_parameter_count, end_index,
+ [=](TNode<IntPtrT> index) {
+ TNode<Object> value =
+ UnsafeLoadFixedArrayElement(parameters_and_registers, index);
+ auto reg_index = IntPtrSub(register_base_index, index);
+ StoreFullTaggedNoWriteBarrier(parent_frame_pointer,
+ TimesSystemPointerSize(reg_index), value);
+ UnsafeStoreFixedArrayElement(parameters_and_registers, index,
+ StaleRegisterConstant(),
+ SKIP_WRITE_BARRIER);
+ },
+ 1, IndexAdvanceMode::kPost);
+
+ Return(LoadJSGeneratorObjectInputOrDebugPos(generator));
+}
+
} // namespace internal
} // namespace v8
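The new SuspendGeneratorBaseline/ResumeGeneratorBaseline builtins above snapshot the formal parameters and live interpreter registers into the generator's parameters_and_registers array on suspend, and copy the registers back (poisoning the array slots) on resume. A rough, stand-alone C++ sketch of that snapshot/restore idea follows; it is not V8 code, and GeneratorState, kStaleRegister, Suspend and Resume are illustrative names.

#include <cassert>
#include <cstdint>
#include <vector>

// Illustrative model: parameters first, then registers, in one flat array.
struct GeneratorState {
  std::vector<int64_t> parameters_and_registers;
};

constexpr int64_t kStaleRegister = -1;  // stand-in for V8's StaleRegister value

void Suspend(GeneratorState& gen, const std::vector<int64_t>& params,
             const std::vector<int64_t>& regs) {
  gen.parameters_and_registers.assign(params.begin(), params.end());
  gen.parameters_and_registers.insert(gen.parameters_and_registers.end(),
                                      regs.begin(), regs.end());
}

void Resume(GeneratorState& gen, std::size_t param_count,
            std::vector<int64_t>& regs_out) {
  assert(gen.parameters_and_registers.size() >= param_count);
  regs_out.assign(gen.parameters_and_registers.begin() + param_count,
                  gen.parameters_and_registers.end());
  // Erase the register slots so the array does not keep values alive,
  // mirroring the StaleRegisterConstant() stores in ResumeGeneratorBaseline.
  for (std::size_t i = param_count; i < gen.parameters_and_registers.size(); ++i) {
    gen.parameters_and_registers[i] = kStaleRegister;
  }
}

int main() {
  GeneratorState gen;
  Suspend(gen, /*params=*/{1, 2}, /*registers=*/{10, 20, 30});
  std::vector<int64_t> regs;
  Resume(gen, /*param_count=*/2, regs);  // regs is now {10, 20, 30}
  return 0;
}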
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index 29cca9d93a1..0c4131dba96 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -312,6 +312,9 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
Label incremental_wb(this);
Label exit(this);
+ // In this method we limit the allocatable registers so we have to use
+ // UncheckedParameter. Parameter does not work because the checked cast needs
+ // more registers.
auto remembered_set = UncheckedParameter<Smi>(Descriptor::kRememberedSet);
Branch(ShouldEmitRememberSet(remembered_set), &generational_wb,
&incremental_wb);
@@ -333,8 +336,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
BIND(&test_old_to_young_flags);
{
// TODO(ishell): do a new-space range check instead.
- TNode<IntPtrT> value =
- BitcastTaggedToWord(Load(MachineType::TaggedPointer(), slot));
+ TNode<IntPtrT> value = BitcastTaggedToWord(Load<HeapObject>(slot));
// TODO(albertnetymk): Try to cache the page flag for value and object,
// instead of calling IsPageFlagSet each time.
@@ -343,7 +345,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
GotoIfNot(value_is_young, &incremental_wb);
TNode<IntPtrT> object =
- BitcastTaggedToWord(UntypedParameter(Descriptor::kObject));
+ BitcastTaggedToWord(UncheckedParameter<Object>(Descriptor::kObject));
TNode<BoolT> object_is_young =
IsPageFlagSet(object, MemoryChunk::kIsInYoungGenerationMask);
Branch(object_is_young, &incremental_wb, &store_buffer_incremental_wb);
@@ -353,7 +355,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
{
auto fp_mode = UncheckedParameter<Smi>(Descriptor::kFPMode);
TNode<IntPtrT> object =
- BitcastTaggedToWord(UntypedParameter(Descriptor::kObject));
+ BitcastTaggedToWord(UncheckedParameter<Object>(Descriptor::kObject));
InsertIntoRememberedSetAndGoto(object, slot, fp_mode, &exit);
}
@@ -361,7 +363,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
{
auto fp_mode = UncheckedParameter<Smi>(Descriptor::kFPMode);
TNode<IntPtrT> object =
- BitcastTaggedToWord(UntypedParameter(Descriptor::kObject));
+ BitcastTaggedToWord(UncheckedParameter<Object>(Descriptor::kObject));
InsertIntoRememberedSetAndGoto(object, slot, fp_mode, &incremental_wb);
}
}
@@ -371,8 +373,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
Label call_incremental_wb(this);
auto slot = UncheckedParameter<IntPtrT>(Descriptor::kSlot);
- TNode<IntPtrT> value =
- BitcastTaggedToWord(Load(MachineType::TaggedPointer(), slot));
+ TNode<IntPtrT> value = BitcastTaggedToWord(Load<HeapObject>(slot));
// There are two cases we need to call incremental write barrier.
// 1) value_is_white
@@ -384,7 +385,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
&exit);
TNode<IntPtrT> object =
- BitcastTaggedToWord(UntypedParameter(Descriptor::kObject));
+ BitcastTaggedToWord(UncheckedParameter<Object>(Descriptor::kObject));
Branch(
IsPageFlagSet(object, MemoryChunk::kSkipEvacuationSlotsRecordingMask),
&exit, &call_incremental_wb);
@@ -395,7 +396,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
ExternalReference::write_barrier_marking_from_code_function());
auto fp_mode = UncheckedParameter<Smi>(Descriptor::kFPMode);
TNode<IntPtrT> object =
- BitcastTaggedToWord(UntypedParameter(Descriptor::kObject));
+ BitcastTaggedToWord(UncheckedParameter<Object>(Descriptor::kObject));
CallCFunction2WithCallerSavedRegistersMode<Int32T, IntPtrT, IntPtrT>(
function, object, slot, fp_mode, &exit);
}
@@ -413,9 +414,12 @@ TF_BUILTIN(EphemeronKeyBarrier, RecordWriteCodeStubAssembler) {
ExternalReference::ephemeron_key_write_barrier_function());
TNode<ExternalReference> isolate_constant =
ExternalConstant(ExternalReference::isolate_address(isolate()));
+ // In this method we limit the allocatable registers so we have to use
+ // UncheckedParameter. Parameter does not work because the checked cast needs
+ // more registers.
auto address = UncheckedParameter<IntPtrT>(Descriptor::kSlotAddress);
TNode<IntPtrT> object =
- BitcastTaggedToWord(UntypedParameter(Descriptor::kObject));
+ BitcastTaggedToWord(UncheckedParameter<Object>(Descriptor::kObject));
TNode<Smi> fp_mode = UncheckedParameter<Smi>(Descriptor::kFPMode);
CallCFunction3WithCallerSavedRegistersMode<Int32T, IntPtrT, IntPtrT,
ExternalReference>(
@@ -431,20 +435,10 @@ class DeletePropertyBaseAssembler : public AccessorAssembler {
explicit DeletePropertyBaseAssembler(compiler::CodeAssemblerState* state)
: AccessorAssembler(state) {}
- void DeleteDictionaryProperty(TNode<Object> receiver,
+ void DictionarySpecificDelete(TNode<JSReceiver> receiver,
TNode<NameDictionary> properties,
- TNode<Name> name, TNode<Context> context,
- Label* dont_delete, Label* notfound) {
- TVARIABLE(IntPtrT, var_name_index);
- Label dictionary_found(this, &var_name_index);
- NameDictionaryLookup<NameDictionary>(properties, name, &dictionary_found,
- &var_name_index, notfound);
-
- BIND(&dictionary_found);
- TNode<IntPtrT> key_index = var_name_index.value();
- TNode<Uint32T> details = LoadDetailsByKeyIndex(properties, key_index);
- GotoIf(IsSetWord32(details, PropertyDetails::kAttributesDontDeleteMask),
- dont_delete);
+ TNode<IntPtrT> key_index,
+ TNode<Context> context) {
// Overwrite the entry itself (see NameDictionary::SetEntry).
TNode<Oddball> filler = TheHoleConstant();
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kTheHoleValue));
@@ -468,9 +462,49 @@ class DeletePropertyBaseAssembler : public AccessorAssembler {
TNode<Smi> capacity = GetCapacity<NameDictionary>(properties);
GotoIf(SmiGreaterThan(new_nof, SmiShr(capacity, 2)), &shrinking_done);
GotoIf(SmiLessThan(new_nof, SmiConstant(16)), &shrinking_done);
- CallRuntime(Runtime::kShrinkPropertyDictionary, context, receiver);
+
+ TNode<NameDictionary> new_properties =
+ CAST(CallRuntime(Runtime::kShrinkNameDictionary, context, properties));
+
+ StoreJSReceiverPropertiesOrHash(receiver, new_properties);
+
Goto(&shrinking_done);
BIND(&shrinking_done);
+ }
+
+ void DictionarySpecificDelete(TNode<JSReceiver> receiver,
+ TNode<SwissNameDictionary> properties,
+ TNode<IntPtrT> key_index,
+ TNode<Context> context) {
+ Label shrunk(this), done(this);
+ TVARIABLE(SwissNameDictionary, shrunk_table);
+
+ SwissNameDictionaryDelete(properties, key_index, &shrunk, &shrunk_table);
+ Goto(&done);
+ BIND(&shrunk);
+ StoreJSReceiverPropertiesOrHash(receiver, shrunk_table.value());
+ Goto(&done);
+
+ BIND(&done);
+ }
+
+ template <typename Dictionary>
+ void DeleteDictionaryProperty(TNode<JSReceiver> receiver,
+ TNode<Dictionary> properties, TNode<Name> name,
+ TNode<Context> context, Label* dont_delete,
+ Label* notfound) {
+ TVARIABLE(IntPtrT, var_name_index);
+ Label dictionary_found(this, &var_name_index);
+ NameDictionaryLookup<Dictionary>(properties, name, &dictionary_found,
+ &var_name_index, notfound);
+
+ BIND(&dictionary_found);
+ TNode<IntPtrT> key_index = var_name_index.value();
+ TNode<Uint32T> details = LoadDetailsByKeyIndex(properties, key_index);
+ GotoIf(IsSetWord32(details, PropertyDetails::kAttributesDontDeleteMask),
+ dont_delete);
+
+ DictionarySpecificDelete(receiver, properties, key_index, context);
Return(TrueConstant());
}
@@ -487,11 +521,6 @@ TF_BUILTIN(DeleteProperty, DeletePropertyBaseAssembler) {
Label if_index(this, &var_index), if_unique_name(this), if_notunique(this),
if_notfound(this), slow(this), if_proxy(this);
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- // TODO(v8:11167) remove once OrderedNameDictionary supported.
- GotoIf(Int32TrueConstant(), &slow);
- }
-
GotoIf(TaggedIsSmi(receiver), &slow);
TNode<Map> receiver_map = LoadMap(CAST(receiver));
TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map);
@@ -514,17 +543,17 @@ TF_BUILTIN(DeleteProperty, DeletePropertyBaseAssembler) {
Label dictionary(this), dont_delete(this);
GotoIf(IsDictionaryMap(receiver_map), &dictionary);
- // Fast properties need to clear recorded slots, which can only be done
- // in C++.
+ // Fast properties need to clear recorded slots and mark the deleted
+ // property as mutable, which can only be done in C++.
Goto(&slow);
BIND(&dictionary);
{
InvalidateValidityCellIfPrototype(receiver_map);
- TNode<NameDictionary> properties =
+ TNode<PropertyDictionary> properties =
CAST(LoadSlowProperties(CAST(receiver)));
- DeleteDictionaryProperty(receiver, properties, var_unique.value(),
+ DeleteDictionaryProperty(CAST(receiver), properties, var_unique.value(),
context, &dont_delete, &if_notfound);
}
@@ -926,7 +955,8 @@ void Builtins::Generate_MemMove(MacroAssembler* masm) {
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
+ V8_TARGET_ARCH_ARM
void Builtins::Generate_BaselineLeaveFrame(MacroAssembler* masm) {
EmitReturnBaseline(masm);
}
@@ -956,11 +986,6 @@ TF_BUILTIN(GetProperty, CodeStubAssembler) {
Label if_notfound(this), if_proxy(this, Label::kDeferred),
if_slow(this, Label::kDeferred);
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- // TODO(v8:11167) remove once OrderedNameDictionary supported.
- GotoIf(Int32TrueConstant(), &if_slow);
- }
-
CodeStubAssembler::LookupPropertyInHolder lookup_property_in_holder =
[=](TNode<HeapObject> receiver, TNode<HeapObject> holder,
TNode<Map> holder_map, TNode<Int32T> holder_instance_type,
@@ -1016,11 +1041,6 @@ TF_BUILTIN(GetPropertyWithReceiver, CodeStubAssembler) {
Label if_notfound(this), if_proxy(this, Label::kDeferred),
if_slow(this, Label::kDeferred);
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- // TODO(v8:11167) remove once OrderedNameDictionary supported.
- GotoIf(Int32TrueConstant(), &if_slow);
- }
-
CodeStubAssembler::LookupPropertyInHolder lookup_property_in_holder =
[=](TNode<HeapObject> receiver, TNode<HeapObject> holder,
TNode<Map> holder_map, TNode<Int32T> holder_instance_type,
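The DeleteProperty refactor above keeps the shared lookup and "don't delete" guard in a templated DeleteDictionaryProperty and pushes the storage-specific removal into DictionarySpecificDelete overloads for NameDictionary and SwissNameDictionary. A small sketch of that "template for the common path, overloads for the divergent step" shape, with illustrative types rather than real V8 classes:

#include <iostream>
#include <string>

struct NameDictionary {};
struct SwissNameDictionary {};

// Storage-specific step, selected by overload resolution.
void DictionarySpecificDelete(NameDictionary&, const std::string& key) {
  std::cout << "overwrite entry with the hole, maybe shrink: " << key << "\n";
}
void DictionarySpecificDelete(SwissNameDictionary&, const std::string& key) {
  std::cout << "swiss-table delete, maybe shrink: " << key << "\n";
}

// Shared path, written once for every dictionary flavour.
template <typename Dictionary>
bool DeleteDictionaryProperty(Dictionary& dict, const std::string& key) {
  // Common part: look the key up, bail out on non-configurable properties
  // (elided in this sketch)...
  DictionarySpecificDelete(dict, key);  // ...then the backend-specific step.
  return true;
}

int main() {
  NameDictionary nd;
  SwissNameDictionary sd;
  DeleteDictionaryProperty(nd, "x");
  DeleteDictionaryProperty(sd, "y");
  return 0;
}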
diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc
index 843adf71228..fe32a484a3e 100644
--- a/deps/v8/src/builtins/builtins-intl.cc
+++ b/deps/v8/src/builtins/builtins-intl.cc
@@ -149,8 +149,8 @@ BUILTIN(DateTimeFormatPrototypeFormatToParts) {
isolate, NewRangeError(MessageTemplate::kInvalidTimeValue));
}
- RETURN_RESULT_OR_FAILURE(
- isolate, JSDateTimeFormat::FormatToParts(isolate, dtf, date_value));
+ RETURN_RESULT_OR_FAILURE(isolate, JSDateTimeFormat::FormatToParts(
+ isolate, dtf, date_value, false));
}
// Common code for DateTimeFormatPrototypeFormtRange(|ToParts)
@@ -608,11 +608,12 @@ BUILTIN(LocaleConstructor) {
isolate->CountUsage(v8::Isolate::UseCounterFeature::kLocale);
+ const char* method = "Intl.Locale";
if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
- isolate->factory()->NewStringFromAsciiChecked(
- "Intl.Locale")));
+ isolate,
+ NewTypeError(MessageTemplate::kConstructorNotFunction,
+ isolate->factory()->NewStringFromAsciiChecked(method)));
}
// [[Construct]]
Handle<JSFunction> target = args.target();
@@ -645,16 +646,11 @@ BUILTIN(LocaleConstructor) {
Object::ToString(isolate, tag));
}
+ // 10. Set options to ? CoerceOptionsToObject(options).
Handle<JSReceiver> options_object;
- // 10. If options is undefined, then
- if (options->IsUndefined(isolate)) {
- // a. Let options be ! ObjectCreate(null).
- options_object = isolate->factory()->NewJSObjectWithNullProto();
- } else { // 11. Else
- // a. Let options be ? ToObject(options).
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, options_object,
- Object::ToObject(isolate, options));
- }
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, options_object,
+ Intl::CoerceOptionsToObject(isolate, options, method));
RETURN_RESULT_OR_FAILURE(
isolate, JSLocale::New(isolate, map, locale_string, options_object));
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index 8cf52e5368d..94be522c58b 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -16,7 +16,6 @@ namespace v8 {
namespace internal {
using IteratorRecord = TorqueStructIteratorRecord;
-using compiler::Node;
TNode<Object> IteratorBuiltinsAssembler::GetIteratorMethod(
TNode<Context> context, TNode<Object> object) {
@@ -55,8 +54,7 @@ IteratorRecord IteratorBuiltinsAssembler::GetIterator(TNode<Context> context,
BIND(&get_next);
TNode<Object> next =
GetProperty(context, iterator, factory()->next_string());
- return IteratorRecord{TNode<JSReceiver>::UncheckedCast(iterator),
- TNode<Object>::UncheckedCast(next)};
+ return IteratorRecord{TNode<JSReceiver>::UncheckedCast(iterator), next};
}
}
@@ -196,6 +194,7 @@ TF_BUILTIN(IterableToFixedArray, IteratorBuiltinsAssembler) {
Return(IterableToFixedArray(context, iterable, iterator_fn));
}
+#if V8_ENABLE_WEBASSEMBLY
TF_BUILTIN(IterableToFixedArrayForWasm, IteratorBuiltinsAssembler) {
auto context = Parameter<Context>(Descriptor::kContext);
auto iterable = Parameter<Object>(Descriptor::kIterable);
@@ -217,6 +216,7 @@ TF_BUILTIN(IterableToFixedArrayForWasm, IteratorBuiltinsAssembler) {
BIND(&done);
Return(values.var_array()->value());
}
+#endif // V8_ENABLE_WEBASSEMBLY
TNode<JSArray> IteratorBuiltinsAssembler::StringListFromIterable(
TNode<Context> context, TNode<Object> iterable) {
@@ -413,13 +413,13 @@ TF_BUILTIN(GetIteratorWithFeedbackLazyDeoptContinuation,
auto receiver = Parameter<Object>(Descriptor::kReceiver);
// TODO(v8:10047): Use TaggedIndex here once TurboFan supports it.
auto call_slot_smi = Parameter<Smi>(Descriptor::kCallSlot);
- TNode<TaggedIndex> call_slot = SmiToTaggedIndex(call_slot_smi);
auto feedback = Parameter<FeedbackVector>(Descriptor::kFeedback);
auto iterator_method = Parameter<Object>(Descriptor::kResult);
+  // Note that the builtin also expects the call_slot as a Smi.
TNode<Object> result =
CallBuiltin(Builtins::kCallIteratorWithFeedback, context, receiver,
- iterator_method, call_slot, feedback);
+ iterator_method, call_slot_smi, feedback);
Return(result);
}
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.h b/deps/v8/src/builtins/builtins-iterator-gen.h
index 6cea2c77ff4..c584708861a 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.h
+++ b/deps/v8/src/builtins/builtins-iterator-gen.h
@@ -10,8 +10,6 @@
namespace v8 {
namespace internal {
-using compiler::Node;
-
class GrowableFixedArray;
class IteratorBuiltinsAssembler : public CodeStubAssembler {
diff --git a/deps/v8/src/builtins/builtins-lazy-gen.cc b/deps/v8/src/builtins/builtins-lazy-gen.cc
index 88cb3b88dca..8af0bef95d2 100644
--- a/deps/v8/src/builtins/builtins-lazy-gen.cc
+++ b/deps/v8/src/builtins/builtins-lazy-gen.cc
@@ -116,7 +116,9 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
// feedback vector marker.
TNode<SharedFunctionInfo> shared =
CAST(LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset));
- TNode<Code> sfi_code = GetSharedFunctionInfoCode(shared, &compile_function);
+ TVARIABLE(Uint16T, sfi_data_type);
+ TNode<Code> sfi_code =
+ GetSharedFunctionInfoCode(shared, &sfi_data_type, &compile_function);
TNode<HeapObject> feedback_cell_value = LoadFeedbackCellValue(function);
@@ -149,14 +151,8 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
TVARIABLE(Code, code);
// Check if we have baseline code.
- // TODO(v8:11429): We already know if we have baseline code in
- // GetSharedFunctionInfoCode, make that jump to here.
- TNode<Uint32T> code_flags =
- LoadObjectField<Uint32T>(sfi_code, Code::kFlagsOffset);
- TNode<Uint32T> code_kind = DecodeWord32<Code::KindField>(code_flags);
- TNode<BoolT> is_baseline =
- IsEqualInWord32<Code::KindField>(code_kind, CodeKind::BASELINE);
- GotoIf(is_baseline, &baseline);
+ GotoIf(InstanceTypeEqual(sfi_data_type.value(), BASELINE_DATA_TYPE),
+ &baseline);
// Finally, check for presence of an NCI cached Code object - if an entry
// possibly exists, call into runtime to query the cache.
diff --git a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
index 1ec9e350f67..9f16186d13b 100644
--- a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
+++ b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
@@ -46,11 +46,8 @@ class MicrotaskQueueBuiltinsAssembler : public CodeStubAssembler {
void EnterMicrotaskContext(TNode<Context> native_context);
void RewindEnteredContext(TNode<IntPtrT> saved_entered_context_count);
- void RunAllPromiseHooks(PromiseHookType type, TNode<Context> context,
- TNode<HeapObject> promise_or_capability);
void RunPromiseHook(Runtime::FunctionId id, TNode<Context> context,
- TNode<HeapObject> promise_or_capability,
- TNode<Uint32T> promiseHookFlags);
+ TNode<HeapObject> promise_or_capability);
};
TNode<RawPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueue(
@@ -202,7 +199,7 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
const TNode<Object> thenable = LoadObjectField(
microtask, PromiseResolveThenableJobTask::kThenableOffset);
- RunAllPromiseHooks(PromiseHookType::kBefore, microtask_context,
+ RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context,
CAST(promise_to_resolve));
{
@@ -211,7 +208,7 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
promise_to_resolve, thenable, then);
}
- RunAllPromiseHooks(PromiseHookType::kAfter, microtask_context,
+ RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context,
CAST(promise_to_resolve));
RewindEnteredContext(saved_entered_context_count);
@@ -246,8 +243,8 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
BIND(&preserved_data_done);
// Run the promise before/debug hook if enabled.
- RunAllPromiseHooks(PromiseHookType::kBefore, microtask_context,
- promise_or_capability);
+ RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context,
+ promise_or_capability);
{
ScopedExceptionHandler handler(this, &if_exception, &var_exception);
@@ -256,8 +253,8 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
}
// Run the promise after/debug hook if enabled.
- RunAllPromiseHooks(PromiseHookType::kAfter, microtask_context,
- promise_or_capability);
+ RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context,
+ promise_or_capability);
Label preserved_data_reset_done(this);
GotoIf(IsUndefined(preserved_embedder_data), &preserved_data_reset_done);
@@ -299,8 +296,8 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
BIND(&preserved_data_done);
// Run the promise before/debug hook if enabled.
- RunAllPromiseHooks(PromiseHookType::kBefore, microtask_context,
- promise_or_capability);
+ RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context,
+ promise_or_capability);
{
ScopedExceptionHandler handler(this, &if_exception, &var_exception);
@@ -309,8 +306,8 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
}
// Run the promise after/debug hook if enabled.
- RunAllPromiseHooks(PromiseHookType::kAfter, microtask_context,
- promise_or_capability);
+ RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context,
+ promise_or_capability);
Label preserved_data_reset_done(this);
GotoIf(IsUndefined(preserved_embedder_data), &preserved_data_reset_done);
@@ -468,43 +465,12 @@ void MicrotaskQueueBuiltinsAssembler::RewindEnteredContext(
saved_entered_context_count);
}
-void MicrotaskQueueBuiltinsAssembler::RunAllPromiseHooks(
- PromiseHookType type, TNode<Context> context,
- TNode<HeapObject> promise_or_capability) {
- Label hook(this, Label::kDeferred), done_hook(this);
- TNode<Uint32T> promiseHookFlags = PromiseHookFlags();
- Branch(IsAnyPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
- promiseHookFlags), &hook, &done_hook);
- BIND(&hook);
- {
- switch (type) {
- case PromiseHookType::kBefore:
- RunContextPromiseHookBefore(context, promise_or_capability,
- promiseHookFlags);
- RunPromiseHook(Runtime::kPromiseHookBefore, context,
- promise_or_capability, promiseHookFlags);
- break;
- case PromiseHookType::kAfter:
- RunContextPromiseHookAfter(context, promise_or_capability,
- promiseHookFlags);
- RunPromiseHook(Runtime::kPromiseHookAfter, context,
- promise_or_capability, promiseHookFlags);
- break;
- default:
- UNREACHABLE();
- }
- Goto(&done_hook);
- }
- BIND(&done_hook);
-}
-
void MicrotaskQueueBuiltinsAssembler::RunPromiseHook(
Runtime::FunctionId id, TNode<Context> context,
- TNode<HeapObject> promise_or_capability,
- TNode<Uint32T> promiseHookFlags) {
+ TNode<HeapObject> promise_or_capability) {
Label hook(this, Label::kDeferred), done_hook(this);
- Branch(IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
- promiseHookFlags), &hook, &done_hook);
+ Branch(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(), &hook,
+ &done_hook);
BIND(&hook);
{
// Get to the underlying JSPromise instance.
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index b4d8372a030..c389abcc252 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -320,7 +320,7 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
IntPtrConstant(2));
StoreFixedArrayElement(CAST(elements), 0, next_key, SKIP_WRITE_BARRIER);
StoreFixedArrayElement(CAST(elements), 1, value, SKIP_WRITE_BARRIER);
- value = TNode<JSArray>::UncheckedCast(array);
+ value = array;
}
StoreFixedArrayElement(values_or_entries, var_result_index.value(),
@@ -350,7 +350,7 @@ ObjectEntriesValuesBuiltinsAssembler::FinalizeValuesOrEntriesJSArray(
GotoIf(IntPtrEqual(size, IntPtrConstant(0)), if_empty);
TNode<JSArray> array = AllocateJSArray(array_map, result, SmiTag(size));
- return TNode<JSArray>::UncheckedCast(array);
+ return array;
}
TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) {
@@ -1033,11 +1033,6 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
Label call_runtime(this, Label::kDeferred), prototype_valid(this),
no_properties(this);
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- // TODO(v8:11167) remove once OrderedNameDictionary supported.
- GotoIf(Int32TrueConstant(), &call_runtime);
- }
-
{
Comment("Argument 1 check: prototype");
GotoIf(IsNull(prototype), &prototype_valid);
@@ -1077,9 +1072,9 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
BIND(&null_proto);
{
map = LoadSlowObjectWithNullPrototypeMap(native_context);
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- properties = AllocateOrderedNameDictionary(
- OrderedNameDictionary::kInitialCapacity);
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ properties =
+ AllocateSwissNameDictionary(SwissNameDictionary::kInitialCapacity);
} else {
properties = AllocateNameDictionary(NameDictionary::kInitialCapacity);
}
@@ -1226,7 +1221,7 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
IntPtrAdd(WordSar(frame_size, IntPtrConstant(kTaggedSizeLog2)),
formal_parameter_count);
TNode<FixedArrayBase> parameters_and_registers =
- AllocateFixedArray(HOLEY_ELEMENTS, size);
+ AllocateFixedArray(HOLEY_ELEMENTS, size, kAllowLargeObjectAllocation);
FillFixedArrayWithValue(HOLEY_ELEMENTS, parameters_and_registers,
IntPtrConstant(0), size, RootIndex::kUndefinedValue);
// TODO(cbruni): support start_offset to avoid double initialization.
@@ -1285,11 +1280,6 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
call_runtime(this, Label::kDeferred),
return_undefined(this, Label::kDeferred), if_notunique_name(this);
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- // TODO(v8:11167) remove once OrderedNameDictionary supported.
- GotoIf(Int32TrueConstant(), &call_runtime);
- }
-
TNode<Map> map = LoadMap(object);
TNode<Uint16T> instance_type = LoadMapInstanceType(map);
GotoIf(IsSpecialReceiverInstanceType(instance_type), &call_runtime);
@@ -1372,14 +1362,8 @@ void ObjectBuiltinsAssembler::AddToDictionaryIf(
Label done(this);
GotoIfNot(condition, &done);
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- // TODO(v8:11167) remove once OrderedNameDictionary supported.
- CallRuntime(Runtime::kAddDictionaryProperty, context, object,
- HeapConstant(name), value);
- } else {
- Add<NameDictionary>(CAST(name_dictionary), HeapConstant(name), value,
- bailout);
- }
+ Add<PropertyDictionary>(CAST(name_dictionary), HeapConstant(name), value,
+ bailout);
Goto(&done);
BIND(&done);
@@ -1436,8 +1420,8 @@ TNode<JSObject> ObjectBuiltinsAssembler::FromPropertyDescriptor(
// We want to preallocate the slots for value, writable, get, set,
// enumerable and configurable - a total of 6
TNode<HeapObject> properties =
- V8_DICT_MODE_PROTOTYPES_BOOL
- ? TNode<HeapObject>(AllocateOrderedNameDictionary(6))
+ V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL
+ ? TNode<HeapObject>(AllocateSwissNameDictionary(6))
: AllocateNameDictionary(6);
TNode<JSObject> js_desc = AllocateJSObjectFromMap(map, properties);
@@ -1480,12 +1464,9 @@ TNode<JSObject> ObjectBuiltinsAssembler::FromPropertyDescriptor(
js_descriptor = js_desc;
Goto(&return_desc);
- if (!V8_DICT_MODE_PROTOTYPES_BOOL) {
- // TODO(v8:11167) make unconditional once OrderedNameDictionary supported.
- BIND(&bailout);
- CSA_ASSERT(this, Int32Constant(0));
- Unreachable();
- }
+ BIND(&bailout);
+ CSA_ASSERT(this, Int32Constant(0));
+ Unreachable();
}
BIND(&return_desc);
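Several hunks in this file replace the V8_DICT_MODE_PROTOTYPES_BOOL bail-outs with a V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL choice between SwissNameDictionary and NameDictionary, with the rest of the code written against a PropertyDictionary alias. A compact sketch of that compile-time selection pattern, under the assumption that a single build flag picks the slow-property backing store (the alias below is illustrative, not the literal V8 definition):

#include <type_traits>

struct NameDictionary {};
struct SwissNameDictionary {};

#ifndef V8_ENABLE_SWISS_NAME_DICTIONARY
#define V8_ENABLE_SWISS_NAME_DICTIONARY 0
#endif
inline constexpr bool kUseSwissNameDictionary = V8_ENABLE_SWISS_NAME_DICTIONARY;

// Code elsewhere only ever names PropertyDictionary, so flipping the flag
// switches the backing store without touching the call sites.
using PropertyDictionary =
    std::conditional_t<kUseSwissNameDictionary, SwissNameDictionary,
                       NameDictionary>;

int main() {
  PropertyDictionary properties;  // NameDictionary unless the flag is set.
  (void)properties;
  return 0;
}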
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index 367bc549403..16304a56a54 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -52,8 +52,8 @@ TNode<JSProxy> ProxiesCodeStubAssembler::AllocateProxy(
BIND(&create_proxy);
TNode<HeapObject> proxy = Allocate(JSProxy::kSize);
StoreMapNoWriteBarrier(proxy, map.value());
- RootIndex empty_dict = V8_DICT_MODE_PROTOTYPES_BOOL
- ? RootIndex::kEmptyOrderedPropertyDictionary
+ RootIndex empty_dict = V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL
+ ? RootIndex::kEmptySwissPropertyDictionary
: RootIndex::kEmptyPropertyDictionary;
StoreObjectFieldRoot(proxy, JSProxy::kPropertiesOrHashOffset, empty_dict);
StoreObjectFieldNoWriteBarrier(proxy, JSProxy::kTargetOffset, target);
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.h b/deps/v8/src/builtins/builtins-proxy-gen.h
index 837f4d30aff..fce253b0a3a 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.h
+++ b/deps/v8/src/builtins/builtins-proxy-gen.h
@@ -10,7 +10,6 @@
namespace v8 {
namespace internal {
-using compiler::Node;
class ProxiesCodeStubAssembler : public CodeStubAssembler {
public:
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 0debd125e30..23648efb98b 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -22,8 +22,6 @@
namespace v8 {
namespace internal {
-using compiler::Node;
-
// Tail calls the regular expression interpreter.
// static
void Builtins::Generate_RegExpInterpreterTrampoline(MacroAssembler* masm) {
@@ -325,9 +323,8 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<Map> map = LoadSlowObjectWithNullPrototypeMap(native_context);
TNode<HeapObject> properties;
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- // AllocateOrderedNameDictionary always uses kAllowLargeObjectAllocation.
- properties = AllocateOrderedNameDictionary(num_properties);
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ properties = AllocateSwissNameDictionary(num_properties);
} else {
properties =
AllocateNameDictionary(num_properties, kAllowLargeObjectAllocation);
@@ -366,28 +363,18 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
// - Receiver is extensible
// - Receiver has no interceptors
Label add_dictionary_property_slow(this, Label::kDeferred);
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- // TODO(v8:11167) remove once OrderedNameDictionary supported.
- CallRuntime(Runtime::kAddDictionaryProperty, context, group_object,
- name, capture);
- } else {
- Add<NameDictionary>(CAST(properties), name, capture,
- &add_dictionary_property_slow);
- }
+ Add<PropertyDictionary>(CAST(properties), name, capture,
+ &add_dictionary_property_slow);
var_i = i_plus_2;
Branch(IntPtrGreaterThanOrEqual(var_i.value(), names_length),
&maybe_build_indices, &loop);
- if (!V8_DICT_MODE_PROTOTYPES_BOOL) {
- // TODO(v8:11167) make unconditional once OrderedNameDictionary
- // supported.
- BIND(&add_dictionary_property_slow);
- // If the dictionary needs resizing, the above Add call will jump here
- // before making any changes. This shouldn't happen because we allocated
- // the dictionary with enough space above.
- Unreachable();
- }
+ BIND(&add_dictionary_property_slow);
+ // If the dictionary needs resizing, the above Add call will jump here
+ // before making any changes. This shouldn't happen because we allocated
+ // the dictionary with enough space above.
+ Unreachable();
}
}
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index d46bbacadb5..dc3a4ac364e 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -18,8 +18,6 @@
namespace v8 {
namespace internal {
-using Node = compiler::Node;
-
TNode<RawPtrT> StringBuiltinsAssembler::DirectStringData(
TNode<String> string, TNode<Word32T> string_instance_type) {
// Compute the effective offset of the first character.
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc
index dea43fcc5d8..65b1ab2f2b9 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc
@@ -11,12 +11,11 @@
#include "src/execution/protectors.h"
#include "src/handles/handles-inl.h"
#include "src/heap/factory-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
namespace v8 {
namespace internal {
-using compiler::Node;
-
// -----------------------------------------------------------------------------
// ES6 section 22.2 TypedArray Objects
@@ -371,14 +370,14 @@ void TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
TNode<IntPtrT> full_base = Signed(BitcastTaggedToWord(base));
TNode<Int32T> compressed_base = TruncateIntPtrToInt32(full_base);
// TODO(v8:9706): Add a way to directly use kRootRegister value.
- TNode<IntPtrT> isolate_root =
+ TNode<IntPtrT> ptr_compr_cage_base =
IntPtrSub(full_base, Signed(ChangeUint32ToWord(compressed_base)));
// Add JSTypedArray::ExternalPointerCompensationForOnHeapArray() to offset.
DCHECK_EQ(
isolate()->isolate_root(),
JSTypedArray::ExternalPointerCompensationForOnHeapArray(isolate()));
// See JSTypedArray::SetOnHeapDataPtr() for details.
- offset = Unsigned(IntPtrAdd(offset, isolate_root));
+ offset = Unsigned(IntPtrAdd(offset, ptr_compr_cage_base));
}
StoreJSTypedArrayBasePointer(holder, base);
@@ -434,9 +433,12 @@ void TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromPreparedValue(
TNode<Context> context, TNode<JSTypedArray> typed_array,
TNode<UintPtrT> index, TNode<TValue> prepared_value,
ElementsKind elements_kind, Label* if_detached) {
- static_assert(std::is_same<TValue, UntaggedT>::value ||
- std::is_same<TValue, BigInt>::value,
- "Only UntaggedT or BigInt values are allowed");
+ static_assert(
+ std::is_same<TValue, Word32T>::value ||
+ std::is_same<TValue, Float32T>::value ||
+ std::is_same<TValue, Float64T>::value ||
+ std::is_same<TValue, BigInt>::value,
+ "Only Word32T, Float32T, Float64T or BigInt values are allowed");
// ToNumber/ToBigInt may execute JavaScript code, which could detach
// the array's buffer.
TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(typed_array);
@@ -450,20 +452,48 @@ void TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromTagged(
TNode<Context> context, TNode<JSTypedArray> typed_array,
TNode<UintPtrT> index, TNode<Object> value, ElementsKind elements_kind,
Label* if_detached) {
- if (elements_kind == BIGINT64_ELEMENTS ||
- elements_kind == BIGUINT64_ELEMENTS) {
- TNode<BigInt> prepared_value =
- PrepareValueForWriteToTypedArray<BigInt>(value, elements_kind, context);
- StoreJSTypedArrayElementFromPreparedValue(context, typed_array, index,
- prepared_value, elements_kind,
- if_detached);
- } else {
- TNode<UntaggedT> prepared_value =
- PrepareValueForWriteToTypedArray<UntaggedT>(value, elements_kind,
- context);
- StoreJSTypedArrayElementFromPreparedValue(context, typed_array, index,
- prepared_value, elements_kind,
- if_detached);
+ switch (elements_kind) {
+ case UINT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ case UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ case UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS: {
+ auto prepared_value = PrepareValueForWriteToTypedArray<Word32T>(
+ value, elements_kind, context);
+ StoreJSTypedArrayElementFromPreparedValue(context, typed_array, index,
+ prepared_value, elements_kind,
+ if_detached);
+ break;
+ }
+ case FLOAT32_ELEMENTS: {
+ auto prepared_value = PrepareValueForWriteToTypedArray<Float32T>(
+ value, elements_kind, context);
+ StoreJSTypedArrayElementFromPreparedValue(context, typed_array, index,
+ prepared_value, elements_kind,
+ if_detached);
+ break;
+ }
+ case FLOAT64_ELEMENTS: {
+ auto prepared_value = PrepareValueForWriteToTypedArray<Float64T>(
+ value, elements_kind, context);
+ StoreJSTypedArrayElementFromPreparedValue(context, typed_array, index,
+ prepared_value, elements_kind,
+ if_detached);
+ break;
+ }
+ case BIGINT64_ELEMENTS:
+ case BIGUINT64_ELEMENTS: {
+ auto prepared_value = PrepareValueForWriteToTypedArray<BigInt>(
+ value, elements_kind, context);
+ StoreJSTypedArrayElementFromPreparedValue(context, typed_array, index,
+ prepared_value, elements_kind,
+ if_detached);
+ break;
+ }
+ default:
+ UNREACHABLE();
}
}
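The typed-array change above replaces the old two-way UntaggedT/BigInt split with a switch over the elements kind, so each kind prepares a value of its exact machine type (Word32T, Float32T, Float64T or BigInt) before the store. A stand-alone C++ sketch of the same dispatch-on-kind idea, using plain machine types and an illustrative byte buffer instead of V8's CSA types:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

enum class ElementsKind { kInt32, kFloat32, kFloat64 };

// Store a value of a concrete width into raw backing storage.
template <typename T>
void StorePrepared(unsigned char* data, std::size_t index, T value) {
  std::memcpy(data + index * sizeof(T), &value, sizeof(T));
}

// Dispatch once on the elements kind, then work with the exact type.
void StoreElement(ElementsKind kind, unsigned char* data, std::size_t index,
                  double value) {
  switch (kind) {
    case ElementsKind::kInt32:
      StorePrepared(data, index, static_cast<int32_t>(value));
      break;
    case ElementsKind::kFloat32:
      StorePrepared(data, index, static_cast<float>(value));
      break;
    case ElementsKind::kFloat64:
      StorePrepared(data, index, value);
      break;
  }
}

int main() {
  unsigned char backing[32] = {};
  StoreElement(ElementsKind::kFloat64, backing, 0, 1.5);
  double out;
  std::memcpy(&out, backing, sizeof(out));
  std::printf("%g\n", out);  // 1.5
  return 0;
}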
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index 1e94dac811f..73302ba75cc 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -106,13 +106,13 @@ void Builtins::TearDown() { initialized_ = false; }
const char* Builtins::Lookup(Address pc) {
// Off-heap pc's can be looked up through binary search.
- Code maybe_builtin = InstructionStream::TryLookupCode(isolate_, pc);
- if (!maybe_builtin.is_null()) return name(maybe_builtin.builtin_index());
+ Builtins::Name builtin = InstructionStream::TryLookupCode(isolate_, pc);
+ if (Builtins::IsBuiltinId(builtin)) return name(builtin);
// May be called during initialization (disassembler).
if (initialized_) {
for (int i = 0; i < builtin_count; i++) {
- if (isolate_->heap()->builtin(i).contains(pc)) return name(i);
+ if (isolate_->heap()->builtin(i).contains(isolate_, pc)) return name(i);
}
}
return nullptr;
@@ -287,7 +287,7 @@ bool Builtins::IsIsolateIndependentBuiltin(const Code code) {
// static
void Builtins::InitializeBuiltinEntryTable(Isolate* isolate) {
- EmbeddedData d = EmbeddedData::FromBlob();
+ EmbeddedData d = EmbeddedData::FromBlob(isolate);
Address* builtin_entry_table = isolate->builtin_entry_table();
for (int i = 0; i < builtin_count; i++) {
// TODO(jgruber,chromium:1020986): Remove the CHECK once the linked issue is
@@ -485,7 +485,9 @@ bool Builtins::CodeObjectIsExecutable(int builtin_index) {
case Builtins::kCall_ReceiverIsAny:
case Builtins::kHandleApiCall:
case Builtins::kInstantiateAsmJs:
+#if V8_ENABLE_WEBASSEMBLY
case Builtins::kGenericJSToWasmWrapper:
+#endif // V8_ENABLE_WEBASSEMBLY
// TODO(delphick): Remove this when calls to it have the trampoline inlined
// or are converted to use kCallBuiltinPointer.
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index 7bb957bb66b..95f7728a3fc 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -49,6 +49,7 @@ class Builtins {
const char* Lookup(Address pc);
enum Name : int32_t {
+ kNoBuiltinId = -1,
#define DEF_ENUM(Name, ...) k##Name,
BUILTIN_LIST(DEF_ENUM, DEF_ENUM, DEF_ENUM, DEF_ENUM, DEF_ENUM, DEF_ENUM,
DEF_ENUM)
@@ -62,8 +63,6 @@ class Builtins {
#undef EXTRACT_NAME
};
- static const int32_t kNoBuiltinId = -1;
-
static constexpr int kFirstWideBytecodeHandler =
kFirstBytecodeHandler + kNumberOfBytecodeHandlers;
static constexpr int kFirstExtraWideBytecodeHandler =
@@ -73,7 +72,9 @@ class Builtins {
STATIC_ASSERT(kLastBytecodeHandlerPlusOne == builtin_count);
static constexpr bool IsBuiltinId(int maybe_id) {
- return 0 <= maybe_id && maybe_id < builtin_count;
+ STATIC_ASSERT(kNoBuiltinId == -1);
+ return static_cast<uint32_t>(maybe_id) <
+ static_cast<uint32_t>(builtin_count);
}
// The different builtin kinds are documented in builtins-definitions.h.
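Moving kNoBuiltinId (-1) into the Name enum lets IsBuiltinId fold the lower-bound and upper-bound checks into one unsigned comparison: casting a negative id to uint32_t produces a huge value, which the single "< builtin_count" test rejects along with anything past the end. A minimal demonstration (the count below is illustrative, not the real builtin_count):

#include <cstdint>

constexpr int kNoBuiltinId = -1;
constexpr int kBuiltinCount = 1700;  // illustrative count only

// One comparison covers both "id >= 0" and "id < count".
constexpr bool IsBuiltinId(int maybe_id) {
  return static_cast<uint32_t>(maybe_id) < static_cast<uint32_t>(kBuiltinCount);
}

int main() {
  static_assert(!IsBuiltinId(kNoBuiltinId), "sentinel is rejected");
  static_assert(IsBuiltinId(0) && IsBuiltinId(kBuiltinCount - 1), "in range");
  static_assert(!IsBuiltinId(kBuiltinCount), "upper bound is exclusive");
  return 0;
}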
diff --git a/deps/v8/src/builtins/cast.tq b/deps/v8/src/builtins/cast.tq
index 2bec3d86be0..b490055a19e 100644
--- a/deps/v8/src/builtins/cast.tq
+++ b/deps/v8/src/builtins/cast.tq
@@ -386,12 +386,6 @@ Cast<Undefined|Callable>(o: HeapObject): Undefined|Callable
return HeapObjectToCallable(o) otherwise CastError;
}
-Cast<Undefined|JSFunction>(o: HeapObject): Undefined|JSFunction
- labels CastError {
- if (o == Undefined) return Undefined;
- return Cast<JSFunction>(o) otherwise CastError;
-}
-
macro Cast<T : type extends Symbol>(o: Symbol): T labels CastError;
Cast<PublicSymbol>(s: Symbol): PublicSymbol labels CastError {
if (s.flags.is_private) goto CastError;
diff --git a/deps/v8/src/builtins/constructor.tq b/deps/v8/src/builtins/constructor.tq
index 53088c627d8..add6db03052 100644
--- a/deps/v8/src/builtins/constructor.tq
+++ b/deps/v8/src/builtins/constructor.tq
@@ -61,7 +61,7 @@ builtin CreateRegExpLiteral(implicit context: Context)(
builtin CreateShallowArrayLiteral(implicit context: Context)(
maybeFeedbackVector: Undefined|FeedbackVector, slot: TaggedIndex,
- constantElements: ArrayBoilerplateDescription): HeapObject {
+ constantElements: ArrayBoilerplateDescription, flags: Smi): HeapObject {
try {
const vector = Cast<FeedbackVector>(maybeFeedbackVector)
otherwise CallRuntime;
@@ -70,8 +70,7 @@ builtin CreateShallowArrayLiteral(implicit context: Context)(
otherwise CallRuntime;
} label CallRuntime deferred {
tail runtime::CreateArrayLiteral(
- context, maybeFeedbackVector, slot, constantElements,
- SmiConstant(kIsShallow));
+ context, maybeFeedbackVector, slot, constantElements, flags);
}
}
diff --git a/deps/v8/src/builtins/convert.tq b/deps/v8/src/builtins/convert.tq
index 7e8453b544b..c1c73d00601 100644
--- a/deps/v8/src/builtins/convert.tq
+++ b/deps/v8/src/builtins/convert.tq
@@ -69,9 +69,19 @@ FromConstexpr<Smi, constexpr Smi>(s: constexpr Smi): Smi {
FromConstexpr<uint32, constexpr int31>(i: constexpr int31): uint32 {
return Unsigned(Int32Constant(i));
}
+FromConstexpr<uint8, constexpr uint8>(i: constexpr uint8): uint8 {
+ const i: uint32 = i;
+ return %RawDownCast<uint8>(i);
+}
FromConstexpr<uint32, constexpr uint32>(i: constexpr uint32): uint32 {
return Unsigned(%FromConstexpr<int32>(i));
}
+FromConstexpr<uint64, constexpr uint64>(i: constexpr uint64): uint64 {
+ return Uint64Constant(i);
+}
+FromConstexpr<uint64, constexpr int31>(i: constexpr int31): uint64 {
+ return Convert<uint64>(Unsigned(Int32Constant(i)));
+}
FromConstexpr<uintptr, constexpr int31>(i: constexpr int31): uintptr {
return ChangeUint32ToWord(i);
}
@@ -128,9 +138,8 @@ macro Convert<To: type, From: type>(i: From): To labels Overflow {
Convert<Boolean, bool>(b: bool): Boolean {
return b ? True : False;
}
-extern macro ConvertElementsKindToInt(ElementsKind): int32;
-Convert<int32, ElementsKind>(elementsKind: ElementsKind): int32 {
- return ConvertElementsKindToInt(elementsKind);
+Convert<int32, bool>(b: bool): int32 {
+ return ChangeBoolToInt32(b);
}
Convert<Number, int32>(i: int32): Number {
return ChangeInt32ToTagged(i);
@@ -156,6 +165,9 @@ Convert<Smi, uint32>(ui: uint32): Smi {
Convert<uintptr, uint32>(ui: uint32): uintptr {
return ChangeUint32ToWord(ui);
}
+Convert<uint64, uint32>(ui: uint32): uint64 {
+ return ChangeUint32ToUint64(ui);
+}
Convert<intptr, uint16>(ui: uint16): intptr {
return Signed(ChangeUint32ToWord(ui));
}
@@ -183,6 +195,9 @@ Convert<int32, uint31>(i: uint31): int32 {
Convert<int32, intptr>(i: intptr): int32 {
return TruncateIntPtrToInt32(i);
}
+Convert<int32, int64>(i: int64): int32 {
+ return TruncateInt64ToInt32(i);
+}
Convert<int32, Number>(n: Number): int32 {
typeswitch (n) {
case (s: Smi): {
@@ -320,3 +335,6 @@ Convert<PromiseState, int32>(s: int32): PromiseState {
Convert<ScopeFlags, Smi>(s: Smi): ScopeFlags {
return %RawDownCast<ScopeFlags>(Unsigned(SmiToInt32(s)));
}
+Convert<I8X16, Simd128>(s: Simd128): I8X16 {
+ return %RawDownCast<I8X16>(s);
+}
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 03bad42d67a..44b71bed915 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -23,8 +23,11 @@
#include "src/objects/js-generator.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -563,6 +566,25 @@ static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
__ bind(&done);
}
+static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1,
+ Label* is_baseline) {
+ Label done;
+ __ LoadMap(scratch1, sfi_data);
+
+ __ CmpInstanceType(scratch1, BASELINE_DATA_TYPE);
+ __ j(equal, is_baseline);
+
+ __ CmpInstanceType(scratch1, INTERPRETER_DATA_TYPE);
+ __ j(not_equal, &done, Label::kNear);
+
+ __ mov(sfi_data,
+ FieldOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+
+ __ bind(&done);
+}
+
// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -645,13 +667,23 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
+ Label is_baseline, ok;
__ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
__ Push(eax);
- GetSharedFunctionInfoBytecode(masm, ecx, eax);
+ GetSharedFunctionInfoBytecodeOrBaseline(masm, ecx, eax, &is_baseline);
__ Pop(eax);
+
__ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
__ Assert(equal, AbortReason::kMissingBytecodeArray);
+ __ jmp(&ok);
+
+ __ bind(&is_baseline);
+ __ Pop(eax);
+ __ CmpObjectType(ecx, BASELINE_DATA_TYPE, ecx);
+ __ Assert(equal, AbortReason::kMissingBytecodeArray);
+
+ __ bind(&ok);
}
// Resume (Ignition/TurboFan) generator object.
@@ -919,6 +951,31 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ bind(&end);
}
+// Read off the optimization state in the feedback vector and check if there
+// is optimized code or an optimization marker that needs to be processed.
+// Registers optimization_state and feedback_vector must be aliased.
+static void LoadOptimizationStateAndJumpIfNeedsProcessing(
+ MacroAssembler* masm, Register optimization_state,
+ XMMRegister saved_feedback_vector, Label* has_optimized_code_or_marker) {
+ Register feedback_vector = optimization_state;
+ __ RecordComment("[ Check optimization state");
+
+  // Store feedback_vector. We may need it if we need to load the optimized code
+ // slot entry.
+ __ movd(saved_feedback_vector, feedback_vector);
+ __ mov(optimization_state,
+ FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+
+  // Check if there is optimized code or an optimization marker that needs to be
+ // processed.
+ __ test(
+ optimization_state,
+ Immediate(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
+ __ j(not_zero, has_optimized_code_or_marker);
+
+ __ RecordComment("]");
+}
+
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
MacroAssembler* masm, Register optimization_state,
XMMRegister saved_feedback_vector) {
@@ -964,10 +1021,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The bytecode array could have been flushed from the shared function info,
// if so, call into CompileLazy.
- Label compile_lazy;
__ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoBytecode(masm, ecx, eax);
+
+ Label is_baseline;
+ GetSharedFunctionInfoBytecodeOrBaseline(masm, ecx, eax, &is_baseline);
+
+ Label compile_lazy;
__ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, eax);
__ j(not_equal, &compile_lazy);
@@ -985,20 +1045,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the optimization state from the feedback vector and re-use the
// register.
- Register optimization_state = ecx;
- // Store feedback_vector. We may need it if we need to load the optimze code
- // slot entry.
- __ movd(xmm1, feedback_vector);
- __ mov(optimization_state,
- FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
-
- // Check if there is optimized code or a optimization marker that needes to be
- // processed.
Label has_optimized_code_or_marker;
- __ test(
- optimization_state,
- Immediate(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
- __ j(not_zero, &has_optimized_code_or_marker);
+ Register optimization_state = ecx;
+ LoadOptimizationStateAndJumpIfNeedsProcessing(masm, optimization_state, xmm1,
+ &has_optimized_code_or_marker);
Label not_optimized;
__ bind(&not_optimized);
@@ -1183,6 +1233,40 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ movd(eax, xmm0);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+ __ bind(&is_baseline);
+ {
+ __ movd(xmm2, ecx); // Save baseline data.
+ // Load the feedback vector from the closure.
+ __ mov(feedback_vector,
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
+
+ Label install_baseline_code;
+ // Check if feedback vector is valid. If not, call prepare for baseline to
+ // allocate it.
+ __ LoadMap(eax, feedback_vector);
+ __ CmpInstanceType(eax, FEEDBACK_VECTOR_TYPE);
+ __ j(not_equal, &install_baseline_code);
+
+ // Check for an optimization marker.
+ LoadOptimizationStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, xmm1, &has_optimized_code_or_marker);
+
+ // Load the baseline code into the closure.
+ __ movd(ecx, xmm2);
+ __ mov(ecx, FieldOperand(ecx, BaselineData::kBaselineCodeOffset));
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
+ __ push(edx); // Spill.
+ ReplaceClosureCodeWithOptimizedCode(masm, ecx, closure, eax, edx);
+ __ pop(edx);
+ __ movd(eax, xmm0); // Recover argument count.
+ __ JumpCodeObject(ecx);
+
+ __ bind(&install_baseline_code);
+ __ movd(eax, xmm0); // Recover argument count.
+ GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
+ }
+
__ bind(&stack_overflow);
__ CallRuntime(Runtime::kThrowStackOverflow);
__ int3(); // Should not return.
@@ -1555,6 +1639,150 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
+// static
+void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
+ auto descriptor = Builtins::CallInterfaceDescriptorFor(
+ Builtins::kBaselineOutOfLinePrologue);
+ Register arg_count = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
+ Register frame_size = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
+
+ // Save argument count and bytecode array.
+ XMMRegister saved_arg_count = xmm0;
+ XMMRegister saved_bytecode_array = xmm1;
+ XMMRegister saved_frame_size = xmm2;
+ XMMRegister saved_feedback_vector = xmm3;
+ __ movd(saved_arg_count, arg_count);
+ __ movd(saved_frame_size, frame_size);
+
+ // Use the arg count (eax) as the scratch register.
+ Register scratch = arg_count;
+
+ // Load the feedback vector from the closure.
+ Register feedback_vector = ecx;
+ Register closure = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kClosure);
+ __ mov(feedback_vector,
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
+ if (__ emit_debug_code()) {
+ __ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, scratch);
+ __ Assert(equal, AbortReason::kExpectedFeedbackVector);
+ }
+
+ // Load the optimization state from the feedback vector and re-use the
+ // register.
+ Label has_optimized_code_or_marker;
+ Register optimization_state = ecx;
+ LoadOptimizationStateAndJumpIfNeedsProcessing(masm, optimization_state,
+ saved_feedback_vector,
+ &has_optimized_code_or_marker);
+
+ // Load the feedback vector and increment the invocation count.
+ __ movd(feedback_vector, saved_feedback_vector);
+ __ inc(FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));
+
+ XMMRegister return_address = xmm4;
+ __ RecordComment("[ Frame Setup");
+ // Save the return address, so that we can push it to the end of the newly
+ // set-up frame once we're done setting it up.
+ __ PopReturnAddressTo(return_address, scratch);
+ // The bytecode array was pushed to the stack by the caller.
+ __ Pop(saved_bytecode_array, scratch);
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::BASELINE);
+
+ __ Push(descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kCalleeContext)); // Callee's
+ // context.
+ Register callee_js_function = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kClosure);
+ DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
+ DCHECK_EQ(callee_js_function, kJSFunctionRegister);
+ __ Push(callee_js_function); // Callee's JS function.
+ __ Push(saved_arg_count, scratch); // Push actual argument count.
+
+ // We'll use the bytecode for both code age/OSR resetting, and pushing onto
+ // the frame, so load it into a register.
+ Register bytecode_array = scratch;
+ __ movd(bytecode_array, saved_bytecode_array);
+ // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
+  // are 8-bit fields next to each other, so we could just optimize by writing
+  // a 16-bit value. These static asserts guard that our assumption is valid.
+ STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
+ BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ mov_w(FieldOperand(bytecode_array, BytecodeArray::kOsrNestingLevelOffset),
+ Immediate(0));
+ __ Push(bytecode_array);
+
+  // Baseline code frames store the feedback vector where the interpreter would
+ // store the bytecode offset.
+ __ Push(saved_feedback_vector, scratch);
+ __ RecordComment("]");
+
+ __ RecordComment("[ Stack/interrupt check");
+ Label call_stack_guard;
+ {
+ // Stack check. This folds the checks for both the interrupt stack limit
+ // check and the real stack limit into one by just checking for the
+ // interrupt limit. The interrupt limit is either equal to the real stack
+ // limit or tighter. By ensuring we have space until that limit after
+ // building the frame we can quickly precheck both at once.
+ //
+ // TODO(v8:11429): Backport this folded check to the
+ // InterpreterEntryTrampoline.
+ __ movd(frame_size, saved_frame_size);
+ __ Move(scratch, esp);
+ DCHECK_NE(frame_size, kJavaScriptCallNewTargetRegister);
+ __ sub(scratch, frame_size);
+ __ CompareStackLimit(scratch, StackLimitKind::kInterruptStackLimit);
+ __ j(below, &call_stack_guard);
+ __ RecordComment("]");
+ }
+
+ // Push the return address back onto the stack for return.
+ __ PushReturnAddressFrom(return_address, scratch);
+ // Return to caller pushed pc, without any frame teardown.
+ __ Ret();
+
+ __ bind(&has_optimized_code_or_marker);
+ {
+ __ RecordComment("[ Optimized marker check");
+ // Drop the return address and bytecode array, rebalancing the return stack
+ // buffer by using JumpMode::kPushAndReturn. We can't leave the slot and
+ // overwrite it on return since we may do a runtime call along the way that
+ // requires the stack to only contain valid frames.
+ __ Drop(2);
+ __ movd(arg_count, saved_arg_count); // Restore actual argument count.
+ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+ saved_feedback_vector);
+ __ Trap();
+ __ RecordComment("]");
+ }
+
+ __ bind(&call_stack_guard);
+ {
+ __ RecordComment("[ Stack/interrupt call");
+ {
+ // Push the baseline code return address now, as if it had been pushed by
+ // the call to this builtin.
+ __ PushReturnAddressFrom(return_address, scratch);
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ // Save incoming new target or generator
+ __ Push(kJavaScriptCallNewTargetRegister);
+ __ SmiTag(frame_size);
+ __ Push(frame_size);
+ __ CallRuntime(Runtime::kStackGuardWithGap, 1);
+ __ Pop(kJavaScriptCallNewTargetRegister);
+ }
+
+ // Return to the pc the caller pushed, without any frame teardown.
+ __ Ret();
+ __ RecordComment("]");
+ }
+}
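A note on the folded stack check used in this prologue: since the interrupt stack limit is never laxer than the real stack limit, comparing the stack pointer minus the frame size against the interrupt limit alone covers both the interrupt check and the overflow check. A minimal C++ sketch of that idea, with illustrative names that are not part of V8:

#include <cstddef>
#include <cstdint>

// Stacks grow down, so a "limit" is the lowest stack pointer still allowed.
// The interrupt limit is >= the real limit, so one comparison covers both.
bool CanBuildFrame(uintptr_t sp, size_t frame_size, uintptr_t interrupt_limit) {
  return sp - frame_size >= interrupt_limit;
}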
namespace {
void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
@@ -1642,6 +1870,10 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
__ ret(1 * kSystemPointerSize); // Remove eax.
}
+void Builtins::Generate_TailCallOptimizedCodeSlot(MacroAssembler* masm) {
+ TailCallOptimizedCodeSlot(masm, ecx);
+}
+
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2255,6 +2487,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&stack_overflow);
{
FrameScope frame(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
__ CallRuntime(Runtime::kThrowStackOverflow);
__ int3();
}
@@ -2503,7 +2736,17 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+namespace {
+
+void Generate_OSREntry(MacroAssembler* masm, Register entry_address) {
+ // Overwrite the return address on the stack.
+ __ mov(Operand(esp, 0), entry_address);
+
+ // And "return" to the OSR entry point of the function.
+ __ ret(0);
+}
+
+void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
@@ -2517,9 +2760,11 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
__ bind(&skip);
- // Drop the handler frame that is be sitting on top of the actual
- // JavaScript frame. This is the case then OSR is triggered from bytecode.
- __ leave();
+ if (is_interpreter) {
+ // Drop the handler frame that is sitting on top of the actual
+ // JavaScript frame. This is the case when OSR is triggered from bytecode.
+ __ leave();
+ }
// Load deoptimization data from the code object.
__ mov(ecx, Operand(eax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
@@ -2533,13 +2778,20 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Compute the target address = code_obj + header_size + osr_offset
__ lea(eax, Operand(eax, ecx, times_1, Code::kHeaderSize - kHeapObjectTag));
- // Overwrite the return address on the stack.
- __ mov(Operand(esp, 0), eax);
+ Generate_OSREntry(masm, eax);
+}
- // And "return" to the OSR entry point of the function.
- __ ret(0);
+} // namespace
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+ return OnStackReplacement(masm, true);
}
+void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
+ return OnStackReplacement(masm, false);
+}
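For orientation, the OSR path above forms its continuation address from the tagged Code pointer, the Code header size, and the OSR offset taken from the deoptimization data; Generate_OSREntry then writes that address over the return slot and "returns" into it. A hedged sketch of the address arithmetic only (illustrative names, not a V8 API):

#include <cstdint>

// Mirrors the lea(eax, Operand(eax, ecx, times_1,
// Code::kHeaderSize - kHeapObjectTag)) computed above.
uintptr_t OsrTargetAddress(uintptr_t tagged_code_ptr, uint32_t osr_offset,
                           uint32_t header_size, uint32_t heap_object_tag) {
  return tagged_code_ptr - heap_object_tag + header_size + osr_offset;
}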
+
+#if V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in edi by the jump table trampoline.
// Convert to Smi for the runtime call.
@@ -2640,6 +2892,12 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
__ ret(0);
}
+void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
+ // TODO(v8:10701): Implement for this platform.
+ __ Trap();
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame) {
@@ -2876,11 +3134,6 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ ret(0);
}
-void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
- // TODO(v8:10701): Implement for this platform.
- __ Trap();
-}
-
namespace {
// Generates an Operand for saving parameters after PrepareCallApiFunction.
@@ -3828,6 +4081,147 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
+namespace {
+
+// Converts an interpreter frame into a baseline frame and continues execution
+// in baseline code (baseline code has to exist on the shared function info),
+// either at the current or next (in execution order) bytecode.
+void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
+ bool is_osr = false) {
+ __ push(kInterpreterAccumulatorRegister);
+ Label start;
+ __ bind(&start);
+
+ // Get function from the frame.
+ Register closure = eax;
+ __ mov(closure, MemOperand(ebp, StandardFrameConstants::kFunctionOffset));
+
+ // Load the feedback vector.
+ Register feedback_vector = ecx;
+ __ mov(feedback_vector,
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
+
+ Label install_baseline_code;
+ // Check if the feedback vector is valid. If not, call the runtime to
+ // install baseline code, which allocates it.
+ __ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE,
+ kInterpreterBytecodeOffsetRegister);
+ __ j(not_equal, &install_baseline_code);
+
+ // Save BytecodeOffset from the stack frame.
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ MemOperand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ // Replace BytecodeOffset with the feedback vector.
+ __ mov(MemOperand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
+ feedback_vector);
+ feedback_vector = no_reg;
+
+ // Get the Code object from the shared function info.
+ Register code_obj = esi;
+ __ mov(code_obj,
+ FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(code_obj,
+ FieldOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+ __ mov(code_obj, FieldOperand(code_obj, BaselineData::kBaselineCodeOffset));
+
+ // Compute baseline pc for bytecode offset.
+ ExternalReference get_baseline_pc_extref;
+ if (next_bytecode || is_osr) {
+ get_baseline_pc_extref =
+ ExternalReference::baseline_pc_for_next_executed_bytecode();
+ } else {
+ get_baseline_pc_extref =
+ ExternalReference::baseline_pc_for_bytecode_offset();
+ }
+ Register get_baseline_pc = ecx;
+ __ LoadAddress(get_baseline_pc, get_baseline_pc_extref);
+
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset.
+ // TODO(pthier): Investigate if it is feasible to handle this special case
+ // in TurboFan instead of here.
+ Label valid_bytecode_offset, function_entry_bytecode;
+ if (!is_osr) {
+ __ cmp(kInterpreterBytecodeOffsetRegister,
+ Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ __ j(equal, &function_entry_bytecode);
+ }
+
+ __ sub(kInterpreterBytecodeOffsetRegister,
+ Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+ __ bind(&valid_bytecode_offset);
+ // Get bytecode array from the stack frame.
+ __ mov(kInterpreterBytecodeArrayRegister,
+ MemOperand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ PrepareCallCFunction(3, eax);
+ __ mov(Operand(esp, 0 * kSystemPointerSize), code_obj);
+ __ mov(Operand(esp, 1 * kSystemPointerSize),
+ kInterpreterBytecodeOffsetRegister);
+ __ mov(Operand(esp, 2 * kSystemPointerSize),
+ kInterpreterBytecodeArrayRegister);
+ __ CallCFunction(get_baseline_pc, 3);
+ }
+ __ lea(code_obj,
+ FieldOperand(code_obj, kReturnRegister0, times_1, Code::kHeaderSize));
+ __ pop(kInterpreterAccumulatorRegister);
+
+ if (is_osr) {
+ // Reset the OSR loop nesting depth to disarm back edges.
+ // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
+ // Sparkplug here.
+ __ mov_w(FieldOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kOsrNestingLevelOffset),
+ Immediate(0));
+ Generate_OSREntry(masm, code_obj);
+ } else {
+ __ jmp(code_obj);
+ }
+ __ Trap(); // Unreachable.
+
+ if (!is_osr) {
+ __ bind(&function_entry_bytecode);
+ // If the bytecode offset is kFunctionEntryBytecodeOffset, get the start
+ // address of the first bytecode.
+ __ mov(kInterpreterBytecodeOffsetRegister, Immediate(0));
+ if (next_bytecode) {
+ __ LoadAddress(get_baseline_pc,
+ ExternalReference::baseline_pc_for_bytecode_offset());
+ }
+ __ jmp(&valid_bytecode_offset);
+ }
+
+ __ bind(&install_baseline_code);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(closure);
+ __ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ }
+ // Retry from the start after installing baseline code.
+ __ jmp(&start);
+}
+
+} // namespace
+
+void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
+ Generate_BaselineEntry(masm, false);
+}
+
+void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
+ Generate_BaselineEntry(masm, true);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
+ MacroAssembler* masm) {
+ Generate_BaselineEntry(masm, false, true);
+}
+
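As a rough mental model of Generate_BaselineEntry above: the interpreter frame is reused in place, its bytecode-offset slot is overwritten with the feedback vector, and a C helper maps the saved bytecode offset to a PC inside the baseline code, which is then jumped to. A hedged pseudo-sketch with made-up names, not V8's actual data layout:

#include <cstdint>

struct Frame {
  void* bytecode_array;
  intptr_t offset_or_feedback;  // interpreter: bytecode offset; baseline: feedback vector
};

// pc_for_offset stands in for the baseline_pc_for_bytecode_offset helper.
uintptr_t EnterBaseline(Frame* f, intptr_t bytecode_offset, void* feedback_vector,
                        uintptr_t (*pc_for_offset)(intptr_t, void*)) {
  // Baseline frames keep the feedback vector where the interpreter kept the
  // bytecode offset, so the slot is rewritten before jumping.
  f->offset_or_feedback = reinterpret_cast<intptr_t>(feedback_vector);
  // Resolve the continuation PC inside the baseline code; the caller jumps there.
  return pc_for_offset(bytecode_offset, f->bytecode_array);
}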
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
diff --git a/deps/v8/src/builtins/iterator.tq b/deps/v8/src/builtins/iterator.tq
index 2d06ebb9293..05993ea6d77 100644
--- a/deps/v8/src/builtins/iterator.tq
+++ b/deps/v8/src/builtins/iterator.tq
@@ -100,6 +100,7 @@ transitioning builtin CreateAsyncFromSyncIteratorBaseline(syncIterator: JSAny):
transitioning builtin CallIteratorWithFeedback(
context: Context, receiver: JSAny, iteratorMethod: JSAny, callSlot: Smi,
feedback: Undefined|FeedbackVector): JSAny {
+ // TODO(v8:10047): Use TaggedIndex here once TurboFan supports it.
const callSlotUnTagged: uintptr = Unsigned(SmiUntag(callSlot));
ic::CollectCallFeedback(iteratorMethod, context, feedback, callSlotUnTagged);
const iteratorCallable: Callable = Cast<Callable>(iteratorMethod)
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 235ca01f98e..1d8e80bdf87 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -23,8 +23,11 @@
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -2116,7 +2119,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Overwrite the original receiver with the (original) target.
__ StoreReceiver(a1, a0, kScratchReg);
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
+ __ LoadNativeContextSlot(a1, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -2276,7 +2279,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Overwrite the original receiver with the (original) target.
__ StoreReceiver(a1, a0, kScratchReg);
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
+ __ LoadNativeContextSlot(a1, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -2288,6 +2291,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+#if V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in t0 by the jump table trampoline.
// Convert to Smi for the runtime call.
@@ -2357,6 +2361,11 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
__ Ret();
}
+void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
+ __ Trap();
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame) {
@@ -2623,11 +2632,6 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ Ret();
}
-void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
- // TODO(v8:10701): Implement for this platform.
- __ Trap();
-}
-
namespace {
int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
@@ -3696,6 +3700,22 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
+void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
+ // Implement on this platform, https://crrev.com/c/2695591.
+ __ break_(0xCC);
+}
+
+void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
+ // Implement on this platform, https://crrev.com/c/2695591.
+ __ break_(0xCC);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
+ MacroAssembler* masm) {
+ // Implement on this platform, https://crrev.com/c/2800112.
+ __ break_(0xCC);
+}
+
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 55c8eb70742..c029188f146 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -23,8 +23,11 @@
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -2183,7 +2186,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Overwrite the original receiver with the (original) target.
__ StoreReceiver(a1, a0, kScratchReg);
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
+ __ LoadNativeContextSlot(a1, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -2342,7 +2345,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Overwrite the original receiver with the (original) target.
__ StoreReceiver(a1, a0, kScratchReg);
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
+ __ LoadNativeContextSlot(a1, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -2354,6 +2357,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+#if V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in t0 by the jump table trampoline.
// Convert to Smi for the runtime call
@@ -2449,6 +2453,11 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
__ Ret();
}
+void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
+ __ Trap();
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame) {
@@ -2716,11 +2725,6 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ Ret();
}
-void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
- // TODO(v8:10701): Implement for this platform.
- __ Trap();
-}
-
namespace {
int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
@@ -3283,6 +3287,22 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
+void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
+ // Implement on this platform, https://crrev.com/c/2695591.
+ __ break_(0xCC);
+}
+
+void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
+ // Implement on this platform, https://crrev.com/c/2695591.
+ __ break_(0xCC);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
+ MacroAssembler* masm) {
+ // Implement on this platform, https://crrev.com/c/2800112.
+ __ break_(0xCC);
+}
+
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
diff --git a/deps/v8/src/builtins/object.tq b/deps/v8/src/builtins/object.tq
index f0e9e1fb15d..53065ded5f1 100644
--- a/deps/v8/src/builtins/object.tq
+++ b/deps/v8/src/builtins/object.tq
@@ -94,14 +94,14 @@ transitioning builtin CreateObjectWithoutProperties(implicit context: Context)(
prototype: JSAny): JSAny {
try {
let map: Map;
- let properties: NameDictionary|OrderedNameDictionary|EmptyFixedArray;
+ let properties: NameDictionary|SwissNameDictionary|EmptyFixedArray;
typeswitch (prototype) {
case (Null): {
map = *NativeContextSlot(
ContextSlot::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP);
if (kDictModePrototypes) {
- properties = AllocateOrderedNameDictionary(
- kOrderedNameDictionaryInitialCapacity);
+ properties =
+ AllocateSwissNameDictionary(kSwissNameDictionaryInitialCapacity);
} else {
properties = AllocateNameDictionary(kNameDictionaryInitialCapacity);
}
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 8fe4b004b40..bc467c9ff9f 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -21,8 +21,11 @@
#include "src/objects/js-generator.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -2263,7 +2266,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Overwrite the original receiver with the (original) target.
__ StoreReceiver(r4, r3, r8);
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r4);
+ __ LoadNativeContextSlot(r4, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -2379,7 +2382,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Overwrite the original receiver with the (original) target.
__ StoreReceiver(r4, r3, r8);
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r4);
+ __ LoadNativeContextSlot(r4, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -2391,6 +2394,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+#if V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in a register by the jump table trampoline.
// Convert to Smi for the runtime call.
@@ -2503,6 +2507,12 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
__ Ret();
}
+void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
+ // TODO(v8:10701): Implement for this platform.
+ __ Trap();
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame) {
@@ -2800,11 +2810,6 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ Ret();
}
-void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
- // TODO(v8:10701): Implement for this platform.
- __ Trap();
-}
-
namespace {
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
@@ -3427,6 +3432,22 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
+void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
+ // Implement on this platform, https://crrev.com/c/2695591.
+ __ bkpt(0);
+}
+
+void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
+ // Implement on this platform, https://crrev.com/c/2695591.
+ __ bkpt(0);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
+ MacroAssembler* masm) {
+ // Implement on this platform, https://crrev.com/c/2800112.
+ __ bkpt(0);
+}
+
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
diff --git a/deps/v8/src/builtins/promise-abstract-operations.tq b/deps/v8/src/builtins/promise-abstract-operations.tq
index 0e435afad9b..b7a1b571e64 100644
--- a/deps/v8/src/builtins/promise-abstract-operations.tq
+++ b/deps/v8/src/builtins/promise-abstract-operations.tq
@@ -196,8 +196,6 @@ FulfillPromise(implicit context: Context)(
// Assert: The value of promise.[[PromiseState]] is "pending".
assert(promise.Status() == PromiseState::kPending);
- RunContextPromiseHookResolve(promise);
-
// 2. Let reactions be promise.[[PromiseFulfillReactions]].
const reactions =
UnsafeCast<(Zero | PromiseReaction)>(promise.reactions_or_result);
@@ -216,24 +214,17 @@ FulfillPromise(implicit context: Context)(
}
extern macro PromiseBuiltinsAssembler::
- IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(): bool;
-
-extern macro PromiseBuiltinsAssembler::
- IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(uint32):
- bool;
+ IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(): bool;
// https://tc39.es/ecma262/#sec-rejectpromise
transitioning builtin
RejectPromise(implicit context: Context)(
promise: JSPromise, reason: JSAny, debugEvent: Boolean): JSAny {
- const promiseHookFlags = PromiseHookFlags();
-
// If promise hook is enabled or the debugger is active, let
// the runtime handle this operation, which greatly reduces
// the complexity here and also avoids a couple of back and
// forth between JavaScript and C++ land.
- if (IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
- promiseHookFlags) ||
+ if (IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() ||
!promise.HasHandler()) {
// 7. If promise.[[PromiseIsHandled]] is false, perform
// HostPromiseRejectionTracker(promise, "reject").
@@ -242,8 +233,6 @@ RejectPromise(implicit context: Context)(
return runtime::RejectPromise(promise, reason, debugEvent);
}
- RunContextPromiseHookResolve(promise, promiseHookFlags);
-
// 2. Let reactions be promise.[[PromiseRejectReactions]].
const reactions =
UnsafeCast<(Zero | PromiseReaction)>(promise.reactions_or_result);
diff --git a/deps/v8/src/builtins/promise-all-element-closure.tq b/deps/v8/src/builtins/promise-all-element-closure.tq
index bf07ff62274..16e91dae06b 100644
--- a/deps/v8/src/builtins/promise-all-element-closure.tq
+++ b/deps/v8/src/builtins/promise-all-element-closure.tq
@@ -60,7 +60,7 @@ struct PromiseAllSettledWrapResultAsRejectedFunctor {
}
}
-extern macro LoadJSReceiverIdentityHash(Object): intptr labels IfNoHash;
+extern macro LoadJSReceiverIdentityHash(JSReceiver): intptr labels IfNoHash;
type PromiseAllResolveElementContext extends FunctionContext;
extern enum PromiseAllResolveElementContextSlots extends intptr
diff --git a/deps/v8/src/builtins/promise-all.tq b/deps/v8/src/builtins/promise-all.tq
index 5ab64a167d3..41dee8b9e76 100644
--- a/deps/v8/src/builtins/promise-all.tq
+++ b/deps/v8/src/builtins/promise-all.tq
@@ -231,7 +231,8 @@ Reject(Object) {
// the PromiseReaction (aka we can pass undefined to
// PerformPromiseThen), since this is only necessary for DevTools and
// PromiseHooks.
- if (promiseResolveFunction != Undefined || NeedsAnyPromiseHooks() ||
+ if (promiseResolveFunction != Undefined ||
+ IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() ||
IsPromiseSpeciesProtectorCellInvalid() || Is<Smi>(nextValue) ||
!IsPromiseThenLookupChainIntact(
nativeContext, UnsafeCast<HeapObject>(nextValue).map)) {
diff --git a/deps/v8/src/builtins/promise-constructor.tq b/deps/v8/src/builtins/promise-constructor.tq
index b5f7292a77c..3c5a5e560d4 100644
--- a/deps/v8/src/builtins/promise-constructor.tq
+++ b/deps/v8/src/builtins/promise-constructor.tq
@@ -40,8 +40,7 @@ extern macro ConstructorBuiltinsAssembler::FastNewObject(
Context, JSFunction, JSReceiver): JSObject;
extern macro
-PromiseBuiltinsAssembler::IsIsolatePromiseHookEnabledOrHasAsyncEventDelegate(
- uint32): bool;
+PromiseBuiltinsAssembler::IsPromiseHookEnabledOrHasAsyncEventDelegate(): bool;
// https://tc39.es/ecma262/#sec-promise-executor
transitioning javascript builtin
@@ -74,7 +73,9 @@ PromiseConstructor(
result = UnsafeCast<JSPromise>(
FastNewObject(context, promiseFun, UnsafeCast<JSReceiver>(newTarget)));
PromiseInit(result);
- RunAnyPromiseHookInit(result, Undefined);
+ if (IsPromiseHookEnabledOrHasAsyncEventDelegate()) {
+ runtime::PromiseHookInit(result, Undefined);
+ }
}
const isDebugActive = IsDebugActive();
diff --git a/deps/v8/src/builtins/promise-jobs.tq b/deps/v8/src/builtins/promise-jobs.tq
index 77d2e7cf9c4..80e98f373b9 100644
--- a/deps/v8/src/builtins/promise-jobs.tq
+++ b/deps/v8/src/builtins/promise-jobs.tq
@@ -7,7 +7,6 @@
// https://tc39.es/ecma262/#sec-promise-jobs
namespace promise {
extern macro IsJSPromiseMap(Map): bool;
-extern macro NeedsAnyPromiseHooks(): bool;
// https://tc39.es/ecma262/#sec-promiseresolvethenablejob
transitioning builtin
@@ -26,7 +25,7 @@ PromiseResolveThenableJob(implicit context: Context)(
const promiseThen = *NativeContextSlot(ContextSlot::PROMISE_THEN_INDEX);
const thenableMap = thenable.map;
if (TaggedEqual(then, promiseThen) && IsJSPromiseMap(thenableMap) &&
- !NeedsAnyPromiseHooks() &&
+ !IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() &&
IsPromiseSpeciesLookupChainIntact(nativeContext, thenableMap)) {
// We know that the {thenable} is a JSPromise, which doesn't require
// any special treatment and that {then} corresponds to the initial
diff --git a/deps/v8/src/builtins/promise-misc.tq b/deps/v8/src/builtins/promise-misc.tq
index 0eae717b3fc..67e5e38687d 100644
--- a/deps/v8/src/builtins/promise-misc.tq
+++ b/deps/v8/src/builtins/promise-misc.tq
@@ -8,9 +8,6 @@
namespace runtime {
extern transitioning runtime
AllowDynamicFunction(implicit context: Context)(JSAny): JSAny;
-
-extern transitioning runtime
-ReportMessageFromMicrotask(implicit context: Context)(JSAny): JSAny;
}
// Unsafe functions that should be used very carefully.
@@ -20,12 +17,6 @@ extern macro PromiseBuiltinsAssembler::ZeroOutEmbedderOffsets(JSPromise): void;
extern macro PromiseBuiltinsAssembler::AllocateJSPromise(Context): HeapObject;
}
-extern macro
-PromiseBuiltinsAssembler::IsContextPromiseHookEnabled(uint32): bool;
-
-extern macro
-PromiseBuiltinsAssembler::PromiseHookFlags(): uint32;
-
namespace promise {
extern macro IsFunctionWithPrototypeSlotMap(Map): bool;
@@ -99,109 +90,6 @@ macro NewPromiseRejectReactionJobTask(implicit context: Context)(
};
}
-@export
-transitioning macro RunContextPromiseHookInit(implicit context: Context)(
- promise: JSPromise, parent: Object) {
- const maybeHook = *NativeContextSlot(
- ContextSlot::PROMISE_HOOK_INIT_FUNCTION_INDEX);
- const hook = Cast<Callable>(maybeHook) otherwise return;
- const parentObject = Is<JSPromise>(parent) ? Cast<JSPromise>(parent)
- otherwise unreachable: Undefined;
-
- try {
- Call(context, hook, Undefined, promise, parentObject);
- } catch (e) {
- runtime::ReportMessageFromMicrotask(e);
- }
-}
-
-@export
-transitioning macro RunContextPromiseHookResolve(implicit context: Context)(
- promise: JSPromise) {
- RunContextPromiseHook(
- ContextSlot::PROMISE_HOOK_RESOLVE_FUNCTION_INDEX, promise,
- PromiseHookFlags());
-}
-
-@export
-transitioning macro RunContextPromiseHookResolve(implicit context: Context)(
- promise: JSPromise, flags: uint32) {
- RunContextPromiseHook(
- ContextSlot::PROMISE_HOOK_RESOLVE_FUNCTION_INDEX, promise, flags);
-}
-
-@export
-transitioning macro RunContextPromiseHookBefore(implicit context: Context)(
- promiseOrCapability: JSPromise|PromiseCapability|Undefined) {
- RunContextPromiseHook(
- ContextSlot::PROMISE_HOOK_BEFORE_FUNCTION_INDEX, promiseOrCapability,
- PromiseHookFlags());
-}
-
-@export
-transitioning macro RunContextPromiseHookBefore(implicit context: Context)(
- promiseOrCapability: JSPromise|PromiseCapability|Undefined, flags: uint32) {
- RunContextPromiseHook(
- ContextSlot::PROMISE_HOOK_BEFORE_FUNCTION_INDEX, promiseOrCapability,
- flags);
-}
-
-@export
-transitioning macro RunContextPromiseHookAfter(implicit context: Context)(
- promiseOrCapability: JSPromise|PromiseCapability|Undefined) {
- RunContextPromiseHook(
- ContextSlot::PROMISE_HOOK_AFTER_FUNCTION_INDEX, promiseOrCapability,
- PromiseHookFlags());
-}
-
-@export
-transitioning macro RunContextPromiseHookAfter(implicit context: Context)(
- promiseOrCapability: JSPromise|PromiseCapability|Undefined, flags: uint32) {
- RunContextPromiseHook(
- ContextSlot::PROMISE_HOOK_AFTER_FUNCTION_INDEX, promiseOrCapability,
- flags);
-}
-
-transitioning macro RunContextPromiseHook(implicit context: Context)(
- slot: Slot<NativeContext, Undefined|Callable>,
- promiseOrCapability: JSPromise|PromiseCapability|Undefined, flags: uint32) {
- if (!IsContextPromiseHookEnabled(flags)) return;
- const maybeHook = *NativeContextSlot(slot);
- const hook = Cast<Callable>(maybeHook) otherwise return;
-
- let promise: JSPromise;
- typeswitch (promiseOrCapability) {
- case (jspromise: JSPromise): {
- promise = jspromise;
- }
- case (capability: PromiseCapability): {
- promise = Cast<JSPromise>(capability.promise) otherwise return;
- }
- case (Undefined): {
- return;
- }
- }
-
- try {
- Call(context, hook, Undefined, promise);
- } catch (e) {
- runtime::ReportMessageFromMicrotask(e);
- }
-}
-
-transitioning macro RunAnyPromiseHookInit(implicit context: Context)(
- promise: JSPromise, parent: Object) {
- const promiseHookFlags = PromiseHookFlags();
- // Fast return if no hooks are set.
- if (promiseHookFlags == 0) return;
- if (IsContextPromiseHookEnabled(promiseHookFlags)) {
- RunContextPromiseHookInit(promise, parent);
- }
- if (IsIsolatePromiseHookEnabledOrHasAsyncEventDelegate(promiseHookFlags)) {
- runtime::PromiseHookInit(promise, parent);
- }
-}
-
// These allocate and initialize a promise with pending state and
// undefined fields.
//
@@ -212,7 +100,9 @@ transitioning macro NewJSPromise(implicit context: Context)(parent: Object):
JSPromise {
const instance = InnerNewJSPromise();
PromiseInit(instance);
- RunAnyPromiseHookInit(instance, parent);
+ if (IsPromiseHookEnabledOrHasAsyncEventDelegate()) {
+ runtime::PromiseHookInit(instance, parent);
+ }
return instance;
}
@@ -234,7 +124,10 @@ transitioning macro NewJSPromise(implicit context: Context)(
instance.reactions_or_result = result;
instance.SetStatus(status);
promise_internal::ZeroOutEmbedderOffsets(instance);
- RunAnyPromiseHookInit(instance, Undefined);
+
+ if (IsPromiseHookEnabledOrHasAsyncEventDelegate()) {
+ runtime::PromiseHookInit(instance, Undefined);
+ }
return instance;
}
diff --git a/deps/v8/src/builtins/promise-resolve.tq b/deps/v8/src/builtins/promise-resolve.tq
index fa3d19411fc..e933dfbae0a 100644
--- a/deps/v8/src/builtins/promise-resolve.tq
+++ b/deps/v8/src/builtins/promise-resolve.tq
@@ -30,8 +30,7 @@ transitioning builtin
PromiseResolve(implicit context: Context)(
constructor: JSReceiver, value: JSAny): JSAny {
const nativeContext = LoadNativeContext(context);
- const promiseFun = *NativeContextSlot(
- nativeContext, ContextSlot::PROMISE_FUNCTION_INDEX);
+ const promiseFun = *NativeContextSlot(ContextSlot::PROMISE_FUNCTION_INDEX);
try {
// Check if {value} is a JSPromise.
const value = Cast<JSPromise>(value) otherwise NeedToAllocate;
@@ -41,8 +40,7 @@ PromiseResolve(implicit context: Context)(
// intact, as that guards the lookup path for "constructor" on
// JSPromise instances which have the (initial) Promise.prototype.
const promisePrototype =
- *NativeContextSlot(
- nativeContext, ContextSlot::PROMISE_PROTOTYPE_INDEX);
+ *NativeContextSlot(ContextSlot::PROMISE_PROTOTYPE_INDEX);
// Check that Torque load elimination works.
static_assert(nativeContext == LoadNativeContext(context));
if (value.map.prototype != promisePrototype) {
@@ -99,7 +97,7 @@ ResolvePromise(implicit context: Context)(
// We also let the runtime handle it if promise == resolution.
// We can use pointer comparison here, since the {promise} is guaranteed
// to be a JSPromise inside this function and thus is reference comparable.
- if (IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() ||
+ if (IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() ||
TaggedEqual(promise, resolution))
deferred {
return runtime::ResolvePromise(promise, resolution);
@@ -141,8 +139,7 @@ ResolvePromise(implicit context: Context)(
assert(IsJSReceiverMap(resolutionMap));
assert(!IsPromiseThenProtectorCellInvalid());
if (resolutionMap ==
- *NativeContextSlot(
- nativeContext, ContextSlot::ITERATOR_RESULT_MAP_INDEX)) {
+ *NativeContextSlot(ContextSlot::ITERATOR_RESULT_MAP_INDEX)) {
return FulfillPromise(promise, resolution);
} else {
goto Slow;
@@ -150,11 +147,10 @@ ResolvePromise(implicit context: Context)(
}
const promisePrototype =
- *NativeContextSlot(
- nativeContext, ContextSlot::PROMISE_PROTOTYPE_INDEX);
+ *NativeContextSlot(ContextSlot::PROMISE_PROTOTYPE_INDEX);
if (resolutionMap.prototype == promisePrototype) {
// The {resolution} is a native Promise in this case.
- then = *NativeContextSlot(nativeContext, ContextSlot::PROMISE_THEN_INDEX);
+ then = *NativeContextSlot(ContextSlot::PROMISE_THEN_INDEX);
// Check that Torque load elimination works.
static_assert(nativeContext == LoadNativeContext(context));
goto Enqueue;
diff --git a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
index 685f575598f..04907f5268a 100644
--- a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
+++ b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
@@ -2183,7 +2183,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Overwrite the original receiver with the (original) target.
__ StoreReceiver(a1, a0, kScratchReg);
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
+ __ LoadNativeContextSlot(a1, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -2342,7 +2342,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Overwrite the original receiver with the (original) target.
__ StoreReceiver(a1, a0, kScratchReg);
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
+ __ LoadNativeContextSlot(a1, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 95dbb9a9b60..7711af6e901 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -21,8 +21,11 @@
#include "src/objects/js-generator.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -2313,7 +2316,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Overwrite the original receiver with the (original) target.
__ StoreReceiver(r3, r2, r7);
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r3);
+ __ LoadNativeContextSlot(r3, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -2428,7 +2431,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Overwrite the original receiver with the (original) target.
__ StoreReceiver(r3, r2, r7);
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r3);
+ __ LoadNativeContextSlot(r3, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -2440,6 +2443,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+#if V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in a register by the jump table trampoline.
// Convert to Smi for the runtime call.
@@ -2541,6 +2545,12 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
__ Ret();
}
+void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
+ // TODO(v8:10701): Implement for this platform.
+ __ Trap();
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame) {
@@ -2830,11 +2840,6 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ Ret();
}
-void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
- // TODO(v8:10701): Implement for this platform.
- __ Trap();
-}
-
namespace {
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
@@ -3426,6 +3431,22 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
+void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
+ // Implement on this platform, https://crrev.com/c/2695591.
+ __ bkpt(0);
+}
+
+void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
+ // Implement on this platform, https://crrev.com/c/2695591.
+ __ bkpt(0);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
+ MacroAssembler* masm) {
+ // Implement on this platform, https://crrev.com/c/2800112.
+ __ bkpt(0);
+}
+
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
diff --git a/deps/v8/src/builtins/torque-internal.tq b/deps/v8/src/builtins/torque-internal.tq
index c3d7ec717a6..d9f05f55331 100644
--- a/deps/v8/src/builtins/torque-internal.tq
+++ b/deps/v8/src/builtins/torque-internal.tq
@@ -304,6 +304,15 @@ extern macro StaticAssert(bool, constexpr string);
// field x from object o is `(&o.x).length`.
intrinsic %IndexedFieldLength<T: type>(o: T, f: constexpr string);
+// If field x is defined as optional, then &o.x returns a reference to the field
+// or crashes the program (unreachable) if the field is not present. Usually
+// that's the most convenient behavior, but in rare cases such as the
+// implementation of the dot operator, we may instead need to get a Slice to the
+// optional field, which is either length zero or one depending on whether the
+// field is present. This intrinsic provides Slices for both indexed fields
+// (equivalent to &o.x) and optional fields.
+intrinsic %FieldSlice<T: type>(o: T, f: constexpr string);
+
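The slice-of-length-zero-or-one behaviour described above is essentially an optional value viewed as a span; a hedged C++ analogue (not Torque, purely illustrative):

#include <span>

// A present field yields a one-element view; an absent one yields an empty
// view instead of the crash a direct reference would cause.
template <typename T>
std::span<T> OptionalFieldSlice(T* field_or_null) {
  return field_or_null ? std::span<T>(field_or_null, 1) : std::span<T>();
}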
} // namespace torque_internal
// Indicates that an array-field should not be initialized.
diff --git a/deps/v8/src/builtins/wasm.tq b/deps/v8/src/builtins/wasm.tq
index 970fbd08629..05a15162040 100644
--- a/deps/v8/src/builtins/wasm.tq
+++ b/deps/v8/src/builtins/wasm.tq
@@ -11,6 +11,10 @@ extern runtime WasmTableInit(
Context, WasmInstanceObject, Object, Object, Smi, Smi, Smi): JSAny;
extern runtime WasmTableCopy(
Context, WasmInstanceObject, Object, Object, Smi, Smi, Smi): JSAny;
+extern runtime WasmTableFill(
+ Context, WasmInstanceObject, Smi, Smi, Object, Smi): JSAny;
+extern runtime WasmTableGrow(
+ Context, WasmInstanceObject, Smi, Object, Smi): Smi;
extern runtime WasmFunctionTableGet(
Context, WasmInstanceObject, Smi, Smi): JSAny;
extern runtime WasmFunctionTableSet(
@@ -39,7 +43,10 @@ extern macro Allocate(intptr, constexpr AllocationFlag): HeapObject;
}
namespace wasm {
-const kFuncTableType: constexpr int31 generates 'wasm::HeapType::kFunc';
+const kExternTableType: constexpr int31
+ generates 'wasm::kWasmExternRef.raw_bit_field()';
+const kExternNonNullTableType: constexpr int31
+ generates 'wasm::kWasmExternNonNullableRef.raw_bit_field()';
extern macro WasmBuiltinsAssembler::LoadInstanceFromFrame(): WasmInstanceObject;
@@ -113,6 +120,34 @@ builtin WasmTableCopy(
}
}
+builtin WasmTableFill(
+ table: Smi, startRaw: uint32, countRaw: uint32, value: Object): JSAny {
+ try {
+ const instance: WasmInstanceObject = LoadInstanceFromFrame();
+ const start: Smi =
+ Convert<PositiveSmi>(startRaw) otherwise TableOutOfBounds;
+ const count: Smi =
+ Convert<PositiveSmi>(countRaw) otherwise TableOutOfBounds;
+ tail runtime::WasmTableFill(
+ LoadContextFromInstance(instance), instance, table, start, value,
+ count);
+ } label TableOutOfBounds deferred {
+ tail ThrowWasmTrapTableOutOfBounds();
+ }
+}
+
+builtin WasmTableGrow(table: Smi, deltaRaw: uint32, value: Object): Smi {
+ try {
+ const instance: WasmInstanceObject = LoadInstanceFromFrame();
+ const delta: Smi =
+ Convert<PositiveSmi>(deltaRaw) otherwise TableOutOfBounds;
+ tail runtime::WasmTableGrow(
+ LoadContextFromInstance(instance), instance, table, value, delta);
+ } label TableOutOfBounds deferred {
+ return -1;
+ }
+}
+
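Both new table builtins above funnel untrusted uint32 arguments through Convert<PositiveSmi> and take the failure path when a value does not fit. A hedged C++ sketch of that clamping; the 31-bit Smi width is an assumption here, not something this diff states:

#include <cstdint>
#include <optional>

// Values that do not fit a positive Smi take the TableOutOfBounds path
// instead of being handed to the runtime.
std::optional<int32_t> ToPositiveSmi(uint32_t raw) {
  constexpr uint32_t kMaxPositiveSmi = (1u << 30) - 1;  // assumes 31-bit Smis
  if (raw > kMaxPositiveSmi) return std::nullopt;
  return static_cast<int32_t>(raw);
}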
builtin WasmTableGet(tableIndex: intptr, index: int32): Object {
const instance: WasmInstanceObject = LoadInstanceFromFrame();
const entryIndex: intptr = ChangeInt32ToIntPtr(index);
@@ -159,8 +194,12 @@ builtin WasmTableSet(tableIndex: intptr, index: int32, value: Object): Object {
// Fall back to the runtime to set funcrefs, since we have to update
// function dispatch tables.
+ // TODO(7748): Update this if further table types are supported.
const tableType: Smi = table.raw_type;
- if (tableType == SmiConstant(kFuncTableType)) goto CallRuntime;
+ if (tableType != SmiConstant(kExternTableType) &&
+ tableType != SmiConstant(kExternNonNullTableType)) {
+ goto CallRuntime;
+ }
const entriesCount: intptr = Convert<intptr, Smi>(table.current_length);
if (entryIndex >= entriesCount) goto IndexOutOfRange;
@@ -373,15 +412,13 @@ transitioning builtin WasmGetOwnProperty(implicit context: Context)(
try {
TryHasOwnProperty(
receiver, receiver.map, receiver.instanceType, uniqueName)
- otherwise Found, NotFound, Bailout;
+ otherwise Found, NotFound, NotFound;
} label Found {
tail GetPropertyWithReceiver(
receiver, uniqueName, receiver, SmiConstant(kReturnUndefined));
}
} label NotFound deferred {
return Undefined;
- } label Bailout deferred {
- unreachable;
}
}
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index 58a897821d6..5b5e964ef95 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -26,11 +26,14 @@
#include "src/objects/js-generator.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/baseline/liftoff-assembler-defs.h"
#include "src/wasm/object-access.h"
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -377,6 +380,12 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Initialize the root register.
// C calling convention. The first argument is passed in arg_reg_1.
__ movq(kRootRegister, arg_reg_1);
+
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ // Initialize the pointer cage base register.
+ // TODO(syg): Actually make a cage.
+ __ movq(kPointerCageBaseRegister, arg_reg_1);
+#endif
}
// Save copies of the top frame descriptor on the stack.
@@ -1613,7 +1622,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
Register closure = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kClosure);
// Load the feedback vector from the closure.
- Register feedback_vector = rbx;
+ Register feedback_vector = r11;
__ LoadTaggedPointerField(
feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(feedback_vector,
@@ -1633,7 +1642,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ incl(
FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));
- Register return_address = r12;
+ Register return_address = r15;
__ RecordComment("[ Frame Setup");
// Save the return address, so that we can push it to the end of the newly
@@ -1681,6 +1690,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ RecordComment("[ Stack/interrupt check");
Label call_stack_guard;
+ Register frame_size = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
{
// Stack check. This folds the checks for both the interrupt stack limit
// check and the real stack limit into one by just checking for the
@@ -1690,9 +1701,6 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
//
// TODO(v8:11429): Backport this folded check to the
// InterpreterEntryTrampoline.
- Register frame_size = r11;
- __ movzxwl(frame_size,
- FieldOperand(bytecode_array, BytecodeArray::kFrameSizeOffset));
__ Move(kScratchRegister, rsp);
DCHECK_NE(frame_size, new_target);
__ subq(kScratchRegister, frame_size);
@@ -1731,7 +1739,9 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::INTERNAL);
// Save incoming new target or generator
__ Push(new_target);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ SmiTag(frame_size);
+ __ Push(frame_size);
+ __ CallRuntime(Runtime::kStackGuardWithGap, 1);
__ Pop(new_target);
}
@@ -2498,7 +2508,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Overwrite the original receiver with the (original) target.
__ movq(args.GetReceiverOperand(), rdi);
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, rdi);
+ __ LoadNativeContextSlot(rdi, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -2608,7 +2618,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Overwrite the original receiver with the (original) target.
__ movq(args.GetReceiverOperand(), rdi);
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, rdi);
+ __ LoadNativeContextSlot(rdi, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -2621,6 +2631,15 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
}
namespace {
+
+void Generate_OSREntry(MacroAssembler* masm, Register entry_address) {
+ // Overwrite the return address on the stack.
+ __ movq(StackOperandForReturnAddress(0), entry_address);
+
+ // And "return" to the OSR entry point of the function.
+ __ ret(0);
+}
+
void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -2653,12 +2672,9 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
// Compute the target address = code_obj + header_size + osr_offset
__ leaq(rax, FieldOperand(rax, rbx, times_1, Code::kHeaderSize));
- // Overwrite the return address on the stack.
- __ movq(StackOperandForReturnAddress(0), rax);
-
- // And "return" to the OSR entry point of the function.
- __ ret(0);
+ Generate_OSREntry(masm, rax);
}
+
} // namespace
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
@@ -2669,6 +2685,7 @@ void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
return OnStackReplacement(masm, false);
}
+#if V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was pushed to the stack by the caller as int32.
__ Pop(r11);
@@ -2766,244 +2783,6 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
__ ret(0);
}
-void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
- SaveFPRegsMode save_doubles, ArgvMode argv_mode,
- bool builtin_exit_frame) {
- // rax: number of arguments including receiver
- // rbx: pointer to C function (C callee-saved)
- // rbp: frame pointer of calling JS frame (restored after C call)
- // rsp: stack pointer (restored after C call)
- // rsi: current context (restored)
- //
- // If argv_mode == kArgvInRegister:
- // r15: pointer to the first argument
-
-#ifdef V8_TARGET_OS_WIN
- // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9. It requires the
- // stack to be aligned to 16 bytes. It only allows a single-word to be
- // returned in register rax. Larger return sizes must be written to an address
- // passed as a hidden first argument.
- const Register kCCallArg0 = rcx;
- const Register kCCallArg1 = rdx;
- const Register kCCallArg2 = r8;
- const Register kCCallArg3 = r9;
- const int kArgExtraStackSpace = 2;
- const int kMaxRegisterResultSize = 1;
-#else
- // GCC / Clang passes arguments in rdi, rsi, rdx, rcx, r8, r9. Simple results
- // are returned in rax, and a struct of two pointers are returned in rax+rdx.
- // Larger return sizes must be written to an address passed as a hidden first
- // argument.
- const Register kCCallArg0 = rdi;
- const Register kCCallArg1 = rsi;
- const Register kCCallArg2 = rdx;
- const Register kCCallArg3 = rcx;
- const int kArgExtraStackSpace = 0;
- const int kMaxRegisterResultSize = 2;
-#endif // V8_TARGET_OS_WIN
-
- // Enter the exit frame that transitions from JavaScript to C++.
- int arg_stack_space =
- kArgExtraStackSpace +
- (result_size <= kMaxRegisterResultSize ? 0 : result_size);
- if (argv_mode == kArgvInRegister) {
- DCHECK(save_doubles == kDontSaveFPRegs);
- DCHECK(!builtin_exit_frame);
- __ EnterApiExitFrame(arg_stack_space);
- // Move argc into r14 (argv is already in r15).
- __ movq(r14, rax);
- } else {
- __ EnterExitFrame(
- arg_stack_space, save_doubles == kSaveFPRegs,
- builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
- }
-
- // rbx: pointer to builtin function (C callee-saved).
- // rbp: frame pointer of exit frame (restored after C call).
- // rsp: stack pointer (restored after C call).
- // r14: number of arguments including receiver (C callee-saved).
- // r15: argv pointer (C callee-saved).
-
- // Check stack alignment.
- if (FLAG_debug_code) {
- __ CheckStackAlignment();
- }
-
- // Call C function. The arguments object will be created by stubs declared by
- // DECLARE_RUNTIME_FUNCTION().
- if (result_size <= kMaxRegisterResultSize) {
- // Pass a pointer to the Arguments object as the first argument.
- // Return result in single register (rax), or a register pair (rax, rdx).
- __ movq(kCCallArg0, r14); // argc.
- __ movq(kCCallArg1, r15); // argv.
- __ Move(kCCallArg2, ExternalReference::isolate_address(masm->isolate()));
- } else {
- DCHECK_LE(result_size, 2);
- // Pass a pointer to the result location as the first argument.
- __ leaq(kCCallArg0, StackSpaceOperand(kArgExtraStackSpace));
- // Pass a pointer to the Arguments object as the second argument.
- __ movq(kCCallArg1, r14); // argc.
- __ movq(kCCallArg2, r15); // argv.
- __ Move(kCCallArg3, ExternalReference::isolate_address(masm->isolate()));
- }
- __ call(rbx);
-
- if (result_size > kMaxRegisterResultSize) {
- // Read result values stored on stack. Result is stored
- // above the the two Arguments object slots on Win64.
- DCHECK_LE(result_size, 2);
- __ movq(kReturnRegister0, StackSpaceOperand(kArgExtraStackSpace + 0));
- __ movq(kReturnRegister1, StackSpaceOperand(kArgExtraStackSpace + 1));
- }
- // Result is in rax or rdx:rax - do not destroy these registers!
-
- // Check result for exception sentinel.
- Label exception_returned;
- __ CompareRoot(rax, RootIndex::kException);
- __ j(equal, &exception_returned);
-
- // Check that there is no pending exception, otherwise we
- // should have returned the exception sentinel.
- if (FLAG_debug_code) {
- Label okay;
- __ LoadRoot(r14, RootIndex::kTheHoleValue);
- ExternalReference pending_exception_address = ExternalReference::Create(
- IsolateAddressId::kPendingExceptionAddress, masm->isolate());
- Operand pending_exception_operand =
- masm->ExternalReferenceAsOperand(pending_exception_address);
- __ cmp_tagged(r14, pending_exception_operand);
- __ j(equal, &okay, Label::kNear);
- __ int3();
- __ bind(&okay);
- }
-
- // Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(save_doubles == kSaveFPRegs, argv_mode == kArgvOnStack);
- __ ret(0);
-
- // Handling of exception.
- __ bind(&exception_returned);
-
- ExternalReference pending_handler_context_address = ExternalReference::Create(
- IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
- ExternalReference pending_handler_entrypoint_address =
- ExternalReference::Create(
- IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
- ExternalReference pending_handler_fp_address = ExternalReference::Create(
- IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
- ExternalReference pending_handler_sp_address = ExternalReference::Create(
- IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
-
- // Ask the runtime for help to determine the handler. This will set rax to
- // contain the current pending exception, don't clobber it.
- ExternalReference find_handler =
- ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ movq(arg_reg_1, Immediate(0)); // argc.
- __ movq(arg_reg_2, Immediate(0)); // argv.
- __ Move(arg_reg_3, ExternalReference::isolate_address(masm->isolate()));
- __ PrepareCallCFunction(3);
- __ CallCFunction(find_handler, 3);
- }
- // Retrieve the handler context, SP and FP.
- __ movq(rsi,
- masm->ExternalReferenceAsOperand(pending_handler_context_address));
- __ movq(rsp, masm->ExternalReferenceAsOperand(pending_handler_sp_address));
- __ movq(rbp, masm->ExternalReferenceAsOperand(pending_handler_fp_address));
-
- // If the handler is a JS frame, restore the context to the frame. Note that
- // the context will be set to (rsi == 0) for non-JS frames.
- Label skip;
- __ testq(rsi, rsi);
- __ j(zero, &skip, Label::kNear);
- __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
- __ bind(&skip);
-
- // Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
- // with both configurations. It is safe to always do this, because the
- // underlying register is caller-saved and can be arbitrarily clobbered.
- __ ResetSpeculationPoisonRegister();
-
- // Clear c_entry_fp, like we do in `LeaveExitFrame`.
- ExternalReference c_entry_fp_address = ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, masm->isolate());
- Operand c_entry_fp_operand =
- masm->ExternalReferenceAsOperand(c_entry_fp_address);
- __ movq(c_entry_fp_operand, Immediate(0));
-
- // Compute the handler entry address and jump to it.
- __ movq(rdi,
- masm->ExternalReferenceAsOperand(pending_handler_entrypoint_address));
- __ jmp(rdi);
-}
-
-void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
- Label check_negative, process_64_bits, done;
-
- // Account for return address and saved regs.
- const int kArgumentOffset = 4 * kSystemPointerSize;
-
- MemOperand mantissa_operand(MemOperand(rsp, kArgumentOffset));
- MemOperand exponent_operand(
- MemOperand(rsp, kArgumentOffset + kDoubleSize / 2));
-
- // The result is returned on the stack.
- MemOperand return_operand = mantissa_operand;
-
- Register scratch1 = rbx;
-
- // Since we must use rcx for shifts below, use some other register (rax)
- // to calculate the result if ecx is the requested return register.
- Register result_reg = rax;
- // Save ecx if it isn't the return register and therefore volatile, or if it
- // is the return register, then save the temp register we use in its stead
- // for the result.
- Register save_reg = rax;
- __ pushq(rcx);
- __ pushq(scratch1);
- __ pushq(save_reg);
-
- __ movl(scratch1, mantissa_operand);
- __ Movsd(kScratchDoubleReg, mantissa_operand);
- __ movl(rcx, exponent_operand);
-
- __ andl(rcx, Immediate(HeapNumber::kExponentMask));
- __ shrl(rcx, Immediate(HeapNumber::kExponentShift));
- __ leal(result_reg, MemOperand(rcx, -HeapNumber::kExponentBias));
- __ cmpl(result_reg, Immediate(HeapNumber::kMantissaBits));
- __ j(below, &process_64_bits, Label::kNear);
-
- // Result is entirely in lower 32-bits of mantissa
- int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize;
- __ subl(rcx, Immediate(delta));
- __ xorl(result_reg, result_reg);
- __ cmpl(rcx, Immediate(31));
- __ j(above, &done, Label::kNear);
- __ shll_cl(scratch1);
- __ jmp(&check_negative, Label::kNear);
-
- __ bind(&process_64_bits);
- __ Cvttsd2siq(result_reg, kScratchDoubleReg);
- __ jmp(&done, Label::kNear);
-
- // If the double was negative, negate the integer result.
- __ bind(&check_negative);
- __ movl(result_reg, scratch1);
- __ negl(result_reg);
- __ cmpl(exponent_operand, Immediate(0));
- __ cmovl(greater, result_reg, scratch1);
-
- // Restore registers
- __ bind(&done);
- __ movl(return_operand, result_reg);
- __ popq(save_reg);
- __ popq(scratch1);
- __ popq(rcx);
- __ ret(0);
-}
-
namespace {
// Helper functions for the GenericJSToWasmWrapper.
void PrepareForBuiltinCall(MacroAssembler* masm, MemOperand GCScanSlotPlace,
@@ -3212,7 +2991,7 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
kLastSpillOffset - kSystemPointerSize;
// For Integer section.
// Set the current_int_param_slot to point to the start of the section.
- Register current_int_param_slot = r14;
+ Register current_int_param_slot = r10;
__ leaq(current_int_param_slot, MemOperand(rsp, -kSystemPointerSize));
Register params_size = param_count;
param_count = no_reg;
@@ -3326,7 +3105,7 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r8 : start_int_section
// -- rdi : start_float_section
- // -- r14 : current_int_param_slot
+ // -- r10 : current_int_param_slot
// -- r15 : current_float_param_slot
// -- r11 : valuetypes_array_ptr
// -- r12 : valuetype
@@ -3748,6 +3527,246 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
__ jmp(&compile_wrapper_done);
}
+#endif // V8_ENABLE_WEBASSEMBLY
+
+void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
+ SaveFPRegsMode save_doubles, ArgvMode argv_mode,
+ bool builtin_exit_frame) {
+ // rax: number of arguments including receiver
+ // rbx: pointer to C function (C callee-saved)
+ // rbp: frame pointer of calling JS frame (restored after C call)
+ // rsp: stack pointer (restored after C call)
+ // rsi: current context (restored)
+ //
+ // If argv_mode == kArgvInRegister:
+ // r15: pointer to the first argument
+
+#ifdef V8_TARGET_OS_WIN
+ // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9. It requires the
+  // stack to be aligned to 16 bytes. It only allows a single word to be
+ // returned in register rax. Larger return sizes must be written to an address
+ // passed as a hidden first argument.
+ const Register kCCallArg0 = rcx;
+ const Register kCCallArg1 = rdx;
+ const Register kCCallArg2 = r8;
+ const Register kCCallArg3 = r9;
+ const int kArgExtraStackSpace = 2;
+ const int kMaxRegisterResultSize = 1;
+#else
+ // GCC / Clang passes arguments in rdi, rsi, rdx, rcx, r8, r9. Simple results
+  // are returned in rax, and a struct of two pointers is returned in rax+rdx.
+ // Larger return sizes must be written to an address passed as a hidden first
+ // argument.
+ const Register kCCallArg0 = rdi;
+ const Register kCCallArg1 = rsi;
+ const Register kCCallArg2 = rdx;
+ const Register kCCallArg3 = rcx;
+ const int kArgExtraStackSpace = 0;
+ const int kMaxRegisterResultSize = 2;
+#endif // V8_TARGET_OS_WIN
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ int arg_stack_space =
+ kArgExtraStackSpace +
+ (result_size <= kMaxRegisterResultSize ? 0 : result_size);
+ if (argv_mode == kArgvInRegister) {
+ DCHECK(save_doubles == kDontSaveFPRegs);
+ DCHECK(!builtin_exit_frame);
+ __ EnterApiExitFrame(arg_stack_space);
+ // Move argc into r12 (argv is already in r15).
+ __ movq(r12, rax);
+ } else {
+ __ EnterExitFrame(
+ arg_stack_space, save_doubles == kSaveFPRegs,
+ builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
+ }
+
+ // rbx: pointer to builtin function (C callee-saved).
+ // rbp: frame pointer of exit frame (restored after C call).
+ // rsp: stack pointer (restored after C call).
+ // r12: number of arguments including receiver (C callee-saved).
+ // r15: argv pointer (C callee-saved).
+
+ // Check stack alignment.
+ if (FLAG_debug_code) {
+ __ CheckStackAlignment();
+ }
+
+ // Call C function. The arguments object will be created by stubs declared by
+ // DECLARE_RUNTIME_FUNCTION().
+ if (result_size <= kMaxRegisterResultSize) {
+ // Pass a pointer to the Arguments object as the first argument.
+ // Return result in single register (rax), or a register pair (rax, rdx).
+ __ movq(kCCallArg0, r12); // argc.
+ __ movq(kCCallArg1, r15); // argv.
+ __ Move(kCCallArg2, ExternalReference::isolate_address(masm->isolate()));
+ } else {
+ DCHECK_LE(result_size, 2);
+ // Pass a pointer to the result location as the first argument.
+ __ leaq(kCCallArg0, StackSpaceOperand(kArgExtraStackSpace));
+ // Pass a pointer to the Arguments object as the second argument.
+ __ movq(kCCallArg1, r12); // argc.
+ __ movq(kCCallArg2, r15); // argv.
+ __ Move(kCCallArg3, ExternalReference::isolate_address(masm->isolate()));
+ }
+ __ call(rbx);
+
+ if (result_size > kMaxRegisterResultSize) {
+ // Read result values stored on stack. Result is stored
+  // above the two Arguments object slots on Win64.
+ DCHECK_LE(result_size, 2);
+ __ movq(kReturnRegister0, StackSpaceOperand(kArgExtraStackSpace + 0));
+ __ movq(kReturnRegister1, StackSpaceOperand(kArgExtraStackSpace + 1));
+ }
+ // Result is in rax or rdx:rax - do not destroy these registers!
+
+ // Check result for exception sentinel.
+ Label exception_returned;
+ __ CompareRoot(rax, RootIndex::kException);
+ __ j(equal, &exception_returned);
+
+ // Check that there is no pending exception, otherwise we
+ // should have returned the exception sentinel.
+ if (FLAG_debug_code) {
+ Label okay;
+ __ LoadRoot(kScratchRegister, RootIndex::kTheHoleValue);
+ ExternalReference pending_exception_address = ExternalReference::Create(
+ IsolateAddressId::kPendingExceptionAddress, masm->isolate());
+ Operand pending_exception_operand =
+ masm->ExternalReferenceAsOperand(pending_exception_address);
+ __ cmp_tagged(kScratchRegister, pending_exception_operand);
+ __ j(equal, &okay, Label::kNear);
+ __ int3();
+ __ bind(&okay);
+ }
+
+ // Exit the JavaScript to C++ exit frame.
+ __ LeaveExitFrame(save_doubles == kSaveFPRegs, argv_mode == kArgvOnStack);
+ __ ret(0);
+
+ // Handling of exception.
+ __ bind(&exception_returned);
+
+ ExternalReference pending_handler_context_address = ExternalReference::Create(
+ IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
+ ExternalReference pending_handler_entrypoint_address =
+ ExternalReference::Create(
+ IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
+ ExternalReference pending_handler_fp_address = ExternalReference::Create(
+ IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
+ ExternalReference pending_handler_sp_address = ExternalReference::Create(
+ IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
+
+ // Ask the runtime for help to determine the handler. This will set rax to
+ // contain the current pending exception, don't clobber it.
+ ExternalReference find_handler =
+ ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ movq(arg_reg_1, Immediate(0)); // argc.
+ __ movq(arg_reg_2, Immediate(0)); // argv.
+ __ Move(arg_reg_3, ExternalReference::isolate_address(masm->isolate()));
+ __ PrepareCallCFunction(3);
+ __ CallCFunction(find_handler, 3);
+ }
+ // Retrieve the handler context, SP and FP.
+ __ movq(rsi,
+ masm->ExternalReferenceAsOperand(pending_handler_context_address));
+ __ movq(rsp, masm->ExternalReferenceAsOperand(pending_handler_sp_address));
+ __ movq(rbp, masm->ExternalReferenceAsOperand(pending_handler_fp_address));
+
+ // If the handler is a JS frame, restore the context to the frame. Note that
+ // the context will be set to (rsi == 0) for non-JS frames.
+ Label skip;
+ __ testq(rsi, rsi);
+ __ j(zero, &skip, Label::kNear);
+ __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+ __ bind(&skip);
+
+ // Reset the masking register. This is done independent of the underlying
+ // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
+ // with both configurations. It is safe to always do this, because the
+ // underlying register is caller-saved and can be arbitrarily clobbered.
+ __ ResetSpeculationPoisonRegister();
+
+ // Clear c_entry_fp, like we do in `LeaveExitFrame`.
+ ExternalReference c_entry_fp_address = ExternalReference::Create(
+ IsolateAddressId::kCEntryFPAddress, masm->isolate());
+ Operand c_entry_fp_operand =
+ masm->ExternalReferenceAsOperand(c_entry_fp_address);
+ __ movq(c_entry_fp_operand, Immediate(0));
+
+ // Compute the handler entry address and jump to it.
+ __ movq(rdi,
+ masm->ExternalReferenceAsOperand(pending_handler_entrypoint_address));
+ __ jmp(rdi);
+}
+
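Editorial aside: the kMaxRegisterResultSize values above reflect how the two ABIs return small aggregates. A hedged illustration in plain C++ (the names TwoWordResult and MakePair are invented for exposition, not part of this patch):

// Illustration only: how a two-word result maps onto the two calling
// conventions referenced in Generate_CEntry.
struct TwoWordResult {
  void* first;
  void* second;
};

// System V x86-64: a 16-byte struct of two pointers is returned in registers
// (rax = first, rdx = second), hence kMaxRegisterResultSize == 2.
// Windows x64: aggregates larger than 8 bytes are returned through a hidden
// pointer passed in rcx; the callee writes through it and returns that same
// pointer in rax, which is why the builtin reserves kArgExtraStackSpace and
// reads the result back from the stack.
TwoWordResult MakePair(void* a, void* b) { return {a, b}; }
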
+void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
+ Label check_negative, process_64_bits, done;
+
+ // Account for return address and saved regs.
+ const int kArgumentOffset = 4 * kSystemPointerSize;
+
+ MemOperand mantissa_operand(MemOperand(rsp, kArgumentOffset));
+ MemOperand exponent_operand(
+ MemOperand(rsp, kArgumentOffset + kDoubleSize / 2));
+
+ // The result is returned on the stack.
+ MemOperand return_operand = mantissa_operand;
+
+ Register scratch1 = rbx;
+
+ // Since we must use rcx for shifts below, use some other register (rax)
+ // to calculate the result if ecx is the requested return register.
+ Register result_reg = rax;
+ // Save ecx if it isn't the return register and therefore volatile, or if it
+ // is the return register, then save the temp register we use in its stead
+ // for the result.
+ Register save_reg = rax;
+ __ pushq(rcx);
+ __ pushq(scratch1);
+ __ pushq(save_reg);
+
+ __ movl(scratch1, mantissa_operand);
+ __ Movsd(kScratchDoubleReg, mantissa_operand);
+ __ movl(rcx, exponent_operand);
+
+ __ andl(rcx, Immediate(HeapNumber::kExponentMask));
+ __ shrl(rcx, Immediate(HeapNumber::kExponentShift));
+ __ leal(result_reg, MemOperand(rcx, -HeapNumber::kExponentBias));
+ __ cmpl(result_reg, Immediate(HeapNumber::kMantissaBits));
+ __ j(below, &process_64_bits, Label::kNear);
+
+ // Result is entirely in lower 32-bits of mantissa
+ int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize;
+ __ subl(rcx, Immediate(delta));
+ __ xorl(result_reg, result_reg);
+ __ cmpl(rcx, Immediate(31));
+ __ j(above, &done, Label::kNear);
+ __ shll_cl(scratch1);
+ __ jmp(&check_negative, Label::kNear);
+
+ __ bind(&process_64_bits);
+ __ Cvttsd2siq(result_reg, kScratchDoubleReg);
+ __ jmp(&done, Label::kNear);
+
+ // If the double was negative, negate the integer result.
+ __ bind(&check_negative);
+ __ movl(result_reg, scratch1);
+ __ negl(result_reg);
+ __ cmpl(exponent_operand, Immediate(0));
+ __ cmovl(greater, result_reg, scratch1);
+
+ // Restore registers
+ __ bind(&done);
+ __ movl(return_operand, result_reg);
+ __ popq(save_reg);
+ __ popq(scratch1);
+ __ popq(rcx);
+ __ ret(0);
+}
+
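Editorial aside: Generate_DoubleToI above implements the ECMAScript ToInt32 truncation (truncate toward zero, wrap modulo 2^32) directly on the double's bit pattern, taking the mantissa-shifting path only when the exponent is too large for an exact 64-bit hardware conversion. A minimal portable sketch of the same idea, written here for illustration only; it assumes IEEE-754 binary64 and is not code from this patch:

#include <cstdint>
#include <cstring>

int32_t DoubleToInt32Sketch(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  bool negative = (bits >> 63) != 0;
  // Decompose into an integral 53-bit mantissa scaled by 2^exponent, i.e.
  // |value| == mantissa * 2^exponent. The constant 1075 is the same delta the
  // builtin computes as kExponentBias + kPhysicalSignificandSize.
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1075;
  uint64_t mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);

  uint32_t result;
  if (exponent <= -53) {
    result = 0;  // |value| < 1; also covers subnormals and +/-0.
  } else if (exponent < 0) {
    // |value| < 2^52: truncation is exact, keep the low 32 bits
    // (the Cvttsd2siq path in the builtin).
    result = static_cast<uint32_t>(mantissa >> -exponent);
  } else if (exponent <= 31) {
    // Large integral value: only the low 32 mantissa bits survive the
    // left shift (the shll_cl path in the builtin).
    result = static_cast<uint32_t>(mantissa) << exponent;
  } else {
    result = 0;  // Everything shifted out; also covers NaN and infinities.
  }
  // Negate modulo 2^32 for negative inputs, as the builtin does with negl.
  return static_cast<int32_t>(negative ? 0u - result : result);
}
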
namespace {
int Offset(ExternalReference ref0, ExternalReference ref1) {
@@ -3758,7 +3777,7 @@ int Offset(ExternalReference ref0, ExternalReference ref1) {
}
// Calls an API function. Allocates HandleScope, extracts returned value
-// from handle and propagates exceptions. Clobbers r14, r15, rbx and
+// from handle and propagates exceptions. Clobbers r12, r15, rbx and
// caller-save registers. Restores context. On return removes
// stack_space * kSystemPointerSize (GCed).
void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
@@ -3785,7 +3804,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
DCHECK(rdx == function_address || r8 == function_address);
// Allocate HandleScope in callee-save registers.
- Register prev_next_address_reg = r14;
+ Register prev_next_address_reg = r12;
Register prev_limit_reg = rbx;
Register base_reg = r15;
__ Move(base_reg, next_address);
@@ -4330,6 +4349,147 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
+namespace {
+
+// Converts an interpreter frame into a baseline frame and continues execution
+// in baseline code (baseline code has to exist on the shared function info),
+// either at the current or next (in execution order) bytecode.
+void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
+ bool is_osr = false) {
+ __ pushq(kInterpreterAccumulatorRegister);
+ Label start;
+ __ bind(&start);
+
+ // Get function from the frame.
+ Register closure = rdi;
+ __ movq(closure, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
+
+ // Load the feedback vector.
+ Register feedback_vector = rbx;
+ __ LoadTaggedPointerField(
+ feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedPointerField(feedback_vector,
+ FieldOperand(feedback_vector, Cell::kValueOffset));
+
+ Label install_baseline_code;
+ // Check if feedback vector is valid. If not, call prepare for baseline to
+ // allocate it.
+ __ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, kScratchRegister);
+ __ j(not_equal, &install_baseline_code);
+
+ // Save BytecodeOffset from the stack frame.
+ __ SmiUntag(
+ kInterpreterBytecodeOffsetRegister,
+ MemOperand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ // Replace BytecodeOffset with the feedback vector.
+ __ movq(MemOperand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
+ feedback_vector);
+ feedback_vector = no_reg;
+
+ // Get the Code object from the shared function info.
+ Register code_obj = rbx;
+ __ LoadTaggedPointerField(
+ code_obj, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ code_obj,
+ FieldOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadTaggedPointerField(
+ code_obj, FieldOperand(code_obj, BaselineData::kBaselineCodeOffset));
+
+ // Compute baseline pc for bytecode offset.
+ ExternalReference get_baseline_pc_extref;
+ if (next_bytecode || is_osr) {
+ get_baseline_pc_extref =
+ ExternalReference::baseline_pc_for_next_executed_bytecode();
+ } else {
+ get_baseline_pc_extref =
+ ExternalReference::baseline_pc_for_bytecode_offset();
+ }
+ Register get_baseline_pc = rax;
+ __ LoadAddress(get_baseline_pc, get_baseline_pc_extref);
+
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset.
+ // TODO(pthier): Investigate if it is feasible to handle this special case
+ // in TurboFan instead of here.
+ Label valid_bytecode_offset, function_entry_bytecode;
+ if (!is_osr) {
+ __ cmpq(kInterpreterBytecodeOffsetRegister,
+ Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ __ j(equal, &function_entry_bytecode);
+ }
+
+ __ subq(kInterpreterBytecodeOffsetRegister,
+ Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+ __ bind(&valid_bytecode_offset);
+ // Get bytecode array from the stack frame.
+ __ movq(kInterpreterBytecodeArrayRegister,
+ MemOperand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ PrepareCallCFunction(3);
+ __ movq(arg_reg_1, code_obj);
+ __ movq(arg_reg_2, kInterpreterBytecodeOffsetRegister);
+ __ movq(arg_reg_3, kInterpreterBytecodeArrayRegister);
+ __ CallCFunction(get_baseline_pc, 3);
+ }
+ __ leaq(code_obj,
+ FieldOperand(code_obj, kReturnRegister0, times_1, Code::kHeaderSize));
+ __ popq(kInterpreterAccumulatorRegister);
+
+ if (is_osr) {
+ // Reset the OSR loop nesting depth to disarm back edges.
+ // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
+ // Sparkplug here.
+ __ movw(FieldOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kOsrNestingLevelOffset),
+ Immediate(0));
+ Generate_OSREntry(masm, code_obj);
+ } else {
+ __ jmp(code_obj);
+ }
+ __ Trap(); // Unreachable.
+
+ if (!is_osr) {
+ __ bind(&function_entry_bytecode);
+    // If the bytecode offset is kFunctionEntryBytecodeOffset, get the start
+    // address of the first bytecode.
+ __ movq(kInterpreterBytecodeOffsetRegister, Immediate(0));
+ if (next_bytecode) {
+ __ LoadAddress(get_baseline_pc,
+ ExternalReference::baseline_pc_for_bytecode_offset());
+ }
+ __ jmp(&valid_bytecode_offset);
+ }
+
+ __ bind(&install_baseline_code);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(closure);
+ __ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ }
+ // Retry from the start after installing baseline code.
+ __ jmp(&start);
+}
+
+} // namespace
+
+void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
+ Generate_BaselineEntry(masm, false);
+}
+
+void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
+ Generate_BaselineEntry(masm, true);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
+ MacroAssembler* masm) {
+ Generate_BaselineEntry(masm, false, true);
+}
+
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
diff --git a/deps/v8/src/codegen/OWNERS b/deps/v8/src/codegen/OWNERS
index 641ed9c403f..364d34fb092 100644
--- a/deps/v8/src/codegen/OWNERS
+++ b/deps/v8/src/codegen/OWNERS
@@ -1,10 +1,8 @@
bbudge@chromium.org
-bmeurer@chromium.org
clemensb@chromium.org
delphick@chromium.org
gdeepti@chromium.org
ishell@chromium.org
-jarin@chromium.org
jgruber@chromium.org
jkummerow@chromium.org
leszeks@chromium.org
@@ -12,7 +10,7 @@ mslekova@chromium.org
mvstanton@chromium.org
mythria@chromium.org
neis@chromium.org
+nicohartmann@chromium.org
rmcilroy@chromium.org
-sigurds@chromium.org
solanes@chromium.org
zhin@chromium.org
diff --git a/deps/v8/src/codegen/aligned-slot-allocator.cc b/deps/v8/src/codegen/aligned-slot-allocator.cc
new file mode 100644
index 00000000000..9e7ab09c812
--- /dev/null
+++ b/deps/v8/src/codegen/aligned-slot-allocator.cc
@@ -0,0 +1,125 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/aligned-slot-allocator.h"
+
+#include "src/base/bits.h"
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace internal {
+
+int AlignedSlotAllocator::NextSlot(int n) const {
+ DCHECK(n == 1 || n == 2 || n == 4);
+ if (n <= 1 && IsValid(next1_)) return next1_;
+ if (n <= 2 && IsValid(next2_)) return next2_;
+ DCHECK(IsValid(next4_));
+ return next4_;
+}
+
+int AlignedSlotAllocator::Allocate(int n) {
+ DCHECK(n == 1 || n == 2 || n == 4);
+ // Check invariants.
+ DCHECK_EQ(0, next4_ & 3);
+ DCHECK_IMPLIES(IsValid(next2_), (next2_ & 1) == 0);
+
+ // The sentinel value kInvalidSlot is used to indicate no slot.
+ // next1_ is the index of the 1 slot fragment, or kInvalidSlot.
+ // next2_ is the 2-aligned index of the 2 slot fragment, or kInvalidSlot.
+ // next4_ is the 4-aligned index of the next 4 slot group. It is always valid.
+ // In order to ensure we only have a single 1- or 2-slot fragment, we greedily
+ // use any fragment that satisfies the request.
+ int result = kInvalidSlot;
+ switch (n) {
+ case 1: {
+ if (IsValid(next1_)) {
+ result = next1_;
+ next1_ = kInvalidSlot;
+ } else if (IsValid(next2_)) {
+ result = next2_;
+ next1_ = result + 1;
+ next2_ = kInvalidSlot;
+ } else {
+ result = next4_;
+ next1_ = result + 1;
+ next2_ = result + 2;
+ next4_ += 4;
+ }
+ break;
+ }
+ case 2: {
+ if (IsValid(next2_)) {
+ result = next2_;
+ next2_ = kInvalidSlot;
+ } else {
+ result = next4_;
+ next2_ = result + 2;
+ next4_ += 4;
+ }
+ break;
+ }
+ case 4: {
+ result = next4_;
+ next4_ += 4;
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ DCHECK(IsValid(result));
+ size_ = std::max(size_, result + n);
+ return result;
+}
+
+int AlignedSlotAllocator::AllocateUnaligned(int n) {
+ DCHECK_GE(n, 0);
+ // Check invariants.
+ DCHECK_EQ(0, next4_ & 3);
+ DCHECK_IMPLIES(IsValid(next2_), (next2_ & 1) == 0);
+
+ // Reserve |n| slots at |size_|, invalidate fragments below the new |size_|,
+ // and add any new fragments beyond the new |size_|.
+ int result = size_;
+ size_ += n;
+ switch (size_ & 3) {
+ case 0: {
+ next1_ = next2_ = kInvalidSlot;
+ next4_ = size_;
+ break;
+ }
+ case 1: {
+ next1_ = size_;
+ next2_ = size_ + 1;
+ next4_ = size_ + 3;
+ break;
+ }
+ case 2: {
+ next1_ = kInvalidSlot;
+ next2_ = size_;
+ next4_ = size_ + 2;
+ break;
+ }
+ case 3: {
+ next1_ = size_;
+ next2_ = kInvalidSlot;
+ next4_ = size_ + 1;
+ break;
+ }
+ }
+ return result;
+}
+
+int AlignedSlotAllocator::Align(int n) {
+ DCHECK(base::bits::IsPowerOfTwo(n));
+ DCHECK_LE(n, 4);
+ int mask = n - 1;
+ int misalignment = size_ & mask;
+ int padding = (n - misalignment) & mask;
+ AllocateUnaligned(padding);
+ return padding;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/codegen/aligned-slot-allocator.h b/deps/v8/src/codegen/aligned-slot-allocator.h
new file mode 100644
index 00000000000..1abb7117138
--- /dev/null
+++ b/deps/v8/src/codegen/aligned-slot-allocator.h
@@ -0,0 +1,71 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_ALIGNED_SLOT_ALLOCATOR_H_
+#define V8_CODEGEN_ALIGNED_SLOT_ALLOCATOR_H_
+
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+// An aligned slot allocator. Allocates groups of 1, 2, or 4 slots such that the
+// first slot of the group is aligned to the group size. The allocator can also
+// allocate unaligned groups of arbitrary size, and can align the slot area to
+// a multiple of 1, 2, or 4 slots. The allocator tries to be as thrifty as
+// possible by
+// reusing alignment padding slots in subsequent smaller slot allocations.
+class V8_EXPORT_PRIVATE AlignedSlotAllocator {
+ public:
+ // Slots are always multiples of pointer-sized units.
+ static constexpr int kSlotSize = kSystemPointerSize;
+
+ static int NumSlotsForWidth(int bytes) {
+ DCHECK_GT(bytes, 0);
+ return (bytes + kSlotSize - 1) / kSlotSize;
+ }
+
+ AlignedSlotAllocator() = default;
+
+ // Allocates |n| slots, where |n| must be 1, 2, or 4. Padding slots may be
+ // inserted for alignment.
+ // Returns the starting index of the slots, which is evenly divisible by |n|.
+ int Allocate(int n);
+
+ // Gets the starting index of the slots that would be returned by Allocate(n).
+ int NextSlot(int n) const;
+
+ // Allocates the given number of slots at the current end of the slot area,
+ // and returns the starting index of the slots. This resets any fragment
+ // slots, so subsequent allocations will be after the end of this one.
+ // AllocateUnaligned(0) can be used to partition the slot area, for example
+ // to make sure tagged values follow untagged values on a Frame.
+ int AllocateUnaligned(int n);
+
+ // Aligns the slot area so that future allocations begin at the alignment.
+ // Returns the number of slots needed to align the slot area.
+ int Align(int n);
+
+ // Returns the size of the slot area, in slots. This will be greater than any
+ // already allocated slot index.
+ int Size() const { return size_; }
+
+ private:
+ static constexpr int kInvalidSlot = -1;
+
+ static bool IsValid(int slot) { return slot > kInvalidSlot; }
+
+ int next1_ = kInvalidSlot;
+ int next2_ = kInvalidSlot;
+ int next4_ = 0;
+ int size_ = 0;
+
+ DISALLOW_NEW_AND_DELETE()
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_ALIGNED_SLOT_ALLOCATOR_H_
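Editorial aside: a hypothetical usage sketch (not part of the patch; the function name is invented) showing how the allocator hands out indices and reuses the padding left behind by aligned requests. The expected values follow directly from the Allocate() implementation in the .cc file above.

#include "src/codegen/aligned-slot-allocator.h"

void AlignedSlotAllocatorExample() {
  v8::internal::AlignedSlotAllocator slots;
  int a = slots.Allocate(2);  // 0: fresh area; a 2-slot fragment remains at index 2.
  int b = slots.Allocate(1);  // 2: takes that fragment; a 1-slot fragment remains at 3.
  int c = slots.Allocate(4);  // 4: next 4-aligned group, growing the area to 8 slots.
  int d = slots.Allocate(1);  // 3: fills the 1-slot fragment, no growth.
  // slots.Size() == 8 here, and Align(4) would return 0 because the size is
  // already a multiple of 4.
  (void)a; (void)b; (void)c; (void)d;
}
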
diff --git a/deps/v8/src/codegen/arm/assembler-arm-inl.h b/deps/v8/src/codegen/arm/assembler-arm-inl.h
index 9dadad96ac3..7035fa2492b 100644
--- a/deps/v8/src/codegen/arm/assembler-arm-inl.h
+++ b/deps/v8/src/codegen/arm/assembler-arm-inl.h
@@ -49,8 +49,6 @@ namespace internal {
bool CpuFeatures::SupportsOptimizer() { return true; }
-bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(NEON); }
-
int DoubleRegister::SupportedRegisterCount() {
return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
}
diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc
index 6af924fa47e..17a20a6f977 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/assembler-arm.cc
@@ -198,6 +198,8 @@ static constexpr unsigned CpuFeaturesFromCompiler() {
#endif
}
+bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(NEON); }
+
void CpuFeatures::ProbeImpl(bool cross_compile) {
dcache_line_size_ = 64;
@@ -239,9 +241,9 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
// Additional tuning options.
// ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines.
- if (cpu.implementer() == base::CPU::ARM &&
- (cpu.part() == base::CPU::ARM_CORTEX_A5 ||
- cpu.part() == base::CPU::ARM_CORTEX_A9)) {
+ if (cpu.implementer() == base::CPU::kArm &&
+ (cpu.part() == base::CPU::kArmCortexA5 ||
+ cpu.part() == base::CPU::kArmCortexA9)) {
dcache_line_size_ = 32;
}
#endif
@@ -4248,6 +4250,15 @@ void Assembler::vorr(QwNeonRegister dst, QwNeonRegister src1,
src2.code()));
}
+void Assembler::vorn(QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
+ // Qd = vorn(Qn, Qm) SIMD OR NOT.
+ // Instruction details available in ARM DDI 0406C.d, A8.8.359.
+ DCHECK(IsEnabled(NEON));
+ emit(EncodeNeonBinaryBitwiseOp(VORN, NEON_Q, dst.code(), src1.code(),
+ src2.code()));
+}
+
enum FPBinOp {
VADDF,
VSUBF,
diff --git a/deps/v8/src/codegen/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h
index 456ac03f928..e0490a68533 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.h
+++ b/deps/v8/src/codegen/arm/assembler-arm.h
@@ -887,6 +887,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void veor(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vbsl(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vorr(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+ void vorn(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vadd(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vadd(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2);
@@ -1381,6 +1382,20 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
bool CanAcquire() const { return *assembler_->GetScratchRegisterList() != 0; }
bool CanAcquireD() const { return CanAcquireVfp<DwVfpRegister>(); }
+ void Include(const Register& reg1, const Register& reg2 = no_reg) {
+ RegList* available = assembler_->GetScratchRegisterList();
+ DCHECK_NOT_NULL(available);
+ DCHECK_EQ((*available) & (reg1.bit() | reg2.bit()), 0);
+ *available |= reg1.bit() | reg2.bit();
+ }
+ void Exclude(const Register& reg1, const Register& reg2 = no_reg) {
+ RegList* available = assembler_->GetScratchRegisterList();
+ DCHECK_NOT_NULL(available);
+ DCHECK_EQ((*available) & (reg1.bit() | reg2.bit()),
+ reg1.bit() | reg2.bit());
+ *available &= ~(reg1.bit() | reg2.bit());
+ }
+
private:
friend class Assembler;
friend class TurboAssembler;
diff --git a/deps/v8/src/codegen/arm/interface-descriptors-arm.cc b/deps/v8/src/codegen/arm/interface-descriptors-arm.cc
index 25063b2a327..53992227ab3 100644
--- a/deps/v8/src/codegen/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/codegen/arm/interface-descriptors-arm.cc
@@ -86,14 +86,8 @@ const Register ApiGetterDescriptor::CallbackRegister() { return r3; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
-const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
- // TODO(v8:11421): Implement on this platform.
- UNREACHABLE();
-}
-const Register BaselineLeaveFrameDescriptor::WeightRegister() {
- // TODO(v8:11421): Implement on this platform.
- UNREACHABLE();
-}
+const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() { return r3; }
+const Register BaselineLeaveFrameDescriptor::WeightRegister() { return r4; }
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return r0; }
@@ -220,8 +214,11 @@ void CompareDescriptor::InitializePlatformSpecific(
void Compare_BaselineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on this platform.
- InitializePlatformUnimplemented(data, kParameterCount);
+ // r1: left operand
+ // r0: right operand
+ // r2: feedback slot
+ Register registers[] = {r1, r0, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
void BinaryOpDescriptor::InitializePlatformSpecific(
@@ -232,8 +229,11 @@ void BinaryOpDescriptor::InitializePlatformSpecific(
void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on this platform.
- InitializePlatformUnimplemented(data, kParameterCount);
+ // r1: left operand
+ // r0: right operand
+ // r2: feedback slot
+ Register registers[] = {r1, r0, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ApiCallbackDescriptor::InitializePlatformSpecific(
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index f1831aaea30..f83eee4a919 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -25,7 +25,10 @@
#include "src/runtime/runtime.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
+#endif // V8_ENABLE_WEBASSEMBLY
// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
@@ -302,6 +305,18 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
Call(builtin_index);
}
+void TurboAssembler::LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
+ Register destination) {
+ ldr(destination, EntryFromBuiltinIndexAsOperand(builtin_index));
+}
+
+MemOperand TurboAssembler::EntryFromBuiltinIndexAsOperand(
+ Builtins::Name builtin_index) {
+ DCHECK(root_array_available());
+ return MemOperand(kRootRegister,
+ IsolateData::builtin_entry_slot_offset(builtin_index));
+}
+
void TurboAssembler::CallBuiltin(int builtin_index, Condition cond) {
DCHECK(Builtins::IsBuiltinId(builtin_index));
RecordCommentForOffHeapTrampoline(builtin_index);
@@ -1344,8 +1359,11 @@ void TurboAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
// r0-r3: preserved
UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- mov(scratch, Operand(StackFrame::TypeToMarker(type)));
+ Register scratch = no_reg;
+ if (!StackFrame::IsJavaScript(type)) {
+ scratch = temps.Acquire();
+ mov(scratch, Operand(StackFrame::TypeToMarker(type)));
+ }
PushCommonFrame(scratch);
}
@@ -1386,6 +1404,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
}
void TurboAssembler::AllocateStackSpace(int bytes) {
+ DCHECK_GE(bytes, 0);
UseScratchRegisterScope temps(this);
DwVfpRegister scratch = no_dreg;
while (bytes > kStackPageSize) {
@@ -1396,6 +1415,7 @@ void TurboAssembler::AllocateStackSpace(int bytes) {
vldr(scratch, MemOperand(sp));
bytes -= kStackPageSize;
}
+ if (bytes == 0) return;
sub(sp, sp, Operand(bytes));
}
#endif
@@ -1913,8 +1933,13 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
AllocateStackSpace(kDoubleSize); // Put input on stack.
vstr(double_input, MemOperand(sp, 0));
+#if V8_ENABLE_WEBASSEMBLY
if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
+#else
+ // For balance.
+ if (false) {
+#endif // V8_ENABLE_WEBASSEMBLY
} else if (options().inline_offheap_trampolines) {
CallBuiltin(Builtins::kDoubleToI);
} else {
@@ -2070,10 +2095,10 @@ void TurboAssembler::LoadMap(Register destination, Register object) {
}
void MacroAssembler::LoadGlobalProxy(Register dst) {
- LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+ LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
}
-void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
LoadMap(dst, cp);
ldr(dst, FieldMemOperand(
dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
@@ -2650,6 +2675,16 @@ void TurboAssembler::I64x2Eq(QwNeonRegister dst, QwNeonRegister src1,
vand(dst, dst, scratch);
}
+void TurboAssembler::I64x2Ne(QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
+ UseScratchRegisterScope temps(this);
+ Simd128Register tmp = temps.AcquireQ();
+ vceq(Neon32, dst, src1, src2);
+ vrev64(Neon32, tmp, dst);
+ vmvn(dst, dst);
+ vorn(dst, dst, tmp);
+}
+
void TurboAssembler::I64x2GtS(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
vqsub(NeonS64, dst, src2, src1);
@@ -2663,7 +2698,7 @@ void TurboAssembler::I64x2GeS(QwNeonRegister dst, QwNeonRegister src1,
vmvn(dst, dst);
}
-void TurboAssembler::V64x2AllTrue(Register dst, QwNeonRegister src) {
+void TurboAssembler::I64x2AllTrue(Register dst, QwNeonRegister src) {
UseScratchRegisterScope temps(this);
QwNeonRegister tmp = temps.AcquireQ();
// src = | a | b | c | d |
@@ -2686,6 +2721,49 @@ void TurboAssembler::V64x2AllTrue(Register dst, QwNeonRegister src) {
// = definition of i64x2.all_true.
}
+void TurboAssembler::I64x2Abs(QwNeonRegister dst, QwNeonRegister src) {
+ UseScratchRegisterScope temps(this);
+ Simd128Register tmp = temps.AcquireQ();
+ vshr(NeonS64, tmp, src, 63);
+ veor(dst, src, tmp);
+ vsub(Neon64, dst, dst, tmp);
+}
+
+namespace {
+using AssemblerFunc = void (Assembler::*)(DwVfpRegister, SwVfpRegister,
+ VFPConversionMode, const Condition);
+// Helper function for f64x2 convert low instructions.
+// This ensures that we do not overwrite src, if dst == src.
+void F64x2ConvertLowHelper(Assembler* assm, QwNeonRegister dst,
+ QwNeonRegister src, AssemblerFunc convert_fn) {
+ LowDwVfpRegister src_d = LowDwVfpRegister::from_code(src.low().code());
+ UseScratchRegisterScope temps(assm);
+ if (dst == src) {
+ LowDwVfpRegister tmp = temps.AcquireLowD();
+ assm->vmov(tmp, src_d);
+ src_d = tmp;
+ }
+ // Default arguments are not part of the function type
+ (assm->*convert_fn)(dst.low(), src_d.low(), kDefaultRoundToZero, al);
+ (assm->*convert_fn)(dst.high(), src_d.high(), kDefaultRoundToZero, al);
+}
+} // namespace
+
+void TurboAssembler::F64x2ConvertLowI32x4S(QwNeonRegister dst,
+ QwNeonRegister src) {
+ F64x2ConvertLowHelper(this, dst, src, &Assembler::vcvt_f64_s32);
+}
+
+void TurboAssembler::F64x2ConvertLowI32x4U(QwNeonRegister dst,
+ QwNeonRegister src) {
+ F64x2ConvertLowHelper(this, dst, src, &Assembler::vcvt_f64_u32);
+}
+
+void TurboAssembler::F64x2PromoteLowF32x4(QwNeonRegister dst,
+ QwNeonRegister src) {
+ F64x2ConvertLowHelper(this, dst, src, &Assembler::vcvt_f64_f32);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h
index 54c3e6c9418..e622d4aa172 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.h
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h
@@ -12,7 +12,7 @@
#include "src/codegen/arm/assembler-arm.h"
#include "src/codegen/bailout-reason.h"
#include "src/common/globals.h"
-#include "src/objects/contexts.h"
+#include "src/objects/tagged-index.h"
namespace v8 {
namespace internal {
@@ -64,7 +64,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void AllocateStackSpace(int bytes);
#else
void AllocateStackSpace(Register bytes) { sub(sp, sp, bytes); }
- void AllocateStackSpace(int bytes) { sub(sp, sp, Operand(bytes)); }
+ void AllocateStackSpace(int bytes) {
+ DCHECK_GE(bytes, 0);
+ if (bytes == 0) return;
+ sub(sp, sp, Operand(bytes));
+ }
#endif
// Push a fixed frame, consisting of lr, fp
@@ -309,6 +313,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
bool check_constant_pool = true);
void Call(Label* target);
+ MemOperand EntryFromBuiltinIndexAsOperand(Builtins::Name builtin_index);
+ void LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
+ Register destination);
// Load the builtin given by the Smi in |builtin_index| into the same
// register.
void LoadEntryFromBuiltinIndex(Register builtin_index);
@@ -474,6 +481,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Move(Register dst, Handle<HeapObject> value);
void Move(Register dst, ExternalReference reference);
void Move(Register dst, Register src, Condition cond = al);
+ void Move(Register dst, const MemOperand& src) { ldr(dst, src); }
void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
Condition cond = al) {
if (!src.IsRegister() || src.rm() != dst || sbit != LeaveCC) {
@@ -571,9 +579,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// and be used in both TurboFan and Liftoff.
void I64x2BitMask(Register dst, QwNeonRegister src);
void I64x2Eq(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+ void I64x2Ne(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void I64x2GtS(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void I64x2GeS(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
- void V64x2AllTrue(Register dst, QwNeonRegister src);
+ void I64x2AllTrue(Register dst, QwNeonRegister src);
+ void I64x2Abs(QwNeonRegister dst, QwNeonRegister src);
+ void F64x2ConvertLowI32x4S(QwNeonRegister dst, QwNeonRegister src);
+ void F64x2ConvertLowI32x4U(QwNeonRegister dst, QwNeonRegister src);
+ void F64x2PromoteLowF32x4(QwNeonRegister dst, QwNeonRegister src);
private:
// Compare single values and then load the fpscr flags to a register.
@@ -668,7 +681,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst);
- void LoadNativeContextSlot(int index, Register dst);
+ void LoadNativeContextSlot(Register dst, int index);
// ---------------------------------------------------------------------------
// JavaScript invokes
diff --git a/deps/v8/src/codegen/arm/register-arm.h b/deps/v8/src/codegen/arm/register-arm.h
index 6cb6c602c25..6608ad4edeb 100644
--- a/deps/v8/src/codegen/arm/register-arm.h
+++ b/deps/v8/src/codegen/arm/register-arm.h
@@ -119,7 +119,12 @@ GENERAL_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
constexpr Register no_reg = Register::no_reg();
-constexpr bool kPadArguments = false;
+// Returns the number of padding slots needed for stack pointer alignment.
+constexpr int ArgumentPaddingSlots(int argument_count) {
+ // No argument padding required.
+ return 0;
+}
+
constexpr bool kSimpleFPAliasing = false;
constexpr bool kSimdMaskRegisters = false;
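Editorial aside: the arm helper above never needs padding. For contrast, a hedged sketch (an assumption for illustration, not taken from this patch) of what the computation looks like on a target that must keep the stack pointer 16-byte aligned while each slot is 8 bytes wide:

// Hypothetical counterpart for a 16-byte-aligned stack with 8-byte slots:
// an odd slot count leaves sp misaligned by 8 bytes, so pad by one slot.
constexpr int ArgumentPaddingSlotsFor16ByteAlignment(int argument_count) {
  return argument_count & 1;
}

static_assert(ArgumentPaddingSlotsFor16ByteAlignment(3) == 1,
              "odd counts need one padding slot");
static_assert(ArgumentPaddingSlotsFor16ByteAlignment(4) == 0,
              "even counts are already aligned");
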
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
index 1027dccc470..ee64dbe1f26 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
@@ -19,8 +19,6 @@ namespace internal {
bool CpuFeatures::SupportsOptimizer() { return true; }
-bool CpuFeatures::SupportsWasmSimd128() { return true; }
-
void RelocInfo::apply(intptr_t delta) {
// On arm64 only internal references and immediate branches need extra work.
if (RelocInfo::IsInternalReference(rmode_)) {
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc
index 441f299a17a..05518774031 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc
@@ -32,6 +32,7 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
+#include "src/base/small-vector.h"
#include "src/codegen/arm64/assembler-arm64-inl.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/safepoint-table.h"
@@ -82,6 +83,7 @@ constexpr unsigned CpuFeaturesFromTargetOS() {
// -----------------------------------------------------------------------------
// CpuFeatures implementation.
+bool CpuFeatures::SupportsWasmSimd128() { return true; }
void CpuFeatures::ProbeImpl(bool cross_compile) {
// Only use statically determined features for cross compile (snapshot).
@@ -1428,37 +1430,6 @@ void Assembler::stlxrh(const Register& rs, const Register& rt,
Emit(STLXR_h | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
-void Assembler::prfm(int prfop, const MemOperand& addr) {
- // Restricted support for prfm, only register offset.
- // This can probably be merged with Assembler::LoadStore as we expand support.
- DCHECK(addr.IsRegisterOffset());
- DCHECK(is_uint5(prfop));
- Instr memop = PRFM | prfop | RnSP(addr.base());
-
- Extend ext = addr.extend();
- Shift shift = addr.shift();
- unsigned shift_amount = addr.shift_amount();
-
- // LSL is encoded in the option field as UXTX.
- if (shift == LSL) {
- ext = UXTX;
- }
-
- // Shifts are encoded in one bit, indicating a left shift by the memory
- // access size.
- DCHECK((shift_amount == 0) ||
- (shift_amount == static_cast<unsigned>(CalcLSDataSize(PRFM))));
-
- Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) |
- ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0));
-}
-
-void Assembler::prfm(PrefetchOperation prfop, const MemOperand& addr) {
- // Restricted support for prfm, only register offset.
- // This can probably be merged with Assembler::LoadStore as we expand support.
- prfm(static_cast<int>(prfop), addr);
-}
-
void Assembler::NEON3DifferentL(const VRegister& vd, const VRegister& vn,
const VRegister& vm, NEON3DifferentOp vop) {
DCHECK(AreSameFormat(vn, vm));
@@ -4502,12 +4473,15 @@ const size_t ConstantPool::kOpportunityDistToPool32 = 64 * KB;
const size_t ConstantPool::kOpportunityDistToPool64 = 64 * KB;
const size_t ConstantPool::kApproxMaxEntryCount = 512;
-bool Assembler::ShouldEmitVeneer(int max_reachable_pc, size_t margin) {
- // Account for the branch around the veneers and the guard.
- int protection_offset = 2 * kInstrSize;
- return static_cast<intptr_t>(pc_offset() + margin + protection_offset +
- unresolved_branches_.size() *
- kMaxVeneerCodeSize) >= max_reachable_pc;
+intptr_t Assembler::MaxPCOffsetAfterVeneerPoolIfEmittedNow(size_t margin) {
+ // Account for the branch and guard around the veneers.
+ static constexpr int kBranchSizeInBytes = kInstrSize;
+ static constexpr int kGuardSizeInBytes = kInstrSize;
+ const size_t max_veneer_size_in_bytes =
+ unresolved_branches_.size() * kVeneerCodeSize;
+ return static_cast<intptr_t>(pc_offset() + kBranchSizeInBytes +
+ kGuardSizeInBytes + max_veneer_size_in_bytes +
+ margin);
}
void Assembler::RecordVeneerPool(int location_offset, int size) {
@@ -4538,51 +4512,80 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection,
EmitVeneersGuard();
-#ifdef DEBUG
- Label veneer_size_check;
-#endif
+ // We only emit veneers if needed (unless emission is forced), i.e. when the
+ // max-reachable-pc of the branch has been exhausted by the current codegen
+ // state. Specifically, we emit when the max-reachable-pc of the branch <= the
+ // max-pc-after-veneers (over-approximated).
+ const intptr_t max_pc_after_veneers =
+ MaxPCOffsetAfterVeneerPoolIfEmittedNow(margin);
+
+ // The `unresolved_branches_` multimap is sorted by max-reachable-pc in
+ // ascending order. For efficiency reasons, we want to call
+ // RemoveBranchFromLabelLinkChain in descending order. The actual veneers are
+ // then generated in ascending order.
+ // TODO(jgruber): This is still inefficient in multiple ways, thoughts on how
+ // we could improve in the future:
+ // - Don't erase individual elements from the multimap, erase a range instead.
+ // - Replace the multimap by a simpler data structure (like a plain vector or
+ // a circular array).
+ // - Refactor s.t. RemoveBranchFromLabelLinkChain does not need the linear
+ // lookup in the link chain.
+
+ static constexpr int kStaticTasksSize = 16; // Arbitrary.
+ base::SmallVector<FarBranchInfo, kStaticTasksSize> tasks;
+
+ {
+ auto it = unresolved_branches_.begin();
+ while (it != unresolved_branches_.end()) {
+ const int max_reachable_pc = it->first;
+ if (!force_emit && max_reachable_pc > max_pc_after_veneers) break;
+
+ // Found a task. We'll emit a veneer for this.
+ tasks.emplace_back(it->second);
+ auto eraser_it = it++;
+ unresolved_branches_.erase(eraser_it);
+ }
+ }
- std::multimap<int, FarBranchInfo>::iterator it, it_to_delete;
+ // Update next_veneer_pool_check_ (tightly coupled with unresolved_branches_).
+ if (unresolved_branches_.empty()) {
+ next_veneer_pool_check_ = kMaxInt;
+ } else {
+ next_veneer_pool_check_ =
+ unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
+ }
- it = unresolved_branches_.begin();
- while (it != unresolved_branches_.end()) {
- if (force_emit || ShouldEmitVeneer(it->first, margin)) {
- Instruction* branch = InstructionAt(it->second.pc_offset_);
- Label* label = it->second.label_;
+ // Reminder: We iterate in reverse order to avoid duplicate linked-list
+ // iteration in RemoveBranchFromLabelLinkChain (which starts at the target
+ // label, and iterates backwards through linked branch instructions).
+ const int tasks_size = static_cast<int>(tasks.size());
+ for (int i = tasks_size - 1; i >= 0; i--) {
+ Instruction* branch = InstructionAt(tasks[i].pc_offset_);
+ Instruction* veneer = reinterpret_cast<Instruction*>(
+ reinterpret_cast<uintptr_t>(pc_) + i * kVeneerCodeSize);
+ RemoveBranchFromLabelLinkChain(branch, tasks[i].label_, veneer);
+ }
+
+ // Now emit the actual veneer and patch up the incoming branch.
+
+ for (const FarBranchInfo& info : tasks) {
#ifdef DEBUG
- bind(&veneer_size_check);
-#endif
- // Patch the branch to point to the current position, and emit a branch
- // to the label.
- Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
- RemoveBranchFromLabelLinkChain(branch, label, veneer);
- branch->SetImmPCOffsetTarget(options(), veneer);
- b(label);
-#ifdef DEBUG
- DCHECK(SizeOfCodeGeneratedSince(&veneer_size_check) <=
- static_cast<uint64_t>(kMaxVeneerCodeSize));
- veneer_size_check.Unuse();
+ Label veneer_size_check;
+ bind(&veneer_size_check);
#endif
-
- it_to_delete = it++;
- unresolved_branches_.erase(it_to_delete);
- } else {
- ++it;
- }
+ Instruction* branch = InstructionAt(info.pc_offset_);
+ Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
+ branch->SetImmPCOffsetTarget(options(), veneer);
+ b(info.label_); // This may end up pointing at yet another veneer later on.
+ DCHECK_EQ(SizeOfCodeGeneratedSince(&veneer_size_check),
+ static_cast<uint64_t>(kVeneerCodeSize));
}
// Record the veneer pool size.
int pool_size = static_cast<int>(SizeOfCodeGeneratedSince(&size_check));
RecordVeneerPool(veneer_pool_relocinfo_loc, pool_size);
- if (unresolved_branches_.empty()) {
- next_veneer_pool_check_ = kMaxInt;
- } else {
- next_veneer_pool_check_ =
- unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
- }
-
bind(&end);
RecordComment("]");
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h
index 41bdb03b4f5..aa2ffb26cdf 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.h
@@ -880,9 +880,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Store-release exclusive half-word.
void stlxrh(const Register& rs, const Register& rt, const Register& rn);
- void prfm(int prfop, const MemOperand& addr);
- void prfm(PrefetchOperation prfop, const MemOperand& addr);
-
// Move instructions. The default shift of -1 indicates that the move
// instruction will calculate an appropriate 16-bit immediate and left shift
// that is equal to the 64-bit immediate argument. If an explicit left shift
@@ -2392,18 +2389,23 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
constpool_.Check(Emission::kIfNeeded, Jump::kRequired, margin);
}
+ // Used by veneer checks below - returns the max (= overapproximated) pc
+ // offset after the veneer pool, if the veneer pool were to be emitted
+ // immediately.
+ intptr_t MaxPCOffsetAfterVeneerPoolIfEmittedNow(size_t margin);
// Returns true if we should emit a veneer as soon as possible for a branch
// which can at most reach to specified pc.
- bool ShouldEmitVeneer(int max_reachable_pc,
- size_t margin = kVeneerDistanceMargin);
+ bool ShouldEmitVeneer(int max_reachable_pc, size_t margin) {
+ return max_reachable_pc < MaxPCOffsetAfterVeneerPoolIfEmittedNow(margin);
+ }
bool ShouldEmitVeneers(size_t margin = kVeneerDistanceMargin) {
return ShouldEmitVeneer(unresolved_branches_first_limit(), margin);
}
- // The maximum code size generated for a veneer. Currently one branch
+ // The code size generated for a veneer. Currently one branch
// instruction. This is for code size checking purposes, and can be extended
// in the future for example if we decide to add nops between the veneers.
- static constexpr int kMaxVeneerCodeSize = 1 * kInstrSize;
+ static constexpr int kVeneerCodeSize = 1 * kInstrSize;
void RecordVeneerPool(int location_offset, int size);
// Emits veneers for branches that are approaching their maximum range.
diff --git a/deps/v8/src/codegen/arm64/constants-arm64.h b/deps/v8/src/codegen/arm64/constants-arm64.h
index 940216fc944..52790b9faf4 100644
--- a/deps/v8/src/codegen/arm64/constants-arm64.h
+++ b/deps/v8/src/codegen/arm64/constants-arm64.h
@@ -159,9 +159,6 @@ using float16 = uint16_t;
/* store second source. */ \
V_(Rs, 20, 16, Bits) /* Store-exclusive status */ \
V_(PrefetchMode, 4, 0, Bits) \
- V_(PrefetchHint, 4, 3, Bits) \
- V_(PrefetchTarget, 2, 1, Bits) \
- V_(PrefetchStream, 0, 0, Bits) \
\
/* Common bits */ \
V_(SixtyFourBits, 31, 31, Bits) \
@@ -219,7 +216,6 @@ using float16 = uint16_t;
V_(LSOpc, 23, 22, Bits) \
V_(LSVector, 26, 26, Bits) \
V_(LSSize, 31, 30, Bits) \
- V_(ImmPrefetchOperation, 4, 0, Bits) \
\
/* NEON generic fields */ \
V_(NEONQ, 30, 30, Bits) \
@@ -447,27 +443,6 @@ enum SystemRegister {
ImmSystemRegister_offset
};
-enum PrefetchOperation {
- PLDL1KEEP = 0x00,
- PLDL1STRM = 0x01,
- PLDL2KEEP = 0x02,
- PLDL2STRM = 0x03,
- PLDL3KEEP = 0x04,
- PLDL3STRM = 0x05,
- PLIL1KEEP = 0x08,
- PLIL1STRM = 0x09,
- PLIL2KEEP = 0x0a,
- PLIL2STRM = 0x0b,
- PLIL3KEEP = 0x0c,
- PLIL3STRM = 0x0d,
- PSTL1KEEP = 0x10,
- PSTL1STRM = 0x11,
- PSTL2KEEP = 0x12,
- PSTL2STRM = 0x13,
- PSTL3KEEP = 0x14,
- PSTL3STRM = 0x15,
-};
-
// Instruction enumerations.
//
// These are the masks that define a class of instructions, and the list of
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
index 963f862f923..6a33f864ab7 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
@@ -548,7 +548,7 @@ void TurboAssembler::Fcmp(const VRegister& fn, double value) {
}
}
-void MacroAssembler::Fcsel(const VRegister& fd, const VRegister& fn,
+void TurboAssembler::Fcsel(const VRegister& fd, const VRegister& fn,
const VRegister& fm, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK((cond != al) && (cond != nv));
@@ -1036,6 +1036,9 @@ void TurboAssembler::Uxtw(const Register& rd, const Register& rn) {
void TurboAssembler::InitializeRootRegister() {
ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
Mov(kRootRegister, Operand(isolate_root));
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ Mov(kPointerCageBaseRegister, Operand(isolate_root));
+#endif
}
void MacroAssembler::SmiTag(Register dst, Register src) {
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index 7cd6027932b..a3570b80354 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -12,6 +12,7 @@
#include "src/codegen/external-reference-table.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/register-configuration.h"
+#include "src/codegen/reloc-info.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
@@ -22,7 +23,10 @@
#include "src/runtime/runtime.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
+#endif // V8_ENABLE_WEBASSEMBLY
// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
@@ -1766,25 +1770,36 @@ void TurboAssembler::JumpHelper(int64_t offset, RelocInfo::Mode rmode,
Bind(&done);
}
-namespace {
-
// The calculated offset is either:
// * the 'target' input unmodified if this is a Wasm call, or
+// * the offset of the target from the code range start, if this is a call to
+// un-embedded builtin, or
// * the offset of the target from the current PC, in instructions, for any
// other type of call.
-static int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode,
- byte* pc) {
+int64_t TurboAssembler::CalculateTargetOffset(Address target,
+ RelocInfo::Mode rmode, byte* pc) {
int64_t offset = static_cast<int64_t>(target);
- // The target of WebAssembly calls is still an index instead of an actual
- // address at this point, and needs to be encoded as-is.
- if (rmode != RelocInfo::WASM_CALL && rmode != RelocInfo::WASM_STUB_CALL) {
+ if (rmode == RelocInfo::WASM_CALL || rmode == RelocInfo::WASM_STUB_CALL) {
+ // The target of WebAssembly calls is still an index instead of an actual
+ // address at this point, and needs to be encoded as-is.
+ return offset;
+ }
+ if (RelocInfo::IsRuntimeEntry(rmode)) {
+ // The runtime entry targets are used for generating short builtin calls
+    // from JIT-compiled code (this mechanism is not used during snapshot
+    // creation).
+ // The value is encoded as an offset from the code range (see
+ // Assembler::runtime_entry_at()).
+    // Note that builtin-to-builtin calls use a different OFF_HEAP_TARGET mode
+ // and therefore are encoded differently.
+ DCHECK_NE(options().code_range_start, 0);
+ offset -= static_cast<int64_t>(options().code_range_start);
+ } else {
offset -= reinterpret_cast<int64_t>(pc);
- DCHECK_EQ(offset % kInstrSize, 0);
- offset = offset / static_cast<int>(kInstrSize);
}
+ DCHECK_EQ(offset % kInstrSize, 0);
+ offset = offset / static_cast<int>(kInstrSize);
return offset;
}
-} // namespace
void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
Condition cond) {
@@ -1801,14 +1816,8 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
int builtin_index = Builtins::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index)) {
// Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.AcquireX();
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Jump(scratch, cond);
+ CHECK_EQ(cond, Condition::al); // Implement if necessary.
+ TailCallBuiltin(builtin_index);
return;
}
}
@@ -1920,12 +1929,49 @@ void TurboAssembler::CallBuiltin(int builtin_index) {
DCHECK(Builtins::IsBuiltinId(builtin_index));
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.AcquireX();
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Call(scratch);
+ if (options().short_builtin_calls) {
+ EmbeddedData d = EmbeddedData::FromBlob(isolate());
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ Call(entry, RelocInfo::RUNTIME_ENTRY);
+
+ } else {
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX();
+ Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Call(scratch);
+ }
+ if (FLAG_code_comments) RecordComment("]");
+}
+
+void TurboAssembler::TailCallBuiltin(int builtin_index) {
+ DCHECK(Builtins::IsBuiltinId(builtin_index));
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ if (options().short_builtin_calls) {
+ EmbeddedData d = EmbeddedData::FromBlob(isolate());
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+
+ } else {
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ // The control flow integrity (CFI) feature allows us to "sign" code entry
+ // points as a target for calls, jumps or both. Arm64 has special
+ // instructions for this purpose, so-called "landing pads" (see
+ // TurboAssembler::CallTarget(), TurboAssembler::JumpTarget() and
+ // TurboAssembler::JumpOrCallTarget()). Currently, we generate "Call"
+ // landing pads for CPP builtins. In order to allow tail calling to those
+ // builtins we have to use a workaround.
+ // x17 is used to allow using "Call" (i.e. `bti c`) rather than "Jump"
+ // (i.e. `bti j`) landing pads for the tail-called code.
+ Register temp = x17;
+
+ Ldr(temp, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Jump(temp);
+ }
+ if (FLAG_code_comments) RecordComment("]");
}
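// Simplified sketch of the BTI rule the x17 workaround above relies on: an
// indirect branch (BR) is accepted by a "bti c" landing pad only when it goes
// through x16 or x17, while "bti j" pads accept BR from any register (see the
// Arm ARM for the authoritative rules; this helper is illustrative only).
enum class LandingPad { kBtiC, kBtiJ, kBtiJC };

bool BrAcceptedBy(int reg_code, LandingPad pad) {
  const bool via_x16_or_x17 = (reg_code == 16 || reg_code == 17);
  switch (pad) {
    case LandingPad::kBtiC:
      // This case is why TailCallBuiltin materializes the target in x17.
      return via_x16_or_x17;
    case LandingPad::kBtiJ:
    case LandingPad::kBtiJC:
      return true;
  }
  return false;
}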
void TurboAssembler::LoadCodeObjectEntry(Register destination,
@@ -2424,8 +2470,13 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
}
// DoubleToI preserves any registers it needs to clobber.
+#if V8_ENABLE_WEBASSEMBLY
if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
+#else
+  // Keep the if/else chain balanced when WebAssembly support is compiled out.
+ if (false) {
+#endif // V8_ENABLE_WEBASSEMBLY
} else if (options().inline_offheap_trampolines) {
CallBuiltin(Builtins::kDoubleToI);
} else {
@@ -2457,7 +2508,11 @@ void TurboAssembler::Prologue() {
void TurboAssembler::EnterFrame(StackFrame::Type type) {
UseScratchRegisterScope temps(this);
- if (type == StackFrame::INTERNAL || type == StackFrame::WASM_DEBUG_BREAK) {
+ if (type == StackFrame::INTERNAL
+#if V8_ENABLE_WEBASSEMBLY
+ || type == StackFrame::WASM_DEBUG_BREAK
+#endif // V8_ENABLE_WEBASSEMBLY
+ ) {
Register type_reg = temps.AcquireX();
Mov(type_reg, StackFrame::TypeToMarker(type));
Push<TurboAssembler::kSignLR>(lr, fp, type_reg, padreg);
@@ -2468,6 +2523,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
// sp[2] : fp
// sp[1] : type
// sp[0] : for alignment
+#if V8_ENABLE_WEBASSEMBLY
} else if (type == StackFrame::WASM ||
type == StackFrame::WASM_COMPILE_LAZY ||
type == StackFrame::WASM_EXIT) {
@@ -2480,6 +2536,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
// sp[2] : fp
// sp[1] : type
// sp[0] : for alignment
+#endif // V8_ENABLE_WEBASSEMBLY
} else if (type == StackFrame::CONSTRUCT) {
Register type_reg = temps.AcquireX();
Mov(type_reg, StackFrame::TypeToMarker(type));
@@ -2628,7 +2685,7 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
}
void MacroAssembler::LoadGlobalProxy(Register dst) {
- LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+ LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
}
void MacroAssembler::LoadWeakValue(Register out, Register in,
@@ -2803,14 +2860,14 @@ void TurboAssembler::DecompressTaggedPointer(const Register& destination,
const MemOperand& field_operand) {
RecordComment("[ DecompressTaggedPointer");
Ldr(destination.W(), field_operand);
- Add(destination, kRootRegister, destination);
+ Add(destination, kPointerCageBaseRegister, destination);
RecordComment("]");
}
void TurboAssembler::DecompressTaggedPointer(const Register& destination,
const Register& source) {
RecordComment("[ DecompressTaggedPointer");
- Add(destination, kRootRegister, Operand(source, UXTW));
+ Add(destination, kPointerCageBaseRegister, Operand(source, UXTW));
RecordComment("]");
}
@@ -2818,7 +2875,7 @@ void TurboAssembler::DecompressAnyTagged(const Register& destination,
const MemOperand& field_operand) {
RecordComment("[ DecompressAnyTagged");
Ldr(destination.W(), field_operand);
- Add(destination, kRootRegister, destination);
+ Add(destination, kPointerCageBaseRegister, destination);
RecordComment("]");
}
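// Minimal sketch of what the DecompressTagged* helpers compute: a 32-bit
// on-heap value is zero-extended and added to the cage base (x28 when
// V8_COMPRESS_POINTERS_IN_SHARED_CAGE is set, otherwise kRootRegister doubles
// as the base). Names here are illustrative, not V8 API.
#include <cstdint>

uint64_t SketchDecompress(uint64_t cage_base, uint32_t compressed) {
  // On arm64 this is a single `add dst, base, w_src, uxtw`.
  return cage_base + static_cast<uint64_t>(compressed);
}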
@@ -3117,7 +3174,7 @@ void TurboAssembler::Abort(AbortReason reason) {
TmpList()->set_list(old_tmp_list);
}
-void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
LoadMap(dst, cp);
LoadTaggedPointerField(
dst, FieldMemOperand(
@@ -3430,6 +3487,7 @@ void TurboAssembler::RestoreFPAndLR() {
#endif
}
+#if V8_ENABLE_WEBASSEMBLY
void TurboAssembler::StoreReturnAddressInWasmExitFrame(Label* return_location) {
UseScratchRegisterScope temps(this);
temps.Exclude(x16, x17);
@@ -3440,6 +3498,7 @@ void TurboAssembler::StoreReturnAddressInWasmExitFrame(Label* return_location) {
#endif
Str(x17, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
}
+#endif // V8_ENABLE_WEBASSEMBLY
void TurboAssembler::I64x2BitMask(Register dst, VRegister src) {
UseScratchRegisterScope scope(this);
@@ -3451,7 +3510,7 @@ void TurboAssembler::I64x2BitMask(Register dst, VRegister src) {
Add(dst.W(), dst.W(), Operand(tmp2.W(), LSL, 1));
}
-void TurboAssembler::V64x2AllTrue(Register dst, VRegister src) {
+void TurboAssembler::I64x2AllTrue(Register dst, VRegister src) {
UseScratchRegisterScope scope(this);
VRegister tmp = scope.AcquireV(kFormat2D);
Cmeq(tmp.V2D(), src.V2D(), 0);
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
index ef7bc151666..a749676cccd 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -698,6 +698,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
const Operand& operand);
void Csel(const Register& rd, const Register& rn, const Operand& operand,
Condition cond);
+ inline void Fcsel(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm, Condition cond);
// Emits a runtime assert that the stack pointer is aligned.
void AssertSpAligned();
@@ -975,7 +977,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register destination);
MemOperand EntryFromBuiltinIndexAsOperand(Builtins::Name builtin_index);
void CallBuiltinByIndex(Register builtin_index) override;
+ void CallBuiltin(Builtins::Name builtin) {
+ // TODO(11527): drop the int overload in favour of the Builtins::Name one.
+ return CallBuiltin(static_cast<int>(builtin));
+ }
void CallBuiltin(int builtin_index);
+ void TailCallBuiltin(Builtins::Name builtin) {
+ // TODO(11527): drop the int overload in favour of the Builtins::Name one.
+ return TailCallBuiltin(static_cast<int>(builtin));
+ }
+ void TailCallBuiltin(int builtin_index);
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
@@ -1374,13 +1385,15 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// authenticate the LR when pointer authentication is enabled.
void RestoreFPAndLR();
+#if V8_ENABLE_WEBASSEMBLY
void StoreReturnAddressInWasmExitFrame(Label* return_location);
+#endif // V8_ENABLE_WEBASSEMBLY
// Wasm SIMD helpers. These instructions don't have direct lowering to native
// instructions. These helpers allow us to define the optimal code sequence,
// and be used in both TurboFan and Liftoff.
void I64x2BitMask(Register dst, VRegister src);
- void V64x2AllTrue(Register dst, VRegister src);
+ void I64x2AllTrue(Register dst, VRegister src);
protected:
// The actual Push and Pop implementations. These don't generate any code
@@ -1447,6 +1460,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& addr, LoadStorePairOp op);
+ int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode,
+ byte* pc);
+
void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al);
void CallRecordWriteStub(Register object, Operand offset,
@@ -1497,8 +1513,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Condition cond);
inline void Extr(const Register& rd, const Register& rn, const Register& rm,
unsigned lsb);
- inline void Fcsel(const VRegister& fd, const VRegister& fn,
- const VRegister& fm, Condition cond);
void Fcvtl(const VRegister& vd, const VRegister& vn) {
DCHECK(allow_macro_instructions());
fcvtl(vd, vn);
@@ -2032,7 +2046,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Debugging.
- void LoadNativeContextSlot(int index, Register dst);
+ void LoadNativeContextSlot(Register dst, int index);
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h
index 31620ae9658..605856e51c1 100644
--- a/deps/v8/src/codegen/arm64/register-arm64.h
+++ b/deps/v8/src/codegen/arm64/register-arm64.h
@@ -30,11 +30,21 @@ namespace internal {
// x18 is the platform register and is reserved for the use of platform ABIs.
// It is known to be reserved by the OS at least on Windows and iOS.
-#define ALLOCATABLE_GENERAL_REGISTERS(R) \
+#define ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(R) \
R(x0) R(x1) R(x2) R(x3) R(x4) R(x5) R(x6) R(x7) \
R(x8) R(x9) R(x10) R(x11) R(x12) R(x13) R(x14) R(x15) \
R(x19) R(x20) R(x21) R(x22) R(x23) R(x24) R(x25) \
- R(x27) R(x28)
+ R(x27)
+
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#define MAYBE_ALLOCATABLE_GENERAL_REGISTERS(R)
+#else
+#define MAYBE_ALLOCATABLE_GENERAL_REGISTERS(R) R(x28)
+#endif
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(V) \
+ MAYBE_ALLOCATABLE_GENERAL_REGISTERS(V)
#define FLOAT_REGISTERS(V) \
V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) \
@@ -241,7 +251,14 @@ class Register : public CPURegister {
ASSERT_TRIVIALLY_COPYABLE(Register);
-constexpr bool kPadArguments = true;
+// Stack frame alignment and padding.
+constexpr int ArgumentPaddingSlots(int argument_count) {
+ // Stack frames are aligned to 16 bytes.
+ constexpr int kStackFrameAlignment = 16;
+ constexpr int alignment_mask = kStackFrameAlignment / kSystemPointerSize - 1;
+ return argument_count & alignment_mask;
+}
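// Worked example of the padding rule above, restated as a standalone constexpr
// sketch with kSystemPointerSize assumed to be 8 (so one 16-byte alignment
// unit holds two argument slots):
constexpr int SketchArgumentPaddingSlots(int argument_count) {
  constexpr int kSlotsPerAlignmentUnit = 16 / 8;
  return argument_count & (kSlotsPerAlignmentUnit - 1);
}
static_assert(SketchArgumentPaddingSlots(0) == 0, "already aligned");
static_assert(SketchArgumentPaddingSlots(1) == 1, "one slot of padding");
static_assert(SketchArgumentPaddingSlots(2) == 0, "two slots fill the unit");
static_assert(SketchArgumentPaddingSlots(3) == 1, "pad back to an even count");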
+
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
@@ -458,6 +475,12 @@ ALIAS_REGISTER(Register, wip1, w17);
// Root register.
ALIAS_REGISTER(Register, kRootRegister, x26);
ALIAS_REGISTER(Register, rr, x26);
+// Pointer cage base register.
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ALIAS_REGISTER(Register, kPointerCageBaseRegister, x28);
+#else
+ALIAS_REGISTER(Register, kPointerCageBaseRegister, kRootRegister);
+#endif
// Context pointer register.
ALIAS_REGISTER(Register, cp, x27);
ALIAS_REGISTER(Register, fp, x29);
@@ -524,8 +547,6 @@ using Simd128Register = VRegister;
// Lists of registers.
class V8_EXPORT_PRIVATE CPURegList {
public:
- CPURegList() = default;
-
template <typename... CPURegisters>
explicit CPURegList(CPURegister reg0, CPURegisters... regs)
: list_(CPURegister::ListOf(reg0, regs...)),
diff --git a/deps/v8/src/codegen/assembler.cc b/deps/v8/src/codegen/assembler.cc
index 0b71701d31a..95983705abd 100644
--- a/deps/v8/src/codegen/assembler.cc
+++ b/deps/v8/src/codegen/assembler.cc
@@ -73,6 +73,12 @@ AssemblerOptions AssemblerOptions::Default(Isolate* isolate) {
DCHECK_IMPLIES(code_range.begin() != kNullAddress, !code_range.is_empty());
options.code_range_start = code_range.begin();
#endif
+ options.short_builtin_calls =
+ isolate->is_short_builtin_calls_enabled() &&
+ !generating_embedded_builtin &&
+ (options.code_range_start != kNullAddress) &&
+ // Serialization of RUNTIME_ENTRY reloc infos is not supported yet.
+ !serializer;
return options;
}
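// The enabling conditions above, restated as a standalone predicate for
// readability (parameter names are illustrative; the real decision is made
// inline in AssemblerOptions::Default()):
#include <cstdint>

bool SketchShortBuiltinCallsEnabled(bool isolate_enables_short_calls,
                                    bool generating_embedded_builtin,
                                    uint64_t code_range_start,
                                    bool serializer_enabled) {
  return isolate_enables_short_calls &&
         !generating_embedded_builtin &&  // not while building the snapshot blob
         code_range_start != 0 &&         // need a code range to stay in reach
         !serializer_enabled;  // RUNTIME_ENTRY relocs are not serializable yet
}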
diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h
index 751799127d2..70669059664 100644
--- a/deps/v8/src/codegen/assembler.h
+++ b/deps/v8/src/codegen/assembler.h
@@ -169,6 +169,11 @@ struct V8_EXPORT_PRIVATE AssemblerOptions {
// Enables the use of isolate-independent builtins through an off-heap
// trampoline. (macro assembler feature).
bool inline_offheap_trampolines = true;
+ // Enables generation of pc-relative calls to builtins if the off-heap
+ // builtins are guaranteed to be within the reach of pc-relative call or jump
+  // instructions. For example, when the builtins code is re-embedded into the
+ // code range.
+ bool short_builtin_calls = false;
// On some platforms, all code is within a given range in the process,
// and the start of this range is configured here.
Address code_range_start = 0;
diff --git a/deps/v8/src/codegen/code-factory.cc b/deps/v8/src/codegen/code-factory.cc
index ceabbac8071..ece8200023d 100644
--- a/deps/v8/src/codegen/code-factory.cc
+++ b/deps/v8/src/codegen/code-factory.cc
@@ -405,6 +405,13 @@ Callable CodeFactory::InterpreterOnStackReplacement(Isolate* isolate) {
}
// static
+Callable CodeFactory::InterpreterOnStackReplacement_ToBaseline(
+ Isolate* isolate) {
+ return Builtins::CallableFor(
+ isolate, Builtins::kInterpreterOnStackReplacement_ToBaseline);
+}
+
+// static
Callable CodeFactory::ArrayNoArgumentConstructor(
Isolate* isolate, ElementsKind kind,
AllocationSiteOverrideMode override_mode) {
diff --git a/deps/v8/src/codegen/code-factory.h b/deps/v8/src/codegen/code-factory.h
index b98c576b9e0..aab29770453 100644
--- a/deps/v8/src/codegen/code-factory.h
+++ b/deps/v8/src/codegen/code-factory.h
@@ -92,6 +92,7 @@ class V8_EXPORT_PRIVATE CodeFactory final {
Isolate* isolate, InterpreterPushArgsMode mode);
static Callable InterpreterCEntry(Isolate* isolate, int result_size = 1);
static Callable InterpreterOnStackReplacement(Isolate* isolate);
+ static Callable InterpreterOnStackReplacement_ToBaseline(Isolate* isolate);
static Callable ArrayNoArgumentConstructor(
Isolate* isolate, ElementsKind kind,
diff --git a/deps/v8/src/codegen/code-reference.cc b/deps/v8/src/codegen/code-reference.cc
index 63c8d374974..0c550fa0d30 100644
--- a/deps/v8/src/codegen/code-reference.cc
+++ b/deps/v8/src/codegen/code-reference.cc
@@ -8,7 +8,10 @@
#include "src/common/globals.h"
#include "src/handles/handles-inl.h"
#include "src/objects/objects-inl.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -28,6 +31,7 @@ struct JSOps {
int code_comments_size() const { return code->code_comments_size(); }
};
+#if V8_ENABLE_WEBASSEMBLY
struct WasmOps {
const wasm::WasmCode* code;
@@ -48,6 +52,7 @@ struct WasmOps {
Address code_comments() const { return code->code_comments(); }
int code_comments_size() const { return code->code_comments_size(); }
};
+#endif // V8_ENABLE_WEBASSEMBLY
struct CodeDescOps {
const CodeDesc* code_desc;
@@ -76,6 +81,7 @@ struct CodeDescOps {
};
} // namespace
+#if V8_ENABLE_WEBASSEMBLY
#define DISPATCH(ret, method) \
ret CodeReference::method() const { \
DCHECK(!is_null()); \
@@ -90,6 +96,18 @@ struct CodeDescOps {
UNREACHABLE(); \
} \
}
+#else
+#define DISPATCH(ret, method) \
+ ret CodeReference::method() const { \
+ DCHECK(!is_null()); \
+ DCHECK(kind_ == JS || kind_ == CODE_DESC); \
+ if (kind_ == JS) { \
+ return JSOps{js_code_}.method(); \
+ } else { \
+ return CodeDescOps{code_desc_}.method(); \
+ } \
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
DISPATCH(Address, constant_pool)
DISPATCH(Address, instruction_start)
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index 90e34d4dbdc..0b039e40fac 100644
--- a/deps/v8/src/codegen/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -4,6 +4,8 @@
#include "src/codegen/code-stub-assembler.h"
+#include <functional>
+
#include "include/v8-internal.h"
#include "src/base/macros.h"
#include "src/codegen/code-factory.h"
@@ -25,13 +27,14 @@
#include "src/objects/ordered-hash-table-inl.h"
#include "src/objects/property-cell.h"
#include "src/roots/roots.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
-using compiler::Node;
-
CodeStubAssembler::CodeStubAssembler(compiler::CodeAssemblerState* state)
: compiler::CodeAssembler(state),
TorqueGeneratedExportedMacrosAssembler(state) {
@@ -349,7 +352,7 @@ TNode<Float64T> CodeStubAssembler::Float64Round(TNode<Float64T> x) {
Goto(&return_x);
BIND(&return_x);
- return TNode<Float64T>::UncheckedCast(var_x.value());
+ return var_x.value();
}
TNode<Float64T> CodeStubAssembler::Float64Ceil(TNode<Float64T> x) {
@@ -401,7 +404,7 @@ TNode<Float64T> CodeStubAssembler::Float64Ceil(TNode<Float64T> x) {
Goto(&return_x);
BIND(&return_x);
- return TNode<Float64T>::UncheckedCast(var_x.value());
+ return var_x.value();
}
TNode<Float64T> CodeStubAssembler::Float64Floor(TNode<Float64T> x) {
@@ -453,7 +456,7 @@ TNode<Float64T> CodeStubAssembler::Float64Floor(TNode<Float64T> x) {
Goto(&return_x);
BIND(&return_x);
- return TNode<Float64T>::UncheckedCast(var_x.value());
+ return var_x.value();
}
TNode<Float64T> CodeStubAssembler::Float64RoundToEven(TNode<Float64T> x) {
@@ -484,7 +487,7 @@ TNode<Float64T> CodeStubAssembler::Float64RoundToEven(TNode<Float64T> x) {
Goto(&done);
BIND(&done);
- return TNode<Float64T>::UncheckedCast(var_result.value());
+ return var_result.value();
}
TNode<Float64T> CodeStubAssembler::Float64Trunc(TNode<Float64T> x) {
@@ -545,7 +548,137 @@ TNode<Float64T> CodeStubAssembler::Float64Trunc(TNode<Float64T> x) {
Goto(&return_x);
BIND(&return_x);
- return TNode<Float64T>::UncheckedCast(var_x.value());
+ return var_x.value();
+}
+
+TNode<IntPtrT> CodeStubAssembler::PopulationCountFallback(
+ TNode<UintPtrT> value) {
+  // Taken from the slow path of base::bits::CountPopulation; the comments
+  // below show the C++ code and comments from there for reference.
+ // Fall back to divide-and-conquer popcount (see "Hacker's Delight" by Henry
+ // S. Warren, Jr.), chapter 5-1.
+ constexpr uintptr_t mask[] = {static_cast<uintptr_t>(0x5555555555555555),
+ static_cast<uintptr_t>(0x3333333333333333),
+ static_cast<uintptr_t>(0x0f0f0f0f0f0f0f0f)};
+
+ // TNode<UintPtrT> value = Unsigned(value_word);
+ TNode<UintPtrT> lhs, rhs;
+
+ // Start with 64 buckets of 1 bits, holding values from [0,1].
+ // {value = ((value >> 1) & mask[0]) + (value & mask[0])}
+ lhs = WordAnd(WordShr(value, UintPtrConstant(1)), UintPtrConstant(mask[0]));
+ rhs = WordAnd(value, UintPtrConstant(mask[0]));
+ value = UintPtrAdd(lhs, rhs);
+
+ // Having 32 buckets of 2 bits, holding values from [0,2] now.
+ // {value = ((value >> 2) & mask[1]) + (value & mask[1])}
+ lhs = WordAnd(WordShr(value, UintPtrConstant(2)), UintPtrConstant(mask[1]));
+ rhs = WordAnd(value, UintPtrConstant(mask[1]));
+ value = UintPtrAdd(lhs, rhs);
+
+ // Having 16 buckets of 4 bits, holding values from [0,4] now.
+ // {value = ((value >> 4) & mask[2]) + (value & mask[2])}
+ lhs = WordAnd(WordShr(value, UintPtrConstant(4)), UintPtrConstant(mask[2]));
+ rhs = WordAnd(value, UintPtrConstant(mask[2]));
+ value = UintPtrAdd(lhs, rhs);
+
+ // Having 8 buckets of 8 bits, holding values from [0,8] now.
+ // From this point on, the buckets are bigger than the number of bits
+  // required to hold the values, and the buckets are bigger than the maximum
+  // result, so there's no need to mask value anymore, since there's no
+ // more risk of overflow between buckets.
+ // {value = (value >> 8) + value}
+ lhs = WordShr(value, UintPtrConstant(8));
+ value = UintPtrAdd(lhs, value);
+
+ // Having 4 buckets of 16 bits, holding values from [0,16] now.
+ // {value = (value >> 16) + value}
+ lhs = WordShr(value, UintPtrConstant(16));
+ value = UintPtrAdd(lhs, value);
+
+ if (Is64()) {
+ // Having 2 buckets of 32 bits, holding values from [0,32] now.
+ // {value = (value >> 32) + value}
+ lhs = WordShr(value, UintPtrConstant(32));
+ value = UintPtrAdd(lhs, value);
+ }
+
+  // Having 1 bucket of 8 * sizeof(intptr_t) bits, holding values from [0,64].
+ // {return static_cast<unsigned>(value & 0xff)}
+ return Signed(WordAnd(value, UintPtrConstant(0xff)));
+}
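// Plain C++ rendering of the divide-and-conquer fallback generated above
// (64-bit case), handy for checking the bucket arithmetic by hand; this
// mirrors base::bits::CountPopulation rather than adding anything new:
#include <cstdint>

unsigned SketchPopcount64(uint64_t v) {
  v = ((v >> 1) & 0x5555555555555555ull) + (v & 0x5555555555555555ull);
  v = ((v >> 2) & 0x3333333333333333ull) + (v & 0x3333333333333333ull);
  v = ((v >> 4) & 0x0f0f0f0f0f0f0f0full) + (v & 0x0f0f0f0f0f0f0f0full);
  v += v >> 8;   // buckets now exceed the max result, masking is unnecessary
  v += v >> 16;
  v += v >> 32;
  return static_cast<unsigned>(v & 0xff);
}
// e.g. SketchPopcount64(0b1011) == 3.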
+
+TNode<Int64T> CodeStubAssembler::PopulationCount64(TNode<Word64T> value) {
+ if (IsWord64PopcntSupported()) {
+ return Word64Popcnt(value);
+ }
+
+ if (Is32()) {
+ // Unsupported.
+ UNREACHABLE();
+ }
+
+ return ReinterpretCast<Int64T>(
+ PopulationCountFallback(ReinterpretCast<UintPtrT>(value)));
+}
+
+TNode<Int32T> CodeStubAssembler::PopulationCount32(TNode<Word32T> value) {
+ if (IsWord32PopcntSupported()) {
+ return Word32Popcnt(value);
+ }
+
+ if (Is32()) {
+ TNode<IntPtrT> res =
+ PopulationCountFallback(ReinterpretCast<UintPtrT>(value));
+ return ReinterpretCast<Int32T>(res);
+ } else {
+ TNode<IntPtrT> res = PopulationCountFallback(
+ ReinterpretCast<UintPtrT>(ChangeUint32ToUint64(value)));
+ return TruncateInt64ToInt32(ReinterpretCast<Int64T>(res));
+ }
+}
+
+TNode<Int64T> CodeStubAssembler::CountTrailingZeros64(TNode<Word64T> value) {
+ if (IsWord64CtzSupported()) {
+ return Word64Ctz(value);
+ }
+
+ if (Is32()) {
+ // Unsupported.
+ UNREACHABLE();
+ }
+
+ // Same fallback as in base::bits::CountTrailingZeros.
+ // Fall back to popcount (see "Hacker's Delight" by Henry S. Warren, Jr.),
+  // chapter 5-4. On x64, this is faster than counting in a loop and faster
+  // than doing a binary search.
+ TNode<Word64T> lhs = Word64Not(value);
+ TNode<Word64T> rhs = Uint64Sub(Unsigned(value), Uint64Constant(1));
+ return PopulationCount64(Word64And(lhs, rhs));
+}
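// Worked example of the identity used above: for v = 0b10100,
// ~v & (v - 1) == 0b00011, whose population count is 2, exactly the number of
// trailing zero bits of v. A standalone sketch (loop popcount only to keep it
// self-contained; the CSA code above reuses PopulationCount64):
#include <cstdint>

unsigned SketchCtz64(uint64_t v) {
  uint64_t mask = ~v & (v - 1);  // one set bit per trailing zero of v
  unsigned count = 0;
  while (mask != 0) {
    count += static_cast<unsigned>(mask & 1);
    mask >>= 1;
  }
  return count;  // for v == 0 this yields 64, the usual convention
}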
+
+TNode<Int32T> CodeStubAssembler::CountTrailingZeros32(TNode<Word32T> value) {
+ if (IsWord32CtzSupported()) {
+ return Word32Ctz(value);
+ }
+
+ if (Is32()) {
+ // Same fallback as in Word64CountTrailingZeros.
+ TNode<Word32T> lhs = Word32BitwiseNot(value);
+ TNode<Word32T> rhs = Int32Sub(Signed(value), Int32Constant(1));
+ return PopulationCount32(Word32And(lhs, rhs));
+ } else {
+ TNode<Int64T> res64 = CountTrailingZeros64(ChangeUint32ToUint64(value));
+ return TruncateInt64ToInt32(Signed(res64));
+ }
+}
+
+TNode<Int64T> CodeStubAssembler::CountLeadingZeros64(TNode<Word64T> value) {
+ return Word64Clz(value);
+}
+
+TNode<Int32T> CodeStubAssembler::CountLeadingZeros32(TNode<Word32T> value) {
+ return Word32Clz(value);
}
template <>
@@ -610,7 +743,7 @@ TNode<Smi> CodeStubAssembler::NormalizeSmiIndex(TNode<Smi> smi_index) {
return smi_index;
}
-TNode<Smi> CodeStubAssembler::SmiFromInt32(SloppyTNode<Int32T> value) {
+TNode<Smi> CodeStubAssembler::SmiFromInt32(TNode<Int32T> value) {
if (COMPRESS_POINTERS_BOOL) {
static_assert(!COMPRESS_POINTERS_BOOL || (kSmiShiftSize + kSmiTagSize == 1),
"Use shifting instead of add");
@@ -1590,16 +1723,16 @@ TNode<HeapObject> CodeStubAssembler::LoadSlowProperties(
CSA_SLOW_ASSERT(this, IsDictionaryMap(LoadMap(object)));
TNode<Object> properties = LoadJSReceiverPropertiesOrHash(object);
NodeGenerator<HeapObject> make_empty = [=]() -> TNode<HeapObject> {
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- return EmptyOrderedPropertyDictionaryConstant();
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ return EmptySwissPropertyDictionaryConstant();
} else {
return EmptyPropertyDictionaryConstant();
}
};
NodeGenerator<HeapObject> cast_properties = [=] {
TNode<HeapObject> dict = CAST(properties);
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- CSA_ASSERT(this, Word32Or(IsOrderedNameDictionary(dict),
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ CSA_ASSERT(this, Word32Or(IsSwissNameDictionary(dict),
IsGlobalDictionary(dict)));
} else {
CSA_ASSERT(this,
@@ -1775,27 +1908,25 @@ TNode<Uint32T> CodeStubAssembler::EnsureOnlyHasSimpleProperties(
}
TNode<IntPtrT> CodeStubAssembler::LoadJSReceiverIdentityHash(
- TNode<Object> receiver, Label* if_no_hash) {
+ TNode<JSReceiver> receiver, Label* if_no_hash) {
TVARIABLE(IntPtrT, var_hash);
Label done(this), if_smi(this), if_property_array(this),
- if_ordered_property_dictionary(this), if_property_dictionary(this),
+ if_swiss_property_dictionary(this), if_property_dictionary(this),
if_fixed_array(this);
TNode<Object> properties_or_hash =
- LoadObjectField(TNode<HeapObject>::UncheckedCast(receiver),
- JSReceiver::kPropertiesOrHashOffset);
+ LoadObjectField(receiver, JSReceiver::kPropertiesOrHashOffset);
GotoIf(TaggedIsSmi(properties_or_hash), &if_smi);
- TNode<HeapObject> properties =
- TNode<HeapObject>::UncheckedCast(properties_or_hash);
+ TNode<HeapObject> properties = CAST(properties_or_hash);
TNode<Uint16T> properties_instance_type = LoadInstanceType(properties);
GotoIf(InstanceTypeEqual(properties_instance_type, PROPERTY_ARRAY_TYPE),
&if_property_array);
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- GotoIf(InstanceTypeEqual(properties_instance_type,
- ORDERED_NAME_DICTIONARY_TYPE),
- &if_ordered_property_dictionary);
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ GotoIf(
+ InstanceTypeEqual(properties_instance_type, SWISS_NAME_DICTIONARY_TYPE),
+ &if_swiss_property_dictionary);
}
Branch(InstanceTypeEqual(properties_instance_type, NAME_DICTIONARY_TYPE),
&if_property_dictionary, &if_fixed_array);
@@ -1808,7 +1939,7 @@ TNode<IntPtrT> CodeStubAssembler::LoadJSReceiverIdentityHash(
BIND(&if_smi);
{
- var_hash = SmiUntag(TNode<Smi>::UncheckedCast(properties_or_hash));
+ var_hash = SmiUntag(CAST(properties_or_hash));
Goto(&done);
}
@@ -1816,15 +1947,14 @@ TNode<IntPtrT> CodeStubAssembler::LoadJSReceiverIdentityHash(
{
TNode<IntPtrT> length_and_hash = LoadAndUntagObjectField(
properties, PropertyArray::kLengthAndHashOffset);
- var_hash = TNode<IntPtrT>::UncheckedCast(
- DecodeWord<PropertyArray::HashField>(length_and_hash));
+ var_hash = Signed(DecodeWord<PropertyArray::HashField>(length_and_hash));
Goto(&done);
}
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- BIND(&if_ordered_property_dictionary);
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ BIND(&if_swiss_property_dictionary);
{
- var_hash = SmiUntag(CAST(LoadFixedArrayElement(
- CAST(properties), OrderedNameDictionary::HashIndex())));
+ var_hash = Signed(
+ ChangeUint32ToWord(LoadSwissNameDictionaryHash(CAST(properties))));
Goto(&done);
}
}
@@ -2694,9 +2824,8 @@ TNode<BoolT> CodeStubAssembler::IsGeneratorFunction(
TNode<BoolT> CodeStubAssembler::IsJSFunctionWithPrototypeSlot(
TNode<HeapObject> object) {
// Only JSFunction maps may have HasPrototypeSlotBit set.
- return TNode<BoolT>::UncheckedCast(
- IsSetWord32<Map::Bits1::HasPrototypeSlotBit>(
- LoadMapBitField(LoadMap(object))));
+ return IsSetWord32<Map::Bits1::HasPrototypeSlotBit>(
+ LoadMapBitField(LoadMap(object)));
}
void CodeStubAssembler::BranchIfHasPrototypeProperty(
@@ -2784,9 +2913,19 @@ void CodeStubAssembler::StoreHeapNumberValue(TNode<HeapNumber> object,
}
void CodeStubAssembler::StoreObjectField(TNode<HeapObject> object, int offset,
+ TNode<Smi> value) {
+ StoreObjectFieldNoWriteBarrier(object, offset, value);
+}
+
+void CodeStubAssembler::StoreObjectField(TNode<HeapObject> object,
+ TNode<IntPtrT> offset,
+ TNode<Smi> value) {
+ StoreObjectFieldNoWriteBarrier(object, offset, value);
+}
+
+void CodeStubAssembler::StoreObjectField(TNode<HeapObject> object, int offset,
TNode<Object> value) {
DCHECK_NE(HeapObject::kMapOffset, offset); // Use StoreMap instead.
-
OptimizedStoreField(MachineRepresentation::kTagged,
UncheckedCast<HeapObject>(object), offset, value);
}
@@ -2867,7 +3006,7 @@ void CodeStubAssembler::StoreFixedArrayOrPropertyArrayElement(
[=] {
TNode<IntPtrT> length_and_hash = LoadAndUntagObjectField(
object, PropertyArray::kLengthAndHashOffset);
- return TNode<IntPtrT>::UncheckedCast(
+ return Signed(
DecodeWord<PropertyArray::LengthField>(length_and_hash));
},
[=] {
@@ -3109,7 +3248,7 @@ TNode<HeapNumber> CodeStubAssembler::AllocateHeapNumber() {
}
TNode<HeapNumber> CodeStubAssembler::AllocateHeapNumberWithValue(
- SloppyTNode<Float64T> value) {
+ TNode<Float64T> value) {
TNode<HeapNumber> result = AllocateHeapNumber();
StoreHeapNumberValue(result, value);
return result;
@@ -3205,8 +3344,33 @@ TNode<UintPtrT> CodeStubAssembler::LoadBigIntDigit(TNode<BigInt> bigint,
return LoadObjectField<UintPtrT>(bigint, offset);
}
+TNode<ByteArray> CodeStubAssembler::AllocateNonEmptyByteArray(
+ TNode<UintPtrT> length, AllocationFlags flags) {
+ CSA_ASSERT(this, WordNotEqual(length, IntPtrConstant(0)));
+
+ Comment("AllocateNonEmptyByteArray");
+ TVARIABLE(Object, var_result);
+
+ TNode<IntPtrT> raw_size =
+ GetArrayAllocationSize(Signed(length), UINT8_ELEMENTS,
+ ByteArray::kHeaderSize + kObjectAlignmentMask);
+ TNode<IntPtrT> size =
+ WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
+
+ TNode<HeapObject> result = Allocate(size, flags);
+
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kByteArrayMap));
+ StoreMapNoWriteBarrier(result, RootIndex::kByteArrayMap);
+ StoreObjectFieldNoWriteBarrier(result, ByteArray::kLengthOffset,
+ SmiTag(Signed(length)));
+
+ return CAST(result);
+}
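// Worked example of the size computation above, with assumed constants
// (header of 16 bytes, 8-byte object alignment; the real values come from
// ByteArray::kHeaderSize and kObjectAlignmentMask):
#include <cstdint>

constexpr uint64_t kSketchHeaderSize = 16;
constexpr uint64_t kSketchAlignmentMask = 8 - 1;

constexpr uint64_t SketchByteArrayAllocationSize(uint64_t length) {
  // Adding the mask before clearing the low bits rounds header + length up to
  // the next alignment boundary, matching raw_size & ~kObjectAlignmentMask.
  return (kSketchHeaderSize + kSketchAlignmentMask + length) &
         ~kSketchAlignmentMask;
}
static_assert(SketchByteArrayAllocationSize(5) == 24, "16 + 5 rounded up to 24");
static_assert(SketchByteArrayAllocationSize(8) == 24, "16 + 8 already aligned");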
+
TNode<ByteArray> CodeStubAssembler::AllocateByteArray(TNode<UintPtrT> length,
AllocationFlags flags) {
+ // TODO(ishell): unify with AllocateNonEmptyByteArray().
+
Comment("AllocateByteArray");
TVARIABLE(Object, var_result);
@@ -3584,7 +3748,7 @@ void CodeStubAssembler::InitializeJSObjectFromMap(
} else {
CSA_ASSERT(this, Word32Or(Word32Or(Word32Or(IsPropertyArray(*properties),
IsNameDictionary(*properties)),
- IsOrderedNameDictionary(*properties)),
+ IsSwissNameDictionary(*properties)),
IsEmptyFixedArray(*properties)));
StoreObjectFieldNoWriteBarrier(object, JSObject::kPropertiesOrHashOffset,
*properties);
@@ -5423,8 +5587,7 @@ TNode<Number> CodeStubAssembler::ChangeFloat64ToTagged(TNode<Float64T> value) {
return var_result.value();
}
-TNode<Number> CodeStubAssembler::ChangeInt32ToTagged(
- SloppyTNode<Int32T> value) {
+TNode<Number> CodeStubAssembler::ChangeInt32ToTagged(TNode<Int32T> value) {
if (SmiValuesAre32Bits()) {
return SmiTag(ChangeInt32ToIntPtr(value));
}
@@ -5454,8 +5617,7 @@ TNode<Number> CodeStubAssembler::ChangeInt32ToTagged(
return var_result.value();
}
-TNode<Number> CodeStubAssembler::ChangeUint32ToTagged(
- SloppyTNode<Uint32T> value) {
+TNode<Number> CodeStubAssembler::ChangeUint32ToTagged(TNode<Uint32T> value) {
Label if_overflow(this, Label::kDeferred), if_not_overflow(this),
if_join(this);
TVARIABLE(Number, var_result);
@@ -5507,6 +5669,10 @@ TNode<Number> CodeStubAssembler::ChangeUintPtrToTagged(TNode<UintPtrT> value) {
return var_result.value();
}
+TNode<Int32T> CodeStubAssembler::ChangeBoolToInt32(TNode<BoolT> b) {
+ return UncheckedCast<Int32T>(b);
+}
+
TNode<String> CodeStubAssembler::ToThisString(TNode<Context> context,
TNode<Object> value,
TNode<String> method_name) {
@@ -6402,6 +6568,11 @@ TNode<BoolT> CodeStubAssembler::IsOrderedNameDictionary(
return HasInstanceType(object, ORDERED_NAME_DICTIONARY_TYPE);
}
+TNode<BoolT> CodeStubAssembler::IsSwissNameDictionary(
+ TNode<HeapObject> object) {
+ return HasInstanceType(object, SWISS_NAME_DICTIONARY_TYPE);
+}
+
TNode<BoolT> CodeStubAssembler::IsGlobalDictionary(TNode<HeapObject> object) {
return HasInstanceType(object, GLOBAL_DICTIONARY_TYPE);
}
@@ -7858,6 +8029,108 @@ TNode<MaybeObject> CodeStubAssembler::LoadFieldTypeByDescriptorEntry(
DescriptorArray::ToValueIndex(0) * kTaggedSize);
}
+// Loads the value for the entry with the given key_index.
+// Returns a tagged value.
+template <class ContainerType>
+TNode<Object> CodeStubAssembler::LoadValueByKeyIndex(
+ TNode<ContainerType> container, TNode<IntPtrT> key_index) {
+ static_assert(!std::is_same<ContainerType, DescriptorArray>::value,
+ "Use the non-templatized version for DescriptorArray");
+ const int kKeyToValueOffset =
+ (ContainerType::kEntryValueIndex - ContainerType::kEntryKeyIndex) *
+ kTaggedSize;
+ return LoadFixedArrayElement(container, key_index, kKeyToValueOffset);
+}
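// Sketch of the offset arithmetic above for a dictionary whose entries are
// laid out as consecutive tagged slots (the entry indices below are
// placeholders standing in for ContainerType::kEntry*Index; a kTaggedSize of
// 8 assumes no pointer compression):
constexpr int kSketchTaggedSize = 8;
constexpr int kSketchEntryKeyIndex = 0;
constexpr int kSketchEntryValueIndex = 1;
// Loading the value from the key's index only needs this constant byte delta:
constexpr int kSketchKeyToValueOffset =
    (kSketchEntryValueIndex - kSketchEntryKeyIndex) * kSketchTaggedSize;
static_assert(kSketchKeyToValueOffset == 8,
              "value sits one tagged slot past the key");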
+
+template <>
+V8_EXPORT_PRIVATE TNode<Object> CodeStubAssembler::LoadValueByKeyIndex(
+ TNode<SwissNameDictionary> container, TNode<IntPtrT> key_index) {
+ TNode<IntPtrT> offset_minus_tag = SwissNameDictionaryOffsetIntoDataTableMT(
+ container, key_index, SwissNameDictionary::kDataTableValueEntryIndex);
+
+ return Load<Object>(container, offset_minus_tag);
+}
+
+template <class ContainerType>
+TNode<Uint32T> CodeStubAssembler::LoadDetailsByKeyIndex(
+ TNode<ContainerType> container, TNode<IntPtrT> key_index) {
+ static_assert(!std::is_same<ContainerType, DescriptorArray>::value,
+ "Use the non-templatized version for DescriptorArray");
+ const int kKeyToDetailsOffset =
+ (ContainerType::kEntryDetailsIndex - ContainerType::kEntryKeyIndex) *
+ kTaggedSize;
+ return Unsigned(LoadAndUntagToWord32FixedArrayElement(container, key_index,
+ kKeyToDetailsOffset));
+}
+
+template <>
+V8_EXPORT_PRIVATE TNode<Uint32T> CodeStubAssembler::LoadDetailsByKeyIndex(
+ TNode<SwissNameDictionary> container, TNode<IntPtrT> key_index) {
+ TNode<IntPtrT> capacity =
+ ChangeInt32ToIntPtr(LoadSwissNameDictionaryCapacity(container));
+ return LoadSwissNameDictionaryPropertyDetails(container, capacity, key_index);
+}
+
+// Stores the details for the entry with the given key_index.
+// |details| must be a Smi.
+template <class ContainerType>
+void CodeStubAssembler::StoreDetailsByKeyIndex(TNode<ContainerType> container,
+ TNode<IntPtrT> key_index,
+ TNode<Smi> details) {
+ const int kKeyToDetailsOffset =
+ (ContainerType::kEntryDetailsIndex - ContainerType::kEntryKeyIndex) *
+ kTaggedSize;
+ StoreFixedArrayElement(container, key_index, details, kKeyToDetailsOffset);
+}
+
+template <>
+V8_EXPORT_PRIVATE void CodeStubAssembler::StoreDetailsByKeyIndex(
+ TNode<SwissNameDictionary> container, TNode<IntPtrT> key_index,
+ TNode<Smi> details) {
+ TNode<IntPtrT> capacity =
+ ChangeInt32ToIntPtr(LoadSwissNameDictionaryCapacity(container));
+ TNode<Uint8T> details_byte = UncheckedCast<Uint8T>(SmiToInt32(details));
+ StoreSwissNameDictionaryPropertyDetails(container, capacity, key_index,
+ details_byte);
+}
+
+// Stores the value for the entry with the given key_index.
+template <class ContainerType>
+void CodeStubAssembler::StoreValueByKeyIndex(TNode<ContainerType> container,
+ TNode<IntPtrT> key_index,
+ TNode<Object> value,
+ WriteBarrierMode write_barrier) {
+ const int kKeyToValueOffset =
+ (ContainerType::kEntryValueIndex - ContainerType::kEntryKeyIndex) *
+ kTaggedSize;
+ StoreFixedArrayElement(container, key_index, value, write_barrier,
+ kKeyToValueOffset);
+}
+
+template <>
+V8_EXPORT_PRIVATE void CodeStubAssembler::StoreValueByKeyIndex(
+ TNode<SwissNameDictionary> container, TNode<IntPtrT> key_index,
+ TNode<Object> value, WriteBarrierMode write_barrier) {
+ TNode<IntPtrT> offset_minus_tag = SwissNameDictionaryOffsetIntoDataTableMT(
+ container, key_index, SwissNameDictionary::kDataTableValueEntryIndex);
+
+ StoreToObjectWriteBarrier mode;
+ switch (write_barrier) {
+ case UNSAFE_SKIP_WRITE_BARRIER:
+ case SKIP_WRITE_BARRIER:
+ mode = StoreToObjectWriteBarrier::kNone;
+ break;
+ case UPDATE_WRITE_BARRIER:
+ mode = StoreToObjectWriteBarrier::kFull;
+ break;
+ default:
+ // We shouldn't see anything else.
+ UNREACHABLE();
+ }
+ StoreToObject(MachineRepresentation::kTagged, container, offset_minus_tag,
+ value, mode);
+}
+
template V8_EXPORT_PRIVATE TNode<IntPtrT>
CodeStubAssembler::EntryToIndex<NameDictionary>(TNode<IntPtrT>, int);
template V8_EXPORT_PRIVATE TNode<IntPtrT>
@@ -7865,6 +8138,19 @@ CodeStubAssembler::EntryToIndex<GlobalDictionary>(TNode<IntPtrT>, int);
template V8_EXPORT_PRIVATE TNode<IntPtrT>
CodeStubAssembler::EntryToIndex<NumberDictionary>(TNode<IntPtrT>, int);
+template TNode<Object> CodeStubAssembler::LoadValueByKeyIndex(
+ TNode<NameDictionary> container, TNode<IntPtrT> key_index);
+template TNode<Object> CodeStubAssembler::LoadValueByKeyIndex(
+ TNode<GlobalDictionary> container, TNode<IntPtrT> key_index);
+template TNode<Uint32T> CodeStubAssembler::LoadDetailsByKeyIndex(
+ TNode<NameDictionary> container, TNode<IntPtrT> key_index);
+template void CodeStubAssembler::StoreDetailsByKeyIndex(
+ TNode<NameDictionary> container, TNode<IntPtrT> key_index,
+ TNode<Smi> details);
+template void CodeStubAssembler::StoreValueByKeyIndex(
+ TNode<NameDictionary> container, TNode<IntPtrT> key_index,
+ TNode<Object> value, WriteBarrierMode write_barrier);
+
// This must be kept in sync with HashTableBase::ComputeCapacity().
TNode<IntPtrT> CodeStubAssembler::HashTableComputeCapacity(
TNode<IntPtrT> at_least_space_for) {
@@ -8010,6 +8296,15 @@ TNode<Word32T> CodeStubAssembler::ComputeSeededHash(TNode<IntPtrT> key) {
std::make_pair(type_int32, TruncateIntPtrToInt32(key))));
}
+template <>
+void CodeStubAssembler::NameDictionaryLookup(
+ TNode<SwissNameDictionary> dictionary, TNode<Name> unique_name,
+ Label* if_found, TVariable<IntPtrT>* var_name_index, Label* if_not_found,
+ LookupMode mode) {
+ SwissNameDictionaryFindEntry(dictionary, unique_name, if_found,
+ var_name_index, if_not_found);
+}
+
void CodeStubAssembler::NumberDictionaryLookup(
TNode<NumberDictionary> dictionary, TNode<IntPtrT> intptr_index,
Label* if_found, TVariable<IntPtrT>* var_entry, Label* if_not_found) {
@@ -8196,6 +8491,31 @@ void CodeStubAssembler::Add(TNode<Dictionary> dictionary, TNode<Name> key,
enum_index);
}
+template <>
+void CodeStubAssembler::Add(TNode<SwissNameDictionary> dictionary,
+ TNode<Name> key, TNode<Object> value,
+ Label* bailout) {
+ PropertyDetails d(kData, NONE,
+ PropertyDetails::kConstIfDictConstnessTracking);
+
+ PropertyDetails d_dont_enum(kData, DONT_ENUM,
+ PropertyDetails::kConstIfDictConstnessTracking);
+ TNode<Uint8T> details_byte_enum =
+ UncheckedCast<Uint8T>(Uint32Constant(d.ToByte()));
+ TNode<Uint8T> details_byte_dont_enum =
+ UncheckedCast<Uint8T>(Uint32Constant(d_dont_enum.ToByte()));
+
+ Label not_private(this);
+ TVARIABLE(Uint8T, var_details, details_byte_enum);
+
+ GotoIfNot(IsPrivateSymbol(key), &not_private);
+ var_details = details_byte_dont_enum;
+ Goto(&not_private);
+
+ BIND(&not_private);
+ SwissNameDictionaryAdd(dictionary, key, value, var_details.value(), bailout);
+}
+
template void CodeStubAssembler::Add<NameDictionary>(TNode<NameDictionary>,
TNode<Name>, TNode<Object>,
Label*);
@@ -8209,9 +8529,11 @@ TNode<Smi> CodeStubAssembler::GetNumberOfElements(
template <>
TNode<Smi> CodeStubAssembler::GetNumberOfElements(
- TNode<OrderedNameDictionary> dictionary) {
- return CAST(LoadFixedArrayElement(
- dictionary, OrderedNameDictionary::NumberOfElementsIndex()));
+ TNode<SwissNameDictionary> dictionary) {
+ TNode<IntPtrT> capacity =
+ ChangeInt32ToIntPtr(LoadSwissNameDictionaryCapacity(dictionary));
+ return SmiFromIntPtr(
+ LoadSwissNameDictionaryNumberOfElements(dictionary, capacity));
}
template TNode<Smi> CodeStubAssembler::GetNumberOfElements(
@@ -8528,13 +8850,8 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
}
BIND(&if_found_dict);
{
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- // TODO(v8:11167, v8:11177) Only here due to SetDataProperties
- // workaround.
- GotoIf(Int32TrueConstant(), bailout);
- }
-
- TNode<NameDictionary> dictionary = CAST(var_meta_storage.value());
+ TNode<PropertyDictionary> dictionary =
+ CAST(var_meta_storage.value());
TNode<IntPtrT> entry = var_entry.value();
TNode<Uint32T> details = LoadDetailsByKeyIndex(dictionary, entry);
@@ -8544,7 +8861,8 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
&next_iteration);
var_details = details;
- var_value = LoadValueByKeyIndex<NameDictionary>(dictionary, entry);
+ var_value =
+ LoadValueByKeyIndex<PropertyDictionary>(dictionary, entry);
Goto(&if_found);
}
@@ -8733,16 +9051,11 @@ void CodeStubAssembler::TryLookupPropertyInSimpleObject(
}
BIND(&if_isslowmap);
{
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- // TODO(v8:11167, v8:11177) Only here due to SetDataProperties workaround.
- GotoIf(Int32TrueConstant(), bailout);
- }
-
- TNode<NameDictionary> dictionary = CAST(LoadSlowProperties(object));
+ TNode<PropertyDictionary> dictionary = CAST(LoadSlowProperties(object));
*var_meta_storage = dictionary;
- NameDictionaryLookup<NameDictionary>(dictionary, unique_name, if_found_dict,
- var_name_index, if_not_found);
+ NameDictionaryLookup<PropertyDictionary>(
+ dictionary, unique_name, if_found_dict, var_name_index, if_not_found);
}
}
@@ -8966,8 +9279,9 @@ void CodeStubAssembler::LoadPropertyFromFastObject(
Comment("] LoadPropertyFromFastObject");
}
-void CodeStubAssembler::LoadPropertyFromNameDictionary(
- TNode<NameDictionary> dictionary, TNode<IntPtrT> name_index,
+template <typename Dictionary>
+void CodeStubAssembler::LoadPropertyFromDictionary(
+ TNode<Dictionary> dictionary, TNode<IntPtrT> name_index,
TVariable<Uint32T>* var_details, TVariable<Object>* var_value) {
Comment("LoadPropertyFromNameDictionary");
*var_details = LoadDetailsByKeyIndex(dictionary, name_index);
@@ -8997,6 +9311,14 @@ void CodeStubAssembler::LoadPropertyFromGlobalDictionary(
Comment("] LoadPropertyFromGlobalDictionary");
}
+template void CodeStubAssembler::LoadPropertyFromDictionary(
+ TNode<NameDictionary> dictionary, TNode<IntPtrT> name_index,
+ TVariable<Uint32T>* var_details, TVariable<Object>* var_value);
+
+template void CodeStubAssembler::LoadPropertyFromDictionary(
+ TNode<SwissNameDictionary> dictionary, TNode<IntPtrT> name_index,
+ TVariable<Uint32T>* var_details, TVariable<Object>* var_value);
+
// |value| is the property backing store's contents, which is either a value or
// an accessor pair, as specified by |details|. |holder| is a JSObject or a
// PropertyCell (TODO: use UnionT). Returns either the original value, or the
@@ -9159,9 +9481,10 @@ void CodeStubAssembler::TryGetOwnProperty(
}
BIND(&if_found_dict);
{
- TNode<NameDictionary> dictionary = CAST(var_meta_storage.value());
+ TNode<PropertyDictionary> dictionary = CAST(var_meta_storage.value());
TNode<IntPtrT> entry = var_entry.value();
- LoadPropertyFromNameDictionary(dictionary, entry, var_details, var_value);
+ LoadPropertyFromDictionary(dictionary, entry, var_details, var_value);
+
Goto(&if_found);
}
BIND(&if_found_global);
@@ -9284,7 +9607,7 @@ void CodeStubAssembler::TryLookupElement(
{
// Negative and too-large keys must be converted to property names.
if (Is64()) {
- GotoIf(UintPtrLessThan(IntPtrConstant(JSArray::kMaxArrayIndex),
+ GotoIf(UintPtrLessThan(IntPtrConstant(JSObject::kMaxElementIndex),
intptr_index),
if_bailout);
} else {
@@ -9323,7 +9646,7 @@ void CodeStubAssembler::TryLookupElement(
// Positive OOB indices mean "not found", negative indices and indices
// out of array index range must be converted to property names.
if (Is64()) {
- GotoIf(UintPtrLessThan(IntPtrConstant(JSArray::kMaxArrayIndex),
+ GotoIf(UintPtrLessThan(IntPtrConstant(JSObject::kMaxElementIndex),
intptr_index),
if_bailout);
} else {
@@ -9803,7 +10126,7 @@ void CodeStubAssembler::UpdateFeedback(TNode<Smi> feedback,
}
void CodeStubAssembler::ReportFeedbackUpdate(
- TNode<FeedbackVector> feedback_vector, SloppyTNode<UintPtrT> slot_id,
+ TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot_id,
const char* reason) {
// Reset profiler ticks.
StoreObjectFieldNoWriteBarrier(
@@ -9940,28 +10263,25 @@ MachineRepresentation ElementsKindToMachineRepresentation(ElementsKind kind) {
} // namespace
-template <typename TArray, typename TIndex>
-void CodeStubAssembler::StoreElementTypedArray(TNode<TArray> elements,
- ElementsKind kind,
- TNode<TIndex> index,
- Node* value) {
- // TODO(v8:9708): Do we want to keep both IntPtrT and UintPtrT variants?
- static_assert(std::is_same<TIndex, Smi>::value ||
- std::is_same<TIndex, UintPtrT>::value ||
+// TODO(solanes): Since we can't use `if constexpr` until we enable C++17 we
+// have to specialize the BigInt and Word32T cases. Since we can't partly
+// specialize, we have to specialize all used combinations.
+template <typename TIndex>
+void CodeStubAssembler::StoreElementTypedArrayBigInt(TNode<RawPtrT> elements,
+ ElementsKind kind,
+ TNode<TIndex> index,
+ TNode<BigInt> value) {
+ static_assert(std::is_same<TIndex, UintPtrT>::value ||
std::is_same<TIndex, IntPtrT>::value,
- "Only Smi, UintPtrT or IntPtrT index is allowed");
- static_assert(std::is_same<TArray, RawPtrT>::value ||
- std::is_same<TArray, FixedArrayBase>::value,
- "Only RawPtrT or FixedArrayBase elements are allowed");
- DCHECK(IsTypedArrayElementsKind(kind));
- if (kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS) {
- TNode<IntPtrT> offset = ElementOffsetFromIndex(index, kind, 0);
- TVARIABLE(UintPtrT, var_low);
- // Only used on 32-bit platforms.
- TVARIABLE(UintPtrT, var_high);
- BigIntToRawBytes(CAST(value), &var_low, &var_high);
-
- MachineRepresentation rep = WordT::kMachineRepresentation;
+                "Only UintPtrT or IntPtrT indices are allowed");
+ DCHECK(kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS);
+ TNode<IntPtrT> offset = ElementOffsetFromIndex(index, kind, 0);
+ TVARIABLE(UintPtrT, var_low);
+ // Only used on 32-bit platforms.
+ TVARIABLE(UintPtrT, var_high);
+ BigIntToRawBytes(value, &var_low, &var_high);
+
+ MachineRepresentation rep = WordT::kMachineRepresentation;
#if defined(V8_TARGET_BIG_ENDIAN)
if (!Is64()) {
StoreNoWriteBarrier(rep, elements, offset, var_high.value());
@@ -9979,16 +10299,82 @@ void CodeStubAssembler::StoreElementTypedArray(TNode<TArray> elements,
var_high.value());
}
#endif
- } else {
- if (kind == UINT8_CLAMPED_ELEMENTS) {
- CSA_ASSERT(this, Word32Equal(UncheckedCast<Word32T>(value),
- Word32And(Int32Constant(0xFF), value)));
- }
- TNode<IntPtrT> offset = ElementOffsetFromIndex(index, kind, 0);
- // TODO(cbruni): Add OOB check once typed.
- MachineRepresentation rep = ElementsKindToMachineRepresentation(kind);
- StoreNoWriteBarrier(rep, elements, offset, value);
+}
+
+template <>
+void CodeStubAssembler::StoreElementTypedArray(TNode<RawPtrT> elements,
+ ElementsKind kind,
+ TNode<UintPtrT> index,
+ TNode<BigInt> value) {
+ StoreElementTypedArrayBigInt(elements, kind, index, value);
+}
+
+template <>
+void CodeStubAssembler::StoreElementTypedArray(TNode<RawPtrT> elements,
+ ElementsKind kind,
+ TNode<IntPtrT> index,
+ TNode<BigInt> value) {
+ StoreElementTypedArrayBigInt(elements, kind, index, value);
+}
+
+template <typename TIndex>
+void CodeStubAssembler::StoreElementTypedArrayWord32(TNode<RawPtrT> elements,
+ ElementsKind kind,
+ TNode<TIndex> index,
+ TNode<Word32T> value) {
+ static_assert(std::is_same<TIndex, UintPtrT>::value ||
+ std::is_same<TIndex, IntPtrT>::value,
+                "Only UintPtrT or IntPtrT indices are allowed");
+ DCHECK(IsTypedArrayElementsKind(kind));
+ if (kind == UINT8_CLAMPED_ELEMENTS) {
+ CSA_ASSERT(this, Word32Equal(value, Word32And(Int32Constant(0xFF), value)));
}
+ TNode<IntPtrT> offset = ElementOffsetFromIndex(index, kind, 0);
+ // TODO(cbruni): Add OOB check once typed.
+ MachineRepresentation rep = ElementsKindToMachineRepresentation(kind);
+ StoreNoWriteBarrier(rep, elements, offset, value);
+}
+
+template <>
+void CodeStubAssembler::StoreElementTypedArray(TNode<RawPtrT> elements,
+ ElementsKind kind,
+ TNode<UintPtrT> index,
+ TNode<Word32T> value) {
+ StoreElementTypedArrayWord32(elements, kind, index, value);
+}
+
+template <>
+void CodeStubAssembler::StoreElementTypedArray(TNode<RawPtrT> elements,
+ ElementsKind kind,
+ TNode<IntPtrT> index,
+ TNode<Word32T> value) {
+ StoreElementTypedArrayWord32(elements, kind, index, value);
+}
+
+template <typename TArray, typename TIndex, typename TValue>
+void CodeStubAssembler::StoreElementTypedArray(TNode<TArray> elements,
+ ElementsKind kind,
+ TNode<TIndex> index,
+ TNode<TValue> value) {
+ // TODO(v8:9708): Do we want to keep both IntPtrT and UintPtrT variants?
+ static_assert(std::is_same<TIndex, Smi>::value ||
+ std::is_same<TIndex, UintPtrT>::value ||
+ std::is_same<TIndex, IntPtrT>::value,
+                "Only Smi, UintPtrT or IntPtrT indices are allowed");
+ static_assert(std::is_same<TArray, RawPtrT>::value ||
+ std::is_same<TArray, FixedArrayBase>::value,
+ "Only RawPtrT or FixedArrayBase elements are allowed");
+ static_assert(std::is_same<TValue, Int32T>::value ||
+ std::is_same<TValue, Float32T>::value ||
+ std::is_same<TValue, Float64T>::value ||
+ std::is_same<TValue, Object>::value,
+ "Only Int32T, Float32T, Float64T or object value "
+ "types are allowed");
+ DCHECK(IsTypedArrayElementsKind(kind));
+ TNode<IntPtrT> offset = ElementOffsetFromIndex(index, kind, 0);
+ // TODO(cbruni): Add OOB check once typed.
+ MachineRepresentation rep = ElementsKindToMachineRepresentation(kind);
+ StoreNoWriteBarrier(rep, elements, offset, value);
}
template <typename TIndex>
@@ -10020,14 +10406,41 @@ void CodeStubAssembler::StoreElement(TNode<FixedArrayBase> elements,
StoreFixedDoubleArrayElement(CAST(elements), index, value);
}
-template <typename TIndex>
+template <typename TIndex, typename TValue>
void CodeStubAssembler::StoreElement(TNode<RawPtrT> elements, ElementsKind kind,
- TNode<TIndex> index, Node* value) {
+ TNode<TIndex> index, TNode<TValue> value) {
+ static_assert(std::is_same<TIndex, Smi>::value ||
+ std::is_same<TIndex, IntPtrT>::value ||
+ std::is_same<TIndex, UintPtrT>::value,
+ "Only Smi, IntPtrT or UintPtrT indices are allowed");
+ static_assert(
+ std::is_same<TValue, Int32T>::value ||
+ std::is_same<TValue, Word32T>::value ||
+ std::is_same<TValue, Float32T>::value ||
+ std::is_same<TValue, Float64T>::value ||
+ std::is_same<TValue, BigInt>::value,
+ "Only Int32T, Word32T, Float32T, Float64T or BigInt value types "
+ "are allowed");
+
DCHECK(IsTypedArrayElementsKind(kind));
StoreElementTypedArray(elements, kind, index, value);
}
-template V8_EXPORT_PRIVATE void CodeStubAssembler::StoreElement<UintPtrT>(
- TNode<RawPtrT>, ElementsKind, TNode<UintPtrT>, Node*);
+template V8_EXPORT_PRIVATE void CodeStubAssembler::StoreElement(TNode<RawPtrT>,
+ ElementsKind,
+ TNode<UintPtrT>,
+ TNode<Int32T>);
+template V8_EXPORT_PRIVATE void CodeStubAssembler::StoreElement(TNode<RawPtrT>,
+ ElementsKind,
+ TNode<UintPtrT>,
+ TNode<Word32T>);
+template V8_EXPORT_PRIVATE void CodeStubAssembler::StoreElement(
+ TNode<RawPtrT>, ElementsKind, TNode<UintPtrT>, TNode<Float32T>);
+template V8_EXPORT_PRIVATE void CodeStubAssembler::StoreElement(
+ TNode<RawPtrT>, ElementsKind, TNode<UintPtrT>, TNode<Float64T>);
+template V8_EXPORT_PRIVATE void CodeStubAssembler::StoreElement(TNode<RawPtrT>,
+ ElementsKind,
+ TNode<UintPtrT>,
+ TNode<BigInt>);
TNode<Uint8T> CodeStubAssembler::Int32ToUint8Clamped(
TNode<Int32T> int32_value) {
@@ -10231,60 +10644,6 @@ TNode<BigInt> CodeStubAssembler::PrepareValueForWriteToTypedArray<BigInt>(
return ToBigInt(context, input);
}
-template <>
-TNode<UntaggedT> CodeStubAssembler::PrepareValueForWriteToTypedArray(
- TNode<Object> input, ElementsKind elements_kind, TNode<Context> context) {
- DCHECK(IsTypedArrayElementsKind(elements_kind));
-
- switch (elements_kind) {
- case UINT8_ELEMENTS:
- case INT8_ELEMENTS:
- case UINT16_ELEMENTS:
- case INT16_ELEMENTS:
- case UINT32_ELEMENTS:
- case INT32_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- return PrepareValueForWriteToTypedArray<Word32T>(input, elements_kind,
- context);
- case FLOAT32_ELEMENTS:
- return PrepareValueForWriteToTypedArray<Float32T>(input, elements_kind,
- context);
- case FLOAT64_ELEMENTS:
- return PrepareValueForWriteToTypedArray<Float64T>(input, elements_kind,
- context);
- default:
- UNREACHABLE();
- }
-}
-
-Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
- TNode<Object> input, ElementsKind elements_kind, TNode<Context> context) {
- DCHECK(IsTypedArrayElementsKind(elements_kind));
-
- switch (elements_kind) {
- case UINT8_ELEMENTS:
- case INT8_ELEMENTS:
- case UINT16_ELEMENTS:
- case INT16_ELEMENTS:
- case UINT32_ELEMENTS:
- case INT32_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- return PrepareValueForWriteToTypedArray<Word32T>(input, elements_kind,
- context);
- case FLOAT32_ELEMENTS:
- return PrepareValueForWriteToTypedArray<Float32T>(input, elements_kind,
- context);
- case FLOAT64_ELEMENTS:
- return PrepareValueForWriteToTypedArray<Float64T>(input, elements_kind,
- context);
- case BIGINT64_ELEMENTS:
- case BIGUINT64_ELEMENTS:
- return ToBigInt(context, input);
- default:
- UNREACHABLE();
- }
-}
-
void CodeStubAssembler::BigIntToRawBytes(TNode<BigInt> bigint,
TVariable<UintPtrT>* var_low,
TVariable<UintPtrT>* var_high) {
@@ -10318,6 +10677,134 @@ void CodeStubAssembler::BigIntToRawBytes(TNode<BigInt> bigint,
BIND(&done);
}
+template <>
+void CodeStubAssembler::EmitElementStoreTypedArrayUpdateValue(
+ TNode<Object> value, ElementsKind elements_kind,
+ TNode<Word32T> converted_value, TVariable<Object>* maybe_converted_value) {
+ switch (elements_kind) {
+ case UINT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ case UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ *maybe_converted_value =
+ SmiFromInt32(UncheckedCast<Int32T>(converted_value));
+ break;
+ case UINT32_ELEMENTS:
+ *maybe_converted_value =
+ ChangeUint32ToTagged(UncheckedCast<Uint32T>(converted_value));
+ break;
+ case INT32_ELEMENTS:
+ *maybe_converted_value =
+ ChangeInt32ToTagged(UncheckedCast<Int32T>(converted_value));
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
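// Sketch of why the cases above re-tag differently (hypothetical helper, not
// CSA code): 8- and 16-bit element values always fit in a Smi, but a full
// int32/uint32 may not, so those paths can end up allocating a HeapNumber.
// Assumes 31-bit Smis, as with pointer compression.
#include <cstdint>

enum class SketchTagged { kSmi, kHeapNumber };

SketchTagged SketchRetagUint32(uint32_t v) {
  constexpr uint32_t kMaxSmiValue = (1u << 30) - 1;
  return v <= kMaxSmiValue ? SketchTagged::kSmi : SketchTagged::kHeapNumber;
}

SketchTagged SketchRetagInt32(int32_t v) {
  constexpr int32_t kMinSmiValue = -(1 << 30);
  constexpr int32_t kMaxSmiValue = (1 << 30) - 1;
  return (v >= kMinSmiValue && v <= kMaxSmiValue) ? SketchTagged::kSmi
                                                  : SketchTagged::kHeapNumber;
}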
+
+template <>
+void CodeStubAssembler::EmitElementStoreTypedArrayUpdateValue(
+ TNode<Object> value, ElementsKind elements_kind,
+ TNode<Float32T> converted_value, TVariable<Object>* maybe_converted_value) {
+ Label dont_allocate_heap_number(this), end(this);
+ GotoIf(TaggedIsSmi(value), &dont_allocate_heap_number);
+ GotoIf(IsHeapNumber(CAST(value)), &dont_allocate_heap_number);
+ {
+ *maybe_converted_value =
+ AllocateHeapNumberWithValue(ChangeFloat32ToFloat64(converted_value));
+ Goto(&end);
+ }
+ BIND(&dont_allocate_heap_number);
+ {
+ *maybe_converted_value = value;
+ Goto(&end);
+ }
+ BIND(&end);
+}
+
+template <>
+void CodeStubAssembler::EmitElementStoreTypedArrayUpdateValue(
+ TNode<Object> value, ElementsKind elements_kind,
+ TNode<Float64T> converted_value, TVariable<Object>* maybe_converted_value) {
+ Label dont_allocate_heap_number(this), end(this);
+ GotoIf(TaggedIsSmi(value), &dont_allocate_heap_number);
+ GotoIf(IsHeapNumber(CAST(value)), &dont_allocate_heap_number);
+ {
+ *maybe_converted_value = AllocateHeapNumberWithValue(converted_value);
+ Goto(&end);
+ }
+ BIND(&dont_allocate_heap_number);
+ {
+ *maybe_converted_value = value;
+ Goto(&end);
+ }
+ BIND(&end);
+}
+
+template <>
+void CodeStubAssembler::EmitElementStoreTypedArrayUpdateValue(
+ TNode<Object> value, ElementsKind elements_kind,
+ TNode<BigInt> converted_value, TVariable<Object>* maybe_converted_value) {
+ *maybe_converted_value = converted_value;
+}
+
+template <typename TValue>
+void CodeStubAssembler::EmitElementStoreTypedArray(
+ TNode<JSTypedArray> typed_array, TNode<IntPtrT> key, TNode<Object> value,
+ ElementsKind elements_kind, KeyedAccessStoreMode store_mode, Label* bailout,
+ TNode<Context> context, TVariable<Object>* maybe_converted_value) {
+ Label done(this), update_value_and_bailout(this, Label::kDeferred);
+
+ TNode<TValue> converted_value =
+ PrepareValueForWriteToTypedArray<TValue>(value, elements_kind, context);
+
+ // There must be no allocations between the buffer load and
+  // There must be no allocations between the buffer load and the actual
+  // store to the backing store, because GC may decide that
+ // TODO(ishell): introduce DisallowGarbageCollectionCode scope here.
+
+ // Check if buffer has been detached.
+ TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(typed_array);
+ if (maybe_converted_value) {
+ GotoIf(IsDetachedBuffer(buffer), &update_value_and_bailout);
+ } else {
+ GotoIf(IsDetachedBuffer(buffer), bailout);
+ }
+
+ // Bounds check.
+ TNode<UintPtrT> length = LoadJSTypedArrayLength(typed_array);
+
+ if (store_mode == STORE_IGNORE_OUT_OF_BOUNDS) {
+ // Skip the store if we write beyond the length or
+ // to a property with a negative integer index.
+ GotoIfNot(UintPtrLessThan(key, length), &done);
+ } else {
+ DCHECK_EQ(store_mode, STANDARD_STORE);
+ GotoIfNot(UintPtrLessThan(key, length), &update_value_and_bailout);
+ }
+
+ TNode<RawPtrT> data_ptr = LoadJSTypedArrayDataPtr(typed_array);
+ StoreElement(data_ptr, elements_kind, key, converted_value);
+ Goto(&done);
+
+ BIND(&update_value_and_bailout);
+ // We already prepared the incoming value for storing into a typed array.
+ // This might involve calling ToNumber in some cases. We shouldn't call
+ // ToNumber again in the runtime so pass the converted value to the runtime.
+ // The prepared value is an untagged value. Convert it to a tagged value
+ // to pass it to runtime. It is not possible to do the detached buffer check
+ // before we prepare the value, since ToNumber can detach the ArrayBuffer.
+ // The spec specifies the order of these operations.
+ if (maybe_converted_value != nullptr) {
+ EmitElementStoreTypedArrayUpdateValue(value, elements_kind, converted_value,
+ maybe_converted_value);
+ }
+ Goto(bailout);
+
+ BIND(&done);
+}
+
void CodeStubAssembler::EmitElementStore(
TNode<JSObject> object, TNode<Object> key, TNode<Object> value,
ElementsKind elements_kind, KeyedAccessStoreMode store_mode, Label* bailout,
@@ -10339,111 +10826,38 @@ void CodeStubAssembler::EmitElementStore(
// TODO(rmcilroy): TNodify the converted value once this function and
// StoreElement are templated based on the elements_kind type.
if (IsTypedArrayElementsKind(elements_kind)) {
- Label done(this), update_value_and_bailout(this, Label::kDeferred);
-
- // IntegerIndexedElementSet converts value to a Number/BigInt prior to the
- // bounds check.
- Node* converted_value =
- PrepareValueForWriteToTypedArray(value, elements_kind, context);
TNode<JSTypedArray> typed_array = CAST(object);
-
- // There must be no allocations between the buffer load and
- // and the actual store to backing store, because GC may decide that
- // the buffer is not alive or move the elements.
- // TODO(ishell): introduce DisallowGarbageCollectionCode scope here.
-
- // Check if buffer has been detached.
- TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(typed_array);
- if (maybe_converted_value) {
- GotoIf(IsDetachedBuffer(buffer), &update_value_and_bailout);
- } else {
- GotoIf(IsDetachedBuffer(buffer), bailout);
- }
-
- // Bounds check.
- TNode<UintPtrT> length = LoadJSTypedArrayLength(typed_array);
-
- if (store_mode == STORE_IGNORE_OUT_OF_BOUNDS) {
- // Skip the store if we write beyond the length or
- // to a property with a negative integer index.
- GotoIfNot(UintPtrLessThan(intptr_key, length), &done);
- } else {
- DCHECK_EQ(store_mode, STANDARD_STORE);
- GotoIfNot(UintPtrLessThan(intptr_key, length), &update_value_and_bailout);
- }
-
- TNode<RawPtrT> data_ptr = LoadJSTypedArrayDataPtr(typed_array);
- StoreElement(data_ptr, elements_kind, intptr_key, converted_value);
- Goto(&done);
-
- BIND(&update_value_and_bailout);
- // We already prepared the incoming value for storing into a typed array.
- // This might involve calling ToNumber in some cases. We shouldn't call
- // ToNumber again in the runtime so pass the converted value to the runtime.
- // The prepared value is an untagged value. Convert it to a tagged value
- // to pass it to runtime. It is not possible to do the detached buffer check
- // before we prepare the value, since ToNumber can detach the ArrayBuffer.
- // The spec specifies the order of these operations.
- if (maybe_converted_value != nullptr) {
- switch (elements_kind) {
- case UINT8_ELEMENTS:
- case INT8_ELEMENTS:
- case UINT16_ELEMENTS:
- case INT16_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- *maybe_converted_value = SmiFromInt32(converted_value);
- break;
- case UINT32_ELEMENTS:
- *maybe_converted_value = ChangeUint32ToTagged(converted_value);
- break;
- case INT32_ELEMENTS:
- *maybe_converted_value = ChangeInt32ToTagged(converted_value);
- break;
- case FLOAT32_ELEMENTS: {
- Label dont_allocate_heap_number(this), end(this);
- GotoIf(TaggedIsSmi(value), &dont_allocate_heap_number);
- GotoIf(IsHeapNumber(CAST(value)), &dont_allocate_heap_number);
- {
- *maybe_converted_value = AllocateHeapNumberWithValue(
- ChangeFloat32ToFloat64(converted_value));
- Goto(&end);
- }
- BIND(&dont_allocate_heap_number);
- {
- *maybe_converted_value = value;
- Goto(&end);
- }
- BIND(&end);
- break;
- }
- case FLOAT64_ELEMENTS: {
- Label dont_allocate_heap_number(this), end(this);
- GotoIf(TaggedIsSmi(value), &dont_allocate_heap_number);
- GotoIf(IsHeapNumber(CAST(value)), &dont_allocate_heap_number);
- {
- *maybe_converted_value =
- AllocateHeapNumberWithValue(converted_value);
- Goto(&end);
- }
- BIND(&dont_allocate_heap_number);
- {
- *maybe_converted_value = value;
- Goto(&end);
- }
- BIND(&end);
- break;
- }
- case BIGINT64_ELEMENTS:
- case BIGUINT64_ELEMENTS:
- *maybe_converted_value = CAST(converted_value);
- break;
- default:
- UNREACHABLE();
- }
+ switch (elements_kind) {
+ case UINT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ case UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ case UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ EmitElementStoreTypedArray<Word32T>(typed_array, intptr_key, value,
+ elements_kind, store_mode, bailout,
+ context, maybe_converted_value);
+ break;
+ case FLOAT32_ELEMENTS:
+ EmitElementStoreTypedArray<Float32T>(typed_array, intptr_key, value,
+ elements_kind, store_mode, bailout,
+ context, maybe_converted_value);
+ break;
+ case FLOAT64_ELEMENTS:
+ EmitElementStoreTypedArray<Float64T>(typed_array, intptr_key, value,
+ elements_kind, store_mode, bailout,
+ context, maybe_converted_value);
+ break;
+ case BIGINT64_ELEMENTS:
+ case BIGUINT64_ELEMENTS:
+ EmitElementStoreTypedArray<BigInt>(typed_array, intptr_key, value,
+ elements_kind, store_mode, bailout,
+ context, maybe_converted_value);
+ break;
+ default:
+ UNREACHABLE();
}
- Goto(bailout);
-
- BIND(&done);
return;
}
DCHECK(IsFastElementsKind(elements_kind) ||
@@ -12559,11 +12973,6 @@ TNode<Oddball> CodeStubAssembler::HasProperty(TNode<Context> context,
Label call_runtime(this, Label::kDeferred), return_true(this),
return_false(this), end(this), if_proxy(this, Label::kDeferred);
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- // TODO(v8:11167) remove once OrderedNameDictionary supported.
- GotoIf(Int32TrueConstant(), &call_runtime);
- }
-
CodeStubAssembler::LookupPropertyInHolder lookup_property_in_holder =
[this, &return_true](
TNode<HeapObject> receiver, TNode<HeapObject> holder,
@@ -13244,8 +13653,7 @@ TNode<RawPtrT> CodeStubArguments::AtIndexPtr(TNode<IntPtrT> index) const {
TNode<Object> CodeStubArguments::AtIndex(TNode<IntPtrT> index) const {
CSA_ASSERT(assembler_, assembler_->UintPtrOrSmiLessThan(index, GetLength()));
- return assembler_->UncheckedCast<Object>(
- assembler_->LoadFullTagged(AtIndexPtr(index)));
+ return assembler_->LoadFullTagged(AtIndexPtr(index));
}
TNode<Object> CodeStubArguments::AtIndex(int index) const {
@@ -13391,56 +13799,35 @@ TNode<BoolT> CodeStubAssembler::IsDebugActive() {
return Word32NotEqual(is_debug_active, Int32Constant(0));
}
+TNode<BoolT> CodeStubAssembler::IsPromiseHookEnabled() {
+ const TNode<RawPtrT> promise_hook = Load<RawPtrT>(
+ ExternalConstant(ExternalReference::promise_hook_address(isolate())));
+ return WordNotEqual(promise_hook, IntPtrConstant(0));
+}
+
TNode<BoolT> CodeStubAssembler::HasAsyncEventDelegate() {
const TNode<RawPtrT> async_event_delegate = Load<RawPtrT>(ExternalConstant(
ExternalReference::async_event_delegate_address(isolate())));
return WordNotEqual(async_event_delegate, IntPtrConstant(0));
}
-TNode<Uint32T> CodeStubAssembler::PromiseHookFlags() {
- return Load<Uint32T>(ExternalConstant(
- ExternalReference::promise_hook_flags_address(isolate())));
-}
-
-TNode<BoolT> CodeStubAssembler::IsAnyPromiseHookEnabled(TNode<Uint32T> flags) {
- uint32_t mask = Isolate::PromiseHookFields::HasContextPromiseHook::kMask |
- Isolate::PromiseHookFields::HasIsolatePromiseHook::kMask;
- return IsSetWord32(flags, mask);
-}
-
-TNode<BoolT> CodeStubAssembler::IsContextPromiseHookEnabled(
- TNode<Uint32T> flags) {
- return IsSetWord32<Isolate::PromiseHookFields::HasContextPromiseHook>(flags);
-}
-
-TNode<BoolT> CodeStubAssembler::
- IsIsolatePromiseHookEnabledOrHasAsyncEventDelegate(TNode<Uint32T> flags) {
- uint32_t mask = Isolate::PromiseHookFields::HasIsolatePromiseHook::kMask |
- Isolate::PromiseHookFields::HasAsyncEventDelegate::kMask;
- return IsSetWord32(flags, mask);
+TNode<BoolT> CodeStubAssembler::IsPromiseHookEnabledOrHasAsyncEventDelegate() {
+ const TNode<Uint8T> promise_hook_or_async_event_delegate =
+ Load<Uint8T>(ExternalConstant(
+ ExternalReference::promise_hook_or_async_event_delegate_address(
+ isolate())));
+ return Word32NotEqual(promise_hook_or_async_event_delegate, Int32Constant(0));
}
TNode<BoolT> CodeStubAssembler::
- IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
- TNode<Uint32T> flags) {
- uint32_t mask = Isolate::PromiseHookFields::HasIsolatePromiseHook::kMask |
- Isolate::PromiseHookFields::HasAsyncEventDelegate::kMask |
- Isolate::PromiseHookFields::IsDebugActive::kMask;
- return IsSetWord32(flags, mask);
-}
-
-TNode<BoolT> CodeStubAssembler::
- IsAnyPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
- TNode<Uint32T> flags) {
- return Word32NotEqual(flags, Int32Constant(0));
-}
-
-TNode<BoolT> CodeStubAssembler::NeedsAnyPromiseHooks(TNode<Uint32T> flags) {
- uint32_t mask = Isolate::PromiseHookFields::HasContextPromiseHook::kMask |
- Isolate::PromiseHookFields::HasIsolatePromiseHook::kMask |
- Isolate::PromiseHookFields::HasAsyncEventDelegate::kMask |
- Isolate::PromiseHookFields::IsDebugActive::kMask;
- return IsSetWord32(flags, mask);
+ IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() {
+ const TNode<Uint8T> promise_hook_or_debug_is_active_or_async_event_delegate =
+ Load<Uint8T>(ExternalConstant(
+ ExternalReference::
+ promise_hook_or_debug_is_active_or_async_event_delegate_address(
+ isolate())));
+ return Word32NotEqual(promise_hook_or_debug_is_active_or_async_event_delegate,
+ Int32Constant(0));
}
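For context, the helpers deleted in this hunk folded several conditions into a single flags word and tested them with bit masks; the restored versions above instead load one byte per condition from a dedicated external address. A rough stand-alone sketch of the mask-based pattern being removed (the bit positions are illustrative, not V8's actual PromiseHookFields layout):

    #include <cstdint>

    // Illustrative bit assignments only.
    constexpr uint32_t kHasContextPromiseHook = 1u << 0;
    constexpr uint32_t kHasIsolatePromiseHook = 1u << 1;
    constexpr uint32_t kHasAsyncEventDelegate = 1u << 2;
    constexpr uint32_t kIsDebugActive = 1u << 3;

    // "Is any promise hook installed?" as a single masked test.
    inline bool IsAnyPromiseHookEnabled(uint32_t flags) {
      return (flags & (kHasContextPromiseHook | kHasIsolatePromiseHook)) != 0;
    }

    // "Does anything need the promise hook machinery?" (hooks, async event
    // delegate, or an active debugger) as one masked test.
    inline bool NeedsAnyPromiseHooks(uint32_t flags) {
      return (flags & (kHasContextPromiseHook | kHasIsolatePromiseHook |
                       kHasAsyncEventDelegate | kIsDebugActive)) != 0;
    }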
TNode<Code> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
@@ -13449,14 +13836,14 @@ TNode<Code> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
TNode<IntPtrT> offset =
ElementOffsetFromIndex(SmiToBInt(builtin_id), SYSTEM_POINTER_ELEMENTS);
- return CAST(BitcastWordToTagged(
- Load(MachineType::Pointer(),
- ExternalConstant(ExternalReference::builtins_address(isolate())),
- offset)));
+ return CAST(BitcastWordToTagged(Load<RawPtrT>(
+ ExternalConstant(ExternalReference::builtins_address(isolate())),
+ offset)));
}
TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
- TNode<SharedFunctionInfo> shared_info, Label* if_compile_lazy) {
+ TNode<SharedFunctionInfo> shared_info, TVariable<Uint16T>* data_type_out,
+ Label* if_compile_lazy) {
TNode<Object> sfi_data =
LoadObjectField(shared_info, SharedFunctionInfo::kFunctionDataOffset);
@@ -13467,6 +13854,9 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
// IsSmi: Is builtin
GotoIf(TaggedIsNotSmi(sfi_data), &check_instance_type);
+ if (data_type_out) {
+ *data_type_out = Uint16Constant(0);
+ }
if (if_compile_lazy) {
GotoIf(SmiEqual(CAST(sfi_data), SmiConstant(Builtins::kCompileLazy)),
if_compile_lazy);
@@ -13477,16 +13867,23 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
// Switch on data's instance type.
BIND(&check_instance_type);
TNode<Uint16T> data_type = LoadInstanceType(CAST(sfi_data));
-
- int32_t case_values[] = {BYTECODE_ARRAY_TYPE,
- BASELINE_DATA_TYPE,
- WASM_EXPORTED_FUNCTION_DATA_TYPE,
- ASM_WASM_DATA_TYPE,
- UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE,
- UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE,
- FUNCTION_TEMPLATE_INFO_TYPE,
- WASM_JS_FUNCTION_DATA_TYPE,
- WASM_CAPI_FUNCTION_DATA_TYPE};
+ if (data_type_out) {
+ *data_type_out = data_type;
+ }
+
+ int32_t case_values[] = {
+ BYTECODE_ARRAY_TYPE,
+ BASELINE_DATA_TYPE,
+ UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE,
+ UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE,
+ FUNCTION_TEMPLATE_INFO_TYPE,
+#if V8_ENABLE_WEBASSEMBLY
+ WASM_EXPORTED_FUNCTION_DATA_TYPE,
+ ASM_WASM_DATA_TYPE,
+ WASM_JS_FUNCTION_DATA_TYPE,
+ WASM_CAPI_FUNCTION_DATA_TYPE,
+#endif // V8_ENABLE_WEBASSEMBLY
+ };
Label check_is_bytecode_array(this);
Label check_is_baseline_data(this);
Label check_is_exported_function_data(this);
@@ -13497,15 +13894,19 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
Label check_is_interpreter_data(this);
Label check_is_wasm_js_function_data(this);
Label check_is_wasm_capi_function_data(this);
- Label* case_labels[] = {&check_is_bytecode_array,
- &check_is_baseline_data,
- &check_is_exported_function_data,
- &check_is_asm_wasm_data,
- &check_is_uncompiled_data_without_preparse_data,
- &check_is_uncompiled_data_with_preparse_data,
- &check_is_function_template_info,
- &check_is_wasm_js_function_data,
- &check_is_wasm_capi_function_data};
+ Label* case_labels[] = {
+ &check_is_bytecode_array,
+ &check_is_baseline_data,
+ &check_is_uncompiled_data_without_preparse_data,
+ &check_is_uncompiled_data_with_preparse_data,
+ &check_is_function_template_info,
+#if V8_ENABLE_WEBASSEMBLY
+ &check_is_exported_function_data,
+ &check_is_asm_wasm_data,
+ &check_is_wasm_js_function_data,
+ &check_is_wasm_capi_function_data
+#endif // V8_ENABLE_WEBASSEMBLY
+ };
STATIC_ASSERT(arraysize(case_values) == arraysize(case_labels));
Switch(data_type, &check_is_interpreter_data, case_values, case_labels,
arraysize(case_labels));
@@ -13523,17 +13924,6 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
sfi_code = baseline_code;
Goto(&done);
- // IsWasmExportedFunctionData: Use the wrapper code
- BIND(&check_is_exported_function_data);
- sfi_code = CAST(LoadObjectField(
- CAST(sfi_data), WasmExportedFunctionData::kWrapperCodeOffset));
- Goto(&done);
-
- // IsAsmWasmData: Instantiate using AsmWasmData
- BIND(&check_is_asm_wasm_data);
- sfi_code = HeapConstant(BUILTIN_CODE(isolate(), InstantiateAsmJs));
- Goto(&done);
-
// IsUncompiledDataWithPreparseData | IsUncompiledDataWithoutPreparseData:
// Compile lazy
BIND(&check_is_uncompiled_data_with_preparse_data);
@@ -13556,6 +13946,18 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
CAST(sfi_data), InterpreterData::kInterpreterTrampolineOffset));
Goto(&done);
+#if V8_ENABLE_WEBASSEMBLY
+ // IsWasmExportedFunctionData: Use the wrapper code
+ BIND(&check_is_exported_function_data);
+ sfi_code = CAST(LoadObjectField(
+ CAST(sfi_data), WasmExportedFunctionData::kWrapperCodeOffset));
+ Goto(&done);
+
+ // IsAsmWasmData: Instantiate using AsmWasmData
+ BIND(&check_is_asm_wasm_data);
+ sfi_code = HeapConstant(BUILTIN_CODE(isolate(), InstantiateAsmJs));
+ Goto(&done);
+
// IsWasmJSFunctionData: Use the wrapper code.
BIND(&check_is_wasm_js_function_data);
sfi_code = CAST(
@@ -13567,6 +13969,7 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
sfi_code = CAST(LoadObjectField(CAST(sfi_data),
WasmCapiFunctionData::kWrapperCodeOffset));
Goto(&done);
+#endif // V8_ENABLE_WEBASSEMBLY
BIND(&done);
return sfi_code.value();
@@ -13661,15 +14064,15 @@ TNode<Map> CodeStubAssembler::CheckEnumCache(TNode<JSReceiver> receiver,
TNode<Smi> length;
TNode<HeapObject> properties = LoadSlowProperties(receiver);
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- CSA_ASSERT(this, Word32Or(IsOrderedNameDictionary(properties),
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ CSA_ASSERT(this, Word32Or(IsSwissNameDictionary(properties),
IsGlobalDictionary(properties)));
length = Select<Smi>(
- IsOrderedNameDictionary(properties),
+ IsSwissNameDictionary(properties),
[=] {
return GetNumberOfElements(
- UncheckedCast<OrderedNameDictionary>(properties));
+ UncheckedCast<SwissNameDictionary>(properties));
},
[=] {
return GetNumberOfElements(
@@ -14039,5 +14442,729 @@ void PrototypeCheckAssembler::CheckAndBranch(TNode<HeapObject> prototype,
}
}
+//
+// Begin of SwissNameDictionary macros
+//
+
+namespace {
+
+// Provides load and store functions that abstract over the details of
+// accessing the meta table in memory. Instead, they allow using logical
+// indices that are independent of the underlying entry size in the meta
+// table of a SwissNameDictionary.
+class MetaTableAccessor {
+ public:
+ MetaTableAccessor(CodeStubAssembler& csa, MachineType mt)
+ : csa{csa}, mt{mt} {}
+
+ TNode<Uint32T> Load(TNode<ByteArray> meta_table, TNode<IntPtrT> index) {
+ TNode<IntPtrT> offset = OverallOffset(meta_table, index);
+
+ return csa.UncheckedCast<Uint32T>(
+ csa.LoadFromObject(mt, meta_table, offset));
+ }
+
+ TNode<Uint32T> Load(TNode<ByteArray> meta_table, int index) {
+ return Load(meta_table, csa.IntPtrConstant(index));
+ }
+
+ void Store(TNode<ByteArray> meta_table, TNode<IntPtrT> index,
+ TNode<Uint32T> data) {
+ TNode<IntPtrT> offset = OverallOffset(meta_table, index);
+
+#ifdef DEBUG
+ int bits = mt.MemSize() * 8;
+ TNode<UintPtrT> max_value = csa.UintPtrConstant((1ULL << bits) - 1);
+
+ CSA_ASSERT(&csa, csa.UintPtrLessThanOrEqual(csa.ChangeUint32ToWord(data),
+ max_value));
+#endif
+
+ csa.StoreToObject(mt.representation(), meta_table, offset, data,
+ StoreToObjectWriteBarrier::kNone);
+ }
+
+ void Store(TNode<ByteArray> meta_table, int index, TNode<Uint32T> data) {
+ Store(meta_table, csa.IntPtrConstant(index), data);
+ }
+
+ private:
+ TNode<IntPtrT> OverallOffset(TNode<ByteArray> meta_table,
+ TNode<IntPtrT> index) {
+ // TODO(v8:11330): consider using ElementOffsetFromIndex().
+
+ int offset_to_data_minus_tag = ByteArray::kHeaderSize - kHeapObjectTag;
+
+ TNode<IntPtrT> overall_offset;
+ int size = mt.MemSize();
+ intptr_t constant;
+ if (csa.TryToIntPtrConstant(index, &constant)) {
+ intptr_t index_offset = constant * size;
+ overall_offset =
+ csa.IntPtrConstant(offset_to_data_minus_tag + index_offset);
+ } else {
+ TNode<IntPtrT> index_offset =
+ csa.IntPtrMul(index, csa.IntPtrConstant(size));
+ overall_offset = csa.IntPtrAdd(
+ csa.IntPtrConstant(offset_to_data_minus_tag), index_offset);
+ }
+
+#ifdef DEBUG
+ TNode<IntPtrT> byte_array_data_bytes =
+ csa.SmiToIntPtr(csa.LoadFixedArrayBaseLength(meta_table));
+ TNode<IntPtrT> max_allowed_offset = csa.IntPtrAdd(
+ byte_array_data_bytes, csa.IntPtrConstant(offset_to_data_minus_tag));
+ CSA_ASSERT(&csa, csa.UintPtrLessThan(overall_offset, max_allowed_offset));
+#endif
+
+ return overall_offset;
+ }
+
+ CodeStubAssembler& csa;
+ MachineType mt;
+};
+
+// Type of functions that, given a MetaTableAccessor, use its load and store
+// functions to generate code for operating on the meta table.
+using MetaTableAccessFunction = std::function<void(MetaTableAccessor&)>;
+
+// Helper function for macros operating on the meta table of a
+// SwissNameDictionary. Given a MetaTableAccessFunction, generates branching
+// code and uses the builder to generate code for each of the three possible
+// entry sizes a meta table can have.
+void GenerateMetaTableAccess(CodeStubAssembler* csa, TNode<IntPtrT> capacity,
+ MetaTableAccessFunction builder) {
+ MetaTableAccessor mta8 = MetaTableAccessor(*csa, MachineType::Uint8());
+ MetaTableAccessor mta16 = MetaTableAccessor(*csa, MachineType::Uint16());
+ MetaTableAccessor mta32 = MetaTableAccessor(*csa, MachineType::Uint32());
+
+ using Label = compiler::CodeAssemblerLabel;
+ Label small(csa), medium(csa), done(csa);
+
+ csa->GotoIf(
+ csa->IntPtrLessThanOrEqual(
+ capacity,
+ csa->IntPtrConstant(SwissNameDictionary::kMax1ByteMetaTableCapacity)),
+ &small);
+ csa->GotoIf(
+ csa->IntPtrLessThanOrEqual(
+ capacity,
+ csa->IntPtrConstant(SwissNameDictionary::kMax2ByteMetaTableCapacity)),
+ &medium);
+
+ builder(mta32);
+ csa->Goto(&done);
+
+ csa->Bind(&medium);
+ builder(mta16);
+ csa->Goto(&done);
+
+ csa->Bind(&small);
+ builder(mta8);
+ csa->Goto(&done);
+ csa->Bind(&done);
+}
+
+} // namespace
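The meta table stores the element count, the deleted-element count, and the enumeration-to-bucket mapping using 1, 2, or 4 bytes per entry depending on the dictionary's capacity; GenerateMetaTableAccess simply emits the same builder three times behind a capacity check. A stand-alone sketch of that size selection (the thresholds below are placeholders standing in for kMax1ByteMetaTableCapacity / kMax2ByteMetaTableCapacity, not necessarily the real values):

    #include <cstddef>

    // Placeholder thresholds; the real constants live on SwissNameDictionary.
    constexpr size_t kMax1ByteCapacity = 256;
    constexpr size_t kMax2ByteCapacity = 65536;

    // Bytes per logical meta table entry for a given dictionary capacity.
    inline size_t MetaTableEntrySize(size_t capacity) {
      if (capacity <= kMax1ByteCapacity) return 1;  // Uint8 entries
      if (capacity <= kMax2ByteCapacity) return 2;  // Uint16 entries
      return 4;                                     // Uint32 entries
    }

    // Byte offset of logical |index| within the meta table payload.
    inline size_t MetaTableByteOffset(size_t capacity, size_t index) {
      return index * MetaTableEntrySize(capacity);
    }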
+
+TNode<IntPtrT> CodeStubAssembler::LoadSwissNameDictionaryNumberOfElements(
+ TNode<SwissNameDictionary> table, TNode<IntPtrT> capacity) {
+ TNode<ByteArray> meta_table = LoadSwissNameDictionaryMetaTable(table);
+
+ TVARIABLE(Uint32T, nof, Uint32Constant(0));
+ MetaTableAccessFunction builder = [&](MetaTableAccessor& mta) {
+ nof = mta.Load(meta_table,
+ SwissNameDictionary::kMetaTableElementCountFieldIndex);
+ };
+
+ GenerateMetaTableAccess(this, capacity, builder);
+ return ChangeInt32ToIntPtr(nof.value());
+}
+
+TNode<IntPtrT>
+CodeStubAssembler::LoadSwissNameDictionaryNumberOfDeletedElements(
+ TNode<SwissNameDictionary> table, TNode<IntPtrT> capacity) {
+ TNode<ByteArray> meta_table = LoadSwissNameDictionaryMetaTable(table);
+
+ TVARIABLE(Uint32T, nod, Uint32Constant(0));
+ MetaTableAccessFunction builder = [&](MetaTableAccessor& mta) {
+ nod =
+ mta.Load(meta_table,
+ SwissNameDictionary::kMetaTableDeletedElementCountFieldIndex);
+ };
+
+ GenerateMetaTableAccess(this, capacity, builder);
+ return ChangeInt32ToIntPtr(nod.value());
+}
+
+void CodeStubAssembler::StoreSwissNameDictionaryEnumToEntryMapping(
+ TNode<SwissNameDictionary> table, TNode<IntPtrT> capacity,
+ TNode<IntPtrT> enum_index, TNode<Int32T> entry) {
+ TNode<ByteArray> meta_table = LoadSwissNameDictionaryMetaTable(table);
+ TNode<IntPtrT> meta_table_index = IntPtrAdd(
+ IntPtrConstant(SwissNameDictionary::kMetaTableEnumerationDataStartIndex),
+ enum_index);
+
+ MetaTableAccessFunction builder = [&](MetaTableAccessor& mta) {
+ mta.Store(meta_table, meta_table_index, Unsigned(entry));
+ };
+
+ GenerateMetaTableAccess(this, capacity, builder);
+}
+
+TNode<Uint32T>
+CodeStubAssembler::SwissNameDictionaryIncreaseElementCountOrBailout(
+ TNode<ByteArray> meta_table, TNode<IntPtrT> capacity,
+ TNode<Uint32T> max_usable_capacity, Label* bailout) {
+ TVARIABLE(Uint32T, used_var, Uint32Constant(0));
+
+ MetaTableAccessFunction builder = [&](MetaTableAccessor& mta) {
+ TNode<Uint32T> nof = mta.Load(
+ meta_table, SwissNameDictionary::kMetaTableElementCountFieldIndex);
+ TNode<Uint32T> nod =
+ mta.Load(meta_table,
+ SwissNameDictionary::kMetaTableDeletedElementCountFieldIndex);
+ TNode<Uint32T> used = Uint32Add(nof, nod);
+ GotoIf(Uint32GreaterThanOrEqual(used, max_usable_capacity), bailout);
+ TNode<Uint32T> inc_nof = Uint32Add(nof, Uint32Constant(1));
+ mta.Store(meta_table, SwissNameDictionary::kMetaTableElementCountFieldIndex,
+ inc_nof);
+ used_var = used;
+ };
+
+ GenerateMetaTableAccess(this, capacity, builder);
+ return used_var.value();
+}
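In plain terms, the macro above reads both counters, bails out once the used capacity (present plus deleted entries) reaches the usable maximum, and otherwise bumps the present count while returning the pre-increment used capacity. A small sketch of the same bookkeeping, assuming hypothetical counter fields:

    #include <cstdint>
    #include <optional>

    struct MetaCounts {
      uint32_t elements = 0;          // present entries
      uint32_t deleted_elements = 0;  // tombstones
    };

    // Returns the used capacity before the increment, or std::nullopt when the
    // caller has to grow/rehash the table first (the |bailout| case above).
    inline std::optional<uint32_t> IncreaseElementCountOrBailout(
        MetaCounts& counts, uint32_t max_usable_capacity) {
      uint32_t used = counts.elements + counts.deleted_elements;
      if (used >= max_usable_capacity) return std::nullopt;
      ++counts.elements;
      return used;
    }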
+
+TNode<Uint32T> CodeStubAssembler::SwissNameDictionaryUpdateCountsForDeletion(
+ TNode<ByteArray> meta_table, TNode<IntPtrT> capacity) {
+ TVARIABLE(Uint32T, new_nof_var, Uint32Constant(0));
+
+ MetaTableAccessFunction builder = [&](MetaTableAccessor& mta) {
+ TNode<Uint32T> nof = mta.Load(
+ meta_table, SwissNameDictionary::kMetaTableElementCountFieldIndex);
+ TNode<Uint32T> nod =
+ mta.Load(meta_table,
+ SwissNameDictionary::kMetaTableDeletedElementCountFieldIndex);
+
+ TNode<Uint32T> new_nof = Uint32Sub(nof, Uint32Constant(1));
+ TNode<Uint32T> new_nod = Uint32Add(nod, Uint32Constant(1));
+
+ mta.Store(meta_table, SwissNameDictionary::kMetaTableElementCountFieldIndex,
+ new_nof);
+ mta.Store(meta_table,
+ SwissNameDictionary::kMetaTableDeletedElementCountFieldIndex,
+ new_nod);
+
+ new_nof_var = new_nof;
+ };
+
+ GenerateMetaTableAccess(this, capacity, builder);
+ return new_nof_var.value();
+}
+
+TNode<SwissNameDictionary> CodeStubAssembler::AllocateSwissNameDictionary(
+ TNode<IntPtrT> at_least_space_for) {
+  // Note that, as in AllocateNameDictionary, we return a table with an
+  // initial (non-zero) capacity even if |at_least_space_for| is 0.
+
+ TNode<IntPtrT> capacity =
+ IntPtrMax(IntPtrConstant(SwissNameDictionary::kInitialCapacity),
+ SwissNameDictionaryCapacityFor(at_least_space_for));
+
+ return AllocateSwissNameDictionaryWithCapacity(capacity);
+}
+
+TNode<SwissNameDictionary> CodeStubAssembler::AllocateSwissNameDictionary(
+ int at_least_space_for) {
+ return AllocateSwissNameDictionary(IntPtrConstant(at_least_space_for));
+}
+
+TNode<SwissNameDictionary>
+CodeStubAssembler::AllocateSwissNameDictionaryWithCapacity(
+ TNode<IntPtrT> capacity) {
+ Comment("[ AllocateSwissNameDictionaryWithCapacity");
+ CSA_ASSERT(this, WordIsPowerOfTwo(capacity));
+ CSA_ASSERT(this, UintPtrGreaterThanOrEqual(
+ capacity,
+ IntPtrConstant(SwissNameDictionary::kInitialCapacity)));
+ CSA_ASSERT(this,
+ UintPtrLessThanOrEqual(
+ capacity, IntPtrConstant(SwissNameDictionary::MaxCapacity())));
+
+ Comment("Size check.");
+ intptr_t capacity_constant;
+ if (ToParameterConstant(capacity, &capacity_constant)) {
+ CHECK_LE(capacity_constant, SwissNameDictionary::MaxCapacity());
+ } else {
+ Label if_out_of_memory(this, Label::kDeferred), next(this);
+ Branch(UintPtrGreaterThan(
+ capacity, IntPtrConstant(SwissNameDictionary::MaxCapacity())),
+ &if_out_of_memory, &next);
+
+ BIND(&if_out_of_memory);
+ CallRuntime(Runtime::kFatalProcessOutOfMemoryInAllocateRaw,
+ NoContextConstant());
+ Unreachable();
+
+ BIND(&next);
+ }
+
+  // TODO(v8:11330) Consider adding dedicated handling for constant capacities,
+ // similar to AllocateOrderedHashTableWithCapacity.
+
+  // We must allocate the ByteArray first: if it were allocated after the
+  // SwissNameDictionary, that allocation could trigger a GC, which might try
+  // to verify the not-yet-initialized SwissNameDictionary.
+ Comment("Meta table allocation.");
+ TNode<IntPtrT> meta_table_payload_size =
+ SwissNameDictionaryMetaTableSizeFor(capacity);
+
+ TNode<ByteArray> meta_table =
+ AllocateNonEmptyByteArray(Unsigned(meta_table_payload_size),
+ AllocationFlag::kAllowLargeObjectAllocation);
+
+ Comment("SwissNameDictionary allocation.");
+ TNode<IntPtrT> total_size = SwissNameDictionarySizeFor(capacity);
+
+ TNode<SwissNameDictionary> table = UncheckedCast<SwissNameDictionary>(
+ Allocate(total_size, kAllowLargeObjectAllocation));
+
+ StoreMapNoWriteBarrier(table, RootIndex::kSwissNameDictionaryMap);
+
+ Comment(
+ "Initialize the hash, capacity, meta table pointer, and number of "
+ "(deleted) elements.");
+
+ StoreSwissNameDictionaryHash(table,
+ Uint32Constant(PropertyArray::kNoHashSentinel));
+ StoreSwissNameDictionaryCapacity(table, TruncateIntPtrToInt32(capacity));
+ StoreSwissNameDictionaryMetaTable(table, meta_table);
+
+  // Set the present and deleted element counts without doing the branching
+  // needed for meta table access twice.
+ MetaTableAccessFunction builder = [&](MetaTableAccessor& mta) {
+ mta.Store(meta_table, SwissNameDictionary::kMetaTableElementCountFieldIndex,
+ Uint32Constant(0));
+ mta.Store(meta_table,
+ SwissNameDictionary::kMetaTableDeletedElementCountFieldIndex,
+ Uint32Constant(0));
+ };
+ GenerateMetaTableAccess(this, capacity, builder);
+
+ Comment("Initialize the ctrl table.");
+
+ TNode<IntPtrT> ctrl_table_start_offset_minus_tag =
+ SwissNameDictionaryCtrlTableStartOffsetMT(capacity);
+
+ TNode<IntPtrT> table_address_with_tag = BitcastTaggedToWord(table);
+ TNode<IntPtrT> ctrl_table_size_bytes =
+ IntPtrAdd(capacity, IntPtrConstant(SwissNameDictionary::kGroupWidth));
+ TNode<IntPtrT> ctrl_table_start_ptr =
+ IntPtrAdd(table_address_with_tag, ctrl_table_start_offset_minus_tag);
+ TNode<IntPtrT> ctrl_table_end_ptr =
+ IntPtrAdd(ctrl_table_start_ptr, ctrl_table_size_bytes);
+
+  // |ctrl_table_size_bytes| (= capacity + kGroupWidth) is divisible by four:
+ STATIC_ASSERT(SwissNameDictionary::kGroupWidth % 4 == 0);
+ STATIC_ASSERT(SwissNameDictionary::kInitialCapacity % 4 == 0);
+
+ // TODO(v8:11330) For all capacities except 4, we know that
+ // |ctrl_table_size_bytes| is divisible by 8. Consider initializing the ctrl
+ // table with WordTs in those cases. Alternatively, always initialize as many
+  // bytes as possible with WordT and then, if necessary, the remaining 4 bytes
+ // with Word32T.
+
+ constexpr uint8_t kEmpty = swiss_table::Ctrl::kEmpty;
+ constexpr uint32_t kEmpty32 =
+ (kEmpty << 24) | (kEmpty << 16) | (kEmpty << 8) | kEmpty;
+ TNode<Int32T> empty32 = Int32Constant(kEmpty32);
+ BuildFastLoop<IntPtrT>(
+ ctrl_table_start_ptr, ctrl_table_end_ptr,
+ [=](TNode<IntPtrT> current) {
+ UnsafeStoreNoWriteBarrier(MachineRepresentation::kWord32, current,
+ empty32);
+ },
+ sizeof(uint32_t), IndexAdvanceMode::kPost);
+
+ Comment("Initialize the data table.");
+
+ TNode<IntPtrT> data_table_start_offset_minus_tag =
+ SwissNameDictionaryDataTableStartOffsetMT();
+ TNode<IntPtrT> data_table_ptr =
+ IntPtrAdd(table_address_with_tag, data_table_start_offset_minus_tag);
+ TNode<IntPtrT> data_table_size = IntPtrMul(
+ IntPtrConstant(SwissNameDictionary::kDataTableEntryCount * kTaggedSize),
+ capacity);
+
+ StoreFieldsNoWriteBarrier(data_table_ptr,
+ IntPtrAdd(data_table_ptr, data_table_size),
+ TheHoleConstant());
+
+ Comment("AllocateSwissNameDictionaryWithCapacity ]");
+
+ return table;
+}
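The ctrl table is cleared four bytes at a time with kEmpty replicated into a 32-bit word, which is safe because both the group width and the minimum capacity are multiples of four. A stand-alone sketch of that fill (the kEmpty value is illustrative; the real constant is swiss_table::Ctrl::kEmpty):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    constexpr uint8_t kEmpty = 0x80;  // illustrative placeholder value

    // Fill a ctrl table whose size is a multiple of four (capacity + group
    // width) with kEmpty, one 32-bit store at a time, like the fast loop above.
    inline void FillCtrlTableWithEmpty(std::vector<uint8_t>& ctrl) {
      const uint32_t empty32 = (uint32_t{kEmpty} << 24) |
                               (uint32_t{kEmpty} << 16) |
                               (uint32_t{kEmpty} << 8) | kEmpty;
      for (size_t i = 0; i + 4 <= ctrl.size(); i += 4) {
        std::memcpy(&ctrl[i], &empty32, sizeof(empty32));
      }
    }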
+
+TNode<SwissNameDictionary> CodeStubAssembler::CopySwissNameDictionary(
+ TNode<SwissNameDictionary> original) {
+ Comment("[ CopySwissNameDictionary");
+
+ TNode<IntPtrT> capacity =
+ Signed(ChangeUint32ToWord(LoadSwissNameDictionaryCapacity(original)));
+
+  // We must allocate the ByteArray first: if it were allocated after the
+  // SwissNameDictionary, that allocation could trigger a GC, which might try
+  // to verify the not-yet-initialized SwissNameDictionary.
+ Comment("Meta table allocation.");
+ TNode<IntPtrT> meta_table_payload_size =
+ SwissNameDictionaryMetaTableSizeFor(capacity);
+
+ TNode<ByteArray> meta_table =
+ AllocateNonEmptyByteArray(Unsigned(meta_table_payload_size),
+ AllocationFlag::kAllowLargeObjectAllocation);
+
+ Comment("SwissNameDictionary allocation.");
+ TNode<IntPtrT> total_size = SwissNameDictionarySizeFor(capacity);
+
+ TNode<SwissNameDictionary> table = UncheckedCast<SwissNameDictionary>(
+ Allocate(total_size, kAllowLargeObjectAllocation));
+
+ StoreMapNoWriteBarrier(table, RootIndex::kSwissNameDictionaryMap);
+
+ Comment("Copy the hash and capacity.");
+
+ StoreSwissNameDictionaryHash(table, LoadSwissNameDictionaryHash(original));
+ StoreSwissNameDictionaryCapacity(table, TruncateIntPtrToInt32(capacity));
+ StoreSwissNameDictionaryMetaTable(table, meta_table);
+  // Not setting up the number of (deleted) elements; copying the whole meta
+  // table instead.
+
+ TNode<ExternalReference> memcpy =
+ ExternalConstant(ExternalReference::libc_memcpy_function());
+
+ TNode<IntPtrT> old_table_address_with_tag = BitcastTaggedToWord(original);
+ TNode<IntPtrT> new_table_address_with_tag = BitcastTaggedToWord(table);
+
+ TNode<IntPtrT> ctrl_table_start_offset_minus_tag =
+ SwissNameDictionaryCtrlTableStartOffsetMT(capacity);
+
+ TNode<IntPtrT> ctrl_table_size_bytes =
+ IntPtrAdd(capacity, IntPtrConstant(SwissNameDictionary::kGroupWidth));
+
+ Comment("Copy the ctrl table.");
+ {
+ TNode<IntPtrT> old_ctrl_table_start_ptr = IntPtrAdd(
+ old_table_address_with_tag, ctrl_table_start_offset_minus_tag);
+ TNode<IntPtrT> new_ctrl_table_start_ptr = IntPtrAdd(
+ new_table_address_with_tag, ctrl_table_start_offset_minus_tag);
+
+ CallCFunction(
+ memcpy, MachineType::Pointer(),
+ std::make_pair(MachineType::Pointer(), new_ctrl_table_start_ptr),
+ std::make_pair(MachineType::Pointer(), old_ctrl_table_start_ptr),
+ std::make_pair(MachineType::UintPtr(), ctrl_table_size_bytes));
+ }
+
+ Comment("Copy the data table.");
+ {
+ TNode<IntPtrT> start_offset =
+ IntPtrConstant(SwissNameDictionary::DataTableStartOffset());
+ TNode<IntPtrT> data_table_size = IntPtrMul(
+ IntPtrConstant(SwissNameDictionary::kDataTableEntryCount * kTaggedSize),
+ capacity);
+
+ BuildFastLoop<IntPtrT>(
+ start_offset, IntPtrAdd(start_offset, data_table_size),
+ [=](TNode<IntPtrT> offset) {
+ TNode<Object> table_field = LoadObjectField(original, offset);
+ StoreObjectField(table, offset, table_field);
+ },
+ kTaggedSize, IndexAdvanceMode::kPost);
+ }
+
+ Comment("Copy the meta table");
+ {
+ TNode<IntPtrT> old_meta_table_address_with_tag =
+ BitcastTaggedToWord(LoadSwissNameDictionaryMetaTable(original));
+ TNode<IntPtrT> new_meta_table_address_with_tag =
+ BitcastTaggedToWord(meta_table);
+
+ TNode<IntPtrT> meta_table_size =
+ SwissNameDictionaryMetaTableSizeFor(capacity);
+
+ TNode<IntPtrT> old_data_start =
+ IntPtrAdd(old_meta_table_address_with_tag,
+ IntPtrConstant(ByteArray::kHeaderSize - kHeapObjectTag));
+ TNode<IntPtrT> new_data_start =
+ IntPtrAdd(new_meta_table_address_with_tag,
+ IntPtrConstant(ByteArray::kHeaderSize - kHeapObjectTag));
+
+ CallCFunction(memcpy, MachineType::Pointer(),
+ std::make_pair(MachineType::Pointer(), new_data_start),
+ std::make_pair(MachineType::Pointer(), old_data_start),
+ std::make_pair(MachineType::UintPtr(), meta_table_size));
+ }
+
+ Comment("Copy the PropertyDetails table");
+ {
+ TNode<IntPtrT> property_details_start_offset_minus_tag =
+ SwissNameDictionaryOffsetIntoPropertyDetailsTableMT(table, capacity,
+ IntPtrConstant(0));
+
+ // Offset to property details entry
+ TVARIABLE(IntPtrT, details_table_offset_minus_tag,
+ property_details_start_offset_minus_tag);
+
+ TNode<IntPtrT> start = ctrl_table_start_offset_minus_tag;
+
+ VariableList in_loop_variables({&details_table_offset_minus_tag}, zone());
+ BuildFastLoop<IntPtrT>(
+ in_loop_variables, start, IntPtrAdd(start, ctrl_table_size_bytes),
+ [&](TNode<IntPtrT> ctrl_table_offset) {
+ TNode<Uint8T> ctrl = Load<Uint8T>(original, ctrl_table_offset);
+
+ // TODO(v8:11330) Entries in the PropertyDetails table may be
+ // uninitialized if the corresponding buckets in the data/ctrl table
+ // are empty. Therefore, to avoid accessing un-initialized memory
+ // here, we need to check the ctrl table to determine whether we
+ // should copy a certain PropertyDetails entry or not.
+ // TODO(v8:11330) If this function becomes performance-critical, we
+ // may consider always initializing the PropertyDetails table entirely
+ // during allocation, to avoid the branching during copying.
+ Label done(this);
+ // |kNotFullMask| catches kEmpty and kDeleted, both of which indicate
+ // entries that we don't want to copy the PropertyDetails for.
+ GotoIf(IsSetWord32(ctrl, swiss_table::kNotFullMask), &done);
+
+ TNode<Uint8T> details =
+ Load<Uint8T>(original, details_table_offset_minus_tag.value());
+
+ StoreToObject(MachineRepresentation::kWord8, table,
+ details_table_offset_minus_tag.value(), details,
+ StoreToObjectWriteBarrier::kNone);
+ Goto(&done);
+ BIND(&done);
+
+ details_table_offset_minus_tag =
+ IntPtrAdd(details_table_offset_minus_tag.value(),
+ IntPtrConstant(kOneByteSize));
+ },
+ kOneByteSize, IndexAdvanceMode::kPost);
+ }
+
+ Comment("CopySwissNameDictionary ]");
+
+ return table;
+}
+
+TNode<IntPtrT> CodeStubAssembler::SwissNameDictionaryOffsetIntoDataTableMT(
+ TNode<SwissNameDictionary> dict, TNode<IntPtrT> index, int field_index) {
+ TNode<IntPtrT> data_table_start = SwissNameDictionaryDataTableStartOffsetMT();
+
+ TNode<IntPtrT> offset_within_data_table = IntPtrMul(
+ index,
+ IntPtrConstant(SwissNameDictionary::kDataTableEntryCount * kTaggedSize));
+
+ if (field_index != 0) {
+ offset_within_data_table = IntPtrAdd(
+ offset_within_data_table, IntPtrConstant(field_index * kTaggedSize));
+ }
+
+ return IntPtrAdd(data_table_start, offset_within_data_table);
+}
+
+TNode<IntPtrT>
+CodeStubAssembler::SwissNameDictionaryOffsetIntoPropertyDetailsTableMT(
+ TNode<SwissNameDictionary> dict, TNode<IntPtrT> capacity,
+ TNode<IntPtrT> index) {
+ CSA_ASSERT(this,
+ WordEqual(capacity, ChangeUint32ToWord(
+ LoadSwissNameDictionaryCapacity(dict))));
+
+ TNode<IntPtrT> data_table_start = SwissNameDictionaryDataTableStartOffsetMT();
+
+ TNode<IntPtrT> gw = IntPtrConstant(SwissNameDictionary::kGroupWidth);
+ TNode<IntPtrT> data_and_ctrl_table_size = IntPtrAdd(
+ IntPtrMul(capacity,
+ IntPtrConstant(kOneByteSize +
+ SwissNameDictionary::kDataTableEntryCount *
+ kTaggedSize)),
+ gw);
+
+ TNode<IntPtrT> property_details_table_start =
+ IntPtrAdd(data_table_start, data_and_ctrl_table_size);
+
+ CSA_ASSERT(
+ this,
+ WordEqual(FieldSliceSwissNameDictionaryPropertyDetailsTable(dict).offset,
+ // Our calculation subtracted the tag, Torque's offset didn't.
+ IntPtrAdd(property_details_table_start,
+ IntPtrConstant(kHeapObjectTag))));
+
+ TNode<IntPtrT> offset_within_details_table = index;
+ return IntPtrAdd(property_details_table_start, offset_within_details_table);
+}
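The offset arithmetic above walks the object's flat layout: the data table (kDataTableEntryCount tagged slots per bucket), then the ctrl table (capacity + group width bytes), then one PropertyDetails byte per bucket. A rough layout calculator with the sizes passed in as parameters (the defaults are typical but hypothetical here):

    #include <cstddef>

    struct SwissLayout {
      size_t data_table_start;
      size_t ctrl_table_start;
      size_t property_details_start;
    };

    // |header_size| stands in for DataTableStartOffset(); the remaining
    // parameters stand in for kTaggedSize, kDataTableEntryCount (key + value)
    // and kGroupWidth.
    inline SwissLayout ComputeSwissLayout(size_t header_size, size_t capacity,
                                          size_t tagged_size = 8,
                                          size_t data_entry_count = 2,
                                          size_t group_width = 16) {
      SwissLayout layout;
      layout.data_table_start = header_size;
      layout.ctrl_table_start =
          layout.data_table_start + capacity * data_entry_count * tagged_size;
      layout.property_details_start =
          layout.ctrl_table_start + capacity + group_width;
      return layout;
    }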
+
+void CodeStubAssembler::StoreSwissNameDictionaryCapacity(
+ TNode<SwissNameDictionary> table, TNode<Int32T> capacity) {
+ StoreObjectFieldNoWriteBarrier<Word32T>(
+ table, SwissNameDictionary::CapacityOffset(), capacity);
+}
+
+TNode<Name> CodeStubAssembler::LoadSwissNameDictionaryKey(
+ TNode<SwissNameDictionary> dict, TNode<IntPtrT> entry) {
+ TNode<IntPtrT> offset_minus_tag = SwissNameDictionaryOffsetIntoDataTableMT(
+ dict, entry, SwissNameDictionary::kDataTableKeyEntryIndex);
+
+ // TODO(v8:11330) Consider using LoadObjectField here.
+ return CAST(Load<Object>(dict, offset_minus_tag));
+}
+
+TNode<Uint8T> CodeStubAssembler::LoadSwissNameDictionaryPropertyDetails(
+ TNode<SwissNameDictionary> table, TNode<IntPtrT> capacity,
+ TNode<IntPtrT> entry) {
+ TNode<IntPtrT> offset_minus_tag =
+ SwissNameDictionaryOffsetIntoPropertyDetailsTableMT(table, capacity,
+ entry);
+ // TODO(v8:11330) Consider using LoadObjectField here.
+ return Load<Uint8T>(table, offset_minus_tag);
+}
+
+void CodeStubAssembler::StoreSwissNameDictionaryPropertyDetails(
+ TNode<SwissNameDictionary> table, TNode<IntPtrT> capacity,
+ TNode<IntPtrT> entry, TNode<Uint8T> details) {
+ TNode<IntPtrT> offset_minus_tag =
+ SwissNameDictionaryOffsetIntoPropertyDetailsTableMT(table, capacity,
+ entry);
+
+ // TODO(v8:11330) Consider using StoreObjectField here.
+ StoreToObject(MachineRepresentation::kWord8, table, offset_minus_tag, details,
+ StoreToObjectWriteBarrier::kNone);
+}
+
+void CodeStubAssembler::StoreSwissNameDictionaryKeyAndValue(
+ TNode<SwissNameDictionary> dict, TNode<IntPtrT> entry, TNode<Object> key,
+ TNode<Object> value) {
+ STATIC_ASSERT(SwissNameDictionary::kDataTableKeyEntryIndex == 0);
+ STATIC_ASSERT(SwissNameDictionary::kDataTableValueEntryIndex == 1);
+
+ // TODO(v8:11330) Consider using StoreObjectField here.
+ TNode<IntPtrT> key_offset_minus_tag =
+ SwissNameDictionaryOffsetIntoDataTableMT(
+ dict, entry, SwissNameDictionary::kDataTableKeyEntryIndex);
+ StoreToObject(MachineRepresentation::kTagged, dict, key_offset_minus_tag, key,
+ StoreToObjectWriteBarrier::kFull);
+
+ TNode<IntPtrT> value_offset_minus_tag =
+ IntPtrAdd(key_offset_minus_tag, IntPtrConstant(kTaggedSize));
+ StoreToObject(MachineRepresentation::kTagged, dict, value_offset_minus_tag,
+ value, StoreToObjectWriteBarrier::kFull);
+}
+
+TNode<Uint64T> CodeStubAssembler::LoadSwissNameDictionaryCtrlTableGroup(
+ TNode<IntPtrT> address) {
+ TNode<RawPtrT> ptr = ReinterpretCast<RawPtrT>(address);
+ TNode<Uint64T> data = UnalignedLoad<Uint64T>(ptr, IntPtrConstant(0));
+
+#ifdef V8_TARGET_LITTLE_ENDIAN
+ return data;
+#else
+ // Reverse byte order.
+ // TODO(v8:11330) Doing this without using dedicated instructions (which we
+ // don't have access to here) will destroy any performance benefit Swiss
+  // Tables have. We only support this so that we don't have to disable the
+  // test suite for SwissNameDictionary on big-endian platforms.
+
+ TNode<Uint64T> result = Uint64Constant(0);
+ constexpr int count = sizeof(uint64_t);
+ for (int i = 0; i < count; ++i) {
+ int src_offset = i * 8;
+ int dest_offset = (count - i - 1) * 8;
+
+ TNode<Uint64T> mask = Uint64Constant(0xffULL << src_offset);
+ TNode<Uint64T> src_data = Word64And(data, mask);
+
+ TNode<Uint64T> shifted =
+ src_offset < dest_offset
+ ? Word64Shl(src_data, Uint64Constant(dest_offset - src_offset))
+ : Word64Shr(src_data, Uint64Constant(src_offset - dest_offset));
+ result = Unsigned(Word64Or(result, shifted));
+ }
+ return result;
+#endif
+}
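On big-endian targets the 64-bit group word is byte-reversed with shifts and masks because the CSA has no dedicated byte-swap instruction here; the loop is equivalent to this plain C++ byte reversal:

    #include <cstdint>

    // Reverse the byte order of a 64-bit ctrl group word (what a native
    // byte-swap instruction or __builtin_bswap64 would do directly).
    inline uint64_t ByteReverse64(uint64_t data) {
      uint64_t result = 0;
      for (int i = 0; i < 8; ++i) {
        uint64_t byte = (data >> (i * 8)) & 0xffULL;
        result |= byte << ((7 - i) * 8);
      }
      return result;
    }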
+
+void CodeStubAssembler::SwissNameDictionarySetCtrl(
+ TNode<SwissNameDictionary> table, TNode<IntPtrT> capacity,
+ TNode<IntPtrT> entry, TNode<Uint8T> ctrl) {
+ CSA_ASSERT(this,
+ WordEqual(capacity, ChangeUint32ToWord(
+ LoadSwissNameDictionaryCapacity(table))));
+ CSA_ASSERT(this, UintPtrLessThan(entry, capacity));
+
+ TNode<IntPtrT> one = IntPtrConstant(1);
+ TNode<IntPtrT> offset = SwissNameDictionaryCtrlTableStartOffsetMT(capacity);
+
+ CSA_ASSERT(this,
+ WordEqual(FieldSliceSwissNameDictionaryCtrlTable(table).offset,
+ IntPtrAdd(offset, one)));
+
+ TNode<IntPtrT> offset_entry = IntPtrAdd(offset, entry);
+ StoreToObject(MachineRepresentation::kWord8, table, offset_entry, ctrl,
+ StoreToObjectWriteBarrier::kNone);
+
+ TNode<IntPtrT> mask = IntPtrSub(capacity, one);
+ TNode<IntPtrT> group_width = IntPtrConstant(SwissNameDictionary::kGroupWidth);
+
+ // See SwissNameDictionary::SetCtrl for description of what's going on here.
+
+ // ((entry - Group::kWidth) & mask) + 1
+ TNode<IntPtrT> copy_entry_lhs =
+ IntPtrAdd(WordAnd(IntPtrSub(entry, group_width), mask), one);
+ // ((Group::kWidth - 1) & mask)
+ TNode<IntPtrT> copy_entry_rhs = WordAnd(IntPtrSub(group_width, one), mask);
+ TNode<IntPtrT> copy_entry = IntPtrAdd(copy_entry_lhs, copy_entry_rhs);
+ TNode<IntPtrT> offset_copy_entry = IntPtrAdd(offset, copy_entry);
+
+ // |entry| < |kGroupWidth| implies |copy_entry| == |capacity| + |entry|
+ CSA_ASSERT(this, Word32Or(UintPtrGreaterThanOrEqual(entry, group_width),
+ WordEqual(copy_entry, IntPtrAdd(capacity, entry))));
+
+ // |entry| >= |kGroupWidth| implies |copy_entry| == |entry|
+ CSA_ASSERT(this, Word32Or(UintPtrLessThan(entry, group_width),
+ WordEqual(copy_entry, entry)));
+
+ // TODO(v8:11330): consider using StoreObjectFieldNoWriteBarrier here.
+ StoreToObject(MachineRepresentation::kWord8, table, offset_copy_entry, ctrl,
+ StoreToObjectWriteBarrier::kNone);
+}
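The ctrl table keeps a copy of the first group past the end so that group-wide probes never have to wrap; every SetCtrl therefore writes a second, mirrored byte. A stand-alone sketch of the mirrored-index arithmetic used above (capacity must be a power of two; for entries in the first group the mirror lands in the trailing copy, otherwise it is the entry itself):

    #include <cassert>
    #include <cstddef>

    // Mirrored ctrl slot for |entry| in a table of |capacity| buckets with the
    // given |group_width| (e.g. SwissNameDictionary::kGroupWidth).
    inline size_t MirroredCtrlIndex(size_t entry, size_t capacity,
                                    size_t group_width) {
      assert(entry < capacity);
      assert((capacity & (capacity - 1)) == 0);  // power-of-two capacity
      size_t mask = capacity - 1;
      size_t mirrored =
          ((entry - group_width) & mask) + 1 + ((group_width - 1) & mask);
      // entry < group_width  => mirrored == capacity + entry (trailing copy)
      // entry >= group_width => mirrored == entry (store is a harmless rewrite)
      assert(entry >= group_width ? mirrored == entry
                                  : mirrored == capacity + entry);
      return mirrored;
    }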
+
+void CodeStubAssembler::SwissNameDictionaryFindEntry(
+ TNode<SwissNameDictionary> table, TNode<Name> key, Label* found,
+ TVariable<IntPtrT>* var_found_entry, Label* not_found) {
+ if (SwissNameDictionary::kUseSIMD) {
+ SwissNameDictionaryFindEntrySIMD(table, key, found, var_found_entry,
+ not_found);
+ } else {
+ SwissNameDictionaryFindEntryPortable(table, key, found, var_found_entry,
+ not_found);
+ }
+}
+
+void CodeStubAssembler::SwissNameDictionaryAdd(TNode<SwissNameDictionary> table,
+ TNode<Name> key,
+ TNode<Object> value,
+ TNode<Uint8T> property_details,
+ Label* needs_resize) {
+ if (SwissNameDictionary::kUseSIMD) {
+ SwissNameDictionaryAddSIMD(table, key, value, property_details,
+ needs_resize);
+ } else {
+ SwissNameDictionaryAddPortable(table, key, value, property_details,
+ needs_resize);
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index ba331624c6f..72b8fbc8a80 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -135,6 +135,8 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
EmptyPropertyDictionary) \
V(EmptyOrderedPropertyDictionary, empty_ordered_property_dictionary, \
EmptyOrderedPropertyDictionary) \
+ V(EmptySwissPropertyDictionary, empty_swiss_property_dictionary, \
+ EmptySwissPropertyDictionary) \
V(EmptySlowElementDictionary, empty_slow_element_dictionary, \
EmptySlowElementDictionary) \
V(empty_string, empty_string, EmptyString) \
@@ -305,7 +307,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
: public compiler::CodeAssembler,
public TorqueGeneratedExportedMacrosAssembler {
public:
- using Node = compiler::Node;
using ScopedExceptionHandler = compiler::ScopedExceptionHandler;
template <typename T>
@@ -539,6 +540,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
bool TryGetIntPtrOrSmiConstantValue(TNode<IntPtrT> maybe_constant,
int* value);
+ TNode<IntPtrT> PopulationCountFallback(TNode<UintPtrT> value);
+ TNode<Int64T> PopulationCount64(TNode<Word64T> value);
+ TNode<Int32T> PopulationCount32(TNode<Word32T> value);
+ TNode<Int64T> CountTrailingZeros64(TNode<Word64T> value);
+ TNode<Int32T> CountTrailingZeros32(TNode<Word32T> value);
+ TNode<Int64T> CountLeadingZeros64(TNode<Word64T> value);
+ TNode<Int32T> CountLeadingZeros32(TNode<Word32T> value);
+
  // Round the 32-bit payload of the provided word up to the next power of two.
TNode<IntPtrT> IntPtrRoundUpToPowerOfTwo32(TNode<IntPtrT> value);
// Select the maximum of the two provided IntPtr values.
@@ -569,7 +578,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Smi conversions.
TNode<Float64T> SmiToFloat64(TNode<Smi> value);
TNode<Smi> SmiFromIntPtr(TNode<IntPtrT> value) { return SmiTag(value); }
- TNode<Smi> SmiFromInt32(SloppyTNode<Int32T> value);
+ TNode<Smi> SmiFromInt32(TNode<Int32T> value);
TNode<Smi> SmiFromUint32(TNode<Uint32T> value);
TNode<IntPtrT> SmiToIntPtr(TNode<Smi> value) { return SmiUntag(value); }
TNode<Int32T> SmiToInt32(TNode<Smi> value);
@@ -778,20 +787,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
template <class... TArgs>
TNode<Object> Call(TNode<Context> context, TNode<Object> callable,
TNode<JSReceiver> receiver, TArgs... args) {
- return UncheckedCast<Object>(CallJS(
+ return CallJS(
CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
- context, callable, receiver, args...));
+ context, callable, receiver, args...);
}
template <class... TArgs>
TNode<Object> Call(TNode<Context> context, TNode<Object> callable,
TNode<Object> receiver, TArgs... args) {
if (IsUndefinedConstant(receiver) || IsNullConstant(receiver)) {
- return UncheckedCast<Object>(CallJS(
+ return CallJS(
CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
- context, callable, receiver, args...));
+ context, callable, receiver, args...);
}
- return UncheckedCast<Object>(CallJS(CodeFactory::Call(isolate()), context,
- callable, receiver, args...));
+ return CallJS(CodeFactory::Call(isolate()), context, callable, receiver,
+ args...);
}
TNode<Object> CallApiCallback(TNode<Object> context, TNode<RawPtrT> callback,
@@ -1128,6 +1137,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Map::kConstructorOrBackPointerOrNativeContextOffset);
}
+ TNode<Simd128T> LoadSimd128(TNode<IntPtrT> ptr) {
+ return Load<Simd128T>(ptr);
+ }
+
// Reference is the CSA-equivalent of a Torque reference value, representing
// an inner pointer into a HeapObject.
//
@@ -1272,7 +1285,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Int32T> instance_type,
Label* bailout);
  // Load the identity hash of a JSReceiver.
- TNode<IntPtrT> LoadJSReceiverIdentityHash(TNode<Object> receiver,
+ TNode<IntPtrT> LoadJSReceiverIdentityHash(TNode<JSReceiver> receiver,
Label* if_no_hash = nullptr);
// This is only used on a newly allocated PropertyArray which
@@ -1520,6 +1533,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void StoreHeapNumberValue(TNode<HeapNumber> object, TNode<Float64T> value);
// Store a field to an object on the heap.
+ void StoreObjectField(TNode<HeapObject> object, int offset, TNode<Smi> value);
+ void StoreObjectField(TNode<HeapObject> object, TNode<IntPtrT> offset,
+ TNode<Smi> value);
void StoreObjectField(TNode<HeapObject> object, int offset,
TNode<Object> value);
void StoreObjectField(TNode<HeapObject> object, TNode<IntPtrT> offset,
@@ -1695,7 +1711,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Allocate a HeapNumber without initializing its value.
TNode<HeapNumber> AllocateHeapNumber();
// Allocate a HeapNumber with a specific value.
- TNode<HeapNumber> AllocateHeapNumberWithValue(SloppyTNode<Float64T> value);
+ TNode<HeapNumber> AllocateHeapNumberWithValue(TNode<Float64T> value);
TNode<HeapNumber> AllocateHeapNumberWithValue(double value) {
return AllocateHeapNumberWithValue(Float64Constant(value));
}
@@ -1716,6 +1732,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<UintPtrT> LoadBigIntDigit(TNode<BigInt> bigint,
TNode<IntPtrT> digit_index);
+ // Allocate a ByteArray with the given non-zero length.
+ TNode<ByteArray> AllocateNonEmptyByteArray(TNode<UintPtrT> length,
+ AllocationFlags flags);
+
// Allocate a ByteArray with the given length.
TNode<ByteArray> AllocateByteArray(TNode<UintPtrT> length,
AllocationFlags flags = kNone);
@@ -2023,10 +2043,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return UncheckedCast<FixedDoubleArray>(base);
}
- TNode<Int32T> ConvertElementsKindToInt(TNode<Int32T> elements_kind) {
- return UncheckedCast<Int32T>(elements_kind);
- }
-
template <typename T>
bool ClassHasMapConstant() {
return false;
@@ -2242,8 +2258,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Label* if_smi);
TNode<Number> ChangeFloat32ToTagged(TNode<Float32T> value);
TNode<Number> ChangeFloat64ToTagged(TNode<Float64T> value);
- TNode<Number> ChangeInt32ToTagged(SloppyTNode<Int32T> value);
- TNode<Number> ChangeUint32ToTagged(SloppyTNode<Uint32T> value);
+ TNode<Number> ChangeInt32ToTagged(TNode<Int32T> value);
+ TNode<Number> ChangeUint32ToTagged(TNode<Uint32T> value);
TNode<Number> ChangeUintPtrToTagged(TNode<UintPtrT> value);
TNode<Uint32T> ChangeNumberToUint32(TNode<Number> value);
TNode<Float64T> ChangeNumberToFloat64(TNode<Number> value);
@@ -2253,6 +2269,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Float64T> ChangeTaggedToFloat64(TNode<Context> context,
TNode<Object> input);
+ TNode<Int32T> ChangeBoolToInt32(TNode<BoolT> b);
+
void TaggedToNumeric(TNode<Context> context, TNode<Object> value,
TVariable<Numeric>* var_numeric);
void TaggedToNumericWithFeedback(TNode<Context> context, TNode<Object> value,
@@ -2430,6 +2448,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsStringInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsString(TNode<HeapObject> object);
TNode<BoolT> IsSeqOneByteString(TNode<HeapObject> object);
+ TNode<BoolT> IsSwissNameDictionary(TNode<HeapObject> object);
TNode<BoolT> IsSymbolInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsInternalizedStringInstanceType(TNode<Int32T> instance_type);
@@ -2819,52 +2838,26 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Returns an untagged int32.
template <class ContainerType>
TNode<Uint32T> LoadDetailsByKeyIndex(TNode<ContainerType> container,
- TNode<IntPtrT> key_index) {
- static_assert(!std::is_same<ContainerType, DescriptorArray>::value,
- "Use the non-templatized version for DescriptorArray");
- const int kKeyToDetailsOffset =
- (ContainerType::kEntryDetailsIndex - ContainerType::kEntryKeyIndex) *
- kTaggedSize;
- return Unsigned(LoadAndUntagToWord32FixedArrayElement(container, key_index,
- kKeyToDetailsOffset));
- }
+ TNode<IntPtrT> key_index);
// Loads the value for the entry with the given key_index.
// Returns a tagged value.
template <class ContainerType>
TNode<Object> LoadValueByKeyIndex(TNode<ContainerType> container,
- TNode<IntPtrT> key_index) {
- static_assert(!std::is_same<ContainerType, DescriptorArray>::value,
- "Use the non-templatized version for DescriptorArray");
- const int kKeyToValueOffset =
- (ContainerType::kEntryValueIndex - ContainerType::kEntryKeyIndex) *
- kTaggedSize;
- return LoadFixedArrayElement(container, key_index, kKeyToValueOffset);
- }
+ TNode<IntPtrT> key_index);
// Stores the details for the entry with the given key_index.
// |details| must be a Smi.
template <class ContainerType>
void StoreDetailsByKeyIndex(TNode<ContainerType> container,
- TNode<IntPtrT> key_index, TNode<Smi> details) {
- const int kKeyToDetailsOffset =
- (ContainerType::kEntryDetailsIndex - ContainerType::kEntryKeyIndex) *
- kTaggedSize;
- StoreFixedArrayElement(container, key_index, details, kKeyToDetailsOffset);
- }
+ TNode<IntPtrT> key_index, TNode<Smi> details);
// Stores the value for the entry with the given key_index.
template <class ContainerType>
void StoreValueByKeyIndex(
TNode<ContainerType> container, TNode<IntPtrT> key_index,
TNode<Object> value,
- WriteBarrierMode write_barrier = UPDATE_WRITE_BARRIER) {
- const int kKeyToValueOffset =
- (ContainerType::kEntryValueIndex - ContainerType::kEntryKeyIndex) *
- kTaggedSize;
- StoreFixedArrayElement(container, key_index, value, write_barrier,
- kKeyToValueOffset);
- }
+ WriteBarrierMode write_barrier = UPDATE_WRITE_BARRIER);
  // Calculate a valid size for a hash table.
TNode<IntPtrT> HashTableComputeCapacity(TNode<IntPtrT> at_least_space_for);
@@ -2880,12 +2873,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
template <class Dictionary>
void SetNumberOfElements(TNode<Dictionary> dictionary,
TNode<Smi> num_elements_smi) {
+ // Not supposed to be used for SwissNameDictionary.
+ STATIC_ASSERT(!(std::is_same<Dictionary, SwissNameDictionary>::value));
+
StoreFixedArrayElement(dictionary, Dictionary::kNumberOfElementsIndex,
num_elements_smi, SKIP_WRITE_BARRIER);
}
template <class Dictionary>
TNode<Smi> GetNumberOfDeletedElements(TNode<Dictionary> dictionary) {
+ // Not supposed to be used for SwissNameDictionary.
+ STATIC_ASSERT(!(std::is_same<Dictionary, SwissNameDictionary>::value));
+
return CAST(LoadFixedArrayElement(
dictionary, Dictionary::kNumberOfDeletedElementsIndex));
}
@@ -2893,6 +2892,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
template <class Dictionary>
void SetNumberOfDeletedElements(TNode<Dictionary> dictionary,
TNode<Smi> num_deleted_smi) {
+ // Not supposed to be used for SwissNameDictionary.
+ STATIC_ASSERT(!(std::is_same<Dictionary, SwissNameDictionary>::value));
+
StoreFixedArrayElement(dictionary,
Dictionary::kNumberOfDeletedElementsIndex,
num_deleted_smi, SKIP_WRITE_BARRIER);
@@ -2900,6 +2902,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
template <class Dictionary>
TNode<Smi> GetCapacity(TNode<Dictionary> dictionary) {
+ // Not supposed to be used for SwissNameDictionary.
+ STATIC_ASSERT(!(std::is_same<Dictionary, SwissNameDictionary>::value));
+
return CAST(
UnsafeLoadFixedArrayElement(dictionary, Dictionary::kCapacityIndex));
}
@@ -3040,10 +3045,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<IntPtrT> name_index, TNode<Uint32T>,
TVariable<Object>* var_value);
- void LoadPropertyFromNameDictionary(TNode<NameDictionary> dictionary,
- TNode<IntPtrT> name_index,
- TVariable<Uint32T>* var_details,
- TVariable<Object>* var_value);
+ template <typename Dictionary>
+ void LoadPropertyFromDictionary(TNode<Dictionary> dictionary,
+ TNode<IntPtrT> name_index,
+ TVariable<Uint32T>* var_details,
+ TVariable<Object>* var_value);
void LoadPropertyFromGlobalDictionary(TNode<GlobalDictionary> dictionary,
TNode<IntPtrT> name_index,
TVariable<Uint32T>* var_details,
@@ -3179,7 +3185,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Report that there was a feedback update, performing any tasks that should
// be done after a feedback update.
void ReportFeedbackUpdate(TNode<FeedbackVector> feedback_vector,
- SloppyTNode<UintPtrT> slot_id, const char* reason);
+ TNode<UintPtrT> slot_id, const char* reason);
// Combine the new feedback with the existing_feedback. Do nothing if
// existing_feedback is nullptr.
@@ -3203,10 +3209,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Uint8T> Int32ToUint8Clamped(TNode<Int32T> int32_value);
TNode<Uint8T> Float64ToUint8Clamped(TNode<Float64T> float64_value);
- Node* PrepareValueForWriteToTypedArray(TNode<Object> input,
- ElementsKind elements_kind,
- TNode<Context> context);
-
template <typename T>
TNode<T> PrepareValueForWriteToTypedArray(TNode<Object> input,
ElementsKind elements_kind,
@@ -3216,9 +3218,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// TODO(turbofan): For BIGINT64_ELEMENTS and BIGUINT64_ELEMENTS
// we pass {value} as BigInt object instead of int64_t. We should
// teach TurboFan to handle int64_t on 32-bit platforms eventually.
- template <typename TIndex>
+ template <typename TIndex, typename TValue>
void StoreElement(TNode<RawPtrT> elements, ElementsKind kind,
- TNode<TIndex> index, Node* value);
+ TNode<TIndex> index, TNode<TValue> value);
// Implements the BigInt part of
// https://tc39.github.io/proposal-bigint/#sec-numbertorawbytes,
@@ -3487,52 +3489,25 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Code> LoadBuiltin(TNode<Smi> builtin_id);
// Figure out the SFI's code object using its data field.
+ // If |data_type_out| is provided, the instance type of the function data will
+  // be stored in it. If the code object is a builtin (the data is a Smi),
+  // |data_type_out| will be set to 0.
// If |if_compile_lazy| is provided then the execution will go to the given
  // label in case of a CompileLazy code object.
- TNode<Code> GetSharedFunctionInfoCode(TNode<SharedFunctionInfo> shared_info,
- Label* if_compile_lazy = nullptr);
+ TNode<Code> GetSharedFunctionInfoCode(
+ TNode<SharedFunctionInfo> shared_info,
+ TVariable<Uint16T>* data_type_out = nullptr,
+ Label* if_compile_lazy = nullptr);
TNode<JSFunction> AllocateFunctionWithMapAndContext(
TNode<Map> map, TNode<SharedFunctionInfo> shared_info,
TNode<Context> context);
// Promise helpers
- TNode<Uint32T> PromiseHookFlags();
+ TNode<BoolT> IsPromiseHookEnabled();
TNode<BoolT> HasAsyncEventDelegate();
- TNode<BoolT> IsContextPromiseHookEnabled(TNode<Uint32T> flags);
- TNode<BoolT> IsContextPromiseHookEnabled() {
- return IsContextPromiseHookEnabled(PromiseHookFlags());
- }
- TNode<BoolT> IsAnyPromiseHookEnabled(TNode<Uint32T> flags);
- TNode<BoolT> IsAnyPromiseHookEnabled() {
- return IsAnyPromiseHookEnabled(PromiseHookFlags());
- }
- TNode<BoolT> IsIsolatePromiseHookEnabledOrHasAsyncEventDelegate(
- TNode<Uint32T> flags);
- TNode<BoolT> IsIsolatePromiseHookEnabledOrHasAsyncEventDelegate() {
- return IsIsolatePromiseHookEnabledOrHasAsyncEventDelegate(
- PromiseHookFlags());
- }
- TNode<BoolT>
- IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
- TNode<Uint32T> flags);
- TNode<BoolT>
- IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() {
- return IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
- PromiseHookFlags());
- }
- TNode<BoolT> IsAnyPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
- TNode<Uint32T> flags);
- TNode<BoolT>
- IsAnyPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() {
- return IsAnyPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
- PromiseHookFlags());
- }
-
- TNode<BoolT> NeedsAnyPromiseHooks(TNode<Uint32T> flags);
- TNode<BoolT> NeedsAnyPromiseHooks() {
- return NeedsAnyPromiseHooks(PromiseHookFlags());
- }
+ TNode<BoolT> IsPromiseHookEnabledOrHasAsyncEventDelegate();
+ TNode<BoolT> IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate();
// for..in helpers
void CheckPrototypeEnumCache(TNode<JSReceiver> receiver,
@@ -3579,6 +3554,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
bool ConstexprInt32GreaterThanEqual(int32_t a, int32_t b) { return a >= b; }
uint32_t ConstexprUint32Add(uint32_t a, uint32_t b) { return a + b; }
int32_t ConstexprUint32Sub(uint32_t a, uint32_t b) { return a - b; }
+ int32_t ConstexprInt32Sub(int32_t a, int32_t b) { return a - b; }
+ int32_t ConstexprInt32Add(int32_t a, int32_t b) { return a + b; }
int31_t ConstexprInt31Add(int31_t a, int31_t b) {
int32_t val;
CHECK(!base::bits::SignedAddOverflow32(a, b, &val));
@@ -3733,6 +3710,88 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return IntPtrConstant(FeedbackIterator::kHandlerOffset);
}
+ TNode<SwissNameDictionary> AllocateSwissNameDictionary(
+ TNode<IntPtrT> at_least_space_for);
+ TNode<SwissNameDictionary> AllocateSwissNameDictionary(
+ int at_least_space_for);
+
+ TNode<SwissNameDictionary> AllocateSwissNameDictionaryWithCapacity(
+ TNode<IntPtrT> capacity);
+
+ // MT stands for "minus tag".
+ TNode<IntPtrT> SwissNameDictionaryOffsetIntoDataTableMT(
+ TNode<SwissNameDictionary> dict, TNode<IntPtrT> index, int field_index);
+
+ // MT stands for "minus tag".
+ TNode<IntPtrT> SwissNameDictionaryOffsetIntoPropertyDetailsTableMT(
+ TNode<SwissNameDictionary> dict, TNode<IntPtrT> capacity,
+ TNode<IntPtrT> index);
+
+ TNode<IntPtrT> LoadSwissNameDictionaryNumberOfElements(
+ TNode<SwissNameDictionary> table, TNode<IntPtrT> capacity);
+
+ TNode<IntPtrT> LoadSwissNameDictionaryNumberOfDeletedElements(
+ TNode<SwissNameDictionary> table, TNode<IntPtrT> capacity);
+
+ // Specialized operation to be used when adding entries:
+ // If used capacity (= number of present + deleted elements) is less than
+ // |max_usable|, increment the number of present entries and return the used
+ // capacity value (prior to the incrementation). Otherwise, goto |bailout|.
+ TNode<Uint32T> SwissNameDictionaryIncreaseElementCountOrBailout(
+ TNode<ByteArray> meta_table, TNode<IntPtrT> capacity,
+ TNode<Uint32T> max_usable_capacity, Label* bailout);
+
+ // Specialized operation to be used when deleting entries: Decreases the
+ // number of present entries and increases the number of deleted ones.
+ // Returns the new (= decremented) number of present entries.
+ TNode<Uint32T> SwissNameDictionaryUpdateCountsForDeletion(
+ TNode<ByteArray> meta_table, TNode<IntPtrT> capacity);
+
+ void StoreSwissNameDictionaryCapacity(TNode<SwissNameDictionary> table,
+ TNode<Int32T> capacity);
+
+ void StoreSwissNameDictionaryEnumToEntryMapping(
+ TNode<SwissNameDictionary> table, TNode<IntPtrT> capacity,
+ TNode<IntPtrT> enum_index, TNode<Int32T> entry);
+
+ TNode<Name> LoadSwissNameDictionaryKey(TNode<SwissNameDictionary> dict,
+ TNode<IntPtrT> entry);
+
+ void StoreSwissNameDictionaryKeyAndValue(TNode<SwissNameDictionary> dict,
+ TNode<IntPtrT> entry,
+ TNode<Object> key,
+ TNode<Object> value);
+
+ // Equivalent to SwissNameDictionary::SetCtrl, therefore preserves the copy of
+ // the first group at the end of the control table.
+ void SwissNameDictionarySetCtrl(TNode<SwissNameDictionary> table,
+ TNode<IntPtrT> capacity, TNode<IntPtrT> entry,
+ TNode<Uint8T> ctrl);
+
+ TNode<Uint64T> LoadSwissNameDictionaryCtrlTableGroup(TNode<IntPtrT> address);
+
+ TNode<Uint8T> LoadSwissNameDictionaryPropertyDetails(
+ TNode<SwissNameDictionary> table, TNode<IntPtrT> capacity,
+ TNode<IntPtrT> entry);
+
+ void StoreSwissNameDictionaryPropertyDetails(TNode<SwissNameDictionary> table,
+ TNode<IntPtrT> capacity,
+ TNode<IntPtrT> entry,
+ TNode<Uint8T> details);
+
+ TNode<SwissNameDictionary> CopySwissNameDictionary(
+ TNode<SwissNameDictionary> original);
+
+ void SwissNameDictionaryFindEntry(TNode<SwissNameDictionary> table,
+ TNode<Name> key, Label* found,
+ TVariable<IntPtrT>* var_found_entry,
+ Label* not_found);
+
+ void SwissNameDictionaryAdd(TNode<SwissNameDictionary> table, TNode<Name> key,
+ TNode<Object> value,
+ TNode<Uint8T> property_details,
+ Label* needs_resize);
+
private:
friend class CodeStubArguments;
@@ -3831,15 +3890,23 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> value, WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
int additional_offset = 0);
+ template <typename TIndex>
+ void StoreElementTypedArrayBigInt(TNode<RawPtrT> elements, ElementsKind kind,
+ TNode<TIndex> index, TNode<BigInt> value);
+
+ template <typename TIndex>
+ void StoreElementTypedArrayWord32(TNode<RawPtrT> elements, ElementsKind kind,
+ TNode<TIndex> index, TNode<Word32T> value);
+
// Store value to an elements array with given elements kind.
// TODO(turbofan): For BIGINT64_ELEMENTS and BIGUINT64_ELEMENTS
// we pass {value} as BigInt object instead of int64_t. We should
// teach TurboFan to handle int64_t on 32-bit platforms eventually.
// TODO(solanes): This method can go away and simplify into only one version
// of StoreElement once we have "if constexpr" available to use.
- template <typename TArray, typename TIndex>
+ template <typename TArray, typename TIndex, typename TValue>
void StoreElementTypedArray(TNode<TArray> elements, ElementsKind kind,
- TNode<TIndex> index, Node* value);
+ TNode<TIndex> index, TNode<TValue> value);
template <typename TIndex>
void StoreElement(TNode<FixedArrayBase> elements, ElementsKind kind,
@@ -3855,12 +3922,23 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void TryPlainPrimitiveNonNumberToNumber(TNode<HeapObject> input,
TVariable<Number>* var_result,
Label* if_bailout);
+
+ template <typename TValue>
+ void EmitElementStoreTypedArray(TNode<JSTypedArray> typed_array,
+ TNode<IntPtrT> key, TNode<Object> value,
+ ElementsKind elements_kind,
+ KeyedAccessStoreMode store_mode,
+ Label* bailout, TNode<Context> context,
+ TVariable<Object>* maybe_converted_value);
+
+ template <typename TValue>
+ void EmitElementStoreTypedArrayUpdateValue(
+ TNode<Object> value, ElementsKind elements_kind,
+ TNode<TValue> converted_value, TVariable<Object>* maybe_converted_value);
};
class V8_EXPORT_PRIVATE CodeStubArguments {
public:
- using Node = compiler::Node;
-
// |argc| specifies the number of arguments passed to the builtin excluding
// the receiver. The arguments include the receiver.
CodeStubArguments(CodeStubAssembler* assembler, TNode<IntPtrT> argc)
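The SwissNameDictionary helpers added above largely mirror the runtime's C++ SwissNameDictionary operations. As a rough, plain-C++ sketch of the used-capacity check performed by SwissNameDictionaryIncreaseElementCountOrBailout (the real meta table is a ByteArray, not a struct; the types and helper below are purely illustrative):

    #include <cstdint>
    #include <optional>

    // Illustrative stand-in for the dictionary's meta table.
    struct MetaTable {
      uint32_t number_of_elements = 0;
      uint32_t number_of_deleted_elements = 0;
    };

    // Returns the used capacity before the increment, or std::nullopt when
    // the table is too full and the caller must resize (the CSA version
    // jumps to the |bailout| label instead).
    std::optional<uint32_t> IncreaseElementCountOrBailout(
        MetaTable& meta, uint32_t max_usable_capacity) {
      uint32_t used =
          meta.number_of_elements + meta.number_of_deleted_elements;
      if (used >= max_usable_capacity) return std::nullopt;
      meta.number_of_elements++;
      return used;
    }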
diff --git a/deps/v8/src/codegen/compilation-cache.cc b/deps/v8/src/codegen/compilation-cache.cc
index 826b53293a2..3941e56e6a6 100644
--- a/deps/v8/src/codegen/compilation-cache.cc
+++ b/deps/v8/src/codegen/compilation-cache.cc
@@ -145,7 +145,7 @@ bool CompilationCacheScript::HasOrigin(Handle<SharedFunctionInfo> function_info,
MaybeHandle<SharedFunctionInfo> CompilationCacheScript::Lookup(
Handle<String> source, MaybeHandle<Object> name, int line_offset,
int column_offset, ScriptOriginOptions resource_options,
- Handle<Context> native_context, LanguageMode language_mode) {
+ LanguageMode language_mode) {
MaybeHandle<SharedFunctionInfo> result;
// Probe the script generation tables. Make sure not to leak handles
@@ -156,7 +156,7 @@ MaybeHandle<SharedFunctionInfo> CompilationCacheScript::Lookup(
DCHECK_EQ(generations(), 1);
Handle<CompilationCacheTable> table = GetTable(generation);
MaybeHandle<SharedFunctionInfo> probe = CompilationCacheTable::LookupScript(
- table, source, native_context, language_mode);
+ table, source, language_mode, isolate());
Handle<SharedFunctionInfo> function_info;
if (probe.ToHandle(&function_info)) {
// Break when we've found a suitable shared function info that
@@ -188,13 +188,12 @@ MaybeHandle<SharedFunctionInfo> CompilationCacheScript::Lookup(
}
void CompilationCacheScript::Put(Handle<String> source,
- Handle<Context> native_context,
LanguageMode language_mode,
Handle<SharedFunctionInfo> function_info) {
HandleScope scope(isolate());
Handle<CompilationCacheTable> table = GetFirstTable();
- SetFirstTable(CompilationCacheTable::PutScript(table, source, native_context,
- language_mode, function_info));
+ SetFirstTable(CompilationCacheTable::PutScript(table, source, language_mode,
+ function_info, isolate()));
}
InfoCellPair CompilationCacheEval::Lookup(Handle<String> source,
@@ -331,11 +330,11 @@ void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
MaybeHandle<SharedFunctionInfo> CompilationCache::LookupScript(
Handle<String> source, MaybeHandle<Object> name, int line_offset,
int column_offset, ScriptOriginOptions resource_options,
- Handle<Context> native_context, LanguageMode language_mode) {
+ LanguageMode language_mode) {
if (!IsEnabledScriptAndEval()) return MaybeHandle<SharedFunctionInfo>();
return script_.Lookup(source, name, line_offset, column_offset,
- resource_options, native_context, language_mode);
+ resource_options, language_mode);
}
InfoCellPair CompilationCache::LookupEval(Handle<String> source,
@@ -378,13 +377,12 @@ MaybeHandle<Code> CompilationCache::LookupCode(Handle<SharedFunctionInfo> sfi) {
}
void CompilationCache::PutScript(Handle<String> source,
- Handle<Context> native_context,
LanguageMode language_mode,
Handle<SharedFunctionInfo> function_info) {
if (!IsEnabledScriptAndEval()) return;
LOG(isolate(), CompilationCacheEvent("put", "script", *function_info));
- script_.Put(source, native_context, language_mode, function_info);
+ script_.Put(source, language_mode, function_info);
}
void CompilationCache::PutEval(Handle<String> source,
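With the native context parameter gone, the script compilation cache is keyed per isolate rather than per context. The updated call shape, mirroring the compiler.cc callers changed later in this patch (fragment only; error handling and the surrounding compile pipeline are elided):

    MaybeHandle<SharedFunctionInfo> maybe_sfi =
        compilation_cache->LookupScript(
            source, script_details.name_obj, script_details.line_offset,
            script_details.column_offset, origin_options, language_mode);
    Handle<SharedFunctionInfo> sfi;
    if (!maybe_sfi.ToHandle(&sfi)) {
      // ... compile the script, then promote the result to the cache ...
      compilation_cache->PutScript(source, language_mode, sfi);
    }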
diff --git a/deps/v8/src/codegen/compilation-cache.h b/deps/v8/src/codegen/compilation-cache.h
index 58e4e0f75fe..0ed13e53b6d 100644
--- a/deps/v8/src/codegen/compilation-cache.h
+++ b/deps/v8/src/codegen/compilation-cache.h
@@ -85,11 +85,9 @@ class CompilationCacheScript : public CompilationSubCache {
MaybeHandle<Object> name,
int line_offset, int column_offset,
ScriptOriginOptions resource_options,
- Handle<Context> native_context,
LanguageMode language_mode);
- void Put(Handle<String> source, Handle<Context> context,
- LanguageMode language_mode,
+ void Put(Handle<String> source, LanguageMode language_mode,
Handle<SharedFunctionInfo> function_info);
void Age() override;
@@ -193,7 +191,7 @@ class V8_EXPORT_PRIVATE CompilationCache {
MaybeHandle<SharedFunctionInfo> LookupScript(
Handle<String> source, MaybeHandle<Object> name, int line_offset,
int column_offset, ScriptOriginOptions resource_options,
- Handle<Context> native_context, LanguageMode language_mode);
+ LanguageMode language_mode);
// Finds the shared function info for a source string for eval in a
// given context. Returns an empty handle if the cache doesn't
@@ -212,8 +210,7 @@ class V8_EXPORT_PRIVATE CompilationCache {
// Associate the (source, kind) pair to the shared function
// info. This may overwrite an existing mapping.
- void PutScript(Handle<String> source, Handle<Context> native_context,
- LanguageMode language_mode,
+ void PutScript(Handle<String> source, LanguageMode language_mode,
Handle<SharedFunctionInfo> function_info);
// Associate the (source, context->closure()->shared(), kind) triple
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index 66336ca32cd..e46639d90a4 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -288,7 +288,6 @@ ScriptOriginOptions OriginOptionsForEval(Object script) {
// Implementation of UnoptimizedCompilationJob
CompilationJob::Status UnoptimizedCompilationJob::ExecuteJob() {
- DisallowHeapAccess no_heap_access;
// Delegate to the underlying implementation.
DCHECK_EQ(state(), State::kReadyToExecute);
ScopedTimer t(&time_taken_to_execute_);
@@ -319,12 +318,15 @@ namespace {
void RecordUnoptimizedCompilationStats(Isolate* isolate,
Handle<SharedFunctionInfo> shared_info) {
- int code_size;
- if (shared_info->HasBytecodeArray()) {
- code_size = shared_info->GetBytecodeArray(isolate).SizeIncludingMetadata();
- } else {
- code_size = shared_info->asm_wasm_data().Size();
- }
+#if V8_ENABLE_WEBASSEMBLY
+ int code_size =
+ shared_info->HasBytecodeArray()
+ ? shared_info->GetBytecodeArray(isolate).SizeIncludingMetadata()
+ : shared_info->asm_wasm_data().Size();
+#else
+ int code_size =
+ shared_info->GetBytecodeArray(isolate).SizeIncludingMetadata();
+#endif // V8_ENABLE_WEBASSEMBLY
Counters* counters = isolate->counters();
// TODO(4280): Rename counters from "baseline" to "unoptimized" eventually.
@@ -344,9 +346,13 @@ void RecordUnoptimizedFunctionCompilation(
abstract_code =
handle(AbstractCode::cast(shared->GetBytecodeArray(isolate)), isolate);
} else {
+#if V8_ENABLE_WEBASSEMBLY
DCHECK(shared->HasAsmWasmData());
abstract_code =
Handle<AbstractCode>::cast(BUILTIN_CODE(isolate, InstantiateAsmJs));
+#else
+ UNREACHABLE();
+#endif // V8_ENABLE_WEBASSEMBLY
}
double time_taken_ms = time_taken_to_execute.InMillisecondsF() +
@@ -516,8 +522,9 @@ bool UseAsmWasm(FunctionLiteral* literal, bool asm_wasm_broken) {
}
#endif
-void InstallInterpreterTrampolineCopy(Isolate* isolate,
- Handle<SharedFunctionInfo> shared_info) {
+void InstallInterpreterTrampolineCopy(
+ Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
+ CodeEventListener::LogEventsAndTags log_tag) {
DCHECK(FLAG_interpreted_frames_native_stack);
if (!shared_info->function_data(kAcquireLoad).IsBytecodeArray()) {
DCHECK(!shared_info->HasBytecodeArray());
@@ -548,8 +555,6 @@ void InstallInterpreterTrampolineCopy(Isolate* isolate,
handle(script->name().IsString() ? String::cast(script->name())
: ReadOnlyRoots(isolate).empty_string(),
isolate);
- CodeEventListener::LogEventsAndTags log_tag = Logger::ToNativeByScript(
- CodeEventListener::INTERPRETED_FUNCTION_TAG, *script);
PROFILE(isolate, CodeCreateEvent(log_tag, abstract_code, shared_info,
script_name, line_num, column_num));
}
@@ -563,11 +568,13 @@ void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info,
DCHECK(!compilation_info->has_asm_wasm_data());
DCHECK(!shared_info->HasFeedbackMetadata());
+#if V8_ENABLE_WEBASSEMBLY
// If the function failed asm-wasm compilation, mark asm_wasm as broken
// to ensure we don't try to compile as asm-wasm.
if (compilation_info->literal()->scope()->IsAsmModule()) {
shared_info->set_is_asm_wasm_broken(true);
}
+#endif // V8_ENABLE_WEBASSEMBLY
shared_info->set_bytecode_array(*compilation_info->bytecode_array());
@@ -575,29 +582,24 @@ void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info,
isolate, compilation_info->feedback_vector_spec());
shared_info->set_feedback_metadata(*feedback_metadata);
} else {
+#if V8_ENABLE_WEBASSEMBLY
DCHECK(compilation_info->has_asm_wasm_data());
// We should only have asm/wasm data when finalizing on the main thread.
DCHECK((std::is_same<LocalIsolate, Isolate>::value));
shared_info->set_asm_wasm_data(*compilation_info->asm_wasm_data());
shared_info->set_feedback_metadata(
ReadOnlyRoots(isolate).empty_feedback_metadata());
+#else
+ UNREACHABLE();
+#endif // V8_ENABLE_WEBASSEMBLY
}
}
void LogUnoptimizedCompilation(Isolate* isolate,
Handle<SharedFunctionInfo> shared_info,
- UnoptimizedCompileFlags flags,
+ CodeEventListener::LogEventsAndTags log_tag,
base::TimeDelta time_taken_to_execute,
base::TimeDelta time_taken_to_finalize) {
- CodeEventListener::LogEventsAndTags log_tag;
- if (flags.is_toplevel()) {
- log_tag = flags.is_eval() ? CodeEventListener::EVAL_TAG
- : CodeEventListener::SCRIPT_TAG;
- } else {
- log_tag = flags.is_lazy_compile() ? CodeEventListener::LAZY_COMPILE_TAG
- : CodeEventListener::FUNCTION_TAG;
- }
-
RecordUnoptimizedFunctionCompilation(isolate, log_tag, shared_info,
time_taken_to_execute,
time_taken_to_finalize);
@@ -684,7 +686,11 @@ bool CompileSharedWithBaseline(Isolate* isolate,
base::TimeDelta time_taken;
{
ScopedTimer timer(&time_taken);
- code = GenerateBaselineCode(isolate, shared);
+ if (!GenerateBaselineCode(isolate, shared).ToHandle(&code)) {
+ // TODO(leszeks): This can only fail because of an OOM. Do we want to
+ // report these somehow, or silently ignore them?
+ return false;
+ }
Handle<HeapObject> function_data =
handle(HeapObject::cast(shared->function_data(kAcquireLoad)), isolate);
@@ -1341,19 +1347,24 @@ void FinalizeUnoptimizedCompilation(
if (need_source_positions) {
SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared_info);
}
- if (FLAG_interpreted_frames_native_stack) {
- InstallInterpreterTrampolineCopy(isolate, shared_info);
+ CodeEventListener::LogEventsAndTags log_tag;
+ if (shared_info->is_toplevel()) {
+ log_tag = flags.is_eval() ? CodeEventListener::EVAL_TAG
+ : CodeEventListener::SCRIPT_TAG;
+ } else {
+ log_tag = flags.is_lazy_compile() ? CodeEventListener::LAZY_COMPILE_TAG
+ : CodeEventListener::FUNCTION_TAG;
}
- if (FLAG_always_sparkplug) {
- CompileSharedWithBaseline(isolate, shared_info, Compiler::KEEP_EXCEPTION,
- &is_compiled_scope);
+ log_tag = Logger::ToNativeByScript(log_tag, *script);
+ if (FLAG_interpreted_frames_native_stack) {
+ InstallInterpreterTrampolineCopy(isolate, shared_info, log_tag);
}
Handle<CoverageInfo> coverage_info;
if (finalize_data.coverage_info().ToHandle(&coverage_info)) {
isolate->debug()->InstallCoverageInfo(shared_info, coverage_info);
}
- LogUnoptimizedCompilation(isolate, shared_info, flags,
+ LogUnoptimizedCompilation(isolate, shared_info, log_tag,
finalize_data.time_taken_to_execute(),
finalize_data.time_taken_to_finalize());
}
@@ -1394,6 +1405,19 @@ void FinalizeUnoptimizedScriptCompilation(
}
}
+void CompileAllWithBaseline(Isolate* isolate,
+ const FinalizeUnoptimizedCompilationDataList&
+ finalize_unoptimized_compilation_data_list) {
+ for (const auto& finalize_data : finalize_unoptimized_compilation_data_list) {
+ Handle<SharedFunctionInfo> shared_info = finalize_data.function_handle();
+ IsCompiledScope is_compiled_scope(*shared_info, isolate);
+ if (!is_compiled_scope.is_compiled()) continue;
+ if (!CanCompileWithBaseline(isolate, shared_info)) continue;
+ CompileSharedWithBaseline(isolate, shared_info, Compiler::CLEAR_EXCEPTION,
+ &is_compiled_scope);
+ }
+}
+
// Create shared function info for top level and shared function infos array for
// inner functions.
template <typename LocalIsolate>
@@ -1462,6 +1486,11 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(
FinalizeUnoptimizedScriptCompilation(
isolate, script, parse_info->flags(), parse_info->state(),
finalize_unoptimized_compilation_data_list);
+
+ if (FLAG_always_sparkplug) {
+ CompileAllWithBaseline(isolate, finalize_unoptimized_compilation_data_list);
+ }
+
return shared_info;
}
@@ -1919,6 +1948,10 @@ bool Compiler::Compile(Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
FinalizeUnoptimizedCompilation(isolate, script, flags, &compile_state,
finalize_unoptimized_compilation_data_list);
+ if (FLAG_always_sparkplug) {
+ CompileAllWithBaseline(isolate, finalize_unoptimized_compilation_data_list);
+ }
+
DCHECK(!isolate->has_pending_exception());
DCHECK(is_compiled_scope->is_compiled());
return true;
@@ -1958,13 +1991,12 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
// immediately after a flush would be better.
JSFunction::InitializeFeedbackCell(function, is_compiled_scope, true);
- // If --always-sparkplug is enabled, make sure we have baseline code.
- if (FLAG_always_sparkplug && CanCompileWithBaseline(isolate, shared_info)) {
- DCHECK(shared_info->HasBaselineData());
- }
-
// Optimize now if --always-opt is enabled.
+#if V8_ENABLE_WEBASSEMBLY
if (FLAG_always_opt && !function->shared().HasAsmWasmData()) {
+#else
+ if (FLAG_always_opt) {
+#endif // V8_ENABLE_WEBASSEMBLY
CompilerTracer::TraceOptimizeForAlwaysOpt(isolate, function,
CodeKindForTopTier());
@@ -2769,7 +2801,11 @@ MaybeHandle<SharedFunctionInfo> CompileScriptOnBothBackgroundAndMainThread(
}
// Join with background thread and finalize compilation.
- background_compile_thread.Join();
+ {
+ ParkedScope scope(isolate->main_thread_local_isolate());
+ background_compile_thread.Join();
+ }
+
MaybeHandle<SharedFunctionInfo> maybe_result =
Compiler::GetSharedFunctionInfoForStreamedScript(
isolate, source, script_details, origin_options,
@@ -2838,8 +2874,7 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
// First check per-isolate compilation cache.
maybe_result = compilation_cache->LookupScript(
source, script_details.name_obj, script_details.line_offset,
- script_details.column_offset, origin_options, isolate->native_context(),
- language_mode);
+ script_details.column_offset, origin_options, language_mode);
if (!maybe_result.is_null()) {
compile_timer.set_hit_isolate_cache();
} else if (can_consume_code_cache) {
@@ -2858,8 +2893,7 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
// Promote to per-isolate compilation cache.
is_compiled_scope = inner_result->is_compiled_scope(isolate);
DCHECK(is_compiled_scope.is_compiled());
- compilation_cache->PutScript(source, isolate->native_context(),
- language_mode, inner_result);
+ compilation_cache->PutScript(source, language_mode, inner_result);
Handle<Script> script(Script::cast(inner_result->script()), isolate);
maybe_result = inner_result;
} else {
@@ -2897,8 +2931,7 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
Handle<SharedFunctionInfo> result;
if (use_compilation_cache && maybe_result.ToHandle(&result)) {
DCHECK(is_compiled_scope.is_compiled());
- compilation_cache->PutScript(source, isolate->native_context(),
- language_mode, result);
+ compilation_cache->PutScript(source, language_mode, result);
} else if (maybe_result.is_null() && natives != EXTENSION_CODE) {
isolate->ReportPendingMessages();
}
@@ -3028,8 +3061,7 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
"V8.StreamingFinalization.CheckCache");
maybe_result = compilation_cache->LookupScript(
source, script_details.name_obj, script_details.line_offset,
- script_details.column_offset, origin_options, isolate->native_context(),
- task->language_mode());
+ script_details.column_offset, origin_options, task->language_mode());
if (!maybe_result.is_null()) {
compile_timer.set_hit_isolate_cache();
}
@@ -3038,6 +3070,7 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
if (maybe_result.is_null()) {
// No cache entry found, finalize compilation of the script and add it to
// the isolate cache.
+ DCHECK_EQ(task->flags().is_module(), origin_options.IsModule());
Handle<Script> script;
if (FLAG_finalize_streaming_on_background && !origin_options.IsModule()) {
@@ -3068,8 +3101,8 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
isolate->heap()->SetRootScriptList(*scripts);
} else {
ParseInfo* parse_info = task->info();
- DCHECK(parse_info->flags().is_toplevel());
DCHECK_EQ(parse_info->flags().is_module(), origin_options.IsModule());
+ DCHECK(parse_info->flags().is_toplevel());
script = parse_info->CreateScript(isolate, source, kNullMaybeHandle,
origin_options);
@@ -3119,8 +3152,7 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
// Add compiled code to the isolate cache.
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.StreamingFinalization.AddToCache");
- compilation_cache->PutScript(source, isolate->native_context(),
- task->language_mode(), result);
+ compilation_cache->PutScript(source, task->language_mode(), result);
}
}
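The log-tag computation that previously lived inside LogUnoptimizedCompilation now happens once in FinalizeUnoptimizedCompilation and is also handed to InstallInterpreterTrampolineCopy. Condensed from the hunks above (no new behavior is implied):

    CodeEventListener::LogEventsAndTags log_tag =
        shared_info->is_toplevel()
            ? (flags.is_eval() ? CodeEventListener::EVAL_TAG
                               : CodeEventListener::SCRIPT_TAG)
            : (flags.is_lazy_compile() ? CodeEventListener::LAZY_COMPILE_TAG
                                       : CodeEventListener::FUNCTION_TAG);
    log_tag = Logger::ToNativeByScript(log_tag, *script);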
diff --git a/deps/v8/src/codegen/cpu-features.h b/deps/v8/src/codegen/cpu-features.h
index 7cb45c77852..b9a450ea3a0 100644
--- a/deps/v8/src/codegen/cpu-features.h
+++ b/deps/v8/src/codegen/cpu-features.h
@@ -58,6 +58,7 @@ enum CpuFeature {
ISELECT,
VSX,
MODULO,
+ SIMD,
#elif V8_TARGET_ARCH_S390X
FPU,
@@ -108,9 +109,9 @@ class V8_EXPORT_PRIVATE CpuFeatures : public AllStatic {
return (supported_ & (1u << f)) != 0;
}
- static inline bool SupportsOptimizer();
+ static bool SupportsWasmSimd128();
- static inline bool SupportsWasmSimd128();
+ static inline bool SupportsOptimizer();
static inline unsigned icache_line_size() {
DCHECK_NE(icache_line_size_, 0);
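The new SIMD entry in the PPC branch of the CpuFeature enum gives PowerPC code generation a single bit to key vector support on, and SupportsWasmSimd128 moves out of line so each architecture can define it in its assembler .cc (as the ia32 change below does). A hypothetical consumer on the PPC side might look like this (illustrative fragment; the guard and fallback are assumptions, not code from this patch):

    if (CpuFeatures::IsSupported(SIMD)) {
      CpuFeatureScope simd_scope(masm, SIMD);
      // ... emit vector (SIMD) instructions ...
    } else {
      // ... fall back to scalar code ...
    }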
diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc
index 88ce90f4fdc..454b04e8935 100644
--- a/deps/v8/src/codegen/external-reference.cc
+++ b/deps/v8/src/codegen/external-reference.cc
@@ -30,7 +30,10 @@
#include "src/regexp/regexp-macro-assembler-arch.h"
#include "src/regexp/regexp-stack.h"
#include "src/strings/string-search.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-external-refs.h"
+#endif // V8_ENABLE_WEBASSEMBLY
#ifdef V8_INTL_SUPPORT
#include "src/base/platform/wrappers.h"
@@ -335,53 +338,66 @@ FUNCTION_REFERENCE(new_deoptimizer_function, Deoptimizer::New)
FUNCTION_REFERENCE(compute_output_frames_function,
Deoptimizer::ComputeOutputFrames)
-FUNCTION_REFERENCE(wasm_f32_trunc, wasm::f32_trunc_wrapper)
-FUNCTION_REFERENCE(wasm_f32_floor, wasm::f32_floor_wrapper)
-FUNCTION_REFERENCE(wasm_f32_ceil, wasm::f32_ceil_wrapper)
-FUNCTION_REFERENCE(wasm_f32_nearest_int, wasm::f32_nearest_int_wrapper)
-FUNCTION_REFERENCE(wasm_f64_trunc, wasm::f64_trunc_wrapper)
-FUNCTION_REFERENCE(wasm_f64_floor, wasm::f64_floor_wrapper)
-FUNCTION_REFERENCE(wasm_f64_ceil, wasm::f64_ceil_wrapper)
-FUNCTION_REFERENCE(wasm_f64_nearest_int, wasm::f64_nearest_int_wrapper)
-FUNCTION_REFERENCE(wasm_int64_to_float32, wasm::int64_to_float32_wrapper)
-FUNCTION_REFERENCE(wasm_uint64_to_float32, wasm::uint64_to_float32_wrapper)
-FUNCTION_REFERENCE(wasm_int64_to_float64, wasm::int64_to_float64_wrapper)
-FUNCTION_REFERENCE(wasm_uint64_to_float64, wasm::uint64_to_float64_wrapper)
-FUNCTION_REFERENCE(wasm_float32_to_int64, wasm::float32_to_int64_wrapper)
-FUNCTION_REFERENCE(wasm_float32_to_uint64, wasm::float32_to_uint64_wrapper)
-FUNCTION_REFERENCE(wasm_float64_to_int64, wasm::float64_to_int64_wrapper)
-FUNCTION_REFERENCE(wasm_float64_to_uint64, wasm::float64_to_uint64_wrapper)
-FUNCTION_REFERENCE(wasm_float32_to_int64_sat,
- wasm::float32_to_int64_sat_wrapper)
-FUNCTION_REFERENCE(wasm_float32_to_uint64_sat,
- wasm::float32_to_uint64_sat_wrapper)
-FUNCTION_REFERENCE(wasm_float64_to_int64_sat,
- wasm::float64_to_int64_sat_wrapper)
-FUNCTION_REFERENCE(wasm_float64_to_uint64_sat,
- wasm::float64_to_uint64_sat_wrapper)
-FUNCTION_REFERENCE(wasm_int64_div, wasm::int64_div_wrapper)
-FUNCTION_REFERENCE(wasm_int64_mod, wasm::int64_mod_wrapper)
-FUNCTION_REFERENCE(wasm_uint64_div, wasm::uint64_div_wrapper)
-FUNCTION_REFERENCE(wasm_uint64_mod, wasm::uint64_mod_wrapper)
-FUNCTION_REFERENCE(wasm_word32_ctz, wasm::word32_ctz_wrapper)
-FUNCTION_REFERENCE(wasm_word64_ctz, wasm::word64_ctz_wrapper)
-FUNCTION_REFERENCE(wasm_word32_popcnt, wasm::word32_popcnt_wrapper)
-FUNCTION_REFERENCE(wasm_word64_popcnt, wasm::word64_popcnt_wrapper)
-FUNCTION_REFERENCE(wasm_word32_rol, wasm::word32_rol_wrapper)
-FUNCTION_REFERENCE(wasm_word32_ror, wasm::word32_ror_wrapper)
-FUNCTION_REFERENCE(wasm_word64_rol, wasm::word64_rol_wrapper)
-FUNCTION_REFERENCE(wasm_word64_ror, wasm::word64_ror_wrapper)
-FUNCTION_REFERENCE(wasm_f64x2_ceil, wasm::f64x2_ceil_wrapper)
-FUNCTION_REFERENCE(wasm_f64x2_floor, wasm::f64x2_floor_wrapper)
-FUNCTION_REFERENCE(wasm_f64x2_trunc, wasm::f64x2_trunc_wrapper)
-FUNCTION_REFERENCE(wasm_f64x2_nearest_int, wasm::f64x2_nearest_int_wrapper)
-FUNCTION_REFERENCE(wasm_f32x4_ceil, wasm::f32x4_ceil_wrapper)
-FUNCTION_REFERENCE(wasm_f32x4_floor, wasm::f32x4_floor_wrapper)
-FUNCTION_REFERENCE(wasm_f32x4_trunc, wasm::f32x4_trunc_wrapper)
-FUNCTION_REFERENCE(wasm_f32x4_nearest_int, wasm::f32x4_nearest_int_wrapper)
-FUNCTION_REFERENCE(wasm_memory_init, wasm::memory_init_wrapper)
-FUNCTION_REFERENCE(wasm_memory_copy, wasm::memory_copy_wrapper)
-FUNCTION_REFERENCE(wasm_memory_fill, wasm::memory_fill_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_f32_trunc, wasm::f32_trunc_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_f32_floor, wasm::f32_floor_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_f32_ceil, wasm::f32_ceil_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_f32_nearest_int, wasm::f32_nearest_int_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_f64_trunc, wasm::f64_trunc_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_f64_floor, wasm::f64_floor_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_f64_ceil, wasm::f64_ceil_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_f64_nearest_int, wasm::f64_nearest_int_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_int64_to_float32,
+ wasm::int64_to_float32_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_uint64_to_float32,
+ wasm::uint64_to_float32_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_int64_to_float64,
+ wasm::int64_to_float64_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_uint64_to_float64,
+ wasm::uint64_to_float64_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_float32_to_int64,
+ wasm::float32_to_int64_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_float32_to_uint64,
+ wasm::float32_to_uint64_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_float64_to_int64,
+ wasm::float64_to_int64_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_float64_to_uint64,
+ wasm::float64_to_uint64_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_float32_to_int64_sat,
+ wasm::float32_to_int64_sat_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_float32_to_uint64_sat,
+ wasm::float32_to_uint64_sat_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_float64_to_int64_sat,
+ wasm::float64_to_int64_sat_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_float64_to_uint64_sat,
+ wasm::float64_to_uint64_sat_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_int64_div, wasm::int64_div_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_int64_mod, wasm::int64_mod_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_uint64_div, wasm::uint64_div_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_uint64_mod, wasm::uint64_mod_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_word32_ctz, wasm::word32_ctz_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_word64_ctz, wasm::word64_ctz_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_word32_popcnt, wasm::word32_popcnt_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_word64_popcnt, wasm::word64_popcnt_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_word32_rol, wasm::word32_rol_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_word32_ror, wasm::word32_ror_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_word64_rol, wasm::word64_rol_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_word64_ror, wasm::word64_ror_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_f64x2_ceil, wasm::f64x2_ceil_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_f64x2_floor, wasm::f64x2_floor_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_f64x2_trunc, wasm::f64x2_trunc_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_f64x2_nearest_int,
+ wasm::f64x2_nearest_int_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_f32x4_ceil, wasm::f32x4_ceil_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_f32x4_floor, wasm::f32x4_floor_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_f32x4_trunc, wasm::f32x4_trunc_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_f32x4_nearest_int,
+ wasm::f32x4_nearest_int_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_memory_init, wasm::memory_init_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_memory_copy, wasm::memory_copy_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_memory_fill, wasm::memory_fill_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_float64_pow, wasm::float64_pow_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_call_trap_callback_for_testing,
+ wasm::call_trap_callback_for_testing)
static void f64_acos_wrapper(Address data) {
double input = ReadUnalignedValue<double>(data);
@@ -397,7 +413,6 @@ static void f64_asin_wrapper(Address data) {
FUNCTION_REFERENCE(f64_asin_wrapper_function, f64_asin_wrapper)
-FUNCTION_REFERENCE(wasm_float64_pow, wasm::float64_pow_wrapper)
static void f64_mod_wrapper(Address data) {
double dividend = ReadUnalignedValue<double>(data);
@@ -407,9 +422,6 @@ static void f64_mod_wrapper(Address data) {
FUNCTION_REFERENCE(f64_mod_wrapper_function, f64_mod_wrapper)
-FUNCTION_REFERENCE(wasm_call_trap_callback_for_testing,
- wasm::call_trap_callback_for_testing)
-
ExternalReference ExternalReference::isolate_root(Isolate* isolate) {
return ExternalReference(isolate->isolate_root());
}
@@ -609,6 +621,34 @@ ExternalReference::address_of_enable_experimental_regexp_engine() {
return ExternalReference(&FLAG_enable_experimental_regexp_engine);
}
+namespace {
+
+static uintptr_t BaselinePCForBytecodeOffset(Address raw_code_obj,
+ int bytecode_offset,
+ Address raw_bytecode_array) {
+ Code code_obj = Code::cast(Object(raw_code_obj));
+ BytecodeArray bytecode_array =
+ BytecodeArray::cast(Object(raw_bytecode_array));
+ return code_obj.GetBaselineStartPCForBytecodeOffset(bytecode_offset,
+ bytecode_array);
+}
+
+static uintptr_t BaselinePCForNextExecutedBytecode(Address raw_code_obj,
+ int bytecode_offset,
+ Address raw_bytecode_array) {
+ Code code_obj = Code::cast(Object(raw_code_obj));
+ BytecodeArray bytecode_array =
+ BytecodeArray::cast(Object(raw_bytecode_array));
+ return code_obj.GetBaselinePCForNextExecutedBytecode(bytecode_offset,
+ bytecode_array);
+}
+
+} // namespace
+
+FUNCTION_REFERENCE(baseline_pc_for_bytecode_offset, BaselinePCForBytecodeOffset)
+FUNCTION_REFERENCE(baseline_pc_for_next_executed_bytecode,
+ BaselinePCForNextExecutedBytecode)
+
ExternalReference ExternalReference::thread_in_wasm_flag_address_address(
Isolate* isolate) {
return ExternalReference(isolate->thread_in_wasm_flag_address_address());
@@ -925,11 +965,6 @@ ExternalReference ExternalReference::cpu_features() {
return ExternalReference(&CpuFeatures::supported_);
}
-ExternalReference ExternalReference::promise_hook_flags_address(
- Isolate* isolate) {
- return ExternalReference(isolate->promise_hook_flags_address());
-}
-
ExternalReference ExternalReference::promise_hook_address(Isolate* isolate) {
return ExternalReference(isolate->promise_hook_address());
}
@@ -939,6 +974,21 @@ ExternalReference ExternalReference::async_event_delegate_address(
return ExternalReference(isolate->async_event_delegate_address());
}
+ExternalReference
+ExternalReference::promise_hook_or_async_event_delegate_address(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate->promise_hook_or_async_event_delegate_address());
+}
+
+ExternalReference ExternalReference::
+ promise_hook_or_debug_is_active_or_async_event_delegate_address(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate
+ ->promise_hook_or_debug_is_active_or_async_event_delegate_address());
+}
+
ExternalReference ExternalReference::debug_execution_mode_address(
Isolate* isolate) {
return ExternalReference(isolate->debug_execution_mode_address());
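Routing the wasm entries through IF_WASM lets no-wasm builds drop them entirely. The macro is defined elsewhere in V8; roughly, it forwards to the wrapped list macro only when V8_ENABLE_WEBASSEMBLY is set (a sketch of the pattern, not the exact definition):

    #if V8_ENABLE_WEBASSEMBLY
    #define IF_WASM(V, ...) V(__VA_ARGS__)
    #else
    #define IF_WASM(V, ...)
    #endif

    // So IF_WASM(FUNCTION_REFERENCE, wasm_f32_trunc, wasm::f32_trunc_wrapper)
    // expands to a normal FUNCTION_REFERENCE in wasm-enabled builds and to
    // nothing at all when WebAssembly is compiled out.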
diff --git a/deps/v8/src/codegen/external-reference.h b/deps/v8/src/codegen/external-reference.h
index 0cd80ca6f1b..f53db401c9e 100644
--- a/deps/v8/src/codegen/external-reference.h
+++ b/deps/v8/src/codegen/external-reference.h
@@ -50,9 +50,13 @@ class StatsCounter;
V(handle_scope_limit_address, "HandleScope::limit") \
V(scheduled_exception_address, "Isolate::scheduled_exception") \
V(address_of_pending_message_obj, "address_of_pending_message_obj") \
- V(promise_hook_flags_address, "Isolate::promise_hook_flags_address()") \
V(promise_hook_address, "Isolate::promise_hook_address()") \
V(async_event_delegate_address, "Isolate::async_event_delegate_address()") \
+ V(promise_hook_or_async_event_delegate_address, \
+ "Isolate::promise_hook_or_async_event_delegate_address()") \
+ V(promise_hook_or_debug_is_active_or_async_event_delegate_address, \
+ "Isolate::promise_hook_or_debug_is_active_or_async_event_delegate_" \
+ "address()") \
V(debug_execution_mode_address, "Isolate::debug_execution_mode_address()") \
V(debug_is_active_address, "Debug::is_active_address()") \
V(debug_hook_on_function_call_address, \
@@ -120,6 +124,9 @@ class StatsCounter;
V(address_of_wasm_i8x16_splat_0x33, "wasm_i8x16_splat_0x33") \
V(address_of_wasm_i8x16_splat_0x55, "wasm_i8x16_splat_0x55") \
V(address_of_wasm_i16x8_splat_0x0001, "wasm_i16x8_splat_0x0001") \
+ V(baseline_pc_for_bytecode_offset, "BaselinePCForBytecodeOffset") \
+ V(baseline_pc_for_next_executed_bytecode, \
+ "BaselinePCForNextExecutedBytecode") \
V(bytecode_size_table_address, "Bytecodes::bytecode_size_table_address") \
V(check_object_type, "check_object_type") \
V(compute_integer_hash, "ComputeSeededHash") \
@@ -194,52 +201,54 @@ class StatsCounter;
V(string_to_array_index_function, "String::ToArrayIndex") \
V(try_string_to_index_or_lookup_existing, \
"try_string_to_index_or_lookup_existing") \
- V(wasm_call_trap_callback_for_testing, \
- "wasm::call_trap_callback_for_testing") \
- V(wasm_f32_ceil, "wasm::f32_ceil_wrapper") \
- V(wasm_f32_floor, "wasm::f32_floor_wrapper") \
- V(wasm_f32_nearest_int, "wasm::f32_nearest_int_wrapper") \
- V(wasm_f32_trunc, "wasm::f32_trunc_wrapper") \
- V(wasm_f64_ceil, "wasm::f64_ceil_wrapper") \
- V(wasm_f64_floor, "wasm::f64_floor_wrapper") \
- V(wasm_f64_nearest_int, "wasm::f64_nearest_int_wrapper") \
- V(wasm_f64_trunc, "wasm::f64_trunc_wrapper") \
- V(wasm_float32_to_int64, "wasm::float32_to_int64_wrapper") \
- V(wasm_float32_to_uint64, "wasm::float32_to_uint64_wrapper") \
- V(wasm_float32_to_int64_sat, "wasm::float32_to_int64_sat_wrapper") \
- V(wasm_float32_to_uint64_sat, "wasm::float32_to_uint64_sat_wrapper") \
- V(wasm_float64_pow, "wasm::float64_pow") \
- V(wasm_float64_to_int64, "wasm::float64_to_int64_wrapper") \
- V(wasm_float64_to_uint64, "wasm::float64_to_uint64_wrapper") \
- V(wasm_float64_to_int64_sat, "wasm::float64_to_int64_sat_wrapper") \
- V(wasm_float64_to_uint64_sat, "wasm::float64_to_uint64_sat_wrapper") \
- V(wasm_int64_div, "wasm::int64_div") \
- V(wasm_int64_mod, "wasm::int64_mod") \
- V(wasm_int64_to_float32, "wasm::int64_to_float32_wrapper") \
- V(wasm_int64_to_float64, "wasm::int64_to_float64_wrapper") \
- V(wasm_uint64_div, "wasm::uint64_div") \
- V(wasm_uint64_mod, "wasm::uint64_mod") \
- V(wasm_uint64_to_float32, "wasm::uint64_to_float32_wrapper") \
- V(wasm_uint64_to_float64, "wasm::uint64_to_float64_wrapper") \
- V(wasm_word32_ctz, "wasm::word32_ctz") \
- V(wasm_word32_popcnt, "wasm::word32_popcnt") \
- V(wasm_word32_rol, "wasm::word32_rol") \
- V(wasm_word32_ror, "wasm::word32_ror") \
- V(wasm_word64_rol, "wasm::word64_rol") \
- V(wasm_word64_ror, "wasm::word64_ror") \
- V(wasm_word64_ctz, "wasm::word64_ctz") \
- V(wasm_word64_popcnt, "wasm::word64_popcnt") \
- V(wasm_f64x2_ceil, "wasm::f64x2_ceil_wrapper") \
- V(wasm_f64x2_floor, "wasm::f64x2_floor_wrapper") \
- V(wasm_f64x2_trunc, "wasm::f64x2_trunc_wrapper") \
- V(wasm_f64x2_nearest_int, "wasm::f64x2_nearest_int_wrapper") \
- V(wasm_f32x4_ceil, "wasm::f32x4_ceil_wrapper") \
- V(wasm_f32x4_floor, "wasm::f32x4_floor_wrapper") \
- V(wasm_f32x4_trunc, "wasm::f32x4_trunc_wrapper") \
- V(wasm_f32x4_nearest_int, "wasm::f32x4_nearest_int_wrapper") \
- V(wasm_memory_init, "wasm::memory_init") \
- V(wasm_memory_copy, "wasm::memory_copy") \
- V(wasm_memory_fill, "wasm::memory_fill") \
+ IF_WASM(V, wasm_call_trap_callback_for_testing, \
+ "wasm::call_trap_callback_for_testing") \
+ IF_WASM(V, wasm_f32_ceil, "wasm::f32_ceil_wrapper") \
+ IF_WASM(V, wasm_f32_floor, "wasm::f32_floor_wrapper") \
+ IF_WASM(V, wasm_f32_nearest_int, "wasm::f32_nearest_int_wrapper") \
+ IF_WASM(V, wasm_f32_trunc, "wasm::f32_trunc_wrapper") \
+ IF_WASM(V, wasm_f64_ceil, "wasm::f64_ceil_wrapper") \
+ IF_WASM(V, wasm_f64_floor, "wasm::f64_floor_wrapper") \
+ IF_WASM(V, wasm_f64_nearest_int, "wasm::f64_nearest_int_wrapper") \
+ IF_WASM(V, wasm_f64_trunc, "wasm::f64_trunc_wrapper") \
+ IF_WASM(V, wasm_float32_to_int64, "wasm::float32_to_int64_wrapper") \
+ IF_WASM(V, wasm_float32_to_uint64, "wasm::float32_to_uint64_wrapper") \
+ IF_WASM(V, wasm_float32_to_int64_sat, "wasm::float32_to_int64_sat_wrapper") \
+ IF_WASM(V, wasm_float32_to_uint64_sat, \
+ "wasm::float32_to_uint64_sat_wrapper") \
+ IF_WASM(V, wasm_float64_pow, "wasm::float64_pow") \
+ IF_WASM(V, wasm_float64_to_int64, "wasm::float64_to_int64_wrapper") \
+ IF_WASM(V, wasm_float64_to_uint64, "wasm::float64_to_uint64_wrapper") \
+ IF_WASM(V, wasm_float64_to_int64_sat, "wasm::float64_to_int64_sat_wrapper") \
+ IF_WASM(V, wasm_float64_to_uint64_sat, \
+ "wasm::float64_to_uint64_sat_wrapper") \
+ IF_WASM(V, wasm_int64_div, "wasm::int64_div") \
+ IF_WASM(V, wasm_int64_mod, "wasm::int64_mod") \
+ IF_WASM(V, wasm_int64_to_float32, "wasm::int64_to_float32_wrapper") \
+ IF_WASM(V, wasm_int64_to_float64, "wasm::int64_to_float64_wrapper") \
+ IF_WASM(V, wasm_uint64_div, "wasm::uint64_div") \
+ IF_WASM(V, wasm_uint64_mod, "wasm::uint64_mod") \
+ IF_WASM(V, wasm_uint64_to_float32, "wasm::uint64_to_float32_wrapper") \
+ IF_WASM(V, wasm_uint64_to_float64, "wasm::uint64_to_float64_wrapper") \
+ IF_WASM(V, wasm_word32_ctz, "wasm::word32_ctz") \
+ IF_WASM(V, wasm_word32_popcnt, "wasm::word32_popcnt") \
+ IF_WASM(V, wasm_word32_rol, "wasm::word32_rol") \
+ IF_WASM(V, wasm_word32_ror, "wasm::word32_ror") \
+ IF_WASM(V, wasm_word64_rol, "wasm::word64_rol") \
+ IF_WASM(V, wasm_word64_ror, "wasm::word64_ror") \
+ IF_WASM(V, wasm_word64_ctz, "wasm::word64_ctz") \
+ IF_WASM(V, wasm_word64_popcnt, "wasm::word64_popcnt") \
+ IF_WASM(V, wasm_f64x2_ceil, "wasm::f64x2_ceil_wrapper") \
+ IF_WASM(V, wasm_f64x2_floor, "wasm::f64x2_floor_wrapper") \
+ IF_WASM(V, wasm_f64x2_trunc, "wasm::f64x2_trunc_wrapper") \
+ IF_WASM(V, wasm_f64x2_nearest_int, "wasm::f64x2_nearest_int_wrapper") \
+ IF_WASM(V, wasm_f32x4_ceil, "wasm::f32x4_ceil_wrapper") \
+ IF_WASM(V, wasm_f32x4_floor, "wasm::f32x4_floor_wrapper") \
+ IF_WASM(V, wasm_f32x4_trunc, "wasm::f32x4_trunc_wrapper") \
+ IF_WASM(V, wasm_f32x4_nearest_int, "wasm::f32x4_nearest_int_wrapper") \
+ IF_WASM(V, wasm_memory_init, "wasm::memory_init") \
+ IF_WASM(V, wasm_memory_copy, "wasm::memory_copy") \
+ IF_WASM(V, wasm_memory_fill, "wasm::memory_fill") \
V(address_of_wasm_f64x2_convert_low_i32x4_u_int_mask, \
"wasm_f64x2_convert_low_i32x4_u_int_mask") \
V(supports_wasm_simd_128_address, "wasm::supports_wasm_simd_128_address") \
diff --git a/deps/v8/src/codegen/handler-table.cc b/deps/v8/src/codegen/handler-table.cc
index 8aec047d137..7bede6aa9b9 100644
--- a/deps/v8/src/codegen/handler-table.cc
+++ b/deps/v8/src/codegen/handler-table.cc
@@ -11,7 +11,10 @@
#include "src/codegen/assembler-inl.h"
#include "src/objects/code-inl.h"
#include "src/objects/objects-inl.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -20,9 +23,11 @@ HandlerTable::HandlerTable(Code code)
: HandlerTable(code.HandlerTableAddress(), code.handler_table_size(),
kReturnAddressBasedEncoding) {}
+#if V8_ENABLE_WEBASSEMBLY
HandlerTable::HandlerTable(const wasm::WasmCode* code)
: HandlerTable(code->handler_table(), code->handler_table_size(),
kReturnAddressBasedEncoding) {}
+#endif // V8_ENABLE_WEBASSEMBLY
HandlerTable::HandlerTable(BytecodeArray bytecode_array)
: HandlerTable(bytecode_array.handler_table()) {}
diff --git a/deps/v8/src/codegen/handler-table.h b/deps/v8/src/codegen/handler-table.h
index d2ed80a0e05..e1626e2be50 100644
--- a/deps/v8/src/codegen/handler-table.h
+++ b/deps/v8/src/codegen/handler-table.h
@@ -58,7 +58,9 @@ class V8_EXPORT_PRIVATE HandlerTable {
// Constructors for the various encodings.
explicit HandlerTable(Code code);
explicit HandlerTable(ByteArray byte_array);
+#if V8_ENABLE_WEBASSEMBLY
explicit HandlerTable(const wasm::WasmCode* code);
+#endif // V8_ENABLE_WEBASSEMBLY
explicit HandlerTable(BytecodeArray bytecode_array);
HandlerTable(Address handler_table, int handler_table_size,
EncodingMode encoding_mode);
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32-inl.h b/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
index 25d2d486cec..1585f970e84 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
@@ -49,12 +49,6 @@ namespace internal {
bool CpuFeatures::SupportsOptimizer() { return true; }
-bool CpuFeatures::SupportsWasmSimd128() {
- if (IsSupported(SSE4_1)) return true;
- if (FLAG_wasm_simd_ssse3_codegen && IsSupported(SSSE3)) return true;
- return false;
-}
-
// The modes possibly affected by apply must be in kApplyMask.
void RelocInfo::apply(intptr_t delta) {
DCHECK_EQ(kApplyMask, (RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.cc b/deps/v8/src/codegen/ia32/assembler-ia32.cc
index 3f9d7ddfa2f..809df1daef0 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.cc
@@ -122,6 +122,14 @@ bool OSHasAVXSupport() {
} // namespace
+bool CpuFeatures::SupportsWasmSimd128() {
+#if V8_ENABLE_WEBASSEMBLY
+ if (IsSupported(SSE4_1)) return true;
+ if (FLAG_wasm_simd_ssse3_codegen && IsSupported(SSSE3)) return true;
+#endif // V8_ENABLE_WEBASSEMBLY
+ return false;
+}
+
void CpuFeatures::ProbeImpl(bool cross_compile) {
base::CPU cpu;
CHECK(cpu.has_sse2()); // SSE2 support is mandatory.
@@ -130,14 +138,9 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
// Only use statically determined features for cross compile (snapshot).
if (cross_compile) return;
- if (cpu.has_sse42() && FLAG_enable_sse4_2) supported_ |= 1u << SSE4_2;
- if (cpu.has_sse41() && FLAG_enable_sse4_1) supported_ |= 1u << SSE4_1;
- if (cpu.has_ssse3() && FLAG_enable_ssse3) supported_ |= 1u << SSSE3;
- if (cpu.has_sse3() && FLAG_enable_sse3) supported_ |= 1u << SSE3;
- if (cpu.has_avx() && FLAG_enable_avx && cpu.has_osxsave() &&
- OSHasAVXSupport()) {
- supported_ |= 1u << AVX;
- }
+ // To deal with any combination of flags (e.g. --no-enable-sse4-1
+ // --enable-sse4-2), we start checking from the "highest" supported
+ // extension; each extension is also enabled whenever a newer extension
+ // that implies it is supported.
if (cpu.has_avx2() && FLAG_enable_avx2 && IsSupported(AVX)) {
supported_ |= 1u << AVX2;
}
@@ -145,6 +148,19 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
OSHasAVXSupport()) {
supported_ |= 1u << FMA3;
}
+ if ((cpu.has_avx() && FLAG_enable_avx && cpu.has_osxsave() &&
+ OSHasAVXSupport()) ||
+ IsSupported(AVX2) || IsSupported(FMA3)) {
+ supported_ |= 1u << AVX;
+ }
+ if ((cpu.has_sse42() && FLAG_enable_sse4_2) || IsSupported(AVX))
+ supported_ |= 1u << SSE4_2;
+ if ((cpu.has_sse41() && FLAG_enable_sse4_1) || IsSupported(SSE4_2))
+ supported_ |= 1u << SSE4_1;
+ if ((cpu.has_ssse3() && FLAG_enable_ssse3) || IsSupported(SSE4_1))
+ supported_ |= 1u << SSSE3;
+ if ((cpu.has_sse3() && FLAG_enable_sse3) || IsSupported(SSSE3))
+ supported_ |= 1u << SSE3;
if (cpu.has_bmi1() && FLAG_enable_bmi1) supported_ |= 1u << BMI1;
if (cpu.has_bmi2() && FLAG_enable_bmi2) supported_ |= 1u << BMI2;
if (cpu.has_lzcnt() && FLAG_enable_lzcnt) supported_ |= 1u << LZCNT;
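The reordered probing means that requesting a newer extension now implies all of its predecessors, so flag combinations such as --enable-avx --no-enable-sse4-1 can no longer yield an inconsistent feature set. A standalone sketch of the invariant the new code establishes (this helper is illustrative and does not exist in V8):

    #include <cstdint>

    // Feature bit positions are assumed to mirror the ia32 CpuFeature enum:
    // AVX2/FMA3 => AVX => SSE4_2 => SSE4_1 => SSSE3 => SSE3.
    enum Feature { SSE3, SSSE3, SSE4_1, SSE4_2, AVX, AVX2, FMA3 };

    inline bool Has(uint32_t supported, Feature f) {
      return (supported & (1u << f)) != 0;
    }

    // True when every enabled extension also has the extensions it implies.
    inline bool ImplicationsHold(uint32_t s) {
      return (!Has(s, AVX2) || Has(s, AVX)) && (!Has(s, FMA3) || Has(s, AVX)) &&
             (!Has(s, AVX) || Has(s, SSE4_2)) &&
             (!Has(s, SSE4_2) || Has(s, SSE4_1)) &&
             (!Has(s, SSE4_1) || Has(s, SSSE3)) &&
             (!Has(s, SSSE3) || Has(s, SSE3));
    }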
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.h b/deps/v8/src/codegen/ia32/assembler-ia32.h
index 3914c355446..2a8fd3ee28b 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.h
@@ -235,6 +235,12 @@ class V8_EXPORT_PRIVATE Operand {
explicit Operand(Register base, int32_t disp,
RelocInfo::Mode rmode = RelocInfo::NONE);
+ // [rip + disp/r]
+ explicit Operand(Label* label) {
+ set_modrm(0, ebp);
+ set_dispr(reinterpret_cast<intptr_t>(label), RelocInfo::INTERNAL_REFERENCE);
+ }
+
// [base + index*scale + disp/r]
explicit Operand(Register base, Register index, ScaleFactor scale,
int32_t disp, RelocInfo::Mode rmode = RelocInfo::NONE);
diff --git a/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc b/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc
index d732fa2fbbd..fd76e01590b 100644
--- a/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc
@@ -90,13 +90,9 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return ecx; }
const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
- // TODO(v8:11421): Implement on this platform.
- UNREACHABLE();
-}
-const Register BaselineLeaveFrameDescriptor::WeightRegister() {
- // TODO(v8:11421): Implement on this platform.
- UNREACHABLE();
+ return esi;
}
+const Register BaselineLeaveFrameDescriptor::WeightRegister() { return edi; }
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return eax; }
@@ -224,8 +220,8 @@ void CompareDescriptor::InitializePlatformSpecific(
void Compare_BaselineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on this platform.
- InitializePlatformUnimplemented(data, kParameterCount);
+ Register registers[] = {edx, eax, ecx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
void BinaryOpDescriptor::InitializePlatformSpecific(
@@ -236,8 +232,8 @@ void BinaryOpDescriptor::InitializePlatformSpecific(
void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on this platform.
- InitializePlatformUnimplemented(data, kParameterCount);
+ Register registers[] = {edx, eax, ecx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ApiCallbackDescriptor::InitializePlatformSpecific(
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
index 7a99d6c7012..9892eb9470e 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -628,28 +628,6 @@ void TurboAssembler::Cvttsd2ui(Register dst, Operand src, XMMRegister tmp) {
add(dst, Immediate(0x80000000));
}
-void TurboAssembler::Roundps(XMMRegister dst, XMMRegister src,
- RoundingMode mode) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vroundps(dst, src, mode);
- } else {
- CpuFeatureScope scope(this, SSE4_1);
- roundps(dst, src, mode);
- }
-}
-
-void TurboAssembler::Roundpd(XMMRegister dst, XMMRegister src,
- RoundingMode mode) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vroundpd(dst, src, mode);
- } else {
- CpuFeatureScope scope(this, SSE4_1);
- roundpd(dst, src, mode);
- }
-}
-
void TurboAssembler::Pmulhrsw(XMMRegister dst, XMMRegister src1,
XMMRegister src2) {
if (CpuFeatures::IsSupported(AVX)) {
@@ -657,217 +635,13 @@ void TurboAssembler::Pmulhrsw(XMMRegister dst, XMMRegister src1,
vpmulhrsw(dst, src1, src2);
} else {
if (dst != src1) {
- movdqu(dst, src1);
+ movaps(dst, src1);
}
CpuFeatureScope sse_scope(this, SSSE3);
pmulhrsw(dst, src2);
}
}
-// 1. Unpack src0, src1 into even-number elements of scratch.
-// 2. Unpack src1, src0 into even-number elements of dst.
-// 3. Multiply 1. with 2.
-// For non-AVX, use non-destructive pshufd instead of punpckldq/punpckhdq.
-void TurboAssembler::I64x2ExtMul(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister scratch,
- bool low, bool is_signed) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- if (low) {
- vpunpckldq(scratch, src1, src1);
- vpunpckldq(dst, src2, src2);
- } else {
- vpunpckhdq(scratch, src1, src1);
- vpunpckhdq(dst, src2, src2);
- }
- if (is_signed) {
- vpmuldq(dst, scratch, dst);
- } else {
- vpmuludq(dst, scratch, dst);
- }
- } else {
- uint8_t mask = low ? 0x50 : 0xFA;
- pshufd(scratch, src1, mask);
- pshufd(dst, src2, mask);
- if (is_signed) {
- CpuFeatureScope sse4_scope(this, SSE4_1);
- pmuldq(dst, scratch);
- } else {
- pmuludq(dst, scratch);
- }
- }
-}
-
-// 1. Multiply low word into scratch.
-// 2. Multiply high word (can be signed or unsigned) into dst.
-// 3. Unpack and interleave scratch and dst into dst.
-void TurboAssembler::I32x4ExtMul(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister scratch,
- bool low, bool is_signed) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpmullw(scratch, src1, src2);
- is_signed ? vpmulhw(dst, src1, src2) : vpmulhuw(dst, src1, src2);
- low ? vpunpcklwd(dst, scratch, dst) : vpunpckhwd(dst, scratch, dst);
- } else {
- DCHECK_EQ(dst, src1);
- movdqu(scratch, src1);
- pmullw(dst, src2);
- is_signed ? pmulhw(scratch, src2) : pmulhuw(scratch, src2);
- low ? punpcklwd(dst, scratch) : punpckhwd(dst, scratch);
- }
-}
-
-void TurboAssembler::I16x8ExtMul(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister scratch,
- bool low, bool is_signed) {
- if (low) {
- is_signed ? Pmovsxbw(scratch, src1) : Pmovzxbw(scratch, src1);
- is_signed ? Pmovsxbw(dst, src2) : Pmovzxbw(dst, src2);
- Pmullw(dst, scratch);
- } else {
- Palignr(scratch, src1, uint8_t{8});
- is_signed ? Pmovsxbw(scratch, scratch) : Pmovzxbw(scratch, scratch);
- Palignr(dst, src2, uint8_t{8});
- is_signed ? Pmovsxbw(dst, dst) : Pmovzxbw(dst, dst);
- Pmullw(dst, scratch);
- }
-}
-
-void TurboAssembler::S128Select(XMMRegister dst, XMMRegister mask,
- XMMRegister src1, XMMRegister src2,
- XMMRegister scratch) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpandn(scratch, mask, src2);
- vpand(dst, src1, mask);
- vpor(dst, dst, scratch);
- } else {
- DCHECK_EQ(dst, mask);
- // Use float ops as they are 1 byte shorter than int ops.
- movaps(scratch, dst);
- andnps(scratch, src2);
- andps(dst, src1);
- orps(dst, scratch);
- }
-}
-
-void TurboAssembler::I64x2SConvertI32x4High(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpunpckhqdq(dst, src, src);
- vpmovsxdq(dst, dst);
- } else {
- CpuFeatureScope sse_scope(this, SSE4_1);
- pshufd(dst, src, 0xEE);
- pmovsxdq(dst, dst);
- }
-}
-
-void TurboAssembler::I64x2UConvertI32x4High(XMMRegister dst, XMMRegister src,
- XMMRegister scratch) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpxor(scratch, scratch, scratch);
- vpunpckhdq(dst, src, scratch);
- } else {
- CpuFeatureScope sse_scope(this, SSE4_1);
- pshufd(dst, src, 0xEE);
- pmovzxdq(dst, dst);
- }
-}
-
-void TurboAssembler::I32x4SConvertI16x8High(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- // src = |a|b|c|d|e|f|g|h| (high)
- // dst = |e|e|f|f|g|g|h|h|
- vpunpckhwd(dst, src, src);
- vpsrad(dst, dst, 16);
- } else {
- CpuFeatureScope sse_scope(this, SSE4_1);
- if (dst == src) {
- // 2 bytes shorter than pshufd, but has dependency on dst.
- movhlps(dst, src);
- pmovsxwd(dst, dst);
- } else {
- // No dependency on dst.
- pshufd(dst, src, 0xEE);
- pmovsxwd(dst, dst);
- }
- }
-}
-
-void TurboAssembler::I32x4UConvertI16x8High(XMMRegister dst, XMMRegister src,
- XMMRegister scratch) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- // scratch = |0|0|0|0|0|0|0|0|
- // src = |a|b|c|d|e|f|g|h|
- // dst = |0|a|0|b|0|c|0|d|
- XMMRegister tmp = dst == src ? scratch : dst;
- vpxor(tmp, tmp, tmp);
- vpunpckhwd(dst, src, tmp);
- } else {
- if (dst == src) {
- // xorps can be executed on more ports than pshufd.
- xorps(scratch, scratch);
- punpckhwd(dst, scratch);
- } else {
- CpuFeatureScope sse_scope(this, SSE4_1);
- // No dependency on dst.
- pshufd(dst, src, 0xEE);
- pmovzxwd(dst, dst);
- }
- }
-}
-
-void TurboAssembler::I16x8SConvertI8x16High(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- // src = |a|b|c|d|e|f|g|h|i|j|k|l|m|n|o|p| (high)
- // dst = |i|i|j|j|k|k|l|l|m|m|n|n|o|o|p|p|
- vpunpckhbw(dst, src, src);
- vpsraw(dst, dst, 8);
- } else {
- CpuFeatureScope sse_scope(this, SSE4_1);
- if (dst == src) {
- // 2 bytes shorter than pshufd, but has dependency on dst.
- movhlps(dst, src);
- pmovsxbw(dst, dst);
- } else {
- CpuFeatureScope sse_scope(this, SSE4_1);
- // No dependency on dst.
- pshufd(dst, src, 0xEE);
- pmovsxbw(dst, dst);
- }
- }
-}
-
-void TurboAssembler::I16x8UConvertI8x16High(XMMRegister dst, XMMRegister src,
- XMMRegister scratch) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- // tmp = |0|0|0|0|0|0|0|0 | 0|0|0|0|0|0|0|0|
- // src = |a|b|c|d|e|f|g|h | i|j|k|l|m|n|o|p|
- // dst = |0|a|0|b|0|c|0|d | 0|e|0|f|0|g|0|h|
- XMMRegister tmp = dst == src ? scratch : dst;
- vpxor(tmp, tmp, tmp);
- vpunpckhbw(dst, src, tmp);
- } else {
- CpuFeatureScope sse_scope(this, SSE4_1);
- if (dst == src) {
- // xorps can be executed on more ports than pshufd.
- xorps(scratch, scratch);
- punpckhbw(dst, scratch);
- } else {
- // No dependency on dst.
- pshufd(dst, src, 0xEE);
- pmovzxbw(dst, dst);
- }
- }
-}
-
void TurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister scratch) {
// k = i16x8.splat(0x8000)
@@ -879,16 +653,6 @@ void TurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1,
Pxor(dst, scratch);
}
-void TurboAssembler::S128Store32Lane(Operand dst, XMMRegister src,
- uint8_t laneidx) {
- if (laneidx == 0) {
- Movss(dst, src);
- } else {
- DCHECK_GE(3, laneidx);
- Extractps(dst, src, laneidx);
- }
-}
-
void TurboAssembler::I8x16Popcnt(XMMRegister dst, XMMRegister src,
XMMRegister tmp1, XMMRegister tmp2,
Register scratch) {
@@ -915,8 +679,8 @@ void TurboAssembler::I8x16Popcnt(XMMRegister dst, XMMRegister src,
// PSHUFB instruction, thus use PSHUFB-free divide-and-conquer
// algorithm on these processors. ATOM CPU feature captures exactly
// the right set of processors.
- xorps(tmp1, tmp1);
- pavgb(tmp1, src);
+ movaps(tmp1, src);
+ psrlw(tmp1, 1);
if (dst != src) {
movaps(dst, src);
}
@@ -963,6 +727,10 @@ void TurboAssembler::F64x2ConvertLowI32x4U(XMMRegister dst, XMMRegister src,
// dst = [ src_low, 0x43300000, src_high, 0x43300000 ];
// 0x43300000'00000000 is a special double where the significand bits
// precisely represent all uint32 numbers.
+ if (!CpuFeatures::IsSupported(AVX) && dst != src) {
+ movaps(dst, src);
+ src = dst;
+ }
Unpcklps(dst, src,
ExternalReferenceAsOperand(
ExternalReference::
@@ -1053,85 +821,6 @@ void TurboAssembler::I32x4TruncSatF64x2UZero(XMMRegister dst, XMMRegister src,
}
}
-void TurboAssembler::I64x2Abs(XMMRegister dst, XMMRegister src,
- XMMRegister scratch) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- XMMRegister tmp = dst == src ? scratch : dst;
- vpxor(tmp, tmp, tmp);
- vpsubq(tmp, tmp, src);
- vblendvpd(dst, src, tmp, src);
- } else {
- CpuFeatureScope sse_scope(this, SSE3);
- movshdup(scratch, src);
- if (dst != src) {
- movaps(dst, src);
- }
- psrad(scratch, 31);
- xorps(dst, scratch);
- psubq(dst, scratch);
- }
-}
-
-void TurboAssembler::I64x2GtS(XMMRegister dst, XMMRegister src0,
- XMMRegister src1, XMMRegister scratch) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpcmpgtq(dst, src0, src1);
- } else if (CpuFeatures::IsSupported(SSE4_2)) {
- CpuFeatureScope sse_scope(this, SSE4_2);
- DCHECK_EQ(dst, src0);
- pcmpgtq(dst, src1);
- } else {
- CpuFeatureScope sse_scope(this, SSSE3);
- DCHECK_NE(dst, src0);
- DCHECK_NE(dst, src1);
- movaps(dst, src1);
- movaps(scratch, src0);
- psubq(dst, src0);
- pcmpeqd(scratch, src1);
- andps(dst, scratch);
- movaps(scratch, src0);
- pcmpgtd(scratch, src1);
- orps(dst, scratch);
- movshdup(dst, dst);
- }
-}
-
-void TurboAssembler::I64x2GeS(XMMRegister dst, XMMRegister src0,
- XMMRegister src1, XMMRegister scratch) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpcmpgtq(dst, src1, src0);
- vpcmpeqd(scratch, scratch, scratch);
- vpxor(dst, dst, scratch);
- } else if (CpuFeatures::IsSupported(SSE4_2)) {
- CpuFeatureScope sse_scope(this, SSE4_2);
- DCHECK_NE(dst, src0);
- if (dst != src1) {
- movaps(dst, src1);
- }
- pcmpgtq(dst, src0);
- pcmpeqd(scratch, scratch);
- xorps(dst, scratch);
- } else {
- CpuFeatureScope sse_scope(this, SSSE3);
- DCHECK_NE(dst, src0);
- DCHECK_NE(dst, src1);
- movaps(dst, src0);
- movaps(scratch, src1);
- psubq(dst, src1);
- pcmpeqd(scratch, src0);
- andps(dst, scratch);
- movaps(scratch, src1);
- pcmpgtd(scratch, src0);
- orps(dst, scratch);
- movshdup(dst, dst);
- pcmpeqd(scratch, scratch);
- xorps(dst, scratch);
- }
-}
-
void TurboAssembler::I16x8ExtAddPairwiseI8x16S(XMMRegister dst, XMMRegister src,
XMMRegister tmp,
Register scratch) {
@@ -1160,19 +849,20 @@ void TurboAssembler::I16x8ExtAddPairwiseI8x16U(XMMRegister dst, XMMRegister src,
Register scratch) {
Operand op = ExternalReferenceAsOperand(
ExternalReference::address_of_wasm_i8x16_splat_0x01(), scratch);
- if (!CpuFeatures::IsSupported(AVX) && dst != src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpmaddubsw(dst, src, op);
+ } else {
+ CpuFeatureScope sse_scope(this, SSSE3);
movaps(dst, src);
+ pmaddubsw(dst, op);
}
- Pmaddubsw(dst, src, op);
}
void TurboAssembler::I32x4ExtAddPairwiseI16x8S(XMMRegister dst, XMMRegister src,
Register scratch) {
Operand op = ExternalReferenceAsOperand(
ExternalReference::address_of_wasm_i16x8_splat_0x0001(), scratch);
- if (!CpuFeatures::IsSupported(AVX) && dst != src) {
- movaps(dst, src);
- }
// pmaddwd multiplies signed words in src and op, producing
// signed doublewords, then adds pairwise.
// src = |a|b|c|d|e|f|g|h|
@@ -1182,16 +872,68 @@ void TurboAssembler::I32x4ExtAddPairwiseI16x8S(XMMRegister dst, XMMRegister src,
void TurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst, XMMRegister src,
XMMRegister tmp) {
- // src = |a|b|c|d|e|f|g|h|
- // tmp = i32x4.splat(0x0000FFFF)
- Pcmpeqd(tmp, tmp);
- Psrld(tmp, tmp, byte{16});
- // tmp =|0|b|0|d|0|f|0|h|
- Pand(tmp, src);
- // dst = |0|a|0|c|0|e|0|g|
- Psrld(dst, src, byte{16});
- // dst = |a+b|c+d|e+f|g+h|
- Paddd(dst, dst, tmp);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ // src = |a|b|c|d|e|f|g|h| (low)
+ // tmp = |0|a|0|c|0|e|0|g|
+ vpsrld(tmp, src, 16);
+ // dst = |0|b|0|d|0|f|0|h|
+ vpblendw(dst, src, tmp, 0xAA);
+ // dst = |a+b|c+d|e+f|g+h|
+ vpaddd(dst, tmp, dst);
+ } else if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ // There is a potentially better lowering if we get rip-relative constants,
+ // see https://github.com/WebAssembly/simd/pull/380.
+ movaps(tmp, src);
+ psrld(tmp, 16);
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ pblendw(dst, tmp, 0xAA);
+ paddd(dst, tmp);
+ } else {
+ // src = |a|b|c|d|e|f|g|h|
+ // tmp = i32x4.splat(0x0000FFFF)
+ pcmpeqd(tmp, tmp);
+ psrld(tmp, byte{16});
+ // tmp =|0|b|0|d|0|f|0|h|
+ andps(tmp, src);
+ // dst = |0|a|0|c|0|e|0|g|
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ psrld(dst, byte{16});
+ // dst = |a+b|c+d|e+f|g+h|
+ paddd(dst, tmp);
+ }
+}
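The SSE2 fallback above builds the pairwise sum from a mask and a shift; per 32-bit lane it is equivalent to the following scalar sketch (helper name hypothetical, not part of the patch):

  #include <cstdint>

  // Scalar model of the SSE2 path: andps with the 0x0000FFFF splat keeps the
  // low u16 element, psrld by 16 keeps the high one, paddd adds them.
  uint32_t ExtAddPairwiseU16(uint32_t lane) {
    uint32_t low = lane & 0xFFFFu;  // tmp = src & splat(0x0000FFFF)
    uint32_t high = lane >> 16;     // dst = src >> 16
    return low + high;              // dst = dst + tmp
  }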
+
+void TurboAssembler::I8x16Swizzle(XMMRegister dst, XMMRegister src,
+ XMMRegister mask, XMMRegister scratch,
+ Register tmp, bool omit_add) {
+ if (omit_add) {
+ Pshufb(dst, src, mask);
+ return;
+ }
+
+ // Out-of-range indices should return 0, add 112 so that any value > 15
+ // saturates to 128 (top bit set), so pshufb will zero that lane.
+ Operand op = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_swizzle_mask(), tmp);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpaddusb(scratch, mask, op);
+ vpshufb(dst, src, scratch);
+ } else {
+ CpuFeatureScope sse_scope(this, SSSE3);
+ movaps(scratch, op);
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ paddusb(scratch, mask);
+ pshufb(dst, scratch);
+ }
}
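The out-of-range handling in I8x16Swizzle relies on paddusb saturating; a scalar sketch of the index adjustment (helper name hypothetical, not part of this patch):

  #include <cstdint>

  // paddusb adds with unsigned saturation. Adding 112 (0x70) leaves indices
  // 0..15 below 0x80, while any larger index lands in 0x80..0xFF, and pshufb
  // zeroes every lane whose index byte has the top bit set.
  uint8_t AdjustSwizzleIndex(uint8_t idx) {
    unsigned sum = idx + 0x70u;
    return sum > 0xFFu ? 0xFF : static_cast<uint8_t>(sum);
  }
  // AdjustSwizzleIndex(5)  == 0x75  (top bit clear, lane 5 selected)
  // AdjustSwizzleIndex(20) == 0x84  (top bit set, lane zeroed)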
void TurboAssembler::ShlPair(Register high, Register low, uint8_t shift) {
@@ -1399,11 +1141,13 @@ void TurboAssembler::Prologue() {
void TurboAssembler::EnterFrame(StackFrame::Type type) {
push(ebp);
mov(ebp, esp);
- push(Immediate(StackFrame::TypeToMarker(type)));
+ if (!StackFrame::IsJavaScript(type)) {
+ Push(Immediate(StackFrame::TypeToMarker(type)));
+ }
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
- if (emit_debug_code()) {
+ if (emit_debug_code() && !StackFrame::IsJavaScript(type)) {
cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(StackFrame::TypeToMarker(type)));
Check(equal, AbortReason::kStackFrameTypesMustMatch);
@@ -2019,6 +1763,8 @@ void TurboAssembler::Move(Operand dst, const Immediate& src) {
}
}
+void TurboAssembler::Move(Register dst, Operand src) { mov(dst, src); }
+
void TurboAssembler::Move(Register dst, Handle<HeapObject> src) {
if (root_array_available() && options().isolate_independent_code) {
IndirectLoadConstant(dst, src);
@@ -2090,152 +1836,6 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
}
}
-void TurboAssembler::Cmpeqps(XMMRegister dst, XMMRegister src1,
- XMMRegister src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vcmpeqps(dst, src1, src2);
- } else {
- movaps(dst, src1);
- cmpeqps(dst, src2);
- }
-}
-
-void TurboAssembler::Pshufhw(XMMRegister dst, Operand src, uint8_t shuffle) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpshufhw(dst, src, shuffle);
- } else {
- pshufhw(dst, src, shuffle);
- }
-}
-
-void TurboAssembler::Pshuflw(XMMRegister dst, Operand src, uint8_t shuffle) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpshuflw(dst, src, shuffle);
- } else {
- pshuflw(dst, src, shuffle);
- }
-}
-
-void TurboAssembler::Pshufd(XMMRegister dst, Operand src, uint8_t shuffle) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpshufd(dst, src, shuffle);
- } else {
- pshufd(dst, src, shuffle);
- }
-}
-
-void TurboAssembler::Psraw(XMMRegister dst, uint8_t shift) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsraw(dst, dst, shift);
- } else {
- psraw(dst, shift);
- }
-}
-
-void TurboAssembler::Psrlw(XMMRegister dst, uint8_t shift) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsrlw(dst, dst, shift);
- } else {
- psrlw(dst, shift);
- }
-}
-
-void TurboAssembler::Psrlq(XMMRegister dst, uint8_t shift) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsrlq(dst, dst, shift);
- } else {
- psrlq(dst, shift);
- }
-}
-
-void TurboAssembler::Psignb(XMMRegister dst, Operand src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsignb(dst, dst, src);
- return;
- }
- if (CpuFeatures::IsSupported(SSSE3)) {
- CpuFeatureScope sse_scope(this, SSSE3);
- psignb(dst, src);
- return;
- }
- FATAL("no AVX or SSE3 support");
-}
-
-void TurboAssembler::Psignw(XMMRegister dst, Operand src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsignw(dst, dst, src);
- return;
- }
- if (CpuFeatures::IsSupported(SSSE3)) {
- CpuFeatureScope sse_scope(this, SSSE3);
- psignw(dst, src);
- return;
- }
- FATAL("no AVX or SSE3 support");
-}
-
-void TurboAssembler::Psignd(XMMRegister dst, Operand src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsignd(dst, dst, src);
- return;
- }
- if (CpuFeatures::IsSupported(SSSE3)) {
- CpuFeatureScope sse_scope(this, SSSE3);
- psignd(dst, src);
- return;
- }
- FATAL("no AVX or SSE3 support");
-}
-
-void TurboAssembler::Haddps(XMMRegister dst, XMMRegister src1, Operand src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vhaddps(dst, src1, src2);
- } else {
- CpuFeatureScope scope(this, SSE3);
- DCHECK_EQ(dst, src1);
- haddps(dst, src2);
- }
-}
-
-void TurboAssembler::Pcmpeqq(XMMRegister dst, Operand src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpcmpeqq(dst, dst, src);
- } else {
- CpuFeatureScope scope(this, SSE4_1);
- pcmpeqq(dst, src);
- }
-}
-
-void TurboAssembler::Pcmpeqq(XMMRegister dst, XMMRegister src1,
- XMMRegister src2) {
- Pcmpeqq(dst, src1, Operand(src2));
-}
-
-void TurboAssembler::Pcmpeqq(XMMRegister dst, XMMRegister src1, Operand src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpcmpeqq(dst, src1, src2);
- } else {
- // pcmpeqq is only used by Wasm SIMD, which requires SSE4_1.
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- CpuFeatureScope scope(this, SSE4_1);
- DCHECK_EQ(dst, src1);
- pcmpeqq(dst, src2);
- }
-}
-
void TurboAssembler::Pshufb(XMMRegister dst, XMMRegister src, Operand mask) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -2280,58 +1880,6 @@ void TurboAssembler::Palignr(XMMRegister dst, Operand src, uint8_t imm8) {
FATAL("no AVX or SSE3 support");
}
-void TurboAssembler::Pextrb(Operand dst, XMMRegister src, uint8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpextrb(dst, src, imm8);
- return;
- }
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- CpuFeatureScope sse_scope(this, SSE4_1);
- pextrb(dst, src, imm8);
- return;
-}
-
-void TurboAssembler::Pextrb(Register dst, XMMRegister src, uint8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpextrb(dst, src, imm8);
- return;
- }
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope sse_scope(this, SSE4_1);
- pextrb(dst, src, imm8);
- return;
- }
- FATAL("no AVX or SSE4.1 support");
-}
-
-void TurboAssembler::Pextrw(Operand dst, XMMRegister src, uint8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpextrw(dst, src, imm8);
- return;
- }
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- CpuFeatureScope sse_scope(this, SSE4_1);
- pextrw(dst, src, imm8);
- return;
-}
-
-void TurboAssembler::Pextrw(Register dst, XMMRegister src, uint8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpextrw(dst, src, imm8);
- return;
- }
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope sse_scope(this, SSE4_1);
- pextrw(dst, src, imm8);
- return;
- }
- FATAL("no AVX or SSE4.1 support");
-}
-
void TurboAssembler::Pextrd(Register dst, XMMRegister src, uint8_t imm8) {
if (imm8 == 0) {
Movd(dst, src);
@@ -2371,7 +1919,7 @@ void TurboAssembler::Pinsrb(XMMRegister dst, XMMRegister src1, Operand src2,
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse_scope(this, SSE4_1);
if (dst != src1) {
- movdqu(dst, src1);
+ movaps(dst, src1);
}
pinsrb(dst, src2, imm8);
return;
@@ -2387,7 +1935,7 @@ void TurboAssembler::Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2,
return;
}
if (dst != src1) {
- movdqu(dst, src1);
+ movaps(dst, src1);
}
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse_scope(this, SSE4_1);
@@ -2429,7 +1977,7 @@ void TurboAssembler::Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2,
return;
} else {
if (dst != src1) {
- movdqu(dst, src1);
+ movaps(dst, src1);
}
pinsrw(dst, src2, imm8);
return;
@@ -2446,17 +1994,6 @@ void TurboAssembler::Vbroadcastss(XMMRegister dst, Operand src) {
shufps(dst, dst, static_cast<byte>(0));
}
-void TurboAssembler::Extractps(Operand dst, XMMRegister src, uint8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vextractps(dst, src, imm8);
- }
-
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- CpuFeatureScope avx_scope(this, SSE4_1);
- extractps(dst, src, imm8);
-}
-
void TurboAssembler::Shufps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
uint8_t imm8) {
if (CpuFeatures::IsSupported(AVX)) {
@@ -2732,6 +2269,12 @@ void TurboAssembler::CallBuiltin(int builtin_index) {
call(entry, RelocInfo::OFF_HEAP_TARGET);
}
+Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(
+ Builtins::Name builtin_index) {
+ return Operand(kRootRegister,
+ IsolateData::builtin_entry_slot_offset(builtin_index));
+}
+
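The new operand helper pairs naturally with the Call(Operand) overload added in the header; an illustrative, hypothetical call site (not part of the patch):

  // Calls a builtin through the root register without first materializing the
  // entry address in a scratch register.
  void CallDoubleToI(TurboAssembler* tasm) {
    tasm->Call(tasm->EntryFromBuiltinIndexAsOperand(Builtins::kDoubleToI));
  }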
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
// Code objects are called differently depending on whether we are generating
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
index 29bb8ca2a0a..4c5c3ade021 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -23,6 +23,7 @@
#include "src/codegen/label.h"
#include "src/codegen/reglist.h"
#include "src/codegen/reloc-info.h"
+#include "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h"
#include "src/codegen/turbo-assembler.h"
#include "src/common/globals.h"
#include "src/execution/frames.h"
@@ -70,9 +71,9 @@ class StackArgumentsAccessor {
DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor);
};
-class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
public:
- using TurboAssemblerBase::TurboAssemblerBase;
+ using SharedTurboAssembler::SharedTurboAssembler;
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met,
@@ -125,6 +126,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Move(Register dst, Smi src) { Move(dst, Immediate(src)); }
void Move(Register dst, Handle<HeapObject> src);
void Move(Register dst, Register src);
+ void Move(Register dst, Operand src);
void Move(Operand dst, const Immediate& src);
// Move an immediate into an XMM register.
@@ -133,7 +135,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
+ Operand EntryFromBuiltinIndexAsOperand(Builtins::Name builtin_index);
+
void Call(Register reg) { call(reg); }
+ void Call(Operand op) { call(op); }
void Call(Label* target) { call(target); }
void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
@@ -189,6 +194,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
void SmiUntag(Register reg) { sar(reg, kSmiTagSize); }
+ void SmiUntag(Register output, Register value) {
+ mov(output, value);
+ SmiUntag(output);
+ }
// Removes current frame and its arguments from the stack preserving the
// arguments and a return address pushed to the stack for the next call. Both
@@ -243,6 +252,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void PushReturnAddressFrom(Register src) { push(src); }
void PopReturnAddressTo(Register dst) { pop(dst); }
+ void PushReturnAddressFrom(XMMRegister src, Register scratch) {
+ Push(src, scratch);
+ }
+ void PopReturnAddressTo(XMMRegister dst, Register scratch) {
+ Pop(dst, scratch);
+ }
+
void Ret();
// Root register utility functions.
@@ -285,72 +301,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// may be bigger than 2^16 - 1. Requires a scratch register.
void Ret(int bytes_dropped, Register scratch);
- // Three-operand cmpeqps that moves src1 to dst if AVX is not supported.
- void Cmpeqps(XMMRegister dst, XMMRegister src1, XMMRegister src2);
-
- void Pshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
- Pshufhw(dst, Operand(src), shuffle);
- }
- void Pshufhw(XMMRegister dst, Operand src, uint8_t shuffle);
- void Pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
- Pshuflw(dst, Operand(src), shuffle);
- }
- void Pshuflw(XMMRegister dst, Operand src, uint8_t shuffle);
- void Pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
- Pshufd(dst, Operand(src), shuffle);
- }
- void Pshufd(XMMRegister dst, Operand src, uint8_t shuffle);
- void Psraw(XMMRegister dst, uint8_t shift);
- void Psrlw(XMMRegister dst, uint8_t shift);
- void Psrlq(XMMRegister dst, uint8_t shift);
-
-// SSE/SSE2 instructions with AVX version.
-#define AVX_OP2_WITH_TYPE(macro_name, name, dst_type, src_type) \
- void macro_name(dst_type dst, src_type src) { \
- if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope scope(this, AVX); \
- v##name(dst, src); \
- } else { \
- name(dst, src); \
- } \
- }
-
- AVX_OP2_WITH_TYPE(Movss, movss, Operand, XMMRegister)
- AVX_OP2_WITH_TYPE(Movss, movss, XMMRegister, Operand)
- AVX_OP2_WITH_TYPE(Movsd, movsd, Operand, XMMRegister)
- AVX_OP2_WITH_TYPE(Movsd, movsd, XMMRegister, Operand)
- AVX_OP2_WITH_TYPE(Rcpps, rcpps, XMMRegister, const Operand&)
- AVX_OP2_WITH_TYPE(Rsqrtps, rsqrtps, XMMRegister, const Operand&)
- AVX_OP2_WITH_TYPE(Movdqu, movdqu, XMMRegister, Operand)
- AVX_OP2_WITH_TYPE(Movdqu, movdqu, Operand, XMMRegister)
- AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, Register)
- AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, Operand)
- AVX_OP2_WITH_TYPE(Movd, movd, Register, XMMRegister)
- AVX_OP2_WITH_TYPE(Movd, movd, Operand, XMMRegister)
- AVX_OP2_WITH_TYPE(Cvtdq2ps, cvtdq2ps, XMMRegister, Operand)
- AVX_OP2_WITH_TYPE(Cvtdq2ps, cvtdq2ps, XMMRegister, XMMRegister)
- AVX_OP2_WITH_TYPE(Cvtdq2pd, cvtdq2pd, XMMRegister, XMMRegister)
- AVX_OP2_WITH_TYPE(Cvtps2pd, cvtps2pd, XMMRegister, XMMRegister)
- AVX_OP2_WITH_TYPE(Cvtpd2ps, cvtpd2ps, XMMRegister, XMMRegister)
- AVX_OP2_WITH_TYPE(Cvttps2dq, cvttps2dq, XMMRegister, XMMRegister)
- AVX_OP2_WITH_TYPE(Sqrtps, sqrtps, XMMRegister, XMMRegister)
- AVX_OP2_WITH_TYPE(Sqrtpd, sqrtpd, XMMRegister, XMMRegister)
- AVX_OP2_WITH_TYPE(Sqrtpd, sqrtpd, XMMRegister, const Operand&)
- AVX_OP2_WITH_TYPE(Movaps, movaps, XMMRegister, XMMRegister)
- AVX_OP2_WITH_TYPE(Movups, movups, XMMRegister, Operand)
- AVX_OP2_WITH_TYPE(Movups, movups, XMMRegister, XMMRegister)
- AVX_OP2_WITH_TYPE(Movups, movups, Operand, XMMRegister)
- AVX_OP2_WITH_TYPE(Movapd, movapd, XMMRegister, XMMRegister)
- AVX_OP2_WITH_TYPE(Movapd, movapd, XMMRegister, const Operand&)
- AVX_OP2_WITH_TYPE(Movupd, movupd, XMMRegister, const Operand&)
- AVX_OP2_WITH_TYPE(Pmovmskb, pmovmskb, Register, XMMRegister)
- AVX_OP2_WITH_TYPE(Movmskpd, movmskpd, Register, XMMRegister)
- AVX_OP2_WITH_TYPE(Movmskps, movmskps, Register, XMMRegister)
- AVX_OP2_WITH_TYPE(Movlps, movlps, Operand, XMMRegister)
- AVX_OP2_WITH_TYPE(Movhps, movlps, Operand, XMMRegister)
-
-#undef AVX_OP2_WITH_TYPE
-
// Only use these macros when non-destructive source of AVX version is not
// needed.
#define AVX_OP3_WITH_TYPE(macro_name, name, dst_type, src_type) \
@@ -384,7 +334,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP3_XO(Punpcklqdq, punpcklqdq)
AVX_OP3_XO(Pxor, pxor)
AVX_OP3_XO(Andps, andps)
- AVX_OP3_XO(Andnps, andnps)
AVX_OP3_XO(Andpd, andpd)
AVX_OP3_XO(Xorps, xorps)
AVX_OP3_XO(Xorpd, xorpd)
@@ -393,12 +342,36 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP3_XO(Orps, orps)
AVX_OP3_XO(Orpd, orpd)
AVX_OP3_XO(Andnpd, andnpd)
- AVX_OP3_XO(Pmullw, pmullw)
AVX_OP3_WITH_TYPE(Movhlps, movhlps, XMMRegister, XMMRegister)
+ AVX_OP3_WITH_TYPE(Psraw, psraw, XMMRegister, uint8_t)
+ AVX_OP3_WITH_TYPE(Psrlq, psrlq, XMMRegister, uint8_t)
#undef AVX_OP3_XO
#undef AVX_OP3_WITH_TYPE
+// Same as AVX_OP3_WITH_TYPE but supports a CpuFeatureScope
+#define AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, dst_type, src_type, \
+ sse_scope) \
+ void macro_name(dst_type dst, src_type src) { \
+ if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope scope(this, AVX); \
+ v##name(dst, dst, src); \
+ } else if (CpuFeatures::IsSupported(sse_scope)) { \
+ CpuFeatureScope scope(this, sse_scope); \
+ name(dst, src); \
+ } \
+ }
+#define AVX_OP2_XO(macro_name, name, sse_scope) \
+ AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, XMMRegister, \
+ sse_scope) \
+ AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, Operand, sse_scope)
+ AVX_OP2_XO(Psignb, psignb, SSSE3)
+ AVX_OP2_XO(Psignw, psignw, SSSE3)
+ AVX_OP2_XO(Psignd, psignd, SSSE3)
+ AVX_OP2_XO(Pcmpeqq, pcmpeqq, SSE4_1)
+#undef AVX_OP2_XO
+#undef AVX_OP2_WITH_TYPE_SCOPE
+
// Only use this macro when dst and src1 is the same in SSE case.
#define AVX_PACKED_OP3_WITH_TYPE(macro_name, name, dst_type, src_type) \
void macro_name(dst_type dst, dst_type src1, src_type src2) { \
@@ -415,6 +388,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_PACKED_OP3_WITH_TYPE(macro_name, name, XMMRegister, Operand)
AVX_PACKED_OP3(Unpcklps, unpcklps)
+ AVX_PACKED_OP3(Andnps, andnps)
AVX_PACKED_OP3(Addps, addps)
AVX_PACKED_OP3(Addpd, addpd)
AVX_PACKED_OP3(Subps, subps)
@@ -442,7 +416,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_PACKED_OP3(Psrlq, psrlq)
AVX_PACKED_OP3(Psraw, psraw)
AVX_PACKED_OP3(Psrad, psrad)
- AVX_PACKED_OP3(Pmaddwd, pmaddwd)
AVX_PACKED_OP3(Paddd, paddd)
AVX_PACKED_OP3(Paddq, paddq)
AVX_PACKED_OP3(Psubd, psubd)
@@ -489,58 +462,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
name(dst, src2); \
} \
}
+ AVX_OP3_WITH_MOVE(Cmpeqps, cmpeqps, XMMRegister, XMMRegister)
AVX_OP3_WITH_MOVE(Movlps, movlps, XMMRegister, Operand)
AVX_OP3_WITH_MOVE(Movhps, movhps, XMMRegister, Operand)
+ AVX_OP3_WITH_MOVE(Pmaddwd, pmaddwd, XMMRegister, Operand)
#undef AVX_OP3_WITH_MOVE
-// Non-SSE2 instructions.
-#define AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, dst_type, src_type, \
- sse_scope) \
- void macro_name(dst_type dst, src_type src) { \
- if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope scope(this, AVX); \
- v##name(dst, src); \
- return; \
- } \
- if (CpuFeatures::IsSupported(sse_scope)) { \
- CpuFeatureScope scope(this, sse_scope); \
- name(dst, src); \
- return; \
- } \
- UNREACHABLE(); \
- }
-#define AVX_OP2_XO_SSE3(macro_name, name) \
- AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, XMMRegister, SSE3) \
- AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, Operand, SSE3)
- AVX_OP2_XO_SSE3(Movddup, movddup)
- AVX_OP2_WITH_TYPE_SCOPE(Movshdup, movshdup, XMMRegister, XMMRegister, SSE3)
-
-#undef AVX_OP2_XO_SSE3
-
-#define AVX_OP2_XO_SSSE3(macro_name, name) \
- AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, XMMRegister, SSSE3) \
- AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, Operand, SSSE3)
- AVX_OP2_XO_SSSE3(Pabsb, pabsb)
- AVX_OP2_XO_SSSE3(Pabsw, pabsw)
- AVX_OP2_XO_SSSE3(Pabsd, pabsd)
-
-#undef AVX_OP2_XO_SSE3
-
-#define AVX_OP2_XO_SSE4(macro_name, name) \
- AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, XMMRegister, SSE4_1) \
- AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, Operand, SSE4_1)
-
- AVX_OP2_XO_SSE4(Ptest, ptest)
- AVX_OP2_XO_SSE4(Pmovsxbw, pmovsxbw)
- AVX_OP2_XO_SSE4(Pmovsxwd, pmovsxwd)
- AVX_OP2_XO_SSE4(Pmovsxdq, pmovsxdq)
- AVX_OP2_XO_SSE4(Pmovzxbw, pmovzxbw)
- AVX_OP2_XO_SSE4(Pmovzxwd, pmovzxwd)
- AVX_OP2_XO_SSE4(Pmovzxdq, pmovzxdq)
-
-#undef AVX_OP2_WITH_TYPE_SCOPE
-#undef AVX_OP2_XO_SSE4
-
#define AVX_OP3_WITH_TYPE_SCOPE(macro_name, name, dst_type, src_type, \
sse_scope) \
void macro_name(dst_type dst, dst_type src1, src_type src2) { \
@@ -561,20 +488,23 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP3_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, XMMRegister, SSE4_1) \
AVX_OP3_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, Operand, SSE4_1)
+ AVX_OP3_WITH_TYPE_SCOPE(Haddps, haddps, XMMRegister, Operand, SSE3)
AVX_OP3_XO_SSE4(Pmaxsd, pmaxsd)
AVX_OP3_XO_SSE4(Pminsb, pminsb)
AVX_OP3_XO_SSE4(Pmaxsb, pmaxsb)
- AVX_OP3_WITH_TYPE_SCOPE(Pmaddubsw, pmaddubsw, XMMRegister, XMMRegister, SSSE3)
- AVX_OP3_WITH_TYPE_SCOPE(Pmaddubsw, pmaddubsw, XMMRegister, Operand, SSSE3)
+ AVX_OP3_XO_SSE4(Pcmpeqq, pcmpeqq)
#undef AVX_OP3_XO_SSE4
#undef AVX_OP3_WITH_TYPE_SCOPE
- void Haddps(XMMRegister dst, XMMRegister src1, Operand src2);
- void Pcmpeqq(XMMRegister dst, Operand src);
- void Pcmpeqq(XMMRegister dst, XMMRegister src) { Pcmpeqq(dst, Operand(src)); }
- void Pcmpeqq(XMMRegister dst, XMMRegister src1, Operand src2);
- void Pcmpeqq(XMMRegister dst, XMMRegister src1, XMMRegister src2);
+ // TODO(zhin): Remove after moving more definitions into SharedTurboAssembler.
+ void Movlps(Operand dst, XMMRegister src) {
+ SharedTurboAssembler::Movlps(dst, src);
+ }
+ void Movhps(Operand dst, XMMRegister src) {
+ SharedTurboAssembler::Movhps(dst, src);
+ }
+
void Pshufb(XMMRegister dst, XMMRegister src) { Pshufb(dst, dst, src); }
void Pshufb(XMMRegister dst, Operand src) { Pshufb(dst, dst, src); }
// Handles SSE and AVX. On SSE, moves src to dst if they are not equal.
@@ -588,22 +518,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
void Pblendw(XMMRegister dst, Operand src, uint8_t imm8);
- void Psignb(XMMRegister dst, XMMRegister src) { Psignb(dst, Operand(src)); }
- void Psignb(XMMRegister dst, Operand src);
- void Psignw(XMMRegister dst, XMMRegister src) { Psignw(dst, Operand(src)); }
- void Psignw(XMMRegister dst, Operand src);
- void Psignd(XMMRegister dst, XMMRegister src) { Psignd(dst, Operand(src)); }
- void Psignd(XMMRegister dst, Operand src);
-
void Palignr(XMMRegister dst, XMMRegister src, uint8_t imm8) {
Palignr(dst, Operand(src), imm8);
}
void Palignr(XMMRegister dst, Operand src, uint8_t imm8);
- void Pextrb(Operand dst, XMMRegister src, uint8_t imm8);
- void Pextrb(Register dst, XMMRegister src, uint8_t imm8);
- void Pextrw(Operand dst, XMMRegister src, uint8_t imm8);
- void Pextrw(Register dst, XMMRegister src, uint8_t imm8);
void Pextrd(Register dst, XMMRegister src, uint8_t imm8);
void Pinsrb(XMMRegister dst, Register src, int8_t imm8) {
Pinsrb(dst, Operand(src), imm8);
@@ -624,7 +543,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Moves src1 to dst if AVX is not supported.
void Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2, int8_t imm8);
void Vbroadcastss(XMMRegister dst, Operand src);
- void Extractps(Operand dst, XMMRegister src, uint8_t imm8);
// Shufps that will mov src1 into dst if AVX is not supported.
void Shufps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
@@ -656,37 +574,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
void Cvttsd2ui(Register dst, Operand src, XMMRegister tmp);
- void Roundps(XMMRegister dst, XMMRegister src, RoundingMode mode);
- void Roundpd(XMMRegister dst, XMMRegister src, RoundingMode mode);
-
// Handles SSE and AVX. On SSE, moves src to dst if they are not equal.
void Pmulhrsw(XMMRegister dst, XMMRegister src1, XMMRegister src2);
// These Wasm SIMD ops do not have direct lowerings on IA32. These
// helpers are optimized to produce the fastest and smallest codegen.
// Defined here to allow usage on both TurboFan and Liftoff.
- void I64x2ExtMul(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister scratch, bool low, bool is_signed);
- // Requires that dst == src1 if AVX is not supported.
- void I32x4ExtMul(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister scratch, bool low, bool is_signed);
- void I16x8ExtMul(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister scratch, bool low, bool is_signed);
- // Requires dst == mask when AVX is not supported.
- void S128Select(XMMRegister dst, XMMRegister mask, XMMRegister src1,
- XMMRegister src2, XMMRegister scratch);
- void I64x2SConvertI32x4High(XMMRegister dst, XMMRegister src);
- void I64x2UConvertI32x4High(XMMRegister dst, XMMRegister src,
- XMMRegister scratch);
- void I32x4SConvertI16x8High(XMMRegister dst, XMMRegister src);
- void I32x4UConvertI16x8High(XMMRegister dst, XMMRegister src,
- XMMRegister scratch);
- void I16x8SConvertI8x16High(XMMRegister dst, XMMRegister src);
- void I16x8UConvertI8x16High(XMMRegister dst, XMMRegister src,
- XMMRegister scratch);
void I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1, XMMRegister src2,
XMMRegister scratch);
- void S128Store32Lane(Operand dst, XMMRegister src, uint8_t laneidx);
void I8x16Popcnt(XMMRegister dst, XMMRegister src, XMMRegister tmp1,
XMMRegister tmp2, Register scratch);
void F64x2ConvertLowI32x4U(XMMRegister dst, XMMRegister src, Register tmp);
@@ -694,11 +589,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
XMMRegister scratch, Register tmp);
void I32x4TruncSatF64x2UZero(XMMRegister dst, XMMRegister src,
XMMRegister scratch, Register tmp);
- void I64x2Abs(XMMRegister dst, XMMRegister src, XMMRegister scratch);
- void I64x2GtS(XMMRegister dst, XMMRegister src0, XMMRegister src1,
- XMMRegister scratch);
- void I64x2GeS(XMMRegister dst, XMMRegister src0, XMMRegister src1,
- XMMRegister scratch);
void I16x8ExtAddPairwiseI8x16S(XMMRegister dst, XMMRegister src,
XMMRegister tmp, Register scratch);
void I16x8ExtAddPairwiseI8x16U(XMMRegister dst, XMMRegister src,
@@ -707,12 +597,25 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register scratch);
void I32x4ExtAddPairwiseI16x8U(XMMRegister dst, XMMRegister src,
XMMRegister tmp);
+ void I8x16Swizzle(XMMRegister dst, XMMRegister src, XMMRegister mask,
+ XMMRegister scratch, Register tmp, bool omit_add = false);
void Push(Register src) { push(src); }
void Push(Operand src) { push(src); }
void Push(Immediate value);
void Push(Handle<HeapObject> handle) { push(Immediate(handle)); }
void Push(Smi smi) { Push(Immediate(smi)); }
+ void Push(XMMRegister src, Register scratch) {
+ movd(scratch, src);
+ push(scratch);
+ }
+
+ void Pop(Register dst) { pop(dst); }
+ void Pop(Operand dst) { pop(dst); }
+ void Pop(XMMRegister dst, Register scratch) {
+ pop(scratch);
+ movd(dst, scratch);
+ }
void SaveRegisters(RegList registers);
void RestoreRegisters(RegList registers);
@@ -994,9 +897,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// from the stack, clobbering only the esp register.
void Drop(int element_count);
- void Pop(Register dst) { pop(dst); }
- void Pop(Operand dst) { pop(dst); }
-
// ---------------------------------------------------------------------------
// In-place weak references.
void LoadWeakValue(Register in_out, Label* target_if_cleared);
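A minimal sketch of how the XMM-based return-address helpers above can be used when every general-purpose register is needed for arguments (function body and register choices are hypothetical, not part of the patch):

  void SpillReturnAddressAroundArgSetup(TurboAssembler* tasm) {
    tasm->PopReturnAddressTo(xmm0, eax);     // return address -> xmm0, via eax
    tasm->Push(esi);                         // ... push outgoing stack args ...
    tasm->PushReturnAddressFrom(xmm0, eax);  // return address back on top
  }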
diff --git a/deps/v8/src/codegen/ia32/register-ia32.h b/deps/v8/src/codegen/ia32/register-ia32.h
index df3117e8d0a..5dc035d9669 100644
--- a/deps/v8/src/codegen/ia32/register-ia32.h
+++ b/deps/v8/src/codegen/ia32/register-ia32.h
@@ -76,7 +76,12 @@ GENERAL_REGISTERS(DEFINE_REGISTER)
#undef DEFINE_REGISTER
constexpr Register no_reg = Register::no_reg();
-constexpr bool kPadArguments = false;
+// Returns the number of padding slots needed for stack pointer alignment.
+constexpr int ArgumentPaddingSlots(int argument_count) {
+ // No argument padding required.
+ return 0;
+}
+
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
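ArgumentPaddingSlots replaces the boolean kPadArguments with a slot count. On ia32 it is always zero; a sketch of what the same hook could return on a hypothetical platform that keeps the stack 16-byte aligned with 8-byte slots (not part of this patch):

  constexpr int ArgumentPaddingSlots(int argument_count) {
    // Round an odd slot count up to an even one so sp stays 16-byte aligned.
    return argument_count & 1;
  }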
diff --git a/deps/v8/src/codegen/interface-descriptors.cc b/deps/v8/src/codegen/interface-descriptors.cc
index cb686a1bfa3..53b678580e4 100644
--- a/deps/v8/src/codegen/interface-descriptors.cc
+++ b/deps/v8/src/codegen/interface-descriptors.cc
@@ -25,6 +25,9 @@ void CallInterfaceDescriptorData::InitializePlatformSpecific(
// within the calling convention are disallowed.
#ifdef DEBUG
CHECK_NE(registers[i], kRootRegister);
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ CHECK_NE(registers[i], kPointerCageBaseRegister);
+#endif
// Check for duplicated registers.
for (int j = i + 1; j < register_parameter_count; j++) {
CHECK_NE(registers[i], registers[j]);
@@ -331,11 +334,16 @@ void StoreTransitionDescriptor::InitializePlatformSpecific(
void BaselineOutOfLinePrologueDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:11421): Implement on other platforms.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
- Register registers[] = {
- kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
- kInterpreterBytecodeArrayRegister, kJavaScriptCallNewTargetRegister};
- data->InitializePlatformSpecific(kParameterCount, registers);
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_IA32 || \
+ V8_TARGET_ARCH_ARM
+ Register registers[] = {kContextRegister,
+ kJSFunctionRegister,
+ kJavaScriptCallArgCountRegister,
+ kJavaScriptCallExtraArg1Register,
+ kJavaScriptCallNewTargetRegister,
+ kInterpreterBytecodeArrayRegister};
+ data->InitializePlatformSpecific(kParameterCount - kStackArgumentsCount,
+ registers);
#else
InitializePlatformUnimplemented(data, kParameterCount);
#endif
@@ -344,7 +352,8 @@ void BaselineOutOfLinePrologueDescriptor::InitializePlatformSpecific(
void BaselineLeaveFrameDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:11421): Implement on other platforms.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
+ V8_TARGET_ARCH_ARM
Register registers[] = {ParamsSizeRegister(), WeightRegister()};
data->InitializePlatformSpecific(kParameterCount, registers);
#else
@@ -609,5 +618,15 @@ void ForInPrepareDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, kParameterCount);
}
+void SuspendGeneratorBaselineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
+void ResumeGeneratorBaselineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h
index d9ae65f5c62..8d03907efc0 100644
--- a/deps/v8/src/codegen/interface-descriptors.h
+++ b/deps/v8/src/codegen/interface-descriptors.h
@@ -97,6 +97,8 @@ namespace internal {
V(NoContext) \
V(RecordWrite) \
V(ResumeGenerator) \
+ V(SuspendGeneratorBaseline) \
+ V(ResumeGeneratorBaseline) \
V(RunMicrotasks) \
V(RunMicrotasksEntry) \
V(SingleParameterOnStack) \
@@ -1466,16 +1468,26 @@ class V8_EXPORT_PRIVATE TailCallOptimizedCodeSlotDescriptor
class BaselineOutOfLinePrologueDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kCalleeContext, kClosure,
- kJavaScriptCallArgCount,
- kInterpreterBytecodeArray,
- kJavaScriptCallNewTarget)
+ kJavaScriptCallArgCount, kStackFrameSize,
+ kJavaScriptCallNewTarget,
+ kInterpreterBytecodeArray)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kCalleeContext
MachineType::AnyTagged(), // kClosure
MachineType::Int32(), // kJavaScriptCallArgCount
- MachineType::AnyTagged(), // kInterpreterBytecodeArray
- MachineType::AnyTagged()) // kJavaScriptCallNewTarget
+ MachineType::Int32(), // kStackFrameSize
+ MachineType::AnyTagged(), // kJavaScriptCallNewTarget
+ MachineType::AnyTagged()) // kInterpreterBytecodeArray
DECLARE_DESCRIPTOR(BaselineOutOfLinePrologueDescriptor,
CallInterfaceDescriptor)
+
+#if V8_TARGET_ARCH_IA32
+ static const bool kPassLastArgsOnStack = true;
+#else
+ static const bool kPassLastArgsOnStack = false;
+#endif
+
+ // Pass bytecode array through the stack.
+ static const int kStackArgumentsCount = kPassLastArgsOnStack ? 1 : 0;
};
class BaselineLeaveFrameDescriptor : public CallInterfaceDescriptor {
@@ -1577,6 +1589,31 @@ class ResumeGeneratorDescriptor final : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(ResumeGeneratorDescriptor, CallInterfaceDescriptor)
};
+class ResumeGeneratorBaselineDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kGeneratorObject, kRegisterCount)
+ DEFINE_RESULT_AND_PARAMETER_TYPES(
+ MachineType::TaggedSigned(), // return type
+ MachineType::AnyTagged(), // kGeneratorObject
+ MachineType::IntPtr(), // kRegisterCount
+ )
+ DECLARE_DESCRIPTOR(ResumeGeneratorBaselineDescriptor, CallInterfaceDescriptor)
+};
+
+class SuspendGeneratorBaselineDescriptor final
+ : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kGeneratorObject, kSuspendId, kBytecodeOffset,
+ kRegisterCount)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kGeneratorObject
+ MachineType::IntPtr(), // kSuspendId
+ MachineType::IntPtr(), // kBytecodeOffset
+ MachineType::IntPtr(), // kRegisterCount
+ )
+ DECLARE_DESCRIPTOR(SuspendGeneratorBaselineDescriptor,
+ CallInterfaceDescriptor)
+};
+
class FrameDropperTrampolineDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kRestartFp)
diff --git a/deps/v8/src/codegen/machine-type.h b/deps/v8/src/codegen/machine-type.h
index cd46fd1aeff..ac21d3c3e60 100644
--- a/deps/v8/src/codegen/machine-type.h
+++ b/deps/v8/src/codegen/machine-type.h
@@ -17,6 +17,7 @@ namespace internal {
enum class MachineRepresentation : uint8_t {
kNone,
kBit,
+ // Integral representations must be consecutive, in order of increasing size.
kWord8,
kWord16,
kWord32,
@@ -26,7 +27,7 @@ enum class MachineRepresentation : uint8_t {
kTagged, // (uncompressed) Object (Smi or HeapObject)
kCompressedPointer, // (compressed) HeapObject
kCompressed, // (compressed) Object (Smi or HeapObject)
- // FP representations must be last, and in order of increasing size.
+ // FP and SIMD representations must be last, and in order of increasing size.
kFloat32,
kFloat64,
kSimd128,
@@ -36,6 +37,22 @@ enum class MachineRepresentation : uint8_t {
bool IsSubtype(MachineRepresentation rep1, MachineRepresentation rep2);
+#define ASSERT_CONSECUTIVE(rep1, rep2) \
+ static_assert(static_cast<uint8_t>(MachineRepresentation::k##rep1) + 1 == \
+ static_cast<uint8_t>(MachineRepresentation::k##rep2), \
+ #rep1 " and " #rep2 " must be consecutive.");
+
+ASSERT_CONSECUTIVE(Word8, Word16)
+ASSERT_CONSECUTIVE(Word16, Word32)
+ASSERT_CONSECUTIVE(Word32, Word64)
+ASSERT_CONSECUTIVE(Float32, Float64)
+ASSERT_CONSECUTIVE(Float64, Simd128)
+#undef ASSERT_CONSECUTIVE
+
+static_assert(MachineRepresentation::kLastRepresentation ==
+ MachineRepresentation::kSimd128,
+ "FP and SIMD representations must be last.");
+
static_assert(static_cast<int>(MachineRepresentation::kLastRepresentation) <
kIntSize * kBitsPerByte,
"Bit masks of MachineRepresentation should fit in an int");
@@ -255,6 +272,11 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
std::ostream& operator<<(std::ostream& os, MachineSemantic type);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, MachineType type);
+inline bool IsIntegral(MachineRepresentation rep) {
+ return rep >= MachineRepresentation::kWord8 &&
+ rep <= MachineRepresentation::kWord64;
+}
+
inline bool IsFloatingPoint(MachineRepresentation rep) {
return rep >= MachineRepresentation::kFirstFPRepresentation;
}
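Because ASSERT_CONSECUTIVE pins the integral enumerators to adjacent values, per-representation sizes can be derived arithmetically instead of via a switch. A sketch with a hypothetical helper (not part of the patch):

  int IntegralElementSizeLog2(MachineRepresentation rep) {
    DCHECK(IsIntegral(rep));
    // kWord8 -> 0, kWord16 -> 1, kWord32 -> 2, kWord64 -> 3
    return static_cast<int>(rep) - static_cast<int>(MachineRepresentation::kWord8);
  }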
diff --git a/deps/v8/src/codegen/mips/assembler-mips-inl.h b/deps/v8/src/codegen/mips/assembler-mips-inl.h
index 8ffc46003f5..d00da6efbac 100644
--- a/deps/v8/src/codegen/mips/assembler-mips-inl.h
+++ b/deps/v8/src/codegen/mips/assembler-mips-inl.h
@@ -47,8 +47,6 @@ namespace internal {
bool CpuFeatures::SupportsOptimizer() { return IsSupported(FPU); }
-bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(MIPS_SIMD); }
-
// -----------------------------------------------------------------------------
// Operand and MemOperand.
diff --git a/deps/v8/src/codegen/mips/assembler-mips.cc b/deps/v8/src/codegen/mips/assembler-mips.cc
index 9864c0f2530..2ef08ae87c2 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/assembler-mips.cc
@@ -67,6 +67,8 @@ static unsigned CpuFeaturesImpliedByCompiler() {
return answer;
}
+bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(MIPS_SIMD); }
+
void CpuFeatures::ProbeImpl(bool cross_compile) {
supported_ |= CpuFeaturesImpliedByCompiler();
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
index 060a4b748af..8bbdbca6627 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -23,7 +23,10 @@
#include "src/runtime/runtime.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
+#endif // V8_ENABLE_WEBASSEMBLY
// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
@@ -2741,8 +2744,13 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
Sdc1(double_input, MemOperand(sp, 0));
+#if V8_ENABLE_WEBASSEMBLY
if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
+#else
+ // For balance.
+ if (false) {
+#endif // V8_ENABLE_WEBASSEMBLY
} else {
Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
}
@@ -4805,7 +4813,7 @@ void TurboAssembler::LoadMap(Register destination, Register object) {
Lw(destination, FieldMemOperand(object, HeapObject::kMapOffset));
}
-void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
LoadMap(dst, cp);
Lw(dst,
FieldMemOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.h b/deps/v8/src/codegen/mips/macro-assembler-mips.h
index 1fe4c451f95..8d54e0b737c 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.h
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.h
@@ -83,6 +83,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
void LeaveFrame(StackFrame::Type type);
+ void AllocateStackSpace(Register bytes) { Subu(sp, sp, bytes); }
+ void AllocateStackSpace(int bytes) {
+ DCHECK_GE(bytes, 0);
+ if (bytes == 0) return;
+ Subu(sp, sp, Operand(bytes));
+ }
+
// Generates function and stub prologue code.
void StubPrologue(StackFrame::Type type);
void Prologue();
@@ -998,10 +1005,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
- LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+ LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
}
- void LoadNativeContextSlot(int index, Register dst);
+ void LoadNativeContextSlot(Register dst, int index);
// -------------------------------------------------------------------------
// JavaScript invokes.
diff --git a/deps/v8/src/codegen/mips/register-mips.h b/deps/v8/src/codegen/mips/register-mips.h
index 2b5f454dd47..95164a86c1c 100644
--- a/deps/v8/src/codegen/mips/register-mips.h
+++ b/deps/v8/src/codegen/mips/register-mips.h
@@ -203,7 +203,12 @@ int ToNumber(Register reg);
Register ToRegister(int num);
-constexpr bool kPadArguments = false;
+// Returns the number of padding slots needed for stack pointer alignment.
+constexpr int ArgumentPaddingSlots(int argument_count) {
+ // No argument padding required.
+ return 0;
+}
+
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64-inl.h b/deps/v8/src/codegen/mips64/assembler-mips64-inl.h
index e5ac8d209c7..2924b661f21 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/codegen/mips64/assembler-mips64-inl.h
@@ -47,8 +47,6 @@ namespace internal {
bool CpuFeatures::SupportsOptimizer() { return IsSupported(FPU); }
-bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(MIPS_SIMD); }
-
// -----------------------------------------------------------------------------
// Operand and MemOperand.
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.cc b/deps/v8/src/codegen/mips64/assembler-mips64.cc
index 11dd818922d..7f7ebd2c736 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.cc
@@ -67,6 +67,8 @@ static unsigned CpuFeaturesImpliedByCompiler() {
return answer;
}
+bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(MIPS_SIMD); }
+
void CpuFeatures::ProbeImpl(bool cross_compile) {
supported_ |= CpuFeaturesImpliedByCompiler();
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
index ec19fa5db54..29443a2e58d 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -23,7 +23,10 @@
#include "src/runtime/runtime.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
+#endif // V8_ENABLE_WEBASSEMBLY
// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
@@ -3379,8 +3382,13 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Dsubu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
Sdc1(double_input, MemOperand(sp, 0));
+#if V8_ENABLE_WEBASSEMBLY
if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
+#else
+ // For balance.
+ if (false) {
+#endif // V8_ENABLE_WEBASSEMBLY
} else {
Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
}
@@ -5326,7 +5334,7 @@ void TurboAssembler::LoadMap(Register destination, Register object) {
Ld(destination, FieldMemOperand(object, HeapObject::kMapOffset));
}
-void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
LoadMap(dst, cp);
Ld(dst,
FieldMemOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
index 721326ae965..756b594edb7 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
@@ -103,6 +103,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
void LeaveFrame(StackFrame::Type type);
+ void AllocateStackSpace(Register bytes) { Dsubu(sp, sp, bytes); }
+
+ void AllocateStackSpace(int bytes) {
+ DCHECK_GE(bytes, 0);
+ if (bytes == 0) return;
+ Dsubu(sp, sp, Operand(bytes));
+ }
+
// Generates function and stub prologue code.
void StubPrologue(StackFrame::Type type);
void Prologue();
@@ -1048,10 +1056,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
- LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+ LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
}
- void LoadNativeContextSlot(int index, Register dst);
+ void LoadNativeContextSlot(Register dst, int index);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
diff --git a/deps/v8/src/codegen/mips64/register-mips64.h b/deps/v8/src/codegen/mips64/register-mips64.h
index 8267d6b2ffa..51b03aba1fa 100644
--- a/deps/v8/src/codegen/mips64/register-mips64.h
+++ b/deps/v8/src/codegen/mips64/register-mips64.h
@@ -203,7 +203,12 @@ int ToNumber(Register reg);
Register ToRegister(int num);
-constexpr bool kPadArguments = false;
+// Returns the number of padding slots needed for stack pointer alignment.
+constexpr int ArgumentPaddingSlots(int argument_count) {
+ // No argument padding required.
+ return 0;
+}
+
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
diff --git a/deps/v8/src/codegen/optimized-compilation-info.cc b/deps/v8/src/codegen/optimized-compilation-info.cc
index 2dc30fb55d3..f6fd5862fdc 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.cc
+++ b/deps/v8/src/codegen/optimized-compilation-info.cc
@@ -13,7 +13,10 @@
#include "src/objects/shared-function-info.h"
#include "src/tracing/trace-event.h"
#include "src/tracing/traced-value.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/function-compiler.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -170,6 +173,7 @@ StackFrame::Type OptimizedCompilationInfo::GetOutputStackFrameType() const {
case CodeKind::BYTECODE_HANDLER:
case CodeKind::BUILTIN:
return StackFrame::STUB;
+#if V8_ENABLE_WEBASSEMBLY
case CodeKind::WASM_FUNCTION:
return StackFrame::WASM;
case CodeKind::WASM_TO_CAPI_FUNCTION:
@@ -180,6 +184,7 @@ StackFrame::Type OptimizedCompilationInfo::GetOutputStackFrameType() const {
return StackFrame::WASM_TO_JS;
case CodeKind::C_WASM_ENTRY:
return StackFrame::C_WASM_ENTRY;
+#endif // V8_ENABLE_WEBASSEMBLY
default:
UNIMPLEMENTED();
return StackFrame::NONE;
@@ -191,6 +196,7 @@ void OptimizedCompilationInfo::SetCode(Handle<Code> code) {
code_ = code;
}
+#if V8_ENABLE_WEBASSEMBLY
void OptimizedCompilationInfo::SetWasmCompilationResult(
std::unique_ptr<wasm::WasmCompilationResult> wasm_compilation_result) {
wasm_compilation_result_ = std::move(wasm_compilation_result);
@@ -200,6 +206,7 @@ std::unique_ptr<wasm::WasmCompilationResult>
OptimizedCompilationInfo::ReleaseWasmCompilationResult() {
return std::move(wasm_compilation_result_);
}
+#endif // V8_ENABLE_WEBASSEMBLY
bool OptimizedCompilationInfo::has_context() const {
return !closure().is_null();
diff --git a/deps/v8/src/codegen/optimized-compilation-info.h b/deps/v8/src/codegen/optimized-compilation-info.h
index b72526ab61a..20386cbbee0 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.h
+++ b/deps/v8/src/codegen/optimized-compilation-info.h
@@ -142,8 +142,10 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
void SetCode(Handle<Code> code);
+#if V8_ENABLE_WEBASSEMBLY
void SetWasmCompilationResult(std::unique_ptr<wasm::WasmCompilationResult>);
std::unique_ptr<wasm::WasmCompilationResult> ReleaseWasmCompilationResult();
+#endif // V8_ENABLE_WEBASSEMBLY
bool has_context() const;
Context context() const;
@@ -162,7 +164,9 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
return code_kind() == CodeKind::NATIVE_CONTEXT_INDEPENDENT;
}
bool IsTurboprop() const { return code_kind() == CodeKind::TURBOPROP; }
+#if V8_ENABLE_WEBASSEMBLY
bool IsWasm() const { return code_kind() == CodeKind::WASM_FUNCTION; }
+#endif // V8_ENABLE_WEBASSEMBLY
void SetOptimizingForOsr(BytecodeOffset osr_offset,
JavaScriptFrame* osr_frame) {
@@ -283,8 +287,10 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
// Basic block profiling support.
BasicBlockProfilerData* profiler_data_ = nullptr;
+#if V8_ENABLE_WEBASSEMBLY
// The WebAssembly compilation result, not published in the NativeModule yet.
std::unique_ptr<wasm::WasmCompilationResult> wasm_compilation_result_;
+#endif // V8_ENABLE_WEBASSEMBLY
// Entry point when compiling for OSR, {BytecodeOffset::None} otherwise.
BytecodeOffset osr_offset_ = BytecodeOffset::None();
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc-inl.h b/deps/v8/src/codegen/ppc/assembler-ppc-inl.h
index a638ba97593..d8cd524451b 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc-inl.h
@@ -48,8 +48,6 @@ namespace internal {
bool CpuFeatures::SupportsOptimizer() { return true; }
-bool CpuFeatures::SupportsWasmSimd128() { return false; }
-
void RelocInfo::apply(intptr_t delta) {
// absolute code pointer inside code object moves with the code object.
if (IsInternalReference(rmode_)) {
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc
index 7ea115ee401..7da9484cce1 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc
@@ -54,6 +54,10 @@ static unsigned CpuFeaturesImpliedByCompiler() {
return answer;
}
+bool CpuFeatures::SupportsWasmSimd128() {
+ return CpuFeatures::IsSupported(SIMD);
+}
+
void CpuFeatures::ProbeImpl(bool cross_compile) {
supported_ |= CpuFeaturesImpliedByCompiler();
icache_line_size_ = 128;
@@ -67,37 +71,42 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
#ifndef USE_SIMULATOR
// Probe for additional features at runtime.
base::CPU cpu;
- if (cpu.part() == base::CPU::PPC_POWER9 ||
- cpu.part() == base::CPU::PPC_POWER10) {
+ if (cpu.part() == base::CPU::kPPCPower9 ||
+ cpu.part() == base::CPU::kPPCPower10) {
supported_ |= (1u << MODULO);
}
#if V8_TARGET_ARCH_PPC64
- if (cpu.part() == base::CPU::PPC_POWER8 ||
- cpu.part() == base::CPU::PPC_POWER9 ||
- cpu.part() == base::CPU::PPC_POWER10) {
+ if (cpu.part() == base::CPU::kPPCPower8 ||
+ cpu.part() == base::CPU::kPPCPower9 ||
+ cpu.part() == base::CPU::kPPCPower10) {
supported_ |= (1u << FPR_GPR_MOV);
}
+ // V8 PPC Simd implementations need P9 at a minimum.
+ if (cpu.part() == base::CPU::kPPCPower9 ||
+ cpu.part() == base::CPU::kPPCPower10) {
+ supported_ |= (1u << SIMD);
+ }
#endif
- if (cpu.part() == base::CPU::PPC_POWER6 ||
- cpu.part() == base::CPU::PPC_POWER7 ||
- cpu.part() == base::CPU::PPC_POWER8 ||
- cpu.part() == base::CPU::PPC_POWER9 ||
- cpu.part() == base::CPU::PPC_POWER10) {
+ if (cpu.part() == base::CPU::kPPCPower6 ||
+ cpu.part() == base::CPU::kPPCPower7 ||
+ cpu.part() == base::CPU::kPPCPower8 ||
+ cpu.part() == base::CPU::kPPCPower9 ||
+ cpu.part() == base::CPU::kPPCPower10) {
supported_ |= (1u << LWSYNC);
}
- if (cpu.part() == base::CPU::PPC_POWER7 ||
- cpu.part() == base::CPU::PPC_POWER8 ||
- cpu.part() == base::CPU::PPC_POWER9 ||
- cpu.part() == base::CPU::PPC_POWER10) {
+ if (cpu.part() == base::CPU::kPPCPower7 ||
+ cpu.part() == base::CPU::kPPCPower8 ||
+ cpu.part() == base::CPU::kPPCPower9 ||
+ cpu.part() == base::CPU::kPPCPower10) {
supported_ |= (1u << ISELECT);
supported_ |= (1u << VSX);
}
#if V8_OS_LINUX
- if (!(cpu.part() == base::CPU::PPC_G5 || cpu.part() == base::CPU::PPC_G4)) {
+ if (!(cpu.part() == base::CPU::kPPCG5 || cpu.part() == base::CPU::kPPCG4)) {
// Assume support
supported_ |= (1u << FPU);
}
- if (cpu.icache_line_size() != base::CPU::UNKNOWN_CACHE_LINE_SIZE) {
+ if (cpu.icache_line_size() != base::CPU::kUnknownCacheLineSize) {
icache_line_size_ = cpu.icache_line_size();
}
#elif V8_OS_AIX
@@ -110,6 +119,7 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
supported_ |= (1u << ISELECT);
supported_ |= (1u << VSX);
supported_ |= (1u << MODULO);
+ supported_ |= (1u << SIMD);
#if V8_TARGET_ARCH_PPC64
supported_ |= (1u << FPR_GPR_MOV);
#endif
@@ -1479,7 +1489,7 @@ void Assembler::mcrfs(CRegister cr, FPSCRBit bit) {
void Assembler::mfcr(Register dst) { emit(EXT2 | MFCR | dst.code() * B21); }
-void Assembler::mtcrf(unsigned char FXM, Register src) {
+void Assembler::mtcrf(Register src, uint8_t FXM) {
emit(MTCRF | src.code() * B21 | FXM * B12);
}
#if V8_TARGET_ARCH_PPC64
@@ -1874,6 +1884,12 @@ void Assembler::xxspltib(const Simd128Register rt, const Operand& imm) {
emit(XXSPLTIB | rt.code() * B21 | imm.immediate() * B11 | TX);
}
+void Assembler::xxbrq(const Simd128Register rt, const Simd128Register rb) {
+ int BX = 1;
+ int TX = 1;
+ emit(XXBRQ | rt.code() * B21 | 31 * B16 | rb.code() * B11 | BX * B1 | TX);
+}
+
// Pseudo instructions.
void Assembler::nop(int type) {
Register reg = r0;
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h
index a7c729c43b1..d5b37fe59fe 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.h
@@ -195,6 +195,12 @@ class Assembler : public AssemblerBase {
void MaybeEmitOutOfLineConstantPool() { EmitConstantPool(); }
+ inline void CheckTrampolinePoolQuick(int extra_space = 0) {
+ if (pc_offset() >= next_trampoline_check_ - extra_space) {
+ CheckTrampolinePool();
+ }
+ }
+
// Label operations & relative jumps (PPUM Appendix D)
//
// Takes a branch opcode (cc) and a label (L) and generates
@@ -922,7 +928,7 @@ class Assembler : public AssemblerBase {
void mtxer(Register src);
void mcrfs(CRegister cr, FPSCRBit bit);
void mfcr(Register dst);
- void mtcrf(unsigned char FXM, Register src);
+ void mtcrf(Register src, uint8_t FXM);
#if V8_TARGET_ARCH_PPC64
void mffprd(Register dst, DoubleRegister src);
void mffprwz(Register dst, DoubleRegister src);
@@ -1032,6 +1038,7 @@ class Assembler : public AssemblerBase {
void stxsiwx(const Simd128Register rs, const MemOperand& src);
void stxvd(const Simd128Register rt, const MemOperand& src);
void xxspltib(const Simd128Register rt, const Operand& imm);
+ void xxbrq(const Simd128Register rt, const Simd128Register rb);
// Pseudo instructions
@@ -1331,12 +1338,6 @@ class Assembler : public AssemblerBase {
}
inline void UntrackBranch();
- void CheckTrampolinePoolQuick() {
- if (pc_offset() >= next_trampoline_check_) {
- CheckTrampolinePool();
- }
- }
-
// Instruction generation
void a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
DoubleRegister frb, RCBit r);
diff --git a/deps/v8/src/codegen/ppc/constants-ppc.h b/deps/v8/src/codegen/ppc/constants-ppc.h
index 31bbb480442..5b37a2ee110 100644
--- a/deps/v8/src/codegen/ppc/constants-ppc.h
+++ b/deps/v8/src/codegen/ppc/constants-ppc.h
@@ -359,49 +359,64 @@ using Instr = uint32_t;
/* Decimal Floating Test Data Group Quad */ \
V(dtstdgq, DTSTDGQ, 0xFC0001C4)
-#define PPC_XX2_OPCODE_A_FORM_LIST(V) \
- /* VSX Vector Absolute Value Double-Precision */ \
- V(xvabsdp, XVABSDP, 0xF0000764) \
- /* VSX Vector Negate Double-Precision */ \
- V(xvnegdp, XVNEGDP, 0xF00007E4) \
- /* VSX Vector Square Root Double-Precision */ \
- V(xvsqrtdp, XVSQRTDP, 0xF000032C) \
- /* VSX Vector Absolute Value Single-Precision */ \
- V(xvabssp, XVABSSP, 0xF0000664) \
- /* VSX Vector Negate Single-Precision */ \
- V(xvnegsp, XVNEGSP, 0xF00006E4) \
- /* VSX Vector Reciprocal Estimate Single-Precision */ \
- V(xvresp, XVRESP, 0xF0000268) \
- /* VSX Vector Reciprocal Square Root Estimate Single-Precision */ \
- V(xvrsqrtesp, XVRSQRTESP, 0xF0000228) \
- /* VSX Vector Square Root Single-Precision */ \
- V(xvsqrtsp, XVSQRTSP, 0xF000022C) \
- /* VSX Vector Convert Single-Precision to Signed Fixed-Point Word */ \
- /* Saturate */ \
- V(xvcvspsxws, XVCVSPSXWS, 0xF0000260) \
- /* VSX Vector Convert Single-Precision to Unsigned Fixed-Point Word */ \
- /* Saturate */ \
- V(xvcvspuxws, XVCVSPUXWS, 0xF0000220) \
- /* VSX Vector Convert Signed Fixed-Point Word to Single-Precision */ \
- V(xvcvsxwsp, XVCVSXWSP, 0xF00002E0) \
- /* VSX Vector Convert Unsigned Fixed-Point Word to Single-Precision */ \
- V(xvcvuxwsp, XVCVUXWSP, 0xF00002A0) \
- /* VSX Vector Round to Double-Precision Integer toward +Infinity */ \
- V(xvrdpip, XVRDPIP, 0xF00003A4) \
- /* VSX Vector Round to Double-Precision Integer toward -Infinity */ \
- V(xvrdpim, XVRDPIM, 0xF00003E4) \
- /* VSX Vector Round to Double-Precision Integer toward Zero */ \
- V(xvrdpiz, XVRDPIZ, 0xF0000364) \
- /* VSX Vector Round to Double-Precision Integer */ \
- V(xvrdpi, XVRDPI, 0xF0000324) \
- /* VSX Vector Round to Single-Precision Integer toward +Infinity */ \
- V(xvrspip, XVRSPIP, 0xF00002A4) \
- /* VSX Vector Round to Single-Precision Integer toward -Infinity */ \
- V(xvrspim, XVRSPIM, 0xF00002E4) \
- /* VSX Vector Round to Single-Precision Integer toward Zero */ \
- V(xvrspiz, XVRSPIZ, 0xF0000264) \
- /* VSX Vector Round to Single-Precision Integer */ \
- V(xvrspi, XVRSPI, 0xF0000224)
+#define PPC_XX2_OPCODE_A_FORM_LIST(V) \
+ /* VSX Vector Absolute Value Double-Precision */ \
+ V(xvabsdp, XVABSDP, 0xF0000764) \
+ /* VSX Vector Negate Double-Precision */ \
+ V(xvnegdp, XVNEGDP, 0xF00007E4) \
+ /* VSX Vector Square Root Double-Precision */ \
+ V(xvsqrtdp, XVSQRTDP, 0xF000032C) \
+ /* VSX Vector Absolute Value Single-Precision */ \
+ V(xvabssp, XVABSSP, 0xF0000664) \
+ /* VSX Vector Negate Single-Precision */ \
+ V(xvnegsp, XVNEGSP, 0xF00006E4) \
+ /* VSX Vector Reciprocal Estimate Single-Precision */ \
+ V(xvresp, XVRESP, 0xF0000268) \
+ /* VSX Vector Reciprocal Square Root Estimate Single-Precision */ \
+ V(xvrsqrtesp, XVRSQRTESP, 0xF0000228) \
+ /* VSX Vector Square Root Single-Precision */ \
+ V(xvsqrtsp, XVSQRTSP, 0xF000022C) \
+ /* VSX Vector Convert Single-Precision to Signed Fixed-Point Word */ \
+ /* Saturate */ \
+ V(xvcvspsxws, XVCVSPSXWS, 0xF0000260) \
+ /* VSX Vector Convert Single-Precision to Unsigned Fixed-Point Word */ \
+ /* Saturate */ \
+ V(xvcvspuxws, XVCVSPUXWS, 0xF0000220) \
+ /* VSX Vector Convert Signed Fixed-Point Word to Single-Precision */ \
+ V(xvcvsxwsp, XVCVSXWSP, 0xF00002E0) \
+ /* VSX Vector Convert Unsigned Fixed-Point Word to Single-Precision */ \
+ V(xvcvuxwsp, XVCVUXWSP, 0xF00002A0) \
+ /* VSX Vector Round to Double-Precision Integer toward +Infinity */ \
+ V(xvrdpip, XVRDPIP, 0xF00003A4) \
+ /* VSX Vector Round to Double-Precision Integer toward -Infinity */ \
+ V(xvrdpim, XVRDPIM, 0xF00003E4) \
+ /* VSX Vector Round to Double-Precision Integer toward Zero */ \
+ V(xvrdpiz, XVRDPIZ, 0xF0000364) \
+ /* VSX Vector Round to Double-Precision Integer */ \
+ V(xvrdpi, XVRDPI, 0xF0000324) \
+ /* VSX Vector Round to Single-Precision Integer toward +Infinity */ \
+ V(xvrspip, XVRSPIP, 0xF00002A4) \
+ /* VSX Vector Round to Single-Precision Integer toward -Infinity */ \
+ V(xvrspim, XVRSPIM, 0xF00002E4) \
+ /* VSX Vector Round to Single-Precision Integer toward Zero */ \
+ V(xvrspiz, XVRSPIZ, 0xF0000264) \
+ /* VSX Vector Round to Single-Precision Integer */ \
+ V(xvrspi, XVRSPI, 0xF0000224) \
+ /* VSX Vector Convert Signed Fixed-Point Doubleword to Double-Precision */ \
+ V(xvcvsxddp, XVCVSXDDP, 0xF00007E0) \
+ /* VSX Vector Convert Unsigned Fixed-Point Doubleword to Double- */ \
+ /* Precision */ \
+ V(xvcvuxddp, XVCVUXDDP, 0xF00007A0) \
+ /* VSX Vector Convert Single-Precision to Double-Precision */ \
+ V(xvcvspdp, XVCVSPDP, 0xF0000724) \
+ /* VSX Vector Convert Double-Precision to Single-Precision */ \
+ V(xvcvdpsp, XVCVDPSP, 0xF0000624) \
+ /* VSX Vector Convert Double-Precision to Signed Fixed-Point Word */ \
+ /* Saturate */ \
+ V(xvcvdpsxws, XVCVDPSXWS, 0xF0000360) \
+ /* VSX Vector Convert Double-Precision to Unsigned Fixed-Point Word */ \
+ /* Saturate */ \
+ V(xvcvdpuxws, XVCVDPUXWS, 0xF0000320)
#define PPC_XX2_OPCODE_UNUSED_LIST(V) \
/* VSX Scalar Square Root Double-Precision */ \
@@ -412,14 +427,6 @@ using Instr = uint32_t;
V(xsrsqrtesp, XSRSQRTESP, 0xF0000028) \
/* VSX Scalar Square Root Single-Precision */ \
V(xssqrtsp, XSSQRTSP, 0xF000002C) \
- /* Move To VSR Doubleword */ \
- V(mtvsrd, MTVSRD, 0x7C000166) \
- /* Move To VSR Double Doubleword */ \
- V(mtvsrdd, MTVSRDD, 0x7C000366) \
- /* Move To VSR Word Algebraic */ \
- V(mtvsrwa, MTVSRWA, 0x7C0001A6) \
- /* Move To VSR Word and Zero */ \
- V(mtvsrwz, MTVSRWZ, 0x7C0001E6) \
/* VSX Scalar Absolute Value Double-Precision */ \
V(xsabsdp, XSABSDP, 0xF0000564) \
/* VSX Scalar Convert Double-Precision to Single-Precision */ \
@@ -475,37 +482,22 @@ using Instr = uint32_t;
V(xsrsqrtedp, XSRSQRTEDP, 0xF0000128) \
/* VSX Scalar Test for software Square Root Double-Precision */ \
V(xstsqrtdp, XSTSQRTDP, 0xF00001A8) \
- /* VSX Vector Convert Double-Precision to Single-Precision */ \
- V(xvcvdpsp, XVCVDPSP, 0xF0000624) \
/* VSX Vector Convert Double-Precision to Signed Fixed-Point Doubleword */ \
/* Saturate */ \
V(xvcvdpsxds, XVCVDPSXDS, 0xF0000760) \
- /* VSX Vector Convert Double-Precision to Signed Fixed-Point Word */ \
- /* Saturate */ \
- V(xvcvdpsxws, XVCVDPSXWS, 0xF0000360) \
/* VSX Vector Convert Double-Precision to Unsigned Fixed-Point */ \
/* Doubleword Saturate */ \
V(xvcvdpuxds, XVCVDPUXDS, 0xF0000720) \
- /* VSX Vector Convert Double-Precision to Unsigned Fixed-Point Word */ \
- /* Saturate */ \
- V(xvcvdpuxws, XVCVDPUXWS, 0xF0000320) \
- /* VSX Vector Convert Single-Precision to Double-Precision */ \
- V(xvcvspdp, XVCVSPDP, 0xF0000724) \
/* VSX Vector Convert Single-Precision to Signed Fixed-Point Doubleword */ \
/* Saturate */ \
V(xvcvspsxds, XVCVSPSXDS, 0xF0000660) \
/* VSX Vector Convert Single-Precision to Unsigned Fixed-Point */ \
/* Doubleword Saturate */ \
V(xvcvspuxds, XVCVSPUXDS, 0xF0000620) \
- /* VSX Vector Convert Signed Fixed-Point Doubleword to Double-Precision */ \
- V(xvcvsxddp, XVCVSXDDP, 0xF00007E0) \
/* VSX Vector Convert Signed Fixed-Point Doubleword to Single-Precision */ \
V(xvcvsxdsp, XVCVSXDSP, 0xF00006E0) \
/* VSX Vector Convert Signed Fixed-Point Word to Double-Precision */ \
V(xvcvsxwdp, XVCVSXWDP, 0xF00003E0) \
- /* VSX Vector Convert Unsigned Fixed-Point Doubleword to Double- */ \
- /* Precision */ \
- V(xvcvuxddp, XVCVUXDDP, 0xF00007A0) \
/* VSX Vector Convert Unsigned Fixed-Point Doubleword to Single- */ \
/* Precision */ \
V(xvcvuxdsp, XVCVUXDSP, 0xF00006A0) \
@@ -528,7 +520,9 @@ using Instr = uint32_t;
/* VSX Vector Test for software Square Root Single-Precision */ \
V(xvtsqrtsp, XVTSQRTSP, 0xF00002A8) \
/* Vector Splat Immediate Byte */ \
- V(xxspltib, XXSPLTIB, 0xF00002D0)
+ V(xxspltib, XXSPLTIB, 0xF00002D0) \
+ /* Vector Byte-Reverse Quadword */ \
+ V(xxbrq, XXBRQ, 0xF000076C)
#define PPC_XX2_OPCODE_LIST(V) \
PPC_XX2_OPCODE_A_FORM_LIST(V) \
@@ -1993,6 +1987,14 @@ using Instr = uint32_t;
V(lxvdsx, LXVDSX, 0x7C000298) \
/* Load VSR Vector Word*4 Indexed */ \
V(lxvw, LXVW, 0x7C000618) \
+ /* Move To VSR Doubleword */ \
+ V(mtvsrd, MTVSRD, 0x7C000166) \
+ /* Move To VSR Double Doubleword */ \
+ V(mtvsrdd, MTVSRDD, 0x7C000366) \
+ /* Move To VSR Word Algebraic */ \
+ V(mtvsrwa, MTVSRWA, 0x7C0001A6) \
+ /* Move To VSR Word and Zero */ \
+ V(mtvsrwz, MTVSRWZ, 0x7C0001E6) \
/* Move From VSR Doubleword */ \
V(mfvsrd, MFVSRD, 0x7C000066) \
/* Move From VSR Word and Zero */ \
@@ -2289,6 +2291,14 @@ using Instr = uint32_t;
V(vmulouh, VMULOUH, 0x10000048) \
/* Vector Multiply Odd Signed Halfword */ \
V(vmulosh, VMULOSH, 0x10000148) \
+ /* Vector Multiply Even Signed Word */ \
+ V(vmulesw, VMULESW, 0x10000388) \
+ /* Vector Multiply Even Unsigned Word */ \
+ V(vmuleuw, VMULEUW, 0x10000288) \
+ /* Vector Multiply Odd Signed Word */ \
+ V(vmulosw, VMULOSW, 0x10000188) \
+ /* Vector Multiply Odd Unsigned Word */ \
+ V(vmulouw, VMULOUW, 0x10000088) \
/* Vector Sum across Quarter Signed Halfword Saturate */ \
V(vsum4shs, VSUM4SHS, 0x10000648) \
/* Vector Pack Unsigned Word Unsigned Saturate */ \
@@ -2390,7 +2400,19 @@ using Instr = uint32_t;
/* Vector Maximum Single-Precision */ \
V(vmaxfp, VMAXFP, 0x1000040A) \
/* Vector Bit Permute Quadword */ \
- V(vbpermq, VBPERMQ, 0x1000054C)
+ V(vbpermq, VBPERMQ, 0x1000054C) \
+ /* Vector Merge High Byte */ \
+ V(vmrghb, VMRGHB, 0x1000000C) \
+ /* Vector Merge High Halfword */ \
+ V(vmrghh, VMRGHH, 0x1000004C) \
+ /* Vector Merge High Word */ \
+ V(vmrghw, VMRGHW, 0x1000008C) \
+ /* Vector Merge Low Byte */ \
+ V(vmrglb, VMRGLB, 0x1000010C) \
+ /* Vector Merge Low Halfword */ \
+ V(vmrglh, VMRGLH, 0x1000014C) \
+ /* Vector Merge Low Word */ \
+ V(vmrglw, VMRGLW, 0x1000018C)
#define PPC_VX_OPCODE_C_FORM_LIST(V) \
/* Vector Unpack Low Signed Word */ \
@@ -2404,7 +2426,9 @@ using Instr = uint32_t;
/* Vector Unpack Low Signed Byte */ \
V(vupklsb, VUPKLSB, 0x1000028E) \
/* Vector Unpack High Signed Byte */ \
- V(vupkhsb, VUPKHSB, 0x1000020E)
+ V(vupkhsb, VUPKHSB, 0x1000020E) \
+ /* Vector Population Count Byte */ \
+ V(vpopcntb, VPOPCNTB, 0x10000703)
#define PPC_VX_OPCODE_UNUSED_LIST(V) \
/* Decimal Add Modulo */ \
@@ -2459,26 +2483,6 @@ using Instr = uint32_t;
V(vgbbd, VGBBD, 0x1000050C) \
/* Vector Log Base 2 Estimate Single-Precision */ \
V(vlogefp, VLOGEFP, 0x100001CA) \
- /* Vector Merge High Byte */ \
- V(vmrghb, VMRGHB, 0x1000000C) \
- /* Vector Merge High Halfword */ \
- V(vmrghh, VMRGHH, 0x1000004C) \
- /* Vector Merge High Word */ \
- V(vmrghw, VMRGHW, 0x1000008C) \
- /* Vector Merge Low Byte */ \
- V(vmrglb, VMRGLB, 0x1000010C) \
- /* Vector Merge Low Halfword */ \
- V(vmrglh, VMRGLH, 0x1000014C) \
- /* Vector Merge Low Word */ \
- V(vmrglw, VMRGLW, 0x1000018C) \
- /* Vector Multiply Even Signed Word */ \
- V(vmulesw, VMULESW, 0x10000388) \
- /* Vector Multiply Even Unsigned Word */ \
- V(vmuleuw, VMULEUW, 0x10000288) \
- /* Vector Multiply Odd Signed Word */ \
- V(vmulosw, VMULOSW, 0x10000188) \
- /* Vector Multiply Odd Unsigned Word */ \
- V(vmulouw, VMULOUW, 0x10000088) \
/* Vector NAND */ \
V(vnand, VNAND, 0x10000584) \
/* Vector OR with Complement */ \
@@ -2503,8 +2507,6 @@ using Instr = uint32_t;
V(vpmsumh, VPMSUMH, 0x10000448) \
/* Vector Polynomial Multiply-Sum Word */ \
V(vpmsumw, VPMSUMW, 0x10000488) \
- /* Vector Population Count Byte */ \
- V(vpopcntb, VPOPCNTB, 0x10000703) \
/* Vector Population Count Doubleword */ \
V(vpopcntd, VPOPCNTD, 0x100007C3) \
/* Vector Population Count Halfword */ \
@@ -2912,7 +2914,11 @@ class Instruction {
PPC_M_OPCODE_LIST(OPCODE_CASES)
return static_cast<Opcode>(opcode);
}
-
+ opcode = extcode | BitField(5, 0);
+ switch (opcode) {
+ PPC_VA_OPCODE_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
opcode = extcode | BitField(10, 0);
switch (opcode) {
PPC_VX_OPCODE_LIST(OPCODE_CASES)
@@ -2929,13 +2935,17 @@ class Instruction {
PPC_XFX_OPCODE_LIST(OPCODE_CASES)
return static_cast<Opcode>(opcode);
}
+ opcode = extcode | BitField(10, 2);
+ switch (opcode) {
+ PPC_XX2_OPCODE_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
opcode = extcode | BitField(10, 1);
switch (opcode) {
PPC_X_OPCODE_LIST(OPCODE_CASES)
PPC_XL_OPCODE_LIST(OPCODE_CASES)
PPC_XFL_OPCODE_LIST(OPCODE_CASES)
PPC_XX1_OPCODE_LIST(OPCODE_CASES)
- PPC_XX2_OPCODE_LIST(OPCODE_CASES)
PPC_EVX_OPCODE_LIST(OPCODE_CASES)
return static_cast<Opcode>(opcode);
}
@@ -2961,11 +2971,6 @@ class Instruction {
PPC_Z23_OPCODE_LIST(OPCODE_CASES)
return static_cast<Opcode>(opcode);
}
- opcode = extcode | BitField(5, 0);
- switch (opcode) {
- PPC_VA_OPCODE_LIST(OPCODE_CASES)
- return static_cast<Opcode>(opcode);
- }
opcode = extcode | BitField(5, 1);
switch (opcode) {
PPC_A_OPCODE_LIST(OPCODE_CASES)
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
index e78130ee426..658a41f381f 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -22,7 +22,10 @@
#include "src/runtime/runtime.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
+#endif // V8_ENABLE_WEBASSEMBLY
// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
@@ -1792,8 +1795,13 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
// Put input on stack.
stfdu(double_input, MemOperand(sp, -kDoubleSize));
+#if V8_ENABLE_WEBASSEMBLY
if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
+#else
+ // For balance.
+ if (false) {
+#endif // V8_ENABLE_WEBASSEMBLY
} else {
Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
}
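The #if V8_ENABLE_WEBASSEMBLY / #else guard added above keeps the if/else brace structure balanced when the wasm call path is compiled out. Reduced to a minimal sketch (only the guard macro and the "// For balance." idiom come from this patch; the bodies are placeholders):

#if V8_ENABLE_WEBASSEMBLY
  if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
    // wasm-only call path, e.g. Call(wasm::WasmCode::kDoubleToI, ...).
#else
  // For balance: keep an `if` here so the `} else {` below still parses.
  if (false) {
#endif  // V8_ENABLE_WEBASSEMBLY
  } else {
    // Builtin call path, compiled in either configuration.
  }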
@@ -1967,7 +1975,7 @@ void TurboAssembler::LoadMap(Register destination, Register object) {
FieldMemOperand(object, HeapObject::kMapOffset));
}
-void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
LoadMap(dst, cp);
LoadTaggedPointerField(
dst, FieldMemOperand(
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
index 5da219ba840..1d8f3a388d4 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -119,6 +119,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Returns the pc offset at which the frame ends.
int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
+ void AllocateStackSpace(int bytes) {
+ DCHECK_GE(bytes, 0);
+ if (bytes == 0) return;
+ Add(sp, sp, -bytes, r0);
+ }
+
// Push a fixed frame, consisting of lr, fp, constant pool.
void PushCommonFrame(Register marker_reg = no_reg);
@@ -781,10 +787,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
- LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+ LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
}
- void LoadNativeContextSlot(int index, Register dst);
+ void LoadNativeContextSlot(Register dst, int index);
// ----------------------------------------------------------------
// new PPC macro-assembler interfaces that are slightly higher level
diff --git a/deps/v8/src/codegen/ppc/register-ppc.h b/deps/v8/src/codegen/ppc/register-ppc.h
index bbbee603de0..ffeb327055f 100644
--- a/deps/v8/src/codegen/ppc/register-ppc.h
+++ b/deps/v8/src/codegen/ppc/register-ppc.h
@@ -213,7 +213,12 @@ constexpr Register kConstantPoolRegister = r28; // Constant pool.
constexpr Register kRootRegister = r29; // Roots array pointer.
constexpr Register cp = r30; // JavaScript context pointer.
-constexpr bool kPadArguments = false;
+// Returns the number of padding slots needed for stack pointer alignment.
+constexpr int ArgumentPaddingSlots(int argument_count) {
+ // No argument padding required.
+ return 0;
+}
+
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
@@ -249,6 +254,29 @@ static_assert(sizeof(DoubleRegister) == sizeof(int),
using FloatRegister = DoubleRegister;
+//     |      |  0
+//     |      |  1
+//     |      |  2
+//     |      | ...
+//     |      | 31
+// VSX |
+//     |      | 32
+//     |      | 33
+//     | VMX  | 34
+//     |      | ...
+//     |      | 63
+//
+// VSX registers (0 to 63) can be used by VSX vector instructions, which are
+// mainly focused on floating-point arithmetic. They do provide a few integer
+// instructions such as logical operations, merge and select. The main SIMD
+// integer instructions such as add/sub/mul, extract_lane/replace_lane,
+// comparisons etc. are only available as VMX instructions and can only access
+// the VMX set of vector registers (a subset of the VSX registers). To ensure
+// access to all SIMD instructions in V8 and to avoid moving data between
+// registers, we use only the upper 32 registers (the VMX set) for SIMD
+// operations and the lower set for scalar (non-SIMD) floating-point
+// operations, which keeps our SIMD register set separate from the
+// floating-point one.
enum Simd128RegisterCode {
#define REGISTER_CODE(R) kSimd128Code_##R,
SIMD128_REGISTERS(REGISTER_CODE)
diff --git a/deps/v8/src/codegen/register-arch.h b/deps/v8/src/codegen/register-arch.h
index 3a72daae27f..3936ee80cc2 100644
--- a/deps/v8/src/codegen/register-arch.h
+++ b/deps/v8/src/codegen/register-arch.h
@@ -30,4 +30,18 @@
#error Unknown architecture.
#endif
+namespace v8 {
+namespace internal {
+
+constexpr int AddArgumentPaddingSlots(int argument_count) {
+ return argument_count + ArgumentPaddingSlots(argument_count);
+}
+
+constexpr bool ShouldPadArguments(int argument_count) {
+ return ArgumentPaddingSlots(argument_count) != 0;
+}
+
+} // namespace internal
+} // namespace v8
+
#endif // V8_CODEGEN_REGISTER_ARCH_H_
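The new constexpr helpers above replace the per-architecture kPadArguments flag. A minimal sketch of how a call site might combine them (the wrapper function and its frame-setup shape are assumptions for illustration, not part of this patch):

// Hypothetical caller: reserve stack slots for the arguments plus any
// padding the target architecture needs for stack-pointer alignment.
int ReserveArgumentSlots(TurboAssembler* tasm, int argument_count) {
  // On ppc/s390/riscv64 ArgumentPaddingSlots() returns 0, so this is just
  // argument_count; an architecture needing alignment returns extra slots.
  int total_slots = AddArgumentPaddingSlots(argument_count);
  tasm->AllocateStackSpace(total_slots * kSystemPointerSize);
  if (ShouldPadArguments(argument_count)) {
    // A padding slot was reserved; the caller stores a filler value there.
  }
  return total_slots;
}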
diff --git a/deps/v8/src/codegen/register.cc b/deps/v8/src/codegen/register.cc
deleted file mode 100644
index 4ad76c6caaf..00000000000
--- a/deps/v8/src/codegen/register.cc
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/codegen/register.h"
-#include "src/codegen/register-arch.h"
-
-namespace v8 {
-namespace internal {
-
-bool ShouldPadArguments(int argument_count) {
- return kPadArguments && (argument_count % 2 != 0);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/codegen/register.h b/deps/v8/src/codegen/register.h
index 2dcf0fbe8fd..57f3a1c62ab 100644
--- a/deps/v8/src/codegen/register.h
+++ b/deps/v8/src/codegen/register.h
@@ -70,9 +70,6 @@ class RegisterBase {
int reg_code_;
};
-// Whether padding is needed for the given stack argument count.
-bool ShouldPadArguments(int argument_count);
-
template <typename RegType,
typename = decltype(RegisterName(std::declval<RegType>()))>
inline std::ostream& operator<<(std::ostream& os, RegType reg) {
diff --git a/deps/v8/src/codegen/reloc-info.cc b/deps/v8/src/codegen/reloc-info.cc
index 25b3daef8e5..753b34bdbf5 100644
--- a/deps/v8/src/codegen/reloc-info.cc
+++ b/deps/v8/src/codegen/reloc-info.cc
@@ -519,14 +519,15 @@ void RelocInfo::Verify(Isolate* isolate) {
Address target = target_internal_reference();
Address pc = target_internal_reference_address();
Code code = Code::cast(isolate->FindCodeObject(pc));
- CHECK(target >= code.InstructionStart());
- CHECK(target <= code.InstructionEnd());
+ CHECK(target >= code.InstructionStart(isolate, pc));
+ CHECK(target <= code.InstructionEnd(isolate, pc));
break;
}
case OFF_HEAP_TARGET: {
Address addr = target_off_heap_target();
CHECK_NE(addr, kNullAddress);
- CHECK(!InstructionStream::TryLookupCode(isolate, addr).is_null());
+ CHECK(Builtins::IsBuiltinId(
+ InstructionStream::TryLookupCode(isolate, addr)));
break;
}
case RUNTIME_ENTRY:
diff --git a/deps/v8/src/codegen/reloc-info.h b/deps/v8/src/codegen/reloc-info.h
index f478de86a1d..bef433e10b1 100644
--- a/deps/v8/src/codegen/reloc-info.h
+++ b/deps/v8/src/codegen/reloc-info.h
@@ -65,6 +65,9 @@ class RelocInfo {
WASM_CALL, // FIRST_SHAREABLE_RELOC_MODE
WASM_STUB_CALL,
+ // TODO(ishell): rename to UNEMBEDDED_BUILTIN_ENTRY.
+ // An un-embedded off-heap instruction stream target.
+ // See http://crbug.com/v8/11527 for details.
RUNTIME_ENTRY,
EXTERNAL_REFERENCE, // The address of an external C++ function.
@@ -148,6 +151,7 @@ class RelocInfo {
return base::IsInRange(mode, FIRST_EMBEDDED_OBJECT_RELOC_MODE,
LAST_EMBEDDED_OBJECT_RELOC_MODE);
}
+ // TODO(ishell): rename to IsUnembeddedBuiltinEntry().
static constexpr bool IsRuntimeEntry(Mode mode) {
return mode == RUNTIME_ENTRY;
}
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h b/deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h
index b99262cb367..40bd56d15b5 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h
@@ -45,8 +45,6 @@ namespace internal {
bool CpuFeatures::SupportsOptimizer() { return IsSupported(FPU); }
-bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(RISCV_SIMD); }
-
// -----------------------------------------------------------------------------
// Operand and MemOperand.
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
index e070e72f45e..914ea26f9fe 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
@@ -60,6 +60,8 @@ static unsigned CpuFeaturesImpliedByCompiler() {
return answer;
}
+bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(RISCV_SIMD); }
+
void CpuFeatures::ProbeImpl(bool cross_compile) {
supported_ |= CpuFeaturesImpliedByCompiler();
diff --git a/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64.cc b/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64.cc
index 26730aceca7..23953097cd1 100644
--- a/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64.cc
@@ -281,6 +281,18 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:11421): Implement on this platform.
+ InitializePlatformUnimplemented(data, kParameterCount);
+}
+
+void Compare_BaselineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:11421): Implement on this platform.
+ InitializePlatformUnimplemented(data, kParameterCount);
+}
+
void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
index a4796661203..ff798da0e9f 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
@@ -3862,7 +3862,7 @@ void TurboAssembler::LoadMap(Register destination, Register object) {
Ld(destination, FieldMemOperand(object, HeapObject::kMapOffset));
}
-void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
LoadMap(dst, cp);
Ld(dst,
FieldMemOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
@@ -4564,7 +4564,8 @@ void TurboAssembler::CallCodeObject(Register code_object) {
Call(code_object);
}
-void TurboAssembler::JumpCodeObject(Register code_object) {
+void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+ DCHECK_EQ(JumpMode::kJump, jump_mode);
LoadCodeObjectEntry(code_object, code_object);
Jump(code_object);
}
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
index 75c03cc27bb..b260f1c2009 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
@@ -150,6 +150,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#undef COND_TYPED_ARGS
#undef COND_ARGS
+ void AllocateStackSpace(Register bytes) { Sub64(sp, sp, bytes); }
+
+ void AllocateStackSpace(int bytes) {
+ DCHECK_GE(bytes, 0);
+ if (bytes == 0) return;
+ Sub64(sp, sp, Operand(bytes));
+ }
+
inline void NegateBool(Register rd, Register rs) { Xor(rd, rs, 1); }
// Compare float, if any operand is NaN, result is false except for NE
@@ -219,7 +227,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
- void JumpCodeObject(Register code_object) override;
+ void JumpCodeObject(Register code_object,
+ JumpMode jump_mode = JumpMode::kJump) override;
// Generates an instruction sequence s.t. the return address points to the
// instruction following the call.
@@ -986,10 +995,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
- LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+ LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
}
- void LoadNativeContextSlot(int index, Register dst);
+ void LoadNativeContextSlot(Register dst, int index);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
diff --git a/deps/v8/src/codegen/riscv64/register-riscv64.h b/deps/v8/src/codegen/riscv64/register-riscv64.h
index 2626c4eae70..b97594becda 100644
--- a/deps/v8/src/codegen/riscv64/register-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/register-riscv64.h
@@ -41,6 +41,12 @@ namespace internal {
V(ft4) V(ft5) V(ft6) V(ft7) V(fa0) V(fa1) V(fa2) V(fa3) V(fa4) V(fa5) \
V(fa6) V(fa7)
+// Returns the number of padding slots needed for stack pointer alignment.
+constexpr int ArgumentPaddingSlots(int argument_count) {
+ // No argument padding required.
+ return 0;
+}
+
// clang-format on
// Note that the bit values must match those used in actual instruction
diff --git a/deps/v8/src/codegen/s390/assembler-s390-inl.h b/deps/v8/src/codegen/s390/assembler-s390-inl.h
index f58e891a900..dc04acec613 100644
--- a/deps/v8/src/codegen/s390/assembler-s390-inl.h
+++ b/deps/v8/src/codegen/s390/assembler-s390-inl.h
@@ -48,10 +48,6 @@ namespace internal {
bool CpuFeatures::SupportsOptimizer() { return true; }
-bool CpuFeatures::SupportsWasmSimd128() {
- return CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1);
-}
-
void RelocInfo::apply(intptr_t delta) {
// Absolute code pointer inside code object moves with the code object.
if (IsInternalReference(rmode_)) {
diff --git a/deps/v8/src/codegen/s390/assembler-s390.cc b/deps/v8/src/codegen/s390/assembler-s390.cc
index 76b3d9953e4..dd5f59bc0bc 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/assembler-s390.cc
@@ -159,6 +159,10 @@ static bool supportsSTFLE() {
#endif
}
+bool CpuFeatures::SupportsWasmSimd128() {
+ return CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1);
+}
+
void CpuFeatures::ProbeImpl(bool cross_compile) {
supported_ |= CpuFeaturesImpliedByCompiler();
icache_line_size_ = 256;
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index 511649af80f..be5798d8d4f 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -23,7 +23,10 @@
#include "src/runtime/runtime.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
+#endif // V8_ENABLE_WEBASSEMBLY
// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
@@ -324,8 +327,6 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
if (cond != al) b(NegateCondition(cond), &skip);
- DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);
-
mov(ip, Operand(target, rmode));
b(ip);
@@ -1838,8 +1839,13 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
lay(sp, MemOperand(sp, -kDoubleSize));
StoreF64(double_input, MemOperand(sp));
+#if V8_ENABLE_WEBASSEMBLY
if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
+#else
+ // For balance.
+ if (false) {
+#endif // V8_ENABLE_WEBASSEMBLY
} else {
Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
}
@@ -2002,7 +2008,7 @@ void TurboAssembler::LoadMap(Register destination, Register object) {
FieldMemOperand(object, HeapObject::kMapOffset));
}
-void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
LoadMap(dst, cp);
LoadTaggedPointerField(
dst, FieldMemOperand(
@@ -4755,6 +4761,218 @@ void TurboAssembler::CountTrailingZerosU64(Register dst, Register src,
bind(&done);
}
+void TurboAssembler::AtomicCmpExchangeHelper(Register addr, Register output,
+ Register old_value,
+ Register new_value, int start,
+ int end, int shift_amount,
+ int offset, Register temp0,
+ Register temp1) {
+ LoadU32(temp0, MemOperand(addr, offset));
+ llgfr(temp1, temp0);
+ RotateInsertSelectBits(temp0, old_value, Operand(start), Operand(end),
+ Operand(shift_amount), false);
+ RotateInsertSelectBits(temp1, new_value, Operand(start), Operand(end),
+ Operand(shift_amount), false);
+ CmpAndSwap(temp0, temp1, MemOperand(addr, offset));
+ RotateInsertSelectBits(output, temp0, Operand(start + shift_amount),
+ Operand(end + shift_amount),
+ Operand(64 - shift_amount), true);
+}
+
+void TurboAssembler::AtomicCmpExchangeU8(Register addr, Register output,
+ Register old_value, Register new_value,
+ Register temp0, Register temp1) {
+#ifdef V8_TARGET_BIG_ENDIAN
+#define ATOMIC_COMP_EXCHANGE_BYTE(i) \
+ { \
+ constexpr int idx = (i); \
+ static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
+ constexpr int start = 32 + 8 * idx; \
+ constexpr int end = start + 7; \
+ constexpr int shift_amount = (3 - idx) * 8; \
+ AtomicCmpExchangeHelper(addr, output, old_value, new_value, start, end, \
+ shift_amount, -idx, temp0, temp1); \
+ }
+#else
+#define ATOMIC_COMP_EXCHANGE_BYTE(i) \
+ { \
+ constexpr int idx = (i); \
+ static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
+ constexpr int start = 32 + 8 * (3 - idx); \
+ constexpr int end = start + 7; \
+ constexpr int shift_amount = idx * 8; \
+ AtomicCmpExchangeHelper(addr, output, old_value, new_value, start, end, \
+ shift_amount, -idx, temp0, temp1); \
+ }
+#endif
+
+ Label one, two, three, done;
+ tmll(addr, Operand(3));
+ b(Condition(1), &three);
+ b(Condition(2), &two);
+ b(Condition(4), &one);
+ /* ending with 0b00 */
+ ATOMIC_COMP_EXCHANGE_BYTE(0);
+ b(&done);
+ /* ending with 0b01 */
+ bind(&one);
+ ATOMIC_COMP_EXCHANGE_BYTE(1);
+ b(&done);
+ /* ending with 0b10 */
+ bind(&two);
+ ATOMIC_COMP_EXCHANGE_BYTE(2);
+ b(&done);
+ /* ending with 0b11 */
+ bind(&three);
+ ATOMIC_COMP_EXCHANGE_BYTE(3);
+ bind(&done);
+}
+
+void TurboAssembler::AtomicCmpExchangeU16(Register addr, Register output,
+ Register old_value,
+ Register new_value, Register temp0,
+ Register temp1) {
+#ifdef V8_TARGET_BIG_ENDIAN
+#define ATOMIC_COMP_EXCHANGE_HALFWORD(i) \
+ { \
+ constexpr int idx = (i); \
+ static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
+ constexpr int start = 32 + 16 * idx; \
+ constexpr int end = start + 15; \
+ constexpr int shift_amount = (1 - idx) * 16; \
+ AtomicCmpExchangeHelper(addr, output, old_value, new_value, start, end, \
+ shift_amount, -idx * 2, temp0, temp1); \
+ }
+#else
+#define ATOMIC_COMP_EXCHANGE_HALFWORD(i) \
+ { \
+ constexpr int idx = (i); \
+ static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
+ constexpr int start = 32 + 16 * (1 - idx); \
+ constexpr int end = start + 15; \
+ constexpr int shift_amount = idx * 16; \
+ AtomicCmpExchangeHelper(addr, output, old_value, new_value, start, end, \
+ shift_amount, -idx * 2, temp0, temp1); \
+ }
+#endif
+
+ Label two, done;
+ tmll(addr, Operand(3));
+ b(Condition(2), &two);
+ ATOMIC_COMP_EXCHANGE_HALFWORD(0);
+ b(&done);
+ bind(&two);
+ ATOMIC_COMP_EXCHANGE_HALFWORD(1);
+ bind(&done);
+}
+
+void TurboAssembler::AtomicExchangeHelper(Register addr, Register value,
+ Register output, int start, int end,
+ int shift_amount, int offset,
+ Register scratch) {
+ Label do_cs;
+ LoadU32(output, MemOperand(addr, offset));
+ bind(&do_cs);
+ llgfr(scratch, output);
+ RotateInsertSelectBits(scratch, value, Operand(start), Operand(end),
+ Operand(shift_amount), false);
+ csy(output, scratch, MemOperand(addr, offset));
+ bne(&do_cs, Label::kNear);
+ srl(output, Operand(shift_amount));
+}
+
+void TurboAssembler::AtomicExchangeU8(Register addr, Register value,
+ Register output, Register scratch) {
+#ifdef V8_TARGET_BIG_ENDIAN
+#define ATOMIC_EXCHANGE_BYTE(i) \
+ { \
+ constexpr int idx = (i); \
+ static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
+ constexpr int start = 32 + 8 * idx; \
+ constexpr int end = start + 7; \
+ constexpr int shift_amount = (3 - idx) * 8; \
+ AtomicExchangeHelper(addr, value, output, start, end, shift_amount, -idx, \
+ scratch); \
+ }
+#else
+#define ATOMIC_EXCHANGE_BYTE(i) \
+ { \
+ constexpr int idx = (i); \
+ static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
+ constexpr int start = 32 + 8 * (3 - idx); \
+ constexpr int end = start + 7; \
+ constexpr int shift_amount = idx * 8; \
+ AtomicExchangeHelper(addr, value, output, start, end, shift_amount, -idx, \
+ scratch); \
+ }
+#endif
+ Label three, two, one, done;
+ tmll(addr, Operand(3));
+ b(Condition(1), &three);
+ b(Condition(2), &two);
+ b(Condition(4), &one);
+
+ // end with 0b00
+ ATOMIC_EXCHANGE_BYTE(0);
+ b(&done);
+
+ // ending with 0b01
+ bind(&one);
+ ATOMIC_EXCHANGE_BYTE(1);
+ b(&done);
+
+ // ending with 0b10
+ bind(&two);
+ ATOMIC_EXCHANGE_BYTE(2);
+ b(&done);
+
+ // ending with 0b11
+ bind(&three);
+ ATOMIC_EXCHANGE_BYTE(3);
+
+ bind(&done);
+}
+
+void TurboAssembler::AtomicExchangeU16(Register addr, Register value,
+ Register output, Register scratch) {
+#ifdef V8_TARGET_BIG_ENDIAN
+#define ATOMIC_EXCHANGE_HALFWORD(i) \
+ { \
+ constexpr int idx = (i); \
+ static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
+ constexpr int start = 32 + 16 * idx; \
+ constexpr int end = start + 15; \
+ constexpr int shift_amount = (1 - idx) * 16; \
+ AtomicExchangeHelper(addr, value, output, start, end, shift_amount, \
+ -idx * 2, scratch); \
+ }
+#else
+#define ATOMIC_EXCHANGE_HALFWORD(i) \
+ { \
+ constexpr int idx = (i); \
+ static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
+ constexpr int start = 32 + 16 * (1 - idx); \
+ constexpr int end = start + 15; \
+ constexpr int shift_amount = idx * 16; \
+ AtomicExchangeHelper(addr, value, output, start, end, shift_amount, \
+ -idx * 2, scratch); \
+ }
+#endif
+ Label two, done;
+ tmll(addr, Operand(3));
+ b(Condition(2), &two);
+
+ // end with 0b00
+ ATOMIC_EXCHANGE_HALFWORD(0);
+ b(&done);
+
+ // ending with 0b10
+ bind(&two);
+ ATOMIC_EXCHANGE_HALFWORD(1);
+
+ bind(&done);
+}
+
} // namespace internal
} // namespace v8
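The ATOMIC_COMP_EXCHANGE_BYTE / ATOMIC_EXCHANGE_BYTE macros added above compute one rotate-and-insert bit range per byte lane. Spelling out the big-endian byte case as a worked example (the values follow directly from the macro; the table itself is illustrative):

// Big-endian ATOMIC_COMP_EXCHANGE_BYTE(idx): start = 32 + 8 * idx,
// end = start + 7, shift_amount = (3 - idx) * 8, offset = -idx:
//   idx = 0: bits 32..39, shift 24, offset  0
//   idx = 1: bits 40..47, shift 16, offset -1
//   idx = 2: bits 48..55, shift  8, offset -2
//   idx = 3: bits 56..63, shift  0, offset -3
// The negative offset realigns addr to the containing 32-bit word, and the
// tmll(addr, Operand(3)) dispatch selects the variant matching addr's low
// two bits.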
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h
index f4c3d038b33..f2719c3086c 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.h
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h
@@ -46,6 +46,22 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
using TurboAssemblerBase::TurboAssemblerBase;
+ void AtomicCmpExchangeHelper(Register addr, Register output,
+ Register old_value, Register new_value,
+ int start, int end, int shift_amount, int offset,
+ Register temp0, Register temp1);
+ void AtomicCmpExchangeU8(Register addr, Register output, Register old_value,
+ Register new_value, Register temp0, Register temp1);
+ void AtomicCmpExchangeU16(Register addr, Register output, Register old_value,
+ Register new_value, Register temp0, Register temp1);
+ void AtomicExchangeHelper(Register addr, Register value, Register output,
+ int start, int end, int shift_amount, int offset,
+ Register scratch);
+ void AtomicExchangeU8(Register addr, Register value, Register output,
+ Register scratch);
+ void AtomicExchangeU16(Register addr, Register value, Register output,
+ Register scratch);
+
void DoubleMax(DoubleRegister result_reg, DoubleRegister left_reg,
DoubleRegister right_reg);
void DoubleMin(DoubleRegister result_reg, DoubleRegister left_reg,
@@ -977,6 +993,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Returns the pc offset at which the frame ends.
int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
+ void AllocateStackSpace(int bytes) {
+ DCHECK_GE(bytes, 0);
+ if (bytes == 0) return;
+ lay(sp, MemOperand(sp, -bytes));
+ }
+
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met);
@@ -1239,10 +1261,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
- LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+ LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
}
- void LoadNativeContextSlot(int index, Register dst);
+ void LoadNativeContextSlot(Register dst, int index);
// ---------------------------------------------------------------------------
// Smi utilities
diff --git a/deps/v8/src/codegen/s390/register-s390.h b/deps/v8/src/codegen/s390/register-s390.h
index 0c6da03901d..48accf08c5d 100644
--- a/deps/v8/src/codegen/s390/register-s390.h
+++ b/deps/v8/src/codegen/s390/register-s390.h
@@ -167,7 +167,12 @@ constexpr Register no_reg = Register::no_reg();
constexpr Register kRootRegister = r10; // Roots array pointer.
constexpr Register cp = r13; // JavaScript context pointer.
-constexpr bool kPadArguments = false;
+// Returns the number of padding slots needed for stack pointer alignment.
+constexpr int ArgumentPaddingSlots(int argument_count) {
+ // No argument padding required.
+ return 0;
+}
+
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
diff --git a/deps/v8/src/codegen/safepoint-table.cc b/deps/v8/src/codegen/safepoint-table.cc
index dd379e0535e..58fb6ed9e19 100644
--- a/deps/v8/src/codegen/safepoint-table.cc
+++ b/deps/v8/src/codegen/safepoint-table.cc
@@ -10,19 +10,24 @@
#include "src/diagnostics/disasm.h"
#include "src/execution/frames-inl.h"
#include "src/utils/ostreams.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
-SafepointTable::SafepointTable(Code code)
- : SafepointTable(code.InstructionStart(), code.SafepointTableAddress(),
- code.stack_slots(), true) {}
+SafepointTable::SafepointTable(Isolate* isolate, Address pc, Code code)
+ : SafepointTable(code.InstructionStart(isolate, pc),
+ code.SafepointTableAddress(), code.stack_slots(), true) {}
+#if V8_ENABLE_WEBASSEMBLY
SafepointTable::SafepointTable(const wasm::WasmCode* code)
: SafepointTable(code->instruction_start(),
code->instruction_start() + code->safepoint_table_offset(),
code->stack_slots(), false) {}
+#endif // V8_ENABLE_WEBASSEMBLY
SafepointTable::SafepointTable(Address instruction_start,
Address safepoint_table_address,
@@ -92,7 +97,7 @@ Safepoint SafepointTableBuilder::DefineSafepoint(Assembler* assembler) {
deoptimization_info_.push_back(
DeoptimizationInfo(zone_, assembler->pc_offset_for_safepoint()));
DeoptimizationInfo& new_info = deoptimization_info_.back();
- return Safepoint(new_info.indexes);
+ return Safepoint(new_info.stack_indexes, &new_info.register_indexes);
}
unsigned SafepointTableBuilder::GetCodeOffset() const {
@@ -143,14 +148,22 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
STATIC_ASSERT(SafepointTable::kFixedEntrySize == 3 * kIntSize);
for (const DeoptimizationInfo& info : deoptimization_info_) {
assembler->dd(info.pc);
- assembler->dd(info.deopt_index);
+ if (info.register_indexes) {
+ // We emit the register indexes in the same bits as the deopt_index.
+ // Register indexes and deopt_index should not exist at the same time.
+ DCHECK_EQ(info.deopt_index,
+ static_cast<uint32_t>(Safepoint::kNoDeoptimizationIndex));
+ assembler->dd(info.register_indexes);
+ } else {
+ assembler->dd(info.deopt_index);
+ }
assembler->dd(info.trampoline);
}
// Emit table of bitmaps.
ZoneVector<uint8_t> bits(bytes_per_entry, 0, zone_);
for (const DeoptimizationInfo& info : deoptimization_info_) {
- ZoneChunkList<int>* indexes = info.indexes;
+ ZoneChunkList<int>* indexes = info.stack_indexes;
std::fill(bits.begin(), bits.end(), 0);
// Run through the indexes and build a bitmap.
@@ -194,13 +207,15 @@ bool SafepointTableBuilder::IsIdenticalExceptForPc(
const DeoptimizationInfo& info1, const DeoptimizationInfo& info2) const {
if (info1.deopt_index != info2.deopt_index) return false;
- ZoneChunkList<int>* indexes1 = info1.indexes;
- ZoneChunkList<int>* indexes2 = info2.indexes;
+ ZoneChunkList<int>* indexes1 = info1.stack_indexes;
+ ZoneChunkList<int>* indexes2 = info2.stack_indexes;
if (indexes1->size() != indexes2->size()) return false;
if (!std::equal(indexes1->begin(), indexes1->end(), indexes2->begin())) {
return false;
}
+ if (info1.register_indexes != info2.register_indexes) return false;
+
return true;
}
diff --git a/deps/v8/src/codegen/safepoint-table.h b/deps/v8/src/codegen/safepoint-table.h
index 9efdbfa7843..623b5246980 100644
--- a/deps/v8/src/codegen/safepoint-table.h
+++ b/deps/v8/src/codegen/safepoint-table.h
@@ -50,6 +50,18 @@ class SafepointEntry {
return deopt_index_;
}
+ uint32_t register_bits() const {
+ // The register bits use the same field as the deopt_index_.
+ DCHECK(is_valid());
+ return deopt_index_;
+ }
+
+ bool has_register_bits() const {
+ // The register bits use the same field as the deopt_index_.
+ DCHECK(is_valid());
+ return deopt_index_ != kNoDeoptIndex;
+ }
+
bool has_deoptimization_index() const {
DCHECK(is_valid());
return deopt_index_ != kNoDeoptIndex;
@@ -61,7 +73,7 @@ class SafepointEntry {
}
private:
- unsigned deopt_index_;
+ uint32_t deopt_index_;
uint8_t* bits_;
// It needs to be an integer as it is -1 for eager deoptimizations.
int trampoline_pc_;
@@ -69,8 +81,12 @@ class SafepointEntry {
class SafepointTable {
public:
- explicit SafepointTable(Code code);
+ // The isolate and pc arguments are used for figuring out whether pc
+ // belongs to the embedded or un-embedded code blob.
+ explicit SafepointTable(Isolate* isolate, Address pc, Code code);
+#if V8_ENABLE_WEBASSEMBLY
explicit SafepointTable(const wasm::WasmCode* code);
+#endif // V8_ENABLE_WEBASSEMBLY
SafepointTable(const SafepointTable&) = delete;
SafepointTable& operator=(const SafepointTable&) = delete;
@@ -170,11 +186,20 @@ class Safepoint {
public:
static const int kNoDeoptimizationIndex = SafepointEntry::kNoDeoptIndex;
- void DefinePointerSlot(int index) { indexes_->push_back(index); }
+ void DefinePointerSlot(int index) { stack_indexes_->push_back(index); }
+
+ void DefineRegister(int reg_code) {
+ // Make sure the recorded index is always less than 31, so that we don't
+ // generate {kNoDeoptimizationIndex} by accident.
+ DCHECK_LT(reg_code, 31);
+ *register_indexes_ |= 1u << reg_code;
+ }
private:
- explicit Safepoint(ZoneChunkList<int>* indexes) : indexes_(indexes) {}
- ZoneChunkList<int>* const indexes_;
+ Safepoint(ZoneChunkList<int>* stack_indexes, uint32_t* register_indexes)
+ : stack_indexes_(stack_indexes), register_indexes_(register_indexes) {}
+ ZoneChunkList<int>* const stack_indexes_;
+ uint32_t* register_indexes_;
friend class SafepointTableBuilder;
};
@@ -211,13 +236,15 @@ class SafepointTableBuilder {
unsigned pc;
unsigned deopt_index;
int trampoline;
- ZoneChunkList<int>* indexes;
+ ZoneChunkList<int>* stack_indexes;
+ uint32_t register_indexes;
DeoptimizationInfo(Zone* zone, unsigned pc)
: pc(pc),
deopt_index(Safepoint::kNoDeoptimizationIndex),
trampoline(-1),
- indexes(zone->New<ZoneChunkList<int>>(
- zone, ZoneChunkList<int>::StartMode::kSmall)) {}
+ stack_indexes(zone->New<ZoneChunkList<int>>(
+ zone, ZoneChunkList<int>::StartMode::kSmall)),
+ register_indexes(0) {}
};
// Compares all fields of a {DeoptimizationInfo} except {pc} and {trampoline}.
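DefineRegister() above folds register codes into a single uint32_t bitmask that Emit() writes into the deopt-index field. A small worked example (the register codes are arbitrary):

// Safepoint sp = safepoint_table_builder.DefineSafepoint(assembler);
// sp.DefineRegister(3);  // *register_indexes_ |= 1u << 3
// sp.DefineRegister(5);  // *register_indexes_ |= 1u << 5
// => register_indexes == 0b101000 (40), emitted in place of deopt_index,
//    which must stay kNoDeoptimizationIndex for such an entry.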
diff --git a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
new file mode 100644
index 00000000000..366d1afac9e
--- /dev/null
+++ b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
@@ -0,0 +1,403 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h"
+
+#include "src/codegen/assembler.h"
+#include "src/codegen/cpu-features.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "src/codegen/ia32/register-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/codegen/x64/register-x64.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+namespace v8 {
+namespace internal {
+
+void SharedTurboAssembler::Movapd(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vmovapd(dst, src);
+ } else {
+ // On SSE, movaps is 1 byte shorter than movapd, and has the same
+ // behavior.
+ movaps(dst, src);
+ }
+}
+
+void SharedTurboAssembler::S128Store32Lane(Operand dst, XMMRegister src,
+ uint8_t laneidx) {
+ if (laneidx == 0) {
+ Movss(dst, src);
+ } else {
+ DCHECK_GE(3, laneidx);
+ Extractps(dst, src, laneidx);
+ }
+}
+
+void SharedTurboAssembler::I16x8ExtMulLow(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister scratch,
+ bool is_signed) {
+ is_signed ? Pmovsxbw(scratch, src1) : Pmovzxbw(scratch, src1);
+ is_signed ? Pmovsxbw(dst, src2) : Pmovzxbw(dst, src2);
+ Pmullw(dst, scratch);
+}
+
+void SharedTurboAssembler::I16x8ExtMulHighS(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2,
+ XMMRegister scratch) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpunpckhbw(scratch, src1, src1);
+ vpsraw(scratch, scratch, 8);
+ vpunpckhbw(dst, src2, src2);
+ vpsraw(dst, dst, 8);
+ vpmullw(dst, dst, scratch);
+ } else {
+ if (dst != src1) {
+ movaps(dst, src1);
+ }
+ movaps(scratch, src2);
+ punpckhbw(dst, dst);
+ psraw(dst, 8);
+ punpckhbw(scratch, scratch);
+ psraw(scratch, 8);
+ pmullw(dst, scratch);
+ }
+}
+
+void SharedTurboAssembler::I16x8ExtMulHighU(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2,
+ XMMRegister scratch) {
+ // The logic here is slightly complicated to handle all the cases of register
+ // aliasing. This allows flexibility for callers in TurboFan and Liftoff.
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ if (src1 == src2) {
+ vpxor(scratch, scratch, scratch);
+ vpunpckhbw(dst, src1, scratch);
+ vpmullw(dst, dst, dst);
+ } else {
+ if (dst == src2) {
+ // We overwrite dst, then use src2, so swap src1 and src2.
+ std::swap(src1, src2);
+ }
+ vpxor(scratch, scratch, scratch);
+ vpunpckhbw(dst, src1, scratch);
+ vpunpckhbw(scratch, src2, scratch);
+ vpmullw(dst, dst, scratch);
+ }
+ } else {
+ if (src1 == src2) {
+ xorps(scratch, scratch);
+ if (dst != src1) {
+ movaps(dst, src1);
+ }
+ punpckhbw(dst, scratch);
+ pmullw(dst, scratch);
+ } else {
+ // When dst == src1, nothing special needs to be done.
+ // When dst == src2, swap src1 and src2, since we overwrite dst.
+ // When dst is unique, copy src1 to dst first.
+ if (dst == src2) {
+ std::swap(src1, src2);
+ // Now, dst == src1.
+ } else if (dst != src1) {
+ // dst != src1 && dst != src2.
+ movaps(dst, src1);
+ }
+ xorps(scratch, scratch);
+ punpckhbw(dst, scratch);
+ punpckhbw(scratch, src2);
+ psrlw(scratch, 8);
+ pmullw(dst, scratch);
+ }
+ }
+}
+
+void SharedTurboAssembler::I16x8SConvertI8x16High(XMMRegister dst,
+ XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ // src = |a|b|c|d|e|f|g|h|i|j|k|l|m|n|o|p| (high)
+ // dst = |i|i|j|j|k|k|l|l|m|m|n|n|o|o|p|p|
+ vpunpckhbw(dst, src, src);
+ vpsraw(dst, dst, 8);
+ } else {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ if (dst == src) {
+ // 2 bytes shorter than pshufd, but has a dependency on dst.
+ movhlps(dst, src);
+ pmovsxbw(dst, dst);
+ } else {
+ // No dependency on dst.
+ pshufd(dst, src, 0xEE);
+ pmovsxbw(dst, dst);
+ }
+ }
+}
+
+void SharedTurboAssembler::I16x8UConvertI8x16High(XMMRegister dst,
+ XMMRegister src,
+ XMMRegister scratch) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ // tmp = |0|0|0|0|0|0|0|0 | 0|0|0|0|0|0|0|0|
+ // src = |a|b|c|d|e|f|g|h | i|j|k|l|m|n|o|p|
+ // dst = |0|a|0|b|0|c|0|d | 0|e|0|f|0|g|0|h|
+ XMMRegister tmp = dst == src ? scratch : dst;
+ vpxor(tmp, tmp, tmp);
+ vpunpckhbw(dst, src, tmp);
+ } else {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ if (dst == src) {
+ // xorps can be executed on more ports than pshufd.
+ xorps(scratch, scratch);
+ punpckhbw(dst, scratch);
+ } else {
+ // No dependency on dst.
+ pshufd(dst, src, 0xEE);
+ pmovzxbw(dst, dst);
+ }
+ }
+}
+
+// 1. Multiply low word into scratch.
+// 2. Multiply high word (can be signed or unsigned) into dst.
+// 3. Unpack and interleave scratch and dst into dst.
+void SharedTurboAssembler::I32x4ExtMul(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister scratch,
+ bool low, bool is_signed) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpmullw(scratch, src1, src2);
+ is_signed ? vpmulhw(dst, src1, src2) : vpmulhuw(dst, src1, src2);
+ low ? vpunpcklwd(dst, scratch, dst) : vpunpckhwd(dst, scratch, dst);
+ } else {
+ DCHECK_EQ(dst, src1);
+ movaps(scratch, src1);
+ pmullw(dst, src2);
+ is_signed ? pmulhw(scratch, src2) : pmulhuw(scratch, src2);
+ low ? punpcklwd(dst, scratch) : punpckhwd(dst, scratch);
+ }
+}
+
+void SharedTurboAssembler::I32x4SConvertI16x8High(XMMRegister dst,
+ XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ // src = |a|b|c|d|e|f|g|h| (high)
+ // dst = |e|e|f|f|g|g|h|h|
+ vpunpckhwd(dst, src, src);
+ vpsrad(dst, dst, 16);
+ } else {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ if (dst == src) {
+ // 2 bytes shorter than pshufd, but has a dependency on dst.
+ movhlps(dst, src);
+ pmovsxwd(dst, dst);
+ } else {
+ // No dependency on dst.
+ pshufd(dst, src, 0xEE);
+ pmovsxwd(dst, dst);
+ }
+ }
+}
+
+void SharedTurboAssembler::I32x4UConvertI16x8High(XMMRegister dst,
+ XMMRegister src,
+ XMMRegister scratch) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ // scratch = |0|0|0|0|0|0|0|0|
+ // src = |a|b|c|d|e|f|g|h|
+ // dst = |0|a|0|b|0|c|0|d|
+ XMMRegister tmp = dst == src ? scratch : dst;
+ vpxor(tmp, tmp, tmp);
+ vpunpckhwd(dst, src, tmp);
+ } else {
+ if (dst == src) {
+ // xorps can be executed on more ports than pshufd.
+ xorps(scratch, scratch);
+ punpckhwd(dst, scratch);
+ } else {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ // No dependency on dst.
+ pshufd(dst, src, 0xEE);
+ pmovzxwd(dst, dst);
+ }
+ }
+}
+
+void SharedTurboAssembler::I64x2Abs(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ XMMRegister tmp = dst == src ? scratch : dst;
+ vpxor(tmp, tmp, tmp);
+ vpsubq(tmp, tmp, src);
+ vblendvpd(dst, src, tmp, src);
+ } else {
+ CpuFeatureScope sse_scope(this, SSE3);
+ movshdup(scratch, src);
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ psrad(scratch, 31);
+ xorps(dst, scratch);
+ psubq(dst, scratch);
+ }
+}
+
+void SharedTurboAssembler::I64x2GtS(XMMRegister dst, XMMRegister src0,
+ XMMRegister src1, XMMRegister scratch) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpcmpgtq(dst, src0, src1);
+ } else if (CpuFeatures::IsSupported(SSE4_2)) {
+ CpuFeatureScope sse_scope(this, SSE4_2);
+ DCHECK_EQ(dst, src0);
+ pcmpgtq(dst, src1);
+ } else {
+ CpuFeatureScope sse_scope(this, SSE3);
+ DCHECK_NE(dst, src0);
+ DCHECK_NE(dst, src1);
+ movaps(dst, src1);
+ movaps(scratch, src0);
+ psubq(dst, src0);
+ pcmpeqd(scratch, src1);
+ andps(dst, scratch);
+ movaps(scratch, src0);
+ pcmpgtd(scratch, src1);
+ orps(dst, scratch);
+ movshdup(dst, dst);
+ }
+}
+
+void SharedTurboAssembler::I64x2GeS(XMMRegister dst, XMMRegister src0,
+ XMMRegister src1, XMMRegister scratch) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpcmpgtq(dst, src1, src0);
+ vpcmpeqd(scratch, scratch, scratch);
+ vpxor(dst, dst, scratch);
+ } else if (CpuFeatures::IsSupported(SSE4_2)) {
+ CpuFeatureScope sse_scope(this, SSE4_2);
+ DCHECK_NE(dst, src0);
+ if (dst != src1) {
+ movaps(dst, src1);
+ }
+ pcmpgtq(dst, src0);
+ pcmpeqd(scratch, scratch);
+ xorps(dst, scratch);
+ } else {
+ CpuFeatureScope sse_scope(this, SSE3);
+ DCHECK_NE(dst, src0);
+ DCHECK_NE(dst, src1);
+ movaps(dst, src0);
+ movaps(scratch, src1);
+ psubq(dst, src1);
+ pcmpeqd(scratch, src0);
+ andps(dst, scratch);
+ movaps(scratch, src1);
+ pcmpgtd(scratch, src0);
+ orps(dst, scratch);
+ movshdup(dst, dst);
+ pcmpeqd(scratch, scratch);
+ xorps(dst, scratch);
+ }
+}
+
+// 1. Unpack src0, src1 into even-number elements of scratch.
+// 2. Unpack src1, src0 into even-number elements of dst.
+// 3. Multiply 1. with 2.
+// For non-AVX, use non-destructive pshufd instead of punpckldq/punpckhdq.
+void SharedTurboAssembler::I64x2ExtMul(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister scratch,
+ bool low, bool is_signed) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ if (low) {
+ vpunpckldq(scratch, src1, src1);
+ vpunpckldq(dst, src2, src2);
+ } else {
+ vpunpckhdq(scratch, src1, src1);
+ vpunpckhdq(dst, src2, src2);
+ }
+ if (is_signed) {
+ vpmuldq(dst, scratch, dst);
+ } else {
+ vpmuludq(dst, scratch, dst);
+ }
+ } else {
+ uint8_t mask = low ? 0x50 : 0xFA;
+ pshufd(scratch, src1, mask);
+ pshufd(dst, src2, mask);
+ if (is_signed) {
+ CpuFeatureScope sse4_scope(this, SSE4_1);
+ pmuldq(dst, scratch);
+ } else {
+ pmuludq(dst, scratch);
+ }
+ }
+}
+
+void SharedTurboAssembler::I64x2SConvertI32x4High(XMMRegister dst,
+ XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpunpckhqdq(dst, src, src);
+ vpmovsxdq(dst, dst);
+ } else {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ if (dst == src) {
+ movhlps(dst, src);
+ } else {
+ pshufd(dst, src, 0xEE);
+ }
+ pmovsxdq(dst, dst);
+ }
+}
+
+void SharedTurboAssembler::I64x2UConvertI32x4High(XMMRegister dst,
+ XMMRegister src,
+ XMMRegister scratch) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpxor(scratch, scratch, scratch);
+ vpunpckhdq(dst, src, scratch);
+ } else {
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ xorps(scratch, scratch);
+ punpckhdq(dst, scratch);
+ }
+}
+
+void SharedTurboAssembler::S128Select(XMMRegister dst, XMMRegister mask,
+ XMMRegister src1, XMMRegister src2,
+ XMMRegister scratch) {
+ // v128.select = v128.or(v128.and(v1, c), v128.andnot(v2, c)).
+ // pandn(x, y) = !x & y, so we have to flip the mask and input.
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpandn(scratch, mask, src2);
+ vpand(dst, src1, mask);
+ vpor(dst, dst, scratch);
+ } else {
+ DCHECK_EQ(dst, mask);
+ // Use float ops as they are 1 byte shorter than int ops.
+ movaps(scratch, mask);
+ andnps(scratch, src2);
+ andps(dst, src1);
+ orps(dst, scratch);
+ }
+}
+
+} // namespace internal
+} // namespace v8
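For reference, S128Select above implements the WebAssembly bitwise select v128.or(v128.and(v1, c), v128.andnot(v2, c)); because pandn negates its first operand, the SSE path must pass the mask first. A standalone scalar sketch of the per-lane semantics (names and values are illustrative, not part of the patch):

#include <cassert>
#include <cstdint>

// For every bit: take src1 where the mask bit is 1, src2 where it is 0.
// This is the same value the SSE sequence computes as
// (mask & src1) | (~mask & src2).
uint64_t SelectLane(uint64_t mask, uint64_t src1, uint64_t src2) {
  return (src1 & mask) | (src2 & ~mask);
}

int main() {
  // Mask keeps the low 32 bits of src1 and the high 32 bits of src2.
  assert(SelectLane(0x00000000FFFFFFFFull, 0x1111111111111111ull,
                    0x2222222222222222ull) == 0x2222222211111111ull);
  return 0;
}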
diff --git a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
new file mode 100644
index 00000000000..e2778e472d4
--- /dev/null
+++ b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
@@ -0,0 +1,189 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_SHARED_IA32_X64_MACRO_ASSEMBLER_SHARED_IA32_X64_H_
+#define V8_CODEGEN_SHARED_IA32_X64_MACRO_ASSEMBLER_SHARED_IA32_X64_H_
+
+#include "src/base/macros.h"
+#include "src/codegen/cpu-features.h"
+#include "src/codegen/turbo-assembler.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "src/codegen/ia32/register-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/codegen/x64/register-x64.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+namespace v8 {
+namespace internal {
+class Assembler;
+
+class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
+ public:
+ using TurboAssemblerBase::TurboAssemblerBase;
+
+ void Movapd(XMMRegister dst, XMMRegister src);
+
+ template <typename Dst, typename Src>
+ void Movdqu(Dst dst, Src src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vmovdqu(dst, src);
+ } else {
+ // movups is 1 byte shorter than movdqu. On most SSE systems, this incurs
+ // no delay moving between integer and floating-point domain.
+ movups(dst, src);
+ }
+ }
+
+ template <typename Dst, typename... Args>
+ struct AvxHelper {
+ Assembler* assm;
+ base::Optional<CpuFeature> feature = base::nullopt;
+ // Call a method where the AVX version expects the dst argument to be
+ // duplicated.
+ template <void (Assembler::*avx)(Dst, Dst, Args...),
+ void (Assembler::*no_avx)(Dst, Args...)>
+ void emit(Dst dst, Args... args) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(assm, AVX);
+ (assm->*avx)(dst, dst, args...);
+ } else if (feature.has_value()) {
+ DCHECK(CpuFeatures::IsSupported(*feature));
+ CpuFeatureScope scope(assm, *feature);
+ (assm->*no_avx)(dst, args...);
+ } else {
+ (assm->*no_avx)(dst, args...);
+ }
+ }
+
+ // Call a method where the AVX version expects no duplicated dst argument.
+ template <void (Assembler::*avx)(Dst, Args...),
+ void (Assembler::*no_avx)(Dst, Args...)>
+ void emit(Dst dst, Args... args) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(assm, AVX);
+ (assm->*avx)(dst, args...);
+ } else if (feature.has_value()) {
+ DCHECK(CpuFeatures::IsSupported(*feature));
+ CpuFeatureScope scope(assm, *feature);
+ (assm->*no_avx)(dst, args...);
+ } else {
+ (assm->*no_avx)(dst, args...);
+ }
+ }
+ };
+
+#define AVX_OP(macro_name, name) \
+ template <typename Dst, typename... Args> \
+ void macro_name(Dst dst, Args... args) { \
+ AvxHelper<Dst, Args...>{this} \
+ .template emit<&Assembler::v##name, &Assembler::name>(dst, args...); \
+ }
+
+#define AVX_OP_SSE3(macro_name, name) \
+ template <typename Dst, typename... Args> \
+ void macro_name(Dst dst, Args... args) { \
+ AvxHelper<Dst, Args...>{this, base::Optional<CpuFeature>(SSE3)} \
+ .template emit<&Assembler::v##name, &Assembler::name>(dst, args...); \
+ }
+
+#define AVX_OP_SSSE3(macro_name, name) \
+ template <typename Dst, typename... Args> \
+ void macro_name(Dst dst, Args... args) { \
+ AvxHelper<Dst, Args...>{this, base::Optional<CpuFeature>(SSSE3)} \
+ .template emit<&Assembler::v##name, &Assembler::name>(dst, args...); \
+ }
+
+#define AVX_OP_SSE4_1(macro_name, name) \
+ template <typename Dst, typename... Args> \
+ void macro_name(Dst dst, Args... args) { \
+ AvxHelper<Dst, Args...>{this, base::Optional<CpuFeature>(SSE4_1)} \
+ .template emit<&Assembler::v##name, &Assembler::name>(dst, args...); \
+ }
+
+#define AVX_OP_SSE4_2(macro_name, name) \
+ template <typename Dst, typename... Args> \
+ void macro_name(Dst dst, Args... args) { \
+ AvxHelper<Dst, Args...>{this, base::Optional<CpuFeature>(SSE4_2)} \
+ .template emit<&Assembler::v##name, &Assembler::name>(dst, args...); \
+ }
+
+ AVX_OP(Cvtdq2pd, cvtdq2pd)
+ AVX_OP(Cvtdq2ps, cvtdq2ps)
+ AVX_OP(Cvtps2pd, cvtps2pd)
+ AVX_OP(Cvtpd2ps, cvtpd2ps)
+ AVX_OP(Cvttps2dq, cvttps2dq)
+ AVX_OP(Movaps, movaps)
+ AVX_OP(Movd, movd)
+ AVX_OP(Movhps, movhps)
+ AVX_OP(Movlps, movlps)
+ AVX_OP(Movmskpd, movmskpd)
+ AVX_OP(Movmskps, movmskps)
+ AVX_OP(Movss, movss)
+ AVX_OP(Movsd, movsd)
+ AVX_OP(Movupd, movupd)
+ AVX_OP(Movups, movups)
+ AVX_OP(Pmovmskb, pmovmskb)
+ AVX_OP(Pmullw, pmullw)
+ AVX_OP(Pshuflw, pshuflw)
+ AVX_OP(Pshufhw, pshufhw)
+ AVX_OP(Pshufd, pshufd)
+ AVX_OP(Rcpps, rcpps)
+ AVX_OP(Rsqrtps, rsqrtps)
+ AVX_OP(Sqrtps, sqrtps)
+ AVX_OP(Sqrtpd, sqrtpd)
+ AVX_OP_SSE3(Movddup, movddup)
+ AVX_OP_SSE3(Movshdup, movshdup)
+ AVX_OP_SSSE3(Pabsb, pabsb)
+ AVX_OP_SSSE3(Pabsw, pabsw)
+ AVX_OP_SSSE3(Pabsd, pabsd)
+ AVX_OP_SSE4_1(Extractps, extractps)
+ AVX_OP_SSE4_1(Pextrb, pextrb)
+ AVX_OP_SSE4_1(Pextrw, pextrw)
+ AVX_OP_SSE4_1(Pmovsxbw, pmovsxbw)
+ AVX_OP_SSE4_1(Pmovsxwd, pmovsxwd)
+ AVX_OP_SSE4_1(Pmovsxdq, pmovsxdq)
+ AVX_OP_SSE4_1(Pmovzxbw, pmovzxbw)
+ AVX_OP_SSE4_1(Pmovzxwd, pmovzxwd)
+ AVX_OP_SSE4_1(Pmovzxdq, pmovzxdq)
+ AVX_OP_SSE4_1(Ptest, ptest)
+ AVX_OP_SSE4_1(Roundps, roundps)
+ AVX_OP_SSE4_1(Roundpd, roundpd)
+
+ void S128Store32Lane(Operand dst, XMMRegister src, uint8_t laneidx);
+ void I16x8ExtMulLow(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister scratch, bool is_signed);
+ void I16x8ExtMulHighS(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister scratch);
+ void I16x8ExtMulHighU(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister scratch);
+ void I16x8SConvertI8x16High(XMMRegister dst, XMMRegister src);
+ void I16x8UConvertI8x16High(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch);
+ // Requires that dst == src1 if AVX is not supported.
+ void I32x4ExtMul(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister scratch, bool low, bool is_signed);
+ void I32x4SConvertI16x8High(XMMRegister dst, XMMRegister src);
+ void I32x4UConvertI16x8High(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch);
+ void I64x2Abs(XMMRegister dst, XMMRegister src, XMMRegister scratch);
+ void I64x2GtS(XMMRegister dst, XMMRegister src0, XMMRegister src1,
+ XMMRegister scratch);
+ void I64x2GeS(XMMRegister dst, XMMRegister src0, XMMRegister src1,
+ XMMRegister scratch);
+ void I64x2ExtMul(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister scratch, bool low, bool is_signed);
+ void I64x2SConvertI32x4High(XMMRegister dst, XMMRegister src);
+ void I64x2UConvertI32x4High(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch);
+ // Requires dst == mask when AVX is not supported.
+ void S128Select(XMMRegister dst, XMMRegister mask, XMMRegister src1,
+ XMMRegister src2, XMMRegister scratch);
+};
+} // namespace internal
+} // namespace v8
+#endif // V8_CODEGEN_SHARED_IA32_X64_MACRO_ASSEMBLER_SHARED_IA32_X64_H_
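The AVX_OP* macros above each generate one templated wrapper per instruction: the wrapper routes to the v-prefixed AVX form (duplicating dst when the AVX encoding takes an extra source operand) under a CpuFeatureScope for AVX, and otherwise to the legacy SSE form, optionally guarded by the named SSE level. As a sketch, AVX_OP_SSE4_1(Ptest, ptest) expands to roughly the following:

template <typename Dst, typename... Args>
void Ptest(Dst dst, Args... args) {
  // Dispatches to Assembler::vptest when AVX is available; otherwise checks
  // that SSE4_1 is supported and emits Assembler::ptest.
  AvxHelper<Dst, Args...>{this, base::Optional<CpuFeature>(SSE4_1)}
      .template emit<&Assembler::vptest, &Assembler::ptest>(dst, args...);
}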
diff --git a/deps/v8/src/codegen/signature.h b/deps/v8/src/codegen/signature.h
index bba3a1b13d8..2c4ca3e0d92 100644
--- a/deps/v8/src/codegen/signature.h
+++ b/deps/v8/src/codegen/signature.h
@@ -124,6 +124,60 @@ size_t hash_value(const Signature<T>& sig) {
return hash;
}
+template <typename T, size_t kNumReturns = 0, size_t kNumParams = 0>
+class FixedSizeSignature : public Signature<T> {
+ public:
+ // Add return types to this signature (only allowed if there are none yet).
+ template <typename... ReturnTypes>
+ auto Returns(ReturnTypes... return_types) const {
+ static_assert(kNumReturns == 0, "Please specify all return types at once");
+ return FixedSizeSignature<T, sizeof...(ReturnTypes), kNumParams>{
+ std::initializer_list<T>{return_types...}.begin(), reps_};
+ }
+
+ // Add parameters to this signature (only allowed if there are none yet).
+ template <typename... ParamTypes>
+ auto Params(ParamTypes... param_types) const {
+ static_assert(kNumParams == 0, "Please specify all parameters at once");
+ return FixedSizeSignature<T, kNumReturns, sizeof...(ParamTypes)>{
+ reps_, std::initializer_list<T>{param_types...}.begin()};
+ }
+
+ private:
+ // Other template instantiations can call the private constructor.
+ template <typename T2, size_t kNumReturns2, size_t kNumParams2>
+ friend class FixedSizeSignature;
+
+ FixedSizeSignature(const T* returns, const T* params)
+ : Signature<T>(kNumReturns, kNumParams, reps_) {
+ std::copy(returns, returns + kNumReturns, reps_);
+ std::copy(params, params + kNumParams, reps_ + kNumReturns);
+ }
+
+ T reps_[kNumReturns + kNumParams];
+};
+
+// Specialization for zero-sized signatures.
+template <typename T>
+class FixedSizeSignature<T, 0, 0> : public Signature<T> {
+ public:
+ constexpr FixedSizeSignature() : Signature<T>(0, 0, nullptr) {}
+
+ // Add return types.
+ template <typename... ReturnTypes>
+ static auto Returns(ReturnTypes... return_types) {
+ return FixedSizeSignature<T, sizeof...(ReturnTypes), 0>{
+ std::initializer_list<T>{return_types...}.begin(), nullptr};
+ }
+
+ // Add parameters.
+ template <typename... ParamTypes>
+ static auto Params(ParamTypes... param_types) {
+ return FixedSizeSignature<T, 0, sizeof...(ParamTypes)>{
+ nullptr, std::initializer_list<T>{param_types...}.begin()};
+ }
+};
+
} // namespace internal
} // namespace v8
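FixedSizeSignature above is a builder whose return and parameter counts are template parameters, so a signature can be assembled once and its shape is known at compile time. A hypothetical usage sketch (MachineType is chosen purely for illustration; any element type T works the same way):

// One Int32 return, two Float64 parameters. Returns() and Params() each
// produce a new FixedSizeSignature whose counts are encoded in the type,
// so the result here is FixedSizeSignature<MachineType, 1, 2>.
auto sig = FixedSizeSignature<MachineType>{}
               .Returns(MachineType::Int32())
               .Params(MachineType::Float64(), MachineType::Float64());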
diff --git a/deps/v8/src/codegen/tnode.h b/deps/v8/src/codegen/tnode.h
index a6cfc6983ab..3a56fda9d14 100644
--- a/deps/v8/src/codegen/tnode.h
+++ b/deps/v8/src/codegen/tnode.h
@@ -110,6 +110,16 @@ struct BoolT : Word32T {};
template <class T1, class T2>
struct PairT {};
+struct Simd128T : UntaggedT {
+ static const MachineRepresentation kMachineRepresentation =
+ MachineRepresentation::kSimd128;
+ static constexpr MachineType kMachineType = MachineType::Simd128();
+};
+
+struct I8x16T : Simd128T {};
+struct I16x8T : Simd128T {};
+struct I32x2T : Simd128T {};
+
inline constexpr MachineType CommonMachineType(MachineType type1,
MachineType type2) {
return (type1 == type2) ? type1
@@ -347,16 +357,12 @@ class TNode {
return *this;
}
- bool is_null() const { return node_ == nullptr; }
-
operator compiler::Node*() const { return node_; }
static TNode UncheckedCast(compiler::Node* node) { return TNode(node); }
- protected:
- explicit TNode(compiler::Node* node) : node_(node) { LazyTemplateChecks(); }
-
private:
+ explicit TNode(compiler::Node* node) : node_(node) { LazyTemplateChecks(); }
// These checks shouldn't be checked before TNode is actually used.
void LazyTemplateChecks() {
static_assert(is_valid_type_tag<T>::value, "invalid type tag");
@@ -365,21 +371,6 @@ class TNode {
compiler::Node* node_;
};
-// SloppyTNode<T> is a variant of TNode<T> and allows implicit casts from
-// Node*. It is intended for function arguments as long as some call sites
-// still use untyped Node* arguments.
-// TODO(turbofan): Delete this class once transition is finished.
-template <class T>
-class SloppyTNode : public TNode<T> {
- public:
- SloppyTNode(compiler::Node* node) // NOLINT(runtime/explicit)
- : TNode<T>(node) {}
- template <class U, typename std::enable_if<is_subtype<U, T>::value,
- int>::type = 0>
- SloppyTNode(const TNode<U>& other) // NOLINT(runtime/explicit)
- : TNode<T>(other) {}
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/x64/assembler-x64-inl.h b/deps/v8/src/codegen/x64/assembler-x64-inl.h
index 836566a1aca..02879ba3542 100644
--- a/deps/v8/src/codegen/x64/assembler-x64-inl.h
+++ b/deps/v8/src/codegen/x64/assembler-x64-inl.h
@@ -17,12 +17,6 @@ namespace internal {
bool CpuFeatures::SupportsOptimizer() { return true; }
-bool CpuFeatures::SupportsWasmSimd128() {
- if (IsSupported(SSE4_1)) return true;
- if (FLAG_wasm_simd_ssse3_codegen && IsSupported(SSSE3)) return true;
- return false;
-}
-
// -----------------------------------------------------------------------------
// Implementation of Assembler
@@ -41,8 +35,10 @@ void Assembler::emitw(uint16_t x) {
pc_ += sizeof(uint16_t);
}
+// TODO(ishell): Rename accordingly once RUNTIME_ENTRY is renamed.
void Assembler::emit_runtime_entry(Address entry, RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsRuntimeEntry(rmode));
+ DCHECK_NE(options().code_range_start, 0);
RecordRelocInfo(rmode);
emitl(static_cast<uint32_t>(entry - options().code_range_start));
}
diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc
index 18330a91266..032f7eb13d3 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/assembler-x64.cc
@@ -71,6 +71,14 @@ bool OSHasAVXSupport() {
} // namespace
+bool CpuFeatures::SupportsWasmSimd128() {
+#if V8_ENABLE_WEBASSEMBLY
+ if (IsSupported(SSE4_1)) return true;
+ if (FLAG_wasm_simd_ssse3_codegen && IsSupported(SSSE3)) return true;
+#endif // V8_ENABLE_WEBASSEMBLY
+ return false;
+}
+
void CpuFeatures::ProbeImpl(bool cross_compile) {
base::CPU cpu;
CHECK(cpu.has_sse2()); // SSE2 support is mandatory.
@@ -79,19 +87,9 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
// Only use statically determined features for cross compile (snapshot).
if (cross_compile) return;
- if (cpu.has_sse42() && FLAG_enable_sse4_2) supported_ |= 1u << SSE4_2;
- if (cpu.has_sse41() && FLAG_enable_sse4_1) {
- supported_ |= 1u << SSE4_1;
- supported_ |= 1u << SSSE3;
- }
- if (cpu.has_ssse3() && FLAG_enable_ssse3) supported_ |= 1u << SSSE3;
- if (cpu.has_sse3() && FLAG_enable_sse3) supported_ |= 1u << SSE3;
- // SAHF is not generally available in long mode.
- if (cpu.has_sahf() && FLAG_enable_sahf) supported_ |= 1u << SAHF;
- if (cpu.has_avx() && FLAG_enable_avx && cpu.has_osxsave() &&
- OSHasAVXSupport()) {
- supported_ |= 1u << AVX;
- }
+ // To deal with any combination of flags (e.g. --no-enable-sse4-1
+ // --enable-sse4-2), we start checking from the "highest" supported
+ // extension; for each extension, we enable it if a newer extension that
+ // implies it is supported.
if (cpu.has_avx2() && FLAG_enable_avx2 && IsSupported(AVX)) {
supported_ |= 1u << AVX2;
}
@@ -99,6 +97,24 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
OSHasAVXSupport()) {
supported_ |= 1u << FMA3;
}
+ if ((cpu.has_avx() && FLAG_enable_avx && cpu.has_osxsave() &&
+ OSHasAVXSupport()) ||
+ IsSupported(AVX2) || IsSupported(FMA3)) {
+ supported_ |= 1u << AVX;
+ }
+ if ((cpu.has_sse42() && FLAG_enable_sse4_2) || IsSupported(AVX)) {
+ supported_ |= 1u << SSE4_2;
+ }
+ if ((cpu.has_sse41() && FLAG_enable_sse4_1) || IsSupported(SSE4_2)) {
+ supported_ |= 1u << SSE4_1;
+ }
+ if ((cpu.has_ssse3() && FLAG_enable_ssse3) || IsSupported(SSE4_1)) {
+ supported_ |= 1u << SSSE3;
+ }
+ if ((cpu.has_sse3() && FLAG_enable_sse3) || IsSupported(SSSE3))
+ supported_ |= 1u << SSE3;
+ // SAHF is not generally available in long mode.
+ if (cpu.has_sahf() && FLAG_enable_sahf) supported_ |= 1u << SAHF;
if (cpu.has_bmi1() && FLAG_enable_bmi1) supported_ |= 1u << BMI1;
if (cpu.has_bmi2() && FLAG_enable_bmi2) supported_ |= 1u << BMI2;
if (cpu.has_lzcnt() && FLAG_enable_lzcnt) supported_ |= 1u << LZCNT;
@@ -1194,16 +1210,6 @@ void Assembler::cpuid() {
emit(0xA2);
}
-void Assembler::prefetch(Operand src, int level) {
- DCHECK(is_uint2(level));
- EnsureSpace ensure_space(this);
- emit(0x0F);
- emit(0x18);
- // Emit hint number in Reg position of RegR/M.
- XMMRegister code = XMMRegister::from_code(level);
- emit_sse_operand(code, src);
-}
-
void Assembler::cqo() {
EnsureSpace ensure_space(this);
emit_rex_64();
@@ -1439,6 +1445,14 @@ void Assembler::j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode) {
emitl(code_target_index);
}
+void Assembler::jmp(Address entry, RelocInfo::Mode rmode) {
+ DCHECK(RelocInfo::IsRuntimeEntry(rmode));
+ EnsureSpace ensure_space(this);
+ // 1110 1001 #32-bit disp.
+ emit(0xE9);
+ emit_runtime_entry(entry, rmode);
+}
+
void Assembler::jmp_rel(int32_t offset) {
EnsureSpace ensure_space(this);
// The offset is encoded relative to the next instruction.
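The reordered probing in ProbeImpl above makes each SSE level follow from any newer extension that is already enabled, so a flag such as --no-enable-sse4-1 cannot leave a hole underneath AVX. A simplified standalone sketch of that cascade (CPUID and flag handling collapsed into plain booleans; the AVX2/FMA3, SAHF and BMI details are omitted):

#include <cstdint>

enum Feature : uint32_t { SSE3, SSSE3, SSE4_1, SSE4_2, AVX };

// Each step enables a feature either because the CPU and flags allow it or
// because an already-enabled newer feature implies it.
uint32_t ProbeSimdFeatures(bool sse3, bool ssse3, bool sse41, bool sse42,
                           bool avx) {
  uint32_t supported = 0;
  auto is = [&](Feature f) { return (supported & (1u << f)) != 0; };
  if (avx) supported |= 1u << AVX;
  if (sse42 || is(AVX)) supported |= 1u << SSE4_2;
  if (sse41 || is(SSE4_2)) supported |= 1u << SSE4_1;
  if (ssse3 || is(SSE4_1)) supported |= 1u << SSSE3;
  if (sse3 || is(SSSE3)) supported |= 1u << SSE3;
  return supported;
}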
diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h
index c1dc4a3db15..97e18ed8fef 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.h
+++ b/deps/v8/src/codegen/x64/assembler-x64.h
@@ -786,7 +786,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void ret(int imm16);
void ud2();
void setcc(Condition cc, Register reg);
- void prefetch(Operand src, int level);
void pblendw(XMMRegister dst, Operand src, uint8_t mask);
void pblendw(XMMRegister dst, XMMRegister src, uint8_t mask);
@@ -833,6 +832,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Unconditional jump to L
void jmp(Label* L, Label::Distance distance = Label::kFar);
void jmp(Handle<Code> target, RelocInfo::Mode rmode);
+ void jmp(Address entry, RelocInfo::Mode rmode);
// Jump near absolute indirect (r64)
void jmp(Register adr);
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
index b91e8319acd..cb254370b25 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -10,6 +10,7 @@
#include "src/base/utils/random-number-generator.h"
#include "src/codegen/callable.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/cpu-features.h"
#include "src/codegen/external-reference-table.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
@@ -269,8 +270,8 @@ void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
void TurboAssembler::StoreTaggedSignedField(Operand dst_field_operand,
Smi value) {
if (SmiValuesAre32Bits()) {
- movl(Operand(dst_field_operand, kSmiShift / kBitsPerByte),
- Immediate(value.value()));
+ Move(kScratchRegister, value);
+ movq(dst_field_operand, kScratchRegister);
} else {
StoreTaggedField(dst_field_operand, Immediate(value));
}
@@ -287,7 +288,7 @@ void TurboAssembler::DecompressTaggedPointer(Register destination,
Operand field_operand) {
RecordComment("[ DecompressTaggedPointer");
movl(destination, field_operand);
- addq(destination, kRootRegister);
+ addq(destination, kPointerCageBaseRegister);
RecordComment("]");
}
@@ -295,7 +296,7 @@ void TurboAssembler::DecompressTaggedPointer(Register destination,
Register source) {
RecordComment("[ DecompressTaggedPointer");
movl(destination, source);
- addq(destination, kRootRegister);
+ addq(destination, kPointerCageBaseRegister);
RecordComment("]");
}
@@ -303,7 +304,7 @@ void TurboAssembler::DecompressAnyTagged(Register destination,
Operand field_operand) {
RecordComment("[ DecompressAnyTagged");
movl(destination, field_operand);
- addq(destination, kRootRegister);
+ addq(destination, kPointerCageBaseRegister);
RecordComment("]");
}
@@ -738,35 +739,6 @@ void TurboAssembler::Movdqa(XMMRegister dst, XMMRegister src) {
}
}
-void TurboAssembler::Movapd(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vmovapd(dst, src);
- } else {
- // On SSE, movaps is 1 byte shorter than movapd, and has the same behavior.
- movaps(dst, src);
- }
-}
-
-template <typename Dst, typename Src>
-void TurboAssembler::Movdqu(Dst dst, Src src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vmovdqu(dst, src);
- } else {
- // movups is 1 byte shorter than movdqu. On most SSE systems, this incurs
- // no delay moving between integer and floating-point domain.
- movups(dst, src);
- }
-}
-
-template void TurboAssembler::Movdqu<XMMRegister, Operand>(XMMRegister dst,
- Operand src);
-template void TurboAssembler::Movdqu<Operand, XMMRegister>(Operand dst,
- XMMRegister src);
-template void TurboAssembler::Movdqu<XMMRegister, XMMRegister>(XMMRegister dst,
- XMMRegister src);
-
void TurboAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -1619,14 +1591,7 @@ void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
if (cc == never) return;
j(NegateCondition(cc), &skip, Label::kNear);
}
- // Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
- jmp(kScratchRegister);
- if (FLAG_code_comments) RecordComment("]");
+ TailCallBuiltin(builtin_index);
bind(&skip);
return;
}
@@ -1705,10 +1670,17 @@ void TurboAssembler::CallBuiltin(int builtin_index) {
DCHECK(Builtins::IsBuiltinId(builtin_index));
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
- call(kScratchRegister);
+ if (options().short_builtin_calls) {
+ EmbeddedData d = EmbeddedData::FromBlob(isolate());
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ call(entry, RelocInfo::RUNTIME_ENTRY);
+
+ } else {
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
+ call(kScratchRegister);
+ }
if (FLAG_code_comments) RecordComment("]");
}
@@ -1716,10 +1688,16 @@ void TurboAssembler::TailCallBuiltin(int builtin_index) {
DCHECK(Builtins::IsBuiltinId(builtin_index));
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
- jmp(kScratchRegister);
+ if (options().short_builtin_calls) {
+ EmbeddedData d = EmbeddedData::FromBlob(isolate());
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+
+ } else {
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ Jump(entry, RelocInfo::OFF_HEAP_TARGET);
+ }
if (FLAG_code_comments) RecordComment("]");
}
@@ -1936,7 +1914,7 @@ void PinsrHelper(Assembler* assm, AvxFn<Src> avx, NoAvxFn<Src> noavx,
}
if (dst != src1) {
- assm->movdqu(dst, src1);
+ assm->movaps(dst, src1);
}
if (feature.has_value()) {
DCHECK(CpuFeatures::IsSupported(*feature));
@@ -2108,7 +2086,7 @@ void TurboAssembler::Pshufb(XMMRegister dst, XMMRegister src,
// Make sure these are different so that we won't overwrite mask.
DCHECK_NE(dst, mask);
if (dst != src) {
- movapd(dst, src);
+ movaps(dst, src);
}
CpuFeatureScope sse_scope(this, SSSE3);
pshufb(dst, mask);
@@ -2129,189 +2107,6 @@ void TurboAssembler::Pmulhrsw(XMMRegister dst, XMMRegister src1,
}
}
-void TurboAssembler::I32x4SConvertI16x8High(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- // src = |a|b|c|d|e|f|g|h| (high)
- // dst = |e|e|f|f|g|g|h|h|
- vpunpckhwd(dst, src, src);
- vpsrad(dst, dst, 16);
- } else {
- CpuFeatureScope sse_scope(this, SSE4_1);
- if (dst == src) {
- // 2 bytes shorter than pshufd, but has depdency on dst.
- movhlps(dst, src);
- pmovsxwd(dst, dst);
- } else {
- // No dependency on dst.
- pshufd(dst, src, 0xEE);
- pmovsxwd(dst, dst);
- }
- }
-}
-
-void TurboAssembler::I32x4UConvertI16x8High(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- // scratch = |0|0|0|0|0|0|0|0|
- // src = |a|b|c|d|e|f|g|h|
- // dst = |0|a|0|b|0|c|0|d|
- XMMRegister scratch = dst == src ? kScratchDoubleReg : dst;
- vpxor(scratch, scratch, scratch);
- vpunpckhwd(dst, src, scratch);
- } else {
- CpuFeatureScope sse_scope(this, SSE4_1);
- if (dst == src) {
- // xorps can be executed on more ports than pshufd.
- xorps(kScratchDoubleReg, kScratchDoubleReg);
- punpckhwd(dst, kScratchDoubleReg);
- } else {
- // No dependency on dst.
- pshufd(dst, src, 0xEE);
- pmovzxwd(dst, dst);
- }
- }
-}
-
-void TurboAssembler::I16x8SConvertI8x16High(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- // src = |a|b|c|d|e|f|g|h|i|j|k|l|m|n|o|p| (high)
- // dst = |i|i|j|j|k|k|l|l|m|m|n|n|o|o|p|p|
- vpunpckhbw(dst, src, src);
- vpsraw(dst, dst, 8);
- } else {
- CpuFeatureScope sse_scope(this, SSE4_1);
- if (dst == src) {
- // 2 bytes shorter than pshufd, but has depdency on dst.
- movhlps(dst, src);
- pmovsxbw(dst, dst);
- } else {
- // No dependency on dst.
- pshufd(dst, src, 0xEE);
- pmovsxbw(dst, dst);
- }
- }
-}
-
-void TurboAssembler::I16x8UConvertI8x16High(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- // scratch = |0|0|0|0|0|0|0|0 | 0|0|0|0|0|0|0|0|
- // src = |a|b|c|d|e|f|g|h | i|j|k|l|m|n|o|p|
- // dst = |0|a|0|b|0|c|0|d | 0|e|0|f|0|g|0|h|
- XMMRegister scratch = dst == src ? kScratchDoubleReg : dst;
- vpxor(scratch, scratch, scratch);
- vpunpckhbw(dst, src, scratch);
- } else {
- if (dst == src) {
- // xorps can be executed on more ports than pshufd.
- xorps(kScratchDoubleReg, kScratchDoubleReg);
- punpckhbw(dst, kScratchDoubleReg);
- } else {
- CpuFeatureScope sse_scope(this, SSE4_1);
- // No dependency on dst.
- pshufd(dst, src, 0xEE);
- pmovzxbw(dst, dst);
- }
- }
-}
-
-void TurboAssembler::I64x2SConvertI32x4High(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpunpckhqdq(dst, src, src);
- vpmovsxdq(dst, dst);
- } else {
- CpuFeatureScope sse_scope(this, SSE4_1);
- pshufd(dst, src, 0xEE);
- pmovsxdq(dst, dst);
- }
-}
-
-void TurboAssembler::I64x2UConvertI32x4High(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- vpunpckhdq(dst, src, kScratchDoubleReg);
- } else {
- CpuFeatureScope sse_scope(this, SSE4_1);
- pshufd(dst, src, 0xEE);
- pmovzxdq(dst, dst);
- }
-}
-
-// 1. Unpack src0, src0 into even-number elements of scratch.
-// 2. Unpack src1, src1 into even-number elements of dst.
-// 3. Multiply 1. with 2.
-// For non-AVX, use non-destructive pshufd instead of punpckldq/punpckhdq.
-void TurboAssembler::I64x2ExtMul(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, bool low, bool is_signed) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- if (low) {
- vpunpckldq(kScratchDoubleReg, src1, src1);
- vpunpckldq(dst, src2, src2);
- } else {
- vpunpckhdq(kScratchDoubleReg, src1, src1);
- vpunpckhdq(dst, src2, src2);
- }
- if (is_signed) {
- vpmuldq(dst, kScratchDoubleReg, dst);
- } else {
- vpmuludq(dst, kScratchDoubleReg, dst);
- }
- } else {
- uint8_t mask = low ? 0x50 : 0xFA;
- pshufd(kScratchDoubleReg, src1, mask);
- pshufd(dst, src2, mask);
- if (is_signed) {
- CpuFeatureScope avx_scope(this, SSE4_1);
- pmuldq(dst, kScratchDoubleReg);
- } else {
- pmuludq(dst, kScratchDoubleReg);
- }
- }
-}
-
-// 1. Multiply low word into scratch.
-// 2. Multiply high word (can be signed or unsigned) into dst.
-// 3. Unpack and interleave scratch and dst into dst.
-void TurboAssembler::I32x4ExtMul(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, bool low, bool is_signed) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpmullw(kScratchDoubleReg, src1, src2);
- is_signed ? vpmulhw(dst, src1, src2) : vpmulhuw(dst, src1, src2);
- low ? vpunpcklwd(dst, kScratchDoubleReg, dst)
- : vpunpckhwd(dst, kScratchDoubleReg, dst);
- } else {
- DCHECK_EQ(dst, src1);
- movdqu(kScratchDoubleReg, src1);
- pmullw(dst, src2);
- is_signed ? pmulhw(kScratchDoubleReg, src2)
- : pmulhuw(kScratchDoubleReg, src2);
- low ? punpcklwd(dst, kScratchDoubleReg) : punpckhwd(dst, kScratchDoubleReg);
- }
-}
-
-void TurboAssembler::I16x8ExtMul(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, bool low, bool is_signed) {
- if (low) {
- is_signed ? Pmovsxbw(kScratchDoubleReg, src1)
- : Pmovzxbw(kScratchDoubleReg, src1);
- is_signed ? Pmovsxbw(dst, src2) : Pmovzxbw(dst, src2);
- Pmullw(dst, kScratchDoubleReg);
- } else {
- Palignr(kScratchDoubleReg, src1, uint8_t{8});
- is_signed ? Pmovsxbw(kScratchDoubleReg, kScratchDoubleReg)
- : Pmovzxbw(kScratchDoubleReg, kScratchDoubleReg);
- Palignr(dst, src2, uint8_t{8});
- is_signed ? Pmovsxbw(dst, dst) : Pmovzxbw(dst, dst);
- Pmullw(dst, kScratchDoubleReg);
- }
-}
-
void TurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1,
XMMRegister src2) {
// k = i16x8.splat(0x8000)
@@ -2323,16 +2118,6 @@ void TurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1,
Pxor(dst, kScratchDoubleReg);
}
-void TurboAssembler::S128Store32Lane(Operand dst, XMMRegister src,
- uint8_t laneidx) {
- if (laneidx == 0) {
- Movss(dst, src);
- } else {
- DCHECK_GE(3, laneidx);
- Extractps(dst, src, laneidx);
- }
-}
-
void TurboAssembler::S128Store64Lane(Operand dst, XMMRegister src,
uint8_t laneidx) {
if (laneidx == 0) {
@@ -2347,6 +2132,7 @@ void TurboAssembler::I8x16Popcnt(XMMRegister dst, XMMRegister src,
XMMRegister tmp) {
DCHECK_NE(dst, tmp);
DCHECK_NE(src, tmp);
+ DCHECK_NE(kScratchDoubleReg, tmp);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
vmovdqa(tmp, ExternalReferenceAsOperand(
@@ -2364,8 +2150,8 @@ void TurboAssembler::I8x16Popcnt(XMMRegister dst, XMMRegister src,
// PSHUFB instruction, thus use PSHUFB-free divide-and-conquer
// algorithm on these processors. ATOM CPU feature captures exactly
// the right set of processors.
- xorps(tmp, tmp);
- pavgb(tmp, src);
+ movaps(tmp, src);
+ psrlw(tmp, 1);
if (dst != src) {
movaps(dst, src);
}
@@ -2405,6 +2191,10 @@ void TurboAssembler::F64x2ConvertLowI32x4U(XMMRegister dst, XMMRegister src) {
// dst = [ src_low, 0x43300000, src_high, 0x4330000 ];
// 0x43300000'00000000 is a special double where the significand bits
// precisely represents all uint32 numbers.
+ if (!CpuFeatures::IsSupported(AVX) && dst != src) {
+ movaps(dst, src);
+ src = dst;
+ }
Unpcklps(dst, src,
ExternalReferenceAsOperand(
ExternalReference::
@@ -2485,82 +2275,6 @@ void TurboAssembler::I32x4TruncSatF64x2UZero(XMMRegister dst, XMMRegister src) {
}
}
-void TurboAssembler::I64x2Abs(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- XMMRegister tmp = dst == src ? kScratchDoubleReg : dst;
- CpuFeatureScope avx_scope(this, AVX);
- vpxor(tmp, tmp, tmp);
- vpsubq(tmp, tmp, src);
- vblendvpd(dst, src, tmp, src);
- } else {
- CpuFeatureScope sse_scope(this, SSE3);
- movshdup(kScratchDoubleReg, src);
- if (dst != src) {
- movaps(dst, src);
- }
- psrad(kScratchDoubleReg, 31);
- xorps(dst, kScratchDoubleReg);
- psubq(dst, kScratchDoubleReg);
- }
-}
-
-void TurboAssembler::I64x2GtS(XMMRegister dst, XMMRegister src0,
- XMMRegister src1) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpcmpgtq(dst, src0, src1);
- } else if (CpuFeatures::IsSupported(SSE4_2)) {
- CpuFeatureScope sse_scope(this, SSE4_2);
- DCHECK_EQ(dst, src0);
- pcmpgtq(dst, src1);
- } else {
- DCHECK_NE(dst, src0);
- DCHECK_NE(dst, src1);
- movdqa(dst, src1);
- movdqa(kScratchDoubleReg, src0);
- psubq(dst, src0);
- pcmpeqd(kScratchDoubleReg, src1);
- pand(dst, kScratchDoubleReg);
- movdqa(kScratchDoubleReg, src0);
- pcmpgtd(kScratchDoubleReg, src1);
- por(dst, kScratchDoubleReg);
- pshufd(dst, dst, 0xF5);
- }
-}
-
-void TurboAssembler::I64x2GeS(XMMRegister dst, XMMRegister src0,
- XMMRegister src1) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpcmpgtq(dst, src1, src0);
- vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- vpxor(dst, dst, kScratchDoubleReg);
- } else if (CpuFeatures::IsSupported(SSE4_2)) {
- CpuFeatureScope sse_scope(this, SSE4_2);
- DCHECK_NE(dst, src0);
- if (dst != src1) {
- movdqa(dst, src1);
- }
- pcmpgtq(dst, src0);
- pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- pxor(dst, kScratchDoubleReg);
- } else {
- DCHECK_NE(dst, src0);
- DCHECK_NE(dst, src1);
- movdqa(dst, src0);
- movdqa(kScratchDoubleReg, src1);
- psubq(dst, src1);
- pcmpeqd(kScratchDoubleReg, src0);
- pand(dst, kScratchDoubleReg);
- movdqa(kScratchDoubleReg, src1);
- pcmpgtd(kScratchDoubleReg, src0);
- por(dst, kScratchDoubleReg);
- pshufd(dst, dst, 0xF5);
- pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- pxor(dst, kScratchDoubleReg);
- }
-}
-
void TurboAssembler::I16x8ExtAddPairwiseI8x16S(XMMRegister dst,
XMMRegister src) {
// pmaddubsw treats the first operand as unsigned, so the external reference
@@ -2586,20 +2300,52 @@ void TurboAssembler::I16x8ExtAddPairwiseI8x16S(XMMRegister dst,
void TurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst,
XMMRegister src) {
- // src = |a|b|c|d|e|f|g|h|
- // kScratchDoubleReg = i32x4.splat(0x0000FFFF)
- Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Psrld(kScratchDoubleReg, byte{16});
- // kScratchDoubleReg =|0|b|0|d|0|f|0|h|
- Pand(kScratchDoubleReg, src);
- // dst = |0|a|0|c|0|e|0|g|
- Psrld(dst, src, byte{16});
- // dst = |a+b|c+d|e+f|g+h|
- Paddd(dst, kScratchDoubleReg);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ // src = |a|b|c|d|e|f|g|h| (low)
+ // scratch = |0|a|0|c|0|e|0|g|
+ vpsrld(kScratchDoubleReg, src, 16);
+ // dst = |0|b|0|d|0|f|0|h|
+ vpblendw(dst, src, kScratchDoubleReg, 0xAA);
+ // dst = |a+b|c+d|e+f|g+h|
+ vpaddd(dst, kScratchDoubleReg, dst);
+ } else if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ // There is a potentially better lowering if we get rip-relative constants,
+ // see https://github.com/WebAssembly/simd/pull/380.
+ movaps(kScratchDoubleReg, src);
+ psrld(kScratchDoubleReg, 16);
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ pblendw(dst, kScratchDoubleReg, 0xAA);
+ paddd(dst, kScratchDoubleReg);
+ } else {
+ // src = |a|b|c|d|e|f|g|h|
+ // kScratchDoubleReg = i32x4.splat(0x0000FFFF)
+ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ psrld(kScratchDoubleReg, byte{16});
+ // kScratchDoubleReg =|0|b|0|d|0|f|0|h|
+ andps(kScratchDoubleReg, src);
+ // dst = |0|a|0|c|0|e|0|g|
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ psrld(dst, byte{16});
+ // dst = |a+b|c+d|e+f|g+h|
+ paddd(dst, kScratchDoubleReg);
+ }
}
void TurboAssembler::I8x16Swizzle(XMMRegister dst, XMMRegister src,
- XMMRegister mask) {
+ XMMRegister mask, bool omit_add) {
+ if (omit_add) {
+ // We have determined that the indices are immediates, and they are either
+ // within bounds, or the top bit is set, so we can omit the add.
+ Pshufb(dst, src, mask);
+ return;
+ }
+
// Out-of-range indices should return 0, add 112 so that any value > 15
// saturates to 128 (top bit set), so pshufb will zero that lane.
Operand op = ExternalReferenceAsOperand(
@@ -2610,7 +2356,7 @@ void TurboAssembler::I8x16Swizzle(XMMRegister dst, XMMRegister src,
vpshufb(dst, src, kScratchDoubleReg);
} else {
CpuFeatureScope sse_scope(this, SSSE3);
- movdqa(kScratchDoubleReg, op);
+ movaps(kScratchDoubleReg, op);
if (dst != src) {
movaps(dst, src);
}
@@ -2644,25 +2390,6 @@ void TurboAssembler::Psrld(XMMRegister dst, XMMRegister src, byte imm8) {
}
}
-void TurboAssembler::S128Select(XMMRegister dst, XMMRegister mask,
- XMMRegister src1, XMMRegister src2) {
- // v128.select = v128.or(v128.and(v1, c), v128.andnot(v2, c)).
- // pandn(x, y) = !x & y, so we have to flip the mask and input.
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpandn(kScratchDoubleReg, mask, src2);
- vpand(dst, src1, mask);
- vpor(dst, dst, kScratchDoubleReg);
- } else {
- DCHECK_EQ(dst, mask);
- // Use float ops as they are 1 byte shorter than int ops.
- movaps(kScratchDoubleReg, mask);
- andnps(kScratchDoubleReg, src2);
- andps(dst, src1);
- orps(dst, kScratchDoubleReg);
- }
-}
-
void TurboAssembler::Lzcntl(Register dst, Register src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
@@ -2809,12 +2536,6 @@ void TurboAssembler::Popcntq(Register dst, Operand src) {
UNREACHABLE();
}
-// Order general registers are pushed by Pushad:
-// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
-const int
- MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
- 0, 1, 2, 3, -1, -1, 4, 5, 6, 7, -1, 8, 9, -1, 10, 11};
-
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
@@ -3356,7 +3077,7 @@ void TurboAssembler::AllocateStackSpace(int bytes) {
}
#endif
-void MacroAssembler::EnterExitFramePrologue(bool save_rax,
+void MacroAssembler::EnterExitFramePrologue(Register saved_rax_reg,
StackFrame::Type frame_type) {
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
@@ -3376,8 +3097,8 @@ void MacroAssembler::EnterExitFramePrologue(bool save_rax,
Push(Immediate(0)); // Saved entry sp, patched before call.
// Save the frame pointer and the context in top.
- if (save_rax) {
- movq(r14, rax); // Backup rax in callee-save register.
+ if (saved_rax_reg != no_reg) {
+ movq(saved_rax_reg, rax); // Backup rax in callee-save register.
}
Store(
@@ -3426,18 +3147,19 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles,
StackFrame::Type frame_type) {
- EnterExitFramePrologue(true, frame_type);
+ Register saved_rax_reg = r12;
+ EnterExitFramePrologue(saved_rax_reg, frame_type);
// Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
int offset = StandardFrameConstants::kCallerSPOffset - kSystemPointerSize;
- leaq(r15, Operand(rbp, r14, times_system_pointer_size, offset));
+ leaq(r15, Operand(rbp, saved_rax_reg, times_system_pointer_size, offset));
EnterExitFrameEpilogue(arg_stack_space, save_doubles);
}
void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
- EnterExitFramePrologue(false, StackFrame::EXIT);
+ EnterExitFramePrologue(no_reg, StackFrame::EXIT);
EnterExitFrameEpilogue(arg_stack_space, false);
}
@@ -3502,7 +3224,7 @@ static const int kRegisterPassedArguments = 4;
static const int kRegisterPassedArguments = 6;
#endif
-void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
// Load native context.
LoadMap(dst, rsi);
LoadTaggedPointerField(
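The rewritten I32x4ExtAddPairwiseI16x8U above produces, per its inline comments, |a+b|c+d|e+f|g+h|: each pair of adjacent unsigned 16-bit lanes is widened and summed into one 32-bit lane. A scalar reference model of that semantics, independent of which of the three code paths is taken (standalone and illustrative only):

#include <array>
#include <cassert>
#include <cstdint>

// i32x4.extadd_pairwise_i16x8_u: adjacent unsigned 16-bit lanes are
// zero-extended and summed into one 32-bit lane each.
std::array<uint32_t, 4> ExtAddPairwiseU16(const std::array<uint16_t, 8>& src) {
  std::array<uint32_t, 4> dst;
  for (int i = 0; i < 4; ++i) {
    dst[i] = static_cast<uint32_t>(src[2 * i]) +
             static_cast<uint32_t>(src[2 * i + 1]);
  }
  return dst;
}

int main() {
  auto dst = ExtAddPairwiseU16({0xFFFF, 0xFFFF, 1, 2, 3, 4, 5, 6});
  assert(dst[0] == 0x1FFFEu);  // widened sum, no 16-bit wrap-around
  assert(dst[1] == 3u && dst[2] == 7u && dst[3] == 11u);
  return 0;
}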
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h
index be0b07c17ff..40ab1b925c8 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h
@@ -11,6 +11,7 @@
#include "src/base/flags.h"
#include "src/codegen/bailout-reason.h"
+#include "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h"
#include "src/codegen/x64/assembler-x64.h"
#include "src/common/globals.h"
#include "src/objects/contexts.h"
@@ -58,99 +59,17 @@ class StackArgumentsAccessor {
DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor);
};
-class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
public:
- using TurboAssemblerBase::TurboAssemblerBase;
-
- template <typename Dst, typename... Args>
- struct AvxHelper {
- Assembler* assm;
- base::Optional<CpuFeature> feature = base::nullopt;
- // Call a method where the AVX version expects the dst argument to be
- // duplicated.
- template <void (Assembler::*avx)(Dst, Dst, Args...),
- void (Assembler::*no_avx)(Dst, Args...)>
- void emit(Dst dst, Args... args) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(assm, AVX);
- (assm->*avx)(dst, dst, args...);
- } else if (feature.has_value()) {
- DCHECK(CpuFeatures::IsSupported(*feature));
- CpuFeatureScope scope(assm, *feature);
- (assm->*no_avx)(dst, args...);
- } else {
- (assm->*no_avx)(dst, args...);
- }
- }
-
- // Call a method where the AVX version expects no duplicated dst argument.
- template <void (Assembler::*avx)(Dst, Args...),
- void (Assembler::*no_avx)(Dst, Args...)>
- void emit(Dst dst, Args... args) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(assm, AVX);
- (assm->*avx)(dst, args...);
- } else if (feature.has_value()) {
- DCHECK(CpuFeatures::IsSupported(*feature));
- CpuFeatureScope scope(assm, *feature);
- (assm->*no_avx)(dst, args...);
- } else {
- (assm->*no_avx)(dst, args...);
- }
- }
- };
-
-#define AVX_OP(macro_name, name) \
- template <typename Dst, typename... Args> \
- void macro_name(Dst dst, Args... args) { \
- AvxHelper<Dst, Args...>{this} \
- .template emit<&Assembler::v##name, &Assembler::name>(dst, args...); \
- }
-
-#define AVX_OP_SSE3(macro_name, name) \
- template <typename Dst, typename... Args> \
- void macro_name(Dst dst, Args... args) { \
- AvxHelper<Dst, Args...>{this, base::Optional<CpuFeature>(SSE3)} \
- .template emit<&Assembler::v##name, &Assembler::name>(dst, args...); \
- }
-
-#define AVX_OP_SSSE3(macro_name, name) \
- template <typename Dst, typename... Args> \
- void macro_name(Dst dst, Args... args) { \
- AvxHelper<Dst, Args...>{this, base::Optional<CpuFeature>(SSSE3)} \
- .template emit<&Assembler::v##name, &Assembler::name>(dst, args...); \
- }
-
-#define AVX_OP_SSE4_1(macro_name, name) \
- template <typename Dst, typename... Args> \
- void macro_name(Dst dst, Args... args) { \
- AvxHelper<Dst, Args...>{this, base::Optional<CpuFeature>(SSE4_1)} \
- .template emit<&Assembler::v##name, &Assembler::name>(dst, args...); \
- }
-#define AVX_OP_SSE4_2(macro_name, name) \
- template <typename Dst, typename... Args> \
- void macro_name(Dst dst, Args... args) { \
- AvxHelper<Dst, Args...>{this, base::Optional<CpuFeature>(SSE4_2)} \
- .template emit<&Assembler::v##name, &Assembler::name>(dst, args...); \
- }
+ using SharedTurboAssembler::SharedTurboAssembler;
AVX_OP(Subsd, subsd)
AVX_OP(Divss, divss)
AVX_OP(Divsd, divsd)
AVX_OP(Orps, orps)
AVX_OP(Xorps, xorps)
AVX_OP(Xorpd, xorpd)
- AVX_OP(Movd, movd)
AVX_OP(Movq, movq)
- AVX_OP(Movaps, movaps)
- AVX_OP(Movups, movups)
- AVX_OP(Movmskps, movmskps)
- AVX_OP(Movmskpd, movmskpd)
- AVX_OP(Pmovmskb, pmovmskb)
- AVX_OP(Movss, movss)
- AVX_OP(Movsd, movsd)
AVX_OP(Movhlps, movhlps)
- AVX_OP(Movlps, movlps)
- AVX_OP(Movhps, movhps)
AVX_OP(Pcmpeqb, pcmpeqb)
AVX_OP(Pcmpeqw, pcmpeqw)
AVX_OP(Pcmpeqd, pcmpeqd)
@@ -182,9 +101,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP(Cmpnlepd, cmpnlepd)
AVX_OP(Sqrtss, sqrtss)
AVX_OP(Sqrtsd, sqrtsd)
- AVX_OP(Sqrtps, sqrtps)
- AVX_OP(Sqrtpd, sqrtpd)
- AVX_OP(Cvttps2dq, cvttps2dq)
AVX_OP(Cvttpd2dq, cvttpd2dq)
AVX_OP(Ucomiss, ucomiss)
AVX_OP(Ucomisd, ucomisd)
@@ -218,7 +134,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP(Paddusb, paddusb)
AVX_OP(Paddusw, paddusw)
AVX_OP(Pcmpgtd, pcmpgtd)
- AVX_OP(Pmullw, pmullw)
AVX_OP(Pmuludq, pmuludq)
AVX_OP(Addpd, addpd)
AVX_OP(Subpd, subpd)
@@ -228,18 +143,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP(Divpd, divpd)
AVX_OP(Maxps, maxps)
AVX_OP(Maxpd, maxpd)
- AVX_OP(Cvtdq2ps, cvtdq2ps)
- AVX_OP(Cvtdq2pd, cvtdq2pd)
- AVX_OP(Cvtpd2ps, cvtpd2ps)
- AVX_OP(Cvtps2pd, cvtps2pd)
- AVX_OP(Rcpps, rcpps)
- AVX_OP(Rsqrtps, rsqrtps)
AVX_OP(Addps, addps)
AVX_OP(Subps, subps)
AVX_OP(Mulps, mulps)
AVX_OP(Divps, divps)
- AVX_OP(Pshuflw, pshuflw)
- AVX_OP(Pshufhw, pshufhw)
AVX_OP(Packsswb, packsswb)
AVX_OP(Packuswb, packuswb)
AVX_OP(Packssdw, packssdw)
@@ -251,13 +158,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP(Punpckhdq, punpckhdq)
AVX_OP(Punpcklqdq, punpcklqdq)
AVX_OP(Punpckhqdq, punpckhqdq)
- AVX_OP(Pshufd, pshufd)
AVX_OP(Cmpps, cmpps)
AVX_OP(Cmppd, cmppd)
AVX_OP(Movlhps, movlhps)
AVX_OP_SSE3(Haddps, haddps)
- AVX_OP_SSE3(Movddup, movddup)
- AVX_OP_SSE3(Movshdup, movshdup)
AVX_OP_SSSE3(Phaddd, phaddd)
AVX_OP_SSSE3(Phaddw, phaddw)
AVX_OP_SSSE3(Pshufb, pshufb)
@@ -265,9 +169,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP_SSSE3(Psignw, psignw)
AVX_OP_SSSE3(Psignd, psignd)
AVX_OP_SSSE3(Palignr, palignr)
- AVX_OP_SSSE3(Pabsb, pabsb)
- AVX_OP_SSSE3(Pabsw, pabsw)
- AVX_OP_SSSE3(Pabsd, pabsd)
AVX_OP_SSE4_1(Pcmpeqq, pcmpeqq)
AVX_OP_SSE4_1(Packusdw, packusdw)
AVX_OP_SSE4_1(Pminsb, pminsb)
@@ -279,22 +180,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP_SSE4_1(Pmaxuw, pmaxuw)
AVX_OP_SSE4_1(Pmaxud, pmaxud)
AVX_OP_SSE4_1(Pmulld, pmulld)
- AVX_OP_SSE4_1(Extractps, extractps)
AVX_OP_SSE4_1(Insertps, insertps)
AVX_OP_SSE4_1(Pinsrq, pinsrq)
AVX_OP_SSE4_1(Pblendw, pblendw)
- AVX_OP_SSE4_1(Ptest, ptest)
- AVX_OP_SSE4_1(Pmovsxbw, pmovsxbw)
- AVX_OP_SSE4_1(Pmovsxwd, pmovsxwd)
- AVX_OP_SSE4_1(Pmovsxdq, pmovsxdq)
- AVX_OP_SSE4_1(Pmovzxbw, pmovzxbw)
- AVX_OP_SSE4_1(Pmovzxwd, pmovzxwd)
- AVX_OP_SSE4_1(Pmovzxdq, pmovzxdq)
- AVX_OP_SSE4_1(Pextrb, pextrb)
- AVX_OP_SSE4_1(Pextrw, pextrw)
AVX_OP_SSE4_1(Pextrq, pextrq)
- AVX_OP_SSE4_1(Roundps, roundps)
- AVX_OP_SSE4_1(Roundpd, roundpd)
AVX_OP_SSE4_1(Roundss, roundss)
AVX_OP_SSE4_1(Roundsd, roundsd)
AVX_OP_SSE4_2(Pcmpgtq, pcmpgtq)
@@ -361,13 +250,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
- void Movapd(XMMRegister dst, XMMRegister src);
void Movdqa(XMMRegister dst, Operand src);
void Movdqa(XMMRegister dst, XMMRegister src);
- template <typename Dst, typename Src>
- void Movdqu(Dst dst, Src src);
-
void Cvtss2sd(XMMRegister dst, XMMRegister src);
void Cvtss2sd(XMMRegister dst, Operand src);
void Cvtsd2ss(XMMRegister dst, XMMRegister src);
@@ -524,7 +409,15 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Operand EntryFromBuiltinIndexAsOperand(Builtins::Name builtin_index);
Operand EntryFromBuiltinIndexAsOperand(Register builtin_index);
void CallBuiltinByIndex(Register builtin_index) override;
+ void CallBuiltin(Builtins::Name builtin) {
+ // TODO(11527): drop the int overload in favour of the Builtins::Name one.
+ return CallBuiltin(static_cast<int>(builtin));
+ }
void CallBuiltin(int builtin_index);
+ void TailCallBuiltin(Builtins::Name builtin) {
+ // TODO(11527): drop the int overload in favour of the Builtins::Name one.
+ return TailCallBuiltin(static_cast<int>(builtin));
+ }
void TailCallBuiltin(int builtin_index);
void LoadCodeObjectEntry(Register destination, Register code_object) override;
@@ -598,28 +491,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// These Wasm SIMD ops do not have direct lowerings on x64. These
// helpers are optimized to produce the fastest and smallest codegen.
// Defined here to allow usage on both TurboFan and Liftoff.
- void I16x8SConvertI8x16High(XMMRegister dst, XMMRegister src);
- void I16x8UConvertI8x16High(XMMRegister dst, XMMRegister src);
- void I32x4SConvertI16x8High(XMMRegister dst, XMMRegister src);
- void I32x4UConvertI16x8High(XMMRegister dst, XMMRegister src);
- void I64x2SConvertI32x4High(XMMRegister dst, XMMRegister src);
- void I64x2UConvertI32x4High(XMMRegister dst, XMMRegister src);
-
- // Requires dst == mask when AVX is not supported.
- void S128Select(XMMRegister dst, XMMRegister mask, XMMRegister src1,
- XMMRegister src2);
-
- void I64x2ExtMul(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- bool low, bool is_signed);
- // Requires that dst == src1 if AVX is not supported.
- void I32x4ExtMul(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- bool low, bool is_signed);
- void I16x8ExtMul(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- bool low, bool is_signed);
-
void I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void S128Store32Lane(Operand dst, XMMRegister src, uint8_t laneidx);
void S128Store64Lane(Operand dst, XMMRegister src, uint8_t laneidx);
void I8x16Popcnt(XMMRegister dst, XMMRegister src, XMMRegister tmp);
@@ -628,14 +501,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void I32x4TruncSatF64x2SZero(XMMRegister dst, XMMRegister src);
void I32x4TruncSatF64x2UZero(XMMRegister dst, XMMRegister src);
- void I64x2Abs(XMMRegister dst, XMMRegister src);
- void I64x2GtS(XMMRegister dst, XMMRegister src0, XMMRegister src1);
- void I64x2GeS(XMMRegister dst, XMMRegister src0, XMMRegister src1);
-
void I16x8ExtAddPairwiseI8x16S(XMMRegister dst, XMMRegister src);
void I32x4ExtAddPairwiseI16x8U(XMMRegister dst, XMMRegister src);
- void I8x16Swizzle(XMMRegister dst, XMMRegister src, XMMRegister mask);
+ void I8x16Swizzle(XMMRegister dst, XMMRegister src, XMMRegister mask,
+ bool omit_add = false);
void Abspd(XMMRegister dst);
void Negpd(XMMRegister dst);
@@ -704,6 +574,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void InitializeRootRegister() {
ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
Move(kRootRegister, isolate_root);
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ Move(kPointerCageBaseRegister, isolate_root);
+#endif
}
void SaveRegisters(RegList registers);
@@ -1091,11 +964,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
- LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+ LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
}
// Load the native context slot with the current index.
- void LoadNativeContextSlot(int index, Register dst);
+ void LoadNativeContextSlot(Register dst, int index);
// ---------------------------------------------------------------------------
// Runtime calls
@@ -1140,25 +1013,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// In-place weak references.
void LoadWeakValue(Register in_out, Label* target_if_cleared);
- // ---------------------------------------------------------------------------
- // Debugging
-
- static int SafepointRegisterStackIndex(Register reg) {
- return SafepointRegisterStackIndex(reg.code());
- }
-
private:
- // Order general registers are pushed by Pushad.
- // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14, r15.
- static const int kSafepointPushRegisterIndices[Register::kNumRegisters];
- static const int kNumSafepointSavedRegisters = 12;
-
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
InvokeFlag flag);
- void EnterExitFramePrologue(bool save_rax, StackFrame::Type frame_type);
+ void EnterExitFramePrologue(Register saved_rax_reg,
+ StackFrame::Type frame_type);
// Allocates arg_stack_space * kSystemPointerSize memory (not GCed) on the
// stack accessible via StackSpaceOperand.
@@ -1166,15 +1028,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void LeaveExitFrameEpilogue();
- // Compute memory operands for safepoint stack slots.
- static int SafepointRegisterStackIndex(int reg_code) {
- return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
- }
-
- // Needs access to SafepointRegisterStackIndex for compiled frame
- // traversal.
- friend class CommonFrame;
-
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
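The new CallBuiltin(Builtins::Name) and TailCallBuiltin(Builtins::Name) overloads above are thin shims that cast the enum to the int index the existing implementations take, so call sites can name the builtin directly. A hypothetical call-site sketch (the specific builtin is illustrative):

// Previously: CallBuiltin(static_cast<int>(Builtins::kAbort));
// With the overload added in this patch:
CallBuiltin(Builtins::kAbort);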
diff --git a/deps/v8/src/codegen/x64/register-x64.h b/deps/v8/src/codegen/x64/register-x64.h
index d9ba5a38b3b..9a812d06a1b 100644
--- a/deps/v8/src/codegen/x64/register-x64.h
+++ b/deps/v8/src/codegen/x64/register-x64.h
@@ -29,20 +29,29 @@ namespace internal {
V(r14) \
V(r15)
-#define ALLOCATABLE_GENERAL_REGISTERS(V) \
- V(rax) \
- V(rbx) \
- V(rdx) \
- V(rcx) \
- V(rsi) \
- V(rdi) \
- V(r8) \
- V(r9) \
- V(r11) \
- V(r12) \
- V(r14) \
+#define ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(rax) \
+ V(rbx) \
+ V(rdx) \
+ V(rcx) \
+ V(rsi) \
+ V(rdi) \
+ V(r8) \
+ V(r9) \
+ V(r11) \
+ V(r12) \
V(r15)
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#define MAYBE_ALLOCATABLE_GENERAL_REGISTERS(V)
+#else
+#define MAYBE_ALLOCATABLE_GENERAL_REGISTERS(V) V(r14)
+#endif
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(V) \
+ MAYBE_ALLOCATABLE_GENERAL_REGISTERS(V)
+
enum RegisterCode {
#define REGISTER_CODE(R) kRegCode_##R,
GENERAL_REGISTERS(REGISTER_CODE)
@@ -146,7 +155,12 @@ constexpr Register arg_reg_4 = rcx;
V(xmm13) \
V(xmm14)
-constexpr bool kPadArguments = false;
+// Returns the number of padding slots needed for stack pointer alignment.
+constexpr int ArgumentPaddingSlots(int argument_count) {
+ // No argument padding required.
+ return 0;
+}
+
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
@@ -201,7 +215,7 @@ constexpr Register kAllocateSizeRegister = rdx;
constexpr Register kSpeculationPoisonRegister = r12;
constexpr Register kInterpreterAccumulatorRegister = rax;
constexpr Register kInterpreterBytecodeOffsetRegister = r9;
-constexpr Register kInterpreterBytecodeArrayRegister = r14;
+constexpr Register kInterpreterBytecodeArrayRegister = r12;
constexpr Register kInterpreterDispatchTableRegister = r15;
constexpr Register kJavaScriptCallArgCountRegister = rax;
@@ -221,6 +235,11 @@ constexpr Register kWasmInstanceRegister = rsi;
constexpr Register kScratchRegister = r10;
constexpr XMMRegister kScratchDoubleReg = xmm15;
constexpr Register kRootRegister = r13; // callee save
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+constexpr Register kPointerCageBaseRegister = r14; // callee save
+#else
+constexpr Register kPointerCageBaseRegister = kRootRegister;
+#endif
constexpr Register kOffHeapTrampolineRegister = kScratchRegister;
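Reserving r14 as kPointerCageBaseRegister here pairs with the DecompressTaggedPointer/DecompressAnyTagged changes in macro-assembler-x64.cc: a compressed tagged value is a 32-bit offset that is re-based against the cage base register (the isolate root in r13 by default, r14 when a shared cage is configured). A scalar sketch of that decompression (the cage base value is illustrative):

#include <cassert>
#include <cstdint>

// Model of movl + addq kPointerCageBaseRegister: zero-extend the 32-bit
// compressed value and add the pointer-compression cage base.
uint64_t DecompressTagged(uint64_t cage_base, uint32_t compressed) {
  return cage_base + static_cast<uint64_t>(compressed);
}

int main() {
  const uint64_t kCageBase = 0x0000100000000000ull;  // illustrative
  assert(DecompressTagged(kCageBase, 0x00042195u) == 0x0000100000042195ull);
  return 0;
}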
diff --git a/deps/v8/src/common/external-pointer-inl.h b/deps/v8/src/common/external-pointer-inl.h
index 070d787b637..bc7aea3691b 100644
--- a/deps/v8/src/common/external-pointer-inl.h
+++ b/deps/v8/src/common/external-pointer-inl.h
@@ -12,11 +12,17 @@
namespace v8 {
namespace internal {
-V8_INLINE Address DecodeExternalPointer(IsolateRoot isolate_root,
+V8_INLINE Address DecodeExternalPointer(PtrComprCageBase isolate_root,
ExternalPointer_t encoded_pointer,
ExternalPointerTag tag) {
STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
#ifdef V8_HEAP_SANDBOX
+
+ // TODO(syg): V8_HEAP_SANDBOX doesn't work with pointer cage
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#error "V8_HEAP_SANDBOX requires per-Isolate pointer compression cage"
+#endif
+
uint32_t index = static_cast<uint32_t>(encoded_pointer);
const Isolate* isolate = Isolate::FromRootAddress(isolate_root.address());
return isolate->external_pointer_table().get(index) ^ tag;
@@ -62,7 +68,7 @@ V8_INLINE void InitExternalPointerField(Address field_address, Isolate* isolate,
}
V8_INLINE Address ReadExternalPointerField(Address field_address,
- IsolateRoot isolate_root,
+ PtrComprCageBase cage_base,
ExternalPointerTag tag) {
// Pointer compression causes types larger than kTaggedSize to be unaligned.
constexpr bool v8_pointer_compression_unaligned =
@@ -73,7 +79,7 @@ V8_INLINE Address ReadExternalPointerField(Address field_address,
} else {
encoded_value = base::Memory<ExternalPointer_t>(field_address);
}
- return DecodeExternalPointer(isolate_root, encoded_value, tag);
+ return DecodeExternalPointer(cage_base, encoded_value, tag);
}
V8_INLINE void WriteExternalPointerField(Address field_address,
diff --git a/deps/v8/src/common/external-pointer.h b/deps/v8/src/common/external-pointer.h
index 5a380df7628..c0941f29782 100644
--- a/deps/v8/src/common/external-pointer.h
+++ b/deps/v8/src/common/external-pointer.h
@@ -12,7 +12,7 @@ namespace internal {
// Convert external pointer from on-V8-heap representation to an actual external
// pointer value.
-V8_INLINE Address DecodeExternalPointer(IsolateRoot isolate,
+V8_INLINE Address DecodeExternalPointer(PtrComprCageBase isolate,
ExternalPointer_t encoded_pointer,
ExternalPointerTag tag);
@@ -34,7 +34,7 @@ V8_INLINE void InitExternalPointerField(Address field_address, Isolate* isolate,
// Reads external pointer for the field, and decodes it if the sandbox is
// enabled.
V8_INLINE Address ReadExternalPointerField(Address field_address,
- IsolateRoot isolate,
+ PtrComprCageBase isolate,
ExternalPointerTag tag);
// Encodes value if the sandbox is enabled and writes it into the field.
diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h
index 5b9dd0923f9..f51c3210f8c 100644
--- a/deps/v8/src/common/globals.h
+++ b/deps/v8/src/common/globals.h
@@ -99,11 +99,24 @@ STATIC_ASSERT(V8_DEFAULT_STACK_SIZE_KB* KB +
kStackLimitSlackForDeoptimizationInBytes <=
MB);
+// Determine whether the short builtin calls optimization is enabled.
+#ifdef V8_SHORT_BUILTIN_CALLS
+#ifndef V8_COMPRESS_POINTERS
+// TODO(11527): Fix this by passing Isolate* to Code::OffHeapInstructionStart()
+// and friends.
+#error Short builtin calls feature requires pointer compression
+#endif
+#endif
+
+// This constant is used for detecting whether the machine has >= 4GB of
+// physical memory by checking the max old space size.
+const size_t kShortBuiltinCallsOldSpaceSizeThreshold = size_t{2} * GB;
+
// Determine whether dict mode prototypes feature is enabled.
-#ifdef V8_DICT_MODE_PROTOTYPES
-#define V8_DICT_MODE_PROTOTYPES_BOOL true
+#ifdef V8_ENABLE_SWISS_NAME_DICTIONARY
+#define V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL true
#else
-#define V8_DICT_MODE_PROTOTYPES_BOOL false
+#define V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL false
#endif
// Determine whether dict property constness tracking feature is enabled.
@@ -1694,7 +1707,9 @@ enum IcCheckType { ELEMENT, PROPERTY };
// without going through the on-heap Code trampoline.
enum class StubCallMode {
kCallCodeObject,
+#if V8_ENABLE_WEBASSEMBLY
kCallWasmRuntimeStub,
+#endif // V8_ENABLE_WEBASSEMBLY
kCallBuiltinPointer,
};
@@ -1733,13 +1748,13 @@ enum class DynamicCheckMapsStatus : uint8_t {
};
#ifdef V8_COMPRESS_POINTERS
-class IsolateRoot {
+class PtrComprCageBase {
public:
- explicit constexpr IsolateRoot(Address address) : address_(address) {}
+ explicit constexpr PtrComprCageBase(Address address) : address_(address) {}
// NOLINTNEXTLINE
- inline IsolateRoot(const Isolate* isolate);
+ inline PtrComprCageBase(const Isolate* isolate);
// NOLINTNEXTLINE
- inline IsolateRoot(const LocalIsolate* isolate);
+ inline PtrComprCageBase(const LocalIsolate* isolate);
inline Address address() const;
@@ -1747,13 +1762,13 @@ class IsolateRoot {
Address address_;
};
#else
-class IsolateRoot {
+class PtrComprCageBase {
public:
- IsolateRoot() = default;
+ PtrComprCageBase() = default;
// NOLINTNEXTLINE
- IsolateRoot(const Isolate* isolate) {}
+ PtrComprCageBase(const Isolate* isolate) {}
// NOLINTNEXTLINE
- IsolateRoot(const LocalIsolate* isolate) {}
+ PtrComprCageBase(const LocalIsolate* isolate) {}
};
#endif
diff --git a/deps/v8/src/common/message-template.h b/deps/v8/src/common/message-template.h
index ff75adea90a..5193a0bae3e 100644
--- a/deps/v8/src/common/message-template.h
+++ b/deps/v8/src/common/message-template.h
@@ -474,7 +474,6 @@ namespace internal {
T(BadSetterRestParameter, \
"Setter function argument must not be a rest parameter") \
T(ParamDupe, "Duplicate parameter name not allowed in this context") \
- T(ParenthesisInArgString, "Function arg string contains parenthesis") \
T(ArgStringTerminatesParametersEarly, \
"Arg string terminates parameters early") \
T(UnexpectedEndOfArgString, "Unexpected end of arg string") \
diff --git a/deps/v8/src/common/ptr-compr-inl.h b/deps/v8/src/common/ptr-compr-inl.h
index f74c4d82c95..66c22311b02 100644
--- a/deps/v8/src/common/ptr-compr-inl.h
+++ b/deps/v8/src/common/ptr-compr-inl.h
@@ -15,15 +15,35 @@ namespace internal {
#ifdef V8_COMPRESS_POINTERS
-IsolateRoot::IsolateRoot(const Isolate* isolate)
+#if defined V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
+
+PtrComprCageBase::PtrComprCageBase(const Isolate* isolate)
: address_(isolate->isolate_root()) {}
-IsolateRoot::IsolateRoot(const LocalIsolate* isolate)
+PtrComprCageBase::PtrComprCageBase(const LocalIsolate* isolate)
: address_(isolate->isolate_root()) {}
-Address IsolateRoot::address() const {
+#elif defined V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+
+PtrComprCageBase::PtrComprCageBase(const Isolate* isolate)
+ : address_(isolate->isolate_root()) {
+ UNIMPLEMENTED();
+}
+PtrComprCageBase::PtrComprCageBase(const LocalIsolate* isolate)
+ : address_(isolate->isolate_root()) {
+ UNIMPLEMENTED();
+}
+
+#else
+
+#error "Pointer compression build configuration error"
+
+#endif // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE,
+ // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+
+Address PtrComprCageBase::address() const {
Address ret = address_;
ret = reinterpret_cast<Address>(V8_ASSUME_ALIGNED(
- reinterpret_cast<void*>(ret), kPtrComprIsolateRootAlignment));
+ reinterpret_cast<void*>(ret), kPtrComprCageBaseAlignment));
return ret;
}
@@ -33,12 +53,17 @@ V8_INLINE Tagged_t CompressTagged(Address tagged) {
return static_cast<Tagged_t>(static_cast<uint32_t>(tagged));
}
-V8_INLINE constexpr Address GetIsolateRootAddress(Address on_heap_addr) {
- return RoundDown<kPtrComprIsolateRootAlignment>(on_heap_addr);
+V8_INLINE constexpr Address GetPtrComprCageBaseAddress(Address on_heap_addr) {
+ return RoundDown<kPtrComprCageBaseAlignment>(on_heap_addr);
+}
+
+V8_INLINE Address GetPtrComprCageBaseAddress(PtrComprCageBase cage_base) {
+ return cage_base.address();
}
-V8_INLINE Address GetIsolateRootAddress(IsolateRoot isolate) {
- return isolate.address();
+V8_INLINE constexpr PtrComprCageBase GetPtrComprCageBaseFromOnHeapAddress(
+ Address address) {
+ return PtrComprCageBase(GetPtrComprCageBaseAddress(address));
}
// Decompresses smi value.
@@ -52,7 +77,8 @@ V8_INLINE Address DecompressTaggedSigned(Tagged_t raw_value) {
template <typename TOnHeapAddress>
V8_INLINE Address DecompressTaggedPointer(TOnHeapAddress on_heap_addr,
Tagged_t raw_value) {
- return GetIsolateRootAddress(on_heap_addr) + static_cast<Address>(raw_value);
+ return GetPtrComprCageBaseAddress(on_heap_addr) +
+ static_cast<Address>(raw_value);
}
// Decompresses any tagged value, preserving both weak- and smi- tags.
@@ -62,18 +88,19 @@ V8_INLINE Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
return DecompressTaggedPointer(on_heap_addr, raw_value);
}
-STATIC_ASSERT(kPtrComprHeapReservationSize ==
- Internals::kPtrComprHeapReservationSize);
-STATIC_ASSERT(kPtrComprIsolateRootAlignment ==
- Internals::kPtrComprIsolateRootAlignment);
+STATIC_ASSERT(kPtrComprCageReservationSize ==
+ Internals::kPtrComprCageReservationSize);
+STATIC_ASSERT(kPtrComprCageBaseAlignment ==
+ Internals::kPtrComprCageBaseAlignment);
#else
V8_INLINE Tagged_t CompressTagged(Address tagged) { UNREACHABLE(); }
-V8_INLINE Address GetIsolateRootAddress(Address on_heap_addr) { UNREACHABLE(); }
-
-V8_INLINE Address GetIsolateRootAddress(IsolateRoot isolate) { UNREACHABLE(); }
+V8_INLINE constexpr PtrComprCageBase GetPtrComprCageBaseFromOnHeapAddress(
+ Address address) {
+ return PtrComprCageBase();
+}
V8_INLINE Address DecompressTaggedSigned(Tagged_t raw_value) { UNREACHABLE(); }
@@ -90,6 +117,11 @@ V8_INLINE Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
}
#endif // V8_COMPRESS_POINTERS
+
+inline PtrComprCageBase GetPtrComprCageBase(HeapObject object) {
+ return GetPtrComprCageBaseFromOnHeapAddress(object.ptr());
+}
+
} // namespace internal
} // namespace v8
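Editorial note, not part of the diff: the rename from IsolateRoot to PtrComprCageBase above does not change the underlying arithmetic. A compressed pointer is the low 32 bits of the full address, the cage base is any on-heap address rounded down to the 4 GB alignment, and decompression adds the two back together. A minimal sketch of that arithmetic, with plain integer types standing in for V8's Address and Tagged_t:

#include <cstdint>

using Address = std::uint64_t;
using Tagged_t = std::uint32_t;

// 4 GB, matching kPtrComprCageBaseAlignment in ptr-compr.h.
constexpr Address kCageBaseAlignment = Address{4} * 1024 * 1024 * 1024;

// Compression keeps only the low 32 bits of the tagged pointer.
constexpr Tagged_t CompressTagged(Address tagged) {
  return static_cast<Tagged_t>(tagged);
}

// The cage base is any on-heap address rounded down to the 4 GB boundary.
constexpr Address GetCageBase(Address on_heap_addr) {
  return on_heap_addr & ~(kCageBaseAlignment - 1);
}

// Decompression adds the 32-bit offset back onto the cage base.
constexpr Address DecompressTaggedPointer(Address on_heap_addr,
                                          Tagged_t raw_value) {
  return GetCageBase(on_heap_addr) + static_cast<Address>(raw_value);
}

int main() {
  constexpr Address field = 0x1'0000'2000;  // some address inside the cage
  constexpr Tagged_t compressed = CompressTagged(0x1'0000'4000);
  static_assert(DecompressTaggedPointer(field, compressed) == 0x1'0000'4000,
                "round-trips within the same 4 GB cage");
  return 0;
}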
diff --git a/deps/v8/src/common/ptr-compr.h b/deps/v8/src/common/ptr-compr.h
index 0c82c2328cf..1d5668208a4 100644
--- a/deps/v8/src/common/ptr-compr.h
+++ b/deps/v8/src/common/ptr-compr.h
@@ -13,8 +13,8 @@ namespace v8 {
namespace internal {
// See v8:7703 for details about how pointer compression works.
-constexpr size_t kPtrComprHeapReservationSize = size_t{4} * GB;
-constexpr size_t kPtrComprIsolateRootAlignment = size_t{4} * GB;
+constexpr size_t kPtrComprCageReservationSize = size_t{4} * GB;
+constexpr size_t kPtrComprCageBaseAlignment = size_t{4} * GB;
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler-dispatcher/OWNERS b/deps/v8/src/compiler-dispatcher/OWNERS
index 7bc22f16625..6b3eadf8012 100644
--- a/deps/v8/src/compiler-dispatcher/OWNERS
+++ b/deps/v8/src/compiler-dispatcher/OWNERS
@@ -1,4 +1,3 @@
-ahaas@chromium.org
jkummerow@chromium.org
leszeks@chromium.org
rmcilroy@chromium.org
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index 8bcb609f1ba..73292dd5ad6 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -31,10 +31,6 @@ void DisposeCompilationJob(OptimizedCompilationJob* job,
if (function->IsInOptimizationQueue()) {
function->ClearOptimizationMarker();
}
- // TODO(mvstanton): We can't call EnsureFeedbackVector here due to
- // allocation, but we probably shouldn't call set_code either, as this
- // sometimes runs on the worker thread!
- // JSFunction::EnsureFeedbackVector(function);
}
delete job;
}
@@ -50,7 +46,6 @@ class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
worker_thread_runtime_call_stats_(
isolate->counters()->worker_thread_runtime_call_stats()),
dispatcher_(dispatcher) {
- base::MutexGuard lock_guard(&dispatcher_->ref_count_mutex_);
++dispatcher_->ref_count_;
}
@@ -62,12 +57,13 @@ class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
private:
// v8::Task overrides.
void RunInternal() override {
- LocalIsolate local_isolate(isolate_, ThreadKind::kBackground);
+ WorkerThreadRuntimeCallStatsScope runtime_call_stats_scope(
+ worker_thread_runtime_call_stats_);
+ LocalIsolate local_isolate(isolate_, ThreadKind::kBackground,
+ runtime_call_stats_scope.Get());
DCHECK(local_isolate.heap()->IsParked());
{
- WorkerThreadRuntimeCallStatsScope runtime_call_stats_scope(
- worker_thread_runtime_call_stats_);
RuntimeCallTimerScope runtimeTimer(
runtime_call_stats_scope.Get(),
RuntimeCallCounterId::kOptimizeBackgroundDispatcherJob);
@@ -81,7 +77,7 @@ class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
dispatcher_->recompilation_delay_));
}
- dispatcher_->CompileNext(dispatcher_->NextInput(&local_isolate, true),
+ dispatcher_->CompileNext(dispatcher_->NextInput(&local_isolate),
runtime_call_stats_scope.Get(), &local_isolate);
}
{
@@ -98,34 +94,19 @@ class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
};
OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
-#ifdef DEBUG
- {
- base::MutexGuard lock_guard(&ref_count_mutex_);
- DCHECK_EQ(0, ref_count_);
- }
-#endif
+ DCHECK_EQ(0, ref_count_);
DCHECK_EQ(0, input_queue_length_);
DeleteArray(input_queue_);
}
OptimizedCompilationJob* OptimizingCompileDispatcher::NextInput(
- LocalIsolate* local_isolate, bool check_if_flushing) {
+ LocalIsolate* local_isolate) {
base::MutexGuard access_input_queue_(&input_queue_mutex_);
if (input_queue_length_ == 0) return nullptr;
OptimizedCompilationJob* job = input_queue_[InputQueueIndex(0)];
DCHECK_NOT_NULL(job);
input_queue_shift_ = InputQueueIndex(1);
input_queue_length_--;
- if (check_if_flushing) {
- if (mode_ == FLUSH) {
- UnparkedScope scope(local_isolate->heap());
- local_isolate->heap()->AttachPersistentHandles(
- job->compilation_info()->DetachPersistentHandles());
- DisposeCompilationJob(job, true);
- local_isolate->heap()->DetachPersistentHandles();
- return nullptr;
- }
- }
return job;
}
@@ -163,49 +144,42 @@ void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
}
}
-void OptimizingCompileDispatcher::Flush(BlockingBehavior blocking_behavior) {
- if (blocking_behavior == BlockingBehavior::kDontBlock) {
- if (FLAG_block_concurrent_recompilation) Unblock();
- base::MutexGuard access_input_queue_(&input_queue_mutex_);
- while (input_queue_length_ > 0) {
- OptimizedCompilationJob* job = input_queue_[InputQueueIndex(0)];
- DCHECK_NOT_NULL(job);
- input_queue_shift_ = InputQueueIndex(1);
- input_queue_length_--;
- DisposeCompilationJob(job, true);
- }
- FlushOutputQueue(true);
- if (FLAG_trace_concurrent_recompilation) {
- PrintF(" ** Flushed concurrent recompilation queues (not blocking).\n");
- }
- return;
+void OptimizingCompileDispatcher::FlushInputQueue() {
+ base::MutexGuard access_input_queue_(&input_queue_mutex_);
+ while (input_queue_length_ > 0) {
+ OptimizedCompilationJob* job = input_queue_[InputQueueIndex(0)];
+ DCHECK_NOT_NULL(job);
+ input_queue_shift_ = InputQueueIndex(1);
+ input_queue_length_--;
+ DisposeCompilationJob(job, true);
}
- mode_ = FLUSH;
+}
+
+void OptimizingCompileDispatcher::FlushQueues(
+ BlockingBehavior blocking_behavior, bool restore_function_code) {
if (FLAG_block_concurrent_recompilation) Unblock();
- {
+ FlushInputQueue();
+ if (blocking_behavior == BlockingBehavior::kBlock) {
base::MutexGuard lock_guard(&ref_count_mutex_);
while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
- mode_ = COMPILE;
}
- FlushOutputQueue(true);
+ FlushOutputQueue(restore_function_code);
+}
+
+void OptimizingCompileDispatcher::Flush(BlockingBehavior blocking_behavior) {
+ FlushQueues(blocking_behavior, true);
if (FLAG_trace_concurrent_recompilation) {
- PrintF(" ** Flushed concurrent recompilation queues.\n");
+ PrintF(" ** Flushed concurrent recompilation queues. (mode: %s)\n",
+ (blocking_behavior == BlockingBehavior::kBlock) ? "blocking"
+ : "non blocking");
}
}
void OptimizingCompileDispatcher::Stop() {
- mode_ = FLUSH;
- if (FLAG_block_concurrent_recompilation) Unblock();
- {
- base::MutexGuard lock_guard(&ref_count_mutex_);
- while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
- mode_ = COMPILE;
- }
-
+ FlushQueues(BlockingBehavior::kBlock, false);
// At this point the optimizing compiler thread's event loop has stopped.
// There is no need for a mutex when reading input_queue_length_.
DCHECK_EQ(input_queue_length_, 0);
- FlushOutputQueue(false);
}
void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
@@ -234,6 +208,14 @@ void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
}
}
+bool OptimizingCompileDispatcher::HasJobs() {
+ DCHECK_EQ(ThreadId::Current(), isolate_->thread_id());
+ // Note: This relies on {output_queue_} being mutated by a background thread
+ // only when {ref_count_} is not zero. Also, {ref_count_} is never incremented
+ // by a background thread.
+ return !(ref_count_ == 0 && output_queue_.empty());
+}
+
void OptimizingCompileDispatcher::QueueForOptimization(
OptimizedCompilationJob* job) {
DCHECK(IsQueueAvailable());
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
index 36f285d1631..37454c67e0d 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
@@ -30,7 +30,6 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
input_queue_capacity_(FLAG_concurrent_recompilation_queue_length),
input_queue_length_(0),
input_queue_shift_(0),
- mode_(COMPILE),
blocked_jobs_(0),
ref_count_(0),
recompilation_delay_(FLAG_concurrent_recompilation_delay) {
@@ -53,16 +52,21 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
static bool Enabled() { return FLAG_concurrent_recompilation; }
+ // This method must be called on the main thread.
+ bool HasJobs();
+
private:
class CompileTask;
enum ModeFlag { COMPILE, FLUSH };
+ void FlushQueues(BlockingBehavior blocking_behavior,
+ bool restore_function_code);
+ void FlushInputQueue();
void FlushOutputQueue(bool restore_function_code);
void CompileNext(OptimizedCompilationJob* job, RuntimeCallStats* stats,
LocalIsolate* local_isolate);
- OptimizedCompilationJob* NextInput(LocalIsolate* local_isolate,
- bool check_if_flushing = false);
+ OptimizedCompilationJob* NextInput(LocalIsolate* local_isolate);
inline int InputQueueIndex(int i) {
int result = (i + input_queue_shift_) % input_queue_capacity_;
@@ -86,11 +90,9 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
// different threads.
base::Mutex output_queue_mutex_;
- std::atomic<ModeFlag> mode_;
-
int blocked_jobs_;
- int ref_count_;
+ std::atomic<int> ref_count_;
base::Mutex ref_count_mutex_;
base::ConditionVariable ref_count_zero_;
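Editorial note, not part of the diff: the dispatcher changes above drop the FLUSH/COMPILE mode flag in favour of an atomic ref_count_ that each background CompileTask increments, plus a condition variable the main thread waits on when a blocking flush is requested. The standalone sketch below shows the same wait-until-zero pattern, using the standard library in place of v8::base.

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

class Dispatcher {
 public:
  // Called on the main thread before a background task is dispatched.
  void TaskStarted() { ++ref_count_; }

  // Called by a background task when it finishes.
  void TaskFinished() {
    std::lock_guard<std::mutex> lock(ref_count_mutex_);
    if (--ref_count_ == 0) ref_count_zero_.notify_all();
  }

  // Blocks until every in-flight background task has finished.
  void FlushBlocking() {
    std::unique_lock<std::mutex> lock(ref_count_mutex_);
    ref_count_zero_.wait(lock, [this] { return ref_count_ == 0; });
  }

 private:
  std::atomic<int> ref_count_{0};
  std::mutex ref_count_mutex_;
  std::condition_variable ref_count_zero_;
};

int main() {
  Dispatcher dispatcher;
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; ++i) {
    dispatcher.TaskStarted();
    workers.emplace_back([&dispatcher] { dispatcher.TaskFinished(); });
  }
  dispatcher.FlushBlocking();  // Returns once ref_count_ drops to zero.
  for (auto& worker : workers) worker.join();
  return 0;
}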
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index 6175ef3e063..5260502f32a 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -1,16 +1,13 @@
-bmeurer@chromium.org
-danno@chromium.org
-sigurds@chromium.org
-neis@chromium.org
-mvstanton@chromium.org
-mslekova@chromium.org
jgruber@chromium.org
+mslekova@chromium.org
+mvstanton@chromium.org
+neis@chromium.org
nicohartmann@chromium.org
+sigurds@chromium.org
solanes@chromium.org
per-file wasm-*=ahaas@chromium.org
per-file wasm-*=bbudge@chromium.org
-per-file wasm-*=binji@chromium.org
per-file wasm-*=clemensb@chromium.org
per-file wasm-*=gdeepti@chromium.org
per-file wasm-*=jkummerow@chromium.org
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 06806feb420..bc3f113eb79 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -27,17 +27,27 @@ namespace compiler {
namespace {
-bool CanInlinePropertyAccess(Handle<Map> map) {
+bool CanInlinePropertyAccess(Handle<Map> map, AccessMode access_mode) {
// We can inline property access to prototypes of all primitives, except
// the special Oddball ones that have no wrapper counterparts (i.e. Null,
// Undefined and TheHole).
+ // We can only inline accesses to dictionary mode holders if the access is a
+ // load and the holder is a prototype. The latter ensures a 1:1
+ // relationship between the map and the object (and therefore the property
+ // dictionary).
STATIC_ASSERT(ODDBALL_TYPE == LAST_PRIMITIVE_HEAP_OBJECT_TYPE);
if (map->IsBooleanMap()) return true;
if (map->instance_type() < LAST_PRIMITIVE_HEAP_OBJECT_TYPE) return true;
- return map->IsJSObjectMap() && !map->is_dictionary_map() &&
- !map->has_named_interceptor() &&
- // TODO(verwaest): Allowlist contexts to which we have access.
- !map->is_access_check_needed();
+ if (map->IsJSObjectMap()) {
+ if (map->is_dictionary_map()) {
+ if (!V8_DICT_PROPERTY_CONST_TRACKING_BOOL) return false;
+ return access_mode == AccessMode::kLoad && map->is_prototype_map();
+ }
+ return !map->has_named_interceptor() &&
+ // TODO(verwaest): Allowlist contexts to which we have access.
+ !map->is_access_check_needed();
+ }
+ return false;
}
#ifdef DEBUG
@@ -108,24 +118,24 @@ PropertyAccessInfo PropertyAccessInfo::DataField(
}
// static
-PropertyAccessInfo PropertyAccessInfo::DataConstant(
+PropertyAccessInfo PropertyAccessInfo::FastDataConstant(
Zone* zone, Handle<Map> receiver_map,
ZoneVector<CompilationDependency const*>&& dependencies,
FieldIndex field_index, Representation field_representation,
Type field_type, Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map) {
- return PropertyAccessInfo(kDataConstant, holder, transition_map, field_index,
- field_representation, field_type, field_owner_map,
- field_map, {{receiver_map}, zone},
+ return PropertyAccessInfo(kFastDataConstant, holder, transition_map,
+ field_index, field_representation, field_type,
+ field_owner_map, field_map, {{receiver_map}, zone},
std::move(dependencies));
}
// static
-PropertyAccessInfo PropertyAccessInfo::AccessorConstant(
+PropertyAccessInfo PropertyAccessInfo::FastAccessorConstant(
Zone* zone, Handle<Map> receiver_map, Handle<Object> constant,
MaybeHandle<JSObject> holder) {
- return PropertyAccessInfo(zone, kAccessorConstant, holder, constant,
- {{receiver_map}, zone});
+ return PropertyAccessInfo(zone, kFastAccessorConstant, holder, constant,
+ MaybeHandle<Name>(), {{receiver_map}, zone});
}
// static
@@ -133,7 +143,7 @@ PropertyAccessInfo PropertyAccessInfo::ModuleExport(Zone* zone,
Handle<Map> receiver_map,
Handle<Cell> cell) {
return PropertyAccessInfo(zone, kModuleExport, MaybeHandle<JSObject>(), cell,
- {{receiver_map}, zone});
+ MaybeHandle<Name>{}, {{receiver_map}, zone});
}
// static
@@ -144,6 +154,22 @@ PropertyAccessInfo PropertyAccessInfo::StringLength(Zone* zone,
}
// static
+PropertyAccessInfo PropertyAccessInfo::DictionaryProtoDataConstant(
+ Zone* zone, Handle<Map> receiver_map, Handle<JSObject> holder,
+ InternalIndex dictionary_index, Handle<Name> name) {
+ return PropertyAccessInfo(zone, kDictionaryProtoDataConstant, holder,
+ {{receiver_map}, zone}, dictionary_index, name);
+}
+
+// static
+PropertyAccessInfo PropertyAccessInfo::DictionaryProtoAccessorConstant(
+ Zone* zone, Handle<Map> receiver_map, MaybeHandle<JSObject> holder,
+ Handle<Object> constant, Handle<Name> property_name) {
+ return PropertyAccessInfo(zone, kDictionaryProtoAccessorConstant, holder,
+ constant, property_name, {{receiver_map}, zone});
+}
+
+// static
MinimorphicLoadPropertyAccessInfo MinimorphicLoadPropertyAccessInfo::DataField(
int offset, bool is_inobject, Representation field_representation,
Type field_type) {
@@ -162,29 +188,36 @@ PropertyAccessInfo::PropertyAccessInfo(Zone* zone)
lookup_start_object_maps_(zone),
unrecorded_dependencies_(zone),
field_representation_(Representation::None()),
- field_type_(Type::None()) {}
+ field_type_(Type::None()),
+ dictionary_index_(InternalIndex::NotFound()) {}
PropertyAccessInfo::PropertyAccessInfo(
Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
ZoneVector<Handle<Map>>&& lookup_start_object_maps)
: kind_(kind),
lookup_start_object_maps_(lookup_start_object_maps),
- unrecorded_dependencies_(zone),
holder_(holder),
+ unrecorded_dependencies_(zone),
field_representation_(Representation::None()),
- field_type_(Type::None()) {}
+ field_type_(Type::None()),
+ dictionary_index_(InternalIndex::NotFound()) {}
PropertyAccessInfo::PropertyAccessInfo(
Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
- Handle<Object> constant, ZoneVector<Handle<Map>>&& lookup_start_object_maps)
+ Handle<Object> constant, MaybeHandle<Name> property_name,
+ ZoneVector<Handle<Map>>&& lookup_start_object_maps)
: kind_(kind),
lookup_start_object_maps_(lookup_start_object_maps),
- unrecorded_dependencies_(zone),
constant_(constant),
holder_(holder),
+ unrecorded_dependencies_(zone),
field_representation_(Representation::None()),
- field_type_(Type::Any()) {}
-
+ field_type_(Type::Any()),
+ dictionary_index_(InternalIndex::NotFound()),
+ name_(property_name) {
+ DCHECK_IMPLIES(kind == kDictionaryProtoAccessorConstant,
+ !property_name.is_null());
+}
PropertyAccessInfo::PropertyAccessInfo(
Kind kind, MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map,
FieldIndex field_index, Representation field_representation,
@@ -193,18 +226,32 @@ PropertyAccessInfo::PropertyAccessInfo(
ZoneVector<CompilationDependency const*>&& unrecorded_dependencies)
: kind_(kind),
lookup_start_object_maps_(lookup_start_object_maps),
+ holder_(holder),
unrecorded_dependencies_(std::move(unrecorded_dependencies)),
transition_map_(transition_map),
- holder_(holder),
field_index_(field_index),
field_representation_(field_representation),
field_type_(field_type),
field_owner_map_(field_owner_map),
- field_map_(field_map) {
+ field_map_(field_map),
+ dictionary_index_(InternalIndex::NotFound()) {
DCHECK_IMPLIES(!transition_map.is_null(),
field_owner_map.address() == transition_map.address());
}
+PropertyAccessInfo::PropertyAccessInfo(
+ Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
+ ZoneVector<Handle<Map>>&& lookup_start_object_maps,
+ InternalIndex dictionary_index, Handle<Name> name)
+ : kind_(kind),
+ lookup_start_object_maps_(lookup_start_object_maps),
+ holder_(holder),
+ unrecorded_dependencies_(zone),
+ field_representation_(Representation::None()),
+ field_type_(Type::Any()),
+ dictionary_index_(dictionary_index),
+ name_{name} {}
+
MinimorphicLoadPropertyAccessInfo::MinimorphicLoadPropertyAccessInfo(
Kind kind, int offset, bool is_inobject,
Representation field_representation, Type field_type)
@@ -224,7 +271,7 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
return that->kind_ == kInvalid;
case kDataField:
- case kDataConstant: {
+ case kFastDataConstant: {
// Check if we actually access the same field (we use the
// GetFieldAccessStubKey method here just like the ICs do
// since that way we only compare the relevant bits of the
@@ -278,7 +325,8 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
return false;
}
- case kAccessorConstant: {
+ case kDictionaryProtoAccessorConstant:
+ case kFastAccessorConstant: {
// Check if we actually access the same constant.
if (this->constant_.address() == that->constant_.address()) {
DCHECK(this->unrecorded_dependencies_.empty());
@@ -292,6 +340,18 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
return false;
}
+ case kDictionaryProtoDataConstant: {
+ DCHECK_EQ(AccessMode::kLoad, access_mode);
+ if (this->dictionary_index_ == that->dictionary_index_) {
+ this->lookup_start_object_maps_.insert(
+ this->lookup_start_object_maps_.end(),
+ that->lookup_start_object_maps_.begin(),
+ that->lookup_start_object_maps_.end());
+ return true;
+ }
+ return false;
+ }
+
case kNotFound:
case kStringLength: {
DCHECK(this->unrecorded_dependencies_.empty());
@@ -308,7 +368,7 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
}
ConstFieldInfo PropertyAccessInfo::GetConstFieldInfo() const {
- if (IsDataConstant()) {
+ if (IsFastDataConstant()) {
return ConstFieldInfo(field_owner_map_.ToHandleChecked());
}
return ConstFieldInfo::None();
@@ -367,7 +427,7 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
Handle<Map> receiver_map, Handle<Map> map, MaybeHandle<JSObject> holder,
InternalIndex descriptor, AccessMode access_mode) const {
DCHECK(descriptor.is_found());
- Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(isolate()),
isolate());
PropertyDetails const details = descriptors->GetDetails(descriptor);
int index = descriptors->GetFieldIndex(descriptor);
@@ -449,7 +509,7 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
details_representation, field_type, field_owner_map, field_map,
holder);
case PropertyConstness::kConst:
- return PropertyAccessInfo::DataConstant(
+ return PropertyAccessInfo::FastDataConstant(
zone(), receiver_map, std::move(unrecorded_dependencies), field_index,
details_representation, field_type, field_owner_map, field_map,
holder);
@@ -457,54 +517,57 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
UNREACHABLE();
}
-PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo(
- Handle<Map> receiver_map, Handle<Name> name, Handle<Map> map,
- MaybeHandle<JSObject> holder, InternalIndex descriptor,
- AccessMode access_mode) const {
- DCHECK(descriptor.is_found());
- Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
- isolate());
- SLOW_DCHECK(descriptor == descriptors->Search(*name, *map));
+namespace {
+using AccessorsObjectGetter = std::function<Handle<Object>()>;
+
+PropertyAccessInfo AccessorAccessInfoHelper(
+ Isolate* isolate, Zone* zone, JSHeapBroker* broker,
+ const AccessInfoFactory* ai_factory, Handle<Map> receiver_map,
+ Handle<Name> name, Handle<Map> map, MaybeHandle<JSObject> holder,
+ AccessMode access_mode, AccessorsObjectGetter get_accessors) {
if (map->instance_type() == JS_MODULE_NAMESPACE_TYPE) {
DCHECK(map->is_prototype_map());
Handle<PrototypeInfo> proto_info(PrototypeInfo::cast(map->prototype_info()),
- isolate());
+ isolate);
Handle<JSModuleNamespace> module_namespace(
- JSModuleNamespace::cast(proto_info->module_namespace()), isolate());
+ JSModuleNamespace::cast(proto_info->module_namespace()), isolate);
Handle<Cell> cell(Cell::cast(module_namespace->module().exports().Lookup(
- isolate(), name, Smi::ToInt(name->GetHash()))),
- isolate());
- if (cell->value().IsTheHole(isolate())) {
+ isolate, name, Smi::ToInt(name->GetHash()))),
+ isolate);
+ if (cell->value().IsTheHole(isolate)) {
// This module has not been fully initialized yet.
- return PropertyAccessInfo::Invalid(zone());
+ return PropertyAccessInfo::Invalid(zone);
}
- return PropertyAccessInfo::ModuleExport(zone(), receiver_map, cell);
+ return PropertyAccessInfo::ModuleExport(zone, receiver_map, cell);
}
if (access_mode == AccessMode::kHas) {
+ // kHas is not supported for dictionary mode objects.
+ DCHECK(!map->is_dictionary_map());
+
// HasProperty checks don't call getter/setters, existence is sufficient.
- return PropertyAccessInfo::AccessorConstant(zone(), receiver_map,
- Handle<Object>(), holder);
+ return PropertyAccessInfo::FastAccessorConstant(zone, receiver_map,
+ Handle<Object>(), holder);
}
- Handle<Object> accessors(descriptors->GetStrongValue(descriptor), isolate());
+ Handle<Object> accessors = get_accessors();
if (!accessors->IsAccessorPair()) {
- return PropertyAccessInfo::Invalid(zone());
+ return PropertyAccessInfo::Invalid(zone);
}
Handle<Object> accessor(access_mode == AccessMode::kLoad
? Handle<AccessorPair>::cast(accessors)->getter()
: Handle<AccessorPair>::cast(accessors)->setter(),
- isolate());
+ isolate);
if (!accessor->IsJSFunction()) {
- CallOptimization optimization(isolate(), accessor);
+ CallOptimization optimization(isolate, accessor);
if (!optimization.is_simple_api_call() ||
optimization.IsCrossContextLazyAccessorPair(
- *broker()->target_native_context().object(), *map)) {
- return PropertyAccessInfo::Invalid(zone());
+ *broker->target_native_context().object(), *map)) {
+ return PropertyAccessInfo::Invalid(zone);
}
CallOptimization::HolderLookup lookup;
holder = optimization.LookupHolderOfExpectedType(receiver_map, &lookup);
if (lookup == CallOptimization::kHolderNotFound) {
- return PropertyAccessInfo::Invalid(zone());
+ return PropertyAccessInfo::Invalid(zone);
}
DCHECK_IMPLIES(lookup == CallOptimization::kHolderIsReceiver,
holder.is_null());
@@ -512,15 +575,66 @@ PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo(
}
if (access_mode == AccessMode::kLoad) {
Handle<Name> cached_property_name;
- if (FunctionTemplateInfo::TryGetCachedPropertyName(isolate(), accessor)
+ if (FunctionTemplateInfo::TryGetCachedPropertyName(isolate, accessor)
.ToHandle(&cached_property_name)) {
- PropertyAccessInfo access_info =
- ComputePropertyAccessInfo(map, cached_property_name, access_mode);
+ PropertyAccessInfo access_info = ai_factory->ComputePropertyAccessInfo(
+ map, cached_property_name, access_mode);
if (!access_info.IsInvalid()) return access_info;
}
}
- return PropertyAccessInfo::AccessorConstant(zone(), receiver_map, accessor,
- holder);
+ if (map->is_dictionary_map()) {
+ return PropertyAccessInfo::DictionaryProtoAccessorConstant(
+ zone, receiver_map, holder, accessor, name);
+ } else {
+ return PropertyAccessInfo::FastAccessorConstant(zone, receiver_map,
+ accessor, holder);
+ }
+}
+
+} // namespace
+
+PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo(
+ Handle<Map> receiver_map, Handle<Name> name, Handle<Map> holder_map,
+ MaybeHandle<JSObject> holder, InternalIndex descriptor,
+ AccessMode access_mode) const {
+ DCHECK(descriptor.is_found());
+ Handle<DescriptorArray> descriptors(
+ holder_map->instance_descriptors(kRelaxedLoad), isolate());
+ SLOW_DCHECK(descriptor == descriptors->Search(*name, *holder_map));
+
+ auto get_accessors = [&]() {
+ return handle(descriptors->GetStrongValue(descriptor), isolate());
+ };
+ return AccessorAccessInfoHelper(isolate(), zone(), broker(), this,
+ receiver_map, name, holder_map, holder,
+ access_mode, get_accessors);
+}
+
+PropertyAccessInfo AccessInfoFactory::ComputeDictionaryProtoAccessInfo(
+ Handle<Map> receiver_map, Handle<Name> name, Handle<JSObject> holder,
+ InternalIndex dictionary_index, AccessMode access_mode,
+ PropertyDetails details) const {
+ CHECK(V8_DICT_PROPERTY_CONST_TRACKING_BOOL);
+ DCHECK(holder->map().is_prototype_map());
+ DCHECK_EQ(access_mode, AccessMode::kLoad);
+
+ // We can only inline accesses to constant properties.
+ if (details.constness() != PropertyConstness::kConst) {
+ return PropertyAccessInfo::Invalid(zone());
+ }
+
+ if (details.kind() == PropertyKind::kData) {
+ return PropertyAccessInfo::DictionaryProtoDataConstant(
+ zone(), receiver_map, holder, dictionary_index, name);
+ }
+
+ auto get_accessors = [&]() {
+ return JSObject::DictionaryPropertyAt(holder, dictionary_index);
+ };
+ Handle<Map> holder_map = handle(holder->map(), isolate());
+ return AccessorAccessInfoHelper(isolate(), zone(), broker(), this,
+ receiver_map, name, holder_map, holder,
+ access_mode, get_accessors);
}
MinimorphicLoadPropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
@@ -537,16 +651,64 @@ MinimorphicLoadPropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
field_rep, field_type);
}
+bool AccessInfoFactory::TryLoadPropertyDetails(
+ Handle<Map> map, MaybeHandle<JSObject> maybe_holder, Handle<Name> name,
+ InternalIndex* index_out, PropertyDetails* details_out) const {
+ if (map->is_dictionary_map()) {
+ DCHECK(V8_DICT_PROPERTY_CONST_TRACKING_BOOL);
+ DCHECK(map->is_prototype_map());
+
+ DisallowGarbageCollection no_gc;
+
+ if (maybe_holder.is_null()) {
+ // TODO(v8:11457) In this situation, we have a dictionary mode prototype
+ // as a receiver. Consider other means of obtaining the holder in this
+ // situation.
+
+ // Without the holder, we can't get the property details.
+ return false;
+ }
+
+ Handle<JSObject> holder = maybe_holder.ToHandleChecked();
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ SwissNameDictionary dict = holder->property_dictionary_swiss();
+ *index_out = dict.FindEntry(isolate(), name);
+ if (index_out->is_found()) {
+ *details_out = dict.DetailsAt(*index_out);
+ }
+ } else {
+ NameDictionary dict = holder->property_dictionary();
+ *index_out = dict.FindEntry(isolate(), name);
+ if (index_out->is_found()) {
+ *details_out = dict.DetailsAt(*index_out);
+ }
+ }
+ } else {
+ DescriptorArray descriptors = map->instance_descriptors(kAcquireLoad);
+ *index_out =
+ descriptors.Search(*name, *map, broker()->is_concurrent_inlining());
+ if (index_out->is_found()) {
+ *details_out = descriptors.GetDetails(*index_out);
+ }
+ }
+
+ return true;
+}
+
PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
Handle<Map> map, Handle<Name> name, AccessMode access_mode) const {
CHECK(name->IsUniqueName());
+ base::SharedMutexGuardIf<base::kShared> mutex_guard(
+ isolate()->map_updater_access(), should_lock_mutex());
+ MapUpdaterMutexDepthScope mumd_scope(this);
+
if (access_mode == AccessMode::kHas && !map->IsJSReceiverMap()) {
return PropertyAccessInfo::Invalid(zone());
}
// Check if it is safe to inline property access for the {map}.
- if (!CanInlinePropertyAccess(map)) {
+ if (!CanInlinePropertyAccess(map, access_mode)) {
return PropertyAccessInfo::Invalid(zone());
}
@@ -556,19 +718,25 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
if (!access_info.IsInvalid()) return access_info;
}
+ // Only relevant if V8_DICT_PROPERTY_CONST_TRACKING enabled.
+ bool dictionary_prototype_on_chain = false;
+ bool fast_mode_prototype_on_chain = false;
+
// Remember the receiver map. We use {map} as loop variable.
Handle<Map> receiver_map = map;
MaybeHandle<JSObject> holder;
while (true) {
- // Lookup the named property on the {map}.
- Handle<DescriptorArray> descriptors(map->instance_descriptors(kAcquireLoad),
- isolate());
- InternalIndex const number =
- descriptors->Search(*name, *map, broker()->is_concurrent_inlining());
- if (number.is_found()) {
- PropertyDetails const details = descriptors->GetDetails(number);
+ PropertyDetails details = PropertyDetails::Empty();
+ InternalIndex index = InternalIndex::NotFound();
+ if (!TryLoadPropertyDetails(map, holder, name, &index, &details)) {
+ return PropertyAccessInfo::Invalid(zone());
+ }
+
+ if (index.is_found()) {
if (access_mode == AccessMode::kStore ||
access_mode == AccessMode::kStoreInLiteral) {
+ DCHECK(!map->is_dictionary_map());
+
// Don't bother optimizing stores to read-only properties.
if (details.IsReadOnly()) {
return PropertyAccessInfo::Invalid(zone());
@@ -581,9 +749,43 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
return LookupTransition(receiver_map, name, holder);
}
}
+ if (map->is_dictionary_map()) {
+ DCHECK(V8_DICT_PROPERTY_CONST_TRACKING_BOOL);
+
+ if (fast_mode_prototype_on_chain) {
+ // TODO(v8:11248) While the work on dictionary mode prototypes is in
+ // progress, we may still see fast mode objects on the chain prior to
+        // reaching a dictionary mode prototype holding the property. Due to
+        // this only being an intermediate state, we don't support these kinds
+        // of heterogeneous prototype chains.
+ return PropertyAccessInfo::Invalid(zone());
+ }
+
+ // TryLoadPropertyDetails only succeeds if we know the holder.
+ return ComputeDictionaryProtoAccessInfo(receiver_map, name,
+ holder.ToHandleChecked(), index,
+ access_mode, details);
+ }
+ if (dictionary_prototype_on_chain) {
+ // If V8_DICT_PROPERTY_CONST_TRACKING_BOOL was disabled, then a
+ // dictionary prototype would have caused a bailout earlier.
+ DCHECK(V8_DICT_PROPERTY_CONST_TRACKING_BOOL);
+
+ // TODO(v8:11248) We have a fast mode holder, but there was a dictionary
+ // mode prototype earlier on the chain. Note that seeing a fast mode
+ // prototype even though V8_DICT_PROPERTY_CONST_TRACKING is enabled
+ // should only be possible while the implementation of dictionary mode
+ // prototypes is work in progress. Eventually, enabling
+ // V8_DICT_PROPERTY_CONST_TRACKING will guarantee that all prototypes
+ // are always in dictionary mode, making this case unreachable. However,
+ // due to the complications of checking dictionary mode prototypes for
+ // modification, we don't attempt to support dictionary mode prototypes
+      // occurring before a fast mode holder on the chain.
+ return PropertyAccessInfo::Invalid(zone());
+ }
if (details.location() == kField) {
if (details.kind() == kData) {
- return ComputeDataFieldAccessInfo(receiver_map, map, holder, number,
+ return ComputeDataFieldAccessInfo(receiver_map, map, holder, index,
access_mode);
} else {
DCHECK_EQ(kAccessor, details.kind());
@@ -594,8 +796,9 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
DCHECK_EQ(kDescriptor, details.location());
DCHECK_EQ(kAccessor, details.kind());
return ComputeAccessorDescriptorAccessInfo(receiver_map, name, map,
- holder, number, access_mode);
+ holder, index, access_mode);
}
+
UNREACHABLE();
}
@@ -618,6 +821,17 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
return PropertyAccessInfo::Invalid(zone());
}
+ if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL && !holder.is_null()) {
+ // At this point, we are past the first loop iteration.
+ DCHECK(holder.ToHandleChecked()->map().is_prototype_map());
+ DCHECK_NE(holder.ToHandleChecked()->map(), *receiver_map);
+
+ fast_mode_prototype_on_chain =
+ fast_mode_prototype_on_chain || !map->is_dictionary_map();
+ dictionary_prototype_on_chain =
+ dictionary_prototype_on_chain || map->is_dictionary_map();
+ }
+
// Walk up the prototype chain.
MapRef(broker(), map).SerializePrototype();
// Acquire synchronously the map's prototype's map to guarantee that every
@@ -636,6 +850,15 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
handle(map->prototype().synchronized_map(), isolate());
DCHECK(map_prototype_map->IsJSObjectMap());
} else if (map->prototype().IsNull()) {
+ if (dictionary_prototype_on_chain) {
+ // TODO(v8:11248) See earlier comment about
+ // dictionary_prototype_on_chain. We don't support absent properties
+ // with dictionary mode prototypes on the chain, either. This is again
+ // just due to how we currently deal with dependencies for dictionary
+ // properties during finalization.
+ return PropertyAccessInfo::Invalid(zone());
+ }
+
// Store to property not found on the receiver or any prototype, we need
// to transition to a new data property.
// Implemented according to ES6 section 9.1.9 [[Set]] (P, V, Receiver)
@@ -655,14 +878,14 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
map = map_prototype_map;
CHECK(!map->is_deprecated());
- if (!CanInlinePropertyAccess(map)) {
+ if (!CanInlinePropertyAccess(map, access_mode)) {
return PropertyAccessInfo::Invalid(zone());
}
- // Successful lookup on prototype chain needs to guarantee that all
- // the prototypes up to the holder have stable maps. Let us make sure
- // the prototype maps are stable here.
- CHECK(map->is_stable());
+ // Successful lookup on prototype chain needs to guarantee that all the
+ // prototypes up to the holder have stable maps, except for dictionary-mode
+ // prototypes.
+ CHECK_IMPLIES(!map->is_dictionary_map(), map->is_stable());
}
UNREACHABLE();
}
@@ -894,14 +1117,14 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
// Transitioning stores *may* store to const fields. The resulting
// DataConstant access infos can be distinguished from later, i.e. redundant,
// stores to the same constant field by the presence of a transition map.
- switch (details.constness()) {
+ switch (dependencies()->DependOnFieldConstness(transition_map_ref, number)) {
case PropertyConstness::kMutable:
return PropertyAccessInfo::DataField(
zone(), map, std::move(unrecorded_dependencies), field_index,
details_representation, field_type, transition_map, field_map, holder,
transition_map);
case PropertyConstness::kConst:
- return PropertyAccessInfo::DataConstant(
+ return PropertyAccessInfo::FastDataConstant(
zone(), map, std::move(unrecorded_dependencies), field_index,
details_representation, field_type, transition_map, field_map, holder,
transition_map);
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index aa402fe6951..b430dacd3ae 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -67,8 +67,10 @@ class PropertyAccessInfo final {
kInvalid,
kNotFound,
kDataField,
- kDataConstant,
- kAccessorConstant,
+ kFastDataConstant,
+ kDictionaryProtoDataConstant,
+ kFastAccessorConstant,
+ kDictionaryProtoAccessorConstant,
kModuleExport,
kStringLength
};
@@ -83,21 +85,27 @@ class PropertyAccessInfo final {
MaybeHandle<Map> field_map = MaybeHandle<Map>(),
MaybeHandle<JSObject> holder = MaybeHandle<JSObject>(),
MaybeHandle<Map> transition_map = MaybeHandle<Map>());
- static PropertyAccessInfo DataConstant(
+ static PropertyAccessInfo FastDataConstant(
Zone* zone, Handle<Map> receiver_map,
ZoneVector<CompilationDependency const*>&& unrecorded_dependencies,
FieldIndex field_index, Representation field_representation,
Type field_type, Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map = MaybeHandle<Map>());
- static PropertyAccessInfo AccessorConstant(Zone* zone,
- Handle<Map> receiver_map,
- Handle<Object> constant,
- MaybeHandle<JSObject> holder);
+ static PropertyAccessInfo FastAccessorConstant(Zone* zone,
+ Handle<Map> receiver_map,
+ Handle<Object> constant,
+ MaybeHandle<JSObject> holder);
static PropertyAccessInfo ModuleExport(Zone* zone, Handle<Map> receiver_map,
Handle<Cell> cell);
static PropertyAccessInfo StringLength(Zone* zone, Handle<Map> receiver_map);
static PropertyAccessInfo Invalid(Zone* zone);
+ static PropertyAccessInfo DictionaryProtoDataConstant(
+ Zone* zone, Handle<Map> receiver_map, Handle<JSObject> holder,
+ InternalIndex dict_index, Handle<Name> name);
+ static PropertyAccessInfo DictionaryProtoAccessorConstant(
+ Zone* zone, Handle<Map> receiver_map, MaybeHandle<JSObject> holder,
+ Handle<Object> constant, Handle<Name> name);
bool Merge(PropertyAccessInfo const* that, AccessMode access_mode,
Zone* zone) V8_WARN_UNUSED_RESULT;
@@ -107,12 +115,24 @@ class PropertyAccessInfo final {
bool IsInvalid() const { return kind() == kInvalid; }
bool IsNotFound() const { return kind() == kNotFound; }
bool IsDataField() const { return kind() == kDataField; }
- bool IsDataConstant() const { return kind() == kDataConstant; }
- bool IsAccessorConstant() const { return kind() == kAccessorConstant; }
+ bool IsFastDataConstant() const { return kind() == kFastDataConstant; }
+ bool IsFastAccessorConstant() const {
+ return kind() == kFastAccessorConstant;
+ }
bool IsModuleExport() const { return kind() == kModuleExport; }
bool IsStringLength() const { return kind() == kStringLength; }
+ bool IsDictionaryProtoDataConstant() const {
+ return kind() == kDictionaryProtoDataConstant;
+ }
+ bool IsDictionaryProtoAccessorConstant() const {
+ return kind() == kDictionaryProtoAccessorConstant;
+ }
bool HasTransitionMap() const { return !transition_map().is_null(); }
+ bool HasDictionaryHolder() const {
+ return kind_ == kDictionaryProtoDataConstant ||
+ kind_ == kDictionaryProtoAccessorConstant;
+ }
ConstFieldInfo GetConstFieldInfo() const;
Kind kind() const { return kind_; }
@@ -122,22 +142,48 @@ class PropertyAccessInfo final {
// Find a more suitable place for it.
return holder_;
}
- MaybeHandle<Map> transition_map() const { return transition_map_; }
+ MaybeHandle<Map> transition_map() const {
+ DCHECK(!HasDictionaryHolder());
+ return transition_map_;
+ }
Handle<Object> constant() const { return constant_; }
- FieldIndex field_index() const { return field_index_; }
- Type field_type() const { return field_type_; }
- Representation field_representation() const { return field_representation_; }
- MaybeHandle<Map> field_map() const { return field_map_; }
+ FieldIndex field_index() const {
+ DCHECK(!HasDictionaryHolder());
+ return field_index_;
+ }
+
+ Type field_type() const {
+ DCHECK(!HasDictionaryHolder());
+ return field_type_;
+ }
+ Representation field_representation() const {
+ DCHECK(!HasDictionaryHolder());
+ return field_representation_;
+ }
+ MaybeHandle<Map> field_map() const {
+ DCHECK(!HasDictionaryHolder());
+ return field_map_;
+ }
ZoneVector<Handle<Map>> const& lookup_start_object_maps() const {
return lookup_start_object_maps_;
}
+ InternalIndex dictionary_index() const {
+ DCHECK(HasDictionaryHolder());
+ return dictionary_index_;
+ }
+
+ Handle<Name> name() const {
+ DCHECK(HasDictionaryHolder());
+ return name_.ToHandleChecked();
+ }
+
private:
explicit PropertyAccessInfo(Zone* zone);
PropertyAccessInfo(Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
ZoneVector<Handle<Map>>&& lookup_start_object_maps);
PropertyAccessInfo(Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
- Handle<Object> constant,
+ Handle<Object> constant, MaybeHandle<Name> name,
ZoneVector<Handle<Map>>&& lookup_start_object_maps);
PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map, FieldIndex field_index,
@@ -145,18 +191,28 @@ class PropertyAccessInfo final {
Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
ZoneVector<Handle<Map>>&& lookup_start_object_maps,
ZoneVector<CompilationDependency const*>&& dependencies);
+ PropertyAccessInfo(Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
+ ZoneVector<Handle<Map>>&& lookup_start_object_maps,
+ InternalIndex dictionary_index, Handle<Name> name);
+ // Members used for fast and dictionary mode holders:
Kind kind_;
ZoneVector<Handle<Map>> lookup_start_object_maps_;
- ZoneVector<CompilationDependency const*> unrecorded_dependencies_;
Handle<Object> constant_;
- MaybeHandle<Map> transition_map_;
MaybeHandle<JSObject> holder_;
+
+ // Members only used for fast mode holders:
+ ZoneVector<CompilationDependency const*> unrecorded_dependencies_;
+ MaybeHandle<Map> transition_map_;
FieldIndex field_index_;
Representation field_representation_;
Type field_type_;
MaybeHandle<Map> field_owner_map_;
MaybeHandle<Map> field_map_;
+
+ // Members only used for dictionary mode holders:
+ InternalIndex dictionary_index_;
+ MaybeHandle<Name> name_;
};
// This class encapsulates information required to generate load properties
@@ -205,6 +261,11 @@ class AccessInfoFactory final {
Handle<Name> name,
AccessMode access_mode) const;
+ PropertyAccessInfo ComputeDictionaryProtoAccessInfo(
+ Handle<Map> receiver_map, Handle<Name> name, Handle<JSObject> holder,
+ InternalIndex dict_index, AccessMode access_mode,
+ PropertyDetails details) const;
+
MinimorphicLoadPropertyAccessInfo ComputePropertyAccessInfo(
MinimorphicLoadPropertyAccessFeedback const& feedback) const;
@@ -248,6 +309,32 @@ class AccessInfoFactory final {
AccessMode access_mode,
ZoneVector<PropertyAccessInfo>* result) const;
+ bool TryLoadPropertyDetails(Handle<Map> map, MaybeHandle<JSObject> holder,
+ Handle<Name> name, InternalIndex* index_out,
+ PropertyDetails* details_out) const;
+
+ bool should_lock_mutex() const { return map_updater_mutex_depth_ == 0; }
+
+ class MapUpdaterMutexDepthScope final {
+ public:
+ explicit MapUpdaterMutexDepthScope(const AccessInfoFactory* ptr)
+ : ptr_(ptr),
+ initial_map_updater_mutex_depth_(ptr->map_updater_mutex_depth_) {
+ ptr_->map_updater_mutex_depth_++;
+ }
+
+ ~MapUpdaterMutexDepthScope() {
+ ptr_->map_updater_mutex_depth_--;
+ DCHECK_EQ(initial_map_updater_mutex_depth_,
+ ptr_->map_updater_mutex_depth_);
+ USE(initial_map_updater_mutex_depth_);
+ }
+
+ private:
+ const AccessInfoFactory* const ptr_;
+ const int initial_map_updater_mutex_depth_;
+ };
+
CompilationDependencies* dependencies() const { return dependencies_; }
JSHeapBroker* broker() const { return broker_; }
Isolate* isolate() const;
@@ -258,6 +345,12 @@ class AccessInfoFactory final {
TypeCache const* const type_cache_;
Zone* const zone_;
+ // ComputePropertyAccessInfo can be called recursively, thus we need to
+ // emulate a recursive mutex. This field holds the locking depth, i.e. how
+ // many times the mutex has been recursively locked. Only the outermost
+ // locker actually locks underneath.
+ mutable int map_updater_mutex_depth_ = 0;
+
// TODO(nicohartmann@): Move to public
AccessInfoFactory(const AccessInfoFactory&) = delete;
AccessInfoFactory& operator=(const AccessInfoFactory&) = delete;
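Editorial note, not part of the diff: should_lock_mutex() and MapUpdaterMutexDepthScope above emulate a recursive shared mutex, since ComputePropertyAccessInfo can call itself. Only the outermost call takes the map_updater_access lock; nested calls just bump a depth counter. A simplified sketch of that pattern, with std::shared_mutex standing in for base::SharedMutex:

#include <shared_mutex>
#include <mutex>

class Factory {
 public:
  int ComputeAccessInfo(int recursion_left) {
    // Lock only at depth 0, mirroring SharedMutexGuardIf + should_lock_mutex().
    std::shared_lock<std::shared_mutex> guard(mutex_, std::defer_lock);
    if (mutex_depth_ == 0) guard.lock();
    ++mutex_depth_;

    // Nested calls reuse the lock held by the outermost caller.
    int result =
        recursion_left > 0 ? ComputeAccessInfo(recursion_left - 1) + 1 : 0;

    --mutex_depth_;
    return result;  // guard unlocks here only if this call locked it
  }

 private:
  std::shared_mutex mutex_;
  int mutex_depth_ = 0;
};

int main() {
  Factory factory;
  return factory.ComputeAccessInfo(3) == 3 ? 0 : 1;  // nested calls don't deadlock
}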
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index d243c07790c..dba31fe0bc3 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -15,8 +15,11 @@
#include "src/heap/memory-chunk.h"
#include "src/numbers/double.h"
#include "src/utils/boxed-float.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -38,6 +41,7 @@ class ArmOperandConverter final : public InstructionOperandConverter {
case kFlags_deoptimize_and_poison:
case kFlags_set:
case kFlags_trap:
+ case kFlags_select:
return SetCC;
case kFlags_none:
return LeaveCC;
@@ -120,11 +124,12 @@ class ArmOperandConverter final : public InstructionOperandConverter {
Constant constant = ToConstant(operand);
switch (constant.type()) {
case Constant::kInt32:
+#if V8_ENABLE_WEBASSEMBLY
if (RelocInfo::IsWasmReference(constant.rmode())) {
return Operand(constant.ToInt32(), constant.rmode());
- } else {
- return Operand(constant.ToInt32());
}
+#endif // V8_ENABLE_WEBASSEMBLY
+ return Operand(constant.ToInt32());
case Constant::kFloat32:
return Operand::EmbeddedNumber(constant.ToFloat32());
case Constant::kFloat64:
@@ -180,10 +185,13 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
offset_(offset),
value_(value),
mode_(mode),
+#if V8_ENABLE_WEBASSEMBLY
stub_mode_(stub_mode),
+#endif // V8_ENABLE_WEBASSEMBLY
must_save_lr_(!gen->frame_access_state()->has_frame()),
unwinding_info_writer_(unwinding_info_writer),
- zone_(gen->zone()) {}
+ zone_(gen->zone()) {
+ }
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -203,9 +211,11 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
}
if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
__ CallEphemeronKeyBarrier(object_, offset_, save_fp_mode);
+#if V8_ENABLE_WEBASSEMBLY
} else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
__ CallRecordWriteStub(object_, offset_, remembered_set_action,
save_fp_mode, wasm::WasmCode::kRecordWrite);
+#endif // V8_ENABLE_WEBASSEMBLY
} else {
__ CallRecordWriteStub(object_, offset_, remembered_set_action,
save_fp_mode);
@@ -221,7 +231,9 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Operand const offset_;
Register const value_;
RecordWriteMode const mode_;
+#if V8_ENABLE_WEBASSEMBLY
StubCallMode stub_mode_;
+#endif // V8_ENABLE_WEBASSEMBLY
bool must_save_lr_;
UnwindingInfoWriter* const unwinding_info_writer_;
Zone* zone_;
@@ -479,24 +491,6 @@ void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
} \
} while (0)
-#define ASSEMBLE_NEON_PAIRWISE_OP(op, size) \
- do { \
- Simd128Register dst = i.OutputSimd128Register(), \
- src0 = i.InputSimd128Register(0), \
- src1 = i.InputSimd128Register(1); \
- if (dst == src0) { \
- __ op(size, dst.low(), src0.low(), src0.high()); \
- if (dst == src1) { \
- __ vmov(dst.high(), dst.low()); \
- } else { \
- __ op(size, dst.high(), src1.low(), src1.high()); \
- } \
- } else { \
- __ op(size, dst.high(), src1.low(), src1.high()); \
- __ op(size, dst.low(), src0.low(), src0.high()); \
- } \
- } while (0)
-
#define ASSEMBLE_F64X2_ARITHMETIC_BINOP(op) \
do { \
__ op(i.OutputSimd128Register().low(), i.InputSimd128Register(0).low(), \
@@ -629,13 +623,13 @@ bool VerifyOutputOfAtomicPairInstr(ArmOperandConverter* converter,
} // namespace
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
- int first_unused_stack_slot) {
+ int first_unused_slot_offset) {
ZoneVector<MoveOperands*> pushes(zone());
GetPushCompatibleMoves(instr, kRegisterPush, &pushes);
if (!pushes.empty() &&
(LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
- first_unused_stack_slot)) {
+ first_unused_slot_offset)) {
ArmOperandConverter g(this, instr);
ZoneVector<Register> pending_pushes(zone());
for (auto move : pushes) {
@@ -661,13 +655,13 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
}
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
- first_unused_stack_slot, nullptr, false);
+ first_unused_slot_offset, nullptr, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
- int first_unused_stack_slot) {
+ int first_unused_slot_offset) {
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
- first_unused_stack_slot);
+ first_unused_slot_offset);
}
// Check that {kJavaScriptCallCodeStartRegister} is correct.
@@ -749,6 +743,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
+#if V8_ENABLE_WEBASSEMBLY
case kArchCallWasmFunction: {
if (instr->InputAt(0)->IsImmediate()) {
Constant constant = i.ToConstant(instr->InputAt(0));
@@ -762,15 +757,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallCodeObject: {
+ case kArchTailCallWasm: {
if (instr->InputAt(0)->IsImmediate()) {
- __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt32());
+ __ Jump(wasm_code, constant.rmode());
} else {
- Register reg = i.InputRegister(0);
- DCHECK_IMPLIES(
- instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
- reg == kJavaScriptCallCodeStartRegister);
- __ JumpCodeObject(reg);
+ __ Jump(i.InputRegister(0));
}
DCHECK_EQ(LeaveCC, i.OutputSBit());
unwinding_info_writer_.MarkBlockWillExit();
@@ -778,13 +771,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->SetFrameAccessToDefault();
break;
}
- case kArchTailCallWasm: {
+#endif // V8_ENABLE_WEBASSEMBLY
+ case kArchTailCallCodeObject: {
if (instr->InputAt(0)->IsImmediate()) {
- Constant constant = i.ToConstant(instr->InputAt(0));
- Address wasm_code = static_cast<Address>(constant.ToInt32());
- __ Jump(wasm_code, constant.rmode());
+ __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
- __ Jump(i.InputRegister(0));
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ JumpCodeObject(reg);
}
DCHECK_EQ(LeaveCC, i.OutputSBit());
unwinding_info_writer_.MarkBlockWillExit();
@@ -859,10 +855,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
+#if V8_ENABLE_WEBASSEMBLY
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
// Put the return address in a stack slot.
__ str(pc, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
}
+#endif // V8_ENABLE_WEBASSEMBLY
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_parameters);
@@ -870,9 +868,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+#if V8_ENABLE_WEBASSEMBLY
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
RecordSafepoint(instr->reference_map());
}
+#endif // V8_ENABLE_WEBASSEMBLY
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
// pointer in CallCFunction. However, for certain architectures (e.g.
@@ -1772,9 +1772,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Slot-sized arguments are never padded but there may be a gap if
// the slot allocator reclaimed other padding slots. Adjust the stack
// here to skip any gap.
- if (slots > pushed_slots) {
- __ AllocateStackSpace((slots - pushed_slots) * kSystemPointerSize);
- }
+ __ AllocateStackSpace((slots - pushed_slots) * kSystemPointerSize);
switch (rep) {
case MachineRepresentation::kFloat32:
__ vpush(i.InputFloatRegister(1));
@@ -2068,24 +2066,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmF64x2ConvertLowI32x4S: {
- Simd128Register dst = i.OutputSimd128Register();
- Simd128Register src = i.InputSimd128Register(0);
- __ vcvt_f64_s32(dst.low(), SwVfpRegister::from_code(src.code() * 4));
- __ vcvt_f64_s32(dst.high(), SwVfpRegister::from_code(src.code() * 4 + 1));
+ __ F64x2ConvertLowI32x4S(i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
break;
}
case kArmF64x2ConvertLowI32x4U: {
- Simd128Register dst = i.OutputSimd128Register();
- Simd128Register src = i.InputSimd128Register(0);
- __ vcvt_f64_u32(dst.low(), SwVfpRegister::from_code(src.code() * 4));
- __ vcvt_f64_u32(dst.high(), SwVfpRegister::from_code(src.code() * 4 + 1));
+ __ F64x2ConvertLowI32x4U(i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
break;
}
case kArmF64x2PromoteLowF32x4: {
- Simd128Register dst = i.OutputSimd128Register();
- Simd128Register src = i.InputSimd128Register(0);
- __ vcvt_f64_f32(dst.low(), SwVfpRegister::from_code(src.code() * 4));
- __ vcvt_f64_f32(dst.high(), SwVfpRegister::from_code(src.code() * 4 + 1));
+ __ F64x2PromoteLowF32x4(i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
break;
}
case kArmI64x2SplatI32Pair: {
@@ -2159,13 +2151,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI64x2Abs: {
- Simd128Register dst = i.OutputSimd128Register();
- Simd128Register src = i.InputSimd128Register(0);
- UseScratchRegisterScope temps(tasm());
- Simd128Register tmp = temps.AcquireQ();
- __ vshr(NeonS64, tmp, src, 63);
- __ veor(dst, src, tmp);
- __ vsub(Neon64, dst, dst, tmp);
+ __ I64x2Abs(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kArmI64x2Neg: {
@@ -2272,24 +2258,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kArmF32x4AddHoriz: {
- Simd128Register dst = i.OutputSimd128Register(),
- src0 = i.InputSimd128Register(0),
- src1 = i.InputSimd128Register(1);
- // Make sure we don't overwrite source data before it's used.
- if (dst == src0) {
- __ vpadd(dst.low(), src0.low(), src0.high());
- if (dst == src1) {
- __ vmov(dst.high(), dst.low());
- } else {
- __ vpadd(dst.high(), src1.low(), src1.high());
- }
- } else {
- __ vpadd(dst.high(), src1.low(), src1.high());
- __ vpadd(dst.low(), src0.low(), src0.high());
- }
- break;
- }
case kArmF32x4Sub: {
__ vsub(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2426,9 +2394,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kArmI32x4AddHoriz:
- ASSEMBLE_NEON_PAIRWISE_OP(vpadd, Neon32);
- break;
case kArmI32x4Sub: {
__ vsub(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2455,14 +2420,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI64x2Ne: {
- Simd128Register dst = i.OutputSimd128Register();
- UseScratchRegisterScope temps(tasm());
- Simd128Register tmp = temps.AcquireQ();
- __ vceq(Neon32, dst, i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- __ vrev64(Neon32, tmp, dst);
- __ vand(dst, dst, tmp);
- __ vmvn(dst, dst);
+ __ I64x2Ne(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kArmI64x2GtS: {
@@ -2640,9 +2599,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kArmI16x8AddHoriz:
- ASSEMBLE_NEON_PAIRWISE_OP(vpadd, Neon16);
- break;
case kArmI16x8Sub: {
__ vsub(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2824,11 +2780,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kArmI8x16Mul: {
- __ vmul(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- break;
- }
case kArmI8x16MinS: {
__ vmin(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2927,19 +2878,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vmov(NeonU16, dst, tmp.low(), 0);
break;
}
- case kArmSignSelect: {
- Simd128Register dst = i.OutputSimd128Register();
- Simd128Register mask = i.InputSimd128Register(2);
- auto sz = static_cast<NeonSize>(MiscField::decode(instr->opcode()));
- if (Neon64 == sz) {
- // vclt does not support Neon64.
- __ vshr(NeonS64, dst, mask, 63);
- } else {
- __ vclt(sz, dst, mask, 0);
- }
- __ vbsl(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
- break;
- }
case kArmS128Const: {
QwNeonRegister dst = i.OutputSimd128Register();
uint64_t imm1 = make_uint64(i.InputUint32(1), i.InputUint32(0));
@@ -3287,11 +3225,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
- case kArmV64x2AllTrue: {
- __ V64x2AllTrue(i.OutputRegister(), i.InputSimd128Register(0));
+ case kArmI64x2AllTrue: {
+ __ I64x2AllTrue(i.OutputRegister(), i.InputSimd128Register(0));
break;
}
- case kArmV32x4AllTrue: {
+ case kArmI32x4AllTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
UseScratchRegisterScope temps(tasm());
DwVfpRegister scratch = temps.AcquireD();
@@ -3302,7 +3240,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
- case kArmV16x8AllTrue: {
+ case kArmI16x8AllTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
UseScratchRegisterScope temps(tasm());
DwVfpRegister scratch = temps.AcquireD();
@@ -3314,7 +3252,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
- case kArmV8x16AllTrue: {
+ case kArmI8x16AllTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
UseScratchRegisterScope temps(tasm());
DwVfpRegister scratch = temps.AcquireD();
@@ -3634,7 +3572,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#undef ASSEMBLE_IEEE754_BINOP
#undef ASSEMBLE_IEEE754_UNOP
#undef ASSEMBLE_NEON_NARROWING_OP
-#undef ASSEMBLE_NEON_PAIRWISE_OP
#undef ASSEMBLE_SIMD_SHIFT_LEFT
#undef ASSEMBLE_SIMD_SHIFT_RIGHT
}
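Note: several SIMD cases earlier in this function now delegate to shared macro-assembler helpers (I64x2Abs, I64x2Ne, F64x2ConvertLowI32x4S and friends) instead of open-coding the NEON sequences. A minimal scalar sketch of the trick the I64x2Abs helper encapsulates, in plain C++ and purely for illustration (the function name is invented, not V8 API):

#include <cstdint>

int64_t LaneAbs(int64_t x) {
  int64_t mask = x >> 63;    // vshr NeonS64, tmp, src, #63
  return (x ^ mask) - mask;  // veor dst, src, tmp; vsub Neon64, dst, dst, tmp
}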
@@ -3674,6 +3611,7 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
}
+#if V8_ENABLE_WEBASSEMBLY
void CodeGenerator::AssembleArchTrap(Instruction* instr,
FlagsCondition condition) {
class OutOfLineTrap final : public OutOfLineCode {
@@ -3700,8 +3638,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
ExternalReference::wasm_call_trap_callback_for_testing(), 0);
__ LeaveFrame(StackFrame::WASM);
auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
- int pop_count =
- static_cast<int>(call_descriptor->StackParameterCount());
+ int pop_count = static_cast<int>(call_descriptor->ParameterSlotCount());
__ Drop(pop_count);
__ Ret();
} else {
@@ -3727,6 +3664,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
Condition cc = FlagsConditionToCondition(condition);
__ b(cc, tlabel);
}
+#endif // V8_ENABLE_WEBASSEMBLY
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -3769,6 +3707,11 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
}
+void CodeGenerator::AssembleArchSelect(Instruction* instr,
+ FlagsCondition condition) {
+ UNIMPLEMENTED();
+}
+
void CodeGenerator::FinishFrame(Frame* frame) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
@@ -3797,10 +3740,15 @@ void CodeGenerator::AssembleConstructFrame() {
auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (call_descriptor->IsCFunctionCall()) {
+#if V8_ENABLE_WEBASSEMBLY
if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
__ StubPrologue(StackFrame::C_WASM_ENTRY);
// Reserve stack space for saving the c_entry_fp later.
__ AllocateStackSpace(kSystemPointerSize);
+#else
+ // For balance.
+ if (false) {
+#endif // V8_ENABLE_WEBASSEMBLY
} else {
__ Push(lr, fp);
__ mov(fp, sp);
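Note: the "#else ... if (false)" branch above exists only to keep the brace structure balanced when WebAssembly support is compiled out, so that the following "} else {" still parses. A stripped-down sketch of the same preprocessor pattern (FEATURE_X and the surrounding function are invented for illustration):

void BalancedPrologue(bool wasm_entry) {
#if defined(FEATURE_X)
  if (wasm_entry) {
    // feature-specific prologue goes here
#else
  if (false) {
#endif
  } else {
    // common prologue goes here
  }
}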
@@ -3809,6 +3757,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Prologue();
} else {
__ StubPrologue(info()->GetOutputStackFrameType());
+#if V8_ENABLE_WEBASSEMBLY
if (call_descriptor->IsWasmFunctionCall()) {
__ Push(kWasmInstanceRegister);
} else if (call_descriptor->IsWasmImportWrapper() ||
@@ -3827,6 +3776,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ AllocateStackSpace(kSystemPointerSize);
}
}
+#endif // V8_ENABLE_WEBASSEMBLY
}
unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
@@ -3854,6 +3804,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (required_slots > 0) {
DCHECK(frame_access_state()->has_frame());
+#if V8_ENABLE_WEBASSEMBLY
if (info()->IsWasm() && required_slots > 128) {
// For WebAssembly functions with big frames we have to do the stack
// overflow check before we construct the frame. Otherwise we may not
@@ -3886,6 +3837,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ bind(&done);
}
+#endif // V8_ENABLE_WEBASSEMBLY
// Skip callee-saved and return slots, which are pushed below.
required_slots -= base::bits::CountPopulation(saves);
@@ -3912,10 +3864,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
const int returns = frame()->GetReturnSlotCount();
- if (returns != 0) {
- // Create space for returns.
- __ AllocateStackSpace(returns * kSystemPointerSize);
- }
+ // Create space for returns.
+ __ AllocateStackSpace(returns * kSystemPointerSize);
}
void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
@@ -3948,12 +3898,12 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// We might need r3 for scratch.
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & r3.bit());
ArmOperandConverter g(this, nullptr);
- const int parameter_count =
- static_cast<int>(call_descriptor->StackParameterCount());
+ const int parameter_slots =
+ static_cast<int>(call_descriptor->ParameterSlotCount());
- // {additional_pop_count} is only greater than zero if {parameter_count = 0}.
+ // {additional_pop_count} is only greater than zero if {parameter_slots = 0}.
// Check RawMachineAssembler::PopAndReturn.
- if (parameter_count != 0) {
+ if (parameter_slots != 0) {
if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
} else if (__ emit_debug_code()) {
@@ -3964,12 +3914,12 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
Register argc_reg = r3;
// Functions with JS linkage have at least one parameter (the receiver).
- // If {parameter_count} == 0, it means it is a builtin with
+ // If {parameter_slots} == 0, it means it is a builtin with
// kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
// itself.
const bool drop_jsargs = frame_access_state()->has_frame() &&
call_descriptor->IsJSFunctionCall() &&
- parameter_count != 0;
+ parameter_slots != 0;
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
@@ -3993,23 +3943,23 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
if (drop_jsargs) {
// We must pop all arguments from the stack (including the receiver). This
- // number of arguments is given by max(1 + argc_reg, parameter_count).
+ // number of arguments is given by max(1 + argc_reg, parameter_slots).
__ add(argc_reg, argc_reg, Operand(1)); // Also pop the receiver.
- if (parameter_count > 1) {
- __ cmp(argc_reg, Operand(parameter_count));
- __ mov(argc_reg, Operand(parameter_count), LeaveCC, lt);
+ if (parameter_slots > 1) {
+ __ cmp(argc_reg, Operand(parameter_slots));
+ __ mov(argc_reg, Operand(parameter_slots), LeaveCC, lt);
}
__ Drop(argc_reg);
} else if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(Constant::kInt32, g.ToConstant(additional_pop_count).type());
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
- __ Drop(parameter_count + additional_count);
- } else if (parameter_count == 0) {
+ __ Drop(parameter_slots + additional_count);
+ } else if (parameter_slots == 0) {
__ Drop(g.ToRegister(additional_pop_count));
} else {
- // {additional_pop_count} is guaranteed to be zero if {parameter_count !=
+ // {additional_pop_count} is guaranteed to be zero if {parameter_slots !=
// 0}. Check RawMachineAssembler::PopAndReturn.
- __ Drop(parameter_count);
+ __ Drop(parameter_slots);
}
__ Ret();
}
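Note on the epilogue above: for functions with JS linkage the generated code drops max(argc + 1, parameter_slots) stack slots, where the +1 accounts for the receiver. A one-line C++ restatement of that rule, with an invented helper name, purely as a sketch:

#include <algorithm>
#include <cstdint>

int64_t JSArgsSlotsToDrop(int64_t argc, int64_t parameter_slots) {
  return std::max(argc + 1, parameter_slots);  // argc + 1 includes the receiver
}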
diff --git a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
index 416f8a564a7..8cec45cd0c7 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
@@ -168,7 +168,6 @@ namespace compiler {
V(ArmF32x4RecipApprox) \
V(ArmF32x4RecipSqrtApprox) \
V(ArmF32x4Add) \
- V(ArmF32x4AddHoriz) \
V(ArmF32x4Sub) \
V(ArmF32x4Mul) \
V(ArmF32x4Div) \
@@ -210,7 +209,6 @@ namespace compiler {
V(ArmI32x4Shl) \
V(ArmI32x4ShrS) \
V(ArmI32x4Add) \
- V(ArmI32x4AddHoriz) \
V(ArmI32x4Sub) \
V(ArmI32x4Mul) \
V(ArmI32x4MinS) \
@@ -243,7 +241,6 @@ namespace compiler {
V(ArmI16x8SConvertI32x4) \
V(ArmI16x8Add) \
V(ArmI16x8AddSatS) \
- V(ArmI16x8AddHoriz) \
V(ArmI16x8Sub) \
V(ArmI16x8SubSatS) \
V(ArmI16x8Mul) \
@@ -279,7 +276,6 @@ namespace compiler {
V(ArmI8x16AddSatS) \
V(ArmI8x16Sub) \
V(ArmI8x16SubSatS) \
- V(ArmI8x16Mul) \
V(ArmI8x16MinS) \
V(ArmI8x16MaxS) \
V(ArmI8x16Eq) \
@@ -298,7 +294,6 @@ namespace compiler {
V(ArmI8x16RoundingAverageU) \
V(ArmI8x16Abs) \
V(ArmI8x16BitMask) \
- V(ArmSignSelect) \
V(ArmS128Const) \
V(ArmS128Zero) \
V(ArmS128AllOnes) \
@@ -337,11 +332,11 @@ namespace compiler {
V(ArmS8x8Reverse) \
V(ArmS8x4Reverse) \
V(ArmS8x2Reverse) \
- V(ArmV64x2AllTrue) \
- V(ArmV32x4AllTrue) \
- V(ArmV16x8AllTrue) \
+ V(ArmI64x2AllTrue) \
+ V(ArmI32x4AllTrue) \
+ V(ArmI16x8AllTrue) \
V(ArmV128AnyTrue) \
- V(ArmV8x16AllTrue) \
+ V(ArmI8x16AllTrue) \
V(ArmS128Load8Splat) \
V(ArmS128Load16Splat) \
V(ArmS128Load32Splat) \
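Note: these headers rely on the X-macro idiom, so renaming an opcode (e.g. kArmV32x4AllTrue to kArmI32x4AllTrue) only needs a one-line change in the V(...) list. A simplified, self-contained sketch of the idiom (the DEMO_* names are invented; this is not the actual V8 definition):

#define DEMO_OPCODE_LIST(V) \
  V(ArmI64x2AllTrue)        \
  V(ArmI32x4AllTrue)        \
  V(ArmI16x8AllTrue)        \
  V(ArmI8x16AllTrue)

enum DemoOpcode {
#define DECLARE_OPCODE(Name) kDemo##Name,
  DEMO_OPCODE_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
};

const char* DemoOpcodeName(DemoOpcode opcode) {
  static const char* const kNames[] = {
#define OPCODE_NAME(Name) #Name,
      DEMO_OPCODE_LIST(OPCODE_NAME)
#undef OPCODE_NAME
  };
  return kNames[opcode];  // e.g. "ArmI32x4AllTrue"
}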
diff --git a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
index b82369e7638..cb5cd568b0b 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
@@ -148,7 +148,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmF32x4RecipApprox:
case kArmF32x4RecipSqrtApprox:
case kArmF32x4Add:
- case kArmF32x4AddHoriz:
case kArmF32x4Sub:
case kArmF32x4Mul:
case kArmF32x4Div:
@@ -190,7 +189,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI32x4Shl:
case kArmI32x4ShrS:
case kArmI32x4Add:
- case kArmI32x4AddHoriz:
case kArmI32x4Sub:
case kArmI32x4Mul:
case kArmI32x4MinS:
@@ -223,7 +221,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI16x8SConvertI32x4:
case kArmI16x8Add:
case kArmI16x8AddSatS:
- case kArmI16x8AddHoriz:
case kArmI16x8Sub:
case kArmI16x8SubSatS:
case kArmI16x8Mul:
@@ -259,7 +256,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI8x16AddSatS:
case kArmI8x16Sub:
case kArmI8x16SubSatS:
- case kArmI8x16Mul:
case kArmI8x16MinS:
case kArmI8x16MaxS:
case kArmI8x16Eq:
@@ -278,7 +274,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI8x16RoundingAverageU:
case kArmI8x16Abs:
case kArmI8x16BitMask:
- case kArmSignSelect:
case kArmS128Const:
case kArmS128Zero:
case kArmS128AllOnes:
@@ -317,11 +312,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmS8x8Reverse:
case kArmS8x4Reverse:
case kArmS8x2Reverse:
- case kArmV64x2AllTrue:
- case kArmV32x4AllTrue:
- case kArmV16x8AllTrue:
+ case kArmI64x2AllTrue:
+ case kArmI32x4AllTrue:
+ case kArmI16x8AllTrue:
case kArmV128AnyTrue:
- case kArmV8x16AllTrue:
+ case kArmI8x16AllTrue:
return kNoOpcodeFlags;
case kArmVldrF32:
diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index 3f15323297b..d28ada322d4 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -112,6 +112,7 @@ void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode,
}
}
+#if V8_ENABLE_WEBASSEMBLY
void VisitRRRShuffle(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
ArmOperandGenerator g(selector);
@@ -132,6 +133,7 @@ void VisitRRRShuffle(InstructionSelector* selector, ArchOpcode opcode,
g.UseRegister(node->InputAt(0)),
g.UseRegister(node->InputAt(1)));
}
+#endif // V8_ENABLE_WEBASSEMBLY
void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
ArmOperandGenerator g(selector);
@@ -148,14 +150,6 @@ void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
g.UseUniqueRegister(node->InputAt(1)));
}
-void VisitRRRR(InstructionSelector* selector, InstructionCode opcode,
- Node* node) {
- ArmOperandGenerator g(selector);
- selector->Emit(
- opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
-}
-
template <IrOpcode::Value kOpcode, int kImmMin, int kImmMax,
AddressingMode kImmMode, AddressingMode kRegMode>
bool TryMatchShift(InstructionSelector* selector,
@@ -496,7 +490,7 @@ void VisitPairAtomicBinOp(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitStackSlot(Node* node) {
StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
- int slot = frame_->AllocateSpillSlot(rep.size());
+ int slot = frame_->AllocateSpillSlot(rep.size(), rep.alignment());
OperandGenerator g(this);
Emit(kArchStackSlot, g.DefineAsRegister(node),
@@ -2594,11 +2588,11 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I8x16Abs, kArmI8x16Abs) \
V(I8x16Popcnt, kArmVcnt) \
V(S128Not, kArmS128Not) \
- V(V64x2AllTrue, kArmV64x2AllTrue) \
- V(V32x4AllTrue, kArmV32x4AllTrue) \
- V(V16x8AllTrue, kArmV16x8AllTrue) \
+ V(I64x2AllTrue, kArmI64x2AllTrue) \
+ V(I32x4AllTrue, kArmI32x4AllTrue) \
+ V(I16x8AllTrue, kArmI16x8AllTrue) \
V(V128AnyTrue, kArmV128AnyTrue) \
- V(V8x16AllTrue, kArmV8x16AllTrue)
+ V(I8x16AllTrue, kArmI8x16AllTrue)
#define SIMD_SHIFT_OP_LIST(V) \
V(I64x2Shl, 64) \
@@ -2626,7 +2620,6 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(F64x2Lt, kArmF64x2Lt) \
V(F64x2Le, kArmF64x2Le) \
V(F32x4Add, kArmF32x4Add) \
- V(F32x4AddHoriz, kArmF32x4AddHoriz) \
V(F32x4Sub, kArmF32x4Sub) \
V(F32x4Mul, kArmF32x4Mul) \
V(F32x4Min, kArmF32x4Min) \
@@ -2638,7 +2631,6 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I64x2Add, kArmI64x2Add) \
V(I64x2Sub, kArmI64x2Sub) \
V(I32x4Add, kArmI32x4Add) \
- V(I32x4AddHoriz, kArmI32x4AddHoriz) \
V(I32x4Sub, kArmI32x4Sub) \
V(I32x4Mul, kArmI32x4Mul) \
V(I32x4MinS, kArmI32x4MinS) \
@@ -2658,7 +2650,6 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I16x8SConvertI32x4, kArmI16x8SConvertI32x4) \
V(I16x8Add, kArmI16x8Add) \
V(I16x8AddSatS, kArmI16x8AddSatS) \
- V(I16x8AddHoriz, kArmI16x8AddHoriz) \
V(I16x8Sub, kArmI16x8Sub) \
V(I16x8SubSatS, kArmI16x8SubSatS) \
V(I16x8Mul, kArmI16x8Mul) \
@@ -2682,7 +2673,6 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I8x16AddSatS, kArmI8x16AddSatS) \
V(I8x16Sub, kArmI8x16Sub) \
V(I8x16SubSatS, kArmI8x16SubSatS) \
- V(I8x16Mul, kArmI8x16Mul) \
V(I8x16MinS, kArmI8x16MinS) \
V(I8x16MaxS, kArmI8x16MaxS) \
V(I8x16Eq, kArmI8x16Eq) \
@@ -2842,6 +2832,7 @@ void InstructionSelector::VisitS128Select(Node* node) {
g.UseRegister(node->InputAt(2)));
}
+#if V8_ENABLE_WEBASSEMBLY
namespace {
struct ShuffleEntry {
@@ -2988,6 +2979,9 @@ void InstructionSelector::VisitI8x16Shuffle(Node* node) {
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 8)),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 12)));
}
+#else
+void InstructionSelector::VisitI8x16Shuffle(Node* node) { UNREACHABLE(); }
+#endif // V8_ENABLE_WEBASSEMBLY
void InstructionSelector::VisitI8x16Swizzle(Node* node) {
ArmOperandGenerator g(this);
@@ -3113,18 +3107,6 @@ VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8S, NeonS16)
VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8U, NeonU16)
#undef VISIT_EXTADD_PAIRWISE
-#define VISIT_SIGN_SELECT(OPCODE, SIZE) \
- void InstructionSelector::Visit##OPCODE(Node* node) { \
- InstructionCode opcode = kArmSignSelect; \
- opcode |= MiscField::encode(SIZE); \
- VisitRRRR(this, opcode, node); \
- }
-
-VISIT_SIGN_SELECT(I8x16SignSelect, Neon8)
-VISIT_SIGN_SELECT(I16x8SignSelect, Neon16)
-VISIT_SIGN_SELECT(I32x4SignSelect, Neon32)
-VISIT_SIGN_SELECT(I64x2SignSelect, Neon64)
-
void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
ArmOperandGenerator g(this);
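Note: VisitStackSlot above now forwards rep.alignment() to Frame::AllocateSpillSlot, so over-aligned stack slots get the alignment they request. A hedged sketch of the effect (the helper name and byte-based bookkeeping are invented for illustration; the real Frame accounts in slots):

#include <cstdint>

int64_t AllocateAlignedSpillSlot(int64_t* frame_size, int64_t size,
                                 int64_t alignment) {
  // Round the current frame size up to `alignment` (a power of two),
  // then reserve `size` bytes and return the new slot's byte offset.
  int64_t aligned = (*frame_size + alignment - 1) & -alignment;
  *frame_size = aligned + size;
  return aligned;
}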
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index 5b9c2e4d4fb..91b8f1f04b1 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -12,8 +12,11 @@
#include "src/compiler/osr.h"
#include "src/execution/frame-constants.h"
#include "src/heap/memory-chunk.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -212,11 +215,12 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
case Constant::kInt32:
return Operand(constant.ToInt32());
case Constant::kInt64:
+#if V8_ENABLE_WEBASSEMBLY
if (RelocInfo::IsWasmReference(constant.rmode())) {
return Operand(constant.ToInt64(), constant.rmode());
- } else {
- return Operand(constant.ToInt64());
}
+#endif // V8_ENABLE_WEBASSEMBLY
+ return Operand(constant.ToInt64());
case Constant::kFloat32:
return Operand(Operand::EmbeddedNumber(constant.ToFloat32()));
case Constant::kFloat64:
@@ -269,10 +273,13 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
offset_(offset),
value_(value),
mode_(mode),
+#if V8_ENABLE_WEBASSEMBLY
stub_mode_(stub_mode),
+#endif // V8_ENABLE_WEBASSEMBLY
must_save_lr_(!gen->frame_access_state()->has_frame()),
unwinding_info_writer_(unwinding_info_writer),
- zone_(gen->zone()) {}
+ zone_(gen->zone()) {
+ }
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -295,12 +302,14 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
}
if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
__ CallEphemeronKeyBarrier(object_, offset_, save_fp_mode);
+#if V8_ENABLE_WEBASSEMBLY
} else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
// A direct call to a wasm runtime stub defined in this module.
// Just encode the stub index. This will be patched when the code
// is added to the native module and copied into wasm code space.
__ CallRecordWriteStub(object_, offset_, remembered_set_action,
save_fp_mode, wasm::WasmCode::kRecordWrite);
+#endif // V8_ENABLE_WEBASSEMBLY
} else {
__ CallRecordWriteStub(object_, offset_, remembered_set_action,
save_fp_mode);
@@ -316,7 +325,9 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Operand const offset_;
Register const value_;
RecordWriteMode const mode_;
+#if V8_ENABLE_WEBASSEMBLY
StubCallMode const stub_mode_;
+#endif // V8_ENABLE_WEBASSEMBLY
bool must_save_lr_;
UnwindingInfoWriter* const unwinding_info_writer_;
Zone* zone_;
@@ -375,6 +386,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
+#if V8_ENABLE_WEBASSEMBLY
class WasmOutOfLineTrap : public OutOfLineCode {
public:
WasmOutOfLineTrap(CodeGenerator* gen, Instruction* instr)
@@ -393,14 +405,14 @@ class WasmOutOfLineTrap : public OutOfLineCode {
private:
void GenerateCallToTrap(TrapId trap_id) {
- if (trap_id == TrapId::kInvalid) {
+ if (!gen_->wasm_runtime_exception_support()) {
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
__ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(),
0);
__ LeaveFrame(StackFrame::WASM);
auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
- int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
+ int pop_count = static_cast<int>(call_descriptor->ParameterSlotCount());
pop_count += (pop_count & 1); // align
__ Drop(pop_count);
__ Ret();
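Note: the extra "pop_count += (pop_count & 1)" above rounds an odd parameter slot count up to an even one, because arm64 requires sp to stay 16-byte aligned. A sketch of that rounding (helper name invented):

int AlignedPopCount(int parameter_slot_count) {
  return parameter_slot_count + (parameter_slot_count & 1);
}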
@@ -426,6 +438,7 @@ class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {
: WasmOutOfLineTrap(gen, instr), pc_(pc) {}
void Generate() override {
+ DCHECK(FLAG_wasm_bounds_checks && FLAG_wasm_trap_handler);
gen_->AddProtectedInstructionLanding(pc_, __ pc_offset());
GenerateWithTrapId(TrapId::kTrapMemOutOfBounds);
}
@@ -436,12 +449,17 @@ class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr, int pc) {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(AccessModeField::decode(opcode));
+ const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessProtected) {
zone->New<WasmProtectedInstructionTrap>(codegen, pc, instr);
}
}
+#else
+void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
+ InstructionCode opcode, Instruction* instr, int pc) {
+ DCHECK_NE(kMemoryAccessProtected, AccessModeField::decode(opcode));
+}
+#endif // V8_ENABLE_WEBASSEMBLY
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
@@ -648,21 +666,21 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
} // namespace
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
- int first_unused_stack_slot) {
+ int first_unused_slot_offset) {
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
- first_unused_stack_slot, false);
+ first_unused_slot_offset, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
- int first_unused_stack_slot) {
- DCHECK_EQ(first_unused_stack_slot % 2, 0);
+ int first_unused_slot_offset) {
+ DCHECK_EQ(first_unused_slot_offset % 2, 0);
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
- first_unused_stack_slot);
+ first_unused_slot_offset);
DCHECK(instr->IsTailCall());
InstructionOperandConverter g(this, instr);
- int optional_padding_slot = g.InputInt32(instr->InputCount() - 2);
- if (optional_padding_slot % 2) {
- __ Poke(padreg, optional_padding_slot * kSystemPointerSize);
+ int optional_padding_offset = g.InputInt32(instr->InputCount() - 2);
+ if (optional_padding_offset % 2) {
+ __ Poke(padreg, optional_padding_offset * kSystemPointerSize);
}
}
@@ -749,6 +767,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
+#if V8_ENABLE_WEBASSEMBLY
case kArchCallWasmFunction: {
if (instr->InputAt(0)->IsImmediate()) {
Constant constant = i.ToConstant(instr->InputAt(0));
@@ -762,21 +781,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallCodeObject: {
- if (instr->InputAt(0)->IsImmediate()) {
- __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
- } else {
- Register reg = i.InputRegister(0);
- DCHECK_IMPLIES(
- instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
- reg == kJavaScriptCallCodeStartRegister);
- __ JumpCodeObject(reg);
- }
- unwinding_info_writer_.MarkBlockWillExit();
- frame_access_state()->ClearSPDelta();
- frame_access_state()->SetFrameAccessToDefault();
- break;
- }
case kArchTailCallWasm: {
if (instr->InputAt(0)->IsImmediate()) {
Constant constant = i.ToConstant(instr->InputAt(0));
@@ -794,6 +798,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->SetFrameAccessToDefault();
break;
}
+#endif // V8_ENABLE_WEBASSEMBLY
+ case kArchTailCallCodeObject: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
+ } else {
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ JumpCodeObject(reg);
+ }
+ unwinding_info_writer_.MarkBlockWillExit();
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
case kArchTailCallAddress: {
CHECK(!instr->InputAt(0)->IsImmediate());
Register reg = i.InputRegister(0);
@@ -867,10 +887,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
Label return_location;
+#if V8_ENABLE_WEBASSEMBLY
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
// Put the return address in a stack slot.
__ StoreReturnAddressInWasmExitFrame(&return_location);
}
+#endif // V8_ENABLE_WEBASSEMBLY
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
@@ -880,9 +902,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ CallCFunction(func, num_parameters, 0);
}
__ Bind(&return_location);
+#if V8_ENABLE_WEBASSEMBLY
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
RecordSafepoint(instr->reference_map());
}
+#endif // V8_ENABLE_WEBASSEMBLY
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
// pointer in CallCFunction. However, for certain architectures (e.g.
@@ -1478,10 +1502,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kArm64Prfm: {
- __ prfm(MiscField::decode(opcode), i.MemoryOperand(0));
- break;
- }
case kArm64Clz:
__ Clz(i.OutputRegister64(), i.InputRegister64(0));
break;
@@ -1553,6 +1573,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Float32Abs:
__ Fabs(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
+ case kArm64Float32Abd:
+ __ Fabd(i.OutputFloat32Register(), i.InputFloat32Register(0),
+ i.InputFloat32Register(1));
+ break;
case kArm64Float32Neg:
__ Fneg(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
@@ -1623,6 +1647,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Float64Abs:
__ Fabs(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
+ case kArm64Float64Abd:
+ __ Fabd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
case kArm64Float64Neg:
__ Fneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
@@ -2168,13 +2196,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_UNOP_CASE(kArm64F32x4RecipApprox, Frecpe, 4S);
SIMD_UNOP_CASE(kArm64F32x4RecipSqrtApprox, Frsqrte, 4S);
SIMD_BINOP_CASE(kArm64F32x4Add, Fadd, 4S);
- SIMD_BINOP_CASE(kArm64F32x4AddHoriz, Faddp, 4S);
SIMD_BINOP_CASE(kArm64F32x4Sub, Fsub, 4S);
SIMD_BINOP_CASE(kArm64F32x4Mul, Fmul, 4S);
SIMD_BINOP_CASE(kArm64F32x4Div, Fdiv, 4S);
SIMD_BINOP_CASE(kArm64F32x4Min, Fmin, 4S);
SIMD_BINOP_CASE(kArm64F32x4Max, Fmax, 4S);
SIMD_BINOP_CASE(kArm64F32x4Eq, Fcmeq, 4S);
+ case kArm64F32x4MulElement: {
+ __ Fmul(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
+ i.InputSimd128Register(1).S(), i.InputInt8(2));
+ break;
+ }
+ case kArm64F64x2MulElement: {
+ __ Fmul(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).V2D(),
+ i.InputSimd128Register(1).D(), i.InputInt8(2));
+ break;
+ }
case kArm64F32x4Ne: {
VRegister dst = i.OutputSimd128Register().V4S();
__ Fcmeq(dst, i.InputSimd128Register(0).V4S(),
@@ -2351,7 +2388,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
SIMD_BINOP_CASE(kArm64I32x4Add, Add, 4S);
- SIMD_BINOP_CASE(kArm64I32x4AddHoriz, Addp, 4S);
SIMD_BINOP_CASE(kArm64I32x4Sub, Sub, 4S);
SIMD_BINOP_CASE(kArm64I32x4Mul, Mul, 4S);
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I32x4Mla, Mla, 4S);
@@ -2453,7 +2489,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
SIMD_BINOP_CASE(kArm64I16x8Add, Add, 8H);
SIMD_BINOP_CASE(kArm64I16x8AddSatS, Sqadd, 8H);
- SIMD_BINOP_CASE(kArm64I16x8AddHoriz, Addp, 8H);
SIMD_BINOP_CASE(kArm64I16x8Sub, Sub, 8H);
SIMD_BINOP_CASE(kArm64I16x8SubSatS, Sqsub, 8H);
SIMD_BINOP_CASE(kArm64I16x8Mul, Mul, 8H);
@@ -2564,7 +2599,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_BINOP_CASE(kArm64I8x16AddSatS, Sqadd, 16B);
SIMD_BINOP_CASE(kArm64I8x16Sub, Sub, 16B);
SIMD_BINOP_CASE(kArm64I8x16SubSatS, Sqsub, 16B);
- SIMD_BINOP_CASE(kArm64I8x16Mul, Mul, 16B);
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I8x16Mla, Mla, 16B);
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I8x16Mls, Mls, 16B);
SIMD_BINOP_CASE(kArm64I8x16MinS, Smin, 16B);
@@ -2623,14 +2657,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Mov(dst.W(), tmp.V8H(), 0);
break;
}
- case kArm64SignSelect: {
- VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
- __ Cmlt(i.OutputSimd128Register().Format(f),
- i.InputSimd128Register(2).Format(f), 0);
- __ Bsl(i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(),
- i.InputSimd128Register(1).V16B());
- break;
- }
case kArm64S128Const: {
uint64_t imm1 = make_uint64(i.InputUint32(1), i.InputUint32(0));
uint64_t imm2 = make_uint64(i.InputUint32(3), i.InputUint32(2));
@@ -2815,8 +2841,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Uxtl(i.OutputSimd128Register().V2D(), i.OutputSimd128Register().V2S());
break;
}
- case kArm64V64x2AllTrue: {
- __ V64x2AllTrue(i.OutputRegister32(), i.InputSimd128Register(0));
+ case kArm64I64x2AllTrue: {
+ __ I64x2AllTrue(i.OutputRegister32(), i.InputSimd128Register(0));
break;
}
#define SIMD_REDUCE_OP_CASE(Op, Instr, format, FORMAT) \
@@ -2831,9 +2857,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
// For AnyTrue, the format does not matter.
SIMD_REDUCE_OP_CASE(kArm64V128AnyTrue, Umaxv, kFormatS, 4S);
- SIMD_REDUCE_OP_CASE(kArm64V32x4AllTrue, Uminv, kFormatS, 4S);
- SIMD_REDUCE_OP_CASE(kArm64V16x8AllTrue, Uminv, kFormatH, 8H);
- SIMD_REDUCE_OP_CASE(kArm64V8x16AllTrue, Uminv, kFormatB, 16B);
+ SIMD_REDUCE_OP_CASE(kArm64I32x4AllTrue, Uminv, kFormatS, 4S);
+ SIMD_REDUCE_OP_CASE(kArm64I16x8AllTrue, Uminv, kFormatH, 8H);
+ SIMD_REDUCE_OP_CASE(kArm64I8x16AllTrue, Uminv, kFormatB, 16B);
}
return kSuccess;
} // NOLINT(readability/fn_size)
@@ -2930,6 +2956,7 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ B(GetLabel(target));
}
+#if V8_ENABLE_WEBASSEMBLY
void CodeGenerator::AssembleArchTrap(Instruction* instr,
FlagsCondition condition) {
auto ool = zone()->New<WasmOutOfLineTrap>(this, instr);
@@ -2937,6 +2964,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
Condition cc = FlagsConditionToCondition(condition);
__ B(cc, tlabel);
}
+#endif // V8_ENABLE_WEBASSEMBLY
// Assemble boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -2951,6 +2979,29 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ Cset(reg, cc);
}
+void CodeGenerator::AssembleArchSelect(Instruction* instr,
+ FlagsCondition condition) {
+ Arm64OperandConverter i(this, instr);
+ MachineRepresentation rep =
+ LocationOperand::cast(instr->OutputAt(0))->representation();
+ Condition cc = FlagsConditionToCondition(condition);
+ // We don't know how many inputs were consumed by the condition, so we have to
+ // calculate the indices of the last two inputs.
+ DCHECK_GE(instr->InputCount(), 2);
+ size_t true_value_index = instr->InputCount() - 2;
+ size_t false_value_index = instr->InputCount() - 1;
+ if (rep == MachineRepresentation::kFloat32) {
+ __ Fcsel(i.OutputFloat32Register(),
+ i.InputFloat32Register(true_value_index),
+ i.InputFloat32Register(false_value_index), cc);
+ } else {
+ DCHECK_EQ(rep, MachineRepresentation::kFloat64);
+ __ Fcsel(i.OutputFloat64Register(),
+ i.InputFloat64Register(true_value_index),
+ i.InputFloat64Register(false_value_index), cc);
+ }
+}
+
void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
Arm64OperandConverter i(this, instr);
Register input = i.InputRegister32(0);
@@ -2991,7 +3042,6 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
void CodeGenerator::FinishFrame(Frame* frame) {
- frame->AlignFrame(16);
auto call_descriptor = linkage()->GetIncomingDescriptor();
// Save FP registers.
@@ -3000,7 +3050,6 @@ void CodeGenerator::FinishFrame(Frame* frame) {
int saved_count = saves_fp.Count();
if (saved_count != 0) {
DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedV().list());
- DCHECK_EQ(saved_count % 2, 0);
frame->AllocateSavedCalleeRegisterSlots(saved_count *
(kDoubleSize / kSystemPointerSize));
}
@@ -3009,9 +3058,9 @@ void CodeGenerator::FinishFrame(Frame* frame) {
call_descriptor->CalleeSavedRegisters());
saved_count = saves.Count();
if (saved_count != 0) {
- DCHECK_EQ(saved_count % 2, 0);
frame->AllocateSavedCalleeRegisterSlots(saved_count);
}
+ frame->AlignFrame(16);
}
void CodeGenerator::AssembleConstructFrame() {
@@ -3029,9 +3078,9 @@ void CodeGenerator::AssembleConstructFrame() {
CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
call_descriptor->CalleeSavedFPRegisters());
DCHECK_EQ(saves_fp.Count() % 2, 0);
- // The number of slots for returns has to be even to ensure the correct stack
- // alignment.
- const int returns = RoundUp(frame()->GetReturnSlotCount(), 2);
+ // The number of return slots should be even after aligning the Frame.
+ const int returns = frame()->GetReturnSlotCount();
+ DCHECK_EQ(returns % 2, 0);
if (frame_access_state()->has_frame()) {
// Link the frame
@@ -3059,6 +3108,7 @@ void CodeGenerator::AssembleConstructFrame() {
// to allocate the remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
+ __ CodeEntry();
size_t unoptimized_frame_slots = osr_helper()->UnoptimizedFrameSlots();
DCHECK(call_descriptor->IsJSFunctionCall());
DCHECK_EQ(unoptimized_frame_slots % 2, 1);
@@ -3069,6 +3119,7 @@ void CodeGenerator::AssembleConstructFrame() {
ResetSpeculationPoison();
}
+#if V8_ENABLE_WEBASSEMBLY
if (info()->IsWasm() && required_slots > 128) {
// For WebAssembly functions with big frames we have to do the stack
// overflow check before we construct the frame. Otherwise we may not
@@ -3108,6 +3159,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
__ Bind(&done);
}
+#endif // V8_ENABLE_WEBASSEMBLY
// Skip callee-saved slots, which are pushed below.
required_slots -= saves.Count();
@@ -3136,6 +3188,7 @@ void CodeGenerator::AssembleConstructFrame() {
DCHECK_GE(required_slots, 1);
__ Claim(required_slots - 1);
} break;
+#if V8_ENABLE_WEBASSEMBLY
case CallDescriptor::kCallWasmFunction: {
UseScratchRegisterScope temps(tasm());
Register scratch = temps.AcquireX();
@@ -3163,7 +3216,9 @@ void CodeGenerator::AssembleConstructFrame() {
: 1; // C-API function: PC.
__ Claim(required_slots + extra_slots);
} break;
+#endif // V8_ENABLE_WEBASSEMBLY
case CallDescriptor::kCallAddress:
+#if V8_ENABLE_WEBASSEMBLY
if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
UseScratchRegisterScope temps(tasm());
Register scratch = temps.AcquireX();
@@ -3171,6 +3226,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Push(scratch, padreg);
// The additional slot will be used for the saved c_entry_fp.
}
+#endif // V8_ENABLE_WEBASSEMBLY
__ Claim(required_slots);
break;
default:
@@ -3215,13 +3271,13 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// We might need x3 for scratch.
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & x3.bit());
- const int parameter_count =
- static_cast<int>(call_descriptor->StackParameterCount());
+ const int parameter_slots =
+ static_cast<int>(call_descriptor->ParameterSlotCount());
Arm64OperandConverter g(this, nullptr);
- // {aditional_pop_count} is only greater than zero if {parameter_count = 0}.
+ // {additional_pop_count} is only greater than zero if {parameter_slots = 0}.
// Check RawMachineAssembler::PopAndReturn.
- if (parameter_count != 0) {
+ if (parameter_slots != 0) {
if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
} else if (__ emit_debug_code()) {
@@ -3232,12 +3288,12 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
Register argc_reg = x3;
// Functions with JS linkage have at least one parameter (the receiver).
- // If {parameter_count} == 0, it means it is a builtin with
+ // If {parameter_slots} == 0, it means it is a builtin with
// kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
// itself.
const bool drop_jsargs = frame_access_state()->has_frame() &&
call_descriptor->IsJSFunctionCall() &&
- parameter_count != 0;
+ parameter_slots != 0;
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
@@ -3261,25 +3317,25 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
if (drop_jsargs) {
// We must pop all arguments from the stack (including the receiver). This
- // number of arguments is given by max(1 + argc_reg, parameter_count).
+ // number of arguments is given by max(1 + argc_reg, parameter_slots).
Label argc_reg_has_final_count;
__ Add(argc_reg, argc_reg, 1); // Consider the receiver.
- if (parameter_count > 1) {
- __ Cmp(argc_reg, Operand(parameter_count));
+ if (parameter_slots > 1) {
+ __ Cmp(argc_reg, Operand(parameter_slots));
__ B(&argc_reg_has_final_count, ge);
- __ Mov(argc_reg, Operand(parameter_count));
+ __ Mov(argc_reg, Operand(parameter_slots));
__ Bind(&argc_reg_has_final_count);
}
__ DropArguments(argc_reg);
} else if (additional_pop_count->IsImmediate()) {
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
- __ DropArguments(parameter_count + additional_count);
- } else if (parameter_count == 0) {
+ __ DropArguments(parameter_slots + additional_count);
+ } else if (parameter_slots == 0) {
__ DropArguments(g.ToRegister(additional_pop_count));
} else {
- // {additional_pop_count} is guaranteed to be zero if {parameter_count !=
+ // {additional_pop_count} is guaranteed to be zero if {parameter_slots !=
// 0}. Check RawMachineAssembler::PopAndReturn.
- __ DropArguments(parameter_count);
+ __ DropArguments(parameter_slots);
}
__ AssertSpAligned();
__ Ret();
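Note on the new AssembleArchSelect above: it emits a branchless floating-point select (Fcsel) keyed on the already-computed flags condition. A plain C++ analogue of the semantics, not the generated code (names are invented; `cond` stands in for the materialized flags condition):

float SelectFloat32(bool cond, float if_true, float if_false) {
  return cond ? if_true : if_false;  // the role played by FCSEL Sd, Sn, Sm, cond
}

double SelectFloat64(bool cond, double if_true, double if_false) {
  return cond ? if_true : if_false;  // the role played by FCSEL Dd, Dn, Dm, cond
}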
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index ee2c20372ed..7c376168425 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -93,13 +93,13 @@ namespace compiler {
V(Arm64Poke) \
V(Arm64PokePair) \
V(Arm64Peek) \
- V(Arm64Prfm) \
V(Arm64Float32Cmp) \
V(Arm64Float32Add) \
V(Arm64Float32Sub) \
V(Arm64Float32Mul) \
V(Arm64Float32Div) \
V(Arm64Float32Abs) \
+ V(Arm64Float32Abd) \
V(Arm64Float32Neg) \
V(Arm64Float32Sqrt) \
V(Arm64Float32Fnmul) \
@@ -115,6 +115,7 @@ namespace compiler {
V(Arm64Float64Max) \
V(Arm64Float64Min) \
V(Arm64Float64Abs) \
+ V(Arm64Float64Abd) \
V(Arm64Float64Neg) \
V(Arm64Float64Sqrt) \
V(Arm64Float64Fnmul) \
@@ -187,6 +188,7 @@ namespace compiler {
V(Arm64F64x2Add) \
V(Arm64F64x2Sub) \
V(Arm64F64x2Mul) \
+ V(Arm64F64x2MulElement) \
V(Arm64F64x2Div) \
V(Arm64F64x2Min) \
V(Arm64F64x2Max) \
@@ -212,9 +214,9 @@ namespace compiler {
V(Arm64F32x4RecipApprox) \
V(Arm64F32x4RecipSqrtApprox) \
V(Arm64F32x4Add) \
- V(Arm64F32x4AddHoriz) \
V(Arm64F32x4Sub) \
V(Arm64F32x4Mul) \
+ V(Arm64F32x4MulElement) \
V(Arm64F32x4Div) \
V(Arm64F32x4Min) \
V(Arm64F32x4Max) \
@@ -251,7 +253,6 @@ namespace compiler {
V(Arm64I32x4Shl) \
V(Arm64I32x4ShrS) \
V(Arm64I32x4Add) \
- V(Arm64I32x4AddHoriz) \
V(Arm64I32x4Sub) \
V(Arm64I32x4Mul) \
V(Arm64I32x4Mla) \
@@ -283,7 +284,6 @@ namespace compiler {
V(Arm64I16x8SConvertI32x4) \
V(Arm64I16x8Add) \
V(Arm64I16x8AddSatS) \
- V(Arm64I16x8AddHoriz) \
V(Arm64I16x8Sub) \
V(Arm64I16x8SubSatS) \
V(Arm64I16x8Mul) \
@@ -319,7 +319,6 @@ namespace compiler {
V(Arm64I8x16AddSatS) \
V(Arm64I8x16Sub) \
V(Arm64I8x16SubSatS) \
- V(Arm64I8x16Mul) \
V(Arm64I8x16Mla) \
V(Arm64I8x16Mls) \
V(Arm64I8x16MinS) \
@@ -339,7 +338,6 @@ namespace compiler {
V(Arm64I8x16RoundingAverageU) \
V(Arm64I8x16Abs) \
V(Arm64I8x16BitMask) \
- V(Arm64SignSelect) \
V(Arm64S128Const) \
V(Arm64S128Zero) \
V(Arm64S128Dup) \
@@ -378,10 +376,10 @@ namespace compiler {
V(Arm64S8x4Reverse) \
V(Arm64S8x2Reverse) \
V(Arm64V128AnyTrue) \
- V(Arm64V64x2AllTrue) \
- V(Arm64V32x4AllTrue) \
- V(Arm64V16x8AllTrue) \
- V(Arm64V8x16AllTrue) \
+ V(Arm64I64x2AllTrue) \
+ V(Arm64I32x4AllTrue) \
+ V(Arm64I16x8AllTrue) \
+ V(Arm64I8x16AllTrue) \
V(Arm64LoadSplat) \
V(Arm64LoadLane) \
V(Arm64StoreLane) \
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index a384a844793..520db21dde0 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -92,6 +92,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Float32Mul:
case kArm64Float32Div:
case kArm64Float32Abs:
+ case kArm64Float32Abd:
case kArm64Float32Neg:
case kArm64Float32Sqrt:
case kArm64Float32Fnmul:
@@ -106,6 +107,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Float64Max:
case kArm64Float64Min:
case kArm64Float64Abs:
+ case kArm64Float64Abd:
case kArm64Float64Neg:
case kArm64Float64Sqrt:
case kArm64Float64Fnmul:
@@ -152,6 +154,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F64x2Add:
case kArm64F64x2Sub:
case kArm64F64x2Mul:
+ case kArm64F64x2MulElement:
case kArm64F64x2Div:
case kArm64F64x2Min:
case kArm64F64x2Max:
@@ -177,9 +180,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F32x4RecipApprox:
case kArm64F32x4RecipSqrtApprox:
case kArm64F32x4Add:
- case kArm64F32x4AddHoriz:
case kArm64F32x4Sub:
case kArm64F32x4Mul:
+ case kArm64F32x4MulElement:
case kArm64F32x4Div:
case kArm64F32x4Min:
case kArm64F32x4Max:
@@ -220,7 +223,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I32x4Shl:
case kArm64I32x4ShrS:
case kArm64I32x4Add:
- case kArm64I32x4AddHoriz:
case kArm64I32x4Sub:
case kArm64I32x4Mul:
case kArm64I32x4Mla:
@@ -252,7 +254,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I16x8SConvertI32x4:
case kArm64I16x8Add:
case kArm64I16x8AddSatS:
- case kArm64I16x8AddHoriz:
case kArm64I16x8Sub:
case kArm64I16x8SubSatS:
case kArm64I16x8Mul:
@@ -288,7 +289,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I8x16AddSatS:
case kArm64I8x16Sub:
case kArm64I8x16SubSatS:
- case kArm64I8x16Mul:
case kArm64I8x16Mla:
case kArm64I8x16Mls:
case kArm64I8x16MinS:
@@ -308,7 +308,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I8x16RoundingAverageU:
case kArm64I8x16Abs:
case kArm64I8x16BitMask:
- case kArm64SignSelect:
case kArm64S128Const:
case kArm64S128Zero:
case kArm64S128Dup:
@@ -347,10 +346,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64S8x4Reverse:
case kArm64S8x2Reverse:
case kArm64V128AnyTrue:
- case kArm64V64x2AllTrue:
- case kArm64V32x4AllTrue:
- case kArm64V16x8AllTrue:
- case kArm64V8x16AllTrue:
+ case kArm64I64x2AllTrue:
+ case kArm64I32x4AllTrue:
+ case kArm64I16x8AllTrue:
+ case kArm64I8x16AllTrue:
case kArm64TestAndBranch32:
case kArm64TestAndBranch:
case kArm64CompareAndBranch32:
@@ -394,7 +393,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64StrCompressTagged:
case kArm64DmbIsh:
case kArm64DsbIsb:
- case kArm64Prfm:
case kArm64StoreLane:
return kHasSideEffect;
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index 091272ac4e2..d9e388b4b5e 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -9,6 +9,7 @@
#include "src/common/globals.h"
#include "src/compiler/backend/instruction-codes.h"
#include "src/compiler/backend/instruction-selector-impl.h"
+#include "src/compiler/backend/instruction-selector.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -212,14 +213,6 @@ void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
g.UseUniqueRegister(node->InputAt(1)));
}
-void VisitRRRR(InstructionSelector* selector, InstructionCode opcode,
- Node* node) {
- Arm64OperandGenerator g(selector);
- selector->Emit(
- opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
-}
-
struct ExtendingLoadMatcher {
ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
: matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
@@ -459,7 +452,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, ImmediateMode operand_mode,
FlagsContinuation* cont) {
Arm64OperandGenerator g(selector);
- InstructionOperand inputs[3];
+ InstructionOperand inputs[5];
size_t input_count = 0;
InstructionOperand outputs[1];
size_t output_count = 0;
@@ -514,6 +507,11 @@ void VisitBinop(InstructionSelector* selector, Node* node,
outputs[output_count++] = g.DefineAsRegister(node);
}
+ if (cont->IsSelect()) {
+ inputs[input_count++] = g.UseRegister(cont->true_value());
+ inputs[input_count++] = g.UseRegister(cont->false_value());
+ }
+
DCHECK_NE(0u, input_count);
DCHECK((output_count != 0) || IsComparisonField::decode(properties));
DCHECK_GE(arraysize(inputs), input_count);
@@ -566,7 +564,7 @@ int32_t LeftShiftForReducedMultiply(Matcher* m) {
void InstructionSelector::VisitStackSlot(Node* node) {
StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
- int slot = frame_->AllocateSpillSlot(rep.size());
+ int slot = frame_->AllocateSpillSlot(rep.size(), rep.alignment());
OperandGenerator g(this);
Emit(kArchStackSlot, g.DefineAsRegister(node),
@@ -629,26 +627,6 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
selector->Emit(opcode, arraysize(outputs), outputs, input_count, inputs);
}
-void InstructionSelector::VisitPrefetchTemporal(Node* node) {
- Arm64OperandGenerator g(this);
- InstructionOperand inputs[2] = {g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1))};
- InstructionCode opcode = kArm64Prfm;
- opcode |= AddressingModeField::encode(kMode_MRR);
- opcode |= MiscField::encode(PLDL1KEEP);
- Emit(opcode, 0, nullptr, 2, inputs);
-}
-
-void InstructionSelector::VisitPrefetchNonTemporal(Node* node) {
- Arm64OperandGenerator g(this);
- InstructionOperand inputs[2] = {g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1))};
- InstructionCode opcode = kArm64Prfm;
- opcode |= AddressingModeField::encode(kMode_MRR);
- opcode |= MiscField::encode(PLDL1STRM);
- Emit(opcode, 0, nullptr, 2, inputs);
-}
-
namespace {
// Manually add base and index into a register to get the actual address.
// This should be used prior to instructions that only support
@@ -1473,8 +1451,6 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
V(BitcastFloat64ToInt64, kArm64U64MoveFloat64) \
V(BitcastInt32ToFloat32, kArm64Float64MoveU64) \
V(BitcastInt64ToFloat64, kArm64Float64MoveU64) \
- V(Float32Abs, kArm64Float32Abs) \
- V(Float64Abs, kArm64Float64Abs) \
V(Float32Sqrt, kArm64Float32Sqrt) \
V(Float64Sqrt, kArm64Float64Sqrt) \
V(Float32RoundDown, kArm64Float32RoundDown) \
@@ -2019,7 +1995,8 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
// zero-extension is a no-op.
return true;
}
- case IrOpcode::kLoad: {
+ case IrOpcode::kLoad:
+ case IrOpcode::kLoadImmutable: {
// As for the operations above, a 32-bit load will implicitly clear the
// top 32 bits of the destination register.
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
@@ -2159,7 +2136,15 @@ namespace {
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont) {
- selector->EmitWithContinuation(opcode, left, right, cont);
+ if (cont->IsSelect()) {
+ Arm64OperandGenerator g(selector);
+ InstructionOperand inputs[] = { left, right,
+ g.UseRegister(cont->true_value()),
+ g.UseRegister(cont->false_value()) };
+ selector->EmitWithContinuation(opcode, 0, nullptr, 4, inputs, cont);
+ } else {
+ selector->EmitWithContinuation(opcode, left, right, cont);
+ }
}
// This function checks whether we can convert:
@@ -2846,8 +2831,8 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
g.UseRegister(value), g.Label(cont->true_block()),
g.Label(cont->false_block()));
} else {
- EmitWithContinuation(cont->Encode(kArm64Tst32), g.UseRegister(value),
- g.UseRegister(value), cont);
+ VisitCompare(this, cont->Encode(kArm64Tst32), g.UseRegister(value),
+ g.UseRegister(value), cont);
}
}
@@ -3068,6 +3053,30 @@ void InstructionSelector::VisitFloat32Mul(Node* node) {
return VisitRRR(this, kArm64Float32Mul, node);
}
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+ Arm64OperandGenerator g(this);
+ Node* in = node->InputAt(0);
+ if (in->opcode() == IrOpcode::kFloat32Sub && CanCover(node, in)) {
+ Emit(kArm64Float32Abd, g.DefineAsRegister(node),
+ g.UseRegister(in->InputAt(0)), g.UseRegister(in->InputAt(1)));
+ return;
+ }
+
+ return VisitRR(this, kArm64Float32Abs, node);
+}
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+ Arm64OperandGenerator g(this);
+ Node* in = node->InputAt(0);
+ if (in->opcode() == IrOpcode::kFloat64Sub && CanCover(node, in)) {
+ Emit(kArm64Float64Abd, g.DefineAsRegister(node),
+ g.UseRegister(in->InputAt(0)), g.UseRegister(in->InputAt(1)));
+ return;
+ }
+
+ return VisitRR(this, kArm64Float64Abs, node);
+}
+
void InstructionSelector::VisitFloat32Equal(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
VisitFloat32Compare(this, node, &cont);
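Note: the two visitors added above fuse Float32Abs(Float32Sub(a, b)) and the Float64 equivalent into a single Fabd when the selector can cover the subtraction. A scalar restatement of the matched pattern, purely illustrative:

#include <cmath>

float AbsDiff32(float a, float b) {
  return std::fabs(a - b);  // selected as one FABD instead of FSUB + FABS
}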
@@ -3442,10 +3451,10 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16BitMask, kArm64I8x16BitMask) \
V(S128Not, kArm64S128Not) \
V(V128AnyTrue, kArm64V128AnyTrue) \
- V(V64x2AllTrue, kArm64V64x2AllTrue) \
- V(V32x4AllTrue, kArm64V32x4AllTrue) \
- V(V16x8AllTrue, kArm64V16x8AllTrue) \
- V(V8x16AllTrue, kArm64V8x16AllTrue)
+ V(I64x2AllTrue, kArm64I64x2AllTrue) \
+ V(I32x4AllTrue, kArm64I32x4AllTrue) \
+ V(I16x8AllTrue, kArm64I16x8AllTrue) \
+ V(I8x16AllTrue, kArm64I8x16AllTrue)
#define SIMD_SHIFT_OP_LIST(V) \
V(I64x2Shl, 64) \
@@ -3464,7 +3473,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
#define SIMD_BINOP_LIST(V) \
V(F64x2Add, kArm64F64x2Add) \
V(F64x2Sub, kArm64F64x2Sub) \
- V(F64x2Mul, kArm64F64x2Mul) \
V(F64x2Div, kArm64F64x2Div) \
V(F64x2Min, kArm64F64x2Min) \
V(F64x2Max, kArm64F64x2Max) \
@@ -3473,9 +3481,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F64x2Lt, kArm64F64x2Lt) \
V(F64x2Le, kArm64F64x2Le) \
V(F32x4Add, kArm64F32x4Add) \
- V(F32x4AddHoriz, kArm64F32x4AddHoriz) \
V(F32x4Sub, kArm64F32x4Sub) \
- V(F32x4Mul, kArm64F32x4Mul) \
V(F32x4Div, kArm64F32x4Div) \
V(F32x4Min, kArm64F32x4Min) \
V(F32x4Max, kArm64F32x4Max) \
@@ -3489,7 +3495,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I64x2Ne, kArm64I64x2Ne) \
V(I64x2GtS, kArm64I64x2GtS) \
V(I64x2GeS, kArm64I64x2GeS) \
- V(I32x4AddHoriz, kArm64I32x4AddHoriz) \
V(I32x4Mul, kArm64I32x4Mul) \
V(I32x4MinS, kArm64I32x4MinS) \
V(I32x4MaxS, kArm64I32x4MaxS) \
@@ -3504,7 +3509,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I32x4DotI16x8S, kArm64I32x4DotI16x8S) \
V(I16x8SConvertI32x4, kArm64I16x8SConvertI32x4) \
V(I16x8AddSatS, kArm64I16x8AddSatS) \
- V(I16x8AddHoriz, kArm64I16x8AddHoriz) \
V(I16x8SubSatS, kArm64I16x8SubSatS) \
V(I16x8Mul, kArm64I16x8Mul) \
V(I16x8MinS, kArm64I16x8MinS) \
@@ -3522,10 +3526,11 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8GeU, kArm64I16x8GeU) \
V(I16x8RoundingAverageU, kArm64I16x8RoundingAverageU) \
V(I16x8Q15MulRSatS, kArm64I16x8Q15MulRSatS) \
+ V(I8x16Add, kArm64I8x16Add) \
+ V(I8x16Sub, kArm64I8x16Sub) \
V(I8x16SConvertI16x8, kArm64I8x16SConvertI16x8) \
V(I8x16AddSatS, kArm64I8x16AddSatS) \
V(I8x16SubSatS, kArm64I8x16SubSatS) \
- V(I8x16Mul, kArm64I8x16Mul) \
V(I8x16MinS, kArm64I8x16MinS) \
V(I8x16MaxS, kArm64I8x16MaxS) \
V(I8x16Eq, kArm64I8x16Eq) \
@@ -3621,17 +3626,84 @@ SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
#undef SIMD_BINOP_LIST
-#define VISIT_SIGN_SELECT(NAME, SIZE) \
- void InstructionSelector::Visit##NAME(Node* node) { \
- InstructionCode opcode = kArm64SignSelect; \
- opcode |= LaneSizeField::encode(SIZE); \
- VisitRRRR(this, opcode, node); \
+using ShuffleMatcher =
+ ValueMatcher<S128ImmediateParameter, IrOpcode::kI8x16Shuffle>;
+using BinopWithShuffleMatcher = BinopMatcher<ShuffleMatcher, ShuffleMatcher>;
+
+namespace {
+// Struct holding the result of pattern-matching a mul+dup.
+struct MulWithDupResult {
+ Node* input; // Node holding the vector elements.
+ Node* dup_node; // Node holding the lane to multiply.
+ int index;
+ // Pattern-match is successful if dup_node is set.
+ explicit operator bool() const { return dup_node != nullptr; }
+};
+
+template <int LANES>
+MulWithDupResult TryMatchMulWithDup(Node* node) {
+ // Pattern match:
+ // f32x4.mul(x, shuffle(x, y, indices)) => f32x4.mul(x, y, laneidx)
+ // f64x2.mul(x, shuffle(x, y, indices)) => f64x2.mul(x, y, laneidx)
+ // where shuffle(x, y, indices) = dup(x[laneidx]) or dup(y[laneidx])
+ // f32x4.mul and f64x2.mul are commutative, so use BinopMatcher.
+ Node* input = nullptr;
+ Node* dup_node = nullptr;
+
+ int index = 0;
+#if V8_ENABLE_WEBASSEMBLY
+ BinopWithShuffleMatcher m = BinopWithShuffleMatcher(node);
+ ShuffleMatcher left = m.left();
+ ShuffleMatcher right = m.right();
+
+ // TODO(zhin): We can canonicalize first to avoid checking index < LANES.
+ // e.g. shuffle(x, y, [16, 17, 18, 19...]) => shuffle(y, y, [0, 1, 2,
+ // 3]...). But doing so can mutate the inputs of the shuffle node without
+ // updating the shuffle immediates themselves. Fix that before we
+ // canonicalize here. We don't want CanCover here because in many use cases,
+ // the shuffle is generated early in the function, but the f32x4.mul happens
+ // in a loop, which won't cover the shuffle since they are different basic
+ // blocks.
+ if (left.HasResolvedValue() && wasm::SimdShuffle::TryMatchSplat<LANES>(
+ left.ResolvedValue().data(), &index)) {
+ dup_node = left.node()->InputAt(index < LANES ? 0 : 1);
+ input = right.node();
+ } else if (right.HasResolvedValue() &&
+ wasm::SimdShuffle::TryMatchSplat<LANES>(
+ right.ResolvedValue().data(), &index)) {
+ dup_node = right.node()->InputAt(index < LANES ? 0 : 1);
+ input = left.node();
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+
+ // Canonicalization would get rid of this too.
+ index %= LANES;
+
+ return {input, dup_node, index};
+}
+} // namespace
+
+void InstructionSelector::VisitF32x4Mul(Node* node) {
+ if (MulWithDupResult result = TryMatchMulWithDup<4>(node)) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64F32x4MulElement, g.DefineAsRegister(node),
+ g.UseRegister(result.input), g.UseRegister(result.dup_node),
+ g.UseImmediate(result.index));
+ } else {
+ return VisitRRR(this, kArm64F32x4Mul, node);
}
+}
-VISIT_SIGN_SELECT(I8x16SignSelect, 8)
-VISIT_SIGN_SELECT(I16x8SignSelect, 16)
-VISIT_SIGN_SELECT(I32x4SignSelect, 32)
-VISIT_SIGN_SELECT(I64x2SignSelect, 64)
+void InstructionSelector::VisitF64x2Mul(Node* node) {
+ if (MulWithDupResult result = TryMatchMulWithDup<2>(node)) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64F64x2MulElement, g.DefineAsRegister(node),
+ g.UseRegister(result.input), g.UseRegister(result.dup_node),
+ g.UseImmediate(result.index));
+ } else {
+ return VisitRRR(this, kArm64F64x2Mul, node);
+ }
+}
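For reference, a minimal standalone sketch of the dup check that TryMatchMulWithDup delegates to wasm::SimdShuffle::TryMatchSplat; the helper name and layout here are hypothetical, not from the V8 sources. A 16-byte shuffle immediate duplicates one 32-bit lane exactly when every 4-byte group selects the same source lane:

#include <cstdint>

// Hypothetical helper: returns true and sets *lane when the i8x16.shuffle
// immediate replicates a single 32-bit lane of the concatenated inputs
// (lanes 0..3 from the first input, 4..7 from the second), i.e. the shuffle
// is a dup and the multiply can use a by-element form.
bool IsSplatShuffle32x4(const uint8_t bytes[16], int* lane) {
  int first = bytes[0] / 4;
  for (int l = 0; l < 4; ++l) {
    for (int b = 0; b < 4; ++b) {
      if (bytes[4 * l + b] != 4 * first + b) return false;
    }
  }
  *lane = first;
  return true;
}

When the match succeeds, the visitors above emit kArm64F32x4MulElement / kArm64F64x2MulElement with the lane index as an immediate instead of materializing the dup.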
void InstructionSelector::VisitI64x2Mul(Node* node) {
Arm64OperandGenerator g(this);
@@ -3664,7 +3736,6 @@ void InstructionSelector::VisitI64x2Mul(Node* node) {
VISIT_SIMD_ADD(I32x4)
VISIT_SIMD_ADD(I16x8)
-VISIT_SIMD_ADD(I8x16)
#undef VISIT_SIMD_ADD
#define VISIT_SIMD_SUB(Type) \
@@ -3684,7 +3755,6 @@ VISIT_SIMD_ADD(I8x16)
VISIT_SIMD_SUB(I32x4)
VISIT_SIMD_SUB(I16x8)
-VISIT_SIMD_SUB(I8x16)
#undef VISIT_SIMD_SUB
void InstructionSelector::VisitS128Select(Node* node) {
@@ -3707,6 +3777,7 @@ VISIT_SIMD_QFMOP(F32x4Qfma)
VISIT_SIMD_QFMOP(F32x4Qfms)
#undef VISIT_SIMD_QFMOP
+#if V8_ENABLE_WEBASSEMBLY
namespace {
struct ShuffleEntry {
@@ -3854,6 +3925,9 @@ void InstructionSelector::VisitI8x16Shuffle(Node* node) {
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 8)),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 12)));
}
+#else
+void InstructionSelector::VisitI8x16Shuffle(Node* node) { UNREACHABLE(); }
+#endif // V8_ENABLE_WEBASSEMBLY
void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
VisitRR(this, kArm64Sxtb32, node);
@@ -3983,7 +4057,9 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kUint32DivIsSafe |
MachineOperatorBuilder::kWord32ReverseBits |
MachineOperatorBuilder::kWord64ReverseBits |
- MachineOperatorBuilder::kSatConversionIsSafe;
+ MachineOperatorBuilder::kSatConversionIsSafe |
+ MachineOperatorBuilder::kFloat32Select |
+ MachineOperatorBuilder::kFloat64Select;
}
// static
diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index e9a39f74a9e..3ed9eaabf14 100644
--- a/deps/v8/src/compiler/backend/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -13,7 +13,6 @@
#include "src/compiler/globals.h"
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
-#include "src/compiler/wasm-compiler.h"
#include "src/diagnostics/eh-frame.h"
#include "src/execution/frames.h"
#include "src/logging/counters.h"
@@ -417,23 +416,29 @@ void CodeGenerator::AssembleCode() {
std::sort(deoptimization_exits_.begin(), deoptimization_exits_.end(), cmp);
}
- for (DeoptimizationExit* exit : deoptimization_exits_) {
- if (exit->emitted()) continue;
- if (Deoptimizer::kSupportsFixedDeoptExitSizes) {
- exit->set_deoptimization_id(next_deoptimization_id_++);
- }
- result_ = AssembleDeoptimizerCall(exit);
- if (result_ != kSuccess) return;
-
- // UpdateDeoptimizationInfo expects lazy deopts to be visited in pc_offset
- // order, which is always the case since they are added to
- // deoptimization_exits_ in that order, and the optional sort operation
- // above preserves that order.
- if (exit->kind() == DeoptimizeKind::kLazy) {
- int trampoline_pc = exit->label()->pos();
- last_updated = safepoints()->UpdateDeoptimizationInfo(
- exit->pc_offset(), trampoline_pc, last_updated,
- exit->deoptimization_id());
+ {
+#ifdef V8_TARGET_ARCH_PPC64
+ v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+ tasm());
+#endif
+ for (DeoptimizationExit* exit : deoptimization_exits_) {
+ if (exit->emitted()) continue;
+ if (Deoptimizer::kSupportsFixedDeoptExitSizes) {
+ exit->set_deoptimization_id(next_deoptimization_id_++);
+ }
+ result_ = AssembleDeoptimizerCall(exit);
+ if (result_ != kSuccess) return;
+
+ // UpdateDeoptimizationInfo expects lazy deopts to be visited in pc_offset
+ // order, which is always the case since they are added to
+ // deoptimization_exits_ in that order, and the optional sort operation
+ // above preserves that order.
+ if (exit->kind() == DeoptimizeKind::kLazy) {
+ int trampoline_pc = exit->label()->pos();
+ last_updated = safepoints()->UpdateDeoptimizationInfo(
+ exit->pc_offset(), trampoline_pc, last_updated,
+ exit->deoptimization_id());
+ }
}
}
@@ -874,8 +879,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
AssembleArchBoolean(instr, condition);
break;
}
+ case kFlags_select: {
+ AssembleArchSelect(instr, condition);
+ break;
+ }
case kFlags_trap: {
+#if V8_ENABLE_WEBASSEMBLY
AssembleArchTrap(instr, condition);
+#else
+ UNREACHABLE();
+#endif // V8_ENABLE_WEBASSEMBLY
break;
}
case kFlags_none: {
@@ -906,7 +919,13 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
source_position, false);
if (FLAG_code_comments) {
OptimizedCompilationInfo* info = this->info();
- if (!info->IsOptimizing() && !info->IsWasm()) return;
+ if (!info->IsOptimizing()) {
+#if V8_ENABLE_WEBASSEMBLY
+ if (!info->IsWasm()) return;
+#else
+ return;
+#endif // V8_ENABLE_WEBASSEMBLY
+ }
std::ostringstream buffer;
buffer << "-- ";
// Turbolizer only needs the source position, as it can reconstruct
@@ -937,12 +956,15 @@ bool CodeGenerator::GetSlotAboveSPBeforeTailCall(Instruction* instr,
}
StubCallMode CodeGenerator::DetermineStubCallMode() const {
+#if V8_ENABLE_WEBASSEMBLY
CodeKind code_kind = info()->code_kind();
- return (code_kind == CodeKind::WASM_FUNCTION ||
- code_kind == CodeKind::WASM_TO_CAPI_FUNCTION ||
- code_kind == CodeKind::WASM_TO_JS_FUNCTION)
- ? StubCallMode::kCallWasmRuntimeStub
- : StubCallMode::kCallCodeObject;
+ if (code_kind == CodeKind::WASM_FUNCTION ||
+ code_kind == CodeKind::WASM_TO_CAPI_FUNCTION ||
+ code_kind == CodeKind::WASM_TO_JS_FUNCTION) {
+ return StubCallMode::kCallWasmRuntimeStub;
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ return StubCallMode::kCallCodeObject;
}
void CodeGenerator::AssembleGaps(Instruction* instr) {
@@ -1176,14 +1198,16 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
height);
break;
}
+#if V8_ENABLE_WEBASSEMBLY
case FrameStateType::kJSToWasmBuiltinContinuation: {
const JSToWasmFrameStateDescriptor* js_to_wasm_descriptor =
static_cast<const JSToWasmFrameStateDescriptor*>(descriptor);
translations_.BeginJSToWasmBuiltinContinuationFrame(
bailout_id, shared_info_id, height,
- js_to_wasm_descriptor->return_type());
+ js_to_wasm_descriptor->return_kind());
break;
}
+#endif // V8_ENABLE_WEBASSEMBLY
case FrameStateType::kJavaScriptBuiltinContinuation: {
translations_.BeginJavaScriptBuiltinContinuationFrame(
bailout_id, shared_info_id, height);
diff --git a/deps/v8/src/compiler/backend/code-generator.h b/deps/v8/src/compiler/backend/code-generator.h
index 7cead5dbde1..1d1bda743b5 100644
--- a/deps/v8/src/compiler/backend/code-generator.h
+++ b/deps/v8/src/compiler/backend/code-generator.h
@@ -261,7 +261,10 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
void AssembleArchDeoptBranch(Instruction* instr, BranchInfo* branch);
void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
+ void AssembleArchSelect(Instruction* instr, FlagsCondition condition);
+#if V8_ENABLE_WEBASSEMBLY
void AssembleArchTrap(Instruction* instr, FlagsCondition condition);
+#endif // V8_ENABLE_WEBASSEMBLY
void AssembleArchBinarySearchSwitchRange(Register input, RpoNumber def_block,
std::pair<int32_t, Label*>* begin,
std::pair<int32_t, Label*>* end);
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index 77a4d92b960..ac5d589790e 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -18,8 +18,11 @@
#include "src/execution/frames.h"
#include "src/heap/memory-chunk.h"
#include "src/objects/smi.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -65,11 +68,13 @@ class IA32OperandConverter : public InstructionOperandConverter {
Immediate ToImmediate(InstructionOperand* operand) {
Constant constant = ToConstant(operand);
+#if V8_ENABLE_WEBASSEMBLY
if (constant.type() == Constant::kInt32 &&
RelocInfo::IsWasmReference(constant.rmode())) {
return Immediate(static_cast<Address>(constant.ToInt32()),
constant.rmode());
}
+#endif // V8_ENABLE_WEBASSEMBLY
switch (constant.type()) {
case Constant::kInt32:
return Immediate(constant.ToInt32());
@@ -257,18 +262,26 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
: OutOfLineCode(gen),
result_(result),
input_(input),
+#if V8_ENABLE_WEBASSEMBLY
stub_mode_(stub_mode),
+#endif // V8_ENABLE_WEBASSEMBLY
isolate_(gen->isolate()),
- zone_(gen->zone()) {}
+ zone_(gen->zone()) {
+ }
void Generate() final {
__ AllocateStackSpace(kDoubleSize);
__ Movsd(MemOperand(esp, 0), input_);
+#if V8_ENABLE_WEBASSEMBLY
if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
// A direct call to a wasm runtime stub defined in this module.
// Just encode the stub index. This will be patched when the code
// is added to the native module and copied into wasm code space.
__ wasm_call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
+#else
+ // For balance.
+ if (false) {
+#endif // V8_ENABLE_WEBASSEMBLY
} else if (tasm()->options().inline_offheap_trampolines) {
__ CallBuiltin(Builtins::kDoubleToI);
} else {
@@ -281,7 +294,9 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
private:
Register const result_;
XMMRegister const input_;
+#if V8_ENABLE_WEBASSEMBLY
StubCallMode stub_mode_;
+#endif // V8_ENABLE_WEBASSEMBLY
Isolate* isolate_;
Zone* zone_;
};
@@ -298,8 +313,11 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
+#if V8_ENABLE_WEBASSEMBLY
stub_mode_(stub_mode),
- zone_(gen->zone()) {}
+#endif // V8_ENABLE_WEBASSEMBLY
+ zone_(gen->zone()) {
+ }
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -316,6 +334,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
__ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode);
+#if V8_ENABLE_WEBASSEMBLY
} else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
// A direct call to a wasm runtime stub defined in this module.
// Just encode the stub index. This will be patched when the code
@@ -323,6 +342,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode, wasm::WasmCode::kRecordWrite);
} else {
+#endif // V8_ENABLE_WEBASSEMBLY
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode);
}
@@ -335,7 +355,9 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
+#if V8_ENABLE_WEBASSEMBLY
StubCallMode const stub_mode_;
+#endif // V8_ENABLE_WEBASSEMBLY
Zone* zone_;
};
@@ -536,31 +558,6 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
} \
} while (false)
-// Helper macro to help define signselect opcodes. This should not be used for
-// i16x8.signselect, because there is no native word-sized blend instruction.
-// We choose a helper macro here instead of a macro-assembler function because
-// the blend instruction requires xmm0 as an implicit argument, and the codegen
-// relies on xmm0 being the scratch register, so we can freely overwrite it as
-// required.
-#define ASSEMBLE_SIMD_SIGN_SELECT(BLEND_OP) \
- do { \
- XMMRegister dst = i.OutputSimd128Register(); \
- XMMRegister src1 = i.InputSimd128Register(0); \
- XMMRegister src2 = i.InputSimd128Register(1); \
- XMMRegister mask = i.InputSimd128Register(2); \
- if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope avx_scope(tasm(), AVX); \
- __ v##BLEND_OP(dst, src1, src2, mask); \
- } else { \
- CpuFeatureScope scope(tasm(), SSE4_1); \
- DCHECK_EQ(dst, src1); \
- DCHECK_EQ(kScratchDoubleReg, xmm0); \
- if (mask != xmm0) { \
- __ movaps(xmm0, mask); \
- } \
- __ BLEND_OP(dst, src2); \
- } \
- } while (false)
void CodeGenerator::AssembleDeconstructFrame() {
__ mov(esp, ebp);
@@ -614,14 +611,14 @@ bool VerifyOutputOfAtomicPairInstr(IA32OperandConverter* converter,
} // namespace
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
- int first_unused_stack_slot) {
+ int first_unused_slot_offset) {
CodeGenerator::PushTypeFlags flags(kImmediatePush | kScalarPush);
ZoneVector<MoveOperands*> pushes(zone());
GetPushCompatibleMoves(instr, flags, &pushes);
if (!pushes.empty() &&
(LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
- first_unused_stack_slot)) {
+ first_unused_slot_offset)) {
IA32OperandConverter g(this, instr);
for (auto move : pushes) {
LocationOperand destination_location(
@@ -636,7 +633,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand source_location(LocationOperand::cast(source));
__ push(source_location.GetRegister());
} else if (source.IsImmediate()) {
- __ Push(Immediate(ImmediateOperand::cast(source).inline_value()));
+ __ Push(Immediate(ImmediateOperand::cast(source).inline_int32_value()));
} else {
// Pushes of non-scalar data types is not supported.
UNIMPLEMENTED();
@@ -646,13 +643,13 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
}
}
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
- first_unused_stack_slot, false);
+ first_unused_slot_offset, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
- int first_unused_stack_slot) {
+ int first_unused_slot_offset) {
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
- first_unused_stack_slot);
+ first_unused_slot_offset);
}
// Check that {kJavaScriptCallCodeStartRegister} is correct.
@@ -732,6 +729,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
+#if V8_ENABLE_WEBASSEMBLY
case kArchCallWasmFunction: {
if (HasImmediateInput(instr, 0)) {
Constant constant = i.ToConstant(instr->InputAt(0));
@@ -757,16 +755,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallCodeObject: {
+ case kArchTailCallWasm: {
if (HasImmediateInput(instr, 0)) {
- Handle<Code> code = i.InputCode(0);
- __ Jump(code, RelocInfo::CODE_TARGET);
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt32());
+ __ jmp(wasm_code, constant.rmode());
} else {
Register reg = i.InputRegister(0);
- DCHECK_IMPLIES(
- instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
- reg == kJavaScriptCallCodeStartRegister);
- __ LoadCodeObjectEntry(reg, reg);
if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
@@ -777,13 +772,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->SetFrameAccessToDefault();
break;
}
- case kArchTailCallWasm: {
+#endif // V8_ENABLE_WEBASSEMBLY
+ case kArchTailCallCodeObject: {
if (HasImmediateInput(instr, 0)) {
- Constant constant = i.ToConstant(instr->InputAt(0));
- Address wasm_code = static_cast<Address>(constant.ToInt32());
- __ jmp(wasm_code, constant.rmode());
+ Handle<Code> code = i.InputCode(0);
+ __ Jump(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ LoadCodeObjectEntry(reg, reg);
if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
@@ -861,6 +860,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
Label return_location;
+#if V8_ENABLE_WEBASSEMBLY
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
// Put the return address in a stack slot.
Register scratch = eax;
@@ -874,6 +874,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
scratch);
__ pop(scratch);
}
+#endif // V8_ENABLE_WEBASSEMBLY
if (HasImmediateInput(instr, 0)) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_parameters);
@@ -882,9 +883,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ CallCFunction(func, num_parameters);
}
__ bind(&return_location);
+#if V8_ENABLE_WEBASSEMBLY
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
RecordSafepoint(instr->reference_map());
}
+#endif // V8_ENABLE_WEBASSEMBLY
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
// pointer in CallCFunction. However, for certain architectures (e.g.
@@ -1485,7 +1488,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister tmp = i.TempSimd128Register(0);
__ pcmpeqd(tmp, tmp);
__ psrlq(tmp, 1);
- __ andpd(i.OutputDoubleRegister(), tmp);
+ __ andps(i.OutputDoubleRegister(), tmp);
break;
}
case kSSEFloat64Neg: {
@@ -1493,7 +1496,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister tmp = i.TempSimd128Register(0);
__ pcmpeqd(tmp, tmp);
__ psllq(tmp, 63);
- __ xorpd(i.OutputDoubleRegister(), tmp);
+ __ xorps(i.OutputDoubleRegister(), tmp);
break;
}
case kSSEFloat64Sqrt:
@@ -1654,7 +1657,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEFloat64SilenceNaN:
- __ xorpd(kScratchDoubleReg, kScratchDoubleReg);
+ __ xorps(kScratchDoubleReg, kScratchDoubleReg);
__ subsd(i.InputDoubleRegister(0), kScratchDoubleReg);
break;
case kIA32Movsxbl:
@@ -2022,15 +2025,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32F64x2Pmin: {
- XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- __ Minpd(dst, dst, i.InputSimd128Register(1));
+ __ Minpd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kIA32F64x2Pmax: {
- XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- __ Maxpd(dst, dst, i.InputSimd128Register(1));
+ __ Maxpd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kIA32F64x2Round: {
@@ -2117,34 +2118,32 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I16x8ExtMulLowI8x16S: {
- __ I16x8ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), kScratchDoubleReg,
- /*low=*/true, /*is_signed=*/true);
+ __ I16x8ExtMulLow(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg,
+ /*is_signed=*/true);
break;
}
case kIA32I16x8ExtMulHighI8x16S: {
- __ I16x8ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), kScratchDoubleReg,
- /*low=*/false, /*is_signed=*/true);
+ __ I16x8ExtMulHighS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
case kIA32I16x8ExtMulLowI8x16U: {
- __ I16x8ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), kScratchDoubleReg,
- /*low=*/true, /*is_signed=*/false);
+ __ I16x8ExtMulLow(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg,
+ /*is_signed=*/false);
break;
}
case kIA32I16x8ExtMulHighI8x16U: {
- __ I16x8ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), kScratchDoubleReg,
- /*low=*/false, /*is_signed=*/false);
+ __ I16x8ExtMulHighU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
case kIA32I64x2SplatI32Pair: {
XMMRegister dst = i.OutputSimd128Register();
__ Pinsrd(dst, i.InputRegister(0), 0);
__ Pinsrd(dst, i.InputOperand(1), 1);
- __ Pshufd(dst, dst, 0x44);
+ __ Pshufd(dst, dst, uint8_t{0x44});
break;
}
case kIA32I64x2ReplaceLaneI32Pair: {
@@ -2273,29 +2272,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0), kScratchDoubleReg);
break;
}
- case kIA32I8x16SignSelect: {
- ASSEMBLE_SIMD_SIGN_SELECT(pblendvb);
- break;
- }
- case kIA32I16x8SignSelect: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src1 = i.InputSimd128Register(0);
- XMMRegister src2 = i.InputSimd128Register(1);
- XMMRegister mask = i.InputSimd128Register(2);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpsraw(kScratchDoubleReg, mask, 15);
- __ vpblendvb(dst, src1, src2, kScratchDoubleReg);
- } else {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- DCHECK_EQ(dst, src1);
- DCHECK_EQ(kScratchDoubleReg, xmm0);
- __ pxor(kScratchDoubleReg, kScratchDoubleReg);
- __ pcmpgtw(kScratchDoubleReg, mask);
- __ pblendvb(dst, src2);
- }
- break;
- }
case kIA32I32x4ExtAddPairwiseI16x8S: {
__ I32x4ExtAddPairwiseI16x8S(i.OutputSimd128Register(),
i.InputSimd128Register(0),
@@ -2325,14 +2301,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
- case kIA32I32x4SignSelect: {
- ASSEMBLE_SIMD_SIGN_SELECT(blendvps);
- break;
- }
- case kIA32I64x2SignSelect: {
- ASSEMBLE_SIMD_SIGN_SELECT(blendvpd);
- break;
- }
case kIA32F32x4Splat: {
XMMRegister dst = i.OutputDoubleRegister();
XMMRegister src = i.InputDoubleRegister(0);
@@ -2443,11 +2411,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
};
- case kIA32F32x4AddHoriz: {
- __ Haddps(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
case kIA32F32x4Sub: {
__ Subps(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
@@ -2580,15 +2543,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32F32x4Pmin: {
- XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- __ Minps(dst, dst, i.InputSimd128Register(1));
+ __ Minps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kIA32F32x4Pmax: {
- XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- __ Maxps(dst, dst, i.InputSimd128Register(1));
+ __ Maxps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kIA32F32x4Round: {
@@ -2600,7 +2561,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32I32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
__ Movd(dst, i.InputOperand(0));
- __ Pshufd(dst, dst, 0x0);
+ __ Pshufd(dst, dst, uint8_t{0x0});
break;
}
case kIA32I32x4ExtractLane: {
@@ -2664,18 +2625,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
- case kSSEI32x4AddHoriz: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSSE3);
- __ phaddd(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI32x4AddHoriz: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vphaddd(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
case kSSEI32x4Sub: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psubd(i.OutputSimd128Register(), i.InputOperand(1));
@@ -2738,7 +2687,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pcmpeqd(i.OutputSimd128Register(), i.InputOperand(1));
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
+ __ xorps(i.OutputSimd128Register(), kScratchDoubleReg);
break;
}
case kAVXI32x4Ne: {
@@ -2784,7 +2733,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister dst = i.OutputSimd128Register();
XMMRegister tmp = i.TempSimd128Register(0);
// NAN->0, negative->0
- __ pxor(kScratchDoubleReg, kScratchDoubleReg);
+ __ xorps(kScratchDoubleReg, kScratchDoubleReg);
__ maxps(dst, kScratchDoubleReg);
// scratch: float representation of max_signed
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
@@ -2797,8 +2746,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ subps(tmp, kScratchDoubleReg);
__ cmpleps(kScratchDoubleReg, tmp);
__ cvttps2dq(tmp, tmp);
- __ pxor(tmp, kScratchDoubleReg);
- __ pxor(kScratchDoubleReg, kScratchDoubleReg);
+ __ xorps(tmp, kScratchDoubleReg);
+ __ xorps(kScratchDoubleReg, kScratchDoubleReg);
__ pmaxsd(tmp, kScratchDoubleReg);
// convert. Overflow lanes above max_signed will be 0x80000000
__ cvttps2dq(dst, dst);
@@ -2878,7 +2827,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ pmaxud(dst, src);
__ pcmpeqd(dst, src);
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ pxor(dst, kScratchDoubleReg);
+ __ xorps(dst, kScratchDoubleReg);
break;
}
case kAVXI32x4GtU: {
@@ -2925,13 +2874,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32I16x8Splat: {
XMMRegister dst = i.OutputSimd128Register();
__ Movd(dst, i.InputOperand(0));
- __ Pshuflw(dst, dst, 0x0);
- __ Pshufd(dst, dst, 0x0);
+ __ Pshuflw(dst, dst, uint8_t{0x0});
+ __ Pshufd(dst, dst, uint8_t{0x0});
break;
}
case kIA32I16x8ExtractLaneS: {
Register dst = i.OutputRegister();
- __ Pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1));
+ __ Pextrw(dst, i.InputSimd128Register(0), i.InputUint8(1));
__ movsx_w(dst, dst);
break;
}
@@ -2997,18 +2946,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
- case kSSEI16x8AddHoriz: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSSE3);
- __ phaddw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8AddHoriz: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vphaddw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
case kSSEI16x8Sub: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psubw(i.OutputSimd128Register(), i.InputOperand(1));
@@ -3079,7 +3016,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pcmpeqw(i.OutputSimd128Register(), i.InputOperand(1));
__ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
- __ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
+ __ xorps(i.OutputSimd128Register(), kScratchDoubleReg);
break;
}
case kAVXI16x8Ne: {
@@ -3198,7 +3135,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ pmaxuw(dst, src);
__ pcmpeqw(dst, src);
__ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
- __ pxor(dst, kScratchDoubleReg);
+ __ xorps(dst, kScratchDoubleReg);
break;
}
case kAVXI16x8GtU: {
@@ -3255,7 +3192,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kIA32I8x16ExtractLaneS: {
Register dst = i.OutputRegister();
- __ Pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1));
+ __ Pextrb(dst, i.InputSimd128Register(0), i.InputUint8(1));
__ movsx_b(dst, dst);
break;
}
@@ -3301,7 +3238,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputUint8(index + 1));
} else {
Register dst = i.OutputRegister();
- __ Pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1));
+ __ Pextrb(dst, i.InputSimd128Register(0), i.InputUint8(1));
}
break;
}
@@ -3313,7 +3250,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputUint8(index + 1));
} else {
Register dst = i.OutputRegister();
- __ Pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1));
+ __ Pextrw(dst, i.InputSimd128Register(0), i.InputUint8(1));
}
break;
}
@@ -3362,7 +3299,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
__ mov(tmp, mask);
__ Movd(tmp_simd, tmp);
- __ Pshufd(tmp_simd, tmp_simd, 0);
+ __ Pshufd(tmp_simd, tmp_simd, uint8_t{0});
__ Pand(dst, tmp_simd);
} else {
// Take shift value modulo 8.
@@ -3430,85 +3367,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
- case kSSEI8x16Mul: {
- XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- XMMRegister right = i.InputSimd128Register(1);
- XMMRegister tmp = i.TempSimd128Register(0);
-
- // I16x8 view of I8x16
- // left = AAaa AAaa ... AAaa AAaa
- // right= BBbb BBbb ... BBbb BBbb
-
- // t = 00AA 00AA ... 00AA 00AA
- // s = 00BB 00BB ... 00BB 00BB
- __ movaps(tmp, dst);
- __ movaps(kScratchDoubleReg, right);
- __ psrlw(tmp, 8);
- __ psrlw(kScratchDoubleReg, 8);
- // dst = left * 256
- __ psllw(dst, 8);
-
- // t = I16x8Mul(t, s)
- // => __PP __PP ... __PP __PP
- __ pmullw(tmp, kScratchDoubleReg);
- // dst = I16x8Mul(left * 256, right)
- // => pp__ pp__ ... pp__ pp__
- __ pmullw(dst, right);
-
- // t = I16x8Shl(t, 8)
- // => PP00 PP00 ... PP00 PP00
- __ psllw(tmp, 8);
-
- // dst = I16x8Shr(dst, 8)
- // => 00pp 00pp ... 00pp 00pp
- __ psrlw(dst, 8);
-
- // dst = I16x8Or(dst, t)
- // => PPpp PPpp ... PPpp PPpp
- __ por(dst, tmp);
- break;
- }
- case kAVXI8x16Mul: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister left = i.InputSimd128Register(0);
- XMMRegister right = i.InputSimd128Register(1);
- XMMRegister tmp = i.TempSimd128Register(0);
-
- // I16x8 view of I8x16
- // left = AAaa AAaa ... AAaa AAaa
- // right= BBbb BBbb ... BBbb BBbb
-
- // t = 00AA 00AA ... 00AA 00AA
- // s = 00BB 00BB ... 00BB 00BB
- __ vpsrlw(tmp, left, 8);
- __ vpsrlw(kScratchDoubleReg, right, 8);
-
- // t = I16x8Mul(t0, t1)
- // => __PP __PP ... __PP __PP
- __ vpmullw(tmp, tmp, kScratchDoubleReg);
-
- // s = left * 256
- __ vpsllw(kScratchDoubleReg, left, 8);
-
- // dst = I16x8Mul(left * 256, right)
- // => pp__ pp__ ... pp__ pp__
- __ vpmullw(dst, kScratchDoubleReg, right);
-
- // dst = I16x8Shr(dst, 8)
- // => 00pp 00pp ... 00pp 00pp
- __ vpsrlw(dst, dst, 8);
-
- // t = I16x8Shl(t, 8)
- // => PP00 PP00 ... PP00 PP00
- __ vpsllw(tmp, tmp, 8);
-
- // dst = I16x8Or(dst, t)
- // => PPpp PPpp ... PPpp PPpp
- __ vpor(dst, dst, tmp);
- break;
- }
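As a plain scalar illustration of the identity the removed kSSEI8x16Mul/kAVXI8x16Mul lowering relied on (two bytes per 16-bit lane, keeping only the low byte of each product); hypothetical standalone C++, not part of the patch:

#include <cstdint>

// "AAaa" * "BBbb" -> "PPpp": multiply the high and low bytes separately
// through the 16-bit lane, then recombine the low byte of each product,
// matching the i8x16.mul semantics the removed code computed lane-wise.
uint16_t MulBytePair(uint16_t a, uint16_t b) {
  uint16_t hi = static_cast<uint16_t>(((a >> 8) * (b >> 8)) << 8);  // PP00
  uint32_t lo_wide = (static_cast<uint32_t>(a) << 8) * b;           // pp00 (mod 2^16)
  uint16_t lo = static_cast<uint16_t>(lo_wide) >> 8;                // 00pp
  return hi | lo;
}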
case kIA32I8x16MinS: {
__ Pminsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
@@ -3528,7 +3386,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pcmpeqb(i.OutputSimd128Register(), i.InputOperand(1));
__ pcmpeqb(kScratchDoubleReg, kScratchDoubleReg);
- __ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
+ __ xorps(i.OutputSimd128Register(), kScratchDoubleReg);
break;
}
case kAVXI8x16Ne: {
@@ -3601,7 +3459,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
__ mov(tmp, mask);
__ Movd(tmp_simd, tmp);
- __ Pshufd(tmp_simd, tmp_simd, 0);
+ __ Pshufd(tmp_simd, tmp_simd, uint8_t{0});
__ Pand(dst, tmp_simd);
} else {
// Unpack the bytes into words, do logical shifts, and repack.
@@ -3635,7 +3493,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ pmaxub(dst, src);
__ pcmpeqb(dst, src);
__ pcmpeqb(kScratchDoubleReg, kScratchDoubleReg);
- __ pxor(dst, kScratchDoubleReg);
+ __ xorps(dst, kScratchDoubleReg);
break;
}
case kAVXI8x16GtU: {
@@ -3719,7 +3577,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kSSES128And: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pand(i.OutputSimd128Register(), i.InputOperand(1));
+ __ andps(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXS128And: {
@@ -3730,7 +3588,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kSSES128Or: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ por(i.OutputSimd128Register(), i.InputOperand(1));
+ __ orps(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXS128Or: {
@@ -3741,7 +3599,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kSSES128Xor: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pxor(i.OutputSimd128Register(), i.InputOperand(1));
+ __ xorps(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXS128Xor: {
@@ -3757,25 +3615,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32S128AndNot: {
- XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
// The inputs have been inverted by instruction selector, so we can call
// andnps here without any modifications.
- XMMRegister src1 = i.InputSimd128Register(1);
- __ Andnps(dst, src1);
+ __ Andnps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kIA32I8x16Swizzle: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister mask = i.TempSimd128Register(0);
-
- // Out-of-range indices should return 0, add 112 so that any value > 15
- // saturates to 128 (top bit set), so pshufb will zero that lane.
- __ Move(mask, uint32_t{0x70707070});
- __ Pshufd(mask, mask, 0x0);
- __ Paddusb(mask, i.InputSimd128Register(1));
- __ Pshufb(dst, mask);
+ __ I8x16Swizzle(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg,
+ i.TempRegister(0), MiscField::decode(instr->opcode()));
break;
}
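A scalar model of the saturating-add trick the removed comment describes (presumably now folded into the I8x16Swizzle macro-assembler helper); the helper name here is hypothetical:

#include <algorithm>
#include <cstdint>

// After an unsigned saturating add of 0x70 (112), any swizzle index > 15
// ends up >= 0x80, and pshufb zeroes lanes whose index byte has its top bit
// set, giving the required out-of-range-returns-zero behaviour.
uint8_t AdjustSwizzleIndex(uint8_t idx) {
  return static_cast<uint8_t>(std::min(idx + 0x70, 0xFF));
}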
case kIA32I8x16Shuffle: {
@@ -3881,12 +3730,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kIA32S32x4Swizzle: {
DCHECK_EQ(2, instr->InputCount());
- __ Pshufd(i.OutputSimd128Register(), i.InputOperand(0), i.InputInt8(1));
+ __ Pshufd(i.OutputSimd128Register(), i.InputOperand(0), i.InputUint8(1));
break;
}
case kIA32S32x4Shuffle: {
DCHECK_EQ(4, instr->InputCount()); // Swizzles should be handled above.
- int8_t shuffle = i.InputInt8(2);
+ uint8_t shuffle = i.InputUint8(2);
DCHECK_NE(0xe4, shuffle); // A simple blend should be handled below.
__ Pshufd(kScratchDoubleReg, i.InputOperand(1), shuffle);
__ Pshufd(i.OutputSimd128Register(), i.InputOperand(0), shuffle);
@@ -3898,16 +3747,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kIA32S16x8HalfShuffle1: {
XMMRegister dst = i.OutputSimd128Register();
- __ Pshuflw(dst, i.InputOperand(0), i.InputInt8(1));
- __ Pshufhw(dst, dst, i.InputInt8(2));
+ __ Pshuflw(dst, i.InputOperand(0), i.InputUint8(1));
+ __ Pshufhw(dst, dst, i.InputUint8(2));
break;
}
case kIA32S16x8HalfShuffle2: {
XMMRegister dst = i.OutputSimd128Register();
- __ Pshuflw(kScratchDoubleReg, i.InputOperand(1), i.InputInt8(2));
- __ Pshufhw(kScratchDoubleReg, kScratchDoubleReg, i.InputInt8(3));
- __ Pshuflw(dst, i.InputOperand(0), i.InputInt8(2));
- __ Pshufhw(dst, dst, i.InputInt8(3));
+ __ Pshuflw(kScratchDoubleReg, i.InputOperand(1), i.InputUint8(2));
+ __ Pshufhw(kScratchDoubleReg, kScratchDoubleReg, i.InputUint8(3));
+ __ Pshuflw(dst, i.InputOperand(0), i.InputUint8(2));
+ __ Pshufhw(dst, dst, i.InputUint8(3));
__ Pblendw(dst, kScratchDoubleReg, i.InputInt8(4));
break;
}
@@ -3917,22 +3766,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32S16x8Dup: {
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(0);
- int8_t lane = i.InputInt8(1) & 0x7;
- int8_t lane4 = lane & 0x3;
- int8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
+ uint8_t lane = i.InputUint8(1) & 0x7;
+ uint8_t lane4 = lane & 0x3;
+ uint8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
if (lane < 4) {
__ Pshuflw(dst, src, half_dup);
- __ Pshufd(dst, dst, 0);
+ __ Pshufd(dst, dst, uint8_t{0});
} else {
__ Pshufhw(dst, src, half_dup);
- __ Pshufd(dst, dst, 0xaa);
+ __ Pshufd(dst, dst, uint8_t{0xaa});
}
break;
}
case kIA32S8x16Dup: {
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
- int8_t lane = i.InputInt8(1) & 0xf;
+ uint8_t lane = i.InputUint8(1) & 0xf;
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(tasm(), AVX);
if (lane < 8) {
@@ -3949,14 +3798,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
}
lane &= 0x7;
- int8_t lane4 = lane & 0x3;
- int8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
+ uint8_t lane4 = lane & 0x3;
+ uint8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
if (lane < 4) {
__ Pshuflw(dst, dst, half_dup);
- __ Pshufd(dst, dst, 0);
+ __ Pshufd(dst, dst, uint8_t{0});
} else {
__ Pshufhw(dst, dst, half_dup);
- __ Pshufd(dst, dst, 0xaa);
+ __ Pshufd(dst, dst, uint8_t{0xaa});
}
break;
}
@@ -4015,7 +3864,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src2 = dst;
DCHECK_EQ(dst, i.InputSimd128Register(0));
- __ pxor(kScratchDoubleReg, kScratchDoubleReg);
+ __ xorps(kScratchDoubleReg, kScratchDoubleReg);
if (instr->InputCount() == 2) {
__ pblendw(kScratchDoubleReg, i.InputOperand(1), 0x55);
src2 = kScratchDoubleReg;
@@ -4104,7 +3953,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ psllw(kScratchDoubleReg, 8);
}
__ psrlw(dst, 8);
- __ por(dst, kScratchDoubleReg);
+ __ orps(dst, kScratchDoubleReg);
break;
}
case kAVXS8x16TransposeLow: {
@@ -4134,7 +3983,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ psrlw(kScratchDoubleReg, 8);
}
__ psllw(kScratchDoubleReg, 8);
- __ por(dst, kScratchDoubleReg);
+ __ orps(dst, kScratchDoubleReg);
break;
}
case kAVXS8x16TransposeHigh: {
@@ -4167,7 +4016,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movaps(kScratchDoubleReg, dst);
__ psrlw(kScratchDoubleReg, 8);
__ psllw(dst, 8);
- __ por(dst, kScratchDoubleReg);
+ __ orps(dst, kScratchDoubleReg);
break;
}
case kAVXS8x2Reverse:
@@ -4205,25 +4054,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// comparison instruction used matters, e.g. given 0xff00, pcmpeqb returns
// 0x0011, pcmpeqw returns 0x0000, ptest will set ZF to 0 and 1
// respectively.
- case kIA32V64x2AllTrue:
+ case kIA32I64x2AllTrue:
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqq);
break;
- case kIA32V32x4AllTrue:
+ case kIA32I32x4AllTrue:
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqd);
break;
- case kIA32V16x8AllTrue:
+ case kIA32I16x8AllTrue:
ASSEMBLE_SIMD_ALL_TRUE(pcmpeqw);
break;
- case kIA32V8x16AllTrue: {
+ case kIA32I8x16AllTrue: {
ASSEMBLE_SIMD_ALL_TRUE(pcmpeqb);
break;
}
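For reference, the semantics the renamed AllTrue cases implement: all_true is true iff no lane is zero, which ASSEMBLE_SIMD_ALL_TRUE checks with a compare whose lane width matches the element type (hence the pcmpeqq/pcmpeqd/pcmpeqw/pcmpeqb choices above). A scalar sketch for the 32x4 case:

#include <cstdint>

// i32x4.all_true: true iff every lane is non-zero.
bool I32x4AllTrue(const int32_t lanes[4]) {
  for (int i = 0; i < 4; ++i) {
    if (lanes[i] == 0) return false;
  }
  return true;
}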
- case kIA32Prefetch:
- __ prefetch(i.MemoryOperand(), 1);
- break;
- case kIA32PrefetchNta:
- __ prefetch(i.MemoryOperand(), 0);
- break;
case kIA32Word32AtomicPairLoad: {
XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
__ movq(tmp, i.MemoryOperand());
@@ -4486,6 +4329,7 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
}
+#if V8_ENABLE_WEBASSEMBLY
void CodeGenerator::AssembleArchTrap(Instruction* instr,
FlagsCondition condition) {
class OutOfLineTrap final : public OutOfLineCode {
@@ -4511,7 +4355,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ LeaveFrame(StackFrame::WASM);
auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
size_t pop_size =
- call_descriptor->StackParameterCount() * kSystemPointerSize;
+ call_descriptor->ParameterSlotCount() * kSystemPointerSize;
// Use ecx as a scratch register, we return anyways immediately.
__ Ret(static_cast<int>(pop_size), ecx);
} else {
@@ -4541,6 +4385,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ j(FlagsConditionToCondition(condition), tlabel);
__ bind(&end);
}
+#endif // V8_ENABLE_WEBASSEMBLY
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -4606,6 +4451,11 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
__ jmp(Operand::JumpTable(input, times_system_pointer_size, table));
}
+void CodeGenerator::AssembleArchSelect(Instruction* instr,
+ FlagsCondition condition) {
+ UNIMPLEMENTED();
+}
+
// The calling convention for JSFunctions on IA32 passes arguments on the
// stack and the JSFunction and context in EDI and ESI, respectively, thus
// the steps of the call look as follows:
@@ -4752,15 +4602,18 @@ void CodeGenerator::AssembleConstructFrame() {
if (call_descriptor->IsCFunctionCall()) {
__ push(ebp);
__ mov(ebp, esp);
+#if V8_ENABLE_WEBASSEMBLY
if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
__ Push(Immediate(StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY)));
// Reserve stack space for saving the c_entry_fp later.
__ AllocateStackSpace(kSystemPointerSize);
}
+#endif // V8_ENABLE_WEBASSEMBLY
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
} else {
__ StubPrologue(info()->GetOutputStackFrameType());
+#if V8_ENABLE_WEBASSEMBLY
if (call_descriptor->IsWasmFunctionCall()) {
__ push(kWasmInstanceRegister);
} else if (call_descriptor->IsWasmImportWrapper() ||
@@ -4781,6 +4634,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ AllocateStackSpace(kSystemPointerSize);
}
}
+#endif // V8_ENABLE_WEBASSEMBLY
}
}
@@ -4803,6 +4657,7 @@ void CodeGenerator::AssembleConstructFrame() {
const RegList saves = call_descriptor->CalleeSavedRegisters();
if (required_slots > 0) {
DCHECK(frame_access_state()->has_frame());
+#if V8_ENABLE_WEBASSEMBLY
if (info()->IsWasm() && required_slots > 128) {
// For WebAssembly functions with big frames we have to do the stack
// overflow check before we construct the frame. Otherwise we may not
@@ -4833,6 +4688,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
__ bind(&done);
}
+#endif // V8_ENABLE_WEBASSEMBLY
// Skip callee-saved and return slots, which are created below.
required_slots -= base::bits::CountPopulation(saves);
@@ -4875,12 +4731,11 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & edx.bit());
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & ecx.bit());
IA32OperandConverter g(this, nullptr);
- int parameter_count =
- static_cast<int>(call_descriptor->StackParameterCount());
+ int parameter_slots = static_cast<int>(call_descriptor->ParameterSlotCount());
- // {aditional_pop_count} is only greater than zero if {parameter_count = 0}.
+ // {aditional_pop_count} is only greater than zero if {parameter_slots = 0}.
// Check RawMachineAssembler::PopAndReturn.
- if (parameter_count != 0) {
+ if (parameter_slots != 0) {
if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
} else if (__ emit_debug_code()) {
@@ -4891,12 +4746,12 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
Register argc_reg = ecx;
// Functions with JS linkage have at least one parameter (the receiver).
- // If {parameter_count} == 0, it means it is a builtin with
+ // If {parameter_slots} == 0, it means it is a builtin with
// kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
// itself.
const bool drop_jsargs = frame_access_state()->has_frame() &&
call_descriptor->IsJSFunctionCall() &&
- parameter_count != 0;
+ parameter_slots != 0;
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
@@ -4920,16 +4775,16 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
if (drop_jsargs) {
// We must pop all arguments from the stack (including the receiver). This
- // number of arguments is given by max(1 + argc_reg, parameter_count).
- int parameter_count_without_receiver =
- parameter_count - 1; // Exclude the receiver to simplify the
+ // number of arguments is given by max(1 + argc_reg, parameter_slots).
+ int parameter_slots_without_receiver =
+ parameter_slots - 1; // Exclude the receiver to simplify the
// computation. We'll account for it at the end.
Label mismatch_return;
Register scratch_reg = edx;
DCHECK_NE(argc_reg, scratch_reg);
- __ cmp(argc_reg, Immediate(parameter_count_without_receiver));
+ __ cmp(argc_reg, Immediate(parameter_slots_without_receiver));
__ j(greater, &mismatch_return, Label::kNear);
- __ Ret(parameter_count * kSystemPointerSize, scratch_reg);
+ __ Ret(parameter_slots * kSystemPointerSize, scratch_reg);
__ bind(&mismatch_return);
__ PopReturnAddressTo(scratch_reg);
__ lea(esp, Operand(esp, argc_reg, times_system_pointer_size,
@@ -4940,13 +4795,13 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
} else if (additional_pop_count->IsImmediate()) {
Register scratch_reg = ecx;
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
- size_t pop_size = (parameter_count + additional_count) * kSystemPointerSize;
+ size_t pop_size = (parameter_slots + additional_count) * kSystemPointerSize;
CHECK_LE(pop_size, static_cast<size_t>(std::numeric_limits<int>::max()));
__ Ret(static_cast<int>(pop_size), scratch_reg);
} else {
Register pop_reg = g.ToRegister(additional_pop_count);
Register scratch_reg = pop_reg == ecx ? edx : ecx;
- int pop_size = static_cast<int>(parameter_count * kSystemPointerSize);
+ int pop_size = static_cast<int>(parameter_slots * kSystemPointerSize);
__ PopReturnAddressTo(scratch_reg);
__ lea(esp, Operand(esp, pop_reg, times_system_pointer_size,
static_cast<int>(pop_size)));
@@ -5211,7 +5066,6 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
#undef ASSEMBLE_SIMD_ALL_TRUE
#undef ASSEMBLE_SIMD_SHIFT
#undef ASSEMBLE_SIMD_PINSR
-#undef ASSEMBLE_SIMD_SIGN_SELECT
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index 40f7b6e4030..f06ed5156e8 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -153,7 +153,6 @@ namespace compiler {
V(IA32I64x2Ne) \
V(IA32I64x2GtS) \
V(IA32I64x2GeS) \
- V(IA32I64x2SignSelect) \
V(IA32I64x2ExtMulLowI32x4S) \
V(IA32I64x2ExtMulHighI32x4S) \
V(IA32I64x2ExtMulLowI32x4U) \
@@ -173,7 +172,6 @@ namespace compiler {
V(IA32F32x4RecipApprox) \
V(IA32F32x4RecipSqrtApprox) \
V(IA32F32x4Add) \
- V(IA32F32x4AddHoriz) \
V(IA32F32x4Sub) \
V(IA32F32x4Mul) \
V(IA32F32x4Div) \
@@ -203,8 +201,6 @@ namespace compiler {
V(IA32I32x4ShrS) \
V(SSEI32x4Add) \
V(AVXI32x4Add) \
- V(SSEI32x4AddHoriz) \
- V(AVXI32x4AddHoriz) \
V(SSEI32x4Sub) \
V(AVXI32x4Sub) \
V(SSEI32x4Mul) \
@@ -237,7 +233,6 @@ namespace compiler {
V(IA32I32x4Abs) \
V(IA32I32x4BitMask) \
V(IA32I32x4DotI16x8S) \
- V(IA32I32x4SignSelect) \
V(IA32I32x4ExtMulLowI16x8S) \
V(IA32I32x4ExtMulHighI16x8S) \
V(IA32I32x4ExtMulLowI16x8U) \
@@ -259,8 +254,6 @@ namespace compiler {
V(AVXI16x8Add) \
V(SSEI16x8AddSatS) \
V(AVXI16x8AddSatS) \
- V(SSEI16x8AddHoriz) \
- V(AVXI16x8AddHoriz) \
V(SSEI16x8Sub) \
V(AVXI16x8Sub) \
V(SSEI16x8SubSatS) \
@@ -299,7 +292,6 @@ namespace compiler {
V(IA32I16x8RoundingAverageU) \
V(IA32I16x8Abs) \
V(IA32I16x8BitMask) \
- V(IA32I16x8SignSelect) \
V(IA32I16x8ExtMulLowI8x16S) \
V(IA32I16x8ExtMulHighI8x16S) \
V(IA32I16x8ExtMulLowI8x16U) \
@@ -324,8 +316,6 @@ namespace compiler {
V(IA32I8x16AddSatS) \
V(IA32I8x16Sub) \
V(IA32I8x16SubSatS) \
- V(SSEI8x16Mul) \
- V(AVXI8x16Mul) \
V(IA32I8x16MinS) \
V(IA32I8x16MaxS) \
V(IA32I8x16Eq) \
@@ -348,7 +338,6 @@ namespace compiler {
V(IA32I8x16RoundingAverageU) \
V(IA32I8x16Abs) \
V(IA32I8x16BitMask) \
- V(IA32I8x16SignSelect) \
V(IA32I8x16Popcnt) \
V(IA32S128Const) \
V(IA32S128Zero) \
@@ -410,12 +399,10 @@ namespace compiler {
V(SSES8x2Reverse) \
V(AVXS8x2Reverse) \
V(IA32S128AnyTrue) \
- V(IA32V64x2AllTrue) \
- V(IA32V32x4AllTrue) \
- V(IA32V16x8AllTrue) \
- V(IA32V8x16AllTrue) \
- V(IA32Prefetch) \
- V(IA32PrefetchNta) \
+ V(IA32I64x2AllTrue) \
+ V(IA32I32x4AllTrue) \
+ V(IA32I16x8AllTrue) \
+ V(IA32I8x16AllTrue) \
V(IA32Word32AtomicPairLoad) \
V(IA32Word32AtomicPairStore) \
V(IA32Word32AtomicPairAdd) \
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index 21b650cb611..3d1c7073591 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -138,7 +138,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I64x2Ne:
case kIA32I64x2GtS:
case kIA32I64x2GeS:
- case kIA32I64x2SignSelect:
case kIA32I64x2ExtMulLowI32x4S:
case kIA32I64x2ExtMulHighI32x4S:
case kIA32I64x2ExtMulLowI32x4U:
@@ -158,7 +157,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32F32x4RecipApprox:
case kIA32F32x4RecipSqrtApprox:
case kIA32F32x4Add:
- case kIA32F32x4AddHoriz:
case kIA32F32x4Sub:
case kIA32F32x4Mul:
case kIA32F32x4Div:
@@ -188,8 +186,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I32x4ShrS:
case kSSEI32x4Add:
case kAVXI32x4Add:
- case kSSEI32x4AddHoriz:
- case kAVXI32x4AddHoriz:
case kSSEI32x4Sub:
case kAVXI32x4Sub:
case kSSEI32x4Mul:
@@ -222,7 +218,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I32x4Abs:
case kIA32I32x4BitMask:
case kIA32I32x4DotI16x8S:
- case kIA32I32x4SignSelect:
case kIA32I32x4ExtMulLowI16x8S:
case kIA32I32x4ExtMulHighI16x8S:
case kIA32I32x4ExtMulLowI16x8U:
@@ -244,8 +239,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXI16x8Add:
case kSSEI16x8AddSatS:
case kAVXI16x8AddSatS:
- case kSSEI16x8AddHoriz:
- case kAVXI16x8AddHoriz:
case kSSEI16x8Sub:
case kAVXI16x8Sub:
case kSSEI16x8SubSatS:
@@ -284,7 +277,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I16x8RoundingAverageU:
case kIA32I16x8Abs:
case kIA32I16x8BitMask:
- case kIA32I16x8SignSelect:
case kIA32I16x8ExtMulLowI8x16S:
case kIA32I16x8ExtMulHighI8x16S:
case kIA32I16x8ExtMulLowI8x16U:
@@ -309,8 +301,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I8x16AddSatS:
case kIA32I8x16Sub:
case kIA32I8x16SubSatS:
- case kSSEI8x16Mul:
- case kAVXI8x16Mul:
case kIA32I8x16MinS:
case kIA32I8x16MaxS:
case kIA32I8x16Eq:
@@ -333,7 +323,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I8x16RoundingAverageU:
case kIA32I8x16Abs:
case kIA32I8x16BitMask:
- case kIA32I8x16SignSelect:
case kIA32I8x16Popcnt:
case kIA32S128Const:
case kIA32S128Zero:
@@ -385,10 +374,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSES8x2Reverse:
case kAVXS8x2Reverse:
case kIA32S128AnyTrue:
- case kIA32V64x2AllTrue:
- case kIA32V32x4AllTrue:
- case kIA32V16x8AllTrue:
- case kIA32V8x16AllTrue:
+ case kIA32I64x2AllTrue:
+ case kIA32I32x4AllTrue:
+ case kIA32I16x8AllTrue:
+ case kIA32I8x16AllTrue:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
@@ -431,8 +420,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Poke:
case kIA32MFence:
case kIA32LFence:
- case kIA32Prefetch:
- case kIA32PrefetchNta:
return kHasSideEffect;
case kIA32Word32AtomicPairLoad:
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index 662b40ddf44..033a566e113 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -37,9 +37,12 @@
#include "src/compiler/write-barrier-kind.h"
#include "src/flags/flags.h"
#include "src/utils/utils.h"
-#include "src/wasm/simd-shuffle.h"
#include "src/zone/zone-containers.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/simd-shuffle.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace internal {
namespace compiler {
@@ -398,7 +401,7 @@ void VisitRROI8x16SimdShift(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitStackSlot(Node* node) {
StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
- int slot = frame_->AllocateSpillSlot(rep.size());
+ int slot = frame_->AllocateSpillSlot(rep.size(), rep.alignment());
OperandGenerator g(this);
Emit(kArchStackSlot, g.DefineAsRegister(node),
@@ -702,36 +705,6 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitPrefetchTemporal(Node* node) {
- IA32OperandGenerator g(this);
- InstructionOperand inputs[3];
- size_t input_count = 0;
- InstructionCode opcode = kIA32Prefetch;
- AddressingMode addressing_mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
- // The maximum number of inputs that can be generated by the function above is
- // 3, but wasm cases only generate 2 inputs. This check will need to be
- // modified for any non-wasm uses of prefetch.
- DCHECK_LE(input_count, 2);
- opcode |= AddressingModeField::encode(addressing_mode);
- Emit(opcode, 0, nullptr, input_count, inputs);
-}
-
-void InstructionSelector::VisitPrefetchNonTemporal(Node* node) {
- IA32OperandGenerator g(this);
- InstructionOperand inputs[3];
- size_t input_count = 0;
- InstructionCode opcode = kIA32PrefetchNta;
- AddressingMode addressing_mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
- // The maximum number of inputs that can be generated by the function above is
- // 3, but wasm cases only generate 2 inputs. This check will need to be
- // modified for any non-wasm uses of prefetch.
- DCHECK_LE(input_count, 2);
- opcode |= AddressingModeField::encode(addressing_mode);
- Emit(opcode, 0, nullptr, input_count, inputs);
-}
-
namespace {
// Shared routine for multiple binary operations.
@@ -2217,7 +2190,6 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(F32x4Lt) \
V(F32x4Le) \
V(I32x4Add) \
- V(I32x4AddHoriz) \
V(I32x4Sub) \
V(I32x4Mul) \
V(I32x4MinS) \
@@ -2233,7 +2205,6 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I16x8SConvertI32x4) \
V(I16x8Add) \
V(I16x8AddSatS) \
- V(I16x8AddHoriz) \
V(I16x8Sub) \
V(I16x8SubSatS) \
V(I16x8Mul) \
@@ -2260,7 +2231,6 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
#define SIMD_BINOP_UNIFIED_SSE_AVX_LIST(V) \
V(F32x4Add) \
- V(F32x4AddHoriz) \
V(F32x4Sub) \
V(F32x4Mul) \
V(F32x4Div) \
@@ -2335,10 +2305,10 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(S128Not)
#define SIMD_ALLTRUE_LIST(V) \
- V(V64x2AllTrue) \
- V(V32x4AllTrue) \
- V(V16x8AllTrue) \
- V(V8x16AllTrue)
+ V(I64x2AllTrue) \
+ V(I32x4AllTrue) \
+ V(I16x8AllTrue) \
+ V(I8x16AllTrue)
#define SIMD_SHIFT_OPCODES_UNIFED_SSE_AVX(V) \
V(I64x2Shl) \
@@ -2378,7 +2348,7 @@ void InstructionSelector::VisitF64x2Min(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempSimd128Register()};
InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
- InstructionOperand operand1 = g.UseUnique(node->InputAt(1));
+ InstructionOperand operand1 = g.UseUniqueRegister(node->InputAt(1));
if (IsSupported(AVX)) {
Emit(kIA32F64x2Min, g.DefineAsRegister(node), operand0, operand1,
@@ -2393,7 +2363,7 @@ void InstructionSelector::VisitF64x2Max(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempSimd128Register()};
InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
- InstructionOperand operand1 = g.UseUnique(node->InputAt(1));
+ InstructionOperand operand1 = g.UseUniqueRegister(node->InputAt(1));
if (IsSupported(AVX)) {
Emit(kIA32F64x2Max, g.DefineAsRegister(node), operand0, operand1,
arraysize(temps), temps);
@@ -2499,20 +2469,6 @@ void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
arraysize(temps), temps);
}
-void InstructionSelector::VisitI8x16Mul(Node* node) {
- IA32OperandGenerator g(this);
- InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
- InstructionOperand operand1 = g.UseUniqueRegister(node->InputAt(1));
- InstructionOperand temps[] = {g.TempSimd128Register()};
- if (IsSupported(AVX)) {
- Emit(kAVXI8x16Mul, g.DefineAsRegister(node), operand0, operand1,
- arraysize(temps), temps);
- } else {
- Emit(kSSEI8x16Mul, g.DefineSameAsFirst(node), operand0, operand1,
- arraysize(temps), temps);
- }
-}
-
void InstructionSelector::VisitS128Zero(Node* node) {
IA32OperandGenerator g(this);
Emit(kIA32S128Zero, g.DefineAsRegister(node));
@@ -2529,8 +2485,10 @@ void InstructionSelector::VisitS128Select(Node* node) {
void InstructionSelector::VisitS128AndNot(Node* node) {
IA32OperandGenerator g(this);
// andnps a b does ~a & b, but we want a & ~b, so flip the input.
- Emit(kIA32S128AndNot, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ InstructionOperand dst =
+ IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
+ Emit(kIA32S128AndNot, dst, g.UseRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(0)));
}
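
A minimal scalar sketch (plain C++, not V8 code) of why the inputs are flipped above: x86 andnps complements its first operand, computing ~dst & src, while the wasm and-not semantics are a & ~b, so passing b first and a second yields the wanted result.

#include <cassert>
#include <cstdint>

// Scalar model of x86 ANDNPS: complements the first operand.
uint64_t AndnpsModel(uint64_t dst, uint64_t src) { return ~dst & src; }

// Wasm-style and_not(a, b) == a & ~b, obtained by flipping the inputs.
uint64_t S128AndNotModel(uint64_t a, uint64_t b) { return AndnpsModel(b, a); }

int main() {
  assert(S128AndNotModel(0b1100, 0b1010) == 0b0100);
  return 0;
}
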
#define VISIT_SIMD_SPLAT(Type) \
@@ -2748,6 +2706,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
+#if V8_ENABLE_WEBASSEMBLY
namespace {
// Returns true if shuffle can be decomposed into two 16x4 half shuffles
@@ -3037,12 +2996,27 @@ void InstructionSelector::VisitI8x16Shuffle(Node* node) {
}
Emit(opcode, 1, &dst, input_count, inputs, temp_count, temps);
}
+#else
+void InstructionSelector::VisitI8x16Shuffle(Node* node) { UNREACHABLE(); }
+#endif // V8_ENABLE_WEBASSEMBLY
void InstructionSelector::VisitI8x16Swizzle(Node* node) {
+ InstructionCode op = kIA32I8x16Swizzle;
+
+ auto m = V128ConstMatcher(node->InputAt(1));
+ if (m.HasResolvedValue()) {
+ // If the indices vector is a constant, check whether every index is in
+ // range or has its top bit set; if so we can avoid the paddusb in the
+ // codegen and simply emit a pshufb.
+ auto imms = m.ResolvedValue().immediate();
+ op |= MiscField::encode(wasm::SimdSwizzle::AllInRangeOrTopBitSet(imms));
+ }
+
IA32OperandGenerator g(this);
- InstructionOperand temps[] = {g.TempSimd128Register()};
- Emit(kIA32I8x16Swizzle, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+ InstructionOperand temps[] = {g.TempRegister()};
+ Emit(op,
+ IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
arraysize(temps), temps);
}
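
A rough sketch of the predicate whose result is folded into MiscField above (plain C++ with a hypothetical signature, not the actual wasm::SimdSwizzle implementation): if every constant lane index is already below 16 or has its top bit set, pshufb alone already produces the wasm swizzle result, so the saturating add that clamps out-of-range indices can be skipped.

#include <array>
#include <cstdint>

// Returns true when a plain pshufb with these indices matches wasm swizzle
// semantics, making the preparatory paddusb unnecessary.
bool AllInRangeOrTopBitSetSketch(const std::array<uint8_t, 16>& indices) {
  for (uint8_t idx : indices) {
    if (idx >= 16 && (idx & 0x80) == 0) return false;
  }
  return true;
}
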
@@ -3052,8 +3026,10 @@ void VisitPminOrPmax(InstructionSelector* selector, Node* node,
// Due to the way minps/minpd work, we want the dst to be same as the second
// input: b = pmin(a, b) directly maps to minps b a.
IA32OperandGenerator g(selector);
- selector->Emit(opcode, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(1)),
+ InstructionOperand dst = selector->IsSupported(AVX)
+ ? g.DefineAsRegister(node)
+ : g.DefineSameAsFirst(node);
+ selector->Emit(opcode, dst, g.UseRegister(node->InputAt(1)),
g.UseRegister(node->InputAt(0)));
}
} // namespace
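
As a scalar illustration of the comment above (an assumption-level sketch, not the SIMD code): wasm pmin(a, b) is defined as b < a ? b : a, and x86 minps keeps its destination operand unless the source compares strictly less, so emitting minps with dst = b and src = a reproduces pmin(a, b) exactly. That is why the second input is used as the destination.

#include <cassert>

// Scalar models; the vector instructions apply this lane-wise.
double PminModel(double a, double b) { return b < a ? b : a; }  // wasm pmin
double MinpsModel(double dst, double src) {                     // x86 minps
  return dst < src ? dst : src;  // ties and NaNs resolve to src
}

int main() {
  // pmin(a, b) == minps with dst = b, src = a.
  assert(PminModel(1.0, 2.0) == MinpsModel(2.0, 1.0));
  assert(PminModel(2.0, 1.0) == MinpsModel(1.0, 2.0));
  return 0;
}
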
@@ -3075,44 +3051,6 @@ void InstructionSelector::VisitF64x2Pmax(Node* node) {
}
namespace {
-void VisitSignSelect(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
- IA32OperandGenerator g(selector);
- // signselect(x, y, -1) = x
- // pblendvb(dst, x, y, -1) = dst <- y, so we need to swap x and y.
- if (selector->IsSupported(AVX)) {
- selector->Emit(
- opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(1)),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(2)));
- } else {
- // We would like to fix the mask to be xmm0, since that is what
- // pblendvb/blendvps/blendvps uses as an implicit operand. However, xmm0 is
- // also scratch register, so our mask values can be overwritten. Instead, we
- // manually move the mask to xmm0 inside codegen.
- selector->Emit(
- opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(1)),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(2)));
- }
-}
-} // namespace
-
-void InstructionSelector::VisitI8x16SignSelect(Node* node) {
- VisitSignSelect(this, node, kIA32I8x16SignSelect);
-}
-
-void InstructionSelector::VisitI16x8SignSelect(Node* node) {
- VisitSignSelect(this, node, kIA32I16x8SignSelect);
-}
-
-void InstructionSelector::VisitI32x4SignSelect(Node* node) {
- VisitSignSelect(this, node, kIA32I32x4SignSelect);
-}
-
-void InstructionSelector::VisitI64x2SignSelect(Node* node) {
- VisitSignSelect(this, node, kIA32I64x2SignSelect);
-}
-
-namespace {
void VisitExtAddPairwise(InstructionSelector* selector, Node* node,
ArchOpcode opcode, bool need_temp) {
IA32OperandGenerator g(selector);
@@ -3218,14 +3156,7 @@ void InstructionSelector::VisitI64x2GeS(Node* node) {
}
void InstructionSelector::VisitI64x2Abs(Node* node) {
- IA32OperandGenerator g(this);
- if (CpuFeatures::IsSupported(AVX)) {
- Emit(kIA32I64x2Abs, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)));
- } else {
- Emit(kIA32I64x2Abs, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)));
- }
+ VisitRRSimd(this, node, kIA32I64x2Abs, kIA32I64x2Abs);
}
// static
diff --git a/deps/v8/src/compiler/backend/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h
index 89cd7be8647..31d669813e2 100644
--- a/deps/v8/src/compiler/backend/instruction-codes.h
+++ b/deps/v8/src/compiler/backend/instruction-codes.h
@@ -71,12 +71,12 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
/* IsCallWithDescriptorFlags fast */ \
V(ArchTailCallCodeObject) \
V(ArchTailCallAddress) \
- V(ArchTailCallWasm) \
+ IF_WASM(V, ArchTailCallWasm) \
/* Update IsTailCall if further TailCall opcodes are added */ \
\
V(ArchCallCodeObject) \
V(ArchCallJSFunction) \
- V(ArchCallWasmFunction) \
+ IF_WASM(V, ArchCallWasmFunction) \
V(ArchCallBuiltinPointer) \
/* Update IsCallWithDescriptorFlags if further Call opcodes are added */ \
\
@@ -212,7 +212,8 @@ enum FlagsMode {
kFlags_deoptimize = 3,
kFlags_deoptimize_and_poison = 4,
kFlags_set = 5,
- kFlags_trap = 6
+ kFlags_trap = 6,
+ kFlags_select = 7,
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.cc b/deps/v8/src/compiler/backend/instruction-scheduler.cc
index 99c36c923db..c46d263bae2 100644
--- a/deps/v8/src/compiler/backend/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/backend/instruction-scheduler.cc
@@ -307,7 +307,9 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchPrepareTailCall:
case kArchTailCallCodeObject:
case kArchTailCallAddress:
+#if V8_ENABLE_WEBASSEMBLY
case kArchTailCallWasm:
+#endif // V8_ENABLE_WEBASSEMBLY
case kArchAbortCSAAssert:
return kHasSideEffect;
@@ -321,7 +323,9 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchCallCFunction:
case kArchCallCodeObject:
case kArchCallJSFunction:
+#if V8_ENABLE_WEBASSEMBLY
case kArchCallWasmFunction:
+#endif // V8_ENABLE_WEBASSEMBLY
case kArchCallBuiltinPointer:
// Calls can cause GC and GC may relocate objects. If a pure instruction
// operates on a tagged pointer that was cast to a word then it may be
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index 6571db18015..5638ff1241a 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -19,7 +19,10 @@
#include "src/compiler/schedule.h"
#include "src/compiler/state-values-utils.h"
#include "src/deoptimizer/deoptimizer.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/simd-shuffle.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -57,6 +60,7 @@ InstructionSelector::InstructionSelector(
enable_scheduling_(enable_scheduling),
enable_roots_relative_addressing_(enable_roots_relative_addressing),
enable_switch_jump_table_(enable_switch_jump_table),
+ state_values_cache_(zone),
poisoning_level_(poisoning_level),
frame_(frame),
instruction_selection_failed_(false),
@@ -578,6 +582,8 @@ class StateObjectDeduplicator {
return id;
}
+ size_t size() const { return objects_.size(); }
+
private:
static bool HasObjectId(Node* node) {
return node->opcode() == IrOpcode::kTypedObjectState ||
@@ -649,24 +655,94 @@ size_t InstructionSelector::AddOperandToStateValueDescriptor(
}
}
+struct InstructionSelector::CachedStateValues : public ZoneObject {
+ public:
+ CachedStateValues(Zone* zone, StateValueList* values, size_t values_start,
+ InstructionOperandVector* inputs, size_t inputs_start)
+ : inputs_(inputs->begin() + inputs_start, inputs->end(), zone),
+ values_(values->MakeSlice(values_start)) {}
+
+ size_t Emit(InstructionOperandVector* inputs, StateValueList* values) {
+ inputs->insert(inputs->end(), inputs_.begin(), inputs_.end());
+ values->PushCachedSlice(values_);
+ return inputs_.size();
+ }
+
+ private:
+ InstructionOperandVector inputs_;
+ StateValueList::Slice values_;
+};
+
+class InstructionSelector::CachedStateValuesBuilder {
+ public:
+ explicit CachedStateValuesBuilder(StateValueList* values,
+ InstructionOperandVector* inputs,
+ StateObjectDeduplicator* deduplicator)
+ : values_(values),
+ inputs_(inputs),
+ deduplicator_(deduplicator),
+ values_start_(values->size()),
+ nested_start_(values->nested_count()),
+ inputs_start_(inputs->size()),
+ deduplicator_start_(deduplicator->size()) {}
+
+ // We can only build a CachedStateValues for a StateValue if it didn't update
+ // any of the ids in the deduplicator.
+ bool CanCache() const { return deduplicator_->size() == deduplicator_start_; }
+
+ InstructionSelector::CachedStateValues* Build(Zone* zone) {
+ DCHECK(CanCache());
+ DCHECK(values_->nested_count() == nested_start_);
+ return zone->New<InstructionSelector::CachedStateValues>(
+ zone, values_, values_start_, inputs_, inputs_start_);
+ }
+
+ private:
+ StateValueList* values_;
+ InstructionOperandVector* inputs_;
+ StateObjectDeduplicator* deduplicator_;
+ size_t values_start_;
+ size_t nested_start_;
+ size_t inputs_start_;
+ size_t deduplicator_start_;
+};
+
size_t InstructionSelector::AddInputsToFrameStateDescriptor(
StateValueList* values, InstructionOperandVector* inputs,
OperandGenerator* g, StateObjectDeduplicator* deduplicator, Node* node,
FrameStateInputKind kind, Zone* zone) {
- size_t entries = 0;
- StateValuesAccess::iterator it = StateValuesAccess(node).begin();
- // Take advantage of sparse nature of StateValuesAccess to skip over multiple
- // empty nodes at once pushing repeated OptimizedOuts all in one go.
- while (!it.done()) {
- values->PushOptimizedOut(it.AdvanceTillNotEmpty());
- if (it.done()) break;
- StateValuesAccess::TypedNode input_node = *it;
- entries += AddOperandToStateValueDescriptor(values, inputs, g, deduplicator,
- input_node.node,
- input_node.type, kind, zone);
- ++it;
+ // StateValues are often shared across different nodes, and processing them is
+ // expensive, so cache the result of processing a StateValue so that we can
+ // quickly copy the result if we see it again.
+ FrameStateInput key(node, kind);
+ auto cache_entry = state_values_cache_.find(key);
+ if (cache_entry != state_values_cache_.end()) {
+ // Entry found in cache, emit cached version.
+ return cache_entry->second->Emit(inputs, values);
+ } else {
+ // Not found in cache, generate and then store in cache if possible.
+ size_t entries = 0;
+ CachedStateValuesBuilder cache_builder(values, inputs, deduplicator);
+ StateValuesAccess::iterator it = StateValuesAccess(node).begin();
+ // Take advantage of the sparse nature of StateValuesAccess to skip over
+ // multiple empty nodes at once, pushing repeated OptimizedOuts all in one
+ // go.
+ while (!it.done()) {
+ values->PushOptimizedOut(it.AdvanceTillNotEmpty());
+ if (it.done()) break;
+ StateValuesAccess::TypedNode input_node = *it;
+ entries += AddOperandToStateValueDescriptor(values, inputs, g,
+ deduplicator, input_node.node,
+ input_node.type, kind, zone);
+ ++it;
+ }
+ if (cache_builder.CanCache()) {
+ // Use this->zone() to build the cache entry in the instruction selector's
+ // zone rather than the longer-lived instruction zone.
+ state_values_cache_.emplace(key, cache_builder.Build(this->zone()));
+ }
+ return entries;
}
- return entries;
}
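
The caching added here is a straightforward memoization of the lookup/compute/store shape. A simplified standalone sketch follows (hypothetical names, std containers instead of zone containers, int stand-ins for instruction operands); the real code additionally keys on the FrameStateInputKind and only caches when the deduplicator was not updated.

#include <cstddef>
#include <unordered_map>
#include <vector>

// Stand-in for a cached per-node result: the operands that were produced for
// a frame-state input, ready to be re-appended verbatim on a cache hit.
struct CachedEntry {
  std::vector<int> operands;
  size_t Emit(std::vector<int>* inputs) const {
    inputs->insert(inputs->end(), operands.begin(), operands.end());
    return operands.size();
  }
};

std::unordered_map<const void*, CachedEntry> state_values_cache;

size_t AddInputsSketch(const void* node, std::vector<int>* inputs) {
  auto it = state_values_cache.find(node);
  if (it != state_values_cache.end()) return it->second.Emit(inputs);  // hit
  size_t start = inputs->size();
  // ... expensive processing that appends operands to *inputs ...
  state_values_cache[node] =
      CachedEntry{std::vector<int>(inputs->begin() + start, inputs->end())};
  return inputs->size() - start;
}
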
// Returns the number of instruction operands added to inputs.
@@ -804,7 +880,7 @@ Instruction* InstructionSelector::EmitWithContinuation(
AppendDeoptimizeArguments(&continuation_inputs_, cont->kind(),
cont->reason(), cont->feedback(),
FrameState{cont->frame_state()});
- } else if (cont->IsSet()) {
+ } else if (cont->IsSet() || cont->IsSelect()) {
continuation_outputs_.push_back(g.DefineAsRegister(cont->result()));
} else if (cont->IsTrap()) {
int trap_id = static_cast<int>(cont->trap_id());
@@ -914,7 +990,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
}
frame_->EnsureReturnSlots(
- static_cast<int>(buffer->descriptor->StackReturnCount()));
+ static_cast<int>(buffer->descriptor->ReturnSlotCount()));
}
// Filter out the outputs that aren't live because no projection uses them.
@@ -967,6 +1043,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
: g.UseRegister(callee));
break;
+#if V8_ENABLE_WEBASSEMBLY
case CallDescriptor::kCallWasmCapiFunction:
case CallDescriptor::kCallWasmFunction:
case CallDescriptor::kCallWasmImportWrapper:
@@ -979,6 +1056,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
: g.UseRegister(callee));
break;
+#endif // V8_ENABLE_WEBASSEMBLY
case CallDescriptor::kCallBuiltinPointer:
// The common case for builtin pointers is to have the target in a
// register. If we have a constant, we use a register anyway to simplify
@@ -1417,7 +1495,8 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kRetain:
VisitRetain(node);
return;
- case IrOpcode::kLoad: {
+ case IrOpcode::kLoad:
+ case IrOpcode::kLoadImmutable: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
return VisitLoad(node);
@@ -1426,12 +1505,6 @@ void InstructionSelector::VisitNode(Node* node) {
MarkAsRepresentation(MachineRepresentation::kSimd128, node);
return VisitLoadTransform(node);
}
- case IrOpcode::kPrefetchTemporal: {
- return VisitPrefetchTemporal(node);
- }
- case IrOpcode::kPrefetchNonTemporal: {
- return VisitPrefetchNonTemporal(node);
- }
case IrOpcode::kLoadLane: {
MarkAsRepresentation(MachineRepresentation::kSimd128, node);
return VisitLoadLane(node);
@@ -1675,6 +1748,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsFloat32(node), VisitFloat32Max(node);
case IrOpcode::kFloat32Min:
return MarkAsFloat32(node), VisitFloat32Min(node);
+ case IrOpcode::kFloat32Select:
+ return MarkAsFloat32(node), VisitSelect(node);
case IrOpcode::kFloat64Add:
return MarkAsFloat64(node), VisitFloat64Add(node);
case IrOpcode::kFloat64Sub:
@@ -1743,6 +1818,8 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitFloat64LessThan(node);
case IrOpcode::kFloat64LessThanOrEqual:
return VisitFloat64LessThanOrEqual(node);
+ case IrOpcode::kFloat64Select:
+ return MarkAsFloat64(node), VisitSelect(node);
case IrOpcode::kFloat32RoundDown:
return MarkAsFloat32(node), VisitFloat32RoundDown(node);
case IrOpcode::kFloat64RoundDown:
@@ -1968,8 +2045,6 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF32x4RecipSqrtApprox(node);
case IrOpcode::kF32x4Add:
return MarkAsSimd128(node), VisitF32x4Add(node);
- case IrOpcode::kF32x4AddHoriz:
- return MarkAsSimd128(node), VisitF32x4AddHoriz(node);
case IrOpcode::kF32x4Sub:
return MarkAsSimd128(node), VisitF32x4Sub(node);
case IrOpcode::kF32x4Mul:
@@ -2058,8 +2133,6 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI64x2ExtMulLowI32x4U(node);
case IrOpcode::kI64x2ExtMulHighI32x4U:
return MarkAsSimd128(node), VisitI64x2ExtMulHighI32x4U(node);
- case IrOpcode::kI64x2SignSelect:
- return MarkAsSimd128(node), VisitI64x2SignSelect(node);
case IrOpcode::kI32x4Splat:
return MarkAsSimd128(node), VisitI32x4Splat(node);
case IrOpcode::kI32x4ExtractLane:
@@ -2080,8 +2153,6 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI32x4ShrS(node);
case IrOpcode::kI32x4Add:
return MarkAsSimd128(node), VisitI32x4Add(node);
- case IrOpcode::kI32x4AddHoriz:
- return MarkAsSimd128(node), VisitI32x4AddHoriz(node);
case IrOpcode::kI32x4Sub:
return MarkAsSimd128(node), VisitI32x4Sub(node);
case IrOpcode::kI32x4Mul:
@@ -2128,8 +2199,6 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI32x4ExtMulLowI16x8U(node);
case IrOpcode::kI32x4ExtMulHighI16x8U:
return MarkAsSimd128(node), VisitI32x4ExtMulHighI16x8U(node);
- case IrOpcode::kI32x4SignSelect:
- return MarkAsSimd128(node), VisitI32x4SignSelect(node);
case IrOpcode::kI32x4ExtAddPairwiseI16x8S:
return MarkAsSimd128(node), VisitI32x4ExtAddPairwiseI16x8S(node);
case IrOpcode::kI32x4ExtAddPairwiseI16x8U:
@@ -2162,8 +2231,6 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI16x8Add(node);
case IrOpcode::kI16x8AddSatS:
return MarkAsSimd128(node), VisitI16x8AddSatS(node);
- case IrOpcode::kI16x8AddHoriz:
- return MarkAsSimd128(node), VisitI16x8AddHoriz(node);
case IrOpcode::kI16x8Sub:
return MarkAsSimd128(node), VisitI16x8Sub(node);
case IrOpcode::kI16x8SubSatS:
@@ -2218,8 +2285,6 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI16x8ExtMulLowI8x16U(node);
case IrOpcode::kI16x8ExtMulHighI8x16U:
return MarkAsSimd128(node), VisitI16x8ExtMulHighI8x16U(node);
- case IrOpcode::kI16x8SignSelect:
- return MarkAsSimd128(node), VisitI16x8SignSelect(node);
case IrOpcode::kI16x8ExtAddPairwiseI8x16S:
return MarkAsSimd128(node), VisitI16x8ExtAddPairwiseI8x16S(node);
case IrOpcode::kI16x8ExtAddPairwiseI8x16U:
@@ -2248,8 +2313,6 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI8x16Sub(node);
case IrOpcode::kI8x16SubSatS:
return MarkAsSimd128(node), VisitI8x16SubSatS(node);
- case IrOpcode::kI8x16Mul:
- return MarkAsSimd128(node), VisitI8x16Mul(node);
case IrOpcode::kI8x16MinS:
return MarkAsSimd128(node), VisitI8x16MinS(node);
case IrOpcode::kI8x16MaxS:
@@ -2286,8 +2349,6 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI8x16Abs(node);
case IrOpcode::kI8x16BitMask:
return MarkAsWord32(node), VisitI8x16BitMask(node);
- case IrOpcode::kI8x16SignSelect:
- return MarkAsSimd128(node), VisitI8x16SignSelect(node);
case IrOpcode::kS128Const:
return MarkAsSimd128(node), VisitS128Const(node);
case IrOpcode::kS128Zero:
@@ -2310,14 +2371,14 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI8x16Shuffle(node);
case IrOpcode::kV128AnyTrue:
return MarkAsWord32(node), VisitV128AnyTrue(node);
- case IrOpcode::kV64x2AllTrue:
- return MarkAsWord32(node), VisitV64x2AllTrue(node);
- case IrOpcode::kV32x4AllTrue:
- return MarkAsWord32(node), VisitV32x4AllTrue(node);
- case IrOpcode::kV16x8AllTrue:
- return MarkAsWord32(node), VisitV16x8AllTrue(node);
- case IrOpcode::kV8x16AllTrue:
- return MarkAsWord32(node), VisitV8x16AllTrue(node);
+ case IrOpcode::kI64x2AllTrue:
+ return MarkAsWord32(node), VisitI64x2AllTrue(node);
+ case IrOpcode::kI32x4AllTrue:
+ return MarkAsWord32(node), VisitI32x4AllTrue(node);
+ case IrOpcode::kI16x8AllTrue:
+ return MarkAsWord32(node), VisitI16x8AllTrue(node);
+ case IrOpcode::kI8x16AllTrue:
+ return MarkAsWord32(node), VisitI8x16AllTrue(node);
default:
FATAL("Unexpected operator #%d:%s @ node #%d", node->opcode(),
node->op()->mnemonic(), node->id());
@@ -2740,7 +2801,7 @@ void InstructionSelector::VisitI64x2ReplaceLaneI32Pair(Node* node) {
}
#endif // !V8_TARGET_ARCH_IA32
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64
#if !V8_TARGET_ARCH_ARM64
#if !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); }
@@ -2752,25 +2813,7 @@ void InstructionSelector::VisitF64x2Qfms(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Qfma(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Qfms(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM64
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X
-
-#if !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32
-// TODO(v8:11168): Prototyping prefetch.
-void InstructionSelector::VisitPrefetchTemporal(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitPrefetchNonTemporal(Node* node) {
- UNIMPLEMENTED();
-}
-#endif // !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 || !V8_TARGET_ARCH_IA32
-
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64 && \
- !V8_TARGET_ARCH_ARM
-// TODO(v8:10983) Prototyping sign select.
-void InstructionSelector::VisitI8x16SignSelect(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI16x8SignSelect(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI32x4SignSelect(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2SignSelect(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64
- // && !V8_TARGET_ARCH_ARM
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
@@ -2939,11 +2982,13 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
case CallDescriptor::kCallJSFunction:
opcode = EncodeCallDescriptorFlags(kArchCallJSFunction, flags);
break;
+#if V8_ENABLE_WEBASSEMBLY
case CallDescriptor::kCallWasmCapiFunction:
case CallDescriptor::kCallWasmFunction:
case CallDescriptor::kCallWasmImportWrapper:
opcode = EncodeCallDescriptorFlags(kArchCallWasmFunction, flags);
break;
+#endif // V8_ENABLE_WEBASSEMBLY
case CallDescriptor::kCallBuiltinPointer:
opcode = EncodeCallDescriptorFlags(kArchCallBuiltinPointer, flags);
break;
@@ -3002,10 +3047,12 @@ void InstructionSelector::VisitTailCall(Node* node) {
DCHECK(!caller->IsJSFunctionCall());
opcode = kArchTailCallAddress;
break;
+#if V8_ENABLE_WEBASSEMBLY
case CallDescriptor::kCallWasmFunction:
DCHECK(!caller->IsJSFunctionCall());
opcode = kArchTailCallWasm;
break;
+#endif // V8_ENABLE_WEBASSEMBLY
default:
UNREACHABLE();
}
@@ -3013,17 +3060,17 @@ void InstructionSelector::VisitTailCall(Node* node) {
Emit(kArchPrepareTailCall, g.NoOutput());
- // Add an immediate operand that represents the first slot that is unused
- // with respect to the stack pointer that has been updated for the tail call
- // instruction. This is used by backends that need to pad arguments for stack
- // alignment, in order to store an optional slot of padding above the
- // arguments.
- const int optional_padding_slot = callee->GetFirstUnusedStackSlot();
- buffer.instruction_args.push_back(g.TempImmediate(optional_padding_slot));
+ // Add an immediate operand that represents the offset to the first slot that
+ // is unused with respect to the stack pointer that has been updated for the
+ // tail call instruction. Backends that pad arguments can write the padding
+ // value at this offset from the stack.
+ const int optional_padding_offset =
+ callee->GetOffsetToFirstUnusedStackSlot() - 1;
+ buffer.instruction_args.push_back(g.TempImmediate(optional_padding_offset));
- const int first_unused_stack_slot =
+ const int first_unused_slot_offset =
kReturnAddressStackSlotCount + stack_param_delta;
- buffer.instruction_args.push_back(g.TempImmediate(first_unused_stack_slot));
+ buffer.instruction_args.push_back(g.TempImmediate(first_unused_slot_offset));
// Emit the tailcall instruction.
Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
@@ -3095,6 +3142,13 @@ void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
}
}
+void InstructionSelector::VisitSelect(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSelect(kNotEqual, node,
+ node->InputAt(1), node->InputAt(2));
+ VisitWordCompareZero(node, node->InputAt(0), &cont);
+}
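
Conceptually, the new kFlags_select continuation lowers a machine-level select, result = cond ? vtrue : vfalse, by evaluating the condition into flags and letting the backend materialize the choice (for example with a conditional move). A tiny sketch of the source-level semantics being selected on (plain C++, not compiler IR; the kNotEqual condition above corresponds to the comparison against zero):

#include <cassert>

// Semantics of Float64Select(cond, vtrue, vfalse).
double Float64SelectModel(int cond, double vtrue, double vfalse) {
  return cond != 0 ? vtrue : vfalse;
}

int main() {
  assert(Float64SelectModel(1, 2.5, 3.5) == 2.5);
  assert(Float64SelectModel(0, 2.5, 3.5) == 3.5);
  return 0;
}
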
+
void InstructionSelector::VisitDynamicCheckMapsWithDeoptUnless(Node* node) {
OperandGenerator g(this);
DynamicCheckMapsWithDeoptUnlessNode n(node);
@@ -3275,6 +3329,7 @@ FrameStateDescriptor* GetFrameStateDescriptorInternal(Zone* zone,
GetFrameStateDescriptorInternal(zone, state.outer_frame_state());
}
+#if V8_ENABLE_WEBASSEMBLY
if (state_info.type() == FrameStateType::kJSToWasmBuiltinContinuation) {
auto function_info = static_cast<const JSToWasmFrameStateFunctionInfo*>(
state_info.function_info());
@@ -3283,6 +3338,7 @@ FrameStateDescriptor* GetFrameStateDescriptorInternal(Zone* zone,
state_info.state_combine(), parameters, locals, stack,
state_info.shared_info(), outer_state, function_info->signature());
}
+#endif // V8_ENABLE_WEBASSEMBLY
return zone->New<FrameStateDescriptor>(
zone, state_info.type(), state_info.bailout_id(),
@@ -3301,6 +3357,7 @@ FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
return desc;
}
+#if V8_ENABLE_WEBASSEMBLY
void InstructionSelector::CanonicalizeShuffle(Node* node, uint8_t* shuffle,
bool* is_swizzle) {
// Get raw shuffle indices.
@@ -3328,6 +3385,7 @@ void InstructionSelector::SwapShuffleInputs(Node* node) {
node->ReplaceInput(0, input1);
node->ReplaceInput(1, input0);
}
+#endif // V8_ENABLE_WEBASSEMBLY
// static
bool InstructionSelector::NeedsPoisoning(IsSafetyCheck safety_check) const {
diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h
index 4a65b5193eb..8984c05c3a6 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.h
+++ b/deps/v8/src/compiler/backend/instruction-selector.h
@@ -16,9 +16,12 @@
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
-#include "src/wasm/simd-shuffle.h"
#include "src/zone/zone-containers.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/simd-shuffle.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace internal {
@@ -89,6 +92,11 @@ class FlagsContinuation final {
return FlagsContinuation(condition, trap_id, result);
}
+ static FlagsContinuation ForSelect(FlagsCondition condition, Node* result,
+ Node* true_value, Node* false_value) {
+ return FlagsContinuation(condition, result, true_value, false_value);
+ }
+
bool IsNone() const { return mode_ == kFlags_none; }
bool IsBranch() const {
return mode_ == kFlags_branch || mode_ == kFlags_branch_and_poison;
@@ -102,6 +110,7 @@ class FlagsContinuation final {
}
bool IsSet() const { return mode_ == kFlags_set; }
bool IsTrap() const { return mode_ == kFlags_trap; }
+ bool IsSelect() const { return mode_ == kFlags_select; }
FlagsCondition condition() const {
DCHECK(!IsNone());
return condition_;
@@ -135,7 +144,7 @@ class FlagsContinuation final {
return extra_args_count_;
}
Node* result() const {
- DCHECK(IsSet());
+ DCHECK(IsSet() || IsSelect());
return frame_state_or_result_;
}
TrapId trap_id() const {
@@ -150,6 +159,14 @@ class FlagsContinuation final {
DCHECK(IsBranch());
return false_block_;
}
+ Node* true_value() const {
+ DCHECK(IsSelect());
+ return true_value_;
+ }
+ Node* false_value() const {
+ DCHECK(IsSelect());
+ return false_value_;
+ }
void Negate() {
DCHECK(!IsNone());
@@ -241,6 +258,18 @@ class FlagsContinuation final {
DCHECK_NOT_NULL(result);
}
+ FlagsContinuation(FlagsCondition condition, Node* result,
+ Node* true_value, Node* false_value)
+ : mode_(kFlags_select),
+ condition_(condition),
+ frame_state_or_result_(result),
+ true_value_(true_value),
+ false_value_(false_value) {
+ DCHECK_NOT_NULL(result);
+ DCHECK_NOT_NULL(true_value);
+ DCHECK_NOT_NULL(false_value);
+ }
+
FlagsMode const mode_;
FlagsCondition condition_;
DeoptimizeKind kind_; // Only valid if mode_ == kFlags_deoptimize*
@@ -253,6 +282,8 @@ class FlagsContinuation final {
BasicBlock* true_block_; // Only valid if mode_ == kFlags_branch*.
BasicBlock* false_block_; // Only valid if mode_ == kFlags_branch*.
TrapId trap_id_; // Only valid if mode_ == kFlags_trap.
+ Node* true_value_; // Only valid if mode_ == kFlags_select.
+ Node* false_value_; // Only valid if mode_ == kFlags_select.
};
// This struct connects nodes of parameters which are going to be pushed on the
@@ -628,6 +659,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void VisitSwitch(Node* node, const SwitchInfo& sw);
void VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback, FrameState frame_state);
+ void VisitSelect(Node* node);
void VisitReturn(Node* ret);
void VisitThrow(Node* node);
void VisitRetain(Node* node);
@@ -652,6 +684,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
// ============= Vector instruction (SIMD) helper fns. =======================
// ===========================================================================
+#if V8_ENABLE_WEBASSEMBLY
// Canonicalize shuffles to make pattern matching simpler. Returns the shuffle
// indices, and a boolean indicating if the shuffle is a swizzle (one input).
void CanonicalizeShuffle(Node* node, uint8_t* shuffle, bool* is_swizzle);
@@ -659,6 +692,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
// Swaps the two first input operands of the node, to help match shuffles
// to specific architectural instructions.
void SwapShuffleInputs(Node* node);
+#endif // V8_ENABLE_WEBASSEMBLY
// ===========================================================================
@@ -698,6 +732,31 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
};
#endif // V8_TARGET_ARCH_64_BIT
+ struct FrameStateInput {
+ FrameStateInput(Node* node_, FrameStateInputKind kind_)
+ : node(node_), kind(kind_) {}
+
+ Node* node;
+ FrameStateInputKind kind;
+
+ struct Hash {
+ size_t operator()(FrameStateInput const& source) const {
+ return base::hash_combine(source.node,
+ static_cast<size_t>(source.kind));
+ }
+ };
+
+ struct Equal {
+ bool operator()(FrameStateInput const& lhs,
+ FrameStateInput const& rhs) const {
+ return lhs.node == rhs.node && lhs.kind == rhs.kind;
+ }
+ };
+ };
+
+ struct CachedStateValues;
+ class CachedStateValuesBuilder;
+
// ===========================================================================
Zone* const zone_;
@@ -721,6 +780,9 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
EnableScheduling enable_scheduling_;
EnableRootsRelativeAddressing enable_roots_relative_addressing_;
EnableSwitchJumpTable enable_switch_jump_table_;
+ ZoneUnorderedMap<FrameStateInput, CachedStateValues*, FrameStateInput::Hash,
+ FrameStateInput::Equal>
+ state_values_cache_;
PoisoningMitigationLevel poisoning_level_;
Frame* frame_;
diff --git a/deps/v8/src/compiler/backend/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc
index a14ae2a7029..43824e8fcb9 100644
--- a/deps/v8/src/compiler/backend/instruction.cc
+++ b/deps/v8/src/compiler/backend/instruction.cc
@@ -17,7 +17,10 @@
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frames.h"
#include "src/utils/ostreams.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/value-type.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -166,9 +169,13 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperand& op) {
case InstructionOperand::IMMEDIATE: {
ImmediateOperand imm = ImmediateOperand::cast(op);
switch (imm.type()) {
- case ImmediateOperand::INLINE:
- return os << "#" << imm.inline_value();
- case ImmediateOperand::INDEXED:
+ case ImmediateOperand::INLINE_INT32:
+ return os << "#" << imm.inline_int32_value();
+ case ImmediateOperand::INLINE_INT64:
+ return os << "#" << imm.inline_int64_value();
+ case ImmediateOperand::INDEXED_RPO:
+ return os << "[rpo_immediate:" << imm.indexed_value() << "]";
+ case ImmediateOperand::INDEXED_IMM:
return os << "[immediate:" << imm.indexed_value() << "]";
}
}
@@ -413,6 +420,8 @@ std::ostream& operator<<(std::ostream& os, const FlagsMode& fm) {
return os << "set";
case kFlags_trap:
return os << "trap";
+ case kFlags_select:
+ return os << "select";
}
UNREACHABLE();
}
@@ -829,6 +838,7 @@ InstructionSequence::InstructionSequence(Isolate* isolate,
constants_(ConstantMap::key_compare(),
ConstantMap::allocator_type(zone())),
immediates_(zone()),
+ rpo_immediates_(instruction_blocks->size(), zone()),
instructions_(zone()),
next_virtual_register_(0),
reference_maps_(zone()),
@@ -1012,14 +1022,19 @@ size_t GetConservativeFrameSizeInBytes(FrameStateType type,
// The arguments adaptor frame state is only used in the deoptimizer and
// does not occupy any extra space in the stack. Check out the design doc:
// https://docs.google.com/document/d/150wGaUREaZI6YWqOQFD5l2mWQXaPbbZjcAIJLOFrzMs/edit
- return 0;
+ // We just need to account for the additional parameters we might push
+ // here.
+ return UnoptimizedFrameInfo::GetStackSizeForAdditionalArguments(
+ static_cast<int>(parameters_count));
case FrameStateType::kConstructStub: {
auto info = ConstructStubFrameInfo::Conservative(
static_cast<int>(parameters_count));
return info.frame_size_in_bytes();
}
case FrameStateType::kBuiltinContinuation:
+#if V8_ENABLE_WEBASSEMBLY
case FrameStateType::kJSToWasmBuiltinContinuation:
+#endif // V8_ENABLE_WEBASSEMBLY
case FrameStateType::kJavaScriptBuiltinContinuation:
case FrameStateType::kJavaScriptBuiltinContinuationWithCatch: {
const RegisterConfiguration* config = RegisterConfiguration::Default();
@@ -1074,7 +1089,9 @@ size_t FrameStateDescriptor::GetHeight() const {
case FrameStateType::kUnoptimizedFunction:
return locals_count(); // The accumulator is *not* included.
case FrameStateType::kBuiltinContinuation:
+#if V8_ENABLE_WEBASSEMBLY
case FrameStateType::kJSToWasmBuiltinContinuation:
+#endif
// Custom, non-JS calling convention (that does not have a notion of
// a receiver or context).
return parameters_count();
@@ -1126,6 +1143,7 @@ size_t FrameStateDescriptor::GetJSFrameCount() const {
return count;
}
+#if V8_ENABLE_WEBASSEMBLY
JSToWasmFrameStateDescriptor::JSToWasmFrameStateDescriptor(
Zone* zone, FrameStateType type, BytecodeOffset bailout_id,
OutputFrameStateCombine state_combine, size_t parameters_count,
@@ -1135,7 +1153,8 @@ JSToWasmFrameStateDescriptor::JSToWasmFrameStateDescriptor(
: FrameStateDescriptor(zone, type, bailout_id, state_combine,
parameters_count, locals_count, stack_count,
shared_info, outer_state),
- return_type_(wasm::WasmReturnTypeFromSignature(wasm_signature)) {}
+ return_kind_(wasm::WasmReturnTypeFromSignature(wasm_signature)) {}
+#endif // V8_ENABLE_WEBASSEMBLY
std::ostream& operator<<(std::ostream& os, const RpoNumber& rpo) {
return os << rpo.ToSize();
diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h
index 9aa808491a8..88a114e08e2 100644
--- a/deps/v8/src/compiler/backend/instruction.h
+++ b/deps/v8/src/compiler/backend/instruction.h
@@ -395,7 +395,7 @@ class ConstantOperand : public InstructionOperand {
class ImmediateOperand : public InstructionOperand {
public:
- enum ImmediateType { INLINE, INDEXED };
+ enum ImmediateType { INLINE_INT32, INLINE_INT64, INDEXED_RPO, INDEXED_IMM };
explicit ImmediateOperand(ImmediateType type, int32_t value)
: InstructionOperand(IMMEDIATE) {
@@ -406,13 +406,18 @@ class ImmediateOperand : public InstructionOperand {
ImmediateType type() const { return TypeField::decode(value_); }
- int32_t inline_value() const {
- DCHECK_EQ(INLINE, type());
+ int32_t inline_int32_value() const {
+ DCHECK_EQ(INLINE_INT32, type());
+ return static_cast<int64_t>(value_) >> ValueField::kShift;
+ }
+
+ int64_t inline_int64_value() const {
+ DCHECK_EQ(INLINE_INT64, type());
return static_cast<int64_t>(value_) >> ValueField::kShift;
}
int32_t indexed_value() const {
- DCHECK_EQ(INDEXED, type());
+ DCHECK(type() == INDEXED_IMM || type() == INDEXED_RPO);
return static_cast<int64_t>(value_) >> ValueField::kShift;
}
@@ -423,7 +428,7 @@ class ImmediateOperand : public InstructionOperand {
INSTRUCTION_OPERAND_CASTS(ImmediateOperand, IMMEDIATE)
STATIC_ASSERT(KindField::kSize == 3);
- using TypeField = base::BitField64<ImmediateType, 3, 1>;
+ using TypeField = base::BitField64<ImmediateType, 3, 2>;
using ValueField = base::BitField64<int32_t, 32, 32>;
};
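
With four immediate types, TypeField now needs two bits instead of one. A minimal sketch of the resulting 64-bit packing (hand-rolled constexpr helpers as an assumption, mirroring how base::BitField64 lays out the fields rather than reproducing it): bits [0,3) hold the operand kind, bits [3,5) the immediate type, and bits [32,64) the value.

#include <cstdint>

constexpr uint64_t EncodeImmediate(uint64_t kind, uint64_t type,
                                   int32_t value) {
  return (kind & 0x7u) | ((type & 0x3u) << 3) |
         (static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32);
}

constexpr int32_t DecodeValue(uint64_t encoded) {
  return static_cast<int32_t>(encoded >> 32);
}

static_assert(DecodeValue(EncodeImmediate(1, 3, 42)) == 42,
              "value round-trips");
static_assert(((EncodeImmediate(1, 3, 42) >> 3) & 0x3u) == 3,
              "two-bit type round-trips");
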
@@ -860,6 +865,7 @@ class V8_EXPORT_PRIVATE Instruction final {
FlagsCondition flags_condition() const {
return FlagsConditionField::decode(opcode());
}
+ int misc() const { return MiscField::decode(opcode()); }
static Instruction* New(Zone* zone, InstructionCode opcode) {
return New(zone, opcode, 0, nullptr, 0, nullptr, 0, nullptr);
@@ -924,7 +930,11 @@ class V8_EXPORT_PRIVATE Instruction final {
bool IsJump() const { return arch_opcode() == ArchOpcode::kArchJmp; }
bool IsRet() const { return arch_opcode() == ArchOpcode::kArchRet; }
bool IsTailCall() const {
+#if V8_ENABLE_WEBASSEMBLY
return arch_opcode() <= ArchOpcode::kArchTailCallWasm;
+#else
+ return arch_opcode() <= ArchOpcode::kArchTailCallAddress;
+#endif // V8_ENABLE_WEBASSEMBLY
}
bool IsThrow() const {
return arch_opcode() == ArchOpcode::kArchThrowTerminator;
@@ -1017,6 +1027,8 @@ std::ostream& operator<<(std::ostream&, const Instruction&);
class RpoNumber final {
public:
static const int kInvalidRpoNumber = -1;
+ RpoNumber() : index_(kInvalidRpoNumber) {}
+
int ToInt() const {
DCHECK(IsValid());
return index_;
@@ -1086,8 +1098,15 @@ class V8_EXPORT_PRIVATE Constant final {
RelocInfo::Mode rmode() const { return rmode_; }
+ bool FitsInInt32() const {
+ if (type() == kInt32) return true;
+ DCHECK(type() == kInt64);
+ return value_ >= std::numeric_limits<int32_t>::min() &&
+ value_ <= std::numeric_limits<int32_t>::max();
+ }
+
int32_t ToInt32() const {
- DCHECK(type() == kInt32 || type() == kInt64);
+ DCHECK(FitsInInt32());
const int32_t value = static_cast<int32_t>(value_);
DCHECK_EQ(value_, static_cast<int64_t>(value));
return value;
@@ -1225,6 +1244,8 @@ class StateValueList {
size_t size() { return fields_.size(); }
+ size_t nested_count() { return nested_.size(); }
+
struct Value {
StateValueDescriptor* desc;
StateValueList* nested;
@@ -1266,6 +1287,14 @@ class StateValueList {
ZoneVector<StateValueList*>::iterator nested_iterator;
};
+ struct Slice {
+ Slice(ZoneVector<StateValueDescriptor>::iterator start, size_t fields)
+ : start_position(start), fields_count(fields) {}
+
+ ZoneVector<StateValueDescriptor>::iterator start_position;
+ size_t fields_count;
+ };
+
void ReserveSize(size_t size) { fields_.reserve(size); }
StateValueList* PushRecursiveField(Zone* zone, size_t id) {
@@ -1289,11 +1318,31 @@ class StateValueList {
void PushOptimizedOut(size_t num = 1) {
fields_.insert(fields_.end(), num, StateValueDescriptor::OptimizedOut());
}
+ void PushCachedSlice(const Slice& cached) {
+ fields_.insert(fields_.end(), cached.start_position,
+ cached.start_position + cached.fields_count);
+ }
+
+ // Returns a Slice representing the (non-nested) fields in StateValueList from
+ // values_start to the current end position.
+ Slice MakeSlice(size_t values_start) {
+ DCHECK(!HasNestedFieldsAfter(values_start));
+ size_t fields_count = fields_.size() - values_start;
+ return Slice(fields_.begin() + values_start, fields_count);
+ }
iterator begin() { return iterator(fields_.begin(), nested_.begin()); }
iterator end() { return iterator(fields_.end(), nested_.end()); }
private:
+ bool HasNestedFieldsAfter(size_t values_start) {
+ auto it = fields_.begin() + values_start;
+ for (; it != fields_.end(); it++) {
+ if (it->IsNested()) return true;
+ }
+ return false;
+ }
+
ZoneVector<StateValueDescriptor> fields_;
ZoneVector<StateValueList*> nested_;
};
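
The Slice mechanism simply remembers a contiguous run of already-pushed descriptors so it can be replayed later. An index-based sketch of the same idea (the real code records a ZoneVector iterator plus a field count, and forbids nested fields in the sliced range):

#include <cstddef>
#include <vector>

struct SliceSketch {
  size_t start;
  size_t count;
};

// Record the fields appended since |values_start| so they can be replayed.
SliceSketch MakeSliceSketch(const std::vector<int>& fields,
                            size_t values_start) {
  return {values_start, fields.size() - values_start};
}

// Re-append a previously recorded run of fields.
void PushCachedSliceSketch(std::vector<int>* fields, const SliceSketch& s) {
  for (size_t i = 0; i < s.count; ++i) {
    fields->push_back((*fields)[s.start + i]);
  }
}
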
@@ -1319,7 +1368,9 @@ class FrameStateDescriptor : public ZoneObject {
bool HasContext() const {
return FrameStateFunctionInfo::IsJSFunctionType(type_) ||
type_ == FrameStateType::kBuiltinContinuation ||
+#if V8_ENABLE_WEBASSEMBLY
type_ == FrameStateType::kJSToWasmBuiltinContinuation ||
+#endif // V8_ENABLE_WEBASSEMBLY
type_ == FrameStateType::kConstructStub;
}
@@ -1359,6 +1410,7 @@ class FrameStateDescriptor : public ZoneObject {
FrameStateDescriptor* const outer_state_;
};
+#if V8_ENABLE_WEBASSEMBLY
class JSToWasmFrameStateDescriptor : public FrameStateDescriptor {
public:
JSToWasmFrameStateDescriptor(Zone* zone, FrameStateType type,
@@ -1370,11 +1422,12 @@ class JSToWasmFrameStateDescriptor : public FrameStateDescriptor {
FrameStateDescriptor* outer_state,
const wasm::FunctionSig* wasm_signature);
- base::Optional<wasm::ValueKind> return_type() const { return return_type_; }
+ base::Optional<wasm::ValueKind> return_kind() const { return return_kind_; }
private:
- base::Optional<wasm::ValueKind> return_type_;
+ base::Optional<wasm::ValueKind> return_kind_;
};
+#endif // V8_ENABLE_WEBASSEMBLY
// A deoptimization entry is a pair of the reason why we deoptimize and the
// frame state descriptor that we have to go back to.
@@ -1646,21 +1699,50 @@ class V8_EXPORT_PRIVATE InstructionSequence final
using Immediates = ZoneVector<Constant>;
Immediates& immediates() { return immediates_; }
+ using RpoImmediates = ZoneVector<RpoNumber>;
+ RpoImmediates& rpo_immediates() { return rpo_immediates_; }
+
ImmediateOperand AddImmediate(const Constant& constant) {
- if (constant.type() == Constant::kInt32 &&
- RelocInfo::IsNone(constant.rmode())) {
- return ImmediateOperand(ImmediateOperand::INLINE, constant.ToInt32());
+ if (RelocInfo::IsNone(constant.rmode())) {
+ if (constant.type() == Constant::kRpoNumber) {
+ // Ideally we would inline RPO numbers into the operand; however, jump
+ // threading modifies RPO values, so we indirect through a vector of
+ // rpo_immediates to enable rewriting. We keep this separate from the
+ // immediates vector so that we don't repeatedly push the same rpo
+ // number.
+ RpoNumber rpo_number = constant.ToRpoNumber();
+ DCHECK(!rpo_immediates().at(rpo_number.ToSize()).IsValid() ||
+ rpo_immediates().at(rpo_number.ToSize()) == rpo_number);
+ rpo_immediates()[rpo_number.ToSize()] = rpo_number;
+ return ImmediateOperand(ImmediateOperand::INDEXED_RPO,
+ rpo_number.ToInt());
+ } else if (constant.type() == Constant::kInt32) {
+ return ImmediateOperand(ImmediateOperand::INLINE_INT32,
+ constant.ToInt32());
+ } else if (constant.type() == Constant::kInt64 &&
+ constant.FitsInInt32()) {
+ return ImmediateOperand(ImmediateOperand::INLINE_INT64,
+ constant.ToInt32());
+ }
}
int index = static_cast<int>(immediates_.size());
immediates_.push_back(constant);
- return ImmediateOperand(ImmediateOperand::INDEXED, index);
+ return ImmediateOperand(ImmediateOperand::INDEXED_IMM, index);
}
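
A condensed model of the AddImmediate decision above (hypothetical names, std::vector instead of ZoneVector): small constants are inlined into the operand encoding, RPO numbers and everything else go through an indexed side table.

#include <cstdint>
#include <limits>
#include <vector>

enum class ImmKindSketch { kInlineInt32, kInlineInt64, kIndexed };
struct ImmSketch { ImmKindSketch kind; int64_t payload; };

std::vector<int64_t> immediates_pool;

ImmSketch AddImmediateSketch(int64_t value, bool is_int32) {
  if (is_int32) return {ImmKindSketch::kInlineInt32, value};
  if (value >= std::numeric_limits<int32_t>::min() &&
      value <= std::numeric_limits<int32_t>::max()) {
    // 64-bit constant that still fits in the 32-bit inline payload.
    return {ImmKindSketch::kInlineInt64, value};
  }
  immediates_pool.push_back(value);
  return {ImmKindSketch::kIndexed,
          static_cast<int64_t>(immediates_pool.size() - 1)};
}
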
Constant GetImmediate(const ImmediateOperand* op) const {
switch (op->type()) {
- case ImmediateOperand::INLINE:
- return Constant(op->inline_value());
- case ImmediateOperand::INDEXED: {
+ case ImmediateOperand::INLINE_INT32:
+ return Constant(op->inline_int32_value());
+ case ImmediateOperand::INLINE_INT64:
+ return Constant(op->inline_int64_value());
+ case ImmediateOperand::INDEXED_RPO: {
+ int index = op->indexed_value();
+ DCHECK_LE(0, index);
+ DCHECK_GT(rpo_immediates_.size(), index);
+ return Constant(rpo_immediates_[index]);
+ }
+ case ImmediateOperand::INDEXED_IMM: {
int index = op->indexed_value();
DCHECK_LE(0, index);
DCHECK_GT(immediates_.size(), index);
@@ -1707,6 +1789,11 @@ class V8_EXPORT_PRIVATE InstructionSequence final
void RecomputeAssemblyOrderForTesting();
+ void IncreaseRpoForTesting(size_t rpo_count) {
+ DCHECK_GE(rpo_count, rpo_immediates().size());
+ rpo_immediates().resize(rpo_count);
+ }
+
private:
friend V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
const InstructionSequence&);
@@ -1726,6 +1813,7 @@ class V8_EXPORT_PRIVATE InstructionSequence final
SourcePositionMap source_positions_;
ConstantMap constants_;
Immediates immediates_;
+ RpoImmediates rpo_immediates_;
InstructionDeque instructions_;
int next_virtual_register_;
ReferenceMapDeque reference_maps_;
diff --git a/deps/v8/src/compiler/backend/jump-threading.cc b/deps/v8/src/compiler/backend/jump-threading.cc
index c09274ca2f4..96a3b144a00 100644
--- a/deps/v8/src/compiler/backend/jump-threading.cc
+++ b/deps/v8/src/compiler/backend/jump-threading.cc
@@ -131,8 +131,8 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone,
// Dynamic return values might use different registers at
// different return sites and therefore cannot be shared.
if (instr->InputAt(0)->IsImmediate()) {
- int32_t return_size =
- ImmediateOperand::cast(instr->InputAt(0))->inline_value();
+ int32_t return_size = ImmediateOperand::cast(instr->InputAt(0))
+ ->inline_int32_value();
// Instructions can be shared only for blocks that share
// the same |must_deconstruct_frame| attribute.
if (block->must_deconstruct_frame()) {
@@ -243,13 +243,12 @@ void JumpThreading::ApplyForwarding(Zone* local_zone,
}
// Patch RPO immediates.
- InstructionSequence::Immediates& immediates = code->immediates();
- for (size_t i = 0; i < immediates.size(); i++) {
- Constant constant = immediates[i];
- if (constant.type() == Constant::kRpoNumber) {
- RpoNumber rpo = constant.ToRpoNumber();
+ InstructionSequence::RpoImmediates& rpo_immediates = code->rpo_immediates();
+ for (size_t i = 0; i < rpo_immediates.size(); i++) {
+ RpoNumber rpo = rpo_immediates[i];
+ if (rpo.IsValid()) {
RpoNumber fw = result[rpo.ToInt()];
- if (!(fw == rpo)) immediates[i] = Constant(fw);
+ if (fw != rpo) rpo_immediates[i] = fw;
}
}
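
A compact model of the patch loop above (plain std::vector, made-up data): ComputeForwarding yields, per RPO number, the block it ultimately forwards to, and ApplyForwarding rewrites the shared RPO table in place so every operand that indexes it picks up the new target without touching the instructions themselves.

#include <cassert>
#include <vector>

int main() {
  // result[i] = final target of block i after collapsing forwarding blocks.
  std::vector<int> result = {0, 3, 3, 3, 4};

  // Shared RPO immediate table; -1 marks slots never referenced by an operand.
  std::vector<int> rpo_immediates = {-1, 1, 2, -1, 4};

  for (size_t i = 0; i < rpo_immediates.size(); ++i) {
    int rpo = rpo_immediates[i];
    if (rpo >= 0 && result[rpo] != rpo) rpo_immediates[i] = result[rpo];
  }
  assert(rpo_immediates[1] == 3 && rpo_immediates[2] == 3);
  return 0;
}
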
diff --git a/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc b/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc
index e84f0d94396..394f319dc03 100644
--- a/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc
+++ b/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc
@@ -143,16 +143,6 @@ MoveOperands* MidTierRegisterAllocationData::AddPendingOperandGapMove(
return AddGapMove(instr_index, position, PendingOperand(), PendingOperand());
}
-MachineRepresentation MidTierRegisterAllocationData::RepresentationFor(
- int virtual_register) {
- if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
- return InstructionSequence::DefaultRepresentation();
- } else {
- DCHECK_LT(virtual_register, code()->VirtualRegisterCount());
- return code()->GetRepresentation(virtual_register);
- }
-}
-
BlockState& MidTierRegisterAllocationData::block_state(RpoNumber rpo_number) {
return block_states_[rpo_number.ToInt()];
}
@@ -281,17 +271,20 @@ class VirtualRegisterData final {
// Define VirtualRegisterData with the type of output that produces this
// virtual register.
- void DefineAsUnallocatedOperand(int virtual_register, int instr_index,
+ void DefineAsUnallocatedOperand(int virtual_register,
+ MachineRepresentation rep, int instr_index,
bool is_deferred_block,
bool is_exceptional_call_output);
void DefineAsFixedSpillOperand(AllocatedOperand* operand,
- int virtual_register, int instr_index,
+ int virtual_register,
+ MachineRepresentation rep, int instr_index,
bool is_deferred_block,
bool is_exceptional_call_output);
- void DefineAsConstantOperand(ConstantOperand* operand, int instr_index,
+ void DefineAsConstantOperand(ConstantOperand* operand,
+ MachineRepresentation rep, int instr_index,
bool is_deferred_block);
- void DefineAsPhi(int virtual_register, int instr_index,
- bool is_deferred_block);
+ void DefineAsPhi(int virtual_register, MachineRepresentation rep,
+ int instr_index, bool is_deferred_block);
// Spill an operand that is assigned to this virtual register.
void SpillOperand(InstructionOperand* operand, int instr_index,
@@ -360,6 +353,7 @@ class VirtualRegisterData final {
void AllocatePendingSpillOperand(const AllocatedOperand& allocated);
int vreg() const { return vreg_; }
+ MachineRepresentation rep() const { return rep_; }
int output_instr_index() const { return output_instr_index_; }
bool is_constant() const { return is_constant_; }
bool is_phi() const { return is_phi_; }
@@ -474,8 +468,9 @@ class VirtualRegisterData final {
}
private:
- void Initialize(int virtual_register, InstructionOperand* spill_operand,
- int instr_index, bool is_phi, bool is_constant,
+ void Initialize(int virtual_register, MachineRepresentation rep,
+ InstructionOperand* spill_operand, int instr_index,
+ bool is_phi, bool is_constant,
bool is_defined_in_deferred_block,
bool is_exceptional_call_output);
@@ -489,6 +484,7 @@ class VirtualRegisterData final {
int output_instr_index_;
int vreg_;
+ MachineRepresentation rep_;
bool is_phi_ : 1;
bool is_constant_ : 1;
bool is_defined_in_deferred_block_ : 1;
@@ -504,12 +500,14 @@ VirtualRegisterData& MidTierRegisterAllocationData::VirtualRegisterDataFor(
}
void VirtualRegisterData::Initialize(int virtual_register,
+ MachineRepresentation rep,
InstructionOperand* spill_operand,
int instr_index, bool is_phi,
bool is_constant,
bool is_defined_in_deferred_block,
bool is_exceptional_call_output) {
vreg_ = virtual_register;
+ rep_ = rep;
spill_operand_ = spill_operand;
spill_range_ = nullptr;
output_instr_index_ = instr_index;
@@ -521,29 +519,31 @@ void VirtualRegisterData::Initialize(int virtual_register,
}
void VirtualRegisterData::DefineAsConstantOperand(ConstantOperand* operand,
+ MachineRepresentation rep,
int instr_index,
bool is_deferred_block) {
- Initialize(operand->virtual_register(), operand, instr_index, false, true,
- is_deferred_block, false);
+ Initialize(operand->virtual_register(), rep, operand, instr_index, false,
+ true, is_deferred_block, false);
}
void VirtualRegisterData::DefineAsFixedSpillOperand(
- AllocatedOperand* operand, int virtual_register, int instr_index,
- bool is_deferred_block, bool is_exceptional_call_output) {
- Initialize(virtual_register, operand, instr_index, false, false,
+ AllocatedOperand* operand, int virtual_register, MachineRepresentation rep,
+ int instr_index, bool is_deferred_block, bool is_exceptional_call_output) {
+ Initialize(virtual_register, rep, operand, instr_index, false, false,
is_deferred_block, is_exceptional_call_output);
}
void VirtualRegisterData::DefineAsUnallocatedOperand(
- int virtual_register, int instr_index, bool is_deferred_block,
- bool is_exceptional_call_output) {
- Initialize(virtual_register, nullptr, instr_index, false, false,
+ int virtual_register, MachineRepresentation rep, int instr_index,
+ bool is_deferred_block, bool is_exceptional_call_output) {
+ Initialize(virtual_register, rep, nullptr, instr_index, false, false,
is_deferred_block, is_exceptional_call_output);
}
-void VirtualRegisterData::DefineAsPhi(int virtual_register, int instr_index,
- bool is_deferred_block) {
- Initialize(virtual_register, nullptr, instr_index, true, false,
+void VirtualRegisterData::DefineAsPhi(int virtual_register,
+ MachineRepresentation rep,
+ int instr_index, bool is_deferred_block) {
+ Initialize(virtual_register, rep, nullptr, instr_index, true, false,
is_deferred_block, false);
}
@@ -1335,24 +1335,37 @@ class SinglePassRegisterAllocator final {
// Allocation routines used to allocate a particular operand to either a
// register or a spill slot.
- void AllocateConstantOutput(ConstantOperand* operand, int instr_index);
- void AllocateOutput(UnallocatedOperand* operand, int instr_index);
- void AllocateInput(UnallocatedOperand* operand, int instr_index);
+ void AllocateConstantOutput(ConstantOperand* operand,
+ VirtualRegisterData& vreg, int instr_index);
+ void AllocateOutput(UnallocatedOperand* operand, VirtualRegisterData& vreg,
+ int instr_index);
+ void AllocateInput(UnallocatedOperand* operand, VirtualRegisterData& vreg,
+ int instr_index);
void AllocateSameInputOutput(UnallocatedOperand* output,
- UnallocatedOperand* input, int instr_index);
- void AllocateGapMoveInput(UnallocatedOperand* operand, int instr_index);
- void AllocateTemp(UnallocatedOperand* operand, int instr_index);
- void AllocatePhi(int virtual_register, const InstructionBlock* block);
- void AllocatePhiGapMove(int to_vreg, int from_vreg, int instr_index);
+ UnallocatedOperand* input,
+ VirtualRegisterData& output_vreg,
+ VirtualRegisterData& input_vreg,
+ int instr_index);
+ void AllocateGapMoveInput(UnallocatedOperand* operand,
+ VirtualRegisterData& vreg, int instr_index);
+ void AllocateTemp(UnallocatedOperand* operand, int virtual_register,
+ MachineRepresentation rep, int instr_index);
+ void AllocatePhi(VirtualRegisterData& virtual_register,
+ const InstructionBlock* block);
+ void AllocatePhiGapMove(VirtualRegisterData& to_vreg,
+ VirtualRegisterData& from_vreg, int instr_index);
// Reserve any fixed registers for the operands on an instruction before doing
// allocation on the operands.
void ReserveFixedInputRegister(const UnallocatedOperand* operand,
- int instr_index);
+ int virtual_register,
+ MachineRepresentation rep, int instr_index);
void ReserveFixedTempRegister(const UnallocatedOperand* operand,
+ int virtual_register, MachineRepresentation rep,
int instr_index);
void ReserveFixedOutputRegister(const UnallocatedOperand* operand,
- int instr_index);
+ int virtual_register,
+ MachineRepresentation rep, int instr_index);
// Spills all registers that are currently holding data, for example, due to
// an instruction that clobbers all registers.
@@ -1367,7 +1380,7 @@ class SinglePassRegisterAllocator final {
void UpdateForDeferredBlock(int instr_index);
void AllocateDeferredBlockSpillOutput(int instr_index,
RpoNumber deferred_block,
- int virtual_register);
+ VirtualRegisterData& virtual_register);
RegisterKind kind() const { return kind_; }
BitVector* assigned_registers() const { return assigned_registers_; }
@@ -1404,8 +1417,8 @@ class SinglePassRegisterAllocator final {
// Introduce a gap move to move |virtual_register| from reg |from| to reg |to|
// on entry to a |successor| block.
void MoveRegisterOnMerge(RegisterIndex from, RegisterIndex to,
- int virtual_register, RpoNumber successor,
- RegisterState* succ_state);
+ VirtualRegisterData& virtual_register,
+ RpoNumber successor, RegisterState* succ_state);
// Update the virtual register data with the data in register_state()
void UpdateVirtualRegisterState();
@@ -1417,26 +1430,29 @@ class SinglePassRegisterAllocator final {
// Allocate |reg| to |virtual_register| for |operand| of the instruction at
// |instr_index|. The register will be reserved for this use for the specified
// |pos| use position.
- void AllocateUse(RegisterIndex reg, int virtual_register,
+ void AllocateUse(RegisterIndex reg, VirtualRegisterData& virtual_register,
InstructionOperand* operand, int instr_index,
UsePosition pos);
// Allocate |reg| to |virtual_register| as a pending use (i.e., only if the
// register is not subsequently spilled) for |operand| of the instruction at
// |instr_index|.
- void AllocatePendingUse(RegisterIndex reg, int virtual_register,
+ void AllocatePendingUse(RegisterIndex reg,
+ VirtualRegisterData& virtual_register,
InstructionOperand* operand, bool can_be_constant,
int instr_index);
// Allocate |operand| to |reg| and add a gap move to move |virtual_register|
// to this register for the instruction at |instr_index|. |reg| will be
// reserved for this use for the specified |pos| use position.
- void AllocateUseWithMove(RegisterIndex reg, int virtual_register,
+ void AllocateUseWithMove(RegisterIndex reg,
+ VirtualRegisterData& virtual_register,
UnallocatedOperand* operand, int instr_index,
UsePosition pos);
void CommitRegister(RegisterIndex reg, int virtual_register,
- InstructionOperand* operand, UsePosition pos);
+ MachineRepresentation rep, InstructionOperand* operand,
+ UsePosition pos);
void SpillRegister(RegisterIndex reg);
void SpillRegisterForVirtualRegister(int virtual_register);
@@ -1447,11 +1463,13 @@ class SinglePassRegisterAllocator final {
// Returns an AllocatedOperand corresponding to the use of |reg| for
// |virtual_register|.
AllocatedOperand AllocatedOperandForReg(RegisterIndex reg,
- int virtual_register);
+ MachineRepresentation rep);
- void ReserveFixedRegister(const UnallocatedOperand* operand, int instr_index,
- UsePosition pos);
- RegisterIndex AllocateOutput(UnallocatedOperand* operand, int instr_index,
+ void ReserveFixedRegister(const UnallocatedOperand* operand,
+ int virtual_register, MachineRepresentation rep,
+ int instr_index, UsePosition pos);
+ RegisterIndex AllocateOutput(UnallocatedOperand* operand,
+ VirtualRegisterData& vreg_data, int instr_index,
UsePosition pos);
void EmitGapMoveFromOutput(InstructionOperand from, InstructionOperand to,
int instr_index);
@@ -1473,8 +1491,9 @@ class SinglePassRegisterAllocator final {
// Assign, free and mark uses of |reg| for a |virtual_register| at use
// position |pos|.
V8_INLINE void AssignRegister(RegisterIndex reg, int virtual_register,
- UsePosition pos);
- V8_INLINE void FreeRegister(RegisterIndex reg, int virtual_register);
+ MachineRepresentation rep, UsePosition pos);
+ V8_INLINE void FreeRegister(RegisterIndex reg, int virtual_register,
+ MachineRepresentation rep);
V8_INLINE void MarkRegisterUse(RegisterIndex reg, MachineRepresentation rep,
UsePosition pos);
V8_INLINE RegisterBitVector InUseBitmap(UsePosition pos);
@@ -1509,10 +1528,6 @@ class SinglePassRegisterAllocator final {
return data()->VirtualRegisterDataFor(virtual_register);
}
- MachineRepresentation RepresentationFor(int virtual_register) const {
- return data()->RepresentationFor(virtual_register);
- }
-
int num_allocatable_registers() const { return num_allocatable_registers_; }
const InstructionBlock* current_block() const { return current_block_; }
MidTierRegisterAllocationData* data() const { return data_; }
@@ -1681,7 +1696,9 @@ void SinglePassRegisterAllocator::EndBlock(const InstructionBlock* block) {
// We will update the register state when starting the next block.
while (!allocated_registers_bits_.IsEmpty()) {
RegisterIndex reg = allocated_registers_bits_.GetFirstSet();
- FreeRegister(reg, VirtualRegisterForRegister(reg));
+ VirtualRegisterData& vreg_data =
+ data()->VirtualRegisterDataFor(VirtualRegisterForRegister(reg));
+ FreeRegister(reg, vreg_data.vreg(), vreg_data.rep());
}
current_block_ = nullptr;
register_state_ = nullptr;
@@ -1726,7 +1743,9 @@ void SinglePassRegisterAllocator::MergeStateFrom(
int virtual_register =
successor_registers->VirtualRegisterForRegister(reg);
- MachineRepresentation rep = RepresentationFor(virtual_register);
+ VirtualRegisterData& vreg_data =
+ VirtualRegisterDataFor(virtual_register);
+ MachineRepresentation rep = vreg_data.rep();
// If we have already processed |reg|, e.g., by adding a gap move to that
// register, then we can continue.
@@ -1753,7 +1772,7 @@ void SinglePassRegisterAllocator::MergeStateFrom(
}
if (new_reg.is_valid()) {
- MoveRegisterOnMerge(new_reg, reg, virtual_register, successor,
+ MoveRegisterOnMerge(new_reg, reg, vreg_data, successor,
successor_registers);
processed_regs.Add(new_reg, rep);
} else {
@@ -1772,7 +1791,7 @@ void SinglePassRegisterAllocator::MergeStateFrom(
// Register is free in our current register state, so merge the
// successor block's register details into it.
register_state()->CopyFrom(reg, successor_registers);
- AssignRegister(reg, virtual_register, UsePosition::kNone);
+ AssignRegister(reg, virtual_register, rep, UsePosition::kNone);
}
}
}
@@ -1785,8 +1804,9 @@ RegisterBitVector SinglePassRegisterAllocator::GetAllocatedRegBitVector(
RegisterBitVector allocated_regs;
for (RegisterIndex reg : *reg_state) {
if (reg_state->IsAllocated(reg)) {
- int virtual_register = reg_state->VirtualRegisterForRegister(reg);
- allocated_regs.Add(reg, RepresentationFor(virtual_register));
+ VirtualRegisterData virtual_register =
+ VirtualRegisterDataFor(reg_state->VirtualRegisterForRegister(reg));
+ allocated_regs.Add(reg, virtual_register.rep());
}
}
return allocated_regs;
@@ -1797,18 +1817,20 @@ void SinglePassRegisterAllocator::SpillRegisterAtMerge(RegisterState* reg_state,
DCHECK_NE(reg_state, register_state());
if (reg_state->IsAllocated(reg)) {
int virtual_register = reg_state->VirtualRegisterForRegister(reg);
- AllocatedOperand allocated = AllocatedOperandForReg(reg, virtual_register);
+ VirtualRegisterData& vreg_data =
+ data()->VirtualRegisterDataFor(virtual_register);
+ AllocatedOperand allocated = AllocatedOperandForReg(reg, vreg_data.rep());
reg_state->Spill(reg, allocated, current_block(), data());
}
}
void SinglePassRegisterAllocator::MoveRegisterOnMerge(
- RegisterIndex from, RegisterIndex to, int virtual_register,
+ RegisterIndex from, RegisterIndex to, VirtualRegisterData& virtual_register,
RpoNumber successor, RegisterState* succ_state) {
int instr_index = data()->GetBlock(successor)->first_instruction_index();
MoveOperands* move =
data()->AddPendingOperandGapMove(instr_index, Instruction::START);
- succ_state->Commit(to, AllocatedOperandForReg(to, virtual_register),
+ succ_state->Commit(to, AllocatedOperandForReg(to, virtual_register.rep()),
&move->destination(), data());
AllocatePendingUse(from, virtual_register, &move->source(), true,
instr_index);
@@ -1822,7 +1844,9 @@ void SinglePassRegisterAllocator::UpdateVirtualRegisterState() {
register_state()->ResetIfSpilledWhileShared(reg);
int virtual_register = VirtualRegisterForRegister(reg);
if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
- AssignRegister(reg, virtual_register, UsePosition::kNone);
+ MachineRepresentation rep =
+ data()->VirtualRegisterDataFor(virtual_register).rep();
+ AssignRegister(reg, virtual_register, rep, UsePosition::kNone);
}
}
CheckConsistency();
@@ -1837,7 +1861,7 @@ void SinglePassRegisterAllocator::CheckConsistency() {
if (reg.is_valid()) {
CHECK_EQ(virtual_register, VirtualRegisterForRegister(reg));
CHECK(allocated_registers_bits_.Contains(
- reg, RepresentationFor(virtual_register)));
+ reg, VirtualRegisterDataFor(virtual_register).rep()));
}
}
@@ -1846,7 +1870,7 @@ void SinglePassRegisterAllocator::CheckConsistency() {
if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
CHECK_EQ(reg, RegisterForVirtualRegister(virtual_register));
CHECK(allocated_registers_bits_.Contains(
- reg, RepresentationFor(virtual_register)));
+ reg, VirtualRegisterDataFor(virtual_register).rep()));
}
}
#endif
@@ -1916,8 +1940,8 @@ void SinglePassRegisterAllocator::EmitGapMoveFromOutput(InstructionOperand from,
void SinglePassRegisterAllocator::AssignRegister(RegisterIndex reg,
int virtual_register,
+ MachineRepresentation rep,
UsePosition pos) {
- MachineRepresentation rep = RepresentationFor(virtual_register);
assigned_registers()->Add(ToRegCode(reg, rep));
allocated_registers_bits_.Add(reg, rep);
MarkRegisterUse(reg, rep, pos);
@@ -1938,8 +1962,9 @@ void SinglePassRegisterAllocator::MarkRegisterUse(RegisterIndex reg,
}
void SinglePassRegisterAllocator::FreeRegister(RegisterIndex reg,
- int virtual_register) {
- allocated_registers_bits_.Clear(reg, RepresentationFor(virtual_register));
+ int virtual_register,
+ MachineRepresentation rep) {
+ allocated_registers_bits_.Clear(reg, rep);
if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
virtual_register_to_reg_[virtual_register] = RegisterIndex::Invalid();
}
@@ -1949,7 +1974,7 @@ RegisterIndex SinglePassRegisterAllocator::ChooseRegisterFor(
VirtualRegisterData& virtual_register, int instr_index, UsePosition pos,
bool must_use_register) {
DCHECK_NE(pos, UsePosition::kNone);
- MachineRepresentation rep = RepresentationFor(virtual_register.vreg());
+ MachineRepresentation rep = virtual_register.rep();
// If register is already allocated to the virtual register, use that.
RegisterIndex reg = RegisterForVirtualRegister(virtual_register.vreg());
@@ -2084,14 +2109,15 @@ RegisterIndex SinglePassRegisterAllocator::ChooseRegisterToSpill(
void SinglePassRegisterAllocator::CommitRegister(RegisterIndex reg,
int virtual_register,
+ MachineRepresentation rep,
InstructionOperand* operand,
UsePosition pos) {
// Commit the output operation, mark the register use in this
// instruction, then mark it as free going forward.
- AllocatedOperand allocated = AllocatedOperandForReg(reg, virtual_register);
+ AllocatedOperand allocated = AllocatedOperandForReg(reg, rep);
register_state()->Commit(reg, allocated, operand, data());
- MarkRegisterUse(reg, RepresentationFor(virtual_register), pos);
- FreeRegister(reg, virtual_register);
+ MarkRegisterUse(reg, rep, pos);
+ FreeRegister(reg, virtual_register, rep);
CheckConsistency();
}
@@ -2100,9 +2126,10 @@ void SinglePassRegisterAllocator::SpillRegister(RegisterIndex reg) {
// Spill the register and free register.
int virtual_register = VirtualRegisterForRegister(reg);
- AllocatedOperand allocated = AllocatedOperandForReg(reg, virtual_register);
+ MachineRepresentation rep = VirtualRegisterDataFor(virtual_register).rep();
+ AllocatedOperand allocated = AllocatedOperandForReg(reg, rep);
register_state()->Spill(reg, allocated, current_block(), data());
- FreeRegister(reg, virtual_register);
+ FreeRegister(reg, virtual_register, rep);
}
void SinglePassRegisterAllocator::SpillAllRegisters() {
@@ -2127,97 +2154,94 @@ void SinglePassRegisterAllocator::SpillRegisterForDeferred(RegisterIndex reg,
// Commit the output operation, mark the register use in this
// instruction, then mark it as free going forward.
if (register_state()->IsAllocated(reg) && register_state()->IsShared(reg)) {
- int virtual_register = VirtualRegisterForRegister(reg);
- AllocatedOperand allocated = AllocatedOperandForReg(reg, virtual_register);
+ VirtualRegisterData& virtual_register =
+ data()->VirtualRegisterDataFor(VirtualRegisterForRegister(reg));
+ AllocatedOperand allocated =
+ AllocatedOperandForReg(reg, virtual_register.rep());
register_state()->SpillForDeferred(reg, allocated, instr_index, data());
- FreeRegister(reg, virtual_register);
+ FreeRegister(reg, virtual_register.vreg(), virtual_register.rep());
}
CheckConsistency();
}
void SinglePassRegisterAllocator::AllocateDeferredBlockSpillOutput(
- int instr_index, RpoNumber deferred_block, int virtual_register) {
+ int instr_index, RpoNumber deferred_block,
+ VirtualRegisterData& virtual_register) {
DCHECK(data()->GetBlock(deferred_block)->IsDeferred());
- VirtualRegisterData& vreg_data =
- data()->VirtualRegisterDataFor(virtual_register);
// TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
- CHECK(vreg_data.HasSpillRange());
- if (!vreg_data.NeedsSpillAtOutput() &&
- !DefinedAfter(virtual_register, instr_index, UsePosition::kEnd)) {
+ CHECK(virtual_register.HasSpillRange());
+ if (!virtual_register.NeedsSpillAtOutput() &&
+ !DefinedAfter(virtual_register.vreg(), instr_index, UsePosition::kEnd)) {
// If a register has been assigned to the virtual register, and the virtual
// register still doesn't need to be spilled at its output, add a pending
// move to output the virtual register to its spill slot on entry to the
// deferred block (to avoid spilling in non-deferred code).
// TODO(rmcilroy): Consider assigning a register even if the virtual
// register isn't yet assigned - currently doing this regresses performance.
- RegisterIndex reg = RegisterForVirtualRegister(virtual_register);
+ RegisterIndex reg = RegisterForVirtualRegister(virtual_register.vreg());
if (reg.is_valid()) {
int deferred_block_start =
data()->GetBlock(deferred_block)->first_instruction_index();
- register_state()->MoveToSpillSlotOnDeferred(reg, virtual_register,
+ register_state()->MoveToSpillSlotOnDeferred(reg, virtual_register.vreg(),
deferred_block_start, data());
return;
} else {
- vreg_data.MarkAsNeedsSpillAtOutput();
+ virtual_register.MarkAsNeedsSpillAtOutput();
}
}
}
AllocatedOperand SinglePassRegisterAllocator::AllocatedOperandForReg(
- RegisterIndex reg, int virtual_register) {
- MachineRepresentation rep = RepresentationFor(virtual_register);
+ RegisterIndex reg, MachineRepresentation rep) {
return AllocatedOperand(AllocatedOperand::REGISTER, rep, ToRegCode(reg, rep));
}
-void SinglePassRegisterAllocator::AllocateUse(RegisterIndex reg,
- int virtual_register,
- InstructionOperand* operand,
- int instr_index,
- UsePosition pos) {
- DCHECK_NE(virtual_register, InstructionOperand::kInvalidVirtualRegister);
- DCHECK(IsFreeOrSameVirtualRegister(reg, virtual_register));
+void SinglePassRegisterAllocator::AllocateUse(
+ RegisterIndex reg, VirtualRegisterData& virtual_register,
+ InstructionOperand* operand, int instr_index, UsePosition pos) {
+ DCHECK(IsFreeOrSameVirtualRegister(reg, virtual_register.vreg()));
- AllocatedOperand allocated = AllocatedOperandForReg(reg, virtual_register);
+ AllocatedOperand allocated =
+ AllocatedOperandForReg(reg, virtual_register.rep());
register_state()->Commit(reg, allocated, operand, data());
- register_state()->AllocateUse(reg, virtual_register, operand, instr_index,
- data());
- AssignRegister(reg, virtual_register, pos);
+ register_state()->AllocateUse(reg, virtual_register.vreg(), operand,
+ instr_index, data());
+ AssignRegister(reg, virtual_register.vreg(), virtual_register.rep(), pos);
CheckConsistency();
}
void SinglePassRegisterAllocator::AllocatePendingUse(
- RegisterIndex reg, int virtual_register, InstructionOperand* operand,
- bool can_be_constant, int instr_index) {
- DCHECK_NE(virtual_register, InstructionOperand::kInvalidVirtualRegister);
- DCHECK(IsFreeOrSameVirtualRegister(reg, virtual_register));
+ RegisterIndex reg, VirtualRegisterData& virtual_register,
+ InstructionOperand* operand, bool can_be_constant, int instr_index) {
+ DCHECK(IsFreeOrSameVirtualRegister(reg, virtual_register.vreg()));
- register_state()->AllocatePendingUse(reg, virtual_register, operand,
+ register_state()->AllocatePendingUse(reg, virtual_register.vreg(), operand,
can_be_constant, instr_index);
// Since this is a pending use and the operand doesn't need to use a register,
// allocate with UsePosition::kNone to avoid blocking its use by other
// operands in this instruction.
- AssignRegister(reg, virtual_register, UsePosition::kNone);
+ AssignRegister(reg, virtual_register.vreg(), virtual_register.rep(),
+ UsePosition::kNone);
CheckConsistency();
}
void SinglePassRegisterAllocator::AllocateUseWithMove(
- RegisterIndex reg, int virtual_register, UnallocatedOperand* operand,
- int instr_index, UsePosition pos) {
- AllocatedOperand to = AllocatedOperandForReg(reg, virtual_register);
- UnallocatedOperand from = UnallocatedOperand(
- UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT, virtual_register);
+ RegisterIndex reg, VirtualRegisterData& virtual_register,
+ UnallocatedOperand* operand, int instr_index, UsePosition pos) {
+ AllocatedOperand to = AllocatedOperandForReg(reg, virtual_register.rep());
+ UnallocatedOperand from =
+ UnallocatedOperand(UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT,
+ virtual_register.vreg());
data()->AddGapMove(instr_index, Instruction::END, from, to);
InstructionOperand::ReplaceWith(operand, &to);
- MarkRegisterUse(reg, RepresentationFor(virtual_register), pos);
+ MarkRegisterUse(reg, virtual_register.rep(), pos);
CheckConsistency();
}
-void SinglePassRegisterAllocator::AllocateInput(UnallocatedOperand* operand,
- int instr_index) {
+void SinglePassRegisterAllocator::AllocateInput(
+ UnallocatedOperand* operand, VirtualRegisterData& virtual_register,
+ int instr_index) {
EnsureRegisterState();
- int virtual_register = operand->virtual_register();
- MachineRepresentation rep = RepresentationFor(virtual_register);
- VirtualRegisterData& vreg_data = VirtualRegisterDataFor(virtual_register);
// Spill slot policy operands.
if (operand->HasFixedSlotPolicy()) {
@@ -2230,16 +2254,19 @@ void SinglePassRegisterAllocator::AllocateInput(UnallocatedOperand* operand,
// the instruction (at the gap move). For now spilling is fine since
// fixed slot inputs are uncommon.
UnallocatedOperand input_copy(
- UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT, virtual_register);
- AllocatedOperand allocated = AllocatedOperand(
- AllocatedOperand::STACK_SLOT, rep, operand->fixed_slot_index());
+ UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT,
+ virtual_register.vreg());
+ AllocatedOperand allocated =
+ AllocatedOperand(AllocatedOperand::STACK_SLOT, virtual_register.rep(),
+ operand->fixed_slot_index());
InstructionOperand::ReplaceWith(operand, &allocated);
MoveOperands* move_op =
data()->AddGapMove(instr_index, Instruction::END, input_copy, *operand);
- vreg_data.SpillOperand(&move_op->source(), instr_index, true, data());
+ virtual_register.SpillOperand(&move_op->source(), instr_index, true,
+ data());
return;
} else if (operand->HasSlotPolicy()) {
- vreg_data.SpillOperand(operand, instr_index, false, data());
+ virtual_register.SpillOperand(operand, instr_index, false, data());
return;
}
@@ -2249,8 +2276,9 @@ void SinglePassRegisterAllocator::AllocateInput(UnallocatedOperand* operand,
if (operand->HasFixedRegisterPolicy() ||
operand->HasFixedFPRegisterPolicy()) {
// With a fixed register operand, we must use that register.
- RegisterIndex reg = FromRegCode(operand->fixed_register_index(), rep);
- if (!VirtualRegisterIsUnallocatedOrInReg(virtual_register, reg)) {
+ RegisterIndex reg =
+ FromRegCode(operand->fixed_register_index(), virtual_register.rep());
+ if (!VirtualRegisterIsUnallocatedOrInReg(virtual_register.vreg(), reg)) {
// If the virtual register is already in a different register, then just
// add a gap move from that register to the fixed register.
AllocateUseWithMove(reg, virtual_register, operand, instr_index, pos);
@@ -2260,8 +2288,8 @@ void SinglePassRegisterAllocator::AllocateInput(UnallocatedOperand* operand,
}
} else {
bool must_use_register = operand->HasRegisterPolicy();
- RegisterIndex reg =
- ChooseRegisterFor(vreg_data, instr_index, pos, must_use_register);
+ RegisterIndex reg = ChooseRegisterFor(virtual_register, instr_index, pos,
+ must_use_register);
if (reg.is_valid()) {
if (must_use_register) {
@@ -2272,38 +2300,34 @@ void SinglePassRegisterAllocator::AllocateInput(UnallocatedOperand* operand,
instr_index);
}
} else {
- vreg_data.SpillOperand(operand, instr_index,
- operand->HasRegisterOrSlotOrConstantPolicy(),
- data());
+ virtual_register.SpillOperand(
+ operand, instr_index, operand->HasRegisterOrSlotOrConstantPolicy(),
+ data());
}
}
}
void SinglePassRegisterAllocator::AllocateGapMoveInput(
- UnallocatedOperand* operand, int instr_index) {
+ UnallocatedOperand* operand, VirtualRegisterData& vreg_data,
+ int instr_index) {
EnsureRegisterState();
- int virtual_register = operand->virtual_register();
- VirtualRegisterData& vreg_data = VirtualRegisterDataFor(virtual_register);
-
// Gap move inputs should be unconstrained.
DCHECK(operand->HasRegisterOrSlotOrConstantPolicy());
RegisterIndex reg =
ChooseRegisterFor(vreg_data, instr_index, UsePosition::kStart, false);
if (reg.is_valid()) {
- AllocatePendingUse(reg, virtual_register, operand, true, instr_index);
+ AllocatePendingUse(reg, vreg_data, operand, true, instr_index);
} else {
vreg_data.SpillOperand(operand, instr_index, true, data());
}
}
void SinglePassRegisterAllocator::AllocateConstantOutput(
- ConstantOperand* operand, int instr_index) {
+ ConstantOperand* operand, VirtualRegisterData& vreg_data, int instr_index) {
EnsureRegisterState();
// If the constant is allocated to a register, spill it now to add the
// necessary gap moves from the constant operand to the register.
- int virtual_register = operand->virtual_register();
- VirtualRegisterData& vreg_data = VirtualRegisterDataFor(virtual_register);
- SpillRegisterForVirtualRegister(virtual_register);
+ SpillRegisterForVirtualRegister(vreg_data.vreg());
if (vreg_data.NeedsSpillAtOutput()) {
vreg_data.EmitGapMoveFromOutputToSpillSlot(*operand, current_block(),
instr_index, data());
@@ -2311,15 +2335,16 @@ void SinglePassRegisterAllocator::AllocateConstantOutput(
}
void SinglePassRegisterAllocator::AllocateOutput(UnallocatedOperand* operand,
+ VirtualRegisterData& vreg_data,
int instr_index) {
- AllocateOutput(operand, instr_index, UsePosition::kEnd);
+ AllocateOutput(operand, vreg_data, instr_index, UsePosition::kEnd);
}
RegisterIndex SinglePassRegisterAllocator::AllocateOutput(
- UnallocatedOperand* operand, int instr_index, UsePosition pos) {
+ UnallocatedOperand* operand, VirtualRegisterData& vreg_data,
+ int instr_index, UsePosition pos) {
EnsureRegisterState();
- int virtual_register = operand->virtual_register();
- VirtualRegisterData& vreg_data = VirtualRegisterDataFor(virtual_register);
+ int virtual_register = vreg_data.vreg();
RegisterIndex reg;
if (operand->HasSlotPolicy() || operand->HasFixedSlotPolicy()) {
@@ -2328,8 +2353,7 @@ RegisterIndex SinglePassRegisterAllocator::AllocateOutput(
SpillRegisterForVirtualRegister(virtual_register);
reg = RegisterIndex::Invalid();
} else if (operand->HasFixedPolicy()) {
- reg = FromRegCode(operand->fixed_register_index(),
- RepresentationFor(virtual_register));
+ reg = FromRegCode(operand->fixed_register_index(), vreg_data.rep());
} else {
reg = ChooseRegisterFor(vreg_data, instr_index, pos,
operand->HasRegisterPolicy());
@@ -2347,10 +2371,10 @@ RegisterIndex SinglePassRegisterAllocator::AllocateOutput(
RegisterIndex existing_reg = RegisterForVirtualRegister(virtual_register);
// Don't mark |existing_reg| as used in this instruction, since it is used
// in the (already allocated) following instruction's gap-move.
- CommitRegister(existing_reg, virtual_register, &move_output_to,
- UsePosition::kNone);
+ CommitRegister(existing_reg, vreg_data.vreg(), vreg_data.rep(),
+ &move_output_to, UsePosition::kNone);
}
- CommitRegister(reg, virtual_register, operand, pos);
+ CommitRegister(reg, vreg_data.vreg(), vreg_data.rep(), operand, pos);
if (move_output_to.IsAllocated()) {
// Emit a move from output to the register that the |virtual_register| was
// allocated to.
@@ -2369,16 +2393,19 @@ RegisterIndex SinglePassRegisterAllocator::AllocateOutput(
}
void SinglePassRegisterAllocator::AllocateSameInputOutput(
- UnallocatedOperand* output, UnallocatedOperand* input, int instr_index) {
+ UnallocatedOperand* output, UnallocatedOperand* input,
+ VirtualRegisterData& output_vreg_data, VirtualRegisterData& input_vreg_data,
+ int instr_index) {
EnsureRegisterState();
- int input_vreg = input->virtual_register();
- int output_vreg = output->virtual_register();
+ int input_vreg = input_vreg_data.vreg();
+ int output_vreg = output_vreg_data.vreg();
// The input operand has the details of the register constraints, so replace
// the output operand with a copy of the input, with the output's vreg.
UnallocatedOperand output_as_input(*input, output_vreg);
InstructionOperand::ReplaceWith(output, &output_as_input);
- RegisterIndex reg = AllocateOutput(output, instr_index, UsePosition::kAll);
+ RegisterIndex reg =
+ AllocateOutput(output, output_vreg_data, instr_index, UsePosition::kAll);
if (reg.is_valid()) {
// Replace the input operand with an unallocated fixed register policy for
@@ -2387,7 +2414,7 @@ void SinglePassRegisterAllocator::AllocateSameInputOutput(
kind() == RegisterKind::kGeneral
? UnallocatedOperand::FIXED_REGISTER
: UnallocatedOperand::FIXED_FP_REGISTER;
- MachineRepresentation rep = RepresentationFor(input_vreg);
+ MachineRepresentation rep = input_vreg_data.rep();
UnallocatedOperand fixed_input(policy, ToRegCode(reg, rep), input_vreg);
InstructionOperand::ReplaceWith(input, &fixed_input);
same_input_output_registers_bits_.Add(reg, rep);
@@ -2397,7 +2424,6 @@ void SinglePassRegisterAllocator::AllocateSameInputOutput(
// register's spill slot. As such, spill this input operand using the output
// virtual register's spill slot, then add a gap-move to move the input
// value into this spill slot.
- VirtualRegisterData& output_vreg_data = VirtualRegisterDataFor(output_vreg);
output_vreg_data.SpillOperand(input, instr_index, false, data());
// Add an unconstrained gap move for the input virtual register.
@@ -2411,26 +2437,26 @@ void SinglePassRegisterAllocator::AllocateSameInputOutput(
}
void SinglePassRegisterAllocator::AllocateTemp(UnallocatedOperand* operand,
+ int virtual_register,
+ MachineRepresentation rep,
int instr_index) {
EnsureRegisterState();
- int virtual_register = operand->virtual_register();
RegisterIndex reg;
DCHECK(!operand->HasFixedSlotPolicy());
if (operand->HasSlotPolicy()) {
reg = RegisterIndex::Invalid();
} else if (operand->HasFixedRegisterPolicy() ||
operand->HasFixedFPRegisterPolicy()) {
- reg = FromRegCode(operand->fixed_register_index(),
- RepresentationFor(virtual_register));
+ reg = FromRegCode(operand->fixed_register_index(), rep);
} else {
- reg = ChooseRegisterFor(RepresentationFor(virtual_register),
- UsePosition::kAll, operand->HasRegisterPolicy());
+ reg =
+ ChooseRegisterFor(rep, UsePosition::kAll, operand->HasRegisterPolicy());
}
if (reg.is_valid()) {
DCHECK(virtual_register == InstructionOperand::kInvalidVirtualRegister ||
VirtualRegisterIsUnallocatedOrInReg(virtual_register, reg));
- CommitRegister(reg, virtual_register, operand, UsePosition::kAll);
+ CommitRegister(reg, virtual_register, rep, operand, UsePosition::kAll);
} else {
VirtualRegisterData& vreg_data = VirtualRegisterDataFor(virtual_register);
vreg_data.SpillOperand(operand, instr_index,
@@ -2451,27 +2477,31 @@ bool SinglePassRegisterAllocator::DefinedAfter(int virtual_register,
}
void SinglePassRegisterAllocator::ReserveFixedInputRegister(
- const UnallocatedOperand* operand, int instr_index) {
+ const UnallocatedOperand* operand, int virtual_register,
+ MachineRepresentation rep, int instr_index) {
ReserveFixedRegister(
- operand, instr_index,
+ operand, virtual_register, rep, instr_index,
operand->IsUsedAtStart() ? UsePosition::kStart : UsePosition::kAll);
}
void SinglePassRegisterAllocator::ReserveFixedTempRegister(
- const UnallocatedOperand* operand, int instr_index) {
- ReserveFixedRegister(operand, instr_index, UsePosition::kAll);
+ const UnallocatedOperand* operand, int virtual_register,
+ MachineRepresentation rep, int instr_index) {
+ ReserveFixedRegister(operand, virtual_register, rep, instr_index,
+ UsePosition::kAll);
}
void SinglePassRegisterAllocator::ReserveFixedOutputRegister(
- const UnallocatedOperand* operand, int instr_index) {
- ReserveFixedRegister(operand, instr_index, UsePosition::kEnd);
+ const UnallocatedOperand* operand, int virtual_register,
+ MachineRepresentation rep, int instr_index) {
+ ReserveFixedRegister(operand, virtual_register, rep, instr_index,
+ UsePosition::kEnd);
}
void SinglePassRegisterAllocator::ReserveFixedRegister(
- const UnallocatedOperand* operand, int instr_index, UsePosition pos) {
+ const UnallocatedOperand* operand, int virtual_register,
+ MachineRepresentation rep, int instr_index, UsePosition pos) {
EnsureRegisterState();
- int virtual_register = operand->virtual_register();
- MachineRepresentation rep = RepresentationFor(virtual_register);
RegisterIndex reg = FromRegCode(operand->fixed_register_index(), rep);
if (!IsFreeOrSameVirtualRegister(reg, virtual_register) &&
!DefinedAfter(virtual_register, instr_index, pos)) {
@@ -2483,11 +2513,12 @@ void SinglePassRegisterAllocator::ReserveFixedRegister(
MarkRegisterUse(reg, rep, pos);
}
-void SinglePassRegisterAllocator::AllocatePhiGapMove(int to_vreg, int from_vreg,
- int instr_index) {
+void SinglePassRegisterAllocator::AllocatePhiGapMove(
+ VirtualRegisterData& to_vreg, VirtualRegisterData& from_vreg,
+ int instr_index) {
EnsureRegisterState();
- RegisterIndex from_register = RegisterForVirtualRegister(from_vreg);
- RegisterIndex to_register = RegisterForVirtualRegister(to_vreg);
+ RegisterIndex from_register = RegisterForVirtualRegister(from_vreg.vreg());
+ RegisterIndex to_register = RegisterForVirtualRegister(to_vreg.vreg());
// If to_register isn't marked as a phi gap move, we can't use it as such.
if (to_register.is_valid() && !register_state()->IsPhiGapMove(to_register)) {
@@ -2499,7 +2530,8 @@ void SinglePassRegisterAllocator::AllocatePhiGapMove(int to_vreg, int from_vreg,
// virtual register isn't allocated, then commit this register and
// re-allocate it to the |from| virtual register.
InstructionOperand operand;
- CommitRegister(to_register, to_vreg, &operand, UsePosition::kAll);
+ CommitRegister(to_register, to_vreg.vreg(), to_vreg.rep(), &operand,
+ UsePosition::kAll);
AllocateUse(to_register, from_vreg, &operand, instr_index,
UsePosition::kAll);
} else {
@@ -2511,28 +2543,27 @@ void SinglePassRegisterAllocator::AllocatePhiGapMove(int to_vreg, int from_vreg,
// Commit the |to| side to either a register or the pending spills.
if (to_register.is_valid()) {
- CommitRegister(to_register, to_vreg, to_operand, UsePosition::kAll);
+ CommitRegister(to_register, to_vreg.vreg(), to_vreg.rep(), to_operand,
+ UsePosition::kAll);
} else {
- VirtualRegisterDataFor(to_vreg).SpillOperand(to_operand, instr_index,
- true, data());
+ to_vreg.SpillOperand(to_operand, instr_index, true, data());
}
// The from side is unconstrained.
UnallocatedOperand unconstrained_input(
- UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT, from_vreg);
+ UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT, from_vreg.vreg());
InstructionOperand::ReplaceWith(from_operand, &unconstrained_input);
}
}
-void SinglePassRegisterAllocator::AllocatePhi(int virtual_register,
- const InstructionBlock* block) {
- VirtualRegisterData& vreg_data = VirtualRegisterDataFor(virtual_register);
- if (vreg_data.NeedsSpillAtOutput() || block->IsLoopHeader()) {
+void SinglePassRegisterAllocator::AllocatePhi(
+ VirtualRegisterData& virtual_register, const InstructionBlock* block) {
+ if (virtual_register.NeedsSpillAtOutput() || block->IsLoopHeader()) {
// If the Phi needs to be spilled, just spill here directly so that all
// gap moves into the Phi move into the spill slot.
- SpillRegisterForVirtualRegister(virtual_register);
+ SpillRegisterForVirtualRegister(virtual_register.vreg());
} else {
- RegisterIndex reg = RegisterForVirtualRegister(virtual_register);
+ RegisterIndex reg = RegisterForVirtualRegister(virtual_register.vreg());
if (reg.is_valid()) {
// If the register is valid, assign it as a phi gap move to be processed
// at the successor blocks. If no register or spill slot was used then
@@ -2563,7 +2594,9 @@ class MidTierOutputProcessor final {
return data()->VirtualRegisterDataFor(virtual_register);
}
MachineRepresentation RepresentationFor(int virtual_register) const {
- return data()->RepresentationFor(virtual_register);
+ DCHECK_NE(virtual_register, InstructionOperand::kInvalidVirtualRegister);
+ DCHECK_LT(virtual_register, code()->VirtualRegisterCount());
+ return code()->GetRepresentation(virtual_register);
}
bool IsDeferredBlockBoundary(const ZoneVector<RpoNumber>& blocks) {
@@ -2677,30 +2710,32 @@ void MidTierOutputProcessor::DefineOutputs(const InstructionBlock* block) {
if (output->IsConstant()) {
ConstantOperand* constant_operand = ConstantOperand::cast(output);
int virtual_register = constant_operand->virtual_register();
+ MachineRepresentation rep = RepresentationFor(virtual_register);
VirtualRegisterDataFor(virtual_register)
- .DefineAsConstantOperand(constant_operand, index, is_deferred);
+ .DefineAsConstantOperand(constant_operand, rep, index, is_deferred);
} else {
DCHECK(output->IsUnallocated());
UnallocatedOperand* unallocated_operand =
UnallocatedOperand::cast(output);
int virtual_register = unallocated_operand->virtual_register();
+ MachineRepresentation rep = RepresentationFor(virtual_register);
bool is_exceptional_call_output =
instr->IsCallWithDescriptorFlags() &&
instr->HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler);
if (unallocated_operand->HasFixedSlotPolicy()) {
// If output has a fixed slot policy, allocate its spill operand now
// so that the register allocator can use this knowledge.
- MachineRepresentation rep = RepresentationFor(virtual_register);
AllocatedOperand* fixed_spill_operand =
AllocatedOperand::New(zone(), AllocatedOperand::STACK_SLOT, rep,
unallocated_operand->fixed_slot_index());
VirtualRegisterDataFor(virtual_register)
.DefineAsFixedSpillOperand(fixed_spill_operand, virtual_register,
- index, is_deferred,
+ rep, index, is_deferred,
is_exceptional_call_output);
} else {
VirtualRegisterDataFor(virtual_register)
- .DefineAsUnallocatedOperand(virtual_register, index, is_deferred,
+ .DefineAsUnallocatedOperand(virtual_register, rep, index,
+ is_deferred,
is_exceptional_call_output);
}
}
@@ -2716,8 +2751,9 @@ void MidTierOutputProcessor::DefineOutputs(const InstructionBlock* block) {
// Define phi output operands.
for (PhiInstruction* phi : block->phis()) {
int virtual_register = phi->virtual_register();
+ MachineRepresentation rep = RepresentationFor(virtual_register);
VirtualRegisterDataFor(virtual_register)
- .DefineAsPhi(virtual_register, block->first_instruction_index(),
+ .DefineAsPhi(virtual_register, rep, block->first_instruction_index(),
is_deferred);
}
}
@@ -2758,15 +2794,10 @@ class MidTierRegisterAllocator final {
void ReserveFixedRegisters(int instr_index);
SinglePassRegisterAllocator& AllocatorFor(MachineRepresentation rep);
- SinglePassRegisterAllocator& AllocatorFor(const UnallocatedOperand* operand);
- SinglePassRegisterAllocator& AllocatorFor(const ConstantOperand* operand);
VirtualRegisterData& VirtualRegisterDataFor(int virtual_register) const {
return data()->VirtualRegisterDataFor(virtual_register);
}
- MachineRepresentation RepresentationFor(int virtual_register) const {
- return data()->RepresentationFor(virtual_register);
- }
MidTierRegisterAllocationData* data() const { return data_; }
InstructionSequence* code() const { return data()->code(); }
Zone* allocation_zone() const { return data()->allocation_zone(); }
@@ -2801,9 +2832,11 @@ void MidTierRegisterAllocator::AllocateRegisters(
DCHECK_GT(successor, block_rpo);
for (const int virtual_register :
*data()->block_state(successor).deferred_blocks_region()) {
- AllocatorFor(RepresentationFor(virtual_register))
+ VirtualRegisterData& vreg_data =
+ VirtualRegisterDataFor(virtual_register);
+ AllocatorFor(vreg_data.rep())
.AllocateDeferredBlockSpillOutput(block->last_instruction_index(),
- successor, virtual_register);
+ successor, vreg_data);
}
}
}
@@ -2825,23 +2858,32 @@ void MidTierRegisterAllocator::AllocateRegisters(
DCHECK(!output->IsAllocated());
if (output->IsConstant()) {
ConstantOperand* constant_operand = ConstantOperand::cast(output);
- AllocatorFor(constant_operand)
- .AllocateConstantOutput(constant_operand, instr_index);
+ VirtualRegisterData& vreg_data =
+ VirtualRegisterDataFor(constant_operand->virtual_register());
+ AllocatorFor(vreg_data.rep())
+ .AllocateConstantOutput(constant_operand, vreg_data, instr_index);
} else {
UnallocatedOperand* unallocated_output =
UnallocatedOperand::cast(output);
+ VirtualRegisterData& output_vreg_data =
+ VirtualRegisterDataFor(unallocated_output->virtual_register());
+
if (unallocated_output->HasSameAsInputPolicy()) {
DCHECK_EQ(i, 0);
UnallocatedOperand* unallocated_input =
UnallocatedOperand::cast(instr->InputAt(0));
- DCHECK_EQ(AllocatorFor(unallocated_input).kind(),
- AllocatorFor(unallocated_output).kind());
- AllocatorFor(unallocated_output)
+ VirtualRegisterData& input_vreg_data =
+ VirtualRegisterDataFor(unallocated_input->virtual_register());
+ DCHECK_EQ(AllocatorFor(output_vreg_data.rep()).kind(),
+ AllocatorFor(input_vreg_data.rep()).kind());
+ AllocatorFor(output_vreg_data.rep())
.AllocateSameInputOutput(unallocated_output, unallocated_input,
+ output_vreg_data, input_vreg_data,
instr_index);
} else {
- AllocatorFor(unallocated_output)
- .AllocateOutput(unallocated_output, instr_index);
+ AllocatorFor(output_vreg_data.rep())
+ .AllocateOutput(unallocated_output, output_vreg_data,
+ instr_index);
}
}
}
@@ -2856,7 +2898,12 @@ void MidTierRegisterAllocator::AllocateRegisters(
// Allocate temporaries.
for (size_t i = 0; i < instr->TempCount(); i++) {
UnallocatedOperand* temp = UnallocatedOperand::cast(instr->TempAt(i));
- AllocatorFor(temp).AllocateTemp(temp, instr_index);
+ int virtual_register = temp->virtual_register();
+ MachineRepresentation rep =
+ virtual_register == InstructionOperand::kInvalidVirtualRegister
+ ? InstructionSequence::DefaultRepresentation()
+ : code()->GetRepresentation(virtual_register);
+ AllocatorFor(rep).AllocateTemp(temp, virtual_register, rep, instr_index);
}
// Allocate inputs that are used across the whole instruction.
@@ -2864,7 +2911,10 @@ void MidTierRegisterAllocator::AllocateRegisters(
if (!instr->InputAt(i)->IsUnallocated()) continue;
UnallocatedOperand* input = UnallocatedOperand::cast(instr->InputAt(i));
if (input->IsUsedAtStart()) continue;
- AllocatorFor(input).AllocateInput(input, instr_index);
+ VirtualRegisterData& vreg_data =
+ VirtualRegisterDataFor(input->virtual_register());
+ AllocatorFor(vreg_data.rep())
+ .AllocateInput(input, vreg_data, instr_index);
}
// Then allocate inputs that are only used at the start of the instruction.
@@ -2872,7 +2922,10 @@ void MidTierRegisterAllocator::AllocateRegisters(
if (!instr->InputAt(i)->IsUnallocated()) continue;
UnallocatedOperand* input = UnallocatedOperand::cast(instr->InputAt(i));
DCHECK(input->IsUsedAtStart());
- AllocatorFor(input).AllocateInput(input, instr_index);
+ VirtualRegisterData& vreg_data =
+ VirtualRegisterDataFor(input->virtual_register());
+ AllocatorFor(vreg_data.rep())
+ .AllocateInput(input, vreg_data, instr_index);
}
// If we are allocating for the last instruction in the block, allocate any
@@ -2896,7 +2949,10 @@ void MidTierRegisterAllocator::AllocateRegisters(
if (move->source().IsUnallocated()) {
UnallocatedOperand* source =
UnallocatedOperand::cast(&move->source());
- AllocatorFor(source).AllocateGapMoveInput(source, instr_index);
+ VirtualRegisterData& vreg_data =
+ VirtualRegisterDataFor(source->virtual_register());
+ AllocatorFor(vreg_data.rep())
+ .AllocateGapMoveInput(source, vreg_data, instr_index);
}
}
}
@@ -2927,16 +2983,6 @@ SinglePassRegisterAllocator& MidTierRegisterAllocator::AllocatorFor(
}
}
-SinglePassRegisterAllocator& MidTierRegisterAllocator::AllocatorFor(
- const UnallocatedOperand* operand) {
- return AllocatorFor(RepresentationFor(operand->virtual_register()));
-}
-
-SinglePassRegisterAllocator& MidTierRegisterAllocator::AllocatorFor(
- const ConstantOperand* operand) {
- return AllocatorFor(RepresentationFor(operand->virtual_register()));
-}
-
bool MidTierRegisterAllocator::IsFixedRegisterPolicy(
const UnallocatedOperand* operand) {
return operand->HasFixedRegisterPolicy() ||
@@ -2955,7 +3001,11 @@ void MidTierRegisterAllocator::ReserveFixedRegisters(int instr_index) {
operand = UnallocatedOperand::cast(instr->InputAt(i));
}
if (IsFixedRegisterPolicy(operand)) {
- AllocatorFor(operand).ReserveFixedOutputRegister(operand, instr_index);
+ VirtualRegisterData& vreg_data =
+ VirtualRegisterDataFor(operand->virtual_register());
+ AllocatorFor(vreg_data.rep())
+ .ReserveFixedOutputRegister(operand, vreg_data.vreg(),
+ vreg_data.rep(), instr_index);
}
}
for (size_t i = 0; i < instr->TempCount(); i++) {
@@ -2963,7 +3013,13 @@ void MidTierRegisterAllocator::ReserveFixedRegisters(int instr_index) {
const UnallocatedOperand* operand =
UnallocatedOperand::cast(instr->TempAt(i));
if (IsFixedRegisterPolicy(operand)) {
- AllocatorFor(operand).ReserveFixedTempRegister(operand, instr_index);
+ int virtual_register = operand->virtual_register();
+ MachineRepresentation rep =
+ virtual_register == InstructionOperand::kInvalidVirtualRegister
+ ? InstructionSequence::DefaultRepresentation()
+ : code()->GetRepresentation(virtual_register);
+ AllocatorFor(rep).ReserveFixedTempRegister(operand, virtual_register, rep,
+ instr_index);
}
}
for (size_t i = 0; i < instr->InputCount(); i++) {
@@ -2971,7 +3027,11 @@ void MidTierRegisterAllocator::ReserveFixedRegisters(int instr_index) {
const UnallocatedOperand* operand =
UnallocatedOperand::cast(instr->InputAt(i));
if (IsFixedRegisterPolicy(operand)) {
- AllocatorFor(operand).ReserveFixedInputRegister(operand, instr_index);
+ VirtualRegisterData& vreg_data =
+ VirtualRegisterDataFor(operand->virtual_register());
+ AllocatorFor(vreg_data.rep())
+ .ReserveFixedInputRegister(operand, vreg_data.vreg(), vreg_data.rep(),
+ instr_index);
}
}
}
@@ -2995,19 +3055,21 @@ void MidTierRegisterAllocator::AllocatePhiGapMoves(
const InstructionBlock* successor = data()->GetBlock(block->successors()[0]);
for (PhiInstruction* phi : successor->phis()) {
- int to_vreg = phi->virtual_register();
- int from_vreg = phi->operands()[successors_phi_index];
+ VirtualRegisterData& to_vreg =
+ VirtualRegisterDataFor(phi->virtual_register());
+ VirtualRegisterData& from_vreg =
+ VirtualRegisterDataFor(phi->operands()[successors_phi_index]);
- MachineRepresentation rep = RepresentationFor(to_vreg);
- AllocatorFor(rep).AllocatePhiGapMove(to_vreg, from_vreg, instr_index);
+ AllocatorFor(to_vreg.rep())
+ .AllocatePhiGapMove(to_vreg, from_vreg, instr_index);
}
}
void MidTierRegisterAllocator::AllocatePhis(const InstructionBlock* block) {
for (PhiInstruction* phi : block->phis()) {
- int virtual_register = phi->virtual_register();
- MachineRepresentation rep = RepresentationFor(virtual_register);
- AllocatorFor(rep).AllocatePhi(virtual_register, block);
+ VirtualRegisterData& virtual_register =
+ VirtualRegisterDataFor(phi->virtual_register());
+ AllocatorFor(virtual_register.rep()).AllocatePhi(virtual_register, block);
}
}
@@ -3145,8 +3207,7 @@ void MidTierSpillSlotAllocator::Allocate(
DCHECK(virtual_register->HasPendingSpillOperand());
VirtualRegisterData::SpillRange* spill_range =
virtual_register->spill_range();
- MachineRepresentation rep =
- data()->RepresentationFor(virtual_register->vreg());
+ MachineRepresentation rep = virtual_register->rep();
int byte_width = ByteWidthForStackSlot(rep);
Range live_range = spill_range->live_range();
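
The allocator changes above follow one mechanical pattern: instead of passing a bare virtual-register index and re-querying its MachineRepresentation through a side table (the removed RepresentationFor helper), the allocation routines now receive a reference to the per-vreg VirtualRegisterData record, which already carries both vreg() and rep(). The standalone sketch below uses hypothetical, simplified names (not V8's real classes) to show the before/after shape of that refactoring.

// Standalone sketch of the refactoring pattern (hypothetical names, not V8 code).
#include <cstdio>
#include <vector>

enum class MachineRepresentation { kWord32, kFloat64 };

class VirtualRegisterData {
 public:
  VirtualRegisterData(int vreg, MachineRepresentation rep)
      : vreg_(vreg), rep_(rep) {}
  int vreg() const { return vreg_; }
  MachineRepresentation rep() const { return rep_; }

 private:
  int vreg_;
  MachineRepresentation rep_;
};

// Before: the callee only gets the index and must consult a side table again.
void AllocateOld(int virtual_register,
                 const std::vector<MachineRepresentation>& rep_table) {
  MachineRepresentation rep = rep_table[virtual_register];  // extra lookup
  std::printf("old: vreg %d rep %d\n", virtual_register, static_cast<int>(rep));
}

// After: the callee receives the record, so vreg() and rep() are both at hand.
void AllocateNew(VirtualRegisterData& vreg_data) {
  std::printf("new: vreg %d rep %d\n", vreg_data.vreg(),
              static_cast<int>(vreg_data.rep()));
}

int main() {
  std::vector<MachineRepresentation> rep_table = {
      MachineRepresentation::kWord32, MachineRepresentation::kFloat64};
  VirtualRegisterData v1(1, rep_table[1]);
  AllocateOld(1, rep_table);
  AllocateNew(v1);
  return 0;
}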
diff --git a/deps/v8/src/compiler/backend/mid-tier-register-allocator.h b/deps/v8/src/compiler/backend/mid-tier-register-allocator.h
index 24401150956..28981458582 100644
--- a/deps/v8/src/compiler/backend/mid-tier-register-allocator.h
+++ b/deps/v8/src/compiler/backend/mid-tier-register-allocator.h
@@ -44,7 +44,6 @@ class MidTierRegisterAllocationData final : public RegisterAllocationData {
}
VirtualRegisterData& VirtualRegisterDataFor(int virtual_register);
- MachineRepresentation RepresentationFor(int virtual_register);
// Add a gap move between the given operands |from| and |to|.
MoveOperands* AddGapMove(int instr_index, Instruction::GapPosition position,
diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
index 79e8836bd09..4066ba77e80 100644
--- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -12,7 +12,10 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/heap/memory-chunk.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -150,9 +153,12 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
+#if V8_ENABLE_WEBASSEMBLY
stub_mode_(stub_mode),
+#endif // V8_ENABLE_WEBASSEMBLY
must_save_lr_(!gen->frame_access_state()->has_frame()),
- zone_(gen->zone()) {}
+ zone_(gen->zone()) {
+ }
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -174,12 +180,14 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
__ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode);
+#if V8_ENABLE_WEBASSEMBLY
} else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
// A direct call to a wasm runtime stub defined in this module.
// Just encode the stub index. This will be patched when the code
// is added to the native module and copied into wasm code space.
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode, wasm::WasmCode::kRecordWrite);
+#endif // V8_ENABLE_WEBASSEMBLY
} else {
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode);
@@ -196,7 +204,9 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
+#if V8_ENABLE_WEBASSEMBLY
StubCallMode const stub_mode_;
+#endif // V8_ENABLE_WEBASSEMBLY
bool must_save_lr_;
Zone* zone_;
};
@@ -566,15 +576,15 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
} // namespace
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
- int first_unused_stack_slot) {
+ int first_unused_slot_offset) {
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
- first_unused_stack_slot, false);
+ first_unused_slot_offset, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
- int first_unused_stack_slot) {
+ int first_unused_slot_offset) {
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
- first_unused_stack_slot);
+ first_unused_slot_offset);
}
// Check that {kJavaScriptCallCodeStartRegister} is correct.
@@ -657,6 +667,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
+#if V8_ENABLE_WEBASSEMBLY
case kArchCallWasmFunction: {
if (instr->InputAt(0)->IsImmediate()) {
Constant constant = i.ToConstant(instr->InputAt(0));
@@ -669,6 +680,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallWasm: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt32());
+ __ Jump(wasm_code, constant.rmode());
+ } else {
+ __ Jump(i.InputRegister(0));
+ }
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
case kArchTailCallCodeObject: {
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
@@ -684,18 +708,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->SetFrameAccessToDefault();
break;
}
- case kArchTailCallWasm: {
- if (instr->InputAt(0)->IsImmediate()) {
- Constant constant = i.ToConstant(instr->InputAt(0));
- Address wasm_code = static_cast<Address>(constant.ToInt32());
- __ Jump(wasm_code, constant.rmode());
- } else {
- __ Jump(i.InputRegister(0));
- }
- frame_access_state()->ClearSPDelta();
- frame_access_state()->SetFrameAccessToDefault();
- break;
- }
case kArchTailCallAddress: {
CHECK(!instr->InputAt(0)->IsImmediate());
Register reg = i.InputRegister(0);
@@ -761,16 +773,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
+#if V8_ENABLE_WEBASSEMBLY
Label start_call;
bool isWasmCapiFunction =
linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
// from start_call to return address.
int offset = __ root_array_available() ? 68 : 80;
+#endif // V8_ENABLE_WEBASSEMBLY
#if V8_HOST_ARCH_MIPS
if (__ emit_debug_code()) {
offset += 16;
}
#endif
+
+#if V8_ENABLE_WEBASSEMBLY
if (isWasmCapiFunction) {
// Put the return address in a stack slot.
__ mov(kScratchReg, ra);
@@ -781,6 +797,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sw(ra, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
__ mov(ra, kScratchReg);
}
+#endif // V8_ENABLE_WEBASSEMBLY
+
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_parameters);
@@ -788,10 +806,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+
+#if V8_ENABLE_WEBASSEMBLY
if (isWasmCapiFunction) {
CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
RecordSafepoint(instr->reference_map());
}
+#endif // V8_ENABLE_WEBASSEMBLY
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
@@ -3006,12 +3027,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kMipsI8x16Mul: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ mulv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- break;
- }
case kMipsI8x16MaxS: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ max_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -3162,7 +3177,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_false);
break;
}
- case kMipsV64x2AllTrue: {
+ case kMipsI64x2AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
@@ -3173,7 +3188,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_true);
break;
}
- case kMipsV32x4AllTrue: {
+ case kMipsI32x4AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
@@ -3184,7 +3199,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_true);
break;
}
- case kMipsV16x8AllTrue: {
+ case kMipsI16x8AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
@@ -3195,7 +3210,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_true);
break;
}
- case kMipsV8x16AllTrue: {
+ case kMipsI8x16AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
@@ -3635,38 +3650,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
break;
}
- case kMipsF32x4AddHoriz: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- Simd128Register src0 = i.InputSimd128Register(0);
- Simd128Register src1 = i.InputSimd128Register(1);
- Simd128Register dst = i.OutputSimd128Register();
- __ shf_w(kSimd128ScratchReg, src0, 0xB1); // 2 3 0 1 : 10110001 : 0xB1
- __ shf_w(kSimd128RegZero, src1, 0xB1); // kSimd128RegZero as scratch
- __ fadd_w(kSimd128ScratchReg, kSimd128ScratchReg, src0);
- __ fadd_w(kSimd128RegZero, kSimd128RegZero, src1);
- __ pckev_w(dst, kSimd128RegZero, kSimd128ScratchReg);
- break;
- }
- case kMipsI32x4AddHoriz: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- Simd128Register src0 = i.InputSimd128Register(0);
- Simd128Register src1 = i.InputSimd128Register(1);
- Simd128Register dst = i.OutputSimd128Register();
- __ hadd_s_d(kSimd128ScratchReg, src0, src0);
- __ hadd_s_d(kSimd128RegZero, src1, src1); // kSimd128RegZero as scratch
- __ pckev_w(dst, kSimd128RegZero, kSimd128ScratchReg);
- break;
- }
- case kMipsI16x8AddHoriz: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- Simd128Register src0 = i.InputSimd128Register(0);
- Simd128Register src1 = i.InputSimd128Register(1);
- Simd128Register dst = i.OutputSimd128Register();
- __ hadd_s_w(kSimd128ScratchReg, src0, src0);
- __ hadd_s_w(kSimd128RegZero, src1, src1); // kSimd128RegZero as scratch
- __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
- break;
- }
}
return kSuccess;
} // NOLINT(readability/fn_size)
@@ -3843,6 +3826,7 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
}
+#if V8_ENABLE_WEBASSEMBLY
void CodeGenerator::AssembleArchTrap(Instruction* instr,
FlagsCondition condition) {
class OutOfLineTrap final : public OutOfLineCode {
@@ -3869,8 +3853,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
ExternalReference::wasm_call_trap_callback_for_testing(), 0);
__ LeaveFrame(StackFrame::WASM);
auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
- int pop_count =
- static_cast<int>(call_descriptor->StackParameterCount());
+ int pop_count = static_cast<int>(call_descriptor->ParameterSlotCount());
__ Drop(pop_count);
__ Ret();
} else {
@@ -3895,6 +3878,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
Label* tlabel = ool->entry();
AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
}
+#endif // V8_ENABLE_WEBASSEMBLY
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -4065,6 +4049,11 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
});
}
+void CodeGenerator::AssembleArchSelect(Instruction* instr,
+ FlagsCondition condition) {
+ UNIMPLEMENTED();
+}
+
void CodeGenerator::FinishFrame(Frame* frame) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
@@ -4092,10 +4081,15 @@ void CodeGenerator::AssembleConstructFrame() {
auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (call_descriptor->IsCFunctionCall()) {
+#if V8_ENABLE_WEBASSEMBLY
if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
__ StubPrologue(StackFrame::C_WASM_ENTRY);
// Reserve stack space for saving the c_entry_fp later.
__ Subu(sp, sp, Operand(kSystemPointerSize));
+#else
+ // For balance.
+ if (false) {
+#endif // V8_ENABLE_WEBASSEMBLY
} else {
__ Push(ra, fp);
__ mov(fp, sp);
@@ -4104,6 +4098,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Prologue();
} else {
__ StubPrologue(info()->GetOutputStackFrameType());
+#if V8_ENABLE_WEBASSEMBLY
if (call_descriptor->IsWasmFunctionCall()) {
__ Push(kWasmInstanceRegister);
} else if (call_descriptor->IsWasmImportWrapper() ||
@@ -4122,6 +4117,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Subu(sp, sp, Operand(kSystemPointerSize));
}
}
+#endif // V8_ENABLE_WEBASSEMBLY
}
}
@@ -4147,6 +4143,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (required_slots > 0) {
DCHECK(frame_access_state()->has_frame());
+#if V8_ENABLE_WEBASSEMBLY
if (info()->IsWasm() && required_slots > 128) {
// For WebAssembly functions with big frames we have to do the stack
// overflow check before we construct the frame. Otherwise we may not
@@ -4178,6 +4175,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ bind(&done);
}
+#endif // V8_ENABLE_WEBASSEMBLY
}
const int returns = frame()->GetReturnSlotCount();
@@ -4228,12 +4226,12 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
MipsOperandConverter g(this, nullptr);
- const int parameter_count =
- static_cast<int>(call_descriptor->StackParameterCount());
+ const int parameter_slots =
+ static_cast<int>(call_descriptor->ParameterSlotCount());
- // {aditional_pop_count} is only greater than zero if {parameter_count = 0}.
+ // {additional_pop_count} is only greater than zero if {parameter_slots = 0}.
// Check RawMachineAssembler::PopAndReturn.
- if (parameter_count != 0) {
+ if (parameter_slots != 0) {
if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
} else if (__ emit_debug_code()) {
@@ -4243,12 +4241,12 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
}
// Functions with JS linkage have at least one parameter (the receiver).
- // If {parameter_count} == 0, it means it is a builtin with
+ // If {parameter_slots} == 0, it means it is a builtin with
// kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
// itself.
const bool drop_jsargs = frame_access_state()->has_frame() &&
call_descriptor->IsJSFunctionCall() &&
- parameter_count != 0;
+ parameter_slots != 0;
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
@@ -4273,10 +4271,10 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
if (drop_jsargs) {
// We must pop all arguments from the stack (including the receiver). This
- // number of arguments is given by max(1 + argc_reg, parameter_count).
+ // number of arguments is given by max(1 + argc_reg, parameter_slots).
__ Addu(t0, t0, Operand(1)); // Also pop the receiver.
- if (parameter_count > 1) {
- __ li(kScratchReg, parameter_count);
+ if (parameter_slots > 1) {
+ __ li(kScratchReg, parameter_slots);
__ slt(kScratchReg2, t0, kScratchReg);
__ movn(t0, kScratchReg, kScratchReg2);
}
@@ -4285,10 +4283,10 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
} else if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(Constant::kInt32, g.ToConstant(additional_pop_count).type());
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
- __ Drop(parameter_count + additional_count);
+ __ Drop(parameter_slots + additional_count);
} else {
Register pop_reg = g.ToRegister(additional_pop_count);
- __ Drop(parameter_count);
+ __ Drop(parameter_slots);
__ sll(pop_reg, pop_reg, kSystemPointerSizeLog2);
__ Addu(sp, sp, pop_reg);
}
@@ -4330,11 +4328,12 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
switch (src.type()) {
case Constant::kInt32:
- if (RelocInfo::IsWasmReference(src.rmode())) {
+#if V8_ENABLE_WEBASSEMBLY
+ if (RelocInfo::IsWasmReference(src.rmode()))
__ li(dst, Operand(src.ToInt32(), src.rmode()));
- } else {
+ else
+#endif // V8_ENABLE_WEBASSEMBLY
__ li(dst, Operand(src.ToInt32()));
- }
break;
case Constant::kFloat32:
__ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
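As a quick standalone sketch (not part of the patch) of the pop-count rule the MIPS return sequence above implements for JS-linkage frames: the generated code pops max(1 + argc, parameter_slots) stack slots, since the caller may have pushed more arguments than the callee declares.

#include <algorithm>
#include <cstdio>

// Mirrors the Addu(t0, t0, 1) / slt / movn sequence: add one slot for the
// receiver, then clamp up to at least parameter_slots.
int SlotsToPop(int argc, int parameter_slots) {
  return std::max(argc + 1, parameter_slots);
}

int main() {
  std::printf("%d\n", SlotsToPop(3, 2));  // over-application: pops 4 slots
  std::printf("%d\n", SlotsToPop(1, 4));  // under-application: still pops 4 slots
  return 0;
}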
diff --git a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
index 2048cbfe403..40f1ef3e98b 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
@@ -139,7 +139,6 @@ namespace compiler {
V(MipsI32x4ExtractLane) \
V(MipsI32x4ReplaceLane) \
V(MipsI32x4Add) \
- V(MipsI32x4AddHoriz) \
V(MipsI32x4Sub) \
V(MipsF64x2Abs) \
V(MipsF64x2Neg) \
@@ -209,7 +208,6 @@ namespace compiler {
V(MipsF32x4RecipApprox) \
V(MipsF32x4RecipSqrtApprox) \
V(MipsF32x4Add) \
- V(MipsF32x4AddHoriz) \
V(MipsF32x4Sub) \
V(MipsF32x4Mul) \
V(MipsF32x4Div) \
@@ -253,7 +251,6 @@ namespace compiler {
V(MipsI16x8ShrU) \
V(MipsI16x8Add) \
V(MipsI16x8AddSatS) \
- V(MipsI16x8AddHoriz) \
V(MipsI16x8Sub) \
V(MipsI16x8SubSatS) \
V(MipsI16x8Mul) \
@@ -290,7 +287,6 @@ namespace compiler {
V(MipsI8x16AddSatS) \
V(MipsI8x16Sub) \
V(MipsI8x16SubSatS) \
- V(MipsI8x16Mul) \
V(MipsI8x16MaxS) \
V(MipsI8x16MinS) \
V(MipsI8x16Eq) \
@@ -314,10 +310,10 @@ namespace compiler {
V(MipsS128Not) \
V(MipsS128Select) \
V(MipsS128AndNot) \
- V(MipsV64x2AllTrue) \
- V(MipsV32x4AllTrue) \
- V(MipsV16x8AllTrue) \
- V(MipsV8x16AllTrue) \
+ V(MipsI64x2AllTrue) \
+ V(MipsI32x4AllTrue) \
+ V(MipsI16x8AllTrue) \
+ V(MipsI8x16AllTrue) \
V(MipsV128AnyTrue) \
V(MipsS32x4InterleaveRight) \
V(MipsS32x4InterleaveLeft) \
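The hunks above only rename entries in an X-macro opcode list. As a rough illustration (the names below are hypothetical, not V8's actual declaration machinery), each V(Name) line expands into one enumerator, which is why the same patch also has to rename every scheduler case and selector table entry that mentions the old V64x2AllTrue-style names.

#define DEMO_OPCODE_LIST(V) \
  V(MipsI64x2AllTrue)       \
  V(MipsI32x4AllTrue)       \
  V(MipsV128AnyTrue)

enum DemoOpcode {
#define DECLARE_OPCODE(Name) kDemo##Name,
  DEMO_OPCODE_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
};

int main() { return kDemoMipsV128AnyTrue == 2 ? 0 : 1; }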
diff --git a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
index 291f0630536..0b8a022014e 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
@@ -89,7 +89,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI64x2ExtMulHighI32x4U:
case kMipsF32x4Abs:
case kMipsF32x4Add:
- case kMipsF32x4AddHoriz:
case kMipsF32x4Eq:
case kMipsF32x4ExtractLane:
case kMipsF32x4Le:
@@ -135,7 +134,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsFloorWD:
case kMipsFloorWS:
case kMipsI16x8Add:
- case kMipsI16x8AddHoriz:
case kMipsI16x8AddSatS:
case kMipsI16x8AddSatU:
case kMipsI16x8Eq:
@@ -179,7 +177,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI32x4ExtAddPairwiseI16x8S:
case kMipsI32x4ExtAddPairwiseI16x8U:
case kMipsI32x4Add:
- case kMipsI32x4AddHoriz:
case kMipsI32x4Eq:
case kMipsI32x4ExtractLane:
case kMipsI32x4GeS:
@@ -229,7 +226,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI8x16MaxU:
case kMipsI8x16MinS:
case kMipsI8x16MinU:
- case kMipsI8x16Mul:
case kMipsI8x16Ne:
case kMipsI8x16Neg:
case kMipsI8x16ReplaceLane:
@@ -288,10 +284,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsS16x8InterleaveRight:
case kMipsS16x8PackEven:
case kMipsS16x8PackOdd:
- case kMipsV64x2AllTrue:
- case kMipsV32x4AllTrue:
- case kMipsV16x8AllTrue:
- case kMipsV8x16AllTrue:
+ case kMipsI64x2AllTrue:
+ case kMipsI32x4AllTrue:
+ case kMipsI16x8AllTrue:
+ case kMipsI8x16AllTrue:
case kMipsV128AnyTrue:
case kMipsS32x4InterleaveEven:
case kMipsS32x4InterleaveLeft:
@@ -1391,10 +1387,14 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
// in an empirical way.
switch (instr->arch_opcode()) {
case kArchCallCodeObject:
+#if V8_ENABLE_WEBASSEMBLY
case kArchCallWasmFunction:
+#endif // V8_ENABLE_WEBASSEMBLY
return CallLatency();
case kArchTailCallCodeObject:
+#if V8_ENABLE_WEBASSEMBLY
case kArchTailCallWasm:
+#endif // V8_ENABLE_WEBASSEMBLY
case kArchTailCallAddress:
return JumpLatency();
case kArchCallJSFunction: {
@@ -1531,7 +1531,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return ShrPairLatency();
} else {
// auto immediate_operand = ImmediateOperand::cast(instr->InputAt(2));
- // return ShrPairLatency(false, immediate_operand->inline_value());
+ // return ShrPairLatency(false, immediate_operand->inline_32_value());
return 1;
}
}
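A minimal sketch (not V8 code) of the preprocessor pattern the latency switch above adopts: Wasm-only case labels are compiled out, while the shared return they fall through to stays, so builds without WebAssembly simply have fewer labels mapping to the same latency.

#define DEMO_ENABLE_WEBASSEMBLY 1

enum DemoArchOpcode { kDemoCallCodeObject, kDemoCallWasmFunction, kDemoOther };

int CallLatency() { return 10; }

int GetInstructionLatency(DemoArchOpcode op) {
  switch (op) {
    case kDemoCallCodeObject:
#if DEMO_ENABLE_WEBASSEMBLY
    case kDemoCallWasmFunction:  // this label disappears in no-Wasm builds
#endif
      return CallLatency();
    default:
      return 1;
  }
}

int main() { return GetInstructionLatency(kDemoCallWasmFunction) == 10 ? 0 : 1; }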
diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index be8c17ad9cc..dcea4c85660 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -1121,7 +1121,7 @@ void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
MipsOperandGenerator g(this);
Emit(kMipsFloat64InsertLowWord32, g.DefineAsRegister(node),
- ImmediateOperand(ImmediateOperand::INLINE, 0),
+ ImmediateOperand(ImmediateOperand::INLINE_INT32, 0),
g.UseRegister(node->InputAt(0)));
}
@@ -1349,7 +1349,7 @@ void InstructionSelector::EmitPrepareArguments(
}
} else {
// Possibly align stack here for functions.
- int push_count = static_cast<int>(call_descriptor->StackParameterCount());
+ int push_count = static_cast<int>(call_descriptor->ParameterSlotCount());
if (push_count > 0) {
// Calculate needed space
int stack_size = 0;
@@ -2169,10 +2169,10 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16Popcnt, kMipsI8x16Popcnt) \
V(I8x16BitMask, kMipsI8x16BitMask) \
V(S128Not, kMipsS128Not) \
- V(V64x2AllTrue, kMipsV64x2AllTrue) \
- V(V32x4AllTrue, kMipsV32x4AllTrue) \
- V(V16x8AllTrue, kMipsV16x8AllTrue) \
- V(V8x16AllTrue, kMipsV8x16AllTrue) \
+ V(I64x2AllTrue, kMipsI64x2AllTrue) \
+ V(I32x4AllTrue, kMipsI32x4AllTrue) \
+ V(I16x8AllTrue, kMipsI16x8AllTrue) \
+ V(I8x16AllTrue, kMipsI8x16AllTrue) \
V(V128AnyTrue, kMipsV128AnyTrue)
#define SIMD_SHIFT_OP_LIST(V) \
@@ -2212,7 +2212,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I64x2ExtMulLowI32x4U, kMipsI64x2ExtMulLowI32x4U) \
V(I64x2ExtMulHighI32x4U, kMipsI64x2ExtMulHighI32x4U) \
V(F32x4Add, kMipsF32x4Add) \
- V(F32x4AddHoriz, kMipsF32x4AddHoriz) \
V(F32x4Sub, kMipsF32x4Sub) \
V(F32x4Mul, kMipsF32x4Mul) \
V(F32x4Div, kMipsF32x4Div) \
@@ -2223,7 +2222,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F32x4Lt, kMipsF32x4Lt) \
V(F32x4Le, kMipsF32x4Le) \
V(I32x4Add, kMipsI32x4Add) \
- V(I32x4AddHoriz, kMipsI32x4AddHoriz) \
V(I32x4Sub, kMipsI32x4Sub) \
V(I32x4Mul, kMipsI32x4Mul) \
V(I32x4MaxS, kMipsI32x4MaxS) \
@@ -2245,7 +2243,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8Add, kMipsI16x8Add) \
V(I16x8AddSatS, kMipsI16x8AddSatS) \
V(I16x8AddSatU, kMipsI16x8AddSatU) \
- V(I16x8AddHoriz, kMipsI16x8AddHoriz) \
V(I16x8Sub, kMipsI16x8Sub) \
V(I16x8SubSatS, kMipsI16x8SubSatS) \
V(I16x8SubSatU, kMipsI16x8SubSatU) \
@@ -2275,7 +2272,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16Sub, kMipsI8x16Sub) \
V(I8x16SubSatS, kMipsI8x16SubSatS) \
V(I8x16SubSatU, kMipsI8x16SubSatU) \
- V(I8x16Mul, kMipsI8x16Mul) \
V(I8x16MaxS, kMipsI8x16MaxS) \
V(I8x16MinS, kMipsI8x16MinS) \
V(I8x16MaxU, kMipsI8x16MaxU) \
@@ -2356,6 +2352,7 @@ void InstructionSelector::VisitS128Select(Node* node) {
VisitRRRR(this, kMipsS128Select, node);
}
+#if V8_ENABLE_WEBASSEMBLY
namespace {
struct ShuffleEntry {
@@ -2463,6 +2460,9 @@ void InstructionSelector::VisitI8x16Shuffle(Node* node) {
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 8)),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 12)));
}
+#else
+void InstructionSelector::VisitI8x16Shuffle(Node* node) { UNREACHABLE(); }
+#endif // V8_ENABLE_WEBASSEMBLY
void InstructionSelector::VisitI8x16Swizzle(Node* node) {
MipsOperandGenerator g(this);
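For the shuffle selection above, the assumed behaviour of wasm::SimdShuffle::Pack4Lanes (sketched here with a local helper, not the real V8 function) is to pack four 8-bit lane indices into one 32-bit immediate, so a full 16-lane shuffle travels as four instruction immediates.

#include <cstdint>

// Packs shuffle[0..3] little-endian into a single 32-bit value.
uint32_t PackFourLanes(const uint8_t* shuffle) {
  return static_cast<uint32_t>(shuffle[0]) |
         (static_cast<uint32_t>(shuffle[1]) << 8) |
         (static_cast<uint32_t>(shuffle[2]) << 16) |
         (static_cast<uint32_t>(shuffle[3]) << 24);
}

int main() {
  const uint8_t shuffle[16] = {0, 4, 8, 12, 1, 5, 9, 13,
                               2, 6, 10, 14, 3, 7, 11, 15};
  return PackFourLanes(shuffle) == 0x0C080400u ? 0 : 1;
}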
diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index d6e720b6de1..6edb1539439 100644
--- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -13,7 +13,10 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/heap/memory-chunk.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -152,9 +155,12 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
+#if V8_ENABLE_WEBASSEMBLY
stub_mode_(stub_mode),
+#endif // V8_ENABLE_WEBASSEMBLY
must_save_lr_(!gen->frame_access_state()->has_frame()),
- zone_(gen->zone()) {}
+ zone_(gen->zone()) {
+ }
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -175,12 +181,14 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
}
if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
__ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode);
+#if V8_ENABLE_WEBASSEMBLY
} else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
// A direct call to a wasm runtime stub defined in this module.
// Just encode the stub index. This will be patched when the code
// is added to the native module and copied into wasm code space.
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode, wasm::WasmCode::kRecordWrite);
+#endif // V8_ENABLE_WEBASSEMBLY
} else {
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode);
@@ -197,7 +205,9 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
+#if V8_ENABLE_WEBASSEMBLY
StubCallMode const stub_mode_;
+#endif // V8_ENABLE_WEBASSEMBLY
bool must_save_lr_;
Zone* zone_;
};
@@ -529,15 +539,15 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
} // namespace
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
- int first_unused_stack_slot) {
+ int first_unused_slot_offset) {
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
- first_unused_stack_slot, false);
+ first_unused_slot_offset, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
- int first_unused_stack_slot) {
+ int first_unused_slot_offset) {
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
- first_unused_stack_slot);
+ first_unused_slot_offset);
}
// Check that {kJavaScriptCallCodeStartRegister} is correct.
@@ -621,6 +631,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
+#if V8_ENABLE_WEBASSEMBLY
case kArchCallWasmFunction: {
if (instr->InputAt(0)->IsImmediate()) {
Constant constant = i.ToConstant(instr->InputAt(0));
@@ -634,6 +645,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallWasm: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt64());
+ __ Jump(wasm_code, constant.rmode());
+ } else {
+ __ daddiu(kScratchReg, i.InputRegister(0), 0);
+ __ Jump(kScratchReg);
+ }
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
case kArchTailCallCodeObject: {
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
@@ -649,19 +674,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->SetFrameAccessToDefault();
break;
}
- case kArchTailCallWasm: {
- if (instr->InputAt(0)->IsImmediate()) {
- Constant constant = i.ToConstant(instr->InputAt(0));
- Address wasm_code = static_cast<Address>(constant.ToInt64());
- __ Jump(wasm_code, constant.rmode());
- } else {
- __ daddiu(kScratchReg, i.InputRegister(0), 0);
- __ Jump(kScratchReg);
- }
- frame_access_state()->ClearSPDelta();
- frame_access_state()->SetFrameAccessToDefault();
- break;
- }
case kArchTailCallAddress: {
CHECK(!instr->InputAt(0)->IsImmediate());
Register reg = i.InputRegister(0);
@@ -726,16 +738,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
+#if V8_ENABLE_WEBASSEMBLY
Label start_call;
bool isWasmCapiFunction =
linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
// from start_call to return address.
int offset = __ root_array_available() ? 76 : 88;
+#endif // V8_ENABLE_WEBASSEMBLY
#if V8_HOST_ARCH_MIPS64
if (__ emit_debug_code()) {
offset += 16;
}
#endif
+#if V8_ENABLE_WEBASSEMBLY
if (isWasmCapiFunction) {
// Put the return address in a stack slot.
__ mov(kScratchReg, ra);
@@ -746,6 +761,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sd(ra, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
__ mov(ra, kScratchReg);
}
+#endif // V8_ENABLE_WEBASSEMBLY
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_parameters);
@@ -753,11 +769,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+#if V8_ENABLE_WEBASSEMBLY
if (isWasmCapiFunction) {
CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
RecordSafepoint(instr->reference_map());
}
-
+#endif // V8_ENABLE_WEBASSEMBLY
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
// pointer in CallCFunction. However, for certain architectures (e.g.
@@ -3163,12 +3180,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kMips64I8x16Mul: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ mulv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- break;
- }
case kMips64I8x16MaxS: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ max_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -3325,7 +3336,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_false);
break;
}
- case kMips64V64x2AllTrue: {
+ case kMips64I64x2AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
@@ -3336,7 +3347,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_true);
break;
}
- case kMips64V32x4AllTrue: {
+ case kMips64I32x4AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
@@ -3347,7 +3358,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_true);
break;
}
- case kMips64V16x8AllTrue: {
+ case kMips64I16x8AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
@@ -3358,7 +3369,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_true);
break;
}
- case kMips64V8x16AllTrue: {
+ case kMips64I8x16AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
@@ -3804,38 +3815,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ pckev_b(dst, dst, kSimd128ScratchReg);
break;
}
- case kMips64F32x4AddHoriz: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- Simd128Register src0 = i.InputSimd128Register(0);
- Simd128Register src1 = i.InputSimd128Register(1);
- Simd128Register dst = i.OutputSimd128Register();
- __ shf_w(kSimd128ScratchReg, src0, 0xB1); // 2 3 0 1 : 10110001 : 0xB1
- __ shf_w(kSimd128RegZero, src1, 0xB1); // kSimd128RegZero as scratch
- __ fadd_w(kSimd128ScratchReg, kSimd128ScratchReg, src0);
- __ fadd_w(kSimd128RegZero, kSimd128RegZero, src1);
- __ pckev_w(dst, kSimd128RegZero, kSimd128ScratchReg);
- break;
- }
- case kMips64I32x4AddHoriz: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- Simd128Register src0 = i.InputSimd128Register(0);
- Simd128Register src1 = i.InputSimd128Register(1);
- Simd128Register dst = i.OutputSimd128Register();
- __ hadd_s_d(kSimd128ScratchReg, src0, src0);
- __ hadd_s_d(kSimd128RegZero, src1, src1); // kSimd128RegZero as scratch
- __ pckev_w(dst, kSimd128RegZero, kSimd128ScratchReg);
- break;
- }
- case kMips64I16x8AddHoriz: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- Simd128Register src0 = i.InputSimd128Register(0);
- Simd128Register src1 = i.InputSimd128Register(1);
- Simd128Register dst = i.OutputSimd128Register();
- __ hadd_s_w(kSimd128ScratchReg, src0, src0);
- __ hadd_s_w(kSimd128RegZero, src1, src1); // kSimd128RegZero as scratch
- __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
- break;
- }
}
return kSuccess;
} // NOLINT(readability/fn_size)
@@ -4045,6 +4024,7 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
}
+#if V8_ENABLE_WEBASSEMBLY
void CodeGenerator::AssembleArchTrap(Instruction* instr,
FlagsCondition condition) {
class OutOfLineTrap final : public OutOfLineCode {
@@ -4070,8 +4050,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
ExternalReference::wasm_call_trap_callback_for_testing(), 0);
__ LeaveFrame(StackFrame::WASM);
auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
- int pop_count =
- static_cast<int>(call_descriptor->StackParameterCount());
+ int pop_count = static_cast<int>(call_descriptor->ParameterSlotCount());
pop_count += (pop_count & 1); // align
__ Drop(pop_count);
__ Ret();
@@ -4096,6 +4075,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
Label* tlabel = ool->entry();
AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
}
+#endif // V8_ENABLE_WEBASSEMBLY
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -4282,6 +4262,11 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
});
}
+void CodeGenerator::AssembleArchSelect(Instruction* instr,
+ FlagsCondition condition) {
+ UNIMPLEMENTED();
+}
+
void CodeGenerator::FinishFrame(Frame* frame) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
@@ -4306,10 +4291,15 @@ void CodeGenerator::AssembleConstructFrame() {
if (frame_access_state()->has_frame()) {
if (call_descriptor->IsCFunctionCall()) {
+#if V8_ENABLE_WEBASSEMBLY
if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
__ StubPrologue(StackFrame::C_WASM_ENTRY);
// Reserve stack space for saving the c_entry_fp later.
__ Dsubu(sp, sp, Operand(kSystemPointerSize));
+#else
+ // For balance.
+ if (false) {
+#endif // V8_ENABLE_WEBASSEMBLY
} else {
__ Push(ra, fp);
__ mov(fp, sp);
@@ -4318,6 +4308,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Prologue();
} else {
__ StubPrologue(info()->GetOutputStackFrameType());
+#if V8_ENABLE_WEBASSEMBLY
if (call_descriptor->IsWasmFunctionCall()) {
__ Push(kWasmInstanceRegister);
} else if (call_descriptor->IsWasmImportWrapper() ||
@@ -4336,6 +4327,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Dsubu(sp, sp, Operand(kSystemPointerSize));
}
}
+#endif // V8_ENABLE_WEBASSEMBLY
}
}
@@ -4361,6 +4353,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (required_slots > 0) {
DCHECK(frame_access_state()->has_frame());
+#if V8_ENABLE_WEBASSEMBLY
if (info()->IsWasm() && required_slots > 128) {
// For WebAssembly functions with big frames we have to do the stack
// overflow check before we construct the frame. Otherwise we may not
@@ -4392,6 +4385,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ bind(&done);
}
+#endif // V8_ENABLE_WEBASSEMBLY
}
const int returns = frame()->GetReturnSlotCount();
@@ -4444,12 +4438,12 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
MipsOperandConverter g(this, nullptr);
- const int parameter_count =
- static_cast<int>(call_descriptor->StackParameterCount());
+ const int parameter_slots =
+ static_cast<int>(call_descriptor->ParameterSlotCount());
- // {aditional_pop_count} is only greater than zero if {parameter_count = 0}.
+ // {aditional_pop_count} is only greater than zero if {parameter_slots = 0}.
// Check RawMachineAssembler::PopAndReturn.
- if (parameter_count != 0) {
+ if (parameter_slots != 0) {
if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
} else if (__ emit_debug_code()) {
@@ -4460,12 +4454,12 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
// Functions with JS linkage have at least one parameter (the receiver).
- // If {parameter_count} == 0, it means it is a builtin with
+ // If {parameter_slots} == 0, it means it is a builtin with
// kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
// itself.
const bool drop_jsargs = frame_access_state()->has_frame() &&
call_descriptor->IsJSFunctionCall() &&
- parameter_count != 0;
+ parameter_slots != 0;
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
@@ -4489,10 +4483,10 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
if (drop_jsargs) {
// We must pop all arguments from the stack (including the receiver). This
- // number of arguments is given by max(1 + argc_reg, parameter_count).
+ // number of arguments is given by max(1 + argc_reg, parameter_slots).
__ Daddu(t0, t0, Operand(1)); // Also pop the receiver.
- if (parameter_count > 1) {
- __ li(kScratchReg, parameter_count);
+ if (parameter_slots > 1) {
+ __ li(kScratchReg, parameter_slots);
__ slt(kScratchReg2, t0, kScratchReg);
__ movn(t0, kScratchReg, kScratchReg2);
}
@@ -4500,10 +4494,10 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
__ Daddu(sp, sp, t0);
} else if (additional_pop_count->IsImmediate()) {
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
- __ Drop(parameter_count + additional_count);
+ __ Drop(parameter_slots + additional_count);
} else {
Register pop_reg = g.ToRegister(additional_pop_count);
- __ Drop(parameter_count);
+ __ Drop(parameter_slots);
__ dsll(pop_reg, pop_reg, kSystemPointerSizeLog2);
__ Daddu(sp, sp, pop_reg);
}
@@ -4551,11 +4545,12 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
break;
case Constant::kInt64:
- if (RelocInfo::IsWasmReference(src.rmode())) {
+#if V8_ENABLE_WEBASSEMBLY
+ if (RelocInfo::IsWasmReference(src.rmode()))
__ li(dst, Operand(src.ToInt64(), src.rmode()));
- } else {
+ else
+#endif // V8_ENABLE_WEBASSEMBLY
__ li(dst, Operand(src.ToInt64()));
- }
break;
case Constant::kFloat64:
__ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
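For readers wondering what the deleted *AddHoriz cases above computed, here is a scalar sketch (not V8 code) of the horizontal add they implemented: pairwise sums, with the first operand's pair sums in the low lanes and the second operand's in the high lanes. The opcodes go away together with the horizontal-add operations they backed.

#include <array>

std::array<float, 4> F32x4AddHoriz(const std::array<float, 4>& a,
                                   const std::array<float, 4>& b) {
  return {a[0] + a[1], a[2] + a[3], b[0] + b[1], b[2] + b[3]};
}

int main() {
  auto r = F32x4AddHoriz({1, 2, 3, 4}, {5, 6, 7, 8});
  return (r[0] == 3.0f && r[3] == 15.0f) ? 0 : 1;
}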
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
index a6bed82ea85..e1b40a4be58 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
@@ -171,7 +171,6 @@ namespace compiler {
V(Mips64I32x4ExtractLane) \
V(Mips64I32x4ReplaceLane) \
V(Mips64I32x4Add) \
- V(Mips64I32x4AddHoriz) \
V(Mips64I32x4Sub) \
V(Mips64F64x2Abs) \
V(Mips64F64x2Neg) \
@@ -242,7 +241,6 @@ namespace compiler {
V(Mips64F32x4RecipApprox) \
V(Mips64F32x4RecipSqrtApprox) \
V(Mips64F32x4Add) \
- V(Mips64F32x4AddHoriz) \
V(Mips64F32x4Sub) \
V(Mips64F32x4Mul) \
V(Mips64F32x4Div) \
@@ -281,7 +279,6 @@ namespace compiler {
V(Mips64I16x8ShrU) \
V(Mips64I16x8Add) \
V(Mips64I16x8AddSatS) \
- V(Mips64I16x8AddHoriz) \
V(Mips64I16x8Sub) \
V(Mips64I16x8SubSatS) \
V(Mips64I16x8Mul) \
@@ -312,7 +309,6 @@ namespace compiler {
V(Mips64I8x16AddSatS) \
V(Mips64I8x16Sub) \
V(Mips64I8x16SubSatS) \
- V(Mips64I8x16Mul) \
V(Mips64I8x16MaxS) \
V(Mips64I8x16MinS) \
V(Mips64I8x16Eq) \
@@ -336,10 +332,10 @@ namespace compiler {
V(Mips64S128Not) \
V(Mips64S128Select) \
V(Mips64S128AndNot) \
- V(Mips64V64x2AllTrue) \
- V(Mips64V32x4AllTrue) \
- V(Mips64V16x8AllTrue) \
- V(Mips64V8x16AllTrue) \
+ V(Mips64I64x2AllTrue) \
+ V(Mips64I32x4AllTrue) \
+ V(Mips64I16x8AllTrue) \
+ V(Mips64I8x16AllTrue) \
V(Mips64V128AnyTrue) \
V(Mips64S32x4InterleaveRight) \
V(Mips64S32x4InterleaveLeft) \
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index 6baff2905ee..3b0e5b85fb4 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -116,7 +116,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64ExtAddPairwise:
case kMips64F32x4Abs:
case kMips64F32x4Add:
- case kMips64F32x4AddHoriz:
case kMips64F32x4Eq:
case kMips64F32x4ExtractLane:
case kMips64F32x4Lt:
@@ -165,7 +164,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64FloorWD:
case kMips64FloorWS:
case kMips64I16x8Add:
- case kMips64I16x8AddHoriz:
case kMips64I16x8AddSatS:
case kMips64I16x8AddSatU:
case kMips64I16x8Eq:
@@ -203,7 +201,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I16x8BitMask:
case kMips64I16x8Q15MulRSatS:
case kMips64I32x4Add:
- case kMips64I32x4AddHoriz:
case kMips64I32x4Eq:
case kMips64I32x4ExtractLane:
case kMips64I32x4GeS:
@@ -248,7 +245,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I8x16MaxU:
case kMips64I8x16MinS:
case kMips64I8x16MinU:
- case kMips64I8x16Mul:
case kMips64I8x16Ne:
case kMips64I8x16Neg:
case kMips64I8x16ReplaceLane:
@@ -304,10 +300,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64S16x8PackOdd:
case kMips64S16x2Reverse:
case kMips64S16x4Reverse:
- case kMips64V64x2AllTrue:
- case kMips64V32x4AllTrue:
- case kMips64V16x8AllTrue:
- case kMips64V8x16AllTrue:
+ case kMips64I64x2AllTrue:
+ case kMips64I32x4AllTrue:
+ case kMips64I16x8AllTrue:
+ case kMips64I8x16AllTrue:
case kMips64V128AnyTrue:
case kMips64S32x4InterleaveEven:
case kMips64S32x4InterleaveOdd:
@@ -1294,10 +1290,14 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
// in empirical way.
switch (instr->arch_opcode()) {
case kArchCallCodeObject:
+#if V8_ENABLE_WEBASSEMBLY
case kArchCallWasmFunction:
+#endif // V8_ENABLE_WEBASSEMBLY
return CallLatency();
case kArchTailCallCodeObject:
+#if V8_ENABLE_WEBASSEMBLY
case kArchTailCallWasm:
+#endif // V8_ENABLE_WEBASSEMBLY
case kArchTailCallAddress:
return JumpLatency();
case kArchCallJSFunction: {
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index f704a03af83..b0b3fec8f38 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -1487,7 +1487,8 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
case IrOpcode::kUint32Mod:
case IrOpcode::kUint32MulHigh:
return true;
- case IrOpcode::kLoad: {
+ case IrOpcode::kLoad:
+ case IrOpcode::kLoadImmutable: {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
if (load_rep.IsUnsigned()) {
switch (load_rep.representation()) {
@@ -1595,7 +1596,7 @@ void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
Mips64OperandGenerator g(this);
Emit(kMips64Float64InsertLowWord32, g.DefineAsRegister(node),
- ImmediateOperand(ImmediateOperand::INLINE, 0),
+ ImmediateOperand(ImmediateOperand::INLINE_INT32, 0),
g.UseRegister(node->InputAt(0)));
}
@@ -1768,7 +1769,7 @@ void InstructionSelector::EmitPrepareArguments(
++slot;
}
} else {
- int push_count = static_cast<int>(call_descriptor->StackParameterCount());
+ int push_count = static_cast<int>(call_descriptor->ParameterSlotCount());
if (push_count > 0) {
// Calculate needed space
int stack_size = 0;
@@ -2903,10 +2904,10 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16Popcnt, kMips64I8x16Popcnt) \
V(I8x16BitMask, kMips64I8x16BitMask) \
V(S128Not, kMips64S128Not) \
- V(V64x2AllTrue, kMips64V64x2AllTrue) \
- V(V32x4AllTrue, kMips64V32x4AllTrue) \
- V(V16x8AllTrue, kMips64V16x8AllTrue) \
- V(V8x16AllTrue, kMips64V8x16AllTrue) \
+ V(I64x2AllTrue, kMips64I64x2AllTrue) \
+ V(I32x4AllTrue, kMips64I32x4AllTrue) \
+ V(I16x8AllTrue, kMips64I16x8AllTrue) \
+ V(I8x16AllTrue, kMips64I8x16AllTrue) \
V(V128AnyTrue, kMips64V128AnyTrue)
#define SIMD_SHIFT_OP_LIST(V) \
@@ -2942,7 +2943,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I64x2GtS, kMips64I64x2GtS) \
V(I64x2GeS, kMips64I64x2GeS) \
V(F32x4Add, kMips64F32x4Add) \
- V(F32x4AddHoriz, kMips64F32x4AddHoriz) \
V(F32x4Sub, kMips64F32x4Sub) \
V(F32x4Mul, kMips64F32x4Mul) \
V(F32x4Div, kMips64F32x4Div) \
@@ -2953,7 +2953,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F32x4Lt, kMips64F32x4Lt) \
V(F32x4Le, kMips64F32x4Le) \
V(I32x4Add, kMips64I32x4Add) \
- V(I32x4AddHoriz, kMips64I32x4AddHoriz) \
V(I32x4Sub, kMips64I32x4Sub) \
V(I32x4Mul, kMips64I32x4Mul) \
V(I32x4MaxS, kMips64I32x4MaxS) \
@@ -2970,7 +2969,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8Add, kMips64I16x8Add) \
V(I16x8AddSatS, kMips64I16x8AddSatS) \
V(I16x8AddSatU, kMips64I16x8AddSatU) \
- V(I16x8AddHoriz, kMips64I16x8AddHoriz) \
V(I16x8Sub, kMips64I16x8Sub) \
V(I16x8SubSatS, kMips64I16x8SubSatS) \
V(I16x8SubSatU, kMips64I16x8SubSatU) \
@@ -2995,7 +2993,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16Sub, kMips64I8x16Sub) \
V(I8x16SubSatS, kMips64I8x16SubSatS) \
V(I8x16SubSatU, kMips64I8x16SubSatU) \
- V(I8x16Mul, kMips64I8x16Mul) \
V(I8x16MaxS, kMips64I8x16MaxS) \
V(I8x16MinS, kMips64I8x16MinS) \
V(I8x16MaxU, kMips64I8x16MaxU) \
@@ -3092,6 +3089,7 @@ void InstructionSelector::VisitS128Select(Node* node) {
VisitRRRR(this, kMips64S128Select, node);
}
+#if V8_ENABLE_WEBASSEMBLY
namespace {
struct ShuffleEntry {
@@ -3204,6 +3202,9 @@ void InstructionSelector::VisitI8x16Shuffle(Node* node) {
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 8)),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 12)));
}
+#else
+void InstructionSelector::VisitI8x16Shuffle(Node* node) { UNREACHABLE(); }
+#endif // V8_ENABLE_WEBASSEMBLY
void InstructionSelector::VisitI8x16Swizzle(Node* node) {
Mips64OperandGenerator g(this);
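A tiny illustration (plain C++, not V8 code) of the property behind the newly added kLoadImmutable case in ZeroExtendsWord32ToWord64NoPhis above: an unsigned narrow load already yields a zero-extended value, so no separate extension instruction is needed on MIPS64.

#include <cstdint>

uint64_t LoadAndWiden(const uint32_t* p) {
  uint32_t narrow = *p;                  // unsigned 32-bit load
  return static_cast<uint64_t>(narrow);  // zero-extension is implicit
}

int main() {
  uint32_t v = 0x80000000u;
  return LoadAndWiden(&v) == 0x80000000ull ? 0 : 1;
}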
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index 4e5393bd22e..334d318f02e 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -13,8 +13,11 @@
#include "src/compiler/osr.h"
#include "src/heap/memory-chunk.h"
#include "src/numbers/double.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -40,6 +43,7 @@ class PPCOperandConverter final : public InstructionOperandConverter {
case kFlags_deoptimize_and_poison:
case kFlags_set:
case kFlags_trap:
+ case kFlags_select:
return SetRC;
case kFlags_none:
return LeaveRC;
@@ -140,10 +144,13 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
+#if V8_ENABLE_WEBASSEMBLY
stub_mode_(stub_mode),
+#endif // V8_ENABLE_WEBASSEMBLY
must_save_lr_(!gen->frame_access_state()->has_frame()),
unwinding_info_writer_(unwinding_info_writer),
- zone_(gen->zone()) {}
+ zone_(gen->zone()) {
+ }
OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
Register value, Register scratch0, Register scratch1,
@@ -192,9 +199,11 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
}
if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
__ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode);
+#if V8_ENABLE_WEBASSEMBLY
} else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode, wasm::WasmCode::kRecordWrite);
+#endif // V8_ENABLE_WEBASSEMBLY
} else {
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode);
@@ -215,7 +224,9 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
+#if V8_ENABLE_WEBASSEMBLY
StubCallMode stub_mode_;
+#endif // V8_ENABLE_WEBASSEMBLY
bool must_save_lr_;
UnwindingInfoWriter* const unwinding_info_writer_;
Zone* zone_;
@@ -746,13 +757,13 @@ void AdjustStackPointerForTailCall(
} // namespace
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
- int first_unused_stack_slot) {
+ int first_unused_slot_offset) {
ZoneVector<MoveOperands*> pushes(zone());
GetPushCompatibleMoves(instr, kRegisterPush, &pushes);
if (!pushes.empty() &&
(LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
- first_unused_stack_slot)) {
+ first_unused_slot_offset)) {
PPCOperandConverter g(this, instr);
ZoneVector<Register> pending_pushes(zone());
for (auto move : pushes) {
@@ -778,13 +789,13 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
}
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
- first_unused_stack_slot, nullptr, false);
+ first_unused_slot_offset, nullptr, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
- int first_unused_stack_slot) {
+ int first_unused_slot_offset) {
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
- first_unused_stack_slot);
+ first_unused_slot_offset);
}
// Check that {kJavaScriptCallCodeStartRegister} is correct.
@@ -871,6 +882,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
+#if V8_ENABLE_WEBASSEMBLY
case kArchCallWasmFunction: {
// We must not share code targets for calls to builtins for wasm code, as
// they might need to be patched individually.
@@ -890,24 +902,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallCodeObject: {
- if (HasRegisterInput(instr, 0)) {
- Register reg = i.InputRegister(0);
- DCHECK_IMPLIES(
- instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
- reg == kJavaScriptCallCodeStartRegister);
- __ JumpCodeObject(reg);
- } else {
- // We cannot use the constant pool to load the target since
- // we've already restored the caller's frame.
- ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
- __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
- }
- DCHECK_EQ(LeaveRC, i.OutputRCBit());
- frame_access_state()->ClearSPDelta();
- frame_access_state()->SetFrameAccessToDefault();
- break;
- }
case kArchTailCallWasm: {
// We must not share code targets for calls to builtins for wasm code, as
// they might need to be patched individually.
@@ -927,6 +921,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->SetFrameAccessToDefault();
break;
}
+#endif // V8_ENABLE_WEBASSEMBLY
+ case kArchTailCallCodeObject: {
+ if (HasRegisterInput(instr, 0)) {
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ JumpCodeObject(reg);
+ } else {
+ // We cannot use the constant pool to load the target since
+ // we've already restored the caller's frame.
+ ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
+ __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
+ }
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
case kArchTailCallAddress: {
CHECK(!instr->InputAt(0)->IsImmediate());
Register reg = i.InputRegister(0);
@@ -1021,6 +1034,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
offset += 2 * kInstrSize;
}
#endif
+#if V8_ENABLE_WEBASSEMBLY
if (isWasmCapiFunction) {
__ mflr(r0);
__ bind(&start_call);
@@ -1030,6 +1044,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
__ mtlr(r0);
}
+#endif // V8_ENABLE_WEBASSEMBLY
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_parameters, has_function_descriptor);
@@ -1043,10 +1058,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// counted from where we are binding to the label and ends at this spot.
// If failed, replace it with the correct offset suggested. More info on
// f5ab7d3.
+#if V8_ENABLE_WEBASSEMBLY
if (isWasmCapiFunction) {
CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
RecordSafepoint(instr->reference_map());
}
+#endif // V8_ENABLE_WEBASSEMBLY
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
// pointer in CallCFunction. However, for certain architectures (e.g.
@@ -1719,10 +1736,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Slot-sized arguments are never padded but there may be a gap if
// the slot allocator reclaimed other padding slots. Adjust the stack
// here to skip any gap.
- if (slots > pushed_slots) {
- __ addi(sp, sp,
- Operand(-((slots - pushed_slots) * kSystemPointerSize)));
- }
+ __ AllocateStackSpace((slots - pushed_slots) * kSystemPointerSize);
switch (rep) {
case MachineRepresentation::kFloat32:
__ StoreSingleU(i.InputDoubleRegister(1),
@@ -2050,6 +2064,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
}
+ case kPPC_LoadReverseSimd128RR: {
+ __ xxbrq(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kPPC_StoreWord8:
ASSEMBLE_STORE_INTEGER(stb, stbx);
break;
@@ -2365,26 +2383,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kPPC_F32x4AddHoriz: {
- Simd128Register src0 = i.InputSimd128Register(0);
- Simd128Register src1 = i.InputSimd128Register(1);
- Simd128Register dst = i.OutputSimd128Register();
- Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
- Simd128Register tempFPReg2 = i.ToSimd128Register(instr->TempAt(1));
- constexpr int shift_bits = 32;
- // generate first operand
- __ vpkudum(dst, src1, src0);
- // generate second operand
- __ li(ip, Operand(shift_bits));
- __ mtvsrd(tempFPReg2, ip);
- __ vspltb(tempFPReg2, tempFPReg2, Operand(7));
- __ vsro(tempFPReg1, src0, tempFPReg2);
- __ vsro(tempFPReg2, src1, tempFPReg2);
- __ vpkudum(kScratchSimd128Reg, tempFPReg2, tempFPReg1);
- // add the operands
- __ vaddfp(dst, kScratchSimd128Reg, dst);
- break;
- }
case kPPC_F32x4Sub: {
__ vsubfp(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2437,16 +2435,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kPPC_I32x4AddHoriz: {
- Simd128Register src0 = i.InputSimd128Register(0);
- Simd128Register src1 = i.InputSimd128Register(1);
- Simd128Register dst = i.OutputSimd128Register();
- __ vxor(kScratchSimd128Reg, kScratchSimd128Reg, kScratchSimd128Reg);
- __ vsum2sws(dst, src0, kScratchSimd128Reg);
- __ vsum2sws(kScratchSimd128Reg, src1, kScratchSimd128Reg);
- __ vpkudum(dst, kScratchSimd128Reg, dst);
- break;
- }
case kPPC_I32x4Sub: {
__ vsubuwm(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2462,16 +2450,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kPPC_I16x8AddHoriz: {
- Simd128Register src0 = i.InputSimd128Register(0);
- Simd128Register src1 = i.InputSimd128Register(1);
- Simd128Register dst = i.OutputSimd128Register();
- __ vxor(kScratchSimd128Reg, kScratchSimd128Reg, kScratchSimd128Reg);
- __ vsum4shs(dst, src0, kScratchSimd128Reg);
- __ vsum4shs(kScratchSimd128Reg, src1, kScratchSimd128Reg);
- __ vpkuwus(dst, kScratchSimd128Reg, dst);
- break;
- }
case kPPC_I16x8Sub: {
__ vsubuhm(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2501,20 +2479,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kPPC_I8x16Mul: {
- Simd128Register src0 = i.InputSimd128Register(0);
- Simd128Register src1 = i.InputSimd128Register(1);
- Simd128Register dst = i.OutputSimd128Register();
- Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
- __ vmuleub(kScratchSimd128Reg, src0, src1);
- __ vmuloub(i.OutputSimd128Register(), src0, src1);
- __ xxspltib(tempFPReg1, Operand(8));
- __ vslh(kScratchSimd128Reg, kScratchSimd128Reg, tempFPReg1);
- __ vslh(dst, dst, tempFPReg1);
- __ vsrh(dst, dst, tempFPReg1);
- __ vor(dst, kScratchSimd128Reg, dst);
- break;
- }
case kPPC_I64x2MinS: {
__ vminsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2964,6 +2928,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vadduwm(i.OutputSimd128Register(), kScratchSimd128Reg, tempFPReg1);
break;
}
+ case kPPC_I64x2Abs: {
+ Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
+ Simd128Register src = i.InputSimd128Register(0);
+ constexpr int shift_bits = 63;
+ __ li(ip, Operand(shift_bits));
+ __ mtvsrd(kScratchSimd128Reg, ip);
+ __ vspltb(kScratchSimd128Reg, kScratchSimd128Reg, Operand(7));
+ __ vsrad(kScratchSimd128Reg, src, kScratchSimd128Reg);
+ __ vxor(tempFPReg1, src, kScratchSimd128Reg);
+ __ vsubudm(i.OutputSimd128Register(), tempFPReg1, kScratchSimd128Reg);
+ break;
+ }
case kPPC_I32x4Abs: {
Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
Simd128Register src = i.InputSimd128Register(0);
@@ -3021,12 +2997,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kPPC_V128AnyTrue: {
Simd128Register src = i.InputSimd128Register(0);
Register dst = i.OutputRegister();
+ constexpr uint8_t fxm = 0x2; // field mask.
constexpr int bit_number = 24;
__ li(r0, Operand(0));
__ li(ip, Operand(1));
// Check if both lanes are 0, if so then return false.
__ vxor(kScratchSimd128Reg, kScratchSimd128Reg, kScratchSimd128Reg);
- __ mtcrf(0xFF, r0); // Clear cr.
+ __ mtcrf(r0, fxm); // Clear cr6.
__ vcmpequd(kScratchSimd128Reg, src, kScratchSimd128Reg, SetRC);
__ isel(dst, r0, ip, bit_number);
break;
@@ -3034,27 +3011,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#define SIMD_ALL_TRUE(opcode) \
Simd128Register src = i.InputSimd128Register(0); \
Register dst = i.OutputRegister(); \
+ constexpr uint8_t fxm = 0x2; /* field mask. */ \
constexpr int bit_number = 24; \
__ li(r0, Operand(0)); \
__ li(ip, Operand(1)); \
/* Check if all lanes > 0, if not then return false.*/ \
__ vxor(kScratchSimd128Reg, kScratchSimd128Reg, kScratchSimd128Reg); \
- __ mtcrf(0xFF, r0); /* Clear cr.*/ \
+ __ mtcrf(r0, fxm); /* Clear cr6.*/ \
__ opcode(kScratchSimd128Reg, src, kScratchSimd128Reg, SetRC); \
__ isel(dst, ip, r0, bit_number);
- case kPPC_V64x2AllTrue: {
+ case kPPC_I64x2AllTrue: {
SIMD_ALL_TRUE(vcmpgtud)
break;
}
- case kPPC_V32x4AllTrue: {
+ case kPPC_I32x4AllTrue: {
SIMD_ALL_TRUE(vcmpgtuw)
break;
}
- case kPPC_V16x8AllTrue: {
+ case kPPC_I16x8AllTrue: {
SIMD_ALL_TRUE(vcmpgtuh)
break;
}
- case kPPC_V8x16AllTrue: {
+ case kPPC_I8x16AllTrue: {
SIMD_ALL_TRUE(vcmpgtub)
break;
}
@@ -3686,7 +3664,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ li(kScratchReg, Operand(1));
__ mtvsrd(kScratchSimd128Reg, kScratchReg);
__ vsplth(kScratchSimd128Reg, kScratchSimd128Reg, Operand(3));
- EXT_ADD_PAIRWISE(vmulesh, vmulesh, vadduwm)
+ EXT_ADD_PAIRWISE(vmulesh, vmulosh, vadduwm)
break;
}
case kPPC_I32x4ExtAddPairwiseI16x8U: {
@@ -3696,7 +3674,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ li(kScratchReg, Operand(1));
__ mtvsrd(kScratchSimd128Reg, kScratchReg);
__ vsplth(kScratchSimd128Reg, kScratchSimd128Reg, Operand(3));
- EXT_ADD_PAIRWISE(vmuleuh, vmuleuh, vadduwm)
+ EXT_ADD_PAIRWISE(vmuleuh, vmulouh, vadduwm)
break;
}
@@ -3705,7 +3683,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register dst = i.OutputSimd128Register();
Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
__ xxspltib(kScratchSimd128Reg, Operand(1));
- EXT_ADD_PAIRWISE(vmulesb, vmulesb, vadduhm)
+ EXT_ADD_PAIRWISE(vmulesb, vmulosb, vadduhm)
break;
}
case kPPC_I16x8ExtAddPairwiseI8x16U: {
@@ -3713,7 +3691,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register dst = i.OutputSimd128Register();
Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
__ xxspltib(kScratchSimd128Reg, Operand(1));
- EXT_ADD_PAIRWISE(vmuleub, vmuleub, vadduhm)
+ EXT_ADD_PAIRWISE(vmuleub, vmuloub, vadduhm)
break;
}
#undef EXT_ADD_PAIRWISE
@@ -3723,31 +3701,151 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1), kScratchSimd128Reg);
break;
}
-#define SIGN_SELECT(compare_gt) \
- Simd128Register src0 = i.InputSimd128Register(0); \
- Simd128Register src1 = i.InputSimd128Register(1); \
- Simd128Register src2 = i.InputSimd128Register(2); \
- Simd128Register dst = i.OutputSimd128Register(); \
- __ vxor(kScratchSimd128Reg, kScratchSimd128Reg, kScratchSimd128Reg); \
- __ compare_gt(kScratchSimd128Reg, kScratchSimd128Reg, src2); \
- __ vsel(dst, src1, src0, kScratchSimd128Reg);
- case kPPC_I8x16SignSelect: {
- SIGN_SELECT(vcmpgtsb)
+#define EXT_MUL(mul_even, mul_odd) \
+ Simd128Register dst = i.OutputSimd128Register(), \
+ src0 = i.InputSimd128Register(0), \
+ src1 = i.InputSimd128Register(1); \
+ __ mul_even(dst, src0, src1); \
+ __ mul_odd(kScratchSimd128Reg, src0, src1);
+ case kPPC_I64x2ExtMulLowI32x4S: {
+ constexpr int lane_width_in_bytes = 8;
+ EXT_MUL(vmulesw, vmulosw)
+ __ vextractd(dst, dst, Operand(1 * lane_width_in_bytes));
+ __ vextractd(kScratchSimd128Reg, kScratchSimd128Reg,
+ Operand(1 * lane_width_in_bytes));
+ __ vinsertd(dst, kScratchSimd128Reg, Operand(1 * lane_width_in_bytes));
+ break;
+ }
+ case kPPC_I64x2ExtMulHighI32x4S: {
+ constexpr int lane_width_in_bytes = 8;
+ EXT_MUL(vmulesw, vmulosw)
+ __ vinsertd(dst, kScratchSimd128Reg, Operand(1 * lane_width_in_bytes));
+ break;
+ }
+ case kPPC_I64x2ExtMulLowI32x4U: {
+ constexpr int lane_width_in_bytes = 8;
+ EXT_MUL(vmuleuw, vmulouw)
+ __ vextractd(dst, dst, Operand(1 * lane_width_in_bytes));
+ __ vextractd(kScratchSimd128Reg, kScratchSimd128Reg,
+ Operand(1 * lane_width_in_bytes));
+ __ vinsertd(dst, kScratchSimd128Reg, Operand(1 * lane_width_in_bytes));
+ break;
+ }
+ case kPPC_I64x2ExtMulHighI32x4U: {
+ constexpr int lane_width_in_bytes = 8;
+ EXT_MUL(vmuleuw, vmulouw)
+ __ vinsertd(dst, kScratchSimd128Reg, Operand(1 * lane_width_in_bytes));
+ break;
+ }
+ case kPPC_I32x4ExtMulLowI16x8S: {
+ EXT_MUL(vmulesh, vmulosh)
+ __ vmrglw(dst, dst, kScratchSimd128Reg);
+ break;
+ }
+ case kPPC_I32x4ExtMulHighI16x8S: {
+ EXT_MUL(vmulesh, vmulosh)
+ __ vmrghw(dst, dst, kScratchSimd128Reg);
+ break;
+ }
+ case kPPC_I32x4ExtMulLowI16x8U: {
+ EXT_MUL(vmuleuh, vmulouh)
+ __ vmrglw(dst, dst, kScratchSimd128Reg);
+ break;
+ }
+ case kPPC_I32x4ExtMulHighI16x8U: {
+ EXT_MUL(vmuleuh, vmulouh)
+ __ vmrghw(dst, dst, kScratchSimd128Reg);
+ break;
+ }
+ case kPPC_I16x8ExtMulLowI8x16S: {
+ EXT_MUL(vmulesb, vmulosb)
+ __ vmrglh(dst, dst, kScratchSimd128Reg);
+ break;
+ }
+ case kPPC_I16x8ExtMulHighI8x16S: {
+ EXT_MUL(vmulesb, vmulosb)
+ __ vmrghh(dst, dst, kScratchSimd128Reg);
+ break;
+ }
+ case kPPC_I16x8ExtMulLowI8x16U: {
+ EXT_MUL(vmuleub, vmuloub)
+ __ vmrglh(dst, dst, kScratchSimd128Reg);
+ break;
+ }
+ case kPPC_I16x8ExtMulHighI8x16U: {
+ EXT_MUL(vmuleub, vmuloub)
+ __ vmrghh(dst, dst, kScratchSimd128Reg);
+ break;
+ }
+#undef EXT_MUL
+ case kPPC_F64x2ConvertLowI32x4S: {
+ __ vupklsw(kScratchSimd128Reg, i.InputSimd128Register(0));
+ __ xvcvsxddp(i.OutputSimd128Register(), kScratchSimd128Reg);
+ break;
+ }
+ case kPPC_F64x2ConvertLowI32x4U: {
+ Simd128Register dst = i.OutputSimd128Register();
+ constexpr int lane_width_in_bytes = 8;
+ __ vupklsw(dst, i.InputSimd128Register(0));
+ // Zero extend.
+ __ mov(ip, Operand(0xFFFFFFFF));
+ __ mtvsrd(kScratchSimd128Reg, ip);
+ __ vinsertd(kScratchSimd128Reg, kScratchSimd128Reg,
+ Operand(1 * lane_width_in_bytes));
+ __ vand(dst, kScratchSimd128Reg, dst);
+ __ xvcvuxddp(dst, dst);
break;
}
- case kPPC_I16x8SignSelect: {
- SIGN_SELECT(vcmpgtsh)
+ case kPPC_F64x2PromoteLowF32x4: {
+ constexpr int lane_number = 8;
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vextractd(kScratchSimd128Reg, src, Operand(lane_number));
+ __ vinsertw(kScratchSimd128Reg, kScratchSimd128Reg, Operand(lane_number));
+ __ xvcvspdp(dst, kScratchSimd128Reg);
break;
}
- case kPPC_I32x4SignSelect: {
- SIGN_SELECT(vcmpgtsw)
+ case kPPC_F32x4DemoteF64x2Zero: {
+ constexpr int lane_number = 8;
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ xvcvdpsp(kScratchSimd128Reg, src);
+ __ vextractuw(dst, kScratchSimd128Reg, Operand(lane_number));
+ __ vinsertw(kScratchSimd128Reg, dst, Operand(4));
+ __ vxor(dst, dst, dst);
+ __ vinsertd(dst, kScratchSimd128Reg, Operand(lane_number));
+ break;
+ }
+ case kPPC_I32x4TruncSatF64x2SZero: {
+ constexpr int lane_number = 8;
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ // NaN to 0.
+ __ vor(kScratchSimd128Reg, src, src);
+ __ xvcmpeqdp(kScratchSimd128Reg, kScratchSimd128Reg, kScratchSimd128Reg);
+ __ vand(kScratchSimd128Reg, src, kScratchSimd128Reg);
+ __ xvcvdpsxws(kScratchSimd128Reg, kScratchSimd128Reg);
+ __ vextractuw(dst, kScratchSimd128Reg, Operand(lane_number));
+ __ vinsertw(kScratchSimd128Reg, dst, Operand(4));
+ __ vxor(dst, dst, dst);
+ __ vinsertd(dst, kScratchSimd128Reg, Operand(lane_number));
+ break;
+ }
+ case kPPC_I32x4TruncSatF64x2UZero: {
+ constexpr int lane_number = 8;
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ xvcvdpuxws(kScratchSimd128Reg, src);
+ __ vextractuw(dst, kScratchSimd128Reg, Operand(lane_number));
+ __ vinsertw(kScratchSimd128Reg, dst, Operand(4));
+ __ vxor(dst, dst, dst);
+ __ vinsertd(dst, kScratchSimd128Reg, Operand(lane_number));
break;
}
- case kPPC_I64x2SignSelect: {
- SIGN_SELECT(vcmpgtsd)
+ case kPPC_I8x16Popcnt: {
+ __ vpopcntb(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
-#undef SIGN_SELECT
case kPPC_StoreCompressTagged: {
ASSEMBLE_STORE_INTEGER(StoreTaggedField, StoreTaggedFieldX);
break;
@@ -3823,6 +3921,7 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
}
+#if V8_ENABLE_WEBASSEMBLY
void CodeGenerator::AssembleArchTrap(Instruction* instr,
FlagsCondition condition) {
class OutOfLineTrap final : public OutOfLineCode {
@@ -3849,8 +3948,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
ExternalReference::wasm_call_trap_callback_for_testing(), 0);
__ LeaveFrame(StackFrame::WASM);
auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
- int pop_count =
- static_cast<int>(call_descriptor->StackParameterCount());
+ int pop_count = static_cast<int>(call_descriptor->ParameterSlotCount());
__ Drop(pop_count);
__ Ret();
} else {
@@ -3891,6 +3989,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ b(cond, tlabel, cr);
__ bind(&end);
}
+#endif // V8_ENABLE_WEBASSEMBLY
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -3977,6 +4076,11 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
__ Jump(kScratchReg);
}
+void CodeGenerator::AssembleArchSelect(Instruction* instr,
+ FlagsCondition condition) {
+ UNIMPLEMENTED();
+}
+
void CodeGenerator::FinishFrame(Frame* frame) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
@@ -4007,10 +4111,15 @@ void CodeGenerator::AssembleConstructFrame() {
auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (call_descriptor->IsCFunctionCall()) {
+#if V8_ENABLE_WEBASSEMBLY
if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
__ StubPrologue(StackFrame::C_WASM_ENTRY);
// Reserve stack space for saving the c_entry_fp later.
__ addi(sp, sp, Operand(-kSystemPointerSize));
+#else
+ // For balance.
+ if (false) {
+#endif // V8_ENABLE_WEBASSEMBLY
} else {
__ mflr(r0);
if (FLAG_enable_embedded_constant_pool) {
@@ -4029,6 +4138,7 @@ void CodeGenerator::AssembleConstructFrame() {
// TODO(mbrandy): Detect cases where ip is the entrypoint (for
// efficient intialization of the constant pool pointer register).
__ StubPrologue(type);
+#if V8_ENABLE_WEBASSEMBLY
if (call_descriptor->IsWasmFunctionCall()) {
__ Push(kWasmInstanceRegister);
} else if (call_descriptor->IsWasmImportWrapper() ||
@@ -4049,6 +4159,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ addi(sp, sp, Operand(-kSystemPointerSize));
}
}
+#endif // V8_ENABLE_WEBASSEMBLY
}
unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
}
@@ -4076,6 +4187,7 @@ void CodeGenerator::AssembleConstructFrame() {
: call_descriptor->CalleeSavedRegisters();
if (required_slots > 0) {
+#if V8_ENABLE_WEBASSEMBLY
if (info()->IsWasm() && required_slots > 128) {
// For WebAssembly functions with big frames we have to do the stack
// overflow check before we construct the frame. Otherwise we may not
@@ -4108,6 +4220,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ bind(&done);
}
+#endif // V8_ENABLE_WEBASSEMBLY
// Skip callee-saved and return slots, which are pushed below.
required_slots -= base::bits::CountPopulation(saves);
@@ -4130,10 +4243,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
const int returns = frame()->GetReturnSlotCount();
- if (returns != 0) {
- // Create space for returns.
- __ Add(sp, sp, -returns * kSystemPointerSize, r0);
- }
+ // Create space for returns.
+ __ AllocateStackSpace(returns * kSystemPointerSize);
}
void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
@@ -4165,12 +4276,12 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// We might need r6 for scratch.
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & r6.bit());
PPCOperandConverter g(this, nullptr);
- const int parameter_count =
- static_cast<int>(call_descriptor->StackParameterCount());
+ const int parameter_slots =
+ static_cast<int>(call_descriptor->ParameterSlotCount());
- // {aditional_pop_count} is only greater than zero if {parameter_count = 0}.
+ // {aditional_pop_count} is only greater than zero if {parameter_slots = 0}.
// Check RawMachineAssembler::PopAndReturn.
- if (parameter_count != 0) {
+ if (parameter_slots != 0) {
if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
} else if (__ emit_debug_code()) {
@@ -4181,12 +4292,12 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
Register argc_reg = r6;
// Functions with JS linkage have at least one parameter (the receiver).
- // If {parameter_count} == 0, it means it is a builtin with
+ // If {parameter_slots} == 0, it means it is a builtin with
// kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
// itself.
const bool drop_jsargs = frame_access_state()->has_frame() &&
call_descriptor->IsJSFunctionCall() &&
- parameter_count != 0;
+ parameter_slots != 0;
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
@@ -4212,25 +4323,25 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
if (drop_jsargs) {
// We must pop all arguments from the stack (including the receiver). This
- // number of arguments is given by max(1 + argc_reg, parameter_count).
+ // number of arguments is given by max(1 + argc_reg, parameter_slots).
__ addi(argc_reg, argc_reg, Operand(1)); // Also pop the receiver.
- if (parameter_count > 1) {
+ if (parameter_slots > 1) {
Label skip;
- __ cmpi(argc_reg, Operand(parameter_count));
+ __ cmpi(argc_reg, Operand(parameter_slots));
__ bgt(&skip);
- __ mov(argc_reg, Operand(parameter_count));
+ __ mov(argc_reg, Operand(parameter_slots));
__ bind(&skip);
}
__ Drop(argc_reg);
} else if (additional_pop_count->IsImmediate()) {
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
- __ Drop(parameter_count + additional_count);
- } else if (parameter_count == 0) {
+ __ Drop(parameter_slots + additional_count);
+ } else if (parameter_slots == 0) {
__ Drop(g.ToRegister(additional_pop_count));
} else {
- // {additional_pop_count} is guaranteed to be zero if {parameter_count !=
+ // {additional_pop_count} is guaranteed to be zero if {parameter_slots !=
// 0}. Check RawMachineAssembler::PopAndReturn.
- __ Drop(parameter_count);
+ __ Drop(parameter_slots);
}
__ Ret();
}
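A minimal standalone C++ sketch of the argument-popping rule the return sequence above implements when dropping JS arguments: the number of slots popped is max(argc + 1, parameter_slots). The function and values below are illustrative assumptions, not part of the patch.

  #include <algorithm>
  #include <cassert>

  // argc: dynamic argument count from the register (excludes the receiver).
  // parameter_slots: static slot count from the call descriptor (includes the receiver).
  int SlotsToDrop(int argc, int parameter_slots) {
    // "+ 1" also pops the receiver, mirroring the addi above.
    return std::max(argc + 1, parameter_slots);
  }

  int main() {
    assert(SlotsToDrop(2, 4) == 4);  // fewer actual args than declared slots
    assert(SlotsToDrop(5, 4) == 6);  // extra args passed: pop them all
    return 0;
  }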
@@ -4239,7 +4350,15 @@ void CodeGenerator::FinishCode() {}
void CodeGenerator::PrepareForDeoptimizationExits(
ZoneDeque<DeoptimizationExit*>* exits) {
- // __ EmitConstantPool();
+ int total_size = 0;
+ for (DeoptimizationExit* exit : deoptimization_exits_) {
+ total_size += (exit->kind() == DeoptimizeKind::kLazy)
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize;
+ }
+
+ __ CheckTrampolinePoolQuick(total_size);
+ DCHECK(Deoptimizer::kSupportsFixedDeoptExitSizes);
}
void CodeGenerator::AssembleMove(InstructionOperand* source,
@@ -4272,26 +4391,22 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
switch (src.type()) {
case Constant::kInt32:
-#if V8_TARGET_ARCH_PPC64
- if (false) {
-#else
+#if V8_ENABLE_WEBASSEMBLY && !V8_TARGET_ARCH_PPC64
if (RelocInfo::IsWasmReference(src.rmode())) {
-#endif
__ mov(dst, Operand(src.ToInt32(), src.rmode()));
- } else {
- __ mov(dst, Operand(src.ToInt32()));
+ break;
}
+#endif // V8_ENABLE_WEBASSEMBLY && !V8_TARGET_ARCH_PPC64
+ __ mov(dst, Operand(src.ToInt32()));
break;
case Constant::kInt64:
-#if V8_TARGET_ARCH_PPC64
+#if V8_ENABLE_WEBASSEMBLY && V8_TARGET_ARCH_PPC64
if (RelocInfo::IsWasmReference(src.rmode())) {
__ mov(dst, Operand(src.ToInt64(), src.rmode()));
- } else {
-#endif
- __ mov(dst, Operand(src.ToInt64()));
-#if V8_TARGET_ARCH_PPC64
+ break;
}
-#endif
+#endif // V8_ENABLE_WEBASSEMBLY && V8_TARGET_ARCH_PPC64
+ __ mov(dst, Operand(src.ToInt64()));
break;
case Constant::kFloat32:
__ mov(dst, Operand::EmbeddedNumber(src.ToFloat32()));
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
index 2ef553a4f54..787cc2a27da 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
@@ -124,6 +124,7 @@ namespace compiler {
V(PPC_LoadFloat32) \
V(PPC_LoadDouble) \
V(PPC_LoadSimd128) \
+ V(PPC_LoadReverseSimd128RR) \
V(PPC_StoreWord8) \
V(PPC_StoreWord16) \
V(PPC_StoreWord32) \
@@ -216,11 +217,13 @@ namespace compiler {
V(PPC_F64x2NearestInt) \
V(PPC_F64x2Pmin) \
V(PPC_F64x2Pmax) \
+ V(PPC_F64x2ConvertLowI32x4S) \
+ V(PPC_F64x2ConvertLowI32x4U) \
+ V(PPC_F64x2PromoteLowF32x4) \
V(PPC_F32x4Splat) \
V(PPC_F32x4ExtractLane) \
V(PPC_F32x4ReplaceLane) \
V(PPC_F32x4Add) \
- V(PPC_F32x4AddHoriz) \
V(PPC_F32x4Sub) \
V(PPC_F32x4Mul) \
V(PPC_F32x4Eq) \
@@ -243,6 +246,9 @@ namespace compiler {
V(PPC_F32x4NearestInt) \
V(PPC_F32x4Pmin) \
V(PPC_F32x4Pmax) \
+ V(PPC_F32x4Qfma) \
+ V(PPC_F32x4Qfms) \
+ V(PPC_F32x4DemoteF64x2Zero) \
V(PPC_I64x2Splat) \
V(PPC_I64x2ExtractLane) \
V(PPC_I64x2ReplaceLane) \
@@ -268,12 +274,15 @@ namespace compiler {
V(PPC_I64x2SConvertI32x4High) \
V(PPC_I64x2UConvertI32x4Low) \
V(PPC_I64x2UConvertI32x4High) \
- V(PPC_I64x2SignSelect) \
+ V(PPC_I64x2ExtMulLowI32x4S) \
+ V(PPC_I64x2ExtMulHighI32x4S) \
+ V(PPC_I64x2ExtMulLowI32x4U) \
+ V(PPC_I64x2ExtMulHighI32x4U) \
+ V(PPC_I64x2Abs) \
V(PPC_I32x4Splat) \
V(PPC_I32x4ExtractLane) \
V(PPC_I32x4ReplaceLane) \
V(PPC_I32x4Add) \
- V(PPC_I32x4AddHoriz) \
V(PPC_I32x4Sub) \
V(PPC_I32x4Mul) \
V(PPC_I32x4MinS) \
@@ -301,15 +310,17 @@ namespace compiler {
V(PPC_I32x4DotI16x8S) \
V(PPC_I32x4ExtAddPairwiseI16x8S) \
V(PPC_I32x4ExtAddPairwiseI16x8U) \
- V(PPC_I32x4SignSelect) \
- V(PPC_F32x4Qfma) \
- V(PPC_F32x4Qfms) \
+ V(PPC_I32x4ExtMulLowI16x8S) \
+ V(PPC_I32x4ExtMulHighI16x8S) \
+ V(PPC_I32x4ExtMulLowI16x8U) \
+ V(PPC_I32x4ExtMulHighI16x8U) \
+ V(PPC_I32x4TruncSatF64x2SZero) \
+ V(PPC_I32x4TruncSatF64x2UZero) \
V(PPC_I16x8Splat) \
V(PPC_I16x8ExtractLaneU) \
V(PPC_I16x8ExtractLaneS) \
V(PPC_I16x8ReplaceLane) \
V(PPC_I16x8Add) \
- V(PPC_I16x8AddHoriz) \
V(PPC_I16x8Sub) \
V(PPC_I16x8Mul) \
V(PPC_I16x8MinS) \
@@ -342,14 +353,16 @@ namespace compiler {
V(PPC_I16x8ExtAddPairwiseI8x16S) \
V(PPC_I16x8ExtAddPairwiseI8x16U) \
V(PPC_I16x8Q15MulRSatS) \
- V(PPC_I16x8SignSelect) \
+ V(PPC_I16x8ExtMulLowI8x16S) \
+ V(PPC_I16x8ExtMulHighI8x16S) \
+ V(PPC_I16x8ExtMulLowI8x16U) \
+ V(PPC_I16x8ExtMulHighI8x16U) \
V(PPC_I8x16Splat) \
V(PPC_I8x16ExtractLaneU) \
V(PPC_I8x16ExtractLaneS) \
V(PPC_I8x16ReplaceLane) \
V(PPC_I8x16Add) \
V(PPC_I8x16Sub) \
- V(PPC_I8x16Mul) \
V(PPC_I8x16MinS) \
V(PPC_I8x16MinU) \
V(PPC_I8x16MaxS) \
@@ -375,11 +388,11 @@ namespace compiler {
V(PPC_I8x16Shuffle) \
V(PPC_I8x16Swizzle) \
V(PPC_I8x16BitMask) \
- V(PPC_I8x16SignSelect) \
- V(PPC_V64x2AllTrue) \
- V(PPC_V32x4AllTrue) \
- V(PPC_V16x8AllTrue) \
- V(PPC_V8x16AllTrue) \
+ V(PPC_I8x16Popcnt) \
+ V(PPC_I64x2AllTrue) \
+ V(PPC_I32x4AllTrue) \
+ V(PPC_I16x8AllTrue) \
+ V(PPC_I8x16AllTrue) \
V(PPC_V128AnyTrue) \
V(PPC_S128And) \
V(PPC_S128Or) \
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
index 90025c5a825..749905a0551 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
@@ -139,11 +139,13 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_F64x2NearestInt:
case kPPC_F64x2Pmin:
case kPPC_F64x2Pmax:
+ case kPPC_F64x2ConvertLowI32x4S:
+ case kPPC_F64x2ConvertLowI32x4U:
+ case kPPC_F64x2PromoteLowF32x4:
case kPPC_F32x4Splat:
case kPPC_F32x4ExtractLane:
case kPPC_F32x4ReplaceLane:
case kPPC_F32x4Add:
- case kPPC_F32x4AddHoriz:
case kPPC_F32x4Sub:
case kPPC_F32x4Mul:
case kPPC_F32x4Eq:
@@ -168,6 +170,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_F32x4NearestInt:
case kPPC_F32x4Pmin:
case kPPC_F32x4Pmax:
+ case kPPC_F32x4DemoteF64x2Zero:
case kPPC_I64x2Splat:
case kPPC_I64x2ExtractLane:
case kPPC_I64x2ReplaceLane:
@@ -193,12 +196,15 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_I64x2SConvertI32x4High:
case kPPC_I64x2UConvertI32x4Low:
case kPPC_I64x2UConvertI32x4High:
- case kPPC_I64x2SignSelect:
+ case kPPC_I64x2ExtMulLowI32x4S:
+ case kPPC_I64x2ExtMulHighI32x4S:
+ case kPPC_I64x2ExtMulLowI32x4U:
+ case kPPC_I64x2ExtMulHighI32x4U:
+ case kPPC_I64x2Abs:
case kPPC_I32x4Splat:
case kPPC_I32x4ExtractLane:
case kPPC_I32x4ReplaceLane:
case kPPC_I32x4Add:
- case kPPC_I32x4AddHoriz:
case kPPC_I32x4Sub:
case kPPC_I32x4Mul:
case kPPC_I32x4MinS:
@@ -226,13 +232,17 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_I32x4DotI16x8S:
case kPPC_I32x4ExtAddPairwiseI16x8S:
case kPPC_I32x4ExtAddPairwiseI16x8U:
- case kPPC_I32x4SignSelect:
+ case kPPC_I32x4ExtMulLowI16x8S:
+ case kPPC_I32x4ExtMulHighI16x8S:
+ case kPPC_I32x4ExtMulLowI16x8U:
+ case kPPC_I32x4ExtMulHighI16x8U:
+ case kPPC_I32x4TruncSatF64x2SZero:
+ case kPPC_I32x4TruncSatF64x2UZero:
case kPPC_I16x8Splat:
case kPPC_I16x8ExtractLaneU:
case kPPC_I16x8ExtractLaneS:
case kPPC_I16x8ReplaceLane:
case kPPC_I16x8Add:
- case kPPC_I16x8AddHoriz:
case kPPC_I16x8Sub:
case kPPC_I16x8Mul:
case kPPC_I16x8MinS:
@@ -265,14 +275,16 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_I16x8ExtAddPairwiseI8x16S:
case kPPC_I16x8ExtAddPairwiseI8x16U:
case kPPC_I16x8Q15MulRSatS:
- case kPPC_I16x8SignSelect:
+ case kPPC_I16x8ExtMulLowI8x16S:
+ case kPPC_I16x8ExtMulHighI8x16S:
+ case kPPC_I16x8ExtMulLowI8x16U:
+ case kPPC_I16x8ExtMulHighI8x16U:
case kPPC_I8x16Splat:
case kPPC_I8x16ExtractLaneU:
case kPPC_I8x16ExtractLaneS:
case kPPC_I8x16ReplaceLane:
case kPPC_I8x16Add:
case kPPC_I8x16Sub:
- case kPPC_I8x16Mul:
case kPPC_I8x16MinS:
case kPPC_I8x16MinU:
case kPPC_I8x16MaxS:
@@ -298,11 +310,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_I8x16Shuffle:
case kPPC_I8x16Swizzle:
case kPPC_I8x16BitMask:
- case kPPC_I8x16SignSelect:
- case kPPC_V64x2AllTrue:
- case kPPC_V32x4AllTrue:
- case kPPC_V16x8AllTrue:
- case kPPC_V8x16AllTrue:
+ case kPPC_I8x16Popcnt:
+ case kPPC_I64x2AllTrue:
+ case kPPC_I32x4AllTrue:
+ case kPPC_I16x8AllTrue:
+ case kPPC_I8x16AllTrue:
case kPPC_V128AnyTrue:
case kPPC_S128And:
case kPPC_S128Or:
@@ -313,6 +325,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_S128Not:
case kPPC_S128Select:
case kPPC_S128AndNot:
+ case kPPC_LoadReverseSimd128RR:
return kNoOpcodeFlags;
case kPPC_LoadWordS8:
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index 05fa443b417..849723bdac5 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -155,7 +155,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitStackSlot(Node* node) {
StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
- int slot = frame_->AllocateSpillSlot(rep.size());
+ int slot = frame_->AllocateSpillSlot(rep.size(), rep.alignment());
OperandGenerator g(this);
Emit(kArchStackSlot, g.DefineAsRegister(node),
@@ -984,9 +984,9 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
}
void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
- // TODO(miladfar): Implement the ppc selector for reversing SIMD bytes.
- // Check if the input node is a Load and do a Load Reverse at once.
- UNIMPLEMENTED();
+ PPCOperandGenerator g(this);
+ Emit(kPPC_LoadReverseSimd128RR, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitInt32Add(Node* node) {
@@ -2147,6 +2147,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
#define SIMD_TYPES(V) \
V(F64x2) \
V(F32x4) \
+ V(I64x2) \
V(I32x4) \
V(I16x8) \
V(I8x16)
@@ -2163,7 +2164,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F64x2Min) \
V(F64x2Max) \
V(F32x4Add) \
- V(F32x4AddHoriz) \
V(F32x4Sub) \
V(F32x4Mul) \
V(F32x4Eq) \
@@ -2178,8 +2178,13 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I64x2Mul) \
V(I64x2Eq) \
V(I64x2Ne) \
+ V(I64x2ExtMulLowI32x4S) \
+ V(I64x2ExtMulHighI32x4S) \
+ V(I64x2ExtMulLowI32x4U) \
+ V(I64x2ExtMulHighI32x4U) \
+ V(I64x2GtS) \
+ V(I64x2GeS) \
V(I32x4Add) \
- V(I32x4AddHoriz) \
V(I32x4Sub) \
V(I32x4Mul) \
V(I32x4MinS) \
@@ -2193,8 +2198,11 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I32x4GtU) \
V(I32x4GeU) \
V(I32x4DotI16x8S) \
+ V(I32x4ExtMulLowI16x8S) \
+ V(I32x4ExtMulHighI16x8S) \
+ V(I32x4ExtMulLowI16x8U) \
+ V(I32x4ExtMulHighI16x8U) \
V(I16x8Add) \
- V(I16x8AddHoriz) \
V(I16x8Sub) \
V(I16x8Mul) \
V(I16x8MinS) \
@@ -2215,9 +2223,12 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8SubSatU) \
V(I16x8RoundingAverageU) \
V(I16x8Q15MulRSatS) \
+ V(I16x8ExtMulLowI8x16S) \
+ V(I16x8ExtMulHighI8x16S) \
+ V(I16x8ExtMulLowI8x16U) \
+ V(I16x8ExtMulHighI8x16U) \
V(I8x16Add) \
V(I8x16Sub) \
- V(I8x16Mul) \
V(I8x16MinS) \
V(I8x16MinU) \
V(I8x16MaxS) \
@@ -2249,6 +2260,9 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F64x2Floor) \
V(F64x2Trunc) \
V(F64x2NearestInt) \
+ V(F64x2ConvertLowI32x4S) \
+ V(F64x2ConvertLowI32x4U) \
+ V(F64x2PromoteLowF32x4) \
V(F32x4Abs) \
V(F32x4Neg) \
V(F32x4RecipApprox) \
@@ -2260,6 +2274,8 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F32x4Floor) \
V(F32x4Trunc) \
V(F32x4NearestInt) \
+ V(F32x4DemoteF64x2Zero) \
+ V(I64x2Abs) \
V(I64x2Neg) \
V(I64x2SConvertI32x4Low) \
V(I64x2SConvertI32x4High) \
@@ -2275,10 +2291,13 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I32x4UConvertI16x8High) \
V(I32x4ExtAddPairwiseI16x8S) \
V(I32x4ExtAddPairwiseI16x8U) \
+ V(I32x4TruncSatF64x2SZero) \
+ V(I32x4TruncSatF64x2UZero) \
V(I16x8Neg) \
V(I16x8Abs) \
V(I8x16Neg) \
V(I8x16Abs) \
+ V(I8x16Popcnt) \
V(I16x8SConvertI8x16Low) \
V(I16x8SConvertI8x16High) \
V(I16x8UConvertI8x16Low) \
@@ -2303,10 +2322,10 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
#define SIMD_BOOL_LIST(V) \
V(V128AnyTrue) \
- V(V64x2AllTrue) \
- V(V32x4AllTrue) \
- V(V16x8AllTrue) \
- V(V8x16AllTrue)
+ V(I64x2AllTrue) \
+ V(I32x4AllTrue) \
+ V(I16x8AllTrue) \
+ V(I8x16AllTrue)
#define SIMD_VISIT_SPLAT(Type) \
void InstructionSelector::Visit##Type##Splat(Node* node) { \
@@ -2326,6 +2345,7 @@ SIMD_TYPES(SIMD_VISIT_SPLAT)
}
SIMD_VISIT_EXTRACT_LANE(F64x2, )
SIMD_VISIT_EXTRACT_LANE(F32x4, )
+SIMD_VISIT_EXTRACT_LANE(I64x2, )
SIMD_VISIT_EXTRACT_LANE(I32x4, )
SIMD_VISIT_EXTRACT_LANE(I16x8, U)
SIMD_VISIT_EXTRACT_LANE(I16x8, S)
@@ -2389,6 +2409,20 @@ SIMD_BOOL_LIST(SIMD_VISIT_BOOL)
#undef SIMD_VISIT_BOOL
#undef SIMD_BOOL_LIST
+#define SIMD_VISIT_QFMOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ PPCOperandGenerator g(this); \
+ Emit(kPPC_##Opcode, g.DefineSameAsFirst(node), \
+ g.UseUniqueRegister(node->InputAt(0)), \
+ g.UseUniqueRegister(node->InputAt(1)), \
+ g.UseRegister(node->InputAt(2))); \
+ }
+SIMD_VISIT_QFMOP(F64x2Qfma)
+SIMD_VISIT_QFMOP(F64x2Qfms)
+SIMD_VISIT_QFMOP(F32x4Qfma)
+SIMD_VISIT_QFMOP(F32x4Qfms)
+#undef SIMD_VISIT_QFMOP
+
#define SIMD_VISIT_BITMASK(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
PPCOperandGenerator g(this); \
@@ -2415,6 +2449,7 @@ SIMD_VISIT_PMIN_MAX(F32x4Pmax)
#undef SIMD_VISIT_PMIN_MAX
#undef SIMD_TYPES
+#if V8_ENABLE_WEBASSEMBLY
void InstructionSelector::VisitI8x16Shuffle(Node* node) {
uint8_t shuffle[kSimd128Size];
bool is_swizzle;
@@ -2439,6 +2474,9 @@ void InstructionSelector::VisitI8x16Shuffle(Node* node) {
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 8)),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 12)));
}
+#else
+void InstructionSelector::VisitI8x16Shuffle(Node* node) { UNREACHABLE(); }
+#endif // V8_ENABLE_WEBASSEMBLY
void InstructionSelector::VisitS128Zero(Node* node) {
PPCOperandGenerator g(this);
@@ -2466,70 +2504,20 @@ void InstructionSelector::VisitS128Const(Node* node) {
} else if (all_ones) {
Emit(kPPC_S128AllOnes, dst);
} else {
- Emit(kPPC_S128Const, dst, g.UseImmediate(val[0]), g.UseImmediate(val[1]),
- g.UseImmediate(val[2]), g.UseImmediate(val[3]));
+ // We have to use Pack4Lanes to reverse the bytes (lanes) on BE,
+    // which in this case has no effect on LE.
+ Emit(kPPC_S128Const, g.DefineAsRegister(node),
+ g.UseImmediate(
+ wasm::SimdShuffle::Pack4Lanes(reinterpret_cast<uint8_t*>(val))),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(
+ reinterpret_cast<uint8_t*>(val) + 4)),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(
+ reinterpret_cast<uint8_t*>(val) + 8)),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(
+ reinterpret_cast<uint8_t*>(val) + 12)));
}
}
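For reference, a hedged sketch of the byte packing VisitS128Const relies on above: four consecutive bytes are folded into one 32-bit immediate with byte 0 in the least significant position. This mirrors the assumed behavior of wasm::SimdShuffle::Pack4Lanes and is only an illustration, not that implementation. Per the comment above, emitting the constant through these packed immediates is what reverses the lanes on big-endian hosts; on little-endian it simply reproduces memory order.

  #include <cstdint>

  // Pack bytes b[0..3] into a little-endian 32-bit immediate.
  int32_t Pack4LanesSketch(const uint8_t* b) {
    return static_cast<int32_t>(static_cast<uint32_t>(b[0]) |
                                (static_cast<uint32_t>(b[1]) << 8) |
                                (static_cast<uint32_t>(b[2]) << 16) |
                                (static_cast<uint32_t>(b[3]) << 24));
  }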
-void InstructionSelector::VisitI64x2ExtMulLowI32x4S(Node* node) {
- UNIMPLEMENTED();
-}
-void InstructionSelector::VisitI64x2ExtMulHighI32x4S(Node* node) {
- UNIMPLEMENTED();
-}
-void InstructionSelector::VisitI64x2ExtMulLowI32x4U(Node* node) {
- UNIMPLEMENTED();
-}
-void InstructionSelector::VisitI64x2ExtMulHighI32x4U(Node* node) {
- UNIMPLEMENTED();
-}
-void InstructionSelector::VisitI32x4ExtMulLowI16x8S(Node* node) {
- UNIMPLEMENTED();
-}
-void InstructionSelector::VisitI32x4ExtMulHighI16x8S(Node* node) {
- UNIMPLEMENTED();
-}
-void InstructionSelector::VisitI32x4ExtMulLowI16x8U(Node* node) {
- UNIMPLEMENTED();
-}
-void InstructionSelector::VisitI32x4ExtMulHighI16x8U(Node* node) {
- UNIMPLEMENTED();
-}
-void InstructionSelector::VisitI16x8ExtMulLowI8x16S(Node* node) {
- UNIMPLEMENTED();
-}
-void InstructionSelector::VisitI16x8ExtMulHighI8x16S(Node* node) {
- UNIMPLEMENTED();
-}
-void InstructionSelector::VisitI16x8ExtMulLowI8x16U(Node* node) {
- UNIMPLEMENTED();
-}
-void InstructionSelector::VisitI16x8ExtMulHighI8x16U(Node* node) {
- UNIMPLEMENTED();
-}
-void InstructionSelector::VisitI8x16Popcnt(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2ConvertLowI32x4S(Node* node) {
- UNIMPLEMENTED();
-}
-void InstructionSelector::VisitF64x2ConvertLowI32x4U(Node* node) {
- UNIMPLEMENTED();
-}
-void InstructionSelector::VisitF64x2PromoteLowF32x4(Node* node) {
- UNIMPLEMENTED();
-}
-void InstructionSelector::VisitF32x4DemoteF64x2Zero(Node* node) {
- UNIMPLEMENTED();
-}
-void InstructionSelector::VisitI32x4TruncSatF64x2SZero(Node* node) {
- UNIMPLEMENTED();
-}
-void InstructionSelector::VisitI32x4TruncSatF64x2UZero(Node* node) {
- UNIMPLEMENTED();
-}
-void InstructionSelector::VisitI64x2GtS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2GeS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2Abs(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::EmitPrepareResults(
ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
Node* node) {
diff --git a/deps/v8/src/compiler/backend/register-allocator-verifier.cc b/deps/v8/src/compiler/backend/register-allocator-verifier.cc
index 50e57bc5adf..1587f0ee182 100644
--- a/deps/v8/src/compiler/backend/register-allocator-verifier.cc
+++ b/deps/v8/src/compiler/backend/register-allocator-verifier.cc
@@ -44,6 +44,18 @@ void VerifyAllocatedGaps(const Instruction* instr, const char* caller_info) {
}
}
+int GetValue(const ImmediateOperand* imm) {
+ switch (imm->type()) {
+ case ImmediateOperand::INLINE_INT32:
+ return imm->inline_int32_value();
+ case ImmediateOperand::INLINE_INT64:
+ return static_cast<int>(imm->inline_int64_value());
+ case ImmediateOperand::INDEXED_RPO:
+ case ImmediateOperand::INDEXED_IMM:
+ return imm->indexed_value();
+ }
+}
+
} // namespace
RegisterAllocatorVerifier::RegisterAllocatorVerifier(
@@ -151,10 +163,8 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
constraint->virtual_register_ = constraint->value_;
} else if (op->IsImmediate()) {
const ImmediateOperand* imm = ImmediateOperand::cast(op);
- int value = imm->type() == ImmediateOperand::INLINE ? imm->inline_value()
- : imm->indexed_value();
constraint->type_ = kImmediate;
- constraint->value_ = value;
+ constraint->value_ = GetValue(imm);
} else {
CHECK(op->IsUnallocated());
const UnallocatedOperand* unallocated = UnallocatedOperand::cast(op);
@@ -221,9 +231,7 @@ void RegisterAllocatorVerifier::CheckConstraint(
case kImmediate: {
CHECK_WITH_MSG(op->IsImmediate(), caller_info_);
const ImmediateOperand* imm = ImmediateOperand::cast(op);
- int value = imm->type() == ImmediateOperand::INLINE
- ? imm->inline_value()
- : imm->indexed_value();
+ int value = GetValue(imm);
CHECK_EQ(value, constraint->value_);
return;
}
diff --git a/deps/v8/src/compiler/backend/register-allocator.cc b/deps/v8/src/compiler/backend/register-allocator.cc
index 84145c87797..3cd6cd98de0 100644
--- a/deps/v8/src/compiler/backend/register-allocator.cc
+++ b/deps/v8/src/compiler/backend/register-allocator.cc
@@ -476,15 +476,6 @@ UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const {
return pos;
}
-UsePosition* LiveRange::NextSlotPosition(LifetimePosition start) const {
- for (UsePosition* pos = NextUsePosition(start); pos != nullptr;
- pos = pos->next()) {
- if (pos->type() != UsePositionType::kRequiresSlot) continue;
- return pos;
- }
- return nullptr;
-}
-
bool LiveRange::CanBeSpilled(LifetimePosition pos) const {
// We cannot spill a live range that has a use requiring a register
// at the current or the immediate next position.
@@ -1325,7 +1316,8 @@ TopTierRegisterAllocationData::TopTierRegisterAllocationData(
spill_state_(code->InstructionBlockCount(), ZoneVector<LiveRange*>(zone),
zone),
flags_(flags),
- tick_counter_(tick_counter) {
+ tick_counter_(tick_counter),
+ slot_for_const_range_(zone) {
if (!kSimpleFPAliasing) {
fixed_float_live_ranges_.resize(
kNumberOfFixedRangesPerRegister * this->config()->num_float_registers(),
@@ -1761,6 +1753,28 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
continue; // Ignore immediates.
}
UnallocatedOperand* cur_input = UnallocatedOperand::cast(input);
+ if (cur_input->HasSlotPolicy()) {
+ TopLevelLiveRange* range =
+ data()->GetOrCreateLiveRangeFor(cur_input->virtual_register());
+ if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) {
+ auto it = data()->slot_for_const_range().find(range);
+ if (it == data()->slot_for_const_range().end()) {
+ int width = ByteWidthForStackSlot(range->representation());
+ int index = data()->frame()->AllocateSpillSlot(width);
+ auto* slot = AllocatedOperand::New(allocation_zone(),
+ LocationOperand::STACK_SLOT,
+ range->representation(), index);
+ it = data()->slot_for_const_range().emplace(range, slot).first;
+ }
+ auto* slot = it->second;
+ int input_vreg = cur_input->virtual_register();
+ UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,
+ input_vreg);
+ // Spill at every use position for simplicity. This case is very rare -
+ // the only known instance is crbug.com/1146880.
+ data()->AddGapMove(instr_index, Instruction::END, input_copy, *slot);
+ }
+ }
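The per-constant spill-slot cache added above can be summarised with a small self-contained sketch (simplified, assumed types; the real code keys a ZoneMap by TopLevelLiveRange* and stores AllocatedOperand*): each constant range used with a slot policy gets exactly one stack slot, allocated lazily and reused on every later request.

  #include <map>

  struct FrameSketch {
    int next_offset = 0;
    int AllocateSpillSlot(int width) {
      next_offset += width;
      return next_offset;  // simplified slot index
    }
  };

  struct ConstSlotCache {
    FrameSketch* frame;
    std::map<int, int> slot_for_const_range;  // range id -> slot index

    int SlotFor(int range_id, int width) {
      auto it = slot_for_const_range.find(range_id);
      if (it == slot_for_const_range.end()) {
        int index = frame->AllocateSpillSlot(width);
        it = slot_for_const_range.emplace(range_id, index).first;
      }
      return it->second;  // same slot on every subsequent use
    }
  };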
if (cur_input->HasFixedPolicy()) {
int input_vreg = cur_input->virtual_register();
UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,
@@ -4078,8 +4092,14 @@ bool LinearScanAllocator::TryAllocateFreeReg(
if (pos < current->End()) {
// Register reg is available at the range start but becomes blocked before
- // the range end. Split current at position where it becomes blocked.
- LiveRange* tail = SplitRangeAt(current, pos);
+ // the range end. Split current before the position where it becomes
+ // blocked. Shift the split position to the last gap position. This is to
+ // ensure that if a connecting move is needed, that move coincides with the
+ // start of the range that it defines. See crbug.com/1182985.
+ LifetimePosition gap_pos =
+ pos.IsGapPosition() ? pos : pos.FullStart().End();
+ if (gap_pos <= current->Start()) return false;
+ LiveRange* tail = SplitRangeAt(current, gap_pos);
AddToUnhandled(tail);
// Try to allocate preferred register once more.
@@ -4088,7 +4108,7 @@ bool LinearScanAllocator::TryAllocateFreeReg(
// Register reg is available at the range start and is free until the range
// end.
- DCHECK(pos >= current->End());
+ DCHECK_GE(pos, current->End());
TRACE("Assigning free reg %s to live range %d:%d\n", RegisterName(reg),
current->TopLevel()->vreg(), current->relative_id());
SetLiveRangeAssignedRegister(current, reg);
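A toy model of the split-position adjustment above (an assumption-based simplification: real LifetimePositions interleave gap and instruction positions; here even numbers stand for gap positions). The point is only that the split is moved back to the enclosing gap position so a connecting move, if one is needed, lands at the start of the range it defines.

  #include <cassert>

  int SplitPositionAtGap(int blocked_pos) {
    // If the register becomes blocked at a non-gap position, shift the split
    // back to the preceding gap position.
    return (blocked_pos % 2 == 0) ? blocked_pos : blocked_pos - 1;
  }

  int main() {
    assert(SplitPositionAtGap(6) == 6);  // already a gap position
    assert(SplitPositionAtGap(7) == 6);  // shifted back to the gap
    return 0;
  }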
@@ -4519,9 +4539,11 @@ void OperandAssigner::AssignSpillSlots() {
for (SpillRange* range : spill_ranges) {
data()->tick_counter()->TickAndMaybeEnterSafepoint();
if (range == nullptr || range->IsEmpty()) continue;
- // Allocate a new operand referring to the spill slot.
if (!range->HasSlot()) {
- int index = data()->frame()->AllocateSpillSlot(range->byte_width());
+ // Allocate a new operand referring to the spill slot, aligned to the
+ // operand size.
+ int width = range->byte_width();
+ int index = data()->frame()->AllocateSpillSlot(width, width);
range->set_assigned_slot(index);
}
}
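A minimal sketch of what requesting AllocateSpillSlot(width, width) means (assumed frame layout, power-of-two widths): the running frame size is rounded up so the slot's byte offset is a multiple of the operand width, which is what aligned SIMD spills need.

  #include <cassert>

  struct FrameSketch {
    int frame_size = 0;  // bytes of spill area allocated so far

    // Returns the byte offset of the new slot, aligned to `alignment`.
    int AllocateSpillSlot(int width, int alignment) {
      frame_size += width;
      frame_size = (frame_size + alignment - 1) & -alignment;  // round up
      return frame_size;
    }
  };

  int main() {
    FrameSketch frame;
    frame.AllocateSpillSlot(8, 8);                  // offset 8
    assert(frame.AllocateSpillSlot(16, 16) == 32);  // padded to a 16-byte boundary
    return 0;
  }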
@@ -4536,9 +4558,14 @@ void OperandAssigner::CommitAssignment() {
if (top_range == nullptr || top_range->IsEmpty()) continue;
InstructionOperand spill_operand;
if (top_range->HasSpillOperand()) {
- spill_operand = *top_range->TopLevel()->GetSpillOperand();
- } else if (top_range->TopLevel()->HasSpillRange()) {
- spill_operand = top_range->TopLevel()->GetSpillRangeOperand();
+ auto it = data()->slot_for_const_range().find(top_range);
+ if (it != data()->slot_for_const_range().end()) {
+ spill_operand = *it->second;
+ } else {
+ spill_operand = *top_range->GetSpillOperand();
+ }
+ } else if (top_range->HasSpillRange()) {
+ spill_operand = top_range->GetSpillRangeOperand();
}
if (top_range->is_phi()) {
data()->GetPhiMapValueFor(top_range)->CommitAssignment(
diff --git a/deps/v8/src/compiler/backend/register-allocator.h b/deps/v8/src/compiler/backend/register-allocator.h
index e2b4d6217fe..214a1d1308f 100644
--- a/deps/v8/src/compiler/backend/register-allocator.h
+++ b/deps/v8/src/compiler/backend/register-allocator.h
@@ -351,6 +351,10 @@ class TopTierRegisterAllocationData final : public RegisterAllocationData {
TickCounter* tick_counter() { return tick_counter_; }
+ ZoneMap<TopLevelLiveRange*, AllocatedOperand*>& slot_for_const_range() {
+ return slot_for_const_range_;
+ }
+
private:
int GetNextLiveRangeId();
@@ -378,6 +382,7 @@ class TopTierRegisterAllocationData final : public RegisterAllocationData {
ZoneVector<ZoneVector<LiveRange*>> spill_state_;
RegisterAllocationFlags flags_;
TickCounter* const tick_counter_;
+ ZoneMap<TopLevelLiveRange*, AllocatedOperand*> slot_for_const_range_;
};
// Representation of the non-empty interval [start,end[.
diff --git a/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
index cc83f22c657..f01941883ed 100644
--- a/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
@@ -509,15 +509,15 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
} // namespace
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
- int first_unused_stack_slot) {
+ int first_unused_slot_offset) {
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
- first_unused_stack_slot, false);
+ first_unused_slot_offset, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
- int first_unused_stack_slot) {
+ int first_unused_slot_offset) {
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
- first_unused_stack_slot);
+ first_unused_slot_offset);
}
// Check that {kJavaScriptCallCodeStartRegister} is correct.
@@ -2074,8 +2074,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
ExternalReference::wasm_call_trap_callback_for_testing(), 0);
__ LeaveFrame(StackFrame::WASM);
auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
- int pop_count =
- static_cast<int>(call_descriptor->StackParameterCount());
+ int pop_count = static_cast<int>(call_descriptor->ParameterSlotCount());
pop_count += (pop_count & 1); // align
__ Drop(pop_count);
__ Ret();
@@ -2441,12 +2440,12 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
RiscvOperandConverter g(this, nullptr);
- const int parameter_count =
- static_cast<int>(call_descriptor->StackParameterCount());
+ const int parameter_slots =
+ static_cast<int>(call_descriptor->ParameterSlotCount());
- // {aditional_pop_count} is only greater than zero if {parameter_count = 0}.
+  // {additional_pop_count} is only greater than zero if {parameter_slots = 0}.
// Check RawMachineAssembler::PopAndReturn.
- if (parameter_count != 0) {
+ if (parameter_slots != 0) {
if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
} else if (__ emit_debug_code()) {
@@ -2457,12 +2456,12 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
// Functions with JS linkage have at least one parameter (the receiver).
- // If {parameter_count} == 0, it means it is a builtin with
+ // If {parameter_slots} == 0, it means it is a builtin with
// kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
// itself.
const bool drop_jsargs = frame_access_state()->has_frame() &&
call_descriptor->IsJSFunctionCall() &&
- parameter_count != 0;
+ parameter_slots != 0;
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
@@ -2486,11 +2485,11 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
if (drop_jsargs) {
// We must pop all arguments from the stack (including the receiver). This
- // number of arguments is given by max(1 + argc_reg, parameter_count).
+ // number of arguments is given by max(1 + argc_reg, parameter_slots).
__ Add64(t0, t0, Operand(1)); // Also pop the receiver.
- if (parameter_count > 1) {
+ if (parameter_slots > 1) {
Label done;
- __ li(kScratchReg, parameter_count);
+ __ li(kScratchReg, parameter_slots);
__ Branch(&done, ge, t0, Operand(kScratchReg));
__ Move(t0, kScratchReg);
__ bind(&done);
@@ -2501,10 +2500,10 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// it should be a kInt32 or a kInt64
DCHECK_LE(g.ToConstant(additional_pop_count).type(), Constant::kInt64);
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
- __ Drop(parameter_count + additional_count);
+ __ Drop(parameter_slots + additional_count);
} else {
Register pop_reg = g.ToRegister(additional_pop_count);
- __ Drop(parameter_count);
+ __ Drop(parameter_slots);
__ Sll64(pop_reg, pop_reg, kSystemPointerSizeLog2);
__ Add64(sp, sp, pop_reg);
}
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h b/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
index fae854ec027..61921d15855 100644
--- a/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
@@ -170,7 +170,6 @@ namespace compiler {
V(RiscvI32x4ExtractLane) \
V(RiscvI32x4ReplaceLane) \
V(RiscvI32x4Add) \
- V(RiscvI32x4AddHoriz) \
V(RiscvI32x4Sub) \
V(RiscvF64x2Abs) \
V(RiscvF64x2Neg) \
@@ -193,7 +192,10 @@ namespace compiler {
V(RiscvI32x4ShrU) \
V(RiscvI32x4MaxU) \
V(RiscvI32x4MinU) \
+ V(RiscvI64x2GtS) \
+ V(RiscvI64x2GeS) \
V(RiscvI64x2Eq) \
+ V(RiscvI64x2Ne) \
V(RiscvF64x2Sqrt) \
V(RiscvF64x2Add) \
V(RiscvF64x2Sub) \
@@ -223,6 +225,7 @@ namespace compiler {
V(RiscvI64x2Add) \
V(RiscvI64x2Sub) \
V(RiscvI64x2Mul) \
+ V(RiscvI64x2Abs) \
V(RiscvI64x2Neg) \
V(RiscvI64x2Shl) \
V(RiscvI64x2ShrS) \
@@ -234,7 +237,6 @@ namespace compiler {
V(RiscvF32x4RecipApprox) \
V(RiscvF32x4RecipSqrtApprox) \
V(RiscvF32x4Add) \
- V(RiscvF32x4AddHoriz) \
V(RiscvF32x4Sub) \
V(RiscvF32x4Mul) \
V(RiscvF32x4Div) \
@@ -273,7 +275,6 @@ namespace compiler {
V(RiscvI16x8ShrU) \
V(RiscvI16x8Add) \
V(RiscvI16x8AddSatS) \
- V(RiscvI16x8AddHoriz) \
V(RiscvI16x8Sub) \
V(RiscvI16x8SubSatS) \
V(RiscvI16x8Mul) \
@@ -304,7 +305,6 @@ namespace compiler {
V(RiscvI8x16AddSatS) \
V(RiscvI8x16Sub) \
V(RiscvI8x16SubSatS) \
- V(RiscvI8x16Mul) \
V(RiscvI8x16MaxS) \
V(RiscvI8x16MinS) \
V(RiscvI8x16Eq) \
@@ -328,10 +328,11 @@ namespace compiler {
V(RiscvS128Not) \
V(RiscvS128Select) \
V(RiscvS128AndNot) \
- V(RiscvV32x4AllTrue) \
- V(RiscvV16x8AllTrue) \
+ V(RiscvI32x4AllTrue) \
+ V(RiscvI16x8AllTrue) \
V(RiscvV128AnyTrue) \
- V(RiscvV8x16AllTrue) \
+ V(RiscvI8x16AllTrue) \
+ V(RiscvI64x2AllTrue) \
V(RiscvS32x4InterleaveRight) \
V(RiscvS32x4InterleaveLeft) \
V(RiscvS32x4PackEven) \
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
index fdc13469026..b83942ffce8 100644
--- a/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
@@ -98,13 +98,15 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvI64x2Sub:
case kRiscvI64x2Mul:
case kRiscvI64x2Neg:
+ case kRiscvI64x2Abs:
case kRiscvI64x2Shl:
case kRiscvI64x2ShrS:
case kRiscvI64x2ShrU:
case kRiscvI64x2BitMask:
+ case kRiscvI64x2GtS:
+ case kRiscvI64x2GeS:
case kRiscvF32x4Abs:
case kRiscvF32x4Add:
- case kRiscvF32x4AddHoriz:
case kRiscvF32x4Eq:
case kRiscvF32x4ExtractLane:
case kRiscvF32x4Lt:
@@ -131,6 +133,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvF32x4Trunc:
case kRiscvF32x4NearestInt:
case kRiscvI64x2Eq:
+ case kRiscvI64x2Ne:
case kRiscvF64x2Splat:
case kRiscvF64x2ExtractLane:
case kRiscvF64x2ReplaceLane:
@@ -158,7 +161,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvI64x2UConvertI32x4Low:
case kRiscvI64x2UConvertI32x4High:
case kRiscvI16x8Add:
- case kRiscvI16x8AddHoriz:
case kRiscvI16x8AddSatS:
case kRiscvI16x8AddSatU:
case kRiscvI16x8Eq:
@@ -198,7 +200,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvI16x8Abs:
case kRiscvI16x8BitMask:
case kRiscvI32x4Add:
- case kRiscvI32x4AddHoriz:
case kRiscvI32x4Eq:
case kRiscvI32x4ExtractLane:
case kRiscvI32x4GeS:
@@ -241,7 +242,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvI8x16MaxU:
case kRiscvI8x16MinS:
case kRiscvI8x16MinU:
- case kRiscvI8x16Mul:
case kRiscvI8x16Ne:
case kRiscvI8x16Neg:
case kRiscvI8x16ReplaceLane:
@@ -295,10 +295,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvS16x8PackOdd:
case kRiscvS16x2Reverse:
case kRiscvS16x4Reverse:
- case kRiscvV8x16AllTrue:
- case kRiscvV32x4AllTrue:
- case kRiscvV16x8AllTrue:
+ case kRiscvI8x16AllTrue:
+ case kRiscvI32x4AllTrue:
+ case kRiscvI16x8AllTrue:
case kRiscvV128AnyTrue:
+ case kRiscvI64x2AllTrue:
case kRiscvS32x4InterleaveEven:
case kRiscvS32x4InterleaveOdd:
case kRiscvS32x4InterleaveLeft:
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
index 4d86fd02a32..1d6b506685e 100644
--- a/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
@@ -878,6 +878,22 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
VisitRRR(this, kRiscvMul32, node);
}
+void InstructionSelector::VisitI32x4ExtAddPairwiseI16x8S(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI32x4ExtAddPairwiseI16x8U(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16S(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16U(Node* node) {
+ UNIMPLEMENTED();
+}
+
void InstructionSelector::VisitInt32MulHigh(Node* node) {
VisitRRR(this, kRiscvMulHigh32, node);
}
@@ -1500,7 +1516,7 @@ void InstructionSelector::EmitPrepareArguments(
++slot;
}
} else {
- int push_count = static_cast<int>(call_descriptor->StackParameterCount());
+ int push_count = static_cast<int>(call_descriptor->ParameterSlotCount());
if (push_count > 0) {
// Calculate needed space
int stack_size = 0;
@@ -2589,8 +2605,12 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F64x2Trunc, kRiscvF64x2Trunc) \
V(F64x2NearestInt, kRiscvF64x2NearestInt) \
V(I64x2Neg, kRiscvI64x2Neg) \
+ V(I64x2Abs, kRiscvI64x2Abs) \
V(I64x2BitMask, kRiscvI64x2BitMask) \
V(I64x2Eq, kRiscvI64x2Eq) \
+ V(I64x2Ne, kRiscvI64x2Ne) \
+ V(I64x2GtS, kRiscvI64x2GtS) \
+ V(I64x2GeS, kRiscvI64x2GeS) \
V(F32x4SConvertI32x4, kRiscvF32x4SConvertI32x4) \
V(F32x4UConvertI32x4, kRiscvF32x4UConvertI32x4) \
V(F32x4Abs, kRiscvF32x4Abs) \
@@ -2631,9 +2651,10 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16Popcnt, kRiscvI8x16Popcnt) \
V(S128Not, kRiscvS128Not) \
V(V128AnyTrue, kRiscvV128AnyTrue) \
- V(V32x4AllTrue, kRiscvV32x4AllTrue) \
- V(V16x8AllTrue, kRiscvV16x8AllTrue) \
- V(V8x16AllTrue, kRiscvV8x16AllTrue)
+ V(I32x4AllTrue, kRiscvI32x4AllTrue) \
+ V(I16x8AllTrue, kRiscvI16x8AllTrue) \
+ V(I8x16AllTrue, kRiscvI8x16AllTrue) \
+ V(I64x2AllTrue, kRiscvI64x2AllTrue) \
#define SIMD_SHIFT_OP_LIST(V) \
V(I64x2Shl) \
@@ -2664,7 +2685,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I64x2Sub, kRiscvI64x2Sub) \
V(I64x2Mul, kRiscvI64x2Mul) \
V(F32x4Add, kRiscvF32x4Add) \
- V(F32x4AddHoriz, kRiscvF32x4AddHoriz) \
V(F32x4Sub, kRiscvF32x4Sub) \
V(F32x4Mul, kRiscvF32x4Mul) \
V(F32x4Div, kRiscvF32x4Div) \
@@ -2675,7 +2695,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F32x4Lt, kRiscvF32x4Lt) \
V(F32x4Le, kRiscvF32x4Le) \
V(I32x4Add, kRiscvI32x4Add) \
- V(I32x4AddHoriz, kRiscvI32x4AddHoriz) \
V(I32x4Sub, kRiscvI32x4Sub) \
V(I32x4Mul, kRiscvI32x4Mul) \
V(I32x4MaxS, kRiscvI32x4MaxS) \
@@ -2692,7 +2711,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8Add, kRiscvI16x8Add) \
V(I16x8AddSatS, kRiscvI16x8AddSatS) \
V(I16x8AddSatU, kRiscvI16x8AddSatU) \
- V(I16x8AddHoriz, kRiscvI16x8AddHoriz) \
V(I16x8Sub, kRiscvI16x8Sub) \
V(I16x8SubSatS, kRiscvI16x8SubSatS) \
V(I16x8SubSatU, kRiscvI16x8SubSatU) \
@@ -2717,7 +2735,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16Sub, kRiscvI8x16Sub) \
V(I8x16SubSatS, kRiscvI8x16SubSatS) \
V(I8x16SubSatU, kRiscvI8x16SubSatU) \
- V(I8x16Mul, kRiscvI8x16Mul) \
V(I8x16MaxS, kRiscvI8x16MaxS) \
V(I8x16MinS, kRiscvI8x16MinS) \
V(I8x16MaxU, kRiscvI8x16MaxU) \
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index f7c5498e07f..c5e16b3e49c 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -12,8 +12,11 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/heap/memory-chunk.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -177,10 +180,13 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
+#if V8_ENABLE_WEBASSEMBLY
stub_mode_(stub_mode),
+#endif // V8_ENABLE_WEBASSEMBLY
must_save_lr_(!gen->frame_access_state()->has_frame()),
unwinding_info_writer_(unwinding_info_writer),
- zone_(gen->zone()) {}
+ zone_(gen->zone()) {
+ }
OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
Register value, Register scratch0, Register scratch1,
@@ -227,9 +233,11 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
}
if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
__ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode);
+#if V8_ENABLE_WEBASSEMBLY
} else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode, wasm::WasmCode::kRecordWrite);
+#endif // V8_ENABLE_WEBASSEMBLY
} else {
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode);
@@ -249,7 +257,9 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
+#if V8_ENABLE_WEBASSEMBLY
StubCallMode stub_mode_;
+#endif // V8_ENABLE_WEBASSEMBLY
bool must_save_lr_;
UnwindingInfoWriter* const unwinding_info_writer_;
Zone* zone_;
@@ -720,116 +730,36 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
__ asm_instr(value, operand); \
} while (0)
-#define ATOMIC_COMP_EXCHANGE(start, end, shift_amount, offset) \
- { \
- __ LoadU32(temp0, MemOperand(addr, offset)); \
- __ llgfr(temp1, temp0); \
- __ RotateInsertSelectBits(temp0, old_val, Operand(start), Operand(end), \
- Operand(shift_amount), false); \
- __ RotateInsertSelectBits(temp1, new_val, Operand(start), Operand(end), \
- Operand(shift_amount), false); \
- __ CmpAndSwap(temp0, temp1, MemOperand(addr, offset)); \
- __ RotateInsertSelectBits(output, temp0, Operand(start + shift_amount), \
- Operand(end + shift_amount), \
- Operand(64 - shift_amount), true); \
- }
-
-#ifdef V8_TARGET_BIG_ENDIAN
-#define ATOMIC_COMP_EXCHANGE_BYTE(i) \
- { \
- constexpr int idx = (i); \
- static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
- constexpr int start = 32 + 8 * idx; \
- constexpr int end = start + 7; \
- constexpr int shift_amount = (3 - idx) * 8; \
- ATOMIC_COMP_EXCHANGE(start, end, shift_amount, -idx); \
- }
-#define ATOMIC_COMP_EXCHANGE_HALFWORD(i) \
- { \
- constexpr int idx = (i); \
- static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
- constexpr int start = 32 + 16 * idx; \
- constexpr int end = start + 15; \
- constexpr int shift_amount = (1 - idx) * 16; \
- ATOMIC_COMP_EXCHANGE(start, end, shift_amount, -idx * 2); \
- }
-#else
-#define ATOMIC_COMP_EXCHANGE_BYTE(i) \
- { \
- constexpr int idx = (i); \
- static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
- constexpr int start = 32 + 8 * (3 - idx); \
- constexpr int end = start + 7; \
- constexpr int shift_amount = idx * 8; \
- ATOMIC_COMP_EXCHANGE(start, end, shift_amount, -idx); \
- }
-#define ATOMIC_COMP_EXCHANGE_HALFWORD(i) \
- { \
- constexpr int idx = (i); \
- static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
- constexpr int start = 32 + 16 * (1 - idx); \
- constexpr int end = start + 15; \
- constexpr int shift_amount = idx * 16; \
- ATOMIC_COMP_EXCHANGE(start, end, shift_amount, -idx * 2); \
- }
-#endif
-
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(load_and_ext) \
- do { \
- Register old_val = i.InputRegister(0); \
- Register new_val = i.InputRegister(1); \
- Register output = i.OutputRegister(); \
- Register addr = kScratchReg; \
- Register temp0 = r0; \
- Register temp1 = r1; \
- size_t index = 2; \
- AddressingMode mode = kMode_None; \
- MemOperand op = i.MemoryOperand(&mode, &index); \
- Label three, two, one, done; \
- __ lay(addr, op); \
- __ tmll(addr, Operand(3)); \
- __ b(Condition(1), &three); \
- __ b(Condition(2), &two); \
- __ b(Condition(4), &one); \
- /* ending with 0b00 */ \
- ATOMIC_COMP_EXCHANGE_BYTE(0); \
- __ b(&done); \
- /* ending with 0b01 */ \
- __ bind(&one); \
- ATOMIC_COMP_EXCHANGE_BYTE(1); \
- __ b(&done); \
- /* ending with 0b10 */ \
- __ bind(&two); \
- ATOMIC_COMP_EXCHANGE_BYTE(2); \
- __ b(&done); \
- /* ending with 0b11 */ \
- __ bind(&three); \
- ATOMIC_COMP_EXCHANGE_BYTE(3); \
- __ bind(&done); \
- __ load_and_ext(output, output); \
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(load_and_ext) \
+ do { \
+ Register old_value = i.InputRegister(0); \
+ Register new_value = i.InputRegister(1); \
+ Register output = i.OutputRegister(); \
+ Register addr = kScratchReg; \
+ Register temp0 = r0; \
+ Register temp1 = r1; \
+ size_t index = 2; \
+ AddressingMode mode = kMode_None; \
+ MemOperand op = i.MemoryOperand(&mode, &index); \
+ __ lay(addr, op); \
+ __ AtomicCmpExchangeU8(addr, output, old_value, new_value, temp0, temp1); \
+ __ load_and_ext(output, output); \
} while (false)
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(load_and_ext) \
- do { \
- Register old_val = i.InputRegister(0); \
- Register new_val = i.InputRegister(1); \
- Register output = i.OutputRegister(); \
- Register addr = kScratchReg; \
- Register temp0 = r0; \
- Register temp1 = r1; \
- size_t index = 2; \
- AddressingMode mode = kMode_None; \
- MemOperand op = i.MemoryOperand(&mode, &index); \
- Label two, done; \
- __ lay(addr, op); \
- __ tmll(addr, Operand(3)); \
- __ b(Condition(2), &two); \
- ATOMIC_COMP_EXCHANGE_HALFWORD(0); \
- __ b(&done); \
- __ bind(&two); \
- ATOMIC_COMP_EXCHANGE_HALFWORD(1); \
- __ bind(&done); \
- __ load_and_ext(output, output); \
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(load_and_ext) \
+ do { \
+ Register old_value = i.InputRegister(0); \
+ Register new_value = i.InputRegister(1); \
+ Register output = i.OutputRegister(); \
+ Register addr = kScratchReg; \
+ Register temp0 = r0; \
+ Register temp1 = r1; \
+ size_t index = 2; \
+ AddressingMode mode = kMode_None; \
+ MemOperand op = i.MemoryOperand(&mode, &index); \
+ __ lay(addr, op); \
+ __ AtomicCmpExchangeU16(addr, output, old_value, new_value, temp0, temp1); \
+ __ load_and_ext(output, output); \
} while (false)
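The TurboAssembler helpers used above (AtomicCmpExchangeU8/U16) encapsulate the technique the deleted macros spelled out: perform the sub-word compare-exchange through a word-wide CAS on the aligned 32-bit word that contains the byte or halfword. A portable, self-contained sketch of that idea (not the s390 assembler code; little-endian byte numbering assumed):

  #include <atomic>
  #include <cstdint>

  // Emulates a byte-wide compare-exchange using a 32-bit CAS on the word that
  // contains the byte. Returns the byte's previous value.
  uint8_t ByteCmpExchange(std::atomic<uint32_t>* word, int byte_index,
                          uint8_t expected, uint8_t desired) {
    const int shift = byte_index * 8;
    const uint32_t mask = 0xFFu << shift;
    uint32_t old_word = word->load();
    for (;;) {
      uint8_t old_byte = static_cast<uint8_t>((old_word & mask) >> shift);
      if (old_byte != expected) return old_byte;  // comparison failed
      uint32_t new_word =
          (old_word & ~mask) | (static_cast<uint32_t>(desired) << shift);
      // On failure, old_word is refreshed with the current contents; retry.
      if (word->compare_exchange_weak(old_word, new_word)) return old_byte;
    }
  }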
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_WORD() \
@@ -1063,13 +993,13 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
} // namespace
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
- int first_unused_stack_slot) {
+ int first_unused_slot_offset) {
ZoneVector<MoveOperands*> pushes(zone());
GetPushCompatibleMoves(instr, kRegisterPush, &pushes);
if (!pushes.empty() &&
(LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
- first_unused_stack_slot)) {
+ first_unused_slot_offset)) {
S390OperandConverter g(this, instr);
ZoneVector<Register> pending_pushes(zone());
for (auto move : pushes) {
@@ -1095,13 +1025,13 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
}
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
- first_unused_stack_slot, nullptr, false);
+ first_unused_slot_offset, nullptr, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
- int first_unused_stack_slot) {
+ int first_unused_slot_offset) {
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
- first_unused_stack_slot);
+ first_unused_slot_offset);
}
// Check that {kJavaScriptCallCodeStartRegister} is correct.
@@ -1192,16 +1122,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
+#if V8_ENABLE_WEBASSEMBLY
case kArchCallWasmFunction: {
// We must not share code targets for calls to builtins for wasm code, as
// they might need to be patched individually.
if (instr->InputAt(0)->IsImmediate()) {
Constant constant = i.ToConstant(instr->InputAt(0));
-#ifdef V8_TARGET_ARCH_S390X
Address wasm_code = static_cast<Address>(constant.ToInt64());
-#else
- Address wasm_code = static_cast<Address>(constant.ToInt32());
-#endif
__ Call(wasm_code, constant.rmode());
} else {
__ Call(i.InputRegister(0));
@@ -1210,6 +1137,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallWasm: {
+ // We must not share code targets for calls to builtins for wasm code, as
+ // they might need to be patched individually.
+ if (instr->InputAt(0)->IsImmediate()) {
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt64());
+ __ Jump(wasm_code, constant.rmode());
+ } else {
+ __ Jump(i.InputRegister(0));
+ }
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
case kArchTailCallCodeObject: {
if (HasRegisterInput(instr, 0)) {
Register reg = i.InputRegister(0);
@@ -1227,24 +1169,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->SetFrameAccessToDefault();
break;
}
- case kArchTailCallWasm: {
- // We must not share code targets for calls to builtins for wasm code, as
- // they might need to be patched individually.
- if (instr->InputAt(0)->IsImmediate()) {
- Constant constant = i.ToConstant(instr->InputAt(0));
-#ifdef V8_TARGET_ARCH_S390X
- Address wasm_code = static_cast<Address>(constant.ToInt64());
-#else
- Address wasm_code = static_cast<Address>(constant.ToInt32());
-#endif
- __ Jump(wasm_code, constant.rmode());
- } else {
- __ Jump(i.InputRegister(0));
- }
- frame_access_state()->ClearSPDelta();
- frame_access_state()->SetFrameAccessToDefault();
- break;
- }
case kArchTailCallAddress: {
CHECK(!instr->InputAt(0)->IsImmediate());
Register reg = i.InputRegister(0);
@@ -1312,12 +1236,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
int const num_parameters = MiscField::decode(instr->opcode());
Label return_location;
// Put the return address in a stack slot.
+#if V8_ENABLE_WEBASSEMBLY
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
// Put the return address in a stack slot.
__ larl(r0, &return_location);
__ StoreU64(r0,
MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
}
+#endif // V8_ENABLE_WEBASSEMBLY
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_parameters);
@@ -1326,9 +1252,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ CallCFunction(func, num_parameters);
}
__ bind(&return_location);
+#if V8_ENABLE_WEBASSEMBLY
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
RecordSafepoint(instr->reference_map());
}
+#endif // V8_ENABLE_WEBASSEMBLY
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
// pointer in CallCFunction. However, for certain architectures (e.g.
@@ -1965,10 +1893,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Slot-sized arguments are never padded but there may be a gap if
// the slot allocator reclaimed other padding slots. Adjust the stack
// here to skip any gap.
- if (slots > pushed_slots) {
- __ lay(sp,
- MemOperand(sp, -((slots - pushed_slots) * kSystemPointerSize)));
- }
+ __ AllocateStackSpace((slots - pushed_slots) * kSystemPointerSize);
switch (rep) {
case MachineRepresentation::kFloat32:
__ lay(sp, MemOperand(sp, -kSystemPointerSize));
@@ -2384,59 +2309,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_Lay:
__ lay(i.OutputRegister(), i.MemoryOperand());
break;
-// 0x aa bb cc dd
-// index = 3..2..1..0
-#define ATOMIC_EXCHANGE(start, end, shift_amount, offset) \
- { \
- Label do_cs; \
- __ LoadU32(output, MemOperand(r1, offset)); \
- __ bind(&do_cs); \
- __ llgfr(r0, output); \
- __ RotateInsertSelectBits(r0, value, Operand(start), Operand(end), \
- Operand(shift_amount), false); \
- __ csy(output, r0, MemOperand(r1, offset)); \
- __ bne(&do_cs, Label::kNear); \
- __ srl(output, Operand(shift_amount)); \
- }
-#ifdef V8_TARGET_BIG_ENDIAN
-#define ATOMIC_EXCHANGE_BYTE(i) \
- { \
- constexpr int idx = (i); \
- static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
- constexpr int start = 32 + 8 * idx; \
- constexpr int end = start + 7; \
- constexpr int shift_amount = (3 - idx) * 8; \
- ATOMIC_EXCHANGE(start, end, shift_amount, -idx); \
- }
-#define ATOMIC_EXCHANGE_HALFWORD(i) \
- { \
- constexpr int idx = (i); \
- static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
- constexpr int start = 32 + 16 * idx; \
- constexpr int end = start + 15; \
- constexpr int shift_amount = (1 - idx) * 16; \
- ATOMIC_EXCHANGE(start, end, shift_amount, -idx * 2); \
- }
-#else
-#define ATOMIC_EXCHANGE_BYTE(i) \
- { \
- constexpr int idx = (i); \
- static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
- constexpr int start = 32 + 8 * (3 - idx); \
- constexpr int end = start + 7; \
- constexpr int shift_amount = idx * 8; \
- ATOMIC_EXCHANGE(start, end, shift_amount, -idx); \
- }
-#define ATOMIC_EXCHANGE_HALFWORD(i) \
- { \
- constexpr int idx = (i); \
- static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
- constexpr int start = 32 + 16 * (1 - idx); \
- constexpr int end = start + 15; \
- constexpr int shift_amount = idx * 16; \
- ATOMIC_EXCHANGE(start, end, shift_amount, -idx * 2); \
- }
-#endif
case kS390_Word64AtomicExchangeUint8:
case kWord32AtomicExchangeInt8:
case kWord32AtomicExchangeUint8: {
@@ -2444,36 +2316,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
Register output = i.OutputRegister();
- Label three, two, one, done;
__ la(r1, MemOperand(base, index));
- __ tmll(r1, Operand(3));
- __ b(Condition(1), &three);
- __ b(Condition(2), &two);
- __ b(Condition(4), &one);
-
- // end with 0b00
- ATOMIC_EXCHANGE_BYTE(0);
- __ b(&done);
-
- // ending with 0b01
- __ bind(&one);
- ATOMIC_EXCHANGE_BYTE(1);
- __ b(&done);
-
- // ending with 0b10
- __ bind(&two);
- ATOMIC_EXCHANGE_BYTE(2);
- __ b(&done);
-
- // ending with 0b11
- __ bind(&three);
- ATOMIC_EXCHANGE_BYTE(3);
-
- __ bind(&done);
+ __ AtomicExchangeU8(r1, value, output, r0);
if (opcode == kWord32AtomicExchangeInt8) {
- __ lgbr(output, output);
+ __ LoadS8(output, output);
} else {
- __ llgcr(output, output);
+ __ LoadU8(output, output);
}
break;
}
@@ -2484,20 +2332,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
Register output = i.OutputRegister();
- Label two, done;
__ la(r1, MemOperand(base, index));
- __ tmll(r1, Operand(3));
- __ b(Condition(2), &two);
-
- // end with 0b00
- ATOMIC_EXCHANGE_HALFWORD(0);
- __ b(&done);
-
- // ending with 0b10
- __ bind(&two);
- ATOMIC_EXCHANGE_HALFWORD(1);
-
- __ bind(&done);
+ __ AtomicExchangeU16(r1, value, output, r0);
if (opcode == kWord32AtomicExchangeInt16) {
__ lghr(output, output);
} else {
@@ -2827,22 +2663,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(2));
break;
}
- case kS390_F32x4AddHoriz: {
- Simd128Register src0 = i.InputSimd128Register(0);
- Simd128Register src1 = i.InputSimd128Register(1);
- Simd128Register dst = i.OutputSimd128Register();
- DoubleRegister tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
- DoubleRegister tempFPReg2 = i.ToSimd128Register(instr->TempAt(1));
- constexpr int shift_bits = 32;
- __ vpk(dst, src1, src0, Condition(0), Condition(0), Condition(3));
- __ vesrl(tempFPReg2, src1, MemOperand(r0, shift_bits), Condition(3));
- __ vesrl(tempFPReg1, src0, MemOperand(r0, shift_bits), Condition(3));
- __ vpk(kScratchDoubleReg, tempFPReg2, tempFPReg1, Condition(0),
- Condition(0), Condition(3));
- __ vfa(dst, kScratchDoubleReg, dst, Condition(0), Condition(0),
- Condition(2));
- break;
- }
case kS390_F32x4Sub: {
__ vfs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
@@ -2922,20 +2742,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(2));
break;
}
- case kS390_I32x4AddHoriz: {
- Simd128Register src0 = i.InputSimd128Register(0);
- Simd128Register src1 = i.InputSimd128Register(1);
- Simd128Register dst = i.OutputSimd128Register();
- __ vs(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
- Condition(0), Condition(0), Condition(2));
- __ vsumg(dst, src0, kScratchDoubleReg, Condition(0), Condition(0),
- Condition(2));
- __ vsumg(kScratchDoubleReg, src1, kScratchDoubleReg, Condition(0),
- Condition(0), Condition(2));
- __ vpk(dst, kScratchDoubleReg, dst, Condition(0), Condition(0),
- Condition(3));
- break;
- }
case kS390_I32x4Sub: {
__ vs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
@@ -2954,20 +2760,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(1));
break;
}
- case kS390_I16x8AddHoriz: {
- Simd128Register src0 = i.InputSimd128Register(0);
- Simd128Register src1 = i.InputSimd128Register(1);
- Simd128Register dst = i.OutputSimd128Register();
- __ vs(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
- Condition(0), Condition(0), Condition(1));
- __ vsum(dst, src0, kScratchDoubleReg, Condition(0), Condition(0),
- Condition(1));
- __ vsum(kScratchDoubleReg, src1, kScratchDoubleReg, Condition(0),
- Condition(0), Condition(1));
- __ vpk(dst, kScratchDoubleReg, dst, Condition(0), Condition(0),
- Condition(2));
- break;
- }
case kS390_I16x8Sub: {
__ vs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
@@ -2992,12 +2784,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0));
break;
}
- case kS390_I8x16Mul: {
- __ vml(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(0));
- break;
- }
case kS390_I16x8RoundingAverageU: {
__ vavgl(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
@@ -3463,19 +3249,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vtm(kScratchDoubleReg, kScratchDoubleReg, Condition(0), Condition(0), \
Condition(0)); \
__ locgr(Condition(8), dst, temp);
- case kS390_V64x2AllTrue: {
+ case kS390_I64x2AllTrue: {
SIMD_ALL_TRUE(3)
break;
}
- case kS390_V32x4AllTrue: {
+ case kS390_I32x4AllTrue: {
SIMD_ALL_TRUE(2)
break;
}
- case kS390_V16x8AllTrue: {
+ case kS390_I16x8AllTrue: {
SIMD_ALL_TRUE(1)
break;
}
- case kS390_V8x16AllTrue: {
+ case kS390_I8x16AllTrue: {
SIMD_ALL_TRUE(0)
break;
}
@@ -3535,39 +3321,79 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0));
break;
}
+ // vector conversions
+#define CONVERT_FLOAT_TO_INT32(convert) \
+ for (int index = 0; index < 4; index++) { \
+ __ vlgv(kScratchReg, kScratchDoubleReg, MemOperand(r0, index), \
+ Condition(2)); \
+ __ MovIntToFloat(tempFPReg1, kScratchReg); \
+ __ convert(kScratchReg, tempFPReg1, kRoundToZero); \
+ __ vlvg(dst, kScratchReg, MemOperand(r0, index), Condition(2)); \
+ }
case kS390_I32x4SConvertF32x4: {
Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
// NaN to 0
__ vlr(kScratchDoubleReg, src, Condition(0), Condition(0), Condition(0));
__ vfce(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
Condition(0), Condition(0), Condition(2));
__ vn(kScratchDoubleReg, src, kScratchDoubleReg, Condition(0),
Condition(0), Condition(0));
- __ vcgd(i.OutputSimd128Register(), kScratchDoubleReg, Condition(5),
- Condition(0), Condition(2));
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
+ __ vcgd(i.OutputSimd128Register(), kScratchDoubleReg, Condition(5),
+ Condition(0), Condition(2));
+ } else {
+ CONVERT_FLOAT_TO_INT32(ConvertFloat32ToInt32)
+ }
break;
}
case kS390_I32x4UConvertF32x4: {
Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
// NaN to 0, negative to 0
__ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
Condition(0), Condition(0), Condition(0));
__ vfmax(kScratchDoubleReg, src, kScratchDoubleReg, Condition(1),
Condition(0), Condition(2));
- __ vclgd(i.OutputSimd128Register(), kScratchDoubleReg, Condition(5),
- Condition(0), Condition(2));
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
+ __ vclgd(i.OutputSimd128Register(), kScratchDoubleReg, Condition(5),
+ Condition(0), Condition(2));
+ } else {
+ CONVERT_FLOAT_TO_INT32(ConvertFloat32ToUnsignedInt32)
+ }
break;
}
+#undef CONVERT_FLOAT_TO_INT32
+#define CONVERT_INT32_TO_FLOAT(convert, double_index) \
+ Simd128Register src = i.InputSimd128Register(0); \
+ Simd128Register dst = i.OutputSimd128Register(); \
+ for (int index = 0; index < 4; index++) { \
+ __ vlgv(kScratchReg, src, MemOperand(r0, index), Condition(2)); \
+ __ convert(kScratchDoubleReg, kScratchReg); \
+ __ MovFloatToInt(kScratchReg, kScratchDoubleReg); \
+ __ vlvg(dst, kScratchReg, MemOperand(r0, index), Condition(2)); \
+ }
case kS390_F32x4SConvertI32x4: {
- __ vcdg(i.OutputSimd128Register(), i.InputSimd128Register(0),
- Condition(4), Condition(0), Condition(2));
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
+ __ vcdg(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ Condition(4), Condition(0), Condition(2));
+ } else {
+ CONVERT_INT32_TO_FLOAT(ConvertIntToFloat, 0)
+ }
break;
}
case kS390_F32x4UConvertI32x4: {
- __ vcdlg(i.OutputSimd128Register(), i.InputSimd128Register(0),
- Condition(4), Condition(0), Condition(2));
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
+ __ vcdlg(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ Condition(4), Condition(0), Condition(2));
+ } else {
+ CONVERT_INT32_TO_FLOAT(ConvertUnsignedIntToFloat, 0)
+ }
break;
}
+#undef CONVERT_INT32_TO_FLOAT
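The fallback paths above convert one lane at a time whenever the VECTOR_ENHANCE_FACILITY_2 CPU feature (and with it vcgd/vclgd/vcdg/vcdlg) is unavailable; the int-to-float macro works the same way in the opposite direction. A minimal scalar sketch of what each lane of the signed float-to-int case computes, assuming the NaN masking done by the vfce/vn sequence, round-toward-zero conversion (kRoundToZero in the fallback) and the saturation that Wasm trunc_sat semantics require. The function name and std::array framing are illustrative, not V8 helpers:

    #include <array>
    #include <cmath>
    #include <cstdint>

    std::array<std::int32_t, 4> I32x4SConvertF32x4Scalar(
        const std::array<float, 4>& src) {
      std::array<std::int32_t, 4> dst{};
      for (int lane = 0; lane < 4; ++lane) {
        float v = src[lane];
        if (std::isnan(v)) v = 0.0f;            // the vfce/vn sequence zeroes NaN lanes
        if (v >= 2147483648.0f) {               // saturate positive overflow
          dst[lane] = INT32_MAX;
        } else if (v <= -2147483648.0f) {       // saturate negative overflow
          dst[lane] = INT32_MIN;
        } else {
          dst[lane] = static_cast<std::int32_t>(v);  // truncation == kRoundToZero
        }
      }
      return dst;
    }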
#define VECTOR_UNPACK(op, mode) \
__ op(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0), \
Condition(0), Condition(mode));
@@ -4017,33 +3843,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
#undef Q15_MUL_ROAUND
-#define SIGN_SELECT(mode) \
- Simd128Register src0 = i.InputSimd128Register(0); \
- Simd128Register src1 = i.InputSimd128Register(1); \
- Simd128Register src2 = i.InputSimd128Register(2); \
- Simd128Register dst = i.OutputSimd128Register(); \
- __ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg, Condition(0), \
- Condition(0), Condition(3)); \
- __ vch(kScratchDoubleReg, kScratchDoubleReg, src2, Condition(0), \
- Condition(mode)); \
- __ vsel(dst, src0, src1, kScratchDoubleReg, Condition(0), Condition(0));
- case kS390_I8x16SignSelect: {
- SIGN_SELECT(0)
- break;
- }
- case kS390_I16x8SignSelect: {
- SIGN_SELECT(1)
- break;
- }
- case kS390_I32x4SignSelect: {
- SIGN_SELECT(2)
- break;
- }
- case kS390_I64x2SignSelect: {
- SIGN_SELECT(3)
- break;
- }
-#undef SIGN_SELECT
case kS390_I8x16Popcnt: {
__ vpopct(i.OutputSimd128Register(), i.InputSimd128Register(0),
Condition(0), Condition(0), Condition(0));
@@ -4190,6 +3989,7 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
}
+#if V8_ENABLE_WEBASSEMBLY
void CodeGenerator::AssembleArchTrap(Instruction* instr,
FlagsCondition condition) {
class OutOfLineTrap final : public OutOfLineCode {
@@ -4216,8 +4016,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
ExternalReference::wasm_call_trap_callback_for_testing(), 0);
__ LeaveFrame(StackFrame::WASM);
auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
- int pop_count =
- static_cast<int>(call_descriptor->StackParameterCount());
+ int pop_count = static_cast<int>(call_descriptor->ParameterSlotCount());
__ Drop(pop_count);
__ Ret();
} else {
@@ -4255,6 +4054,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ b(cond, tlabel);
__ bind(&end);
}
+#endif // V8_ENABLE_WEBASSEMBLY
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -4316,6 +4116,11 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
__ Jump(kScratchReg);
}
+void CodeGenerator::AssembleArchSelect(Instruction* instr,
+ FlagsCondition condition) {
+ UNIMPLEMENTED();
+}
+
void CodeGenerator::FinishFrame(Frame* frame) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
@@ -4343,10 +4148,15 @@ void CodeGenerator::AssembleConstructFrame() {
if (frame_access_state()->has_frame()) {
if (call_descriptor->IsCFunctionCall()) {
+#if V8_ENABLE_WEBASSEMBLY
if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
__ StubPrologue(StackFrame::C_WASM_ENTRY);
// Reserve stack space for saving the c_entry_fp later.
__ lay(sp, MemOperand(sp, -kSystemPointerSize));
+#else
+ // For balance.
+ if (false) {
+#endif // V8_ENABLE_WEBASSEMBLY
} else {
__ Push(r14, fp);
__ mov(fp, sp);
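The "// For balance." arm above is a small preprocessor idiom: the first branch of the if/else chain only exists when V8_ENABLE_WEBASSEMBLY is defined, so the #else side supplies a dead "if (false) {" to keep the chain syntactically complete either way. A standalone sketch of the pattern, with FEATURE_X and the function standing in for the real macro and prologue code:

    // FEATURE_X stands in for V8_ENABLE_WEBASSEMBLY; prologue bodies are elided.
    void BuildPrologue(bool is_feature_frame, bool is_js_frame) {
    #if defined(FEATURE_X)
      if (is_feature_frame) {
        // feature-specific prologue
    #else
      (void)is_feature_frame;
      // For balance: keeps the if / else-if chain well-formed with the feature off.
      if (false) {
    #endif
      } else if (is_js_frame) {
        // JS prologue
      } else {
        // generic prologue
      }
    }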
@@ -4358,6 +4168,7 @@ void CodeGenerator::AssembleConstructFrame() {
// TODO(mbrandy): Detect cases where ip is the entrypoint (for
// efficient intialization of the constant pool pointer register).
__ StubPrologue(type);
+#if V8_ENABLE_WEBASSEMBLY
if (call_descriptor->IsWasmFunctionCall()) {
__ Push(kWasmInstanceRegister);
} else if (call_descriptor->IsWasmImportWrapper() ||
@@ -4378,6 +4189,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ lay(sp, MemOperand(sp, -kSystemPointerSize));
}
}
+#endif // V8_ENABLE_WEBASSEMBLY
}
unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
}
@@ -4402,6 +4214,7 @@ void CodeGenerator::AssembleConstructFrame() {
const RegList saves = call_descriptor->CalleeSavedRegisters();
if (required_slots > 0) {
+#if V8_ENABLE_WEBASSEMBLY
if (info()->IsWasm() && required_slots > 128) {
// For WebAssembly functions with big frames we have to do the stack
// overflow check before we construct the frame. Otherwise we may not
@@ -4435,6 +4248,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ bind(&done);
}
+#endif // V8_ENABLE_WEBASSEMBLY
// Skip callee-saved and return slots, which are pushed below.
required_slots -= base::bits::CountPopulation(saves);
@@ -4457,10 +4271,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
const int returns = frame()->GetReturnSlotCount();
- if (returns != 0) {
- // Create space for returns.
- __ lay(sp, MemOperand(sp, -returns * kSystemPointerSize));
- }
+ // Create space for returns.
+ __ AllocateStackSpace(returns * kSystemPointerSize);
}
void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
@@ -4489,12 +4301,12 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// We might need r3 for scratch.
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & r5.bit());
S390OperandConverter g(this, nullptr);
- const int parameter_count =
- static_cast<int>(call_descriptor->StackParameterCount());
+ const int parameter_slots =
+ static_cast<int>(call_descriptor->ParameterSlotCount());
- // {aditional_pop_count} is only greater than zero if {parameter_count = 0}.
+ // {additional_pop_count} is only greater than zero if {parameter_slots = 0}.
// Check RawMachineAssembler::PopAndReturn.
- if (parameter_count != 0) {
+ if (parameter_slots != 0) {
if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
} else if (__ emit_debug_code()) {
@@ -4505,12 +4317,12 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
Register argc_reg = r5;
// Functions with JS linkage have at least one parameter (the receiver).
- // If {parameter_count} == 0, it means it is a builtin with
+ // If {parameter_slots} == 0, it means it is a builtin with
// kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
// itself.
const bool drop_jsargs = frame_access_state()->has_frame() &&
call_descriptor->IsJSFunctionCall() &&
- parameter_count != 0;
+ parameter_slots != 0;
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
@@ -4535,25 +4347,25 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
if (drop_jsargs) {
// We must pop all arguments from the stack (including the receiver). This
- // number of arguments is given by max(1 + argc_reg, parameter_count).
+ // number of arguments is given by max(1 + argc_reg, parameter_slots).
__ AddS64(argc_reg, argc_reg, Operand(1)); // Also pop the receiver.
- if (parameter_count > 1) {
+ if (parameter_slots > 1) {
Label skip;
- __ CmpS64(argc_reg, Operand(parameter_count));
+ __ CmpS64(argc_reg, Operand(parameter_slots));
__ bgt(&skip);
- __ mov(argc_reg, Operand(parameter_count));
+ __ mov(argc_reg, Operand(parameter_slots));
__ bind(&skip);
}
__ Drop(argc_reg);
} else if (additional_pop_count->IsImmediate()) {
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
- __ Drop(parameter_count + additional_count);
- } else if (parameter_count == 0) {
+ __ Drop(parameter_slots + additional_count);
+ } else if (parameter_slots == 0) {
__ Drop(g.ToRegister(additional_pop_count));
} else {
- // {additional_pop_count} is guaranteed to be zero if {parameter_count !=
+ // {additional_pop_count} is guaranteed to be zero if {parameter_slots !=
// 0}. Check RawMachineAssembler::PopAndReturn.
- __ Drop(parameter_count);
+ __ Drop(parameter_slots);
}
__ Ret();
}
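For JS-linkage returns the sequence above drops max(argc + 1, parameter_slots) stack slots, matching the comment that all arguments including the receiver must be popped. A tiny sketch of that arithmetic, with hypothetical names standing in for the register values:

    #include <algorithm>

    int JSReturnSlotsToDrop(int argc_without_receiver, int parameter_slots) {
      int actual = argc_without_receiver + 1;    // AddS64(argc_reg, 1): count the receiver
      return std::max(actual, parameter_slots);  // the CmpS64 / bgt / mov sequence
    }
    // e.g. parameter_slots == 3 and one actual argument -> drop 3 slots;
    // five actual arguments -> drop 6 slots (5 arguments + the receiver).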
@@ -4593,26 +4405,16 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
switch (src.type()) {
case Constant::kInt32:
-#if V8_TARGET_ARCH_S390X
- if (false) {
-#else
- if (RelocInfo::IsWasmReference(src.rmode())) {
-#endif
- __ mov(dst, Operand(src.ToInt32(), src.rmode()));
- } else {
__ mov(dst, Operand(src.ToInt32()));
- }
break;
case Constant::kInt64:
-#if V8_TARGET_ARCH_S390X
+#if V8_ENABLE_WEBASSEMBLY
if (RelocInfo::IsWasmReference(src.rmode())) {
__ mov(dst, Operand(src.ToInt64(), src.rmode()));
- } else {
- __ mov(dst, Operand(src.ToInt64()));
+ break;
}
-#else
+#endif // V8_ENABLE_WEBASSEMBLY
__ mov(dst, Operand(src.ToInt64()));
-#endif // V8_TARGET_ARCH_S390X
break;
case Constant::kFloat32:
__ mov(dst, Operand::EmbeddedNumber(src.ToFloat32()));
diff --git a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
index 8068894b6b9..823160ed15b 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
+++ b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
@@ -221,7 +221,6 @@ namespace compiler {
V(S390_F32x4ExtractLane) \
V(S390_F32x4ReplaceLane) \
V(S390_F32x4Add) \
- V(S390_F32x4AddHoriz) \
V(S390_F32x4Sub) \
V(S390_F32x4Mul) \
V(S390_F32x4Eq) \
@@ -267,7 +266,6 @@ namespace compiler {
V(S390_I64x2SConvertI32x4High) \
V(S390_I64x2UConvertI32x4Low) \
V(S390_I64x2UConvertI32x4High) \
- V(S390_I64x2SignSelect) \
V(S390_I64x2Ne) \
V(S390_I64x2GtS) \
V(S390_I64x2GeS) \
@@ -276,7 +274,6 @@ namespace compiler {
V(S390_I32x4ExtractLane) \
V(S390_I32x4ReplaceLane) \
V(S390_I32x4Add) \
- V(S390_I32x4AddHoriz) \
V(S390_I32x4Sub) \
V(S390_I32x4Mul) \
V(S390_I32x4MinS) \
@@ -308,7 +305,6 @@ namespace compiler {
V(S390_I32x4ExtMulHighI16x8U) \
V(S390_I32x4ExtAddPairwiseI16x8S) \
V(S390_I32x4ExtAddPairwiseI16x8U) \
- V(S390_I32x4SignSelect) \
V(S390_I32x4TruncSatF64x2SZero) \
V(S390_I32x4TruncSatF64x2UZero) \
V(S390_I16x8Splat) \
@@ -316,7 +312,6 @@ namespace compiler {
V(S390_I16x8ExtractLaneS) \
V(S390_I16x8ReplaceLane) \
V(S390_I16x8Add) \
- V(S390_I16x8AddHoriz) \
V(S390_I16x8Sub) \
V(S390_I16x8Mul) \
V(S390_I16x8MinS) \
@@ -353,14 +348,12 @@ namespace compiler {
V(S390_I16x8ExtAddPairwiseI8x16S) \
V(S390_I16x8ExtAddPairwiseI8x16U) \
V(S390_I16x8Q15MulRSatS) \
- V(S390_I16x8SignSelect) \
V(S390_I8x16Splat) \
V(S390_I8x16ExtractLaneU) \
V(S390_I8x16ExtractLaneS) \
V(S390_I8x16ReplaceLane) \
V(S390_I8x16Add) \
V(S390_I8x16Sub) \
- V(S390_I8x16Mul) \
V(S390_I8x16MinS) \
V(S390_I8x16MinU) \
V(S390_I8x16MaxS) \
@@ -386,12 +379,11 @@ namespace compiler {
V(S390_I8x16BitMask) \
V(S390_I8x16Shuffle) \
V(S390_I8x16Swizzle) \
- V(S390_I8x16SignSelect) \
V(S390_I8x16Popcnt) \
- V(S390_V64x2AllTrue) \
- V(S390_V32x4AllTrue) \
- V(S390_V16x8AllTrue) \
- V(S390_V8x16AllTrue) \
+ V(S390_I64x2AllTrue) \
+ V(S390_I32x4AllTrue) \
+ V(S390_I16x8AllTrue) \
+ V(S390_I8x16AllTrue) \
V(S390_V128AnyTrue) \
V(S390_S128And) \
V(S390_S128Or) \
diff --git a/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
index de6abc56a3d..77d54fe43b7 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
@@ -129,7 +129,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_LoadReverse32RR:
case kS390_LoadReverse64RR:
case kS390_LoadReverseSimd128RR:
- case kS390_LoadReverseSimd128:
case kS390_LoadAndTestWord32:
case kS390_LoadAndTestWord64:
case kS390_LoadAndTestFloat32:
@@ -168,7 +167,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_F32x4ExtractLane:
case kS390_F32x4ReplaceLane:
case kS390_F32x4Add:
- case kS390_F32x4AddHoriz:
case kS390_F32x4Sub:
case kS390_F32x4Mul:
case kS390_F32x4Eq:
@@ -214,7 +212,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I64x2SConvertI32x4High:
case kS390_I64x2UConvertI32x4Low:
case kS390_I64x2UConvertI32x4High:
- case kS390_I64x2SignSelect:
case kS390_I64x2Ne:
case kS390_I64x2GtS:
case kS390_I64x2GeS:
@@ -223,7 +220,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I32x4ExtractLane:
case kS390_I32x4ReplaceLane:
case kS390_I32x4Add:
- case kS390_I32x4AddHoriz:
case kS390_I32x4Sub:
case kS390_I32x4Mul:
case kS390_I32x4MinS:
@@ -255,7 +251,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I32x4ExtMulHighI16x8U:
case kS390_I32x4ExtAddPairwiseI16x8S:
case kS390_I32x4ExtAddPairwiseI16x8U:
- case kS390_I32x4SignSelect:
case kS390_I32x4TruncSatF64x2SZero:
case kS390_I32x4TruncSatF64x2UZero:
case kS390_I16x8Splat:
@@ -263,7 +258,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I16x8ExtractLaneS:
case kS390_I16x8ReplaceLane:
case kS390_I16x8Add:
- case kS390_I16x8AddHoriz:
case kS390_I16x8Sub:
case kS390_I16x8Mul:
case kS390_I16x8MinS:
@@ -300,14 +294,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I16x8ExtAddPairwiseI8x16S:
case kS390_I16x8ExtAddPairwiseI8x16U:
case kS390_I16x8Q15MulRSatS:
- case kS390_I16x8SignSelect:
case kS390_I8x16Splat:
case kS390_I8x16ExtractLaneU:
case kS390_I8x16ExtractLaneS:
case kS390_I8x16ReplaceLane:
case kS390_I8x16Add:
case kS390_I8x16Sub:
- case kS390_I8x16Mul:
case kS390_I8x16MinS:
case kS390_I8x16MinU:
case kS390_I8x16MaxS:
@@ -333,12 +325,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I8x16BitMask:
case kS390_I8x16Shuffle:
case kS390_I8x16Swizzle:
- case kS390_I8x16SignSelect:
case kS390_I8x16Popcnt:
- case kS390_V64x2AllTrue:
- case kS390_V32x4AllTrue:
- case kS390_V16x8AllTrue:
- case kS390_V8x16AllTrue:
+ case kS390_I64x2AllTrue:
+ case kS390_I32x4AllTrue:
+ case kS390_I16x8AllTrue:
+ case kS390_I8x16AllTrue:
case kS390_V128AnyTrue:
case kS390_S128And:
case kS390_S128Or:
@@ -364,6 +355,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_LoadReverse16:
case kS390_LoadReverse32:
case kS390_LoadReverse64:
+ case kS390_LoadReverseSimd128:
case kS390_Peek:
case kS390_LoadDecompressTaggedSigned:
case kS390_LoadDecompressTaggedPointer:
diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index 972d268014d..f2375525bb3 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -396,7 +396,8 @@ bool ProduceWord32Result(Node* node) {
// return false;
// }
// }
- case IrOpcode::kLoad: {
+ case IrOpcode::kLoad:
+ case IrOpcode::kLoadImmutable: {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
switch (load_rep.representation()) {
case MachineRepresentation::kWord32:
@@ -680,7 +681,7 @@ void VisitBinOp(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitStackSlot(Node* node) {
StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
- int slot = frame_->AllocateSpillSlot(rep.size());
+ int slot = frame_->AllocateSpillSlot(rep.size(), rep.alignment());
OperandGenerator g(this);
Emit(kArchStackSlot, g.DefineAsRegister(node),
@@ -1891,7 +1892,8 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
break;
case IrOpcode::kWord32And:
return VisitTestUnderMask(this, value, cont);
- case IrOpcode::kLoad: {
+ case IrOpcode::kLoad:
+ case IrOpcode::kLoadImmutable: {
LoadRepresentation load_rep = LoadRepresentationOf(value->op());
switch (load_rep.representation()) {
case MachineRepresentation::kWord32:
@@ -2409,7 +2411,6 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(F64x2Min) \
V(F64x2Max) \
V(F32x4Add) \
- V(F32x4AddHoriz) \
V(F32x4Sub) \
V(F32x4Mul) \
V(F32x4Eq) \
@@ -2432,7 +2433,6 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(I64x2GtS) \
V(I64x2GeS) \
V(I32x4Add) \
- V(I32x4AddHoriz) \
V(I32x4Sub) \
V(I32x4Mul) \
V(I32x4MinS) \
@@ -2451,7 +2451,6 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(I32x4ExtMulLowI16x8U) \
V(I32x4ExtMulHighI16x8U) \
V(I16x8Add) \
- V(I16x8AddHoriz) \
V(I16x8Sub) \
V(I16x8Mul) \
V(I16x8MinS) \
@@ -2477,7 +2476,6 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(I16x8ExtMulHighI8x16U) \
V(I8x16Add) \
V(I8x16Sub) \
- V(I8x16Mul) \
V(I8x16MinS) \
V(I8x16MinU) \
V(I8x16MaxS) \
@@ -2566,10 +2564,10 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
#define SIMD_BOOL_LIST(V) \
V(V128AnyTrue) \
- V(V64x2AllTrue) \
- V(V32x4AllTrue) \
- V(V16x8AllTrue) \
- V(V8x16AllTrue)
+ V(I64x2AllTrue) \
+ V(I32x4AllTrue) \
+ V(I16x8AllTrue) \
+ V(I8x16AllTrue)
#define SIMD_CONVERSION_LIST(V) \
V(I32x4SConvertF32x4) \
@@ -2710,9 +2708,9 @@ SIMD_VISIT_PMIN_MAX(F32x4Pmax)
#undef SIMD_VISIT_PMIN_MAX
#undef SIMD_TYPES
+#if V8_ENABLE_WEBASSEMBLY
void InstructionSelector::VisitI8x16Shuffle(Node* node) {
uint8_t shuffle[kSimd128Size];
- uint8_t* shuffle_p = &shuffle[0];
bool is_swizzle;
CanonicalizeShuffle(node, shuffle, &is_swizzle);
S390OperandGenerator g(this);
@@ -2728,14 +2726,16 @@ void InstructionSelector::VisitI8x16Shuffle(Node* node) {
? max_index - current_index
: total_lane_count - current_index + max_index);
}
- shuffle_p = &shuffle_remapped[0];
Emit(kS390_I8x16Shuffle, g.DefineAsRegister(node),
g.UseUniqueRegister(input0), g.UseUniqueRegister(input1),
- g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_p)),
- g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_p + 4)),
- g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_p + 8)),
- g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_p + 12)));
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped)),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 4)),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 8)),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 12)));
}
+#else
+void InstructionSelector::VisitI8x16Shuffle(Node* node) { UNREACHABLE(); }
+#endif // V8_ENABLE_WEBASSEMBLY
void InstructionSelector::VisitI8x16Swizzle(Node* node) {
S390OperandGenerator g(this);
diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index 0a3e065bbe8..814f14fb4af 100644
--- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -15,12 +15,16 @@
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/gap-resolver.h"
+#include "src/compiler/backend/instruction-codes.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/heap/memory-chunk.h"
#include "src/objects/smi.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -206,21 +210,29 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
: OutOfLineCode(gen),
result_(result),
input_(input),
+#if V8_ENABLE_WEBASSEMBLY
stub_mode_(stub_mode),
+#endif // V8_ENABLE_WEBASSEMBLY
unwinding_info_writer_(unwinding_info_writer),
isolate_(gen->isolate()),
- zone_(gen->zone()) {}
+ zone_(gen->zone()) {
+ }
void Generate() final {
__ AllocateStackSpace(kDoubleSize);
unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kDoubleSize);
__ Movsd(MemOperand(rsp, 0), input_);
+#if V8_ENABLE_WEBASSEMBLY
if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
// A direct call to a wasm runtime stub defined in this module.
// Just encode the stub index. This will be patched when the code
// is added to the native module and copied into wasm code space.
__ near_call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
+#else
+ // For balance.
+ if (false) {
+#endif // V8_ENABLE_WEBASSEMBLY
} else if (tasm()->options().inline_offheap_trampolines) {
// With embedded builtins we do not need the isolate here. This allows
// the call to be generated asynchronously.
@@ -237,7 +249,9 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
private:
Register const result_;
XMMRegister const input_;
+#if V8_ENABLE_WEBASSEMBLY
StubCallMode stub_mode_;
+#endif // V8_ENABLE_WEBASSEMBLY
UnwindingInfoWriter* const unwinding_info_writer_;
Isolate* isolate_;
Zone* zone_;
@@ -255,8 +269,11 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
+#if V8_ENABLE_WEBASSEMBLY
stub_mode_(stub_mode),
- zone_(gen->zone()) {}
+#endif // V8_ENABLE_WEBASSEMBLY
+ zone_(gen->zone()) {
+ }
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -278,12 +295,14 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
__ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode);
+#if V8_ENABLE_WEBASSEMBLY
} else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
// A direct call to a wasm runtime stub defined in this module.
// Just encode the stub index. This will be patched when the code
// is added to the native module and copied into wasm code space.
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode, wasm::WasmCode::kRecordWrite);
+#endif // V8_ENABLE_WEBASSEMBLY
} else {
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode);
@@ -297,10 +316,13 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
+#if V8_ENABLE_WEBASSEMBLY
StubCallMode const stub_mode_;
+#endif // V8_ENABLE_WEBASSEMBLY
Zone* zone_;
};
+#if V8_ENABLE_WEBASSEMBLY
class WasmOutOfLineTrap : public OutOfLineCode {
public:
WasmOutOfLineTrap(CodeGenerator* gen, Instruction* instr)
@@ -329,7 +351,7 @@ class WasmOutOfLineTrap : public OutOfLineCode {
__ LeaveFrame(StackFrame::WASM);
auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
size_t pop_size =
- call_descriptor->StackParameterCount() * kSystemPointerSize;
+ call_descriptor->ParameterSlotCount() * kSystemPointerSize;
// Use rcx as a scratch register, we return anyways immediately.
__ Ret(static_cast<int>(pop_size), rcx);
} else {
@@ -354,6 +376,7 @@ class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {
: WasmOutOfLineTrap(gen, instr), pc_(pc) {}
void Generate() final {
+ DCHECK(FLAG_wasm_bounds_checks && FLAG_wasm_trap_handler);
gen_->AddProtectedInstructionLanding(pc_, __ pc_offset());
GenerateWithTrapId(TrapId::kTrapMemOutOfBounds);
}
@@ -371,6 +394,15 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
}
}
+#else
+
+void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
+ InstructionCode opcode, Instruction* instr, int pc) {
+ DCHECK_NE(kMemoryAccessProtected, AccessModeField::decode(opcode));
+}
+
+#endif // V8_ENABLE_WEBASSEMBLY
+
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
X64OperandConverter const& i) {
@@ -633,47 +665,44 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
} \
} while (false)
-#define ASSEMBLE_SIMD_ALL_TRUE(opcode) \
- do { \
- Register dst = i.OutputRegister(); \
- XMMRegister tmp = i.TempSimd128Register(0); \
- __ xorq(dst, dst); \
- __ Pxor(tmp, tmp); \
- __ opcode(tmp, i.InputSimd128Register(0)); \
- __ Ptest(tmp, tmp); \
- __ setcc(equal, dst); \
+#define ASSEMBLE_SIMD_ALL_TRUE(opcode) \
+ do { \
+ Register dst = i.OutputRegister(); \
+ __ xorq(dst, dst); \
+ __ Pxor(kScratchDoubleReg, kScratchDoubleReg); \
+ __ opcode(kScratchDoubleReg, i.InputSimd128Register(0)); \
+ __ Ptest(kScratchDoubleReg, kScratchDoubleReg); \
+ __ setcc(equal, dst); \
} while (false)
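ASSEMBLE_SIMD_ALL_TRUE now reuses kScratchDoubleReg instead of a dedicated temp: pcmpeq against an all-zero register produces a non-zero mask exactly when some input lane is zero, so ptest leaves ZF clear and setcc(equal) writes 0. A scalar model of the reduction, using an illustrative helper that is not part of the codebase:

    #include <cstddef>

    template <typename Lane, std::size_t N>
    int AllTrue(const Lane (&lanes)[N]) {
      for (std::size_t i = 0; i < N; ++i) {
        if (lanes[i] == 0) return 0;  // pcmpeq flags the zero lane, ptest clears ZF
      }
      return 1;                       // scratch mask stayed all-zero, ZF set
    }
    // e.g. std::int32_t v[4] = {1, -2, 3, 4};  AllTrue(v) == 1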
// This macro will directly emit the opcode if the shift is an immediate - the
// shift value will be taken modulo 2^width. Otherwise, it will emit code to
// perform the modulus operation.
-#define ASSEMBLE_SIMD_SHIFT(opcode, width) \
- do { \
- XMMRegister dst = i.OutputSimd128Register(); \
- if (HasImmediateInput(instr, 1)) { \
- if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope avx_scope(tasm(), AVX); \
- __ v##opcode(dst, i.InputSimd128Register(0), \
- byte{i.InputInt##width(1)}); \
- } else { \
- DCHECK_EQ(dst, i.InputSimd128Register(0)); \
- __ opcode(dst, byte{i.InputInt##width(1)}); \
- } \
- } else { \
- XMMRegister tmp = i.TempSimd128Register(0); \
- Register tmp_shift = i.TempRegister(1); \
- constexpr int mask = (1 << width) - 1; \
- __ movq(tmp_shift, i.InputRegister(1)); \
- __ andq(tmp_shift, Immediate(mask)); \
- __ Movq(tmp, tmp_shift); \
- if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope avx_scope(tasm(), AVX); \
- __ v##opcode(dst, i.InputSimd128Register(0), tmp); \
- } else { \
- DCHECK_EQ(dst, i.InputSimd128Register(0)); \
- __ opcode(dst, tmp); \
- } \
- } \
+#define ASSEMBLE_SIMD_SHIFT(opcode, width) \
+ do { \
+ XMMRegister dst = i.OutputSimd128Register(); \
+ if (HasImmediateInput(instr, 1)) { \
+ if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope avx_scope(tasm(), AVX); \
+ __ v##opcode(dst, i.InputSimd128Register(0), \
+ byte{i.InputInt##width(1)}); \
+ } else { \
+ DCHECK_EQ(dst, i.InputSimd128Register(0)); \
+ __ opcode(dst, byte{i.InputInt##width(1)}); \
+ } \
+ } else { \
+ constexpr int mask = (1 << width) - 1; \
+ __ movq(kScratchRegister, i.InputRegister(1)); \
+ __ andq(kScratchRegister, Immediate(mask)); \
+ __ Movq(kScratchDoubleReg, kScratchRegister); \
+ if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope avx_scope(tasm(), AVX); \
+ __ v##opcode(dst, i.InputSimd128Register(0), kScratchDoubleReg); \
+ } else { \
+ DCHECK_EQ(dst, i.InputSimd128Register(0)); \
+ __ opcode(dst, kScratchDoubleReg); \
+ } \
+ } \
} while (false)
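In ASSEMBLE_SIMD_SHIFT the `width` argument is the bit-width of the shift immediate (e.g. 5 for 32-bit lanes), so the variable-shift path masks the count with (1 << width) - 1, i.e. takes it modulo the lane size. A scalar sketch for the 32-bit case, with a hypothetical helper name:

    #include <cstdint>

    std::uint32_t ShlLane32(std::uint32_t lane, std::uint64_t count) {
      constexpr int kWidth = 5;                                     // immediate bits for 32-bit lanes
      constexpr std::uint64_t kMask = (std::uint64_t{1} << kWidth) - 1;  // == 31
      return lane << (count & kMask);                               // count taken modulo 32
    }
    // e.g. ShlLane32(1, 33) == 2, because 33 & 31 == 1.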
#define ASSEMBLE_PINSR(ASM_INSTR) \
@@ -756,14 +785,14 @@ void SetupSimdImmediateInRegister(TurboAssembler* assembler, uint32_t* imms,
} // namespace
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
- int first_unused_stack_slot) {
+ int first_unused_slot_offset) {
CodeGenerator::PushTypeFlags flags(kImmediatePush | kScalarPush);
ZoneVector<MoveOperands*> pushes(zone());
GetPushCompatibleMoves(instr, flags, &pushes);
if (!pushes.empty() &&
(LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
- first_unused_stack_slot)) {
+ first_unused_slot_offset)) {
DCHECK(!instr->HasCallDescriptorFlag(CallDescriptor::kIsTailCallForTierUp));
X64OperandConverter g(this, instr);
for (auto move : pushes) {
@@ -780,7 +809,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand source_location(LocationOperand::cast(source));
__ Push(source_location.GetRegister());
} else if (source.IsImmediate()) {
- __ Push(Immediate(ImmediateOperand::cast(source).inline_value()));
+ __ Push(Immediate(ImmediateOperand::cast(source).inline_int32_value()));
} else {
// Pushes of non-scalar data types is not supported.
UNIMPLEMENTED();
@@ -790,14 +819,14 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
}
}
AdjustStackPointerForTailCall(instr, tasm(), linkage(), info(),
- frame_access_state(), first_unused_stack_slot,
+ frame_access_state(), first_unused_slot_offset,
false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
- int first_unused_stack_slot) {
+ int first_unused_slot_offset) {
AdjustStackPointerForTailCall(instr, tasm(), linkage(), info(),
- frame_access_state(), first_unused_stack_slot);
+ frame_access_state(), first_unused_slot_offset);
}
// Check that {kJavaScriptCallCodeStartRegister} is correct.
@@ -875,6 +904,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
+#if V8_ENABLE_WEBASSEMBLY
case kArchCallWasmFunction: {
if (HasImmediateInput(instr, 0)) {
Constant constant = i.ToConstant(instr->InputAt(0));
@@ -900,16 +930,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallCodeObject: {
+ case kArchTailCallWasm: {
if (HasImmediateInput(instr, 0)) {
- Handle<Code> code = i.InputCode(0);
- __ Jump(code, RelocInfo::CODE_TARGET);
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt64());
+ if (DetermineStubCallMode() == StubCallMode::kCallWasmRuntimeStub) {
+ __ near_jmp(wasm_code, constant.rmode());
+ } else {
+ __ Move(kScratchRegister, wasm_code, constant.rmode());
+ __ jmp(kScratchRegister);
+ }
} else {
Register reg = i.InputRegister(0);
- DCHECK_IMPLIES(
- instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
- reg == kJavaScriptCallCodeStartRegister);
- __ LoadCodeObjectEntry(reg, reg);
if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
@@ -921,18 +953,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->SetFrameAccessToDefault();
break;
}
- case kArchTailCallWasm: {
+#endif // V8_ENABLE_WEBASSEMBLY
+ case kArchTailCallCodeObject: {
if (HasImmediateInput(instr, 0)) {
- Constant constant = i.ToConstant(instr->InputAt(0));
- Address wasm_code = static_cast<Address>(constant.ToInt64());
- if (DetermineStubCallMode() == StubCallMode::kCallWasmRuntimeStub) {
- __ near_jmp(wasm_code, constant.rmode());
- } else {
- __ Move(kScratchRegister, wasm_code, constant.rmode());
- __ jmp(kScratchRegister);
- }
+ Handle<Code> code = i.InputCode(0);
+ __ Jump(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ LoadCodeObjectEntry(reg, reg);
if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
@@ -1013,12 +1044,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
Label return_location;
+#if V8_ENABLE_WEBASSEMBLY
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
// Put the return address in a stack slot.
__ leaq(kScratchRegister, Operand(&return_location, 0));
__ movq(MemOperand(rbp, WasmExitFrameConstants::kCallingPCOffset),
kScratchRegister);
}
+#endif // V8_ENABLE_WEBASSEMBLY
if (HasImmediateInput(instr, 0)) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_parameters);
@@ -1027,9 +1060,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ CallCFunction(func, num_parameters);
}
__ bind(&return_location);
+#if V8_ENABLE_WEBASSEMBLY
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
RecordSafepoint(instr->reference_map());
}
+#endif // V8_ENABLE_WEBASSEMBLY
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
// pointer in CallCFunction. However, for certain architectures (e.g.
@@ -2453,10 +2488,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vfmadd231pd(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(2));
} else {
- XMMRegister tmp = i.TempSimd128Register(0);
- __ Movapd(tmp, i.InputSimd128Register(2));
- __ Mulpd(tmp, i.InputSimd128Register(1));
- __ Addpd(i.OutputSimd128Register(), tmp);
+ __ Movapd(kScratchDoubleReg, i.InputSimd128Register(2));
+ __ Mulpd(kScratchDoubleReg, i.InputSimd128Register(1));
+ __ Addpd(i.OutputSimd128Register(), kScratchDoubleReg);
}
break;
}
@@ -2466,10 +2500,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vfnmadd231pd(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(2));
} else {
- XMMRegister tmp = i.TempSimd128Register(0);
- __ Movapd(tmp, i.InputSimd128Register(2));
- __ Mulpd(tmp, i.InputSimd128Register(1));
- __ Subpd(i.OutputSimd128Register(), tmp);
+ __ Movapd(kScratchDoubleReg, i.InputSimd128Register(2));
+ __ Mulpd(kScratchDoubleReg, i.InputSimd128Register(1));
+ __ Subpd(i.OutputSimd128Register(), kScratchDoubleReg);
}
break;
}
@@ -2614,11 +2647,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_BINOP(addps);
break;
}
- case kX64F32x4AddHoriz: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Haddps(i.OutputSimd128Register(), i.InputSimd128Register(1));
- break;
- }
case kX64F32x4Sub: {
ASSEMBLE_SIMD_BINOP(subps);
break;
@@ -2692,10 +2720,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vfmadd231ps(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(2));
} else {
- XMMRegister tmp = i.TempSimd128Register(0);
- __ Movaps(tmp, i.InputSimd128Register(2));
- __ Mulps(tmp, i.InputSimd128Register(1));
- __ Addps(i.OutputSimd128Register(), tmp);
+ __ Movaps(kScratchDoubleReg, i.InputSimd128Register(2));
+ __ Mulps(kScratchDoubleReg, i.InputSimd128Register(1));
+ __ Addps(i.OutputSimd128Register(), kScratchDoubleReg);
}
break;
}
@@ -2705,23 +2732,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vfnmadd231ps(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(2));
} else {
- XMMRegister tmp = i.TempSimd128Register(0);
- __ Movaps(tmp, i.InputSimd128Register(2));
- __ Mulps(tmp, i.InputSimd128Register(1));
- __ Subps(i.OutputSimd128Register(), tmp);
+ __ Movaps(kScratchDoubleReg, i.InputSimd128Register(2));
+ __ Mulps(kScratchDoubleReg, i.InputSimd128Register(1));
+ __ Subps(i.OutputSimd128Register(), kScratchDoubleReg);
}
break;
}
case kX64F32x4Pmin: {
- XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- __ Minps(dst, i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(minps);
break;
}
case kX64F32x4Pmax: {
- XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- __ Maxps(dst, i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(maxps);
break;
}
case kX64F32x4Round: {
@@ -2737,15 +2759,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F64x2Pmin: {
- XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- __ Minpd(dst, i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(minpd);
break;
}
case kX64F64x2Pmax: {
- XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- __ Maxpd(dst, i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(maxpd);
break;
}
case kX64I64x2Splat: {
@@ -2763,7 +2781,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I64x2Abs: {
- __ I64x2Abs(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ I64x2Abs(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchDoubleReg);
break;
}
case kX64I64x2Neg: {
@@ -2818,7 +2837,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister left = i.InputSimd128Register(0);
XMMRegister right = i.InputSimd128Register(1);
XMMRegister tmp1 = i.TempSimd128Register(0);
- XMMRegister tmp2 = i.TempSimd128Register(1);
+ XMMRegister tmp2 = kScratchDoubleReg;
__ Movdqa(tmp1, left);
__ Movdqa(tmp2, right);
@@ -2845,20 +2864,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I64x2Ne: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister tmp = i.TempSimd128Register(0);
__ Pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(1));
- __ Pcmpeqq(tmp, tmp);
- __ Pxor(i.OutputSimd128Register(), tmp);
+ __ Pcmpeqq(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pxor(i.OutputSimd128Register(), kScratchDoubleReg);
break;
}
case kX64I64x2GtS: {
__ I64x2GtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
case kX64I64x2GeS: {
__ I64x2GeS(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
case kX64I64x2ShrU: {
@@ -2868,25 +2886,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I64x2ExtMulLowI32x4S: {
__ I64x2ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), /*low=*/true,
+ i.InputSimd128Register(1), kScratchDoubleReg, /*low=*/true,
/*is_signed=*/true);
break;
}
case kX64I64x2ExtMulHighI32x4S: {
__ I64x2ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), /*low=*/false,
+ i.InputSimd128Register(1), kScratchDoubleReg,
+ /*low=*/false,
/*is_signed=*/true);
break;
}
case kX64I64x2ExtMulLowI32x4U: {
__ I64x2ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), /*low=*/true,
+ i.InputSimd128Register(1), kScratchDoubleReg, /*low=*/true,
/*is_signed=*/false);
break;
}
case kX64I64x2ExtMulHighI32x4U: {
__ I64x2ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), /*low=*/false,
+ i.InputSimd128Register(1), kScratchDoubleReg,
+ /*low=*/false,
/*is_signed=*/false);
break;
}
@@ -2905,7 +2925,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I64x2UConvertI32x4High: {
__ I64x2UConvertI32x4High(i.OutputSimd128Register(),
- i.InputSimd128Register(0));
+ i.InputSimd128Register(0), kScratchDoubleReg);
break;
}
case kX64I32x4Splat: {
@@ -2926,20 +2946,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64I32x4SConvertF32x4: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
XMMRegister dst = i.OutputSimd128Register();
- XMMRegister tmp = i.TempSimd128Register(0);
// NAN->0
- __ Movaps(tmp, dst);
- __ Cmpeqps(tmp, tmp);
- __ Pand(dst, tmp);
+ __ Movaps(kScratchDoubleReg, dst);
+ __ Cmpeqps(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pand(dst, kScratchDoubleReg);
// Set top bit if >= 0 (but not -0.0!)
- __ Pxor(tmp, dst);
+ __ Pxor(kScratchDoubleReg, dst);
// Convert
__ Cvttps2dq(dst, dst);
// Set top bit if >=0 is now < 0
- __ Pand(tmp, dst);
- __ Psrad(tmp, byte{31});
+ __ Pand(kScratchDoubleReg, dst);
+ __ Psrad(kScratchDoubleReg, byte{31});
// Set positive overflow lanes to 0x7FFFFFFF
- __ Pxor(dst, tmp);
+ __ Pxor(dst, kScratchDoubleReg);
break;
}
case kX64I32x4SConvertI16x8Low: {
@@ -2977,10 +2996,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_BINOP(paddd);
break;
}
- case kX64I32x4AddHoriz: {
- ASSEMBLE_SIMD_BINOP(phaddd);
- break;
- }
case kX64I32x4Sub: {
ASSEMBLE_SIMD_BINOP(psubd);
break;
@@ -3002,10 +3017,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4Ne: {
- XMMRegister tmp = i.TempSimd128Register(0);
__ Pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1));
- __ Pcmpeqd(tmp, tmp);
- __ Pxor(i.OutputSimd128Register(), tmp);
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pxor(i.OutputSimd128Register(), kScratchDoubleReg);
break;
}
case kX64I32x4GtS: {
@@ -3053,7 +3067,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I32x4UConvertI16x8High: {
__ I32x4UConvertI16x8High(i.OutputSimd128Register(),
- i.InputSimd128Register(0));
+ i.InputSimd128Register(0), kScratchDoubleReg);
break;
}
case kX64I32x4ShrU: {
@@ -3072,11 +3086,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64I32x4GtU: {
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(1);
- XMMRegister tmp = i.TempSimd128Register(0);
__ Pmaxud(dst, src);
__ Pcmpeqd(dst, src);
- __ Pcmpeqd(tmp, tmp);
- __ Pxor(dst, tmp);
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pxor(dst, kScratchDoubleReg);
break;
}
case kX64I32x4GeU: {
@@ -3197,10 +3210,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_BINOP(paddsw);
break;
}
- case kX64I16x8AddHoriz: {
- ASSEMBLE_SIMD_BINOP(phaddw);
- break;
- }
case kX64I16x8Sub: {
ASSEMBLE_SIMD_BINOP(psubw);
break;
@@ -3226,11 +3235,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I16x8Ne: {
- XMMRegister tmp = i.TempSimd128Register(0);
XMMRegister dst = i.OutputSimd128Register();
__ Pcmpeqw(dst, i.InputSimd128Register(1));
- __ Pcmpeqw(tmp, tmp);
- __ Pxor(dst, tmp);
+ __ Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pxor(dst, kScratchDoubleReg);
break;
}
case kX64I16x8GtS: {
@@ -3250,7 +3258,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I16x8UConvertI8x16High: {
__ I16x8UConvertI8x16High(i.OutputSimd128Register(),
- i.InputSimd128Register(0));
+ i.InputSimd128Register(0), kScratchDoubleReg);
break;
}
case kX64I16x8ShrU: {
@@ -3281,11 +3289,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64I16x8GtU: {
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(1);
- XMMRegister tmp = i.TempSimd128Register(0);
__ Pmaxuw(dst, src);
__ Pcmpeqw(dst, src);
- __ Pcmpeqw(tmp, tmp);
- __ Pxor(dst, tmp);
+ __ Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pxor(dst, kScratchDoubleReg);
break;
}
case kX64I16x8GeU: {
@@ -3305,34 +3312,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I16x8BitMask: {
Register dst = i.OutputRegister();
- XMMRegister tmp = i.TempSimd128Register(0);
- __ Packsswb(tmp, i.InputSimd128Register(0));
- __ Pmovmskb(dst, tmp);
+ __ Packsswb(kScratchDoubleReg, i.InputSimd128Register(0));
+ __ Pmovmskb(dst, kScratchDoubleReg);
__ shrq(dst, Immediate(8));
break;
}
case kX64I16x8ExtMulLowI8x16S: {
- __ I16x8ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), /*low=*/true,
- /*is_signed=*/true);
+ __ I16x8ExtMulLow(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg,
+ /*is_signed=*/true);
break;
}
case kX64I16x8ExtMulHighI8x16S: {
- __ I16x8ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), /*low=*/false,
- /*is_signed=*/true);
+ __ I16x8ExtMulHighS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
case kX64I16x8ExtMulLowI8x16U: {
- __ I16x8ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), /*low=*/true,
- /*is_signed=*/false);
+ __ I16x8ExtMulLow(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg,
+ /*is_signed=*/false);
break;
}
case kX64I16x8ExtMulHighI8x16U: {
- __ I16x8ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), /*low=*/false,
- /*is_signed=*/false);
+ __ I16x8ExtMulHighU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
case kX64I16x8ExtAddPairwiseI8x16S: {
@@ -3355,13 +3359,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I8x16Splat: {
XMMRegister dst = i.OutputSimd128Register();
- if (HasRegisterInput(instr, 0)) {
- __ Movd(dst, i.InputRegister(0));
+ if (CpuFeatures::IsSupported(AVX2)) {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ CpuFeatureScope avx2_scope(tasm(), AVX2);
+ if (HasRegisterInput(instr, 0)) {
+ __ vmovd(kScratchDoubleReg, i.InputRegister(0));
+ __ vpbroadcastb(dst, kScratchDoubleReg);
+ } else {
+ __ vpbroadcastb(dst, i.InputOperand(0));
+ }
} else {
- __ Movd(dst, i.InputOperand(0));
+ if (HasRegisterInput(instr, 0)) {
+ __ Movd(dst, i.InputRegister(0));
+ } else {
+ __ Movd(dst, i.InputOperand(0));
+ }
+ __ Xorps(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pshufb(dst, kScratchDoubleReg);
}
- __ Xorps(kScratchDoubleReg, kScratchDoubleReg);
- __ Pshufb(dst, kScratchDoubleReg);
+
break;
}
case kX64Pextrb: {
@@ -3509,39 +3525,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_BINOP(psubsb);
break;
}
- case kX64I8x16Mul: {
- XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- XMMRegister right = i.InputSimd128Register(1);
- XMMRegister tmp = i.TempSimd128Register(0);
- // I16x8 view of I8x16
- // left = AAaa AAaa ... AAaa AAaa
- // right= BBbb BBbb ... BBbb BBbb
- // t = 00AA 00AA ... 00AA 00AA
- // s = 00BB 00BB ... 00BB 00BB
- __ Movdqa(tmp, dst);
- __ Movdqa(kScratchDoubleReg, right);
- __ Psrlw(tmp, byte{8});
- __ Psrlw(kScratchDoubleReg, byte{8});
- // dst = left * 256
- __ Psllw(dst, byte{8});
- // t = I16x8Mul(t, s)
- // => __PP __PP ... __PP __PP
- __ Pmullw(tmp, kScratchDoubleReg);
- // dst = I16x8Mul(left * 256, right)
- // => pp__ pp__ ... pp__ pp__
- __ Pmullw(dst, right);
- // t = I16x8Shl(t, 8)
- // => PP00 PP00 ... PP00 PP00
- __ Psllw(tmp, byte{8});
- // dst = I16x8Shr(dst, 8)
- // => 00pp 00pp ... 00pp 00pp
- __ Psrlw(dst, byte{8});
- // dst = I16x8Or(dst, t)
- // => PPpp PPpp ... PPpp PPpp
- __ Por(dst, tmp);
- break;
- }
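The deleted kX64I8x16Mul block multiplied bytes through 16-bit lanes: the high bytes are shifted down and multiplied, the low bytes are multiplied with the left operand pre-shifted up, and the two partial results are shifted and OR-ed back together, keeping only the low 8 bits of each product. A scalar model of one 16-bit lane, a hypothetical helper shown only to mirror the removed comments:

    #include <cstdint>

    std::uint16_t MulBytesInLane(std::uint16_t left, std::uint16_t right) {
      std::uint8_t lo = static_cast<std::uint8_t>((left & 0xff) * (right & 0xff));  // aa * bb, low 8 bits
      std::uint8_t hi = static_cast<std::uint8_t>((left >> 8) * (right >> 8));      // AA * BB, low 8 bits
      return static_cast<std::uint16_t>(lo | (hi << 8));                            // "PPpp"
    }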
case kX64I8x16MinS: {
ASSEMBLE_SIMD_BINOP(pminsb);
break;
@@ -3555,11 +3538,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16Ne: {
- XMMRegister tmp = i.TempSimd128Register(0);
XMMRegister dst = i.OutputSimd128Register();
__ Pcmpeqb(dst, i.InputSimd128Register(1));
- __ Pcmpeqb(tmp, tmp);
- __ Pxor(dst, tmp);
+ __ Pcmpeqb(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pxor(dst, kScratchDoubleReg);
break;
}
case kX64I8x16GtS: {
@@ -3629,11 +3611,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64I8x16GtU: {
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(1);
- XMMRegister tmp = i.TempSimd128Register(0);
__ Pmaxub(dst, src);
__ Pcmpeqb(dst, src);
- __ Pcmpeqb(tmp, tmp);
- __ Pxor(dst, tmp);
+ __ Pcmpeqb(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pxor(dst, kScratchDoubleReg);
break;
}
case kX64I8x16GeU: {
@@ -3655,64 +3636,32 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pmovmskb(i.OutputRegister(), i.InputSimd128Register(0));
break;
}
- case kX64I8x16SignSelect: {
- __ Pblendvb(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), i.InputSimd128Register(2));
- break;
- }
- case kX64I16x8SignSelect: {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpsraw(kScratchDoubleReg, i.InputSimd128Register(2), 15);
- __ vpblendvb(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), kScratchDoubleReg);
- } else {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister mask = i.InputSimd128Register(2);
- DCHECK_EQ(xmm0, mask);
- __ movaps(kScratchDoubleReg, mask);
- __ xorps(mask, mask);
- __ pcmpgtw(mask, kScratchDoubleReg);
- __ pblendvb(i.OutputSimd128Register(), i.InputSimd128Register(1));
- // Restore mask.
- __ movaps(mask, kScratchDoubleReg);
- }
- break;
- }
- case kX64I32x4SignSelect: {
- __ Blendvps(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), i.InputSimd128Register(2));
- break;
- }
case kX64I32x4ExtMulLowI16x8S: {
__ I32x4ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), /*low=*/true,
+ i.InputSimd128Register(1), kScratchDoubleReg, /*low=*/true,
/*is_signed=*/true);
break;
}
case kX64I32x4ExtMulHighI16x8S: {
__ I32x4ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), /*low=*/false,
+ i.InputSimd128Register(1), kScratchDoubleReg,
+ /*low=*/false,
/*is_signed=*/true);
break;
}
case kX64I32x4ExtMulLowI16x8U: {
__ I32x4ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), /*low=*/true,
+ i.InputSimd128Register(1), kScratchDoubleReg, /*low=*/true,
/*is_signed=*/false);
break;
}
case kX64I32x4ExtMulHighI16x8U: {
__ I32x4ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), /*low=*/false,
+ i.InputSimd128Register(1), kScratchDoubleReg,
+ /*low=*/false,
/*is_signed=*/false);
break;
}
- case kX64I64x2SignSelect: {
- __ Blendvpd(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), i.InputSimd128Register(2));
- break;
- }
case kX64S128And: {
ASSEMBLE_SIMD_BINOP(pand);
break;
@@ -3739,7 +3688,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64S128Select: {
__ S128Select(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), i.InputSimd128Register(2));
+ i.InputSimd128Register(1), i.InputSimd128Register(2),
+ kScratchDoubleReg);
break;
}
case kX64S128AndNot: {
@@ -3751,8 +3701,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16Swizzle: {
+ bool omit_add = MiscField::decode(instr->opcode());
__ I8x16Swizzle(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ i.InputSimd128Register(1), omit_add);
break;
}
case kX64I8x16Shuffle: {
@@ -4125,28 +4076,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// comparison instruction used matters, e.g. given 0xff00, pcmpeqb returns
// 0x0011, pcmpeqw returns 0x0000, ptest will set ZF to 0 and 1
// respectively.
- case kX64V64x2AllTrue: {
+ case kX64I64x2AllTrue: {
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqq);
break;
}
- case kX64V32x4AllTrue: {
+ case kX64I32x4AllTrue: {
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqd);
break;
}
- case kX64V16x8AllTrue: {
+ case kX64I16x8AllTrue: {
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqw);
break;
}
- case kX64V8x16AllTrue: {
+ case kX64I8x16AllTrue: {
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqb);
break;
}
- case kX64Prefetch:
- __ prefetch(i.MemoryOperand(), 1);
- break;
- case kX64PrefetchNta:
- __ prefetch(i.MemoryOperand(), 0);
- break;
case kWord32AtomicExchangeInt8: {
__ xchgb(i.InputRegister(0), i.MemoryOperand(1));
__ movsxbl(i.InputRegister(0), i.InputRegister(0));
@@ -4436,6 +4381,7 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
}
+#if V8_ENABLE_WEBASSEMBLY
void CodeGenerator::AssembleArchTrap(Instruction* instr,
FlagsCondition condition) {
auto ool = zone()->New<WasmOutOfLineTrap>(this, instr);
@@ -4449,6 +4395,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ j(FlagsConditionToCondition(condition), tlabel);
__ bind(&end);
}
+#endif // V8_ENABLE_WEBASSEMBLY
// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -4502,6 +4449,11 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
__ jmp(Operand(kScratchRegister, input, times_8, 0));
}
+void CodeGenerator::AssembleArchSelect(Instruction* instr,
+ FlagsCondition condition) {
+ UNIMPLEMENTED();
+}
+
namespace {
static const int kQuadWordSize = 16;
@@ -4540,15 +4492,18 @@ void CodeGenerator::AssembleConstructFrame() {
if (call_descriptor->IsCFunctionCall()) {
__ pushq(rbp);
__ movq(rbp, rsp);
+#if V8_ENABLE_WEBASSEMBLY
if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
__ Push(Immediate(StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY)));
// Reserve stack space for saving the c_entry_fp later.
__ AllocateStackSpace(kSystemPointerSize);
}
+#endif // V8_ENABLE_WEBASSEMBLY
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
} else {
__ StubPrologue(info()->GetOutputStackFrameType());
+#if V8_ENABLE_WEBASSEMBLY
if (call_descriptor->IsWasmFunctionCall()) {
__ pushq(kWasmInstanceRegister);
} else if (call_descriptor->IsWasmImportWrapper() ||
@@ -4569,6 +4524,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ AllocateStackSpace(kSystemPointerSize);
}
}
+#endif // V8_ENABLE_WEBASSEMBLY
}
unwinding_info_writer_.MarkFrameConstructed(pc_base);
@@ -4595,6 +4551,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (required_slots > 0) {
DCHECK(frame_access_state()->has_frame());
+#if V8_ENABLE_WEBASSEMBLY
if (info()->IsWasm() && required_slots > 128) {
// For WebAssembly functions with big frames we have to do the stack
// overflow check before we construct the frame. Otherwise we may not
@@ -4623,6 +4580,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
__ bind(&done);
}
+#endif // V8_ENABLE_WEBASSEMBLY
// Skip callee-saved and return slots, which are created below.
required_slots -= base::bits::CountPopulation(saves);
@@ -4699,12 +4657,11 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & rcx.bit());
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & r10.bit());
X64OperandConverter g(this, nullptr);
- int parameter_count =
- static_cast<int>(call_descriptor->StackParameterCount());
+ int parameter_slots = static_cast<int>(call_descriptor->ParameterSlotCount());
- // {aditional_pop_count} is only greater than zero if {parameter_count = 0}.
+ // {aditional_pop_count} is only greater than zero if {parameter_slots = 0}.
// Check RawMachineAssembler::PopAndReturn.
- if (parameter_count != 0) {
+ if (parameter_slots != 0) {
if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
} else if (__ emit_debug_code()) {
@@ -4715,12 +4672,12 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
Register argc_reg = rcx;
// Functions with JS linkage have at least one parameter (the receiver).
- // If {parameter_count} == 0, it means it is a builtin with
+ // If {parameter_slots} == 0, it means it is a builtin with
// kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
// itself.
const bool drop_jsargs = frame_access_state()->has_frame() &&
call_descriptor->IsJSFunctionCall() &&
- parameter_count != 0;
+ parameter_slots != 0;
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
@@ -4743,16 +4700,16 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
if (drop_jsargs) {
// We must pop all arguments from the stack (including the receiver). This
- // number of arguments is given by max(1 + argc_reg, parameter_count).
- int parameter_count_without_receiver =
- parameter_count - 1; // Exclude the receiver to simplify the
+ // number of arguments is given by max(1 + argc_reg, parameter_slots).
+ int parameter_slots_without_receiver =
+ parameter_slots - 1; // Exclude the receiver to simplify the
// computation. We'll account for it at the end.
Label mismatch_return;
Register scratch_reg = r10;
DCHECK_NE(argc_reg, scratch_reg);
- __ cmpq(argc_reg, Immediate(parameter_count_without_receiver));
+ __ cmpq(argc_reg, Immediate(parameter_slots_without_receiver));
__ j(greater, &mismatch_return, Label::kNear);
- __ Ret(parameter_count * kSystemPointerSize, scratch_reg);
+ __ Ret(parameter_slots * kSystemPointerSize, scratch_reg);
__ bind(&mismatch_return);
__ PopReturnAddressTo(scratch_reg);
__ leaq(rsp, Operand(rsp, argc_reg, times_system_pointer_size,
@@ -4763,13 +4720,13 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
} else if (additional_pop_count->IsImmediate()) {
Register scratch_reg = r10;
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
- size_t pop_size = (parameter_count + additional_count) * kSystemPointerSize;
+ size_t pop_size = (parameter_slots + additional_count) * kSystemPointerSize;
CHECK_LE(pop_size, static_cast<size_t>(std::numeric_limits<int>::max()));
__ Ret(static_cast<int>(pop_size), scratch_reg);
} else {
Register pop_reg = g.ToRegister(additional_pop_count);
Register scratch_reg = pop_reg == r10 ? rcx : r10;
- int pop_size = static_cast<int>(parameter_count * kSystemPointerSize);
+ int pop_size = static_cast<int>(parameter_slots * kSystemPointerSize);
__ PopReturnAddressTo(scratch_reg);
__ leaq(rsp, Operand(rsp, pop_reg, times_system_pointer_size,
static_cast<int>(pop_size)));
@@ -4786,7 +4743,13 @@ void CodeGenerator::PrepareForDeoptimizationExits(
void CodeGenerator::IncrementStackAccessCounter(
InstructionOperand* source, InstructionOperand* destination) {
DCHECK(FLAG_trace_turbo_stack_accesses);
- if (!info()->IsOptimizing() && !info()->IsWasm()) return;
+ if (!info()->IsOptimizing()) {
+#if V8_ENABLE_WEBASSEMBLY
+ if (!info()->IsWasm()) return;
+#else
+ return;
+#endif // V8_ENABLE_WEBASSEMBLY
+ }
DCHECK_NOT_NULL(debug_name_);
auto IncrementCounter = [&](ExternalReference counter) {
__ incl(__ ExternalReferenceAsOperand(counter));
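
The AssembleReturn hunks above rename {parameter_count} to {parameter_slots}, but the amount popped on a JS-linkage return is still max(1 + argc, parameter_slots) stack slots, as the quoted comment states. A minimal standalone sketch of that arithmetic, assuming an 8-byte pointer on x64 (illustrative only, not V8 code):

#include <algorithm>
#include <cstddef>

constexpr size_t kSystemPointerSize = 8;  // x64 pointer width, assumed here

// Bytes popped when returning from a JS-linkage frame that drops its own
// arguments: the dynamic count (argc plus the receiver) or the static
// parameter slot count, whichever is larger.
size_t ReturnPopBytes(size_t argc_without_receiver, size_t parameter_slots) {
  size_t slots = std::max(argc_without_receiver + 1, parameter_slots);
  return slots * kSystemPointerSize;
}
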
diff --git a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
index 6c48a04ea1f..2ad717c0a04 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -188,7 +188,6 @@ namespace compiler {
V(X64F32x4RecipApprox) \
V(X64F32x4RecipSqrtApprox) \
V(X64F32x4Add) \
- V(X64F32x4AddHoriz) \
V(X64F32x4Sub) \
V(X64F32x4Mul) \
V(X64F32x4Div) \
@@ -219,7 +218,6 @@ namespace compiler {
V(X64I64x2GeS) \
V(X64I64x2Ne) \
V(X64I64x2ShrU) \
- V(X64I64x2SignSelect) \
V(X64I64x2ExtMulLowI32x4S) \
V(X64I64x2ExtMulHighI32x4S) \
V(X64I64x2ExtMulLowI32x4U) \
@@ -237,7 +235,6 @@ namespace compiler {
V(X64I32x4Shl) \
V(X64I32x4ShrS) \
V(X64I32x4Add) \
- V(X64I32x4AddHoriz) \
V(X64I32x4Sub) \
V(X64I32x4Mul) \
V(X64I32x4MinS) \
@@ -257,7 +254,6 @@ namespace compiler {
V(X64I32x4Abs) \
V(X64I32x4BitMask) \
V(X64I32x4DotI16x8S) \
- V(X64I32x4SignSelect) \
V(X64I32x4ExtMulLowI16x8S) \
V(X64I32x4ExtMulHighI16x8S) \
V(X64I32x4ExtMulLowI16x8U) \
@@ -276,7 +272,6 @@ namespace compiler {
V(X64I16x8SConvertI32x4) \
V(X64I16x8Add) \
V(X64I16x8AddSatS) \
- V(X64I16x8AddHoriz) \
V(X64I16x8Sub) \
V(X64I16x8SubSatS) \
V(X64I16x8Mul) \
@@ -299,7 +294,6 @@ namespace compiler {
V(X64I16x8RoundingAverageU) \
V(X64I16x8Abs) \
V(X64I16x8BitMask) \
- V(X64I16x8SignSelect) \
V(X64I16x8ExtMulLowI8x16S) \
V(X64I16x8ExtMulHighI8x16S) \
V(X64I16x8ExtMulLowI8x16U) \
@@ -323,7 +317,6 @@ namespace compiler {
V(X64I8x16AddSatS) \
V(X64I8x16Sub) \
V(X64I8x16SubSatS) \
- V(X64I8x16Mul) \
V(X64I8x16MinS) \
V(X64I8x16MaxS) \
V(X64I8x16Eq) \
@@ -341,7 +334,6 @@ namespace compiler {
V(X64I8x16RoundingAverageU) \
V(X64I8x16Abs) \
V(X64I8x16BitMask) \
- V(X64I8x16SignSelect) \
V(X64S128Const) \
V(X64S128Zero) \
V(X64S128AllOnes) \
@@ -394,12 +386,10 @@ namespace compiler {
V(X64S8x4Reverse) \
V(X64S8x2Reverse) \
V(X64V128AnyTrue) \
- V(X64V64x2AllTrue) \
- V(X64V32x4AllTrue) \
- V(X64V16x8AllTrue) \
- V(X64V8x16AllTrue) \
- V(X64Prefetch) \
- V(X64PrefetchNta) \
+ V(X64I64x2AllTrue) \
+ V(X64I32x4AllTrue) \
+ V(X64I16x8AllTrue) \
+ V(X64I8x16AllTrue) \
V(X64Word64AtomicAddUint8) \
V(X64Word64AtomicAddUint16) \
V(X64Word64AtomicAddUint32) \
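
The opcode list edited above is an X-macro: each V(Name) entry is later expanded into an enum value and into switch cases like the scheduler ones in the next file. A small sketch of that expansion pattern with made-up entries (these are not the real V8 macros):

#define DEMO_OPCODE_LIST(V) \
  V(X64I64x2AllTrue)        \
  V(X64I8x16AllTrue)

enum class DemoOpcode {
#define DECLARE_OPCODE(Name) k##Name,
  DEMO_OPCODE_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
};

// kX64I64x2AllTrue and kX64I8x16AllTrue now exist as enumerators, which is
// why renaming an entry in the list renames every use site at once.
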
diff --git a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
index 2ecbab8f509..dc323d4cc79 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -164,7 +164,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F32x4Neg:
case kX64F32x4Sqrt:
case kX64F32x4Add:
- case kX64F32x4AddHoriz:
case kX64F32x4Sub:
case kX64F32x4Mul:
case kX64F32x4Div:
@@ -195,7 +194,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I64x2GeS:
case kX64I64x2Ne:
case kX64I64x2ShrU:
- case kX64I64x2SignSelect:
case kX64I64x2ExtMulLowI32x4S:
case kX64I64x2ExtMulHighI32x4S:
case kX64I64x2ExtMulLowI32x4U:
@@ -213,7 +211,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I32x4Shl:
case kX64I32x4ShrS:
case kX64I32x4Add:
- case kX64I32x4AddHoriz:
case kX64I32x4Sub:
case kX64I32x4Mul:
case kX64I32x4MinS:
@@ -233,7 +230,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I32x4Abs:
case kX64I32x4BitMask:
case kX64I32x4DotI16x8S:
- case kX64I32x4SignSelect:
case kX64I32x4ExtMulLowI16x8S:
case kX64I32x4ExtMulHighI16x8S:
case kX64I32x4ExtMulLowI16x8U:
@@ -252,7 +248,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I16x8SConvertI32x4:
case kX64I16x8Add:
case kX64I16x8AddSatS:
- case kX64I16x8AddHoriz:
case kX64I16x8Sub:
case kX64I16x8SubSatS:
case kX64I16x8Mul:
@@ -275,7 +270,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I16x8RoundingAverageU:
case kX64I16x8Abs:
case kX64I16x8BitMask:
- case kX64I16x8SignSelect:
case kX64I16x8ExtMulLowI8x16S:
case kX64I16x8ExtMulHighI8x16S:
case kX64I16x8ExtMulLowI8x16U:
@@ -293,7 +287,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I8x16AddSatS:
case kX64I8x16Sub:
case kX64I8x16SubSatS:
- case kX64I8x16Mul:
case kX64I8x16MinS:
case kX64I8x16MaxS:
case kX64I8x16Eq:
@@ -311,7 +304,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I8x16RoundingAverageU:
case kX64I8x16Abs:
case kX64I8x16BitMask:
- case kX64I8x16SignSelect:
case kX64S128And:
case kX64S128Or:
case kX64S128Xor:
@@ -321,9 +313,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64S128Zero:
case kX64S128AllOnes:
case kX64S128AndNot:
- case kX64V64x2AllTrue:
- case kX64V32x4AllTrue:
- case kX64V16x8AllTrue:
+ case kX64I64x2AllTrue:
+ case kX64I32x4AllTrue:
+ case kX64I16x8AllTrue:
case kX64I8x16Swizzle:
case kX64I8x16Shuffle:
case kX64I8x16Popcnt:
@@ -355,7 +347,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64S8x4Reverse:
case kX64S8x2Reverse:
case kX64V128AnyTrue:
- case kX64V8x16AllTrue:
+ case kX64I8x16AllTrue:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
@@ -427,8 +419,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64MFence:
case kX64LFence:
- case kX64Prefetch:
- case kX64PrefetchNta:
return kHasSideEffect;
case kX64Word64AtomicAddUint8:
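
In GetTargetInstructionFlags above, a pure SIMD opcode reports kNoOpcodeFlags unless it also carries an addressing mode, in which case it is scheduled as a load with a side effect. A compact sketch of just that decision, reusing the flag names for illustration:

enum InstructionFlags {
  kNoOpcodeFlags = 0,
  kIsLoadOperation = 1 << 0,
  kHasSideEffect = 1 << 1,
};

// A SIMD arithmetic instruction only constrains scheduling when it also
// reads memory (addressing mode != kMode_None in the real selector).
int SimdOpcodeFlags(bool has_memory_operand) {
  return has_memory_operand ? (kIsLoadOperation | kHasSideEffect)
                            : kNoOpcodeFlags;
}
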
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index bbfc0a09bde..23bbed36137 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -17,7 +17,10 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/roots/roots-inl.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/simd-shuffle.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -338,7 +341,7 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
void InstructionSelector::VisitStackSlot(Node* node) {
StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
- int slot = frame_->AllocateSpillSlot(rep.size());
+ int slot = frame_->AllocateSpillSlot(rep.size(), rep.alignment());
OperandGenerator g(this);
Emit(kArchStackSlot, g.DefineAsRegister(node),
@@ -577,30 +580,6 @@ void InstructionSelector::VisitStoreLane(Node* node) {
Emit(opcode, 0, nullptr, input_count, inputs);
}
-void InstructionSelector::VisitPrefetchTemporal(Node* node) {
- X64OperandGenerator g(this);
- InstructionOperand inputs[2];
- size_t input_count = 0;
- InstructionCode opcode = kX64Prefetch;
- AddressingMode addressing_mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
- DCHECK_LE(input_count, 2);
- opcode |= AddressingModeField::encode(addressing_mode);
- Emit(opcode, 0, nullptr, input_count, inputs);
-}
-
-void InstructionSelector::VisitPrefetchNonTemporal(Node* node) {
- X64OperandGenerator g(this);
- InstructionOperand inputs[2];
- size_t input_count = 0;
- InstructionCode opcode = kX64PrefetchNta;
- AddressingMode addressing_mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
- DCHECK_LE(input_count, 2);
- opcode |= AddressingModeField::encode(addressing_mode);
- Emit(opcode, 0, nullptr, input_count, inputs);
-}
-
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
@@ -988,7 +967,8 @@ bool TryMatchLoadWord64AndShiftRight(InstructionSelector* selector, Node* node,
// immediate displacement. It seems that we never use M1 and M2, but we
// handle them here anyways.
mode = AddDisplacementToAddressingMode(mode);
- inputs[input_count++] = ImmediateOperand(ImmediateOperand::INLINE, 4);
+ inputs[input_count++] =
+ ImmediateOperand(ImmediateOperand::INLINE_INT32, 4);
} else {
// In the case that the base address was zero, the displacement will be
// in a register and replacing it with an immediate is not allowed. This
@@ -996,7 +976,7 @@ bool TryMatchLoadWord64AndShiftRight(InstructionSelector* selector, Node* node,
if (!inputs[input_count - 1].IsImmediate()) return false;
int32_t displacement = g.GetImmediateIntegerValue(mleft.displacement());
inputs[input_count - 1] =
- ImmediateOperand(ImmediateOperand::INLINE, displacement + 4);
+ ImmediateOperand(ImmediateOperand::INLINE_INT32, displacement + 4);
}
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionCode code = opcode | AddressingModeField::encode(mode);
@@ -1457,6 +1437,7 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
}
}
case IrOpcode::kLoad:
+ case IrOpcode::kLoadImmutable:
case IrOpcode::kProtectedLoad:
case IrOpcode::kPoisonedLoad: {
// The movzxbl/movsxbl/movzxwl/movsxwl/movl operations implicitly
@@ -1694,7 +1675,8 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
}
break;
}
- case IrOpcode::kLoad: {
+ case IrOpcode::kLoad:
+ case IrOpcode::kLoadImmutable: {
if (TryMergeTruncateInt64ToInt32IntoLoad(this, node, value)) {
return;
}
@@ -2855,7 +2837,6 @@ VISIT_ATOMIC_BINOP(Xor)
V(I64x2ExtMulLowI32x4U) \
V(I64x2ExtMulHighI32x4U) \
V(I32x4Add) \
- V(I32x4AddHoriz) \
V(I32x4Sub) \
V(I32x4Mul) \
V(I32x4MinS) \
@@ -2873,7 +2854,6 @@ VISIT_ATOMIC_BINOP(Xor)
V(I16x8UConvertI32x4) \
V(I16x8Add) \
V(I16x8AddSatS) \
- V(I16x8AddHoriz) \
V(I16x8Sub) \
V(I16x8SubSatS) \
V(I16x8Mul) \
@@ -2913,25 +2893,22 @@ VISIT_ATOMIC_BINOP(Xor)
#define SIMD_BINOP_LIST(V) \
V(F64x2Min) \
V(F64x2Max) \
- V(F32x4AddHoriz) \
V(F32x4Min) \
V(F32x4Max) \
+ V(I64x2Ne) \
+ V(I32x4Ne) \
+ V(I32x4GtU) \
V(I32x4GeS) \
V(I32x4GeU) \
+ V(I16x8Ne) \
+ V(I16x8GtU) \
V(I16x8GeS) \
V(I16x8GeU) \
+ V(I8x16Ne) \
+ V(I8x16GtU) \
V(I8x16GeS) \
V(I8x16GeU)
-#define SIMD_BINOP_ONE_TEMP_LIST(V) \
- V(I64x2Ne) \
- V(I32x4Ne) \
- V(I32x4GtU) \
- V(I16x8Ne) \
- V(I16x8GtU) \
- V(I8x16Ne) \
- V(I8x16GtU)
-
#define SIMD_UNOP_LIST(V) \
V(F64x2Sqrt) \
V(F64x2ConvertLowI32x4S) \
@@ -2962,9 +2939,14 @@ VISIT_ATOMIC_BINOP(Xor)
V(I16x8UConvertI8x16Low) \
V(I16x8UConvertI8x16High) \
V(I16x8Abs) \
+ V(I16x8BitMask) \
V(I8x16Neg) \
V(I8x16Abs) \
V(I8x16BitMask) \
+ V(I64x2AllTrue) \
+ V(I32x4AllTrue) \
+ V(I16x8AllTrue) \
+ V(I8x16AllTrue) \
V(S128Not)
#define SIMD_SHIFT_OPCODES(V) \
@@ -2981,12 +2963,6 @@ VISIT_ATOMIC_BINOP(Xor)
V(I8x16Shl) \
V(I8x16ShrU)
-#define SIMD_ALLTRUE_LIST(V) \
- V(V64x2AllTrue) \
- V(V32x4AllTrue) \
- V(V16x8AllTrue) \
- V(V8x16AllTrue)
-
void InstructionSelector::VisitS128Const(Node* node) {
X64OperandGenerator g(this);
static const int kUint32Immediates = kSimd128Size / sizeof(uint32_t);
@@ -3088,20 +3064,18 @@ SIMD_TYPES_FOR_REPLACE_LANE(VISIT_SIMD_REPLACE_LANE)
#undef SIMD_TYPES_FOR_REPLACE_LANE
#undef VISIT_SIMD_REPLACE_LANE
-#define VISIT_SIMD_SHIFT(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- X64OperandGenerator g(this); \
- InstructionOperand dst = IsSupported(AVX) ? g.DefineAsRegister(node) \
- : g.DefineSameAsFirst(node); \
- if (g.CanBeImmediate(node->InputAt(1))) { \
- Emit(kX64##Opcode, dst, g.UseRegister(node->InputAt(0)), \
- g.UseImmediate(node->InputAt(1))); \
- } else { \
- InstructionOperand temps[] = {g.TempSimd128Register(), \
- g.TempRegister()}; \
- Emit(kX64##Opcode, dst, g.UseUniqueRegister(node->InputAt(0)), \
- g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
- } \
+#define VISIT_SIMD_SHIFT(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ InstructionOperand dst = IsSupported(AVX) ? g.DefineAsRegister(node) \
+ : g.DefineSameAsFirst(node); \
+ if (g.CanBeImmediate(node->InputAt(1))) { \
+ Emit(kX64##Opcode, dst, g.UseRegister(node->InputAt(0)), \
+ g.UseImmediate(node->InputAt(1))); \
+ } else { \
+ Emit(kX64##Opcode, dst, g.UseRegister(node->InputAt(0)), \
+ g.UseRegister(node->InputAt(1))); \
+ } \
}
SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
#undef VISIT_SIMD_SHIFT
@@ -3160,35 +3134,12 @@ SIMD_BINOP_SSE_AVX_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP
#undef SIMD_BINOP_SSE_AVX_LIST
-#define VISIT_SIMD_BINOP_ONE_TEMP(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- X64OperandGenerator g(this); \
- InstructionOperand temps[] = {g.TempSimd128Register()}; \
- Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \
- arraysize(temps), temps); \
- }
-SIMD_BINOP_ONE_TEMP_LIST(VISIT_SIMD_BINOP_ONE_TEMP)
-#undef VISIT_SIMD_BINOP_ONE_TEMP
-#undef SIMD_BINOP_ONE_TEMP_LIST
-
void InstructionSelector::VisitV128AnyTrue(Node* node) {
X64OperandGenerator g(this);
Emit(kX64V128AnyTrue, g.DefineAsRegister(node),
g.UseUniqueRegister(node->InputAt(0)));
}
-#define VISIT_SIMD_ALLTRUE(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- X64OperandGenerator g(this); \
- InstructionOperand temps[] = {g.TempSimd128Register()}; \
- Emit(kX64##Opcode, g.DefineAsRegister(node), \
- g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps); \
- }
-SIMD_ALLTRUE_LIST(VISIT_SIMD_ALLTRUE)
-#undef VISIT_SIMD_ALLTRUE
-#undef SIMD_ALLTRUE_LIST
-
void InstructionSelector::VisitS128Select(Node* node) {
X64OperandGenerator g(this);
InstructionOperand dst =
@@ -3197,40 +3148,6 @@ void InstructionSelector::VisitS128Select(Node* node) {
g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
}
-namespace {
-void VisitSignSelect(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
- X64OperandGenerator g(selector);
- // signselect(x, y, -1) = x
- // pblendvb(dst, x, y, -1) = dst <- y, so we need to swap x and y.
- if (selector->IsSupported(AVX)) {
- selector->Emit(
- opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(1)),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(2)));
- } else {
- selector->Emit(
- opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(1)),
- g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(2), xmm0));
- }
-}
-} // namespace
-
-void InstructionSelector::VisitI8x16SignSelect(Node* node) {
- VisitSignSelect(this, node, kX64I8x16SignSelect);
-}
-
-void InstructionSelector::VisitI16x8SignSelect(Node* node) {
- VisitSignSelect(this, node, kX64I16x8SignSelect);
-}
-
-void InstructionSelector::VisitI32x4SignSelect(Node* node) {
- VisitSignSelect(this, node, kX64I32x4SignSelect);
-}
-
-void InstructionSelector::VisitI64x2SignSelect(Node* node) {
- VisitSignSelect(this, node, kX64I64x2SignSelect);
-}
-
void InstructionSelector::VisitS128AndNot(Node* node) {
X64OperandGenerator g(this);
// andnps a b does ~a & b, but we want a & !b, so flip the input.
@@ -3256,20 +3173,12 @@ void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
g.UseRegister(node->InputAt(0)));
}
-#define VISIT_SIMD_QFMOP(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- X64OperandGenerator g(this); \
- if (CpuFeatures::IsSupported(FMA3)) { \
- Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \
- g.UseRegister(node->InputAt(2))); \
- } else { \
- InstructionOperand temps[] = {g.TempSimd128Register()}; \
- Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
- g.UseUniqueRegister(node->InputAt(0)), \
- g.UseUniqueRegister(node->InputAt(1)), \
- g.UseRegister(node->InputAt(2)), arraysize(temps), temps); \
- } \
+#define VISIT_SIMD_QFMOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \
+ g.UseRegister(node->InputAt(2))); \
}
VISIT_SIMD_QFMOP(F64x2Qfma)
VISIT_SIMD_QFMOP(F64x2Qfms)
@@ -3288,8 +3197,7 @@ void InstructionSelector::VisitI64x2ShrS(Node* node) {
void InstructionSelector::VisitI64x2Mul(Node* node) {
X64OperandGenerator g(this);
- InstructionOperand temps[] = {g.TempSimd128Register(),
- g.TempSimd128Register()};
+ InstructionOperand temps[] = {g.TempSimd128Register()};
Emit(kX64I64x2Mul, g.DefineSameAsFirst(node),
g.UseUniqueRegister(node->InputAt(0)),
g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
@@ -3297,9 +3205,8 @@ void InstructionSelector::VisitI64x2Mul(Node* node) {
void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
X64OperandGenerator g(this);
- InstructionOperand temps[] = {g.TempSimd128Register()};
Emit(kX64I32x4SConvertF32x4, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), arraysize(temps), temps);
+ g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
@@ -3310,21 +3217,6 @@ void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
g.UseRegister(node->InputAt(0)), arraysize(temps), temps);
}
-void InstructionSelector::VisitI16x8BitMask(Node* node) {
- X64OperandGenerator g(this);
- InstructionOperand temps[] = {g.TempSimd128Register()};
- Emit(kX64I16x8BitMask, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps);
-}
-
-void InstructionSelector::VisitI8x16Mul(Node* node) {
- X64OperandGenerator g(this);
- InstructionOperand temps[] = {g.TempSimd128Register()};
- Emit(kX64I8x16Mul, g.DefineSameAsFirst(node),
- g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
-}
-
void InstructionSelector::VisitI8x16ShrS(Node* node) {
X64OperandGenerator g(this);
if (g.CanBeImmediate(node->InputAt(1))) {
@@ -3346,6 +3238,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
+#if V8_ENABLE_WEBASSEMBLY
namespace {
// Returns true if shuffle can be decomposed into two 16x4 half shuffles
@@ -3651,13 +3544,31 @@ void InstructionSelector::VisitI8x16Shuffle(Node* node) {
}
Emit(opcode, 1, &dst, input_count, inputs, temp_count, temps);
}
+#else
+void InstructionSelector::VisitI8x16Shuffle(Node* node) { UNREACHABLE(); }
+#endif // V8_ENABLE_WEBASSEMBLY
+#if V8_ENABLE_WEBASSEMBLY
void InstructionSelector::VisitI8x16Swizzle(Node* node) {
+ InstructionCode op = kX64I8x16Swizzle;
+
+ auto m = V128ConstMatcher(node->InputAt(1));
+ if (m.HasResolvedValue()) {
+ // If the indices vector is a const, check if they are in range, or if the
+ // top bit is set, then we can avoid the paddusb in the codegen and simply
+ // emit a pshufb
+ auto imms = m.ResolvedValue().immediate();
+ op |= MiscField::encode(wasm::SimdSwizzle::AllInRangeOrTopBitSet(imms));
+ }
+
X64OperandGenerator g(this);
- Emit(kX64I8x16Swizzle,
+ Emit(op,
IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
+#else
+void InstructionSelector::VisitI8x16Swizzle(Node* node) { UNREACHABLE(); }
+#endif // V8_ENABLE_WEBASSEMBLY
namespace {
void VisitPminOrPmax(InstructionSelector* selector, Node* node,
@@ -3665,8 +3576,10 @@ void VisitPminOrPmax(InstructionSelector* selector, Node* node,
// Due to the way minps/minpd work, we want the dst to be same as the second
// input: b = pmin(a, b) directly maps to minps b a.
X64OperandGenerator g(selector);
- selector->Emit(opcode, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(1)),
+ InstructionOperand dst = selector->IsSupported(AVX)
+ ? g.DefineAsRegister(node)
+ : g.DefineSameAsFirst(node);
+ selector->Emit(opcode, dst, g.UseRegister(node->InputAt(1)),
g.UseRegister(node->InputAt(0)));
}
} // namespace
@@ -3720,12 +3633,9 @@ void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16U(Node* node) {
void InstructionSelector::VisitI8x16Popcnt(Node* node) {
X64OperandGenerator g(this);
- InstructionOperand dst = CpuFeatures::IsSupported(AVX)
- ? g.DefineAsRegister(node)
- : g.DefineAsRegister(node);
InstructionOperand temps[] = {g.TempSimd128Register()};
- Emit(kX64I8x16Popcnt, dst, g.UseUniqueRegister(node->InputAt(0)),
- arraysize(temps), temps);
+ Emit(kX64I8x16Popcnt, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps);
}
void InstructionSelector::VisitF64x2ConvertLowI32x4U(Node* node) {
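
The VisitI8x16Swizzle change above inspects a constant index vector so the code generator can skip the usual paddusb clamp and emit a bare pshufb. A hedged sketch of the per-byte condition that makes this safe (pshufb zeroes a lane when the index's top bit is set and otherwise uses the low four bits); the helper name mirrors the one referenced above but is reconstructed, not quoted from V8:

#include <array>
#include <cstdint>

// True when every constant index is either a valid lane number (< 16) or has
// its top bit set, so pshufb alone already yields the right lane or a zero.
bool AllInRangeOrTopBitSet(const std::array<uint8_t, 16>& indices) {
  for (uint8_t b : indices) {
    if (b >= 16 && (b & 0x80) == 0) return false;  // would need saturation
  }
  return true;
}
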
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index 40ca16cadc0..9a16e6d1483 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -42,6 +42,9 @@ Reduction BranchElimination::Reduce(Node* node) {
return ReduceIf(node, false);
case IrOpcode::kIfTrue:
return ReduceIf(node, true);
+ case IrOpcode::kTrapIf:
+ case IrOpcode::kTrapUnless:
+ return ReduceTrapConditional(node);
case IrOpcode::kStart:
return ReduceStart(node);
default:
@@ -71,9 +74,9 @@ void BranchElimination::SimplifyBranchCondition(Node* branch) {
// | \ / \ /
// | \ / \ /
// | first_merge ==> first_merge
- // | | |
- // second_branch 1 0 |
- // / \ \ / |
+ // | | / |
+ // second_branch 1 0 / |
+ // / \ \ | / |
// / \ phi |
// second_true second_false \ |
// second_branch
@@ -154,6 +157,42 @@ Reduction BranchElimination::ReduceBranch(Node* node) {
return TakeConditionsFromFirstControl(node);
}
+Reduction BranchElimination::ReduceTrapConditional(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kTrapIf ||
+ node->opcode() == IrOpcode::kTrapUnless);
+ bool trapping_condition = node->opcode() == IrOpcode::kTrapIf;
+ Node* condition = node->InputAt(0);
+ Node* control_input = NodeProperties::GetControlInput(node, 0);
+ // If we do not know anything about the predecessor, do not propagate just
+ // yet because we will have to recompute anyway once we compute the
+ // predecessor.
+ if (!reduced_.Get(control_input)) {
+ return NoChange();
+ }
+ ControlPathConditions from_input = node_conditions_.Get(control_input);
+
+ Node* branch;
+ bool condition_value;
+
+ if (from_input.LookupCondition(condition, &branch, &condition_value)) {
+ if (condition_value == trapping_condition) {
+ // This will always trap. Mark its outputs as dead and connect it to
+ // graph()->end().
+ ReplaceWithValue(node, dead(), dead(), dead());
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = graph()->NewNode(common()->Throw(), effect, node);
+ NodeProperties::MergeControlToEnd(graph(), common(), control);
+ Revisit(graph()->end());
+ return Changed(node);
+ } else {
+ // This will not trap, remove it.
+ return Replace(control_input);
+ }
+ }
+ return UpdateConditions(node, from_input, condition, node,
+ !trapping_condition);
+}
+
Reduction BranchElimination::ReduceDeoptimizeConditional(Node* node) {
DCHECK(node->opcode() == IrOpcode::kDeoptimizeIf ||
node->opcode() == IrOpcode::kDeoptimizeUnless);
@@ -284,8 +323,17 @@ Reduction BranchElimination::UpdateConditions(
void BranchElimination::ControlPathConditions::AddCondition(
Zone* zone, Node* condition, Node* branch, bool is_true,
ControlPathConditions hint) {
- DCHECK(!LookupCondition(condition, nullptr, nullptr));
- PushFront({condition, branch, is_true}, zone, hint);
+ if (!LookupCondition(condition)) {
+ PushFront({condition, branch, is_true}, zone, hint);
+ }
+}
+
+bool BranchElimination::ControlPathConditions::LookupCondition(
+ Node* condition) const {
+ for (BranchCondition element : *this) {
+ if (element.condition == condition) return true;
+ }
+ return false;
}
bool BranchElimination::ControlPathConditions::LookupCondition(
@@ -302,7 +350,9 @@ bool BranchElimination::ControlPathConditions::LookupCondition(
void BranchElimination::MarkAsSafetyCheckIfNeeded(Node* branch, Node* node) {
// Check if {branch} is dead because we might have a stale side-table entry.
- if (!branch->IsDead() && branch->opcode() != IrOpcode::kDead) {
+ if (!branch->IsDead() && branch->opcode() != IrOpcode::kDead &&
+ branch->opcode() != IrOpcode::kTrapIf &&
+ branch->opcode() != IrOpcode::kTrapUnless) {
IsSafetyCheck branch_safety = IsSafetyCheckOf(branch->op());
IsSafetyCheck combined_safety =
CombineSafetyChecks(branch_safety, IsSafetyCheckOf(node->op()));
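
ReduceTrapConditional above folds a TrapIf/TrapUnless whose condition is already fixed on the control path: a value matching the trapping polarity makes the trap unconditional (its outputs are marked dead and it is wired to end via Throw), while a non-matching value removes the trap entirely. A minimal sketch of that three-way decision, using illustrative names rather than the reducer API:

enum class TrapFolding { kKeep, kAlwaysTraps, kNeverTraps };

TrapFolding ClassifyTrap(bool condition_known, bool condition_value,
                         bool traps_when_true) {
  if (!condition_known) return TrapFolding::kKeep;  // nothing known on this path
  return condition_value == traps_when_true ? TrapFolding::kAlwaysTraps
                                            : TrapFolding::kNeverTraps;
}
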
diff --git a/deps/v8/src/compiler/branch-elimination.h b/deps/v8/src/compiler/branch-elimination.h
index adc2b0f378d..eb6f1f159d1 100644
--- a/deps/v8/src/compiler/branch-elimination.h
+++ b/deps/v8/src/compiler/branch-elimination.h
@@ -52,6 +52,7 @@ class V8_EXPORT_PRIVATE BranchElimination final
// (true or false).
class ControlPathConditions : public FunctionalList<BranchCondition> {
public:
+ bool LookupCondition(Node* condition) const;
bool LookupCondition(Node* condition, Node** branch, bool* is_true) const;
void AddCondition(Zone* zone, Node* condition, Node* branch, bool is_true,
ControlPathConditions hint);
@@ -63,6 +64,7 @@ class V8_EXPORT_PRIVATE BranchElimination final
Reduction ReduceBranch(Node* node);
Reduction ReduceDeoptimizeConditional(Node* node);
Reduction ReduceIf(Node* node, bool is_true_branch);
+ Reduction ReduceTrapConditional(Node* node);
Reduction ReduceLoop(Node* node);
Reduction ReduceMerge(Node* node);
Reduction ReduceStart(Node* node);
diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc
index 8489a726585..b3734ee9fcf 100644
--- a/deps/v8/src/compiler/bytecode-analysis.cc
+++ b/deps/v8/src/compiler/bytecode-analysis.cc
@@ -6,8 +6,8 @@
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-array-random-iterator.h"
-#include "src/utils/ostreams.h"
#include "src/objects/objects-inl.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
@@ -98,14 +98,14 @@ BytecodeAnalysis::BytecodeAnalysis(Handle<BytecodeArray> bytecode_array,
namespace {
void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState* in_liveness,
- const interpreter::BytecodeArrayAccessor& accessor) {
+ const interpreter::BytecodeArrayIterator& iterator) {
int num_operands = Bytecodes::NumberOfOperands(bytecode);
const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
// Special case Suspend and Resume to just pass through liveness.
if (bytecode == Bytecode::kSuspendGenerator) {
// The generator object has to be live.
- in_liveness->MarkRegisterLive(accessor.GetRegisterOperand(0).index());
+ in_liveness->MarkRegisterLive(iterator.GetRegisterOperand(0).index());
// Suspend additionally reads and returns the accumulator
DCHECK(Bytecodes::ReadsAccumulator(bytecode));
in_liveness->MarkAccumulatorLive();
@@ -113,7 +113,7 @@ void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState* in_liveness,
}
if (bytecode == Bytecode::kResumeGenerator) {
// The generator object has to be live.
- in_liveness->MarkRegisterLive(accessor.GetRegisterOperand(0).index());
+ in_liveness->MarkRegisterLive(iterator.GetRegisterOperand(0).index());
return;
}
@@ -123,15 +123,15 @@ void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState* in_liveness,
for (int i = 0; i < num_operands; ++i) {
switch (operand_types[i]) {
case OperandType::kRegOut: {
- interpreter::Register r = accessor.GetRegisterOperand(i);
+ interpreter::Register r = iterator.GetRegisterOperand(i);
if (!r.is_parameter()) {
in_liveness->MarkRegisterDead(r.index());
}
break;
}
case OperandType::kRegOutList: {
- interpreter::Register r = accessor.GetRegisterOperand(i++);
- uint32_t reg_count = accessor.GetRegisterCountOperand(i);
+ interpreter::Register r = iterator.GetRegisterOperand(i++);
+ uint32_t reg_count = iterator.GetRegisterCountOperand(i);
if (!r.is_parameter()) {
for (uint32_t j = 0; j < reg_count; ++j) {
DCHECK(!interpreter::Register(r.index() + j).is_parameter());
@@ -141,7 +141,7 @@ void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState* in_liveness,
break;
}
case OperandType::kRegOutPair: {
- interpreter::Register r = accessor.GetRegisterOperand(i);
+ interpreter::Register r = iterator.GetRegisterOperand(i);
if (!r.is_parameter()) {
DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
in_liveness->MarkRegisterDead(r.index());
@@ -150,7 +150,7 @@ void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState* in_liveness,
break;
}
case OperandType::kRegOutTriple: {
- interpreter::Register r = accessor.GetRegisterOperand(i);
+ interpreter::Register r = iterator.GetRegisterOperand(i);
if (!r.is_parameter()) {
DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
DCHECK(!interpreter::Register(r.index() + 2).is_parameter());
@@ -177,14 +177,14 @@ void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState* in_liveness,
for (int i = 0; i < num_operands; ++i) {
switch (operand_types[i]) {
case OperandType::kReg: {
- interpreter::Register r = accessor.GetRegisterOperand(i);
+ interpreter::Register r = iterator.GetRegisterOperand(i);
if (!r.is_parameter()) {
in_liveness->MarkRegisterLive(r.index());
}
break;
}
case OperandType::kRegPair: {
- interpreter::Register r = accessor.GetRegisterOperand(i);
+ interpreter::Register r = iterator.GetRegisterOperand(i);
if (!r.is_parameter()) {
DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
in_liveness->MarkRegisterLive(r.index());
@@ -193,8 +193,8 @@ void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState* in_liveness,
break;
}
case OperandType::kRegList: {
- interpreter::Register r = accessor.GetRegisterOperand(i++);
- uint32_t reg_count = accessor.GetRegisterCountOperand(i);
+ interpreter::Register r = iterator.GetRegisterOperand(i++);
+ uint32_t reg_count = iterator.GetRegisterCountOperand(i);
if (!r.is_parameter()) {
for (uint32_t j = 0; j < reg_count; ++j) {
DCHECK(!interpreter::Register(r.index() + j).is_parameter());
@@ -212,10 +212,10 @@ void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState* in_liveness,
void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState* out_liveness,
BytecodeLivenessState* next_bytecode_in_liveness,
- const interpreter::BytecodeArrayAccessor& accessor,
+ const interpreter::BytecodeArrayIterator& iterator,
Handle<BytecodeArray> bytecode_array,
const BytecodeLivenessMap& liveness_map) {
- int current_offset = accessor.current_offset();
+ int current_offset = iterator.current_offset();
// Special case Suspend and Resume to just pass through liveness.
if (bytecode == Bytecode::kSuspendGenerator ||
@@ -227,10 +227,10 @@ void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState* out_liveness,
// Update from jump target (if any). Skip loops, we update these manually in
// the liveness iterations.
if (Bytecodes::IsForwardJump(bytecode)) {
- int target_offset = accessor.GetJumpTargetOffset();
+ int target_offset = iterator.GetJumpTargetOffset();
out_liveness->Union(*liveness_map.GetInLiveness(target_offset));
} else if (Bytecodes::IsSwitch(bytecode)) {
- for (const auto& entry : accessor.GetJumpTableTargetOffsets()) {
+ for (const auto& entry : iterator.GetJumpTableTargetOffsets()) {
out_liveness->Union(*liveness_map.GetInLiveness(entry.target_offset));
}
}
@@ -272,40 +272,40 @@ void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState* out_liveness,
void UpdateLiveness(Bytecode bytecode, BytecodeLiveness const& liveness,
BytecodeLivenessState** next_bytecode_in_liveness,
- const interpreter::BytecodeArrayAccessor& accessor,
+ const interpreter::BytecodeArrayIterator& iterator,
Handle<BytecodeArray> bytecode_array,
const BytecodeLivenessMap& liveness_map) {
UpdateOutLiveness(bytecode, liveness.out, *next_bytecode_in_liveness,
- accessor, bytecode_array, liveness_map);
+ iterator, bytecode_array, liveness_map);
liveness.in->CopyFrom(*liveness.out);
- UpdateInLiveness(bytecode, liveness.in, accessor);
+ UpdateInLiveness(bytecode, liveness.in, iterator);
*next_bytecode_in_liveness = liveness.in;
}
void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments* assignments,
- const interpreter::BytecodeArrayAccessor& accessor) {
+ const interpreter::BytecodeArrayIterator& iterator) {
int num_operands = Bytecodes::NumberOfOperands(bytecode);
const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
for (int i = 0; i < num_operands; ++i) {
switch (operand_types[i]) {
case OperandType::kRegOut: {
- assignments->Add(accessor.GetRegisterOperand(i));
+ assignments->Add(iterator.GetRegisterOperand(i));
break;
}
case OperandType::kRegOutList: {
- interpreter::Register r = accessor.GetRegisterOperand(i++);
- uint32_t reg_count = accessor.GetRegisterCountOperand(i);
+ interpreter::Register r = iterator.GetRegisterOperand(i++);
+ uint32_t reg_count = iterator.GetRegisterCountOperand(i);
assignments->AddList(r, reg_count);
break;
}
case OperandType::kRegOutPair: {
- assignments->AddList(accessor.GetRegisterOperand(i), 2);
+ assignments->AddList(iterator.GetRegisterOperand(i), 2);
break;
}
case OperandType::kRegOutTriple: {
- assignments->AddList(accessor.GetRegisterOperand(i), 3);
+ assignments->AddList(iterator.GetRegisterOperand(i), 3);
break;
}
default:
@@ -778,14 +778,14 @@ bool BytecodeAnalysis::ResumeJumpTargetLeavesResolveSuspendIds(
valid = false;
} else {
// Make sure we're resuming to a Resume bytecode
- interpreter::BytecodeArrayAccessor accessor(bytecode_array(),
+ interpreter::BytecodeArrayIterator iterator(bytecode_array(),
target.target_offset());
- if (accessor.current_bytecode() != Bytecode::kResumeGenerator) {
+ if (iterator.current_bytecode() != Bytecode::kResumeGenerator) {
PrintF(stderr,
"Expected resume target for id %d, offset %d, to be "
"ResumeGenerator, but found %s\n",
target.suspend_id(), target.target_offset(),
- Bytecodes::ToString(accessor.current_bytecode()));
+ Bytecodes::ToString(iterator.current_bytecode()));
valid = false;
}
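
The bytecode-analysis hunks above only rename the BytecodeArrayAccessor parameter to BytecodeArrayIterator, but the liveness rule they operate on is worth spelling out: because liveness flows backwards, a bytecode's output registers are killed before its input registers are marked live. A simplified sketch under those assumptions (fixed-size register file, plain ints standing in for register operands):

#include <bitset>
#include <vector>

using Liveness = std::bitset<128>;  // assumed register-file bound

// Backwards liveness transfer for one bytecode: definitions kill registers,
// then uses revive them, matching the order used by UpdateInLiveness above.
void TransferInLiveness(Liveness& live, const std::vector<int>& out_regs,
                        const std::vector<int>& in_regs) {
  for (int r : out_regs) live.reset(r);
  for (int r : in_regs) live.set(r);
}
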
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 0361a2ada04..4ec0c8f9d85 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -225,8 +225,24 @@ bool CodeAssembler::IsIntPtrAbsWithOverflowSupported() const {
: IsInt32AbsWithOverflowSupported();
}
+bool CodeAssembler::IsWord32PopcntSupported() const {
+ return raw_assembler()->machine()->Word32Popcnt().IsSupported();
+}
+
+bool CodeAssembler::IsWord64PopcntSupported() const {
+ return raw_assembler()->machine()->Word64Popcnt().IsSupported();
+}
+
+bool CodeAssembler::IsWord32CtzSupported() const {
+ return raw_assembler()->machine()->Word32Ctz().IsSupported();
+}
+
+bool CodeAssembler::IsWord64CtzSupported() const {
+ return raw_assembler()->machine()->Word64Ctz().IsSupported();
+}
+
#ifdef DEBUG
-void CodeAssembler::GenerateCheckMaybeObjectIsObject(Node* node,
+void CodeAssembler::GenerateCheckMaybeObjectIsObject(TNode<MaybeObject> node,
const char* location) {
Label ok(this);
GotoIf(WordNotEqual(WordAnd(BitcastMaybeObjectToWord(node),
@@ -553,10 +569,9 @@ TNode<WordT> CodeAssembler::WordPoisonOnSpeculation(TNode<WordT> value) {
return UncheckedCast<WordT>(raw_assembler()->WordPoisonOnSpeculation(value));
}
-#define DEFINE_CODE_ASSEMBLER_BINARY_OP(name, ResType, Arg1Type, Arg2Type) \
- TNode<ResType> CodeAssembler::name(SloppyTNode<Arg1Type> a, \
- SloppyTNode<Arg2Type> b) { \
- return UncheckedCast<ResType>(raw_assembler()->name(a, b)); \
+#define DEFINE_CODE_ASSEMBLER_BINARY_OP(name, ResType, Arg1Type, Arg2Type) \
+ TNode<ResType> CodeAssembler::name(TNode<Arg1Type> a, TNode<Arg2Type> b) { \
+ return UncheckedCast<ResType>(raw_assembler()->name(a, b)); \
}
CODE_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_ASSEMBLER_BINARY_OP)
#undef DEFINE_CODE_ASSEMBLER_BINARY_OP
@@ -651,7 +666,7 @@ TNode<Int32T> CodeAssembler::TruncateFloat32ToInt32(TNode<Float32T> value) {
value, TruncateKind::kSetOverflowToMin));
}
#define DEFINE_CODE_ASSEMBLER_UNARY_OP(name, ResType, ArgType) \
- TNode<ResType> CodeAssembler::name(SloppyTNode<ArgType> a) { \
+ TNode<ResType> CodeAssembler::name(TNode<ArgType> a) { \
return UncheckedCast<ResType>(raw_assembler()->name(a)); \
}
CODE_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_ASSEMBLER_UNARY_OP)
@@ -669,14 +684,12 @@ Node* CodeAssembler::Load(MachineType type, Node* base, Node* offset,
TNode<Object> CodeAssembler::LoadFullTagged(Node* base,
LoadSensitivity needs_poisoning) {
- return BitcastWordToTagged(
- Load(MachineType::Pointer(), base, needs_poisoning));
+ return BitcastWordToTagged(Load<RawPtrT>(base, needs_poisoning));
}
-TNode<Object> CodeAssembler::LoadFullTagged(Node* base, Node* offset,
+TNode<Object> CodeAssembler::LoadFullTagged(Node* base, TNode<IntPtrT> offset,
LoadSensitivity needs_poisoning) {
- return BitcastWordToTagged(
- Load(MachineType::Pointer(), base, offset, needs_poisoning));
+ return BitcastWordToTagged(Load<RawPtrT>(base, offset, needs_poisoning));
}
Node* CodeAssembler::AtomicLoad(MachineType type, TNode<RawPtrT> base,
@@ -720,6 +733,11 @@ TNode<Object> CodeAssembler::LoadRoot(RootIndex root_index) {
LoadFullTagged(isolate_root, IntPtrConstant(offset)));
}
+Node* CodeAssembler::UnalignedLoad(MachineType type, TNode<RawPtrT> base,
+ TNode<WordT> offset) {
+ return raw_assembler()->UnalignedLoad(type, static_cast<Node*>(base), offset);
+}
+
void CodeAssembler::Store(Node* base, Node* value) {
raw_assembler()->Store(MachineRepresentation::kTagged, base, value,
kFullWriteBarrier);
@@ -1091,7 +1109,7 @@ Node* CodeAssembler::CallStubRImpl(StubCallMode call_mode,
Node* CodeAssembler::CallJSStubImpl(const CallInterfaceDescriptor& descriptor,
TNode<Object> target, TNode<Object> context,
TNode<Object> function,
- TNode<Object> new_target,
+ base::Optional<TNode<Object>> new_target,
TNode<Int32T> arity,
std::initializer_list<Node*> args) {
constexpr size_t kMaxNumArgs = 10;
@@ -1099,8 +1117,8 @@ Node* CodeAssembler::CallJSStubImpl(const CallInterfaceDescriptor& descriptor,
NodeArray<kMaxNumArgs + 5> inputs;
inputs.Add(target);
inputs.Add(function);
- if (!new_target.is_null()) {
- inputs.Add(new_target);
+ if (new_target) {
+ inputs.Add(*new_target);
}
inputs.Add(arity);
for (auto arg : args) inputs.Add(arg);
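
CallJSStubImpl above switches new_target from a possibly-null TNode<Object> to base::Optional, so presence is expressed explicitly instead of via is_null(). A minimal sketch of the same pattern with std::optional as a stand-in and placeholder types:

#include <optional>
#include <vector>

// Inputs for a JS call stub: new_target is only appended when present,
// mirroring the base::Optional check added above.
std::vector<int> BuildCallInputs(int target, int function,
                                 std::optional<int> new_target, int arity) {
  std::vector<int> inputs{target, function};
  if (new_target) inputs.push_back(*new_target);
  inputs.push_back(arity);
  return inputs;
}
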
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 263ed375361..9163295cd62 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -14,6 +14,7 @@
// Do not include anything from src/compiler here!
#include "include/cppgc/source-location.h"
#include "src/base/macros.h"
+#include "src/base/optional.h"
#include "src/base/type-traits.h"
#include "src/builtins/builtins.h"
#include "src/codegen/code-factory.h"
@@ -269,6 +270,7 @@ class CodeAssemblerParameterizedLabel;
V(Float64Min, Float64T, Float64T, Float64T) \
V(Float64InsertLowWord32, Float64T, Float64T, Word32T) \
V(Float64InsertHighWord32, Float64T, Float64T, Word32T) \
+ V(I8x16Eq, I8x16T, I8x16T, I8x16T) \
V(IntPtrAdd, WordT, WordT, WordT) \
V(IntPtrSub, WordT, WordT, WordT) \
V(IntPtrMul, WordT, WordT, WordT) \
@@ -283,6 +285,12 @@ class CodeAssemblerParameterizedLabel;
V(Int32MulWithOverflow, PAIR_TYPE(Int32T, BoolT), Int32T, Int32T) \
V(Int32Div, Int32T, Int32T, Int32T) \
V(Int32Mod, Int32T, Int32T, Int32T) \
+ V(Int64Add, Word64T, Word64T, Word64T) \
+ V(Int64Sub, Word64T, Word64T, Word64T) \
+ V(Int64SubWithOverflow, PAIR_TYPE(Int64T, BoolT), Int64T, Int64T) \
+ V(Int64Mul, Word64T, Word64T, Word64T) \
+ V(Int64Div, Int64T, Int64T, Int64T) \
+ V(Int64Mod, Int64T, Int64T, Int64T) \
V(WordOr, WordT, WordT, WordT) \
V(WordAnd, WordT, WordT, WordT) \
V(WordXor, WordT, WordT, WordT) \
@@ -359,8 +367,16 @@ TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b);
V(Float64RoundTiesEven, Float64T, Float64T) \
V(Float64RoundTruncate, Float64T, Float64T) \
V(Word32Clz, Int32T, Word32T) \
+ V(Word64Clz, Int64T, Word64T) \
+ V(Word32Ctz, Int32T, Word32T) \
+ V(Word64Ctz, Int64T, Word64T) \
+ V(Word32Popcnt, Int32T, Word32T) \
+ V(Word64Popcnt, Int64T, Word64T) \
V(Word32BitwiseNot, Word32T, Word32T) \
V(WordNot, WordT, WordT) \
+ V(Word64Not, Word64T, Word64T) \
+ V(I8x16BitMask, Int32T, I8x16T) \
+ V(I8x16Splat, I8x16T, Int32T) \
V(Int32AbsWithOverflow, PAIR_TYPE(Int32T, BoolT), Int32T) \
V(Int64AbsWithOverflow, PAIR_TYPE(Int64T, BoolT), Int64T) \
V(IntPtrAbsWithOverflow, PAIR_TYPE(IntPtrT, BoolT), IntPtrT) \
@@ -407,6 +423,10 @@ class V8_EXPORT_PRIVATE CodeAssembler {
bool IsInt32AbsWithOverflowSupported() const;
bool IsInt64AbsWithOverflowSupported() const;
bool IsIntPtrAbsWithOverflowSupported() const;
+ bool IsWord32PopcntSupported() const;
+ bool IsWord64PopcntSupported() const;
+ bool IsWord32CtzSupported() const;
+ bool IsWord64CtzSupported() const;
// Shortened aliases for use in CodeAssembler subclasses.
using Label = CodeAssemblerLabel;
@@ -447,7 +467,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
#ifdef DEBUG
if (FLAG_debug_code) {
if (std::is_same<PreviousType, MaybeObject>::value) {
- code_assembler_->GenerateCheckMaybeObjectIsObject(node_, location_);
+ code_assembler_->GenerateCheckMaybeObjectIsObject(
+ TNode<MaybeObject>::UncheckedCast(node_), location_);
}
TNode<ExternalReference> function = code_assembler_->ExternalConstant(
ExternalReference::check_object_type());
@@ -464,11 +485,6 @@ class V8_EXPORT_PRIVATE CodeAssembler {
return TNode<A>::UncheckedCast(node_);
}
- template <class A>
- operator SloppyTNode<A>() {
- return implicit_cast<TNode<A>>(*this);
- }
-
Node* node() const { return node_; }
private:
@@ -519,12 +535,16 @@ class V8_EXPORT_PRIVATE CodeAssembler {
#endif
#ifdef DEBUG
- void GenerateCheckMaybeObjectIsObject(Node* node, const char* location);
+ void GenerateCheckMaybeObjectIsObject(TNode<MaybeObject> node,
+ const char* location);
#endif
// Constants.
TNode<Int32T> Int32Constant(int32_t value);
TNode<Int64T> Int64Constant(int64_t value);
+ TNode<Uint64T> Uint64Constant(uint64_t value) {
+ return Unsigned(Int64Constant(bit_cast<int64_t>(value)));
+ }
TNode<IntPtrT> IntPtrConstant(intptr_t value);
TNode<Uint32T> Uint32Constant(uint32_t value) {
return Unsigned(Int32Constant(bit_cast<int32_t>(value)));
@@ -576,10 +596,14 @@ class V8_EXPORT_PRIVATE CodeAssembler {
bool IsNullConstant(TNode<Object> node);
TNode<Int32T> Signed(TNode<Word32T> x) { return UncheckedCast<Int32T>(x); }
+ TNode<Int64T> Signed(TNode<Word64T> x) { return UncheckedCast<Int64T>(x); }
TNode<IntPtrT> Signed(TNode<WordT> x) { return UncheckedCast<IntPtrT>(x); }
TNode<Uint32T> Unsigned(TNode<Word32T> x) {
return UncheckedCast<Uint32T>(x);
}
+ TNode<Uint64T> Unsigned(TNode<Word64T> x) {
+ return UncheckedCast<Uint64T>(x);
+ }
TNode<UintPtrT> Unsigned(TNode<WordT> x) {
return UncheckedCast<UintPtrT>(x);
}
@@ -759,7 +783,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<Object> LoadFullTagged(
Node* base, LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
TNode<Object> LoadFullTagged(
- Node* base, Node* offset,
+ Node* base, TNode<IntPtrT> offset,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
Node* LoadFromObject(MachineType type, TNode<Object> object,
@@ -768,6 +792,12 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Load a value from the root array.
TNode<Object> LoadRoot(RootIndex root_index);
+ template <typename Type>
+ TNode<Type> UnalignedLoad(TNode<RawPtrT> base, TNode<IntPtrT> offset) {
+ MachineType mt = MachineTypeOf<Type>::value;
+ return UncheckedCast<Type>(UnalignedLoad(mt, base, offset));
+ }
+
// Store value to raw memory location.
void Store(Node* base, Node* value);
void Store(Node* base, Node* offset, Node* value);
@@ -866,66 +896,95 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Basic arithmetic operations.
#define DECLARE_CODE_ASSEMBLER_BINARY_OP(name, ResType, Arg1Type, Arg2Type) \
- TNode<ResType> name(SloppyTNode<Arg1Type> a, SloppyTNode<Arg2Type> b);
+ TNode<ResType> name(TNode<Arg1Type> a, TNode<Arg2Type> b);
CODE_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_ASSEMBLER_BINARY_OP)
#undef DECLARE_CODE_ASSEMBLER_BINARY_OP
TNode<UintPtrT> WordShr(TNode<UintPtrT> left, TNode<IntegralT> right) {
- return Unsigned(
- WordShr(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Unsigned(WordShr(static_cast<TNode<WordT>>(left), right));
}
TNode<IntPtrT> WordSar(TNode<IntPtrT> left, TNode<IntegralT> right) {
- return Signed(WordSar(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Signed(WordSar(static_cast<TNode<WordT>>(left), right));
}
TNode<IntPtrT> WordShl(TNode<IntPtrT> left, TNode<IntegralT> right) {
- return Signed(WordShl(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Signed(WordShl(static_cast<TNode<WordT>>(left), right));
}
TNode<UintPtrT> WordShl(TNode<UintPtrT> left, TNode<IntegralT> right) {
- return Unsigned(
- WordShl(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Unsigned(WordShl(static_cast<TNode<WordT>>(left), right));
}
TNode<Int32T> Word32Shl(TNode<Int32T> left, TNode<Int32T> right) {
- return Signed(
- Word32Shl(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Signed(Word32Shl(static_cast<TNode<Word32T>>(left), right));
}
TNode<Uint32T> Word32Shl(TNode<Uint32T> left, TNode<Uint32T> right) {
- return Unsigned(
- Word32Shl(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Unsigned(Word32Shl(static_cast<TNode<Word32T>>(left), right));
}
TNode<Uint32T> Word32Shr(TNode<Uint32T> left, TNode<Uint32T> right) {
- return Unsigned(
- Word32Shr(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Unsigned(Word32Shr(static_cast<TNode<Word32T>>(left), right));
}
TNode<Int32T> Word32Sar(TNode<Int32T> left, TNode<Int32T> right) {
- return Signed(
- Word32Sar(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Signed(Word32Sar(static_cast<TNode<Word32T>>(left), right));
+ }
+
+ TNode<Int64T> Word64Shl(TNode<Int64T> left, TNode<Int64T> right) {
+ return Signed(Word64Shl(static_cast<TNode<Word64T>>(left), right));
+ }
+ TNode<Uint64T> Word64Shl(TNode<Uint64T> left, TNode<Uint64T> right) {
+ return Unsigned(Word64Shl(static_cast<TNode<Word64T>>(left), right));
+ }
+ TNode<Uint64T> Word64Shr(TNode<Uint64T> left, TNode<Uint64T> right) {
+ return Unsigned(Word64Shr(static_cast<TNode<Word64T>>(left), right));
+ }
+ TNode<Int64T> Word64Sar(TNode<Int64T> left, TNode<Int64T> right) {
+ return Signed(Word64Sar(static_cast<TNode<Word64T>>(left), right));
+ }
+
+ TNode<Int64T> Word64And(TNode<Int64T> left, TNode<Int64T> right) {
+ return Signed(Word64And(static_cast<TNode<Word64T>>(left), right));
+ }
+ TNode<Uint64T> Word64And(TNode<Uint64T> left, TNode<Uint64T> right) {
+ return Unsigned(Word64And(static_cast<TNode<Word64T>>(left), right));
+ }
+
+ TNode<Int64T> Word64Xor(TNode<Int64T> left, TNode<Int64T> right) {
+ return Signed(Word64Xor(static_cast<TNode<Word64T>>(left), right));
+ }
+ TNode<Uint64T> Word64Xor(TNode<Uint64T> left, TNode<Uint64T> right) {
+ return Unsigned(Word64Xor(static_cast<TNode<Word64T>>(left), right));
+ }
+
+ TNode<Int64T> Word64Not(TNode<Int64T> value) {
+ return Signed(Word64Not(static_cast<TNode<Word64T>>(value)));
+ }
+ TNode<Uint64T> Word64Not(TNode<Uint64T> value) {
+ return Unsigned(Word64Not(static_cast<TNode<Word64T>>(value)));
}
TNode<IntPtrT> WordAnd(TNode<IntPtrT> left, TNode<IntPtrT> right) {
- return Signed(WordAnd(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Signed(WordAnd(static_cast<TNode<WordT>>(left),
+ static_cast<TNode<WordT>>(right)));
}
TNode<UintPtrT> WordAnd(TNode<UintPtrT> left, TNode<UintPtrT> right) {
- return Unsigned(
- WordAnd(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Unsigned(WordAnd(static_cast<TNode<WordT>>(left),
+ static_cast<TNode<WordT>>(right)));
}
TNode<Int32T> Word32And(TNode<Int32T> left, TNode<Int32T> right) {
- return Signed(
- Word32And(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Signed(Word32And(static_cast<TNode<Word32T>>(left),
+ static_cast<TNode<Word32T>>(right)));
}
TNode<Uint32T> Word32And(TNode<Uint32T> left, TNode<Uint32T> right) {
- return Unsigned(
- Word32And(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Unsigned(Word32And(static_cast<TNode<Word32T>>(left),
+ static_cast<TNode<Word32T>>(right)));
}
TNode<Int32T> Word32Or(TNode<Int32T> left, TNode<Int32T> right) {
- return Signed(
- Word32Or(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Signed(Word32Or(static_cast<TNode<Word32T>>(left),
+ static_cast<TNode<Word32T>>(right)));
}
TNode<Uint32T> Word32Or(TNode<Uint32T> left, TNode<Uint32T> right) {
- return Unsigned(
- Word32Or(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Unsigned(Word32Or(static_cast<TNode<Word32T>>(left),
+ static_cast<TNode<Word32T>>(right)));
}
TNode<BoolT> IntPtrEqual(TNode<WordT> left, TNode<WordT> right);
@@ -937,53 +996,82 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<BoolT> Word64NotEqual(TNode<Word64T> left, TNode<Word64T> right);
TNode<BoolT> Word32Or(TNode<BoolT> left, TNode<BoolT> right) {
- return UncheckedCast<BoolT>(
- Word32Or(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return UncheckedCast<BoolT>(Word32Or(static_cast<TNode<Word32T>>(left),
+ static_cast<TNode<Word32T>>(right)));
}
TNode<BoolT> Word32And(TNode<BoolT> left, TNode<BoolT> right) {
- return UncheckedCast<BoolT>(
- Word32And(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return UncheckedCast<BoolT>(Word32And(static_cast<TNode<Word32T>>(left),
+ static_cast<TNode<Word32T>>(right)));
}
TNode<Int32T> Int32Add(TNode<Int32T> left, TNode<Int32T> right) {
- return Signed(
- Int32Add(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Signed(Int32Add(static_cast<TNode<Word32T>>(left),
+ static_cast<TNode<Word32T>>(right)));
}
TNode<Uint32T> Uint32Add(TNode<Uint32T> left, TNode<Uint32T> right) {
- return Unsigned(
- Int32Add(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Unsigned(Int32Add(static_cast<TNode<Word32T>>(left),
+ static_cast<TNode<Word32T>>(right)));
+ }
+
+ TNode<Uint32T> Uint32Sub(TNode<Uint32T> left, TNode<Uint32T> right) {
+ return Unsigned(Int32Sub(static_cast<TNode<Word32T>>(left),
+ static_cast<TNode<Word32T>>(right)));
}
TNode<Int32T> Int32Sub(TNode<Int32T> left, TNode<Int32T> right) {
- return Signed(
- Int32Sub(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Signed(Int32Sub(static_cast<TNode<Word32T>>(left),
+ static_cast<TNode<Word32T>>(right)));
}
TNode<Int32T> Int32Mul(TNode<Int32T> left, TNode<Int32T> right) {
- return Signed(
- Int32Mul(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Signed(Int32Mul(static_cast<TNode<Word32T>>(left),
+ static_cast<TNode<Word32T>>(right)));
+ }
+
+ TNode<Int64T> Int64Add(TNode<Int64T> left, TNode<Int64T> right) {
+ return Signed(Int64Add(static_cast<TNode<Word64T>>(left), right));
+ }
+
+ TNode<Uint64T> Uint64Add(TNode<Uint64T> left, TNode<Uint64T> right) {
+ return Unsigned(Int64Add(static_cast<TNode<Word64T>>(left), right));
+ }
+
+ TNode<Int64T> Int64Sub(TNode<Int64T> left, TNode<Int64T> right) {
+ return Signed(Int64Sub(static_cast<TNode<Word64T>>(left), right));
+ }
+
+ TNode<Uint64T> Uint64Sub(TNode<Uint64T> left, TNode<Uint64T> right) {
+ return Unsigned(Int64Sub(static_cast<TNode<Word64T>>(left), right));
+ }
+
+ TNode<Int64T> Int64Mul(TNode<Int64T> left, TNode<Int64T> right) {
+ return Signed(Int64Mul(static_cast<TNode<Word64T>>(left), right));
+ }
+
+ TNode<Uint64T> Uint64Mul(TNode<Uint64T> left, TNode<Uint64T> right) {
+ return Unsigned(Int64Mul(static_cast<TNode<Word64T>>(left), right));
}
TNode<IntPtrT> IntPtrAdd(TNode<IntPtrT> left, TNode<IntPtrT> right) {
- return Signed(
- IntPtrAdd(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Signed(IntPtrAdd(static_cast<TNode<WordT>>(left),
+ static_cast<TNode<WordT>>(right)));
}
TNode<IntPtrT> IntPtrSub(TNode<IntPtrT> left, TNode<IntPtrT> right) {
- return Signed(
- IntPtrSub(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Signed(IntPtrSub(static_cast<TNode<WordT>>(left),
+ static_cast<TNode<WordT>>(right)));
}
TNode<IntPtrT> IntPtrMul(TNode<IntPtrT> left, TNode<IntPtrT> right) {
- return Signed(
- IntPtrMul(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Signed(IntPtrMul(static_cast<TNode<WordT>>(left),
+ static_cast<TNode<WordT>>(right)));
}
TNode<UintPtrT> UintPtrAdd(TNode<UintPtrT> left, TNode<UintPtrT> right) {
- return Unsigned(
- IntPtrAdd(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Unsigned(IntPtrAdd(static_cast<TNode<WordT>>(left),
+ static_cast<TNode<WordT>>(right)));
}
TNode<UintPtrT> UintPtrSub(TNode<UintPtrT> left, TNode<UintPtrT> right) {
- return Unsigned(
- IntPtrSub(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Unsigned(IntPtrSub(static_cast<TNode<WordT>>(left),
+ static_cast<TNode<WordT>>(right)));
}
TNode<RawPtrT> RawPtrAdd(TNode<RawPtrT> left, TNode<IntPtrT> right) {
return ReinterpretCast<RawPtrT>(IntPtrAdd(left, right));
@@ -992,8 +1080,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
return ReinterpretCast<RawPtrT>(IntPtrSub(left, right));
}
TNode<IntPtrT> RawPtrSub(TNode<RawPtrT> left, TNode<RawPtrT> right) {
- return Signed(
- IntPtrSub(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Signed(IntPtrSub(static_cast<TNode<WordT>>(left),
+ static_cast<TNode<WordT>>(right)));
}
TNode<WordT> WordShl(TNode<WordT> value, int shift);
@@ -1010,7 +1098,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Unary
#define DECLARE_CODE_ASSEMBLER_UNARY_OP(name, ResType, ArgType) \
- TNode<ResType> name(SloppyTNode<ArgType> a);
+ TNode<ResType> name(TNode<ArgType> a);
CODE_ASSEMBLER_UNARY_OP_LIST(DECLARE_CODE_ASSEMBLER_UNARY_OP)
#undef DECLARE_CODE_ASSEMBLER_UNARY_OP
@@ -1144,8 +1232,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<Int32T> arity = Int32Constant(argc);
TNode<Code> target = HeapConstant(callable.code());
return CAST(CallJSStubImpl(callable.descriptor(), target, CAST(context),
- CAST(function), TNode<Object>(), arity,
- {receiver, args...}));
+ CAST(function), {}, arity, {receiver, args...}));
}
template <class... TArgs>
@@ -1275,7 +1362,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* CallJSStubImpl(const CallInterfaceDescriptor& descriptor,
TNode<Object> target, TNode<Object> context,
- TNode<Object> function, TNode<Object> new_target,
+ TNode<Object> function,
+ base::Optional<TNode<Object>> new_target,
TNode<Int32T> arity, std::initializer_list<Node*> args);
Node* CallStubN(StubCallMode call_mode,
@@ -1284,6 +1372,9 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* AtomicLoad(MachineType type, TNode<RawPtrT> base, TNode<WordT> offset);
+ Node* UnalignedLoad(MachineType type, TNode<RawPtrT> base,
+ TNode<WordT> offset);
+
// These two don't have definitions and are here only for catching use cases
// where the cast is not necessary.
TNode<Int32T> Signed(TNode<Int32T> x);
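
The code-assembler.h hunks above add Word64/Int64 helpers that all follow one shape: a single untyped 64-bit machine operation exposed through a signed and an unsigned overload that differ only in the cast applied to the result. A toy sketch of that shape, with plain structs standing in for the TNode types:

#include <cstdint>

struct Int64Node { int64_t value; };
struct Uint64Node { uint64_t value; };

// The untyped machine-level operation (wraps on overflow, like the real op).
uint64_t RawWord64Add(uint64_t a, uint64_t b) { return a + b; }

Int64Node Int64Add(Int64Node a, Int64Node b) {
  return {static_cast<int64_t>(RawWord64Add(static_cast<uint64_t>(a.value),
                                            static_cast<uint64_t>(b.value)))};
}
Uint64Node Uint64Add(Uint64Node a, Uint64Node b) {
  return {RawWord64Add(a.value, b.value)};
}
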
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index 9a91ea15973..874bdb0d323 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -74,6 +74,9 @@ Reduction CommonOperatorReducer::Reduce(Node* node) {
return ReduceSwitch(node);
case IrOpcode::kStaticAssert:
return ReduceStaticAssert(node);
+ case IrOpcode::kTrapIf:
+ case IrOpcode::kTrapUnless:
+ return ReduceTrapConditional(node);
default:
break;
}
@@ -472,6 +475,30 @@ Reduction CommonOperatorReducer::ReduceStaticAssert(Node* node) {
}
}
+Reduction CommonOperatorReducer::ReduceTrapConditional(Node* trap) {
+ DCHECK(trap->opcode() == IrOpcode::kTrapIf ||
+ trap->opcode() == IrOpcode::kTrapUnless);
+ bool trapping_condition = trap->opcode() == IrOpcode::kTrapIf;
+ Node* const cond = trap->InputAt(0);
+ Decision decision = DecideCondition(broker(), cond);
+
+ if (decision == Decision::kUnknown) {
+ return NoChange();
+ } else if ((decision == Decision::kTrue) == trapping_condition) {
+ // This will always trap. Mark its outputs as dead and connect it to
+ // graph()->end().
+ ReplaceWithValue(trap, dead(), dead(), dead());
+ Node* effect = NodeProperties::GetEffectInput(trap);
+ Node* control = graph()->NewNode(common()->Throw(), effect, trap);
+ NodeProperties::MergeControlToEnd(graph(), common(), control);
+ Revisit(graph()->end());
+ return Changed(trap);
+ } else {
+ // This will not trap, remove it.
+ return Replace(NodeProperties::GetControlInput(trap));
+ }
+}
+
Reduction CommonOperatorReducer::Change(Node* node, Operator const* op,
Node* a) {
node->ReplaceInput(0, a);
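
The new ReduceTrapConditional reduction constant-folds wasm trap nodes. As a rough sketch (not part of the patch; the enum and function names below are invented for illustration), the decision it makes boils down to:

    enum class Decision { kUnknown, kTrue, kFalse };
    enum class TrapOutcome { kKeep, kAlwaysTraps, kNeverTraps };

    // Mirrors the branch structure of ReduceTrapConditional above.
    TrapOutcome ClassifyTrap(bool is_trap_if, Decision condition) {
      if (condition == Decision::kUnknown) return TrapOutcome::kKeep;  // NoChange()
      bool traps = (condition == Decision::kTrue) == is_trap_if;
      // kAlwaysTraps: rewire to graph()->end() through a Throw node;
      // kNeverTraps: replace the trap with its control input.
      return traps ? TrapOutcome::kAlwaysTraps : TrapOutcome::kNeverTraps;
    }
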
diff --git a/deps/v8/src/compiler/common-operator-reducer.h b/deps/v8/src/compiler/common-operator-reducer.h
index 4c7a06df16b..da07703b25f 100644
--- a/deps/v8/src/compiler/common-operator-reducer.h
+++ b/deps/v8/src/compiler/common-operator-reducer.h
@@ -43,6 +43,7 @@ class V8_EXPORT_PRIVATE CommonOperatorReducer final
Reduction ReduceSelect(Node* node);
Reduction ReduceSwitch(Node* node);
Reduction ReduceStaticAssert(Node* node);
+ Reduction ReduceTrapConditional(Node* node);
Reduction Change(Node* node, Operator const* op, Node* a);
Reduction Change(Node* node, Operator const* op, Node* a, Node* b);
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 73aca646ce5..9d30f4a9e55 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -1625,6 +1625,7 @@ CommonOperatorBuilder::CreateFrameStateFunctionInfo(
shared_info);
}
+#if V8_ENABLE_WEBASSEMBLY
const FrameStateFunctionInfo*
CommonOperatorBuilder::CreateJSToWasmFrameStateFunctionInfo(
FrameStateType type, int parameter_count, int local_count,
@@ -1635,6 +1636,7 @@ CommonOperatorBuilder::CreateJSToWasmFrameStateFunctionInfo(
return zone()->New<JSToWasmFrameStateFunctionInfo>(
type, parameter_count, local_count, shared_info, signature);
}
+#endif // V8_ENABLE_WEBASSEMBLY
const Operator* CommonOperatorBuilder::DeadValue(MachineRepresentation rep) {
return zone()->New<Operator1<MachineRepresentation>>( // --
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index bf0e3a7bab6..77483b14e86 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -561,10 +561,12 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const FrameStateFunctionInfo* CreateFrameStateFunctionInfo(
FrameStateType type, int parameter_count, int local_count,
Handle<SharedFunctionInfo> shared_info);
+#if V8_ENABLE_WEBASSEMBLY
const FrameStateFunctionInfo* CreateJSToWasmFrameStateFunctionInfo(
FrameStateType type, int parameter_count, int local_count,
Handle<SharedFunctionInfo> shared_info,
const wasm::FunctionSig* signature);
+#endif // V8_ENABLE_WEBASSEMBLY
const Operator* MarkAsSafetyCheck(const Operator* op,
IsSafetyCheck safety_check);
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
index 2628575e4de..3149fe490b9 100644
--- a/deps/v8/src/compiler/compilation-dependencies.cc
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -105,6 +105,128 @@ class StableMapDependency final : public CompilationDependency {
MapRef map_;
};
+class ConstantInDictionaryPrototypeChainDependency final
+ : public CompilationDependency {
+ public:
+ explicit ConstantInDictionaryPrototypeChainDependency(
+ const MapRef receiver_map, const NameRef property_name,
+ const ObjectRef constant, PropertyKind kind)
+ : receiver_map_(receiver_map),
+ property_name_{property_name},
+ constant_{constant},
+ kind_{kind} {
+ DCHECK(V8_DICT_PROPERTY_CONST_TRACKING_BOOL);
+ }
+
+ // Checks that |constant_| is still the value of accessing |property_name_|
+ // starting at |receiver_map_|.
+ bool IsValid() const override { return !GetHolderIfValid().is_null(); }
+
+ void Install(const MaybeObjectHandle& code) const override {
+ SLOW_DCHECK(IsValid());
+ Isolate* isolate = receiver_map_.isolate();
+ Handle<JSObject> holder = GetHolderIfValid().ToHandleChecked();
+ Handle<Map> map = receiver_map_.object();
+
+ while (map->prototype() != *holder) {
+ map = handle(map->prototype().map(), isolate);
+ DCHECK(map->IsJSObjectMap()); // Due to IsValid holding.
+ DependentCode::InstallDependency(isolate, code, map,
+ DependentCode::kPrototypeCheckGroup);
+ }
+
+ DCHECK(map->prototype().map().IsJSObjectMap()); // Due to IsValid holding.
+ DependentCode::InstallDependency(isolate, code,
+ handle(map->prototype().map(), isolate),
+ DependentCode::kPrototypeCheckGroup);
+ }
+
+ private:
+  // If the dependency is still valid, returns the holder of the constant.
+  // Otherwise returns null.
+ // TODO(neis) Currently, invoking IsValid and then Install duplicates the call
+ // to GetHolderIfValid. Instead, consider letting IsValid change the state
+ // (and store the holder), or merge IsValid and Install.
+ MaybeHandle<JSObject> GetHolderIfValid() const {
+ DisallowGarbageCollection no_gc;
+ Isolate* isolate = receiver_map_.isolate();
+
+ Handle<Object> holder;
+ HeapObject prototype = receiver_map_.object()->prototype();
+
+ enum class ValidationResult { kFoundCorrect, kFoundIncorrect, kNotFound };
+ auto try_load = [&](auto dictionary) -> ValidationResult {
+ InternalIndex entry =
+ dictionary.FindEntry(isolate, property_name_.object());
+ if (entry.is_not_found()) {
+ return ValidationResult::kNotFound;
+ }
+
+ PropertyDetails details = dictionary.DetailsAt(entry);
+ if (details.constness() != PropertyConstness::kConst) {
+ return ValidationResult::kFoundIncorrect;
+ }
+
+ Object dictionary_value = dictionary.ValueAt(entry);
+ Object value;
+ // We must be able to detect the case that the property |property_name_|
+ // of |holder_| was originally a plain function |constant_| (when creating
+ // this dependency) and has since become an accessor whose getter is
+ // |constant_|. Therefore, we cannot just look at the property kind of
+ // |details|, because that reflects the current situation, not the one
+ // when creating this dependency.
+ if (details.kind() != kind_) {
+ return ValidationResult::kFoundIncorrect;
+ }
+ if (kind_ == PropertyKind::kAccessor) {
+ if (!dictionary_value.IsAccessorPair()) {
+ return ValidationResult::kFoundIncorrect;
+ }
+ // Only supporting loading at the moment, so we only ever want the
+ // getter.
+ value = AccessorPair::cast(dictionary_value)
+ .get(AccessorComponent::ACCESSOR_GETTER);
+ } else {
+ value = dictionary_value;
+ }
+ return value == *constant_.object() ? ValidationResult::kFoundCorrect
+ : ValidationResult::kFoundIncorrect;
+ };
+
+ while (prototype.IsJSObject()) {
+ // We only care about JSObjects because that's the only type of holder
+ // (and types of prototypes on the chain to the holder) that
+ // AccessInfoFactory::ComputePropertyAccessInfo allows.
+ JSObject object = JSObject::cast(prototype);
+
+ // We only support dictionary mode prototypes on the chain for this kind
+ // of dependency.
+ CHECK(!object.HasFastProperties());
+
+ ValidationResult result =
+ V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL
+ ? try_load(object.property_dictionary_swiss())
+ : try_load(object.property_dictionary());
+
+ if (result == ValidationResult::kFoundCorrect) {
+ return handle(object, isolate);
+ } else if (result == ValidationResult::kFoundIncorrect) {
+ return MaybeHandle<JSObject>();
+ }
+
+ // In case of kNotFound, continue walking up the chain.
+ prototype = object.map().prototype();
+ }
+
+ return MaybeHandle<JSObject>();
+ }
+
+ MapRef receiver_map_;
+ NameRef property_name_;
+ ObjectRef constant_;
+ PropertyKind kind_;
+};
+
class TransitionDependency final : public CompilationDependency {
public:
explicit TransitionDependency(const MapRef& map) : map_(map) {
@@ -170,7 +292,7 @@ class FieldRepresentationDependency final : public CompilationDependency {
bool IsValid() const override {
DisallowGarbageCollection no_heap_allocation;
Handle<Map> owner = owner_.object();
- return representation_.Equals(owner->instance_descriptors(kRelaxedLoad)
+ return representation_.Equals(owner->instance_descriptors(owner_.isolate())
.GetDetails(descriptor_)
.representation());
}
@@ -209,8 +331,8 @@ class FieldTypeDependency final : public CompilationDependency {
DisallowGarbageCollection no_heap_allocation;
Handle<Map> owner = owner_.object();
Handle<Object> type = type_.object();
- return *type ==
- owner->instance_descriptors(kRelaxedLoad).GetFieldType(descriptor_);
+ return *type == owner->instance_descriptors(owner_.isolate())
+ .GetFieldType(descriptor_);
}
void Install(const MaybeObjectHandle& code) const override {
@@ -238,7 +360,7 @@ class FieldConstnessDependency final : public CompilationDependency {
DisallowGarbageCollection no_heap_allocation;
Handle<Map> owner = owner_.object();
return PropertyConstness::kConst ==
- owner->instance_descriptors(kRelaxedLoad)
+ owner->instance_descriptors(owner_.isolate())
.GetDetails(descriptor_)
.constness();
}
@@ -394,6 +516,7 @@ ObjectRef CompilationDependencies::DependOnPrototypeProperty(
}
void CompilationDependencies::DependOnStableMap(const MapRef& map) {
+ DCHECK(!map.is_dictionary_map());
DCHECK(!map.IsNeverSerializedHeapObject());
if (map.CanTransition()) {
RecordDependency(zone_->New<StableMapDependency>(map));
@@ -402,6 +525,13 @@ void CompilationDependencies::DependOnStableMap(const MapRef& map) {
}
}
+void CompilationDependencies::DependOnConstantInDictionaryPrototypeChain(
+ const MapRef& receiver_map, const NameRef& property_name,
+ const ObjectRef& constant, PropertyKind kind) {
+ RecordDependency(zone_->New<ConstantInDictionaryPrototypeChainDependency>(
+ receiver_map, property_name, constant, kind));
+}
+
AllocationType CompilationDependencies::DependOnPretenureMode(
const AllocationSiteRef& site) {
DCHECK(!site.IsNeverSerializedHeapObject());
diff --git a/deps/v8/src/compiler/compilation-dependencies.h b/deps/v8/src/compiler/compilation-dependencies.h
index bcf619ea093..5cf2a3f94c4 100644
--- a/deps/v8/src/compiler/compilation-dependencies.h
+++ b/deps/v8/src/compiler/compilation-dependencies.h
@@ -45,6 +45,20 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
// Record the assumption that {map} stays stable.
void DependOnStableMap(const MapRef& map);
+ // Depend on the fact that accessing property |property_name| from
+ // |receiver_map| yields the constant value |constant|, which is held by
+  // |holder|. Therefore, the dependency must be invalidated if a) |property_name|
+  // is added to any of the objects between receiver and |holder| on the
+  // prototype chain, b) any of the objects on the prototype chain up to |holder|
+  // change prototypes, or c) the value of |property_name| in |holder| changes.
+ // If PropertyKind is kData, |constant| is the value of the property in
+ // question. In case of PropertyKind::kAccessor, |constant| is the accessor
+ // function (i.e., getter or setter) itself, not the overall AccessorPair.
+ void DependOnConstantInDictionaryPrototypeChain(const MapRef& receiver_map,
+ const NameRef& property_name,
+ const ObjectRef& constant,
+ PropertyKind kind);
+
// Return the pretenure mode of {site} and record the assumption that it does
// not change.
AllocationType DependOnPretenureMode(const AllocationSiteRef& site);
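
A hedged usage sketch for the new declaration (the helper and its surrounding context are invented for illustration; only the DependOnConstantInDictionaryPrototypeChain signature comes from the patch):

    // Record that |constant| was found for |name| on a dictionary-mode
    // prototype reachable from |receiver_map|, so that optimized code is
    // invalidated when any of the conditions a)-c) above stop holding.
    void RecordDictionaryProtoConstant(CompilationDependencies* deps,
                                       const MapRef& receiver_map,
                                       const NameRef& name,
                                       const ObjectRef& constant) {
      deps->DependOnConstantInDictionaryPrototypeChain(
          receiver_map, name, constant, PropertyKind::kData);
    }
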
diff --git a/deps/v8/src/compiler/csa-load-elimination.cc b/deps/v8/src/compiler/csa-load-elimination.cc
index 17250bba5e7..dadbeb0f7b7 100644
--- a/deps/v8/src/compiler/csa-load-elimination.cc
+++ b/deps/v8/src/compiler/csa-load-elimination.cc
@@ -54,36 +54,35 @@ Reduction CsaLoadElimination::Reduce(Node* node) {
case IrOpcode::kEffectPhi:
return ReduceEffectPhi(node);
case IrOpcode::kDead:
- break;
+ return NoChange();
case IrOpcode::kStart:
return ReduceStart(node);
default:
return ReduceOtherNode(node);
}
- return NoChange();
+ UNREACHABLE();
}
namespace CsaLoadEliminationHelpers {
-bool IsCompatible(MachineRepresentation r1, MachineRepresentation r2) {
- if (r1 == r2) return true;
- return IsAnyTagged(r1) && IsAnyTagged(r2);
+bool Subsumes(MachineRepresentation from, MachineRepresentation to) {
+ if (from == to) return true;
+ if (IsAnyTagged(from)) return IsAnyTagged(to);
+ if (IsIntegral(from)) {
+ return IsIntegral(to) && ElementSizeInBytes(from) >= ElementSizeInBytes(to);
+ }
+ return false;
}
bool ObjectMayAlias(Node* a, Node* b) {
if (a != b) {
- if (b->opcode() == IrOpcode::kAllocate) {
- std::swap(a, b);
- }
- if (a->opcode() == IrOpcode::kAllocate) {
- switch (b->opcode()) {
- case IrOpcode::kAllocate:
- case IrOpcode::kHeapConstant:
- case IrOpcode::kParameter:
- return false;
- default:
- break;
- }
+ if (NodeProperties::IsFreshObject(b)) std::swap(a, b);
+ if (NodeProperties::IsFreshObject(a) &&
+ (NodeProperties::IsFreshObject(b) ||
+ b->opcode() == IrOpcode::kParameter ||
+ b->opcode() == IrOpcode::kLoadImmutable ||
+ IrOpcode::IsConstantOpcode(b->opcode()))) {
+ return false;
}
}
return true;
@@ -181,9 +180,11 @@ Reduction CsaLoadElimination::ReduceLoadFromObject(Node* node,
if (!lookup_result.IsEmpty()) {
// Make sure we don't reuse values that were recorded with a different
// representation or resurrect dead {replacement} nodes.
- Node* replacement = lookup_result.value;
- if (Helpers::IsCompatible(representation, lookup_result.representation) &&
- !replacement->IsDead()) {
+ MachineRepresentation from = lookup_result.representation;
+ if (Helpers::Subsumes(from, representation) &&
+ !lookup_result.value->IsDead()) {
+ Node* replacement =
+ TruncateAndExtend(lookup_result.value, from, access.machine_type);
ReplaceWithValue(node, replacement, effect);
return Replace(replacement);
}
@@ -255,24 +256,20 @@ Reduction CsaLoadElimination::ReduceCall(Node* node) {
}
Reduction CsaLoadElimination::ReduceOtherNode(Node* node) {
- if (node->op()->EffectInputCount() == 1) {
- if (node->op()->EffectOutputCount() == 1) {
- Node* const effect = NodeProperties::GetEffectInput(node);
- AbstractState const* state = node_states_.Get(effect);
- // If we do not know anything about the predecessor, do not propagate
- // just yet because we will have to recompute anyway once we compute
- // the predecessor.
- if (state == nullptr) return NoChange();
- // Check if this {node} has some uncontrolled side effects.
- if (!node->op()->HasProperty(Operator::kNoWrite)) {
- state = empty_state();
- }
- return UpdateState(node, state);
- } else {
- return NoChange();
- }
+ if (node->op()->EffectInputCount() == 1 &&
+ node->op()->EffectOutputCount() == 1) {
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ // If we do not know anything about the predecessor, do not propagate just
+ // yet because we will have to recompute anyway once we compute the
+ // predecessor.
+ if (state == nullptr) return NoChange();
+ // If this {node} has some uncontrolled side effects, set its state to
+ // {empty_state()}, otherwise to its input state.
+ return UpdateState(node, node->op()->HasProperty(Operator::kNoWrite)
+ ? state
+ : empty_state());
}
- DCHECK_EQ(0, node->op()->EffectInputCount());
DCHECK_EQ(0, node->op()->EffectOutputCount());
return NoChange();
}
@@ -323,10 +320,58 @@ CsaLoadElimination::AbstractState const* CsaLoadElimination::ComputeLoopState(
return state;
}
+Node* CsaLoadElimination::TruncateAndExtend(Node* node,
+ MachineRepresentation from,
+ MachineType to) {
+ DCHECK(Helpers::Subsumes(from, to.representation()));
+ DCHECK_GE(ElementSizeInBytes(from), ElementSizeInBytes(to.representation()));
+
+ if (to == MachineType::Int8() || to == MachineType::Int16()) {
+ // 1st case: We want to eliminate a signed 8/16-bit load using the value
+ // from a previous subsuming load or store. Since that value might be
+ // outside 8/16-bit range, we first truncate it accordingly. Then we
+ // sign-extend the result to 32-bit.
+ DCHECK_EQ(to.semantic(), MachineSemantic::kInt32);
+ if (from == MachineRepresentation::kWord64) {
+ node = graph()->NewNode(machine()->TruncateInt64ToInt32(), node);
+ }
+ int shift = 32 - 8 * ElementSizeInBytes(to.representation());
+ return graph()->NewNode(machine()->Word32Sar(),
+ graph()->NewNode(machine()->Word32Shl(), node,
+ jsgraph()->Int32Constant(shift)),
+ jsgraph()->Int32Constant(shift));
+ } else if (to == MachineType::Uint8() || to == MachineType::Uint16()) {
+ // 2nd case: We want to eliminate an unsigned 8/16-bit load using the value
+ // from a previous subsuming load or store. Since that value might be
+ // outside 8/16-bit range, we first truncate it accordingly.
+ if (from == MachineRepresentation::kWord64) {
+ node = graph()->NewNode(machine()->TruncateInt64ToInt32(), node);
+ }
+ int mask = (1 << 8 * ElementSizeInBytes(to.representation())) - 1;
+ return graph()->NewNode(machine()->Word32And(), node,
+ jsgraph()->Int32Constant(mask));
+ } else if (from == MachineRepresentation::kWord64 &&
+ to.representation() == MachineRepresentation::kWord32) {
+ // 3rd case: Truncate 64-bits into 32-bits.
+ return graph()->NewNode(machine()->TruncateInt64ToInt32(), node);
+ } else {
+ // 4th case: No need for truncation.
+ DCHECK((from == to.representation() &&
+ (from == MachineRepresentation::kWord32 ||
+ from == MachineRepresentation::kWord64 || !IsIntegral(from))) ||
+ (IsAnyTagged(from) && IsAnyTagged(to.representation())));
+ return node;
+ }
+}
+
CommonOperatorBuilder* CsaLoadElimination::common() const {
return jsgraph()->common();
}
+MachineOperatorBuilder* CsaLoadElimination::machine() const {
+ return jsgraph()->machine();
+}
+
Graph* CsaLoadElimination::graph() const { return jsgraph()->graph(); }
Isolate* CsaLoadElimination::isolate() const { return jsgraph()->isolate(); }
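
The width-adjustment rules that TruncateAndExtend builds out of graph operators can be restated as plain integer arithmetic. A minimal sketch, assuming the stored value has already been narrowed to a 32-bit word (the 64-bit case first goes through TruncateInt64ToInt32) and assuming an arithmetic right shift, which is what Word32Sar provides:

    #include <cstdint>

    // Re-reading a narrower value out of a wider stored 32-bit word `w`.
    int32_t LoadAsInt8(int32_t w) {   // Word32Shl + Word32Sar: sign-extend low byte
      return static_cast<int32_t>(static_cast<uint32_t>(w) << 24) >> 24;
    }
    int32_t LoadAsUint8(int32_t w) {  // Word32And with 0xff: zero-extend low byte
      return w & 0xff;
    }
    int32_t LoadAsInt16(int32_t w) {
      return static_cast<int32_t>(static_cast<uint32_t>(w) << 16) >> 16;
    }
    int32_t LoadAsUint16(int32_t w) {
      return w & 0xffff;
    }
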
diff --git a/deps/v8/src/compiler/csa-load-elimination.h b/deps/v8/src/compiler/csa-load-elimination.h
index f738475a945..bd314cad8e4 100644
--- a/deps/v8/src/compiler/csa-load-elimination.h
+++ b/deps/v8/src/compiler/csa-load-elimination.h
@@ -97,8 +97,11 @@ class V8_EXPORT_PRIVATE CsaLoadElimination final
AbstractState const* ComputeLoopState(Node* node,
AbstractState const* state) const;
+ Node* TruncateAndExtend(Node* node, MachineRepresentation from,
+ MachineType to);
CommonOperatorBuilder* common() const;
+ MachineOperatorBuilder* machine() const;
Isolate* isolate() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index d64c3c80e5d..dedf68e93c2 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -50,8 +50,7 @@ class EffectControlLinearizer {
broker_(broker),
graph_assembler_(js_graph, temp_zone, base::nullopt,
should_maintain_schedule() ? schedule : nullptr),
- frame_state_zapper_(nullptr),
- fast_api_call_stack_slot_(nullptr) {}
+ frame_state_zapper_(nullptr) {}
void Run();
@@ -322,8 +321,6 @@ class EffectControlLinearizer {
JSHeapBroker* broker_;
JSGraphAssembler graph_assembler_;
Node* frame_state_zapper_; // For tracking down compiler::Node::New crashes.
- Node* fast_api_call_stack_slot_; // For caching the stack slot allocated for
- // fast API calls.
};
namespace {
@@ -5011,28 +5008,24 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
CHECK_EQ(FastApiCallNode::ArityForArgc(c_arg_count, js_arg_count),
value_input_count);
- if (fast_api_call_stack_slot_ == nullptr) {
+ Node* stack_slot = nullptr;
+ if (c_signature->HasOptions()) {
int kAlign = alignof(v8::FastApiCallbackOptions);
int kSize = sizeof(v8::FastApiCallbackOptions);
// If this check fails, you've probably added new fields to
// v8::FastApiCallbackOptions, which means you'll need to write code
- // that initializes and reads from them too (see the Store and Load to
- // fast_api_call_stack_slot_ below).
+ // that initializes and reads from them too.
CHECK_EQ(kSize, sizeof(uintptr_t) * 2);
- fast_api_call_stack_slot_ = __ StackSlot(kSize, kAlign);
- }
+ stack_slot = __ StackSlot(kSize, kAlign);
- // Leave the slot uninit if the callback doesn't use it.
- if (c_signature->HasOptions()) {
- // Generate the stores to `fast_api_call_stack_slot_`.
__ Store(
StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier),
- fast_api_call_stack_slot_,
+ stack_slot,
static_cast<int>(offsetof(v8::FastApiCallbackOptions, fallback)),
- jsgraph()->ZeroConstant());
+ __ ZeroConstant());
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
- fast_api_call_stack_slot_,
+ stack_slot,
static_cast<int>(offsetof(v8::FastApiCallbackOptions, data)),
n.SlowCallArgument(FastApiCallNode::kSlowCallDataArgumentIndex));
}
@@ -5047,7 +5040,7 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
builder.AddParam(machine_type);
}
if (c_signature->HasOptions()) {
- builder.AddParam(MachineType::Pointer()); // fast_api_call_stack_slot_
+ builder.AddParam(MachineType::Pointer()); // stack_slot
}
CallDescriptor* call_descriptor =
@@ -5076,7 +5069,7 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
}
}
if (c_signature->HasOptions()) {
- inputs[c_arg_count + 1] = fast_api_call_stack_slot_;
+ inputs[c_arg_count + 1] = stack_slot;
inputs[c_arg_count + 2] = __ effect();
inputs[c_arg_count + 3] = __ control();
} else {
@@ -5125,9 +5118,9 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
if (!c_signature->HasOptions()) return fast_call_result;
- // Generate the load from `fast_api_call_stack_slot_`.
+ DCHECK_NOT_NULL(stack_slot);
Node* load =
- __ Load(MachineType::Int32(), fast_api_call_stack_slot_,
+ __ Load(MachineType::Int32(), stack_slot,
static_cast<int>(offsetof(v8::FastApiCallbackOptions, fallback)));
Node* is_zero = __ Word32Equal(load, __ Int32Constant(0));
diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc
index b7c4588e363..8e630778adc 100644
--- a/deps/v8/src/compiler/frame-states.cc
+++ b/deps/v8/src/compiler/frame-states.cc
@@ -11,7 +11,10 @@
#include "src/compiler/node.h"
#include "src/handles/handles-inl.h"
#include "src/objects/objects-inl.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/value-type.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -61,9 +64,11 @@ std::ostream& operator<<(std::ostream& os, FrameStateType type) {
case FrameStateType::kBuiltinContinuation:
os << "BUILTIN_CONTINUATION_FRAME";
break;
+#if V8_ENABLE_WEBASSEMBLY
case FrameStateType::kJSToWasmBuiltinContinuation:
os << "JS_TO_WASM_BUILTIN_CONTINUATION_FRAME";
break;
+#endif // V8_ENABLE_WEBASSEMBLY
case FrameStateType::kJavaScriptBuiltinContinuation:
os << "JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME";
break;
@@ -119,11 +124,18 @@ FrameState CreateBuiltinContinuationFrameStateCommon(
Node* params_node = graph->NewNode(op_param, parameter_count, parameters);
BytecodeOffset bailout_id = Builtins::GetContinuationBytecodeOffset(name);
+#if V8_ENABLE_WEBASSEMBLY
const FrameStateFunctionInfo* state_info =
signature ? common->CreateJSToWasmFrameStateFunctionInfo(
frame_type, parameter_count, 0, shared, signature)
: common->CreateFrameStateFunctionInfo(
frame_type, parameter_count, 0, shared);
+#else
+ DCHECK_NULL(signature);
+ const FrameStateFunctionInfo* state_info =
+ common->CreateFrameStateFunctionInfo(frame_type, parameter_count, 0,
+ shared);
+#endif // V8_ENABLE_WEBASSEMBLY
const Operator* op = common->FrameState(
bailout_id, OutputFrameStateCombine::Ignore(), state_info);
@@ -167,29 +179,33 @@ FrameState CreateStubBuiltinContinuationFrameState(
}
FrameStateType frame_state_type = FrameStateType::kBuiltinContinuation;
+#if V8_ENABLE_WEBASSEMBLY
if (name == Builtins::kJSToWasmLazyDeoptContinuation) {
CHECK_NOT_NULL(signature);
frame_state_type = FrameStateType::kJSToWasmBuiltinContinuation;
}
+#endif // V8_ENABLE_WEBASSEMBLY
return CreateBuiltinContinuationFrameStateCommon(
jsgraph, frame_state_type, name, jsgraph->UndefinedConstant(), context,
actual_parameters.data(), static_cast<int>(actual_parameters.size()),
outer_frame_state, Handle<SharedFunctionInfo>(), signature);
}
+#if V8_ENABLE_WEBASSEMBLY
FrameState CreateJSWasmCallBuiltinContinuationFrameState(
JSGraph* jsgraph, Node* context, Node* outer_frame_state,
const wasm::FunctionSig* signature) {
- base::Optional<wasm::ValueKind> wasm_return_type =
+ base::Optional<wasm::ValueKind> wasm_return_kind =
wasm::WasmReturnTypeFromSignature(signature);
Node* node_return_type =
- jsgraph->SmiConstant(wasm_return_type ? wasm_return_type.value() : -1);
+ jsgraph->SmiConstant(wasm_return_kind ? wasm_return_kind.value() : -1);
Node* lazy_deopt_parameters[] = {node_return_type};
return CreateStubBuiltinContinuationFrameState(
jsgraph, Builtins::kJSToWasmLazyDeoptContinuation, context,
lazy_deopt_parameters, arraysize(lazy_deopt_parameters),
outer_frame_state, ContinuationFrameStateMode::LAZY, signature);
}
+#endif // V8_ENABLE_WEBASSEMBLY
FrameState CreateJavaScriptBuiltinContinuationFrameState(
JSGraph* jsgraph, const SharedFunctionInfoRef& shared, Builtins::Name name,
diff --git a/deps/v8/src/compiler/frame-states.h b/deps/v8/src/compiler/frame-states.h
index 32586264e79..e6d5416cc9f 100644
--- a/deps/v8/src/compiler/frame-states.h
+++ b/deps/v8/src/compiler/frame-states.h
@@ -14,6 +14,11 @@
namespace v8 {
namespace internal {
+namespace wasm {
+class ValueType;
+using FunctionSig = Signature<ValueType>;
+} // namespace wasm
+
namespace compiler {
class JSGraph;
@@ -66,8 +71,10 @@ enum class FrameStateType {
kArgumentsAdaptor, // Represents an ArgumentsAdaptorFrame.
kConstructStub, // Represents a ConstructStubFrame.
kBuiltinContinuation, // Represents a continuation to a stub.
+#if V8_ENABLE_WEBASSEMBLY // ↓ WebAssembly only
kJSToWasmBuiltinContinuation, // Represents a lazy deopt continuation for a
// JS to Wasm call.
+#endif // ↑ WebAssembly only
kJavaScriptBuiltinContinuation, // Represents a continuation to a JavaScipt
// builtin.
kJavaScriptBuiltinContinuationWithCatch // Represents a continuation to a
@@ -103,6 +110,7 @@ class FrameStateFunctionInfo {
Handle<SharedFunctionInfo> const shared_info_;
};
+#if V8_ENABLE_WEBASSEMBLY
class JSToWasmFrameStateFunctionInfo : public FrameStateFunctionInfo {
public:
JSToWasmFrameStateFunctionInfo(FrameStateType type, int parameter_count,
@@ -119,6 +127,7 @@ class JSToWasmFrameStateFunctionInfo : public FrameStateFunctionInfo {
private:
const wasm::FunctionSig* const signature_;
};
+#endif // V8_ENABLE_WEBASSEMBLY
class FrameStateInfo final {
public:
@@ -170,9 +179,11 @@ FrameState CreateStubBuiltinContinuationFrameState(
ContinuationFrameStateMode mode,
const wasm::FunctionSig* signature = nullptr);
+#if V8_ENABLE_WEBASSEMBLY
FrameState CreateJSWasmCallBuiltinContinuationFrameState(
JSGraph* jsgraph, Node* context, Node* outer_frame_state,
const wasm::FunctionSig* signature);
+#endif // V8_ENABLE_WEBASSEMBLY
FrameState CreateJavaScriptBuiltinContinuationFrameState(
JSGraph* graph, const SharedFunctionInfoRef& shared, Builtins::Name name,
diff --git a/deps/v8/src/compiler/frame.cc b/deps/v8/src/compiler/frame.cc
index 98938e83589..83583d7976f 100644
--- a/deps/v8/src/compiler/frame.cc
+++ b/deps/v8/src/compiler/frame.cc
@@ -12,27 +12,30 @@ namespace compiler {
Frame::Frame(int fixed_frame_size_in_slots)
: fixed_slot_count_(fixed_frame_size_in_slots),
- frame_slot_count_(fixed_frame_size_in_slots),
- spill_slot_count_(0),
- return_slot_count_(0),
allocated_registers_(nullptr),
- allocated_double_registers_(nullptr) {}
+ allocated_double_registers_(nullptr) {
+ slot_allocator_.AllocateUnaligned(fixed_frame_size_in_slots);
+}
void Frame::AlignFrame(int alignment) {
- int alignment_slots = alignment / kSystemPointerSize;
- // In the calculations below we assume that alignment_slots is a power of 2.
- DCHECK(base::bits::IsPowerOfTwo(alignment_slots));
+#if DEBUG
+ spill_slots_finished_ = true;
+ frame_aligned_ = true;
+#endif
+ // In the calculations below we assume that alignment is a power of 2.
+ DCHECK(base::bits::IsPowerOfTwo(alignment));
+ int alignment_in_slots = AlignedSlotAllocator::NumSlotsForWidth(alignment);
// We have to align return slots separately, because they are claimed
// separately on the stack.
- int return_delta =
- alignment_slots - (return_slot_count_ & (alignment_slots - 1));
- if (return_delta != alignment_slots) {
- frame_slot_count_ += return_delta;
+ const int mask = alignment_in_slots - 1;
+ int return_delta = alignment_in_slots - (return_slot_count_ & mask);
+ if (return_delta != alignment_in_slots) {
+ return_slot_count_ += return_delta;
}
- int delta = alignment_slots - (frame_slot_count_ & (alignment_slots - 1));
- if (delta != alignment_slots) {
- frame_slot_count_ += delta;
+ int delta = alignment_in_slots - (slot_allocator_.Size() & mask);
+ if (delta != alignment_in_slots) {
+ slot_allocator_.Align(alignment_in_slots);
if (spill_slot_count_ != 0) {
spill_slot_count_ += delta;
}
diff --git a/deps/v8/src/compiler/frame.h b/deps/v8/src/compiler/frame.h
index 7fc0c27b845..2320ee5dcb7 100644
--- a/deps/v8/src/compiler/frame.h
+++ b/deps/v8/src/compiler/frame.h
@@ -5,6 +5,8 @@
#ifndef V8_COMPILER_FRAME_H_
#define V8_COMPILER_FRAME_H_
+#include "src/base/bits.h"
+#include "src/codegen/aligned-slot-allocator.h"
#include "src/execution/frame-constants.h"
#include "src/utils/bit-vector.h"
@@ -92,7 +94,9 @@ class V8_EXPORT_PRIVATE Frame : public ZoneObject {
Frame(const Frame&) = delete;
Frame& operator=(const Frame&) = delete;
- inline int GetTotalFrameSlotCount() const { return frame_slot_count_; }
+ inline int GetTotalFrameSlotCount() const {
+ return slot_allocator_.Size() + return_slot_count_;
+ }
inline int GetFixedSlotCount() const { return fixed_slot_count_; }
inline int GetSpillSlotCount() const { return spill_slot_count_; }
inline int GetReturnSlotCount() const { return return_slot_count_; }
@@ -112,69 +116,84 @@ class V8_EXPORT_PRIVATE Frame : public ZoneObject {
}
void AlignSavedCalleeRegisterSlots(int alignment = kDoubleSize) {
- int alignment_slots = alignment / kSystemPointerSize;
- int delta = alignment_slots - (frame_slot_count_ & (alignment_slots - 1));
- if (delta != alignment_slots) {
- frame_slot_count_ += delta;
- }
- spill_slot_count_ += delta;
+ DCHECK(!frame_aligned_);
+#if DEBUG
+ spill_slots_finished_ = true;
+#endif
+ DCHECK(base::bits::IsPowerOfTwo(alignment));
+ DCHECK_LE(alignment, kSimd128Size);
+ int alignment_in_slots = AlignedSlotAllocator::NumSlotsForWidth(alignment);
+ int padding = slot_allocator_.Align(alignment_in_slots);
+ spill_slot_count_ += padding;
}
void AllocateSavedCalleeRegisterSlots(int count) {
- frame_slot_count_ += count;
+ DCHECK(!frame_aligned_);
+#if DEBUG
+ spill_slots_finished_ = true;
+#endif
+ slot_allocator_.AllocateUnaligned(count);
}
int AllocateSpillSlot(int width, int alignment = 0) {
- DCHECK_EQ(frame_slot_count_,
+ DCHECK_EQ(GetTotalFrameSlotCount(),
fixed_slot_count_ + spill_slot_count_ + return_slot_count_);
- int frame_slot_count_before = frame_slot_count_;
- if (alignment > kSystemPointerSize) {
- // Slots are pointer sized, so alignment greater than a pointer size
- // requires allocating additional slots.
- width += alignment - kSystemPointerSize;
+ // Never allocate spill slots after the callee-saved slots are defined.
+ DCHECK(!spill_slots_finished_);
+ DCHECK(!frame_aligned_);
+ int actual_width = std::max({width, AlignedSlotAllocator::kSlotSize});
+ int actual_alignment =
+ std::max({alignment, AlignedSlotAllocator::kSlotSize});
+ int slots = AlignedSlotAllocator::NumSlotsForWidth(actual_width);
+ int old_end = slot_allocator_.Size();
+ int slot;
+ if (actual_width == actual_alignment) {
+ // Simple allocation, alignment equal to width.
+ slot = slot_allocator_.Allocate(slots);
+ } else {
+ // Complex allocation, alignment different from width.
+ if (actual_alignment > AlignedSlotAllocator::kSlotSize) {
+ // Alignment required.
+ int alignment_in_slots =
+ AlignedSlotAllocator::NumSlotsForWidth(actual_alignment);
+ slot_allocator_.Align(alignment_in_slots);
+ }
+ slot = slot_allocator_.AllocateUnaligned(slots);
}
- AllocateAlignedFrameSlots(width);
- spill_slot_count_ += frame_slot_count_ - frame_slot_count_before;
- return frame_slot_count_ - return_slot_count_ - 1;
+ int end = slot_allocator_.Size();
+
+ spill_slot_count_ += end - old_end;
+ return slot + slots - 1;
}
void EnsureReturnSlots(int count) {
- if (count > return_slot_count_) {
- count -= return_slot_count_;
- frame_slot_count_ += count;
- return_slot_count_ += count;
- }
+ DCHECK(!frame_aligned_);
+ return_slot_count_ = std::max(return_slot_count_, count);
}
void AlignFrame(int alignment = kDoubleSize);
int ReserveSpillSlots(size_t slot_count) {
DCHECK_EQ(0, spill_slot_count_);
+ DCHECK(!frame_aligned_);
spill_slot_count_ += static_cast<int>(slot_count);
- frame_slot_count_ += static_cast<int>(slot_count);
- return frame_slot_count_ - 1;
- }
-
- private:
- void AllocateAlignedFrameSlots(int width) {
- DCHECK_LT(0, width);
- int new_frame_slots = (width + kSystemPointerSize - 1) / kSystemPointerSize;
- // Align to 8 bytes if width is a multiple of 8 bytes, and to 16 bytes if
- // multiple of 16.
- int align_to =
- (width & 15) == 0 ? 16 : (width & 7) == 0 ? 8 : kSystemPointerSize;
- frame_slot_count_ = RoundUp(frame_slot_count_ + new_frame_slots,
- align_to / kSystemPointerSize);
- DCHECK_LT(0, frame_slot_count_);
+ slot_allocator_.AllocateUnaligned(static_cast<int>(slot_count));
+ return slot_allocator_.Size() - 1;
}
private:
int fixed_slot_count_;
- int frame_slot_count_;
- int spill_slot_count_;
- int return_slot_count_;
+ int spill_slot_count_ = 0;
+ // Account for return slots separately. Conceptually, they follow all
+ // allocated spill slots.
+ int return_slot_count_ = 0;
+ AlignedSlotAllocator slot_allocator_;
BitVector* allocated_registers_;
BitVector* allocated_double_registers_;
+#if DEBUG
+ bool spill_slots_finished_ = false;
+ bool frame_aligned_ = false;
+#endif
};
// Represents an offset from either the stack pointer or frame pointer.
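
A worked example for the rewritten AllocateSpillSlot may help; this is a sketch under the assumption that AlignedSlotAllocator::kSlotSize is one pointer-sized slot, that Allocate(n) aligns to n slots as its name suggests, and that no earlier alignment gap can be reused:

    // Request: width = 16 bytes, alignment = 16 bytes on a 64-bit target,
    // so slots == 2 and the simple path (width == alignment) is taken.
    //
    //   old_end = slot_allocator_.Size()      == 5  // fixed slots + earlier spills
    //   slot    = slot_allocator_.Allocate(2)       // aligns 5 -> 6, returns 6
    //   end     = slot_allocator_.Size()      == 8
    //   spill_slot_count_ += end - old_end          // += 3 (1 padding + 2 payload)
    //   return slot + slots - 1                     // == 7, last slot of the pair
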
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index 1208d0f4f6d..eb3e4168f9a 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -1164,11 +1164,16 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperandAsJSON& o) {
os << "\"type\": \"immediate\", ";
const ImmediateOperand* imm = ImmediateOperand::cast(op);
switch (imm->type()) {
- case ImmediateOperand::INLINE: {
- os << "\"text\": \"#" << imm->inline_value() << "\"";
+ case ImmediateOperand::INLINE_INT32: {
+ os << "\"text\": \"#" << imm->inline_int32_value() << "\"";
break;
}
- case ImmediateOperand::INDEXED: {
+ case ImmediateOperand::INLINE_INT64: {
+ os << "\"text\": \"#" << imm->inline_int64_value() << "\"";
+ break;
+ }
+ case ImmediateOperand::INDEXED_RPO:
+ case ImmediateOperand::INDEXED_IMM: {
int index = imm->indexed_value();
os << "\"text\": \"imm:" << index << "\",";
os << "\"tooltip\": \"";
diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h
index e41bb6d7481..9d125537661 100644
--- a/deps/v8/src/compiler/heap-refs.h
+++ b/deps/v8/src/compiler/heap-refs.h
@@ -76,6 +76,7 @@ enum class OddballType : uint8_t {
/* Subtypes of FixedArrayBase */ \
V(BytecodeArray) \
/* Subtypes of Name */ \
+ V(String) \
V(Symbol) \
/* Subtypes of HeapObject */ \
V(AccessorInfo) \
@@ -83,8 +84,11 @@ enum class OddballType : uint8_t {
V(CallHandlerInfo) \
V(Cell) \
V(Code) \
+ V(DescriptorArray) \
V(FeedbackCell) \
V(FeedbackVector) \
+ V(FunctionTemplateInfo) \
+ V(Name) \
V(RegExpBoilerplateDescription) \
V(SharedFunctionInfo) \
V(TemplateObjectDescription)
@@ -128,17 +132,12 @@ enum class OddballType : uint8_t {
/* Subtypes of FixedArrayBase */ \
V(FixedArray) \
V(FixedDoubleArray) \
- /* Subtypes of Name */ \
- V(String) \
/* Subtypes of JSReceiver */ \
V(JSObject) \
/* Subtypes of HeapObject */ \
V(AllocationSite) \
- V(DescriptorArray) \
V(FixedArrayBase) \
- V(FunctionTemplateInfo) \
V(JSReceiver) \
- V(Name) \
V(SourceTextModule) \
/* Subtypes of Object */ \
V(HeapObject)
@@ -193,6 +192,7 @@ class V8_EXPORT_PRIVATE ObjectRef {
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(HEAP_AS_METHOD_DECL)
#undef HEAP_AS_METHOD_DECL
+ bool IsNull() const;
bool IsNullOrUndefined() const;
bool IsTheHole() const;
@@ -343,10 +343,17 @@ class JSObjectRef : public JSReceiverRef {
// Return the value of the property identified by the field {index}
// if {index} is known to be an own data property of the object.
- base::Optional<ObjectRef> GetOwnDataProperty(
+ base::Optional<ObjectRef> GetOwnFastDataProperty(
Representation field_representation, FieldIndex index,
SerializationPolicy policy =
SerializationPolicy::kAssumeSerialized) const;
+
+ // Return the value of the dictionary property at {index} in the dictionary
+ // if {index} is known to be an own data property of the object.
+ ObjectRef GetOwnDictionaryProperty(
+ InternalIndex index, SerializationPolicy policy =
+ SerializationPolicy::kAssumeSerialized) const;
+
base::Optional<FixedArrayBaseRef> elements() const;
void SerializeElements();
void EnsureElementsTenured();
@@ -564,6 +571,7 @@ class DescriptorArrayRef : public HeapObjectRef {
PropertyDetails GetPropertyDetails(InternalIndex descriptor_index) const;
NameRef GetPropertyKey(InternalIndex descriptor_index) const;
+ ObjectRef GetFieldType(InternalIndex descriptor_index) const;
base::Optional<ObjectRef> GetStrongValue(
InternalIndex descriptor_index) const;
};
@@ -603,8 +611,6 @@ class CallHandlerInfoRef : public HeapObjectRef {
Handle<CallHandlerInfo> object() const;
Address callback() const;
-
- void Serialize();
ObjectRef data() const;
};
@@ -807,7 +813,7 @@ class BytecodeArrayRef : public FixedArrayBaseRef {
// NOTE: Concurrent reads of the actual bytecodes as well as the constant pool
// (both immutable) do not go through BytecodeArrayRef but are performed
- // directly through the handle by BytecodeArrayAccessor.
+ // directly through the handle by BytecodeArrayIterator.
int register_count() const;
int parameter_count() const;
@@ -861,22 +867,22 @@ class ScopeInfoRef : public HeapObjectRef {
void SerializeScopeInfoChain();
};
-#define BROKER_SFI_FIELDS(V) \
- V(int, internal_formal_parameter_count) \
- V(bool, has_duplicate_parameters) \
- V(int, function_map_index) \
- V(FunctionKind, kind) \
- V(LanguageMode, language_mode) \
- V(bool, native) \
- V(bool, HasBreakInfo) \
- V(bool, HasBuiltinId) \
- V(bool, construct_as_builtin) \
- V(bool, HasBytecodeArray) \
- V(int, StartPosition) \
- V(bool, is_compiled) \
- V(bool, IsUserJavaScript) \
- V(const wasm::WasmModule*, wasm_module) \
- V(const wasm::FunctionSig*, wasm_function_signature)
+#define BROKER_SFI_FIELDS(V) \
+ V(int, internal_formal_parameter_count) \
+ V(bool, has_duplicate_parameters) \
+ V(int, function_map_index) \
+ V(FunctionKind, kind) \
+ V(LanguageMode, language_mode) \
+ V(bool, native) \
+ V(bool, HasBreakInfo) \
+ V(bool, HasBuiltinId) \
+ V(bool, construct_as_builtin) \
+ V(bool, HasBytecodeArray) \
+ V(int, StartPosition) \
+ V(bool, is_compiled) \
+ V(bool, IsUserJavaScript) \
+ IF_WASM(V, const wasm::WasmModule*, wasm_module) \
+ IF_WASM(V, const wasm::FunctionSig*, wasm_function_signature)
class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef {
public:
@@ -910,13 +916,18 @@ class StringRef : public NameRef {
Handle<String> object() const;
+ // With concurrent inlining on, we return base::nullopt due to not being able
+ // to use LookupIterator in a thread-safe way.
base::Optional<ObjectRef> GetCharAsStringOrUndefined(
uint32_t index, SerializationPolicy policy =
SerializationPolicy::kAssumeSerialized) const;
+ // When concurrently accessing non-read-only non-internalized strings, we
+ // return base::nullopt for these methods.
base::Optional<int> length() const;
base::Optional<uint16_t> GetFirstChar();
base::Optional<double> ToNumber();
+
bool IsSeqString() const;
bool IsExternalString() const;
};
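
Because length(), GetFirstChar() and ToNumber() now return base::Optional, call sites must handle the nullopt case. A hedged sketch of the expected pattern (the surrounding reducer context is invented for illustration):

    base::Optional<int> length = string_ref.length();
    if (!length.has_value()) return NoChange();  // not safely readable concurrently
    int len = length.value();                    // safe to use from here on
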
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 2ef7d8af323..706cd7de509 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -23,19 +23,19 @@ namespace compiler {
Int64Lowering::Int64Lowering(
Graph* graph, MachineOperatorBuilder* machine,
- CommonOperatorBuilder* common, Zone* zone,
- Signature<MachineRepresentation>* signature,
+ CommonOperatorBuilder* common, SimplifiedOperatorBuilder* simplified,
+ Zone* zone, Signature<MachineRepresentation>* signature,
std::unique_ptr<Int64LoweringSpecialCase> special_case)
: zone_(zone),
graph_(graph),
machine_(machine),
common_(common),
+ simplified_(simplified),
state_(graph, 3),
stack_(zone),
replacements_(nullptr),
signature_(signature),
- placeholder_(
- graph->NewNode(common->Parameter(-2, "placeholder"), graph->start())),
+ placeholder_(graph->NewNode(common->Dead())),
special_case_(std::move(special_case)) {
DCHECK_NOT_NULL(graph);
DCHECK_NOT_NULL(graph->end());
@@ -161,6 +161,75 @@ void Int64Lowering::GetIndexNodes(Node* index, Node** index_low,
#endif
}
+void Int64Lowering::LowerLoadOperator(Node* node, MachineRepresentation rep,
+ const Operator* load_op) {
+ if (rep == MachineRepresentation::kWord64) {
+ LowerMemoryBaseAndIndex(node);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* index_low;
+ Node* index_high;
+ GetIndexNodes(index, &index_low, &index_high);
+ Node* high_node;
+ if (node->InputCount() > 2) {
+ Node* effect_high = node->InputAt(2);
+ Node* control_high = node->InputAt(3);
+ high_node = graph()->NewNode(load_op, base, index_high, effect_high,
+ control_high);
+      // Change the effect chain from old_node --> old_effect to
+      // old_node --> high_node --> old_effect.
+ node->ReplaceInput(2, high_node);
+ } else {
+ high_node = graph()->NewNode(load_op, base, index_high);
+ }
+ node->ReplaceInput(1, index_low);
+ NodeProperties::ChangeOp(node, load_op);
+ ReplaceNode(node, node, high_node);
+ } else {
+ DefaultLowering(node);
+ }
+}
+
+void Int64Lowering::LowerStoreOperator(Node* node, MachineRepresentation rep,
+ const Operator* store_op) {
+ if (rep == MachineRepresentation::kWord64) {
+ // We change the original store node to store the low word, and create
+ // a new store node to store the high word. The effect and control edges
+ // are copied from the original store to the new store node, the effect
+ // edge of the original store is redirected to the new store.
+ LowerMemoryBaseAndIndex(node);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* index_low;
+ Node* index_high;
+ GetIndexNodes(index, &index_low, &index_high);
+ Node* value = node->InputAt(2);
+ DCHECK(HasReplacementLow(value));
+ DCHECK(HasReplacementHigh(value));
+
+ Node* high_node;
+ if (node->InputCount() > 3) {
+ Node* effect_high = node->InputAt(3);
+ Node* control_high = node->InputAt(4);
+ high_node = graph()->NewNode(store_op, base, index_high,
+ GetReplacementHigh(value), effect_high,
+ control_high);
+ node->ReplaceInput(3, high_node);
+
+ } else {
+ high_node = graph()->NewNode(store_op, base, index_high,
+ GetReplacementHigh(value));
+ }
+
+ node->ReplaceInput(1, index_low);
+ node->ReplaceInput(2, GetReplacementLow(value));
+ NodeProperties::ChangeOp(node, store_op);
+ ReplaceNode(node, node, high_node);
+ } else {
+ DefaultLowering(node, true);
+ }
+}
+
void Int64Lowering::LowerNode(Node* node) {
switch (node->opcode()) {
case IrOpcode::kInt64Constant: {
@@ -172,109 +241,54 @@ void Int64Lowering::LowerNode(Node* node) {
ReplaceNode(node, low_node, high_node);
break;
}
- case IrOpcode::kLoad:
+ case IrOpcode::kLoad: {
+ MachineRepresentation rep =
+ LoadRepresentationOf(node->op()).representation();
+ LowerLoadOperator(node, rep, machine()->Load(MachineType::Int32()));
+ break;
+ }
case IrOpcode::kUnalignedLoad: {
- MachineRepresentation rep;
- if (node->opcode() == IrOpcode::kLoad) {
- rep = LoadRepresentationOf(node->op()).representation();
- } else {
- DCHECK_EQ(IrOpcode::kUnalignedLoad, node->opcode());
- rep = LoadRepresentationOf(node->op()).representation();
- }
-
- if (rep == MachineRepresentation::kWord64) {
- LowerMemoryBaseAndIndex(node);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* index_low;
- Node* index_high;
- GetIndexNodes(index, &index_low, &index_high);
- const Operator* load_op;
-
- if (node->opcode() == IrOpcode::kLoad) {
- load_op = machine()->Load(MachineType::Int32());
- } else {
- DCHECK_EQ(IrOpcode::kUnalignedLoad, node->opcode());
- load_op = machine()->UnalignedLoad(MachineType::Int32());
- }
-
- Node* high_node;
- if (node->InputCount() > 2) {
- Node* effect_high = node->InputAt(2);
- Node* control_high = node->InputAt(3);
- high_node = graph()->NewNode(load_op, base, index_high, effect_high,
- control_high);
- // change the effect change from old_node --> old_effect to
- // old_node --> high_node --> old_effect.
- node->ReplaceInput(2, high_node);
- } else {
- high_node = graph()->NewNode(load_op, base, index_high);
- }
- node->ReplaceInput(1, index_low);
- NodeProperties::ChangeOp(node, load_op);
- ReplaceNode(node, node, high_node);
- } else {
- DefaultLowering(node);
- }
+ MachineRepresentation rep =
+ LoadRepresentationOf(node->op()).representation();
+ LowerLoadOperator(node, rep,
+ machine()->UnalignedLoad(MachineType::Int32()));
+ break;
+ }
+ case IrOpcode::kLoadImmutable: {
+ MachineRepresentation rep =
+ LoadRepresentationOf(node->op()).representation();
+ LowerLoadOperator(node, rep,
+ machine()->LoadImmutable(MachineType::Int32()));
+ break;
+ }
+ case IrOpcode::kLoadFromObject: {
+ ObjectAccess access = ObjectAccessOf(node->op());
+ LowerLoadOperator(node, access.machine_type.representation(),
+ simplified()->LoadFromObject(ObjectAccess(
+ MachineType::Int32(), access.write_barrier_kind)));
+ break;
+ }
+ case IrOpcode::kStore: {
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ LowerStoreOperator(
+ node, store_rep.representation(),
+ machine()->Store(StoreRepresentation(
+ MachineRepresentation::kWord32, store_rep.write_barrier_kind())));
break;
}
- case IrOpcode::kStore:
case IrOpcode::kUnalignedStore: {
- MachineRepresentation rep;
- if (node->opcode() == IrOpcode::kStore) {
- rep = StoreRepresentationOf(node->op()).representation();
- } else {
- DCHECK_EQ(IrOpcode::kUnalignedStore, node->opcode());
- rep = UnalignedStoreRepresentationOf(node->op());
- }
-
- if (rep == MachineRepresentation::kWord64) {
- // We change the original store node to store the low word, and create
- // a new store node to store the high word. The effect and control edges
- // are copied from the original store to the new store node, the effect
- // edge of the original store is redirected to the new store.
- LowerMemoryBaseAndIndex(node);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* index_low;
- Node* index_high;
- GetIndexNodes(index, &index_low, &index_high);
- Node* value = node->InputAt(2);
- DCHECK(HasReplacementLow(value));
- DCHECK(HasReplacementHigh(value));
-
- const Operator* store_op;
- if (node->opcode() == IrOpcode::kStore) {
- WriteBarrierKind write_barrier_kind =
- StoreRepresentationOf(node->op()).write_barrier_kind();
- store_op = machine()->Store(StoreRepresentation(
- MachineRepresentation::kWord32, write_barrier_kind));
- } else {
- DCHECK_EQ(IrOpcode::kUnalignedStore, node->opcode());
- store_op = machine()->UnalignedStore(MachineRepresentation::kWord32);
- }
-
- Node* high_node;
- if (node->InputCount() > 3) {
- Node* effect_high = node->InputAt(3);
- Node* control_high = node->InputAt(4);
- high_node = graph()->NewNode(store_op, base, index_high,
- GetReplacementHigh(value), effect_high,
- control_high);
- node->ReplaceInput(3, high_node);
-
- } else {
- high_node = graph()->NewNode(store_op, base, index_high,
- GetReplacementHigh(value));
- }
-
- node->ReplaceInput(1, index_low);
- node->ReplaceInput(2, GetReplacementLow(value));
- NodeProperties::ChangeOp(node, store_op);
- ReplaceNode(node, node, high_node);
- } else {
- DefaultLowering(node, true);
- }
+ UnalignedStoreRepresentation store_rep =
+ UnalignedStoreRepresentationOf(node->op());
+ LowerStoreOperator(
+ node, store_rep,
+ machine()->UnalignedStore(MachineRepresentation::kWord32));
+ break;
+ }
+ case IrOpcode::kStoreToObject: {
+ ObjectAccess access = ObjectAccessOf(node->op());
+ LowerStoreOperator(node, access.machine_type.representation(),
+ simplified()->StoreToObject(ObjectAccess(
+ MachineType::Int32(), access.write_barrier_kind)));
break;
}
case IrOpcode::kStart: {
@@ -861,6 +875,21 @@ void Int64Lowering::LowerNode(Node* node) {
}
break;
}
+ case IrOpcode::kLoopExitValue: {
+ MachineRepresentation rep = LoopExitValueRepresentationOf(node->op());
+ if (rep == MachineRepresentation::kWord64) {
+ Node* low_node = graph()->NewNode(
+ common()->LoopExitValue(MachineRepresentation::kWord32),
+ GetReplacementLow(node->InputAt(0)), node->InputAt(1));
+ Node* high_node = graph()->NewNode(
+ common()->LoopExitValue(MachineRepresentation::kWord32),
+ GetReplacementHigh(node->InputAt(0)), node->InputAt(1));
+ ReplaceNode(node, low_node, high_node);
+ } else {
+ DefaultLowering(node);
+ }
+ break;
+ }
case IrOpcode::kWord64ReverseBytes: {
Node* input = node->InputAt(0);
ReplaceNode(node,
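
Conceptually, the store half of this lowering splits one 64-bit memory operation into two 32-bit ones. A minimal sketch of the equivalent memory behaviour on a 32-bit little-endian target (plain C++, not compiler IR):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // One Word64 store at `index` becomes a Word32 store of the low half at the
    // original index plus a Word32 store of the high half at index + 4.
    void StoreWord64AsTwoWord32(uint8_t* base, std::size_t index, uint64_t value) {
      uint32_t low  = static_cast<uint32_t>(value);
      uint32_t high = static_cast<uint32_t>(value >> 32);
      std::memcpy(base + index, &low, sizeof(low));         // lowered original node
      std::memcpy(base + index + 4, &high, sizeof(high));   // the new high_node
    }
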
diff --git a/deps/v8/src/compiler/int64-lowering.h b/deps/v8/src/compiler/int64-lowering.h
index 944d2bc32a4..6a97760f5f1 100644
--- a/deps/v8/src/compiler/int64-lowering.h
+++ b/deps/v8/src/compiler/int64-lowering.h
@@ -12,6 +12,7 @@
#include "src/compiler/graph.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-marker.h"
+#include "src/compiler/simplified-operator.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -33,8 +34,8 @@ class V8_EXPORT_PRIVATE Int64Lowering {
public:
Int64Lowering(
Graph* graph, MachineOperatorBuilder* machine,
- CommonOperatorBuilder* common, Zone* zone,
- Signature<MachineRepresentation>* signature,
+ CommonOperatorBuilder* common, SimplifiedOperatorBuilder* simplified_,
+ Zone* zone, Signature<MachineRepresentation>* signature,
std::unique_ptr<Int64LoweringSpecialCase> special_case = nullptr);
void LowerGraph();
@@ -54,6 +55,7 @@ class V8_EXPORT_PRIVATE Int64Lowering {
Graph* graph() const { return graph_; }
MachineOperatorBuilder* machine() const { return machine_; }
CommonOperatorBuilder* common() const { return common_; }
+ SimplifiedOperatorBuilder* simplified() const { return simplified_; }
Signature<MachineRepresentation>* signature() const { return signature_; }
void PushNode(Node* node);
@@ -63,6 +65,10 @@ class V8_EXPORT_PRIVATE Int64Lowering {
const Operator* unsigned_op);
void LowerWord64AtomicBinop(Node* node, const Operator* op);
void LowerWord64AtomicNarrowOp(Node* node, const Operator* op);
+ void LowerLoadOperator(Node* node, MachineRepresentation rep,
+ const Operator* load_op);
+ void LowerStoreOperator(Node* node, MachineRepresentation rep,
+ const Operator* store_op);
const CallDescriptor* LowerCallDescriptor(
const CallDescriptor* call_descriptor);
@@ -86,6 +92,7 @@ class V8_EXPORT_PRIVATE Int64Lowering {
Graph* const graph_;
MachineOperatorBuilder* machine_;
CommonOperatorBuilder* common_;
+ SimplifiedOperatorBuilder* simplified_;
NodeMarker<State> state_;
ZoneDeque<NodeState> stack_;
Replacement* replacements_;
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index b1405938ffe..bb7a11d16dc 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -1479,12 +1479,12 @@ struct MapFrameStateParams {
TNode<Object> receiver;
TNode<Object> callback;
TNode<Object> this_arg;
- TNode<JSArray> a;
+ base::Optional<TNode<JSArray>> a;
TNode<Object> original_length;
};
FrameState MapPreLoopLazyFrameState(const MapFrameStateParams& params) {
- DCHECK(params.a.is_null());
+ DCHECK(!params.a);
Node* checkpoint_params[] = {params.receiver, params.callback,
params.this_arg, params.original_length};
return CreateJavaScriptBuiltinContinuationFrameState(
@@ -1497,7 +1497,7 @@ FrameState MapPreLoopLazyFrameState(const MapFrameStateParams& params) {
FrameState MapLoopLazyFrameState(const MapFrameStateParams& params,
TNode<Number> k) {
Node* checkpoint_params[] = {
- params.receiver, params.callback, params.this_arg, params.a, k,
+ params.receiver, params.callback, params.this_arg, *params.a, k,
params.original_length};
return CreateJavaScriptBuiltinContinuationFrameState(
params.jsgraph, params.shared,
@@ -1509,7 +1509,7 @@ FrameState MapLoopLazyFrameState(const MapFrameStateParams& params,
FrameState MapLoopEagerFrameState(const MapFrameStateParams& params,
TNode<Number> k) {
Node* checkpoint_params[] = {
- params.receiver, params.callback, params.this_arg, params.a, k,
+ params.receiver, params.callback, params.this_arg, *params.a, k,
params.original_length};
return CreateJavaScriptBuiltinContinuationFrameState(
params.jsgraph, params.shared,
@@ -3429,8 +3429,9 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
return ReplaceWithSubgraph(&a, subgraph);
}
-namespace {
+#if V8_ENABLE_WEBASSEMBLY
+namespace {
bool CanInlineJSToWasmCall(const wasm::FunctionSig* wasm_signature) {
DCHECK(FLAG_turbo_inline_js_wasm_calls);
if (wasm_signature->return_count() > 1) {
@@ -3449,7 +3450,6 @@ bool CanInlineJSToWasmCall(const wasm::FunctionSig* wasm_signature) {
return true;
}
-
} // namespace
Reduction JSCallReducer::ReduceCallWasmFunction(
@@ -3462,11 +3462,6 @@ Reduction JSCallReducer::ReduceCallWasmFunction(
return NoChange();
}
- // TODO(paolosev@microsoft.com): Enable inlining for calls in try/catch.
- if (NodeProperties::IsExceptionalCall(node)) {
- return NoChange();
- }
-
const wasm::FunctionSig* wasm_signature = shared.wasm_function_signature();
if (!CanInlineJSToWasmCall(wasm_signature)) {
return NoChange();
@@ -3505,6 +3500,7 @@ Reduction JSCallReducer::ReduceCallWasmFunction(
NodeProperties::ChangeOp(node, op);
return Changed(node);
}
+#endif // V8_ENABLE_WEBASSEMBLY
#ifndef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
namespace {
@@ -4630,9 +4626,11 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
return ReduceCallApiFunction(node, shared);
}
+#if V8_ENABLE_WEBASSEMBLY
if ((flags() & kInlineJSToWasmCalls) && shared.wasm_function_signature()) {
return ReduceCallWasmFunction(node, shared);
}
+#endif // V8_ENABLE_WEBASSEMBLY
return NoChange();
}
@@ -7711,7 +7709,7 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
if (ai_exec.IsInvalid()) return inference.NoChange();
// If "exec" has been modified on {regexp}, we can't do anything.
- if (ai_exec.IsDataConstant()) {
+ if (ai_exec.IsFastDataConstant()) {
Handle<JSObject> holder;
// Do not reduce if the exec method is not on the prototype chain.
if (!ai_exec.holder().ToHandle(&holder)) return inference.NoChange();
@@ -7719,7 +7717,7 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
JSObjectRef holder_ref(broker(), holder);
// Bail out if the exec method is not the original one.
- base::Optional<ObjectRef> constant = holder_ref.GetOwnDataProperty(
+ base::Optional<ObjectRef> constant = holder_ref.GetOwnFastDataProperty(
ai_exec.field_representation(), ai_exec.field_index());
if (!constant.has_value() ||
!constant->equals(native_context().regexp_exec_function())) {
@@ -7731,6 +7729,7 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
ai_exec.lookup_start_object_maps(), kStartAtPrototype,
JSObjectRef(broker(), holder));
} else {
+    // TODO(v8:11457) Support dictionary mode prototypes here.
return inference.NoChange();
}
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 33f2f742b0d..71a0d43a415 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -700,6 +700,7 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
node->InsertInput(zone(), 1,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
// Use the CreateShallowArrayLiteral builtin only for shallow boilerplates
// without properties up to the number of elements that the stubs can handle.
@@ -707,7 +708,6 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
p.length() < ConstructorBuiltins::kMaximumClonedShallowArrayElements) {
ReplaceWithBuiltinCall(node, Builtins::kCreateShallowArrayLiteral);
} else {
- node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
ReplaceWithRuntimeCall(node, Runtime::kCreateArrayLiteral);
}
}
@@ -1251,8 +1251,10 @@ void JSGenericLowering::LowerJSCallRuntime(Node* node) {
ReplaceWithRuntimeCall(node, p.id(), static_cast<int>(p.arity()));
}
+#if V8_ENABLE_WEBASSEMBLY
// Will be lowered in SimplifiedLowering.
void JSGenericLowering::LowerJSWasmCall(Node* node) {}
+#endif // V8_ENABLE_WEBASSEMBLY
void JSGenericLowering::LowerJSForInPrepare(Node* node) {
JSForInPrepareNode n(node);
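
The js-generic-lowering hunk above hoists the SmiConstant(p.flags()) operand out of the runtime-only branch, so the builtin and runtime lowerings of JSCreateLiteralArray now receive the same inputs. A rough sketch of that shape, with invented names standing in for the real node and builtin machinery:

// Sketch only: assemble the operands shared by both lowering paths before
// branching, so the fast (builtin) and slow (runtime) calls cannot drift apart.
#include <iostream>
#include <string>
#include <vector>

void Lower(bool can_use_fast_path) {
  std::vector<std::string> inputs = {"feedback_vector", "slot", "constant_elements"};
  inputs.push_back("flags");  // added unconditionally, as in the patch
  if (can_use_fast_path) {
    std::cout << "CallBuiltin(CreateShallowArrayLiteral";
  } else {
    std::cout << "CallRuntime(kCreateArrayLiteral";
  }
  for (const auto& in : inputs) std::cout << ", " << in;
  std::cout << ")\n";
}

int main() {
  Lower(true);
  Lower(false);
}
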
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index 0d428995a16..ac90e4c6673 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -164,6 +164,7 @@ class ObjectData : public ZoneObject {
kind_ == kNeverSerializedHeapObject ||
kind_ == kUnserializedReadOnlyHeapObject;
}
+ bool IsNull() const { return object_->IsNull(); }
#ifdef DEBUG
enum class Usage{kUnused, kOnlyIdentityUsed, kDataUsed};
@@ -271,6 +272,7 @@ FunctionTemplateInfoData::FunctionTemplateInfoData(
c_function_(v8::ToCData<Address>(object->GetCFunction())),
c_signature_(v8::ToCData<CFunctionInfo*>(object->GetCSignature())),
known_receivers_(broker->zone()) {
+ DCHECK(!FLAG_turbo_direct_heap_access);
auto function_template_info = Handle<FunctionTemplateInfo>::cast(object);
is_signature_undefined_ =
function_template_info->signature().IsUndefined(broker->isolate());
@@ -420,10 +422,13 @@ class JSObjectData : public JSReceiverData {
ObjectData* GetOwnConstantElement(
JSHeapBroker* broker, uint32_t index,
SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
- ObjectData* GetOwnDataProperty(
+ ObjectData* GetOwnFastDataProperty(
JSHeapBroker* broker, Representation representation,
FieldIndex field_index,
SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
+ ObjectData* GetOwnDictionaryProperty(JSHeapBroker* broker,
+ InternalIndex dict_index,
+ SerializationPolicy policy);
// This method is only used to assert our invariants.
bool cow_or_empty_elements_tenured() const;
@@ -453,8 +458,9 @@ class JSObjectData : public JSReceiverData {
// (2) are known not to (possibly they don't exist at all).
// In case (2), the second pair component is nullptr.
// For simplicity, this may in theory overlap with inobject_fields_.
- // The keys of the map are the property_index() values of the
- // respective property FieldIndex'es.
+ // For fast mode objects, the keys of the map are the property_index() values
+ // of the respective property FieldIndex'es. For slow mode objects, the keys
+ // are the dictionary indices.
ZoneUnorderedMap<int, ObjectData*> own_properties_;
};
@@ -494,15 +500,23 @@ base::Optional<ObjectRef> GetOwnElementFromHeap(JSHeapBroker* broker,
return base::nullopt;
}
-ObjectRef GetOwnDataPropertyFromHeap(JSHeapBroker* broker,
- Handle<JSObject> receiver,
- Representation representation,
- FieldIndex field_index) {
+ObjectRef GetOwnFastDataPropertyFromHeap(JSHeapBroker* broker,
+ Handle<JSObject> receiver,
+ Representation representation,
+ FieldIndex field_index) {
Handle<Object> constant =
JSObject::FastPropertyAt(receiver, representation, field_index);
return ObjectRef(broker, constant);
}
+ObjectRef GetOwnDictionaryPropertyFromHeap(JSHeapBroker* broker,
+ Handle<JSObject> receiver,
+ InternalIndex dict_index) {
+ Handle<Object> constant =
+ JSObject::DictionaryPropertyAt(receiver, dict_index);
+ return ObjectRef(broker, constant);
+}
+
} // namespace
ObjectData* JSObjectData::GetOwnConstantElement(JSHeapBroker* broker,
@@ -524,27 +538,46 @@ ObjectData* JSObjectData::GetOwnConstantElement(JSHeapBroker* broker,
return result;
}
-ObjectData* JSObjectData::GetOwnDataProperty(JSHeapBroker* broker,
- Representation representation,
- FieldIndex field_index,
- SerializationPolicy policy) {
+ObjectData* JSObjectData::GetOwnFastDataProperty(JSHeapBroker* broker,
+ Representation representation,
+ FieldIndex field_index,
+ SerializationPolicy policy) {
auto p = own_properties_.find(field_index.property_index());
if (p != own_properties_.end()) return p->second;
if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about property with index "
+ TRACE_MISSING(broker, "knowledge about fast property with index "
<< field_index.property_index() << " on "
<< this);
return nullptr;
}
- ObjectRef property = GetOwnDataPropertyFromHeap(
+ ObjectRef property = GetOwnFastDataPropertyFromHeap(
broker, Handle<JSObject>::cast(object()), representation, field_index);
ObjectData* result(property.data());
own_properties_.insert(std::make_pair(field_index.property_index(), result));
return result;
}
+ObjectData* JSObjectData::GetOwnDictionaryProperty(JSHeapBroker* broker,
+ InternalIndex dict_index,
+ SerializationPolicy policy) {
+ auto p = own_properties_.find(dict_index.as_int());
+ if (p != own_properties_.end()) return p->second;
+
+ if (policy == SerializationPolicy::kAssumeSerialized) {
+ TRACE_MISSING(broker, "knowledge about dictionary property with index "
+ << dict_index.as_int() << " on " << this);
+ return nullptr;
+ }
+
+ ObjectRef property = GetOwnDictionaryPropertyFromHeap(
+ broker, Handle<JSObject>::cast(object()), dict_index);
+ ObjectData* result(property.data());
+ own_properties_.insert(std::make_pair(dict_index.as_int(), result));
+ return result;
+}
+
class JSTypedArrayData : public JSObjectData {
public:
JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage,
@@ -637,9 +670,18 @@ class JSBoundFunctionData : public JSObjectData {
bool Serialize(JSHeapBroker* broker);
bool serialized() const { return serialized_; }
- ObjectData* bound_target_function() const { return bound_target_function_; }
- ObjectData* bound_this() const { return bound_this_; }
- ObjectData* bound_arguments() const { return bound_arguments_; }
+ ObjectData* bound_target_function() const {
+ DCHECK(!FLAG_turbo_direct_heap_access);
+ return bound_target_function_;
+ }
+ ObjectData* bound_this() const {
+ DCHECK(!FLAG_turbo_direct_heap_access);
+ return bound_this_;
+ }
+ ObjectData* bound_arguments() const {
+ DCHECK(!FLAG_turbo_direct_heap_access);
+ return bound_arguments_;
+ }
private:
bool serialized_ = false;
@@ -849,7 +891,9 @@ class NativeContextData : public ContextData {
class NameData : public HeapObjectData {
public:
NameData(JSHeapBroker* broker, ObjectData** storage, Handle<Name> object)
- : HeapObjectData(broker, storage, object) {}
+ : HeapObjectData(broker, storage, object) {
+ DCHECK(!FLAG_turbo_direct_heap_access);
+ }
};
class StringData : public NameData {
@@ -895,7 +939,9 @@ StringData::StringData(JSHeapBroker* broker, ObjectData** storage,
to_number_(TryStringToDouble(broker->local_isolate(), object)),
is_external_string_(object->IsExternalString()),
is_seq_string_(object->IsSeqString()),
- chars_as_strings_(broker->zone()) {}
+ chars_as_strings_(broker->zone()) {
+ DCHECK(!FLAG_turbo_direct_heap_access);
+}
class InternalizedStringData : public StringData {
public:
@@ -975,7 +1021,7 @@ bool IsFastLiteralHelper(Handle<JSObject> boilerplate, int max_depth,
// Check the in-object properties.
Handle<DescriptorArray> descriptors(
- boilerplate->map().instance_descriptors(kRelaxedLoad), isolate);
+ boilerplate->map().instance_descriptors(isolate, kRelaxedLoad), isolate);
for (InternalIndex i : boilerplate->map().IterateOwnDescriptors()) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.location() != kField) continue;
@@ -1122,10 +1168,6 @@ class MapData : public HeapObjectData {
InternalIndex descriptor_index);
void SerializeOwnDescriptors(JSHeapBroker* broker);
ObjectData* GetStrongValue(InternalIndex descriptor_index) const;
- // TODO(neis, solanes): This code needs to be changed to allow for
- // kNeverSerialized instance descriptors. However, this is likely to require a
- // non-trivial refactoring of how maps are serialized because actual instance
- // descriptors don't contain information about owner maps.
ObjectData* instance_descriptors() const { return instance_descriptors_; }
void SerializeRootMap(JSHeapBroker* broker);
@@ -1264,7 +1306,8 @@ namespace {
bool IsReadOnlyLengthDescriptor(Isolate* isolate, Handle<Map> jsarray_map) {
DCHECK(!jsarray_map->is_dictionary_map());
Handle<Name> length_string = isolate->factory()->length_string();
- DescriptorArray descriptors = jsarray_map->instance_descriptors(kRelaxedLoad);
+ DescriptorArray descriptors =
+ jsarray_map->instance_descriptors(isolate, kRelaxedLoad);
// TODO(jkummerow): We could skip the search and hardcode number == 0.
InternalIndex number = descriptors.Search(*length_string, *jsarray_map);
DCHECK(number.is_found());
@@ -1290,7 +1333,12 @@ MapData::MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object,
: HeapObjectData(broker, storage, object, kind),
instance_type_(object->instance_type()),
instance_size_(object->instance_size()),
- bit_field_(object->bit_field()),
+ // We read the bit_field as relaxed since `has_non_instance_prototype` can
+ // be modified in live objects, and because we serialize some maps on the
+ // background. Those background-serialized maps are the native context's
+ // maps for which this bit is "set" but it doesn't change value (i.e. it
+ // is set to false when it was already false).
+ bit_field_(object->relaxed_bit_field()),
bit_field2_(object->bit_field2()),
bit_field3_(object->bit_field3()),
can_be_deprecated_(object->NumberOfOwnDescriptors() > 0
@@ -1410,7 +1458,9 @@ class DescriptorArrayData : public HeapObjectData {
public:
DescriptorArrayData(JSHeapBroker* broker, ObjectData** storage,
Handle<DescriptorArray> object)
- : HeapObjectData(broker, storage, object), contents_(broker->zone()) {}
+ : HeapObjectData(broker, storage, object), contents_(broker->zone()) {
+ DCHECK(!FLAG_turbo_direct_heap_access);
+ }
ObjectData* FindFieldOwner(InternalIndex descriptor_index) const {
return contents_.at(descriptor_index.as_int()).field_owner;
@@ -1455,7 +1505,7 @@ void DescriptorArrayData::SerializeDescriptor(JSHeapBroker* broker,
Isolate* const isolate = broker->isolate();
auto descriptors = Handle<DescriptorArray>::cast(object());
- CHECK_EQ(*descriptors, map->instance_descriptors(kRelaxedLoad));
+ CHECK_EQ(*descriptors, map->instance_descriptors(isolate));
PropertyDescriptor d;
d.key = broker->GetOrCreateData(descriptors->GetKey(descriptor_index));
@@ -2245,13 +2295,29 @@ void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
InternalIndex descriptor_index) {
TraceScope tracer(broker, this, "MapData::SerializeOwnDescriptor");
Handle<Map> map = Handle<Map>::cast(object());
+ Isolate* isolate = broker->isolate();
if (instance_descriptors_ == nullptr) {
instance_descriptors_ =
- broker->GetOrCreateData(map->instance_descriptors(kRelaxedLoad));
- }
-
- if (!instance_descriptors()->should_access_heap()) {
+ broker->GetOrCreateData(map->instance_descriptors(isolate));
+ }
+
+ if (instance_descriptors()->should_access_heap()) {
+ // When accessing the fields concurrently, we still have to recurse on the
+ // owner map if it is different than the current map. This is because
+ // {instance_descriptors_} gets set on SerializeOwnDescriptor and otherwise
+ // we risk the field owner having a null {instance_descriptors_}.
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(isolate),
+ isolate);
+ if (descriptors->GetDetails(descriptor_index).location() == kField) {
+ Handle<Map> owner(map->FindFieldOwner(isolate, descriptor_index),
+ isolate);
+ if (!owner.equals(map)) {
+ broker->GetOrCreateData(owner)->AsMap()->SerializeOwnDescriptor(
+ broker, descriptor_index);
+ }
+ }
+ } else {
DescriptorArrayData* descriptors =
instance_descriptors()->AsDescriptorArray();
descriptors->SerializeDescriptor(broker, map, descriptor_index);
@@ -2345,7 +2411,7 @@ void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
// Check the in-object properties.
Handle<DescriptorArray> descriptors(
- boilerplate->map().instance_descriptors(kRelaxedLoad), isolate);
+ boilerplate->map().instance_descriptors(isolate), isolate);
for (InternalIndex i : boilerplate->map().IterateOwnDescriptors()) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.location() != kField) continue;
@@ -3000,27 +3066,6 @@ void MapRef::SerializeForElementStore() {
data()->AsMap()->SerializeForElementStore(broker());
}
-namespace {
-// This helper function has two modes. If {prototype_maps} is nullptr, the
-// prototype chain is serialized as necessary to determine the result.
-// Otherwise, the heap is untouched and the encountered prototypes are pushed
-// onto {prototype_maps}.
-bool HasOnlyStablePrototypesWithFastElementsHelper(
- JSHeapBroker* broker, MapRef const& map,
- ZoneVector<MapRef>* prototype_maps) {
- for (MapRef prototype_map = map;;) {
- if (prototype_maps == nullptr) prototype_map.SerializePrototype();
- prototype_map = prototype_map.prototype().AsHeapObject().map();
- if (prototype_map.oddball_type() == OddballType::kNull) return true;
- if (!map.prototype().IsJSObject() || !prototype_map.is_stable() ||
- !IsFastElementsKind(prototype_map.elements_kind())) {
- return false;
- }
- if (prototype_maps != nullptr) prototype_maps->push_back(prototype_map);
- }
-}
-} // namespace
-
void MapData::SerializeForElementLoad(JSHeapBroker* broker) {
if (serialized_for_element_load_) return;
serialized_for_element_load_ = true;
@@ -3034,22 +3079,34 @@ void MapData::SerializeForElementStore(JSHeapBroker* broker) {
serialized_for_element_store_ = true;
TraceScope tracer(broker, this, "MapData::SerializeForElementStore");
- HasOnlyStablePrototypesWithFastElementsHelper(broker, MapRef(broker, this),
- nullptr);
+ // TODO(solanes, v8:7790): This should use MapData methods rather than
+ // constructing MapRefs, but it involves non-trivial refactoring and this
+ // method should go away anyway once the compiler is fully concurrent.
+ MapRef map(broker, this);
+ for (MapRef prototype_map = map;;) {
+ prototype_map.SerializePrototype();
+ prototype_map = prototype_map.prototype().map();
+ if (prototype_map.oddball_type() == OddballType::kNull ||
+ !map.prototype().IsJSObject() || !prototype_map.is_stable() ||
+ !IsFastElementsKind(prototype_map.elements_kind())) {
+ return;
+ }
+ }
}
bool MapRef::HasOnlyStablePrototypesWithFastElements(
ZoneVector<MapRef>* prototype_maps) {
- for (MapRef prototype_map = *this;;) {
- if (prototype_maps == nullptr) prototype_map.SerializePrototype();
- prototype_map = prototype_map.prototype().AsHeapObject().map();
- if (prototype_map.oddball_type() == OddballType::kNull) return true;
+ DCHECK_NOT_NULL(prototype_maps);
+ MapRef prototype_map = prototype().map();
+ while (prototype_map.oddball_type() != OddballType::kNull) {
if (!prototype().IsJSObject() || !prototype_map.is_stable() ||
!IsFastElementsKind(prototype_map.elements_kind())) {
return false;
}
- if (prototype_maps != nullptr) prototype_maps->push_back(prototype_map);
+ prototype_maps->push_back(prototype_map);
+ prototype_map = prototype_map.prototype().map();
}
+ return true;
}
bool MapRef::supports_fast_array_iteration() const {
@@ -3117,12 +3174,12 @@ FeedbackCellRef FeedbackVectorRef::GetClosureFeedbackCell(int index) const {
}
ObjectRef JSObjectRef::RawFastPropertyAt(FieldIndex index) const {
+ CHECK(index.is_inobject());
if (data_->should_access_heap()) {
return ObjectRef(broker(), broker()->CanonicalPersistentHandle(
object()->RawFastPropertyAt(index)));
}
JSObjectData* object_data = data()->AsJSObject();
- CHECK(index.is_inobject());
return ObjectRef(broker(),
object_data->GetInobjectField(index.property_index()));
}
@@ -3167,12 +3224,16 @@ void JSObjectRef::EnsureElementsTenured() {
FieldIndex MapRef::GetFieldIndexFor(InternalIndex descriptor_index) const {
CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
- if (data_->should_access_heap()) {
- return FieldIndex::ForDescriptor(*object(), descriptor_index);
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ FieldIndex result = FieldIndex::ForDescriptor(*object(), descriptor_index);
+ DCHECK(result.is_inobject());
+ return result;
}
DescriptorArrayData* descriptors =
data()->AsMap()->instance_descriptors()->AsDescriptorArray();
- return descriptors->GetFieldIndexFor(descriptor_index);
+ FieldIndex result = descriptors->GetFieldIndexFor(descriptor_index);
+ DCHECK(result.is_inobject());
+ return result;
}
int MapRef::GetInObjectPropertyOffset(int i) const {
@@ -3219,16 +3280,7 @@ MapRef MapRef::FindFieldOwner(InternalIndex descriptor_index) const {
ObjectRef MapRef::GetFieldType(InternalIndex descriptor_index) const {
CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
- if (data_->should_access_heap()) {
- Handle<FieldType> field_type(object()
- ->instance_descriptors(kRelaxedLoad)
- .GetFieldType(descriptor_index),
- broker()->isolate());
- return ObjectRef(broker(), field_type);
- }
- DescriptorArrayData* descriptors =
- data()->AsMap()->instance_descriptors()->AsDescriptorArray();
- return ObjectRef(broker(), descriptors->GetFieldType(descriptor_index));
+ return instance_descriptors().GetFieldType(descriptor_index);
}
base::Optional<ObjectRef> StringRef::GetCharAsStringOrUndefined(
@@ -3271,11 +3323,11 @@ base::Optional<uint16_t> StringRef::GetFirstChar() {
return base::nullopt;
}
- if (broker()->local_isolate()) {
+ if (!broker()->IsMainThread()) {
return object()->Get(0, broker()->local_isolate());
} else {
- // TODO(solanes, v8:7790): Remove this case once we always have a local
- // isolate, i.e. the inlining phase is done concurrently all the time.
+ // TODO(solanes, v8:7790): Remove this case once the inlining phase is
+ // done concurrently all the time.
return object()->Get(0);
}
}
@@ -3417,9 +3469,13 @@ BIMODAL_ACCESSOR(HeapObject, Map, map)
BIMODAL_ACCESSOR_C(HeapNumber, double, value)
-BIMODAL_ACCESSOR(JSBoundFunction, JSReceiver, bound_target_function)
-BIMODAL_ACCESSOR(JSBoundFunction, Object, bound_this)
-BIMODAL_ACCESSOR(JSBoundFunction, FixedArray, bound_arguments)
+// These JSBoundFunction fields are immutable after initialization. Moreover,
+// as long as JSObjects are still serialized on the main thread, all
+// JSBoundFunctionRefs are created at a time when the underlying objects are
+// guaranteed to be fully initialized.
+BIMODAL_ACCESSOR_WITH_FLAG(JSBoundFunction, JSReceiver, bound_target_function)
+BIMODAL_ACCESSOR_WITH_FLAG(JSBoundFunction, Object, bound_this)
+BIMODAL_ACCESSOR_WITH_FLAG(JSBoundFunction, FixedArray, bound_arguments)
BIMODAL_ACCESSOR_C(JSDataView, size_t, byte_length)
@@ -3462,9 +3518,9 @@ BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field, is_undetectable,
BIMODAL_ACCESSOR_C(Map, int, instance_size)
BIMODAL_ACCESSOR_C(Map, int, NextFreePropertyIndex)
BIMODAL_ACCESSOR_C(Map, int, UnusedPropertyFields)
-BIMODAL_ACCESSOR(Map, HeapObject, prototype)
-BIMODAL_ACCESSOR_C(Map, InstanceType, instance_type)
-BIMODAL_ACCESSOR(Map, Object, GetConstructor)
+BIMODAL_ACCESSOR_WITH_FLAG(Map, HeapObject, prototype)
+BIMODAL_ACCESSOR_WITH_FLAG_C(Map, InstanceType, instance_type)
+BIMODAL_ACCESSOR_WITH_FLAG(Map, Object, GetConstructor)
BIMODAL_ACCESSOR_WITH_FLAG(Map, HeapObject, GetBackPointer)
BIMODAL_ACCESSOR_C(Map, bool, is_abandoned_prototype_map)
@@ -3484,8 +3540,10 @@ BIMODAL_ACCESSOR_C(RegExpBoilerplateDescription, int, flags)
base::Optional<CallHandlerInfoRef> FunctionTemplateInfoRef::call_code() const {
if (data_->should_access_heap()) {
- return CallHandlerInfoRef(broker(), broker()->CanonicalPersistentHandle(
- object()->call_code(kAcquireLoad)));
+ HeapObject call_code = object()->call_code(kAcquireLoad);
+ if (call_code.IsUndefined()) return base::nullopt;
+ return CallHandlerInfoRef(broker(),
+ broker()->CanonicalPersistentHandle(call_code));
}
ObjectData* call_code = data()->AsFunctionTemplateInfo()->call_code();
if (!call_code) return base::nullopt;
@@ -3501,40 +3559,72 @@ bool FunctionTemplateInfoRef::is_signature_undefined() const {
bool FunctionTemplateInfoRef::has_call_code() const {
if (data_->should_access_heap()) {
- CallOptimization call_optimization(broker()->isolate(), object());
- return call_optimization.is_simple_api_call();
+ HeapObject call_code = object()->call_code(kAcquireLoad);
+ return !call_code.IsUndefined();
}
return data()->AsFunctionTemplateInfo()->has_call_code();
}
-BIMODAL_ACCESSOR_C(FunctionTemplateInfo, bool, accept_any_receiver)
+bool FunctionTemplateInfoRef::accept_any_receiver() const {
+ if (data_->should_access_heap()) {
+ return object()->accept_any_receiver();
+ }
+ return ObjectRef::data()->AsFunctionTemplateInfo()->accept_any_receiver();
+}
HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
MapRef receiver_map, SerializationPolicy policy) {
const HolderLookupResult not_found;
if (data_->should_access_heap()) {
- CallOptimization call_optimization(broker()->isolate(), object());
- Handle<Map> receiver_map_ref(receiver_map.object());
- if (!receiver_map_ref->IsJSReceiverMap() ||
- (receiver_map_ref->is_access_check_needed() &&
+ // There are currently two ways we can see a FunctionTemplateInfo on the
+ // background thread: 1.) As part of a SharedFunctionInfo and 2.) in an
+ // AccessorPair. In both cases, the FTI is fully constructed on the main
+ // thread before.
+ // TODO(nicohartmann@, v8:7790): Once the above no longer holds, we might
+ // have to use the GC predicate to check whether objects are fully
+ // initialized and safe to read.
+ if (!receiver_map.IsJSReceiverMap() ||
+ (receiver_map.is_access_check_needed() &&
!object()->accept_any_receiver())) {
return not_found;
}
- HolderLookupResult result;
- Handle<JSObject> holder = call_optimization.LookupHolderOfExpectedType(
- receiver_map_ref, &result.lookup);
+ if (!receiver_map.IsJSObjectMap()) return not_found;
- switch (result.lookup) {
- case CallOptimization::kHolderFound:
- result.holder = JSObjectRef(broker(), holder);
- break;
- default:
- DCHECK_EQ(result.holder, base::nullopt);
- break;
+ DCHECK(has_call_code());
+
+ DisallowGarbageCollection no_gc;
+ HeapObject signature = object()->signature();
+ if (signature.IsUndefined()) {
+ return HolderLookupResult(CallOptimization::kHolderIsReceiver);
}
- return result;
+ auto expected_receiver_type = FunctionTemplateInfo::cast(signature);
+ if (expected_receiver_type.IsTemplateFor(*receiver_map.object())) {
+ return HolderLookupResult(CallOptimization::kHolderIsReceiver);
+ }
+
+ if (!receiver_map.IsJSGlobalProxyMap()) return not_found;
+ if (policy == SerializationPolicy::kSerializeIfNeeded) {
+ receiver_map.SerializePrototype();
+ }
+ if (!receiver_map.serialized_prototype()) return not_found;
+ if (receiver_map.prototype().IsNull()) return not_found;
+
+ JSObject raw_prototype = JSObject::cast(*receiver_map.prototype().object());
+ if (!expected_receiver_type.IsTemplateFor(raw_prototype.map())) {
+ return not_found;
+ }
+ Handle<JSObject> prototype =
+ broker()->CanonicalPersistentHandle(raw_prototype);
+ if (ObjectData* data = broker()->TryGetOrCreateData(prototype)) {
+ return HolderLookupResult(CallOptimization::kHolderFound,
+ JSObjectRef(broker(), data));
+ }
+
+ TRACE_BROKER_MISSING(broker(),
+ "holder for receiver with map " << receiver_map);
+ return not_found;
}
FunctionTemplateInfoData* fti_data = data()->AsFunctionTemplateInfo();
@@ -3584,9 +3674,8 @@ BIMODAL_ACCESSOR_C(SharedFunctionInfo, int, builtin_id)
BytecodeArrayRef SharedFunctionInfoRef::GetBytecodeArray() const {
if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
BytecodeArray bytecode_array;
- LocalIsolate* local_isolate = broker()->local_isolate();
- if (local_isolate && !local_isolate->is_main_thread()) {
- bytecode_array = object()->GetBytecodeArray(local_isolate);
+ if (!broker()->IsMainThread()) {
+ bytecode_array = object()->GetBytecodeArray(broker()->local_isolate());
} else {
bytecode_array = object()->GetBytecodeArray(broker()->isolate());
}
@@ -3603,8 +3692,8 @@ BROKER_SFI_FIELDS(DEF_SFI_ACCESSOR)
SharedFunctionInfo::Inlineability SharedFunctionInfoRef::GetInlineability()
const {
if (data_->should_access_heap()) {
- if (LocalIsolate* local_isolate = broker()->local_isolate()) {
- return object()->GetInlineability(local_isolate);
+ if (!broker()->IsMainThread()) {
+ return object()->GetInlineability(broker()->local_isolate());
} else {
return object()->GetInlineability(broker()->isolate());
}
@@ -3638,10 +3727,11 @@ base::Optional<ObjectRef> MapRef::GetStrongValue(
}
DescriptorArrayRef MapRef::instance_descriptors() const {
- if (data_->should_access_heap()) {
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
return DescriptorArrayRef(
- broker(), broker()->CanonicalPersistentHandle(
- object()->instance_descriptors(kRelaxedLoad)));
+ broker(),
+ broker()->CanonicalPersistentHandle(
+ object()->instance_descriptors(broker()->isolate(), kRelaxedLoad)));
}
return DescriptorArrayRef(broker(), data()->AsMap()->instance_descriptors());
@@ -3653,10 +3743,21 @@ void MapRef::SerializeRootMap() {
data()->AsMap()->SerializeRootMap(broker());
}
+// TODO(solanes, v8:7790): Remove base::Optional from the return type when
+// deleting serialization.
base::Optional<MapRef> MapRef::FindRootMap() const {
- if (data_->should_access_heap()) {
- return MapRef(broker(), broker()->CanonicalPersistentHandle(
- object()->FindRootMap(broker()->isolate())));
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ // TODO(solanes): Remove the TryGetOrCreateData part when Map is moved to
+ // kNeverSerialized.
+ ObjectData* root_map =
+ broker()->TryGetOrCreateData(broker()->CanonicalPersistentHandle(
+ object()->FindRootMap(broker()->isolate())));
+ if (root_map) {
+ // TODO(solanes, v8:7790): Consider caching the result of the root map.
+ return MapRef(broker(), root_map);
+ }
+ TRACE_BROKER_MISSING(broker(), "root map for object " << *this);
+ return base::nullopt;
}
ObjectData* map_data = data()->AsMap()->FindRootMap();
if (map_data != nullptr) {
@@ -3860,6 +3961,8 @@ base::Optional<JSFunctionRef> NativeContextRef::GetConstructorFunction(
}
}
+bool ObjectRef::IsNull() const { return object()->IsNull(); }
+
bool ObjectRef::IsNullOrUndefined() const {
if (IsSmi()) return false;
OddballType type = AsHeapObject().map().oddball_type();
@@ -3905,30 +4008,93 @@ Maybe<double> ObjectRef::OddballToNumber() const {
base::Optional<ObjectRef> JSObjectRef::GetOwnConstantElement(
uint32_t index, SerializationPolicy policy) const {
- if (data_->should_access_heap()) {
- CHECK_EQ(data_->kind(), ObjectDataKind::kUnserializedHeapObject);
- return GetOwnElementFromHeap(broker(), object(), index, true);
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ // `elements` are currently still serialized as members of JSObjectRef.
+ // TODO(jgruber,v8:7790): Once JSObject is no longer serialized, we must
+ // guarantee consistency between `object`, `elements_kind` and `elements`
+ // through other means (store/load order? locks? storing elements_kind in
+ // elements.map?).
+ STATIC_ASSERT(IsSerializedHeapObject<JSObject>());
+
+ base::Optional<FixedArrayBaseRef> maybe_elements_ref = elements();
+ if (!maybe_elements_ref.has_value()) {
+ TRACE_BROKER_MISSING(broker(), "JSObject::elements" << *this);
+ return {};
+ }
+
+ FixedArrayBaseRef elements_ref = maybe_elements_ref.value();
+ ElementsKind elements_kind = GetElementsKind();
+
+ DCHECK_LE(index, JSObject::kMaxElementIndex);
+
+ // See also ElementsAccessorBase::GetMaxIndex.
+ if (IsJSArray()) {
+ // For JSArrays we additionally need to check against JSArray::length.
+ // length_unsafe() is safe to use in this case since:
+ // - GetOwnConstantElement only detects a constant for JSArray holders if
+ // the array is frozen/sealed.
+ // - Frozen/sealed arrays can't change length.
+ // - We've already seen a map with frozen/sealed elements_kinds (above);
+ // - The release-load of that map ensures we read the newest value
+ // of `length` below.
+ uint32_t array_length;
+ if (!AsJSArray().length_unsafe().object()->ToArrayLength(&array_length)) {
+ return {};
+ }
+ if (index >= array_length) return {};
+ }
+
+ Object maybe_element;
+ auto result = ConcurrentLookupIterator::TryGetOwnConstantElement(
+ &maybe_element, broker()->isolate(), broker()->local_isolate(),
+ *object(), *elements_ref.object(), elements_kind, index);
+
+ if (result == ConcurrentLookupIterator::kGaveUp) {
+ TRACE_BROKER_MISSING(broker(), "JSObject::GetOwnConstantElement on "
+ << *this << " at index " << index);
+ return {};
+ } else if (result == ConcurrentLookupIterator::kNotPresent) {
+ return {};
+ }
+
+ DCHECK_EQ(result, ConcurrentLookupIterator::kPresent);
+ return ObjectRef{broker(),
+ broker()->CanonicalPersistentHandle(maybe_element)};
+ } else {
+ ObjectData* element =
+ data()->AsJSObject()->GetOwnConstantElement(broker(), index, policy);
+ if (element == nullptr) return base::nullopt;
+ return ObjectRef(broker(), element);
}
- ObjectData* element =
- data()->AsJSObject()->GetOwnConstantElement(broker(), index, policy);
- if (element == nullptr) return base::nullopt;
- return ObjectRef(broker(), element);
}
-base::Optional<ObjectRef> JSObjectRef::GetOwnDataProperty(
+base::Optional<ObjectRef> JSObjectRef::GetOwnFastDataProperty(
Representation field_representation, FieldIndex index,
SerializationPolicy policy) const {
if (data_->should_access_heap()) {
- return GetOwnDataPropertyFromHeap(broker(),
- Handle<JSObject>::cast(object()),
- field_representation, index);
+ return GetOwnFastDataPropertyFromHeap(broker(),
+ Handle<JSObject>::cast(object()),
+ field_representation, index);
}
- ObjectData* property = data()->AsJSObject()->GetOwnDataProperty(
+ ObjectData* property = data()->AsJSObject()->GetOwnFastDataProperty(
broker(), field_representation, index, policy);
if (property == nullptr) return base::nullopt;
return ObjectRef(broker(), property);
}
+ObjectRef JSObjectRef::GetOwnDictionaryProperty(
+ InternalIndex index, SerializationPolicy policy) const {
+ CHECK(index.is_found());
+ if (data_->should_access_heap()) {
+ return GetOwnDictionaryPropertyFromHeap(
+ broker(), Handle<JSObject>::cast(object()), index);
+ }
+ ObjectData* property =
+ data()->AsJSObject()->GetOwnDictionaryProperty(broker(), index, policy);
+ CHECK_NE(property, nullptr);
+ return ObjectRef(broker(), property);
+}
+
ObjectRef JSArrayRef::GetBoilerplateLength() const {
// Safe to read concurrently because:
// - boilerplates are immutable after initialization.
@@ -4129,7 +4295,7 @@ Float64 FixedDoubleArrayData::Get(int i) const {
PropertyDetails DescriptorArrayRef::GetPropertyDetails(
InternalIndex descriptor_index) const {
- if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ if (data_->should_access_heap()) {
return object()->GetDetails(descriptor_index);
}
return data()->AsDescriptorArray()->GetPropertyDetails(descriptor_index);
@@ -4137,7 +4303,7 @@ PropertyDetails DescriptorArrayRef::GetPropertyDetails(
NameRef DescriptorArrayRef::GetPropertyKey(
InternalIndex descriptor_index) const {
- if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ if (data_->should_access_heap()) {
NameRef result(broker(), broker()->CanonicalPersistentHandle(
object()->GetKey(descriptor_index)));
CHECK(result.IsUniqueName());
@@ -4147,9 +4313,25 @@ NameRef DescriptorArrayRef::GetPropertyKey(
data()->AsDescriptorArray()->GetPropertyKey(descriptor_index));
}
+ObjectRef DescriptorArrayRef::GetFieldType(
+ InternalIndex descriptor_index) const {
+ if (data_->should_access_heap()) {
+ // This method only gets called for the creation of FieldTypeDependencies.
+ // These calls happen when the broker is either disabled or serializing,
+ // which means that GetOrCreateData would be able to successfully create the
+ // ObjectRef for the cases where we haven't seen the FieldType before.
+ DCHECK(broker()->mode() == JSHeapBroker::kDisabled ||
+ broker()->mode() == JSHeapBroker::kSerializing);
+ return ObjectRef(broker(), broker()->CanonicalPersistentHandle(
+ object()->GetFieldType(descriptor_index)));
+ }
+ return ObjectRef(broker(),
+ data()->AsDescriptorArray()->GetFieldType(descriptor_index));
+}
+
base::Optional<ObjectRef> DescriptorArrayRef::GetStrongValue(
InternalIndex descriptor_index) const {
- if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ if (data_->should_access_heap()) {
HeapObject heap_object;
if (object()
->GetValue(descriptor_index)
@@ -4204,15 +4386,11 @@ bool NameRef::IsUniqueName() const {
void RegExpBoilerplateDescriptionRef::Serialize() {
if (data_->should_access_heap()) {
// Even if the regexp boilerplate object itself is no longer serialized,
- // both `data` and `source` fields still are and thus we need to make sure
- // to visit them.
- // TODO(jgruber,v8:7790): Remove once these are no longer serialized types.
+ // the `data` still is and thus we need to make sure to visit it.
+ // TODO(jgruber,v8:7790): Remove once it is no longer a serialized type.
STATIC_ASSERT(IsSerializedHeapObject<FixedArray>());
FixedArrayRef data_ref{
broker(), broker()->CanonicalPersistentHandle(object()->data())};
- STATIC_ASSERT(IsSerializedHeapObject<String>());
- StringRef source_ref{
- broker(), broker()->CanonicalPersistentHandle(object()->source())};
} else {
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
HeapObjectRef::data()->AsRegExpBoilerplateDescription()->Serialize(
@@ -4469,8 +4647,8 @@ void MapRef::SerializePrototype() {
}
bool MapRef::serialized_prototype() const {
- CHECK_NE(broker()->mode(), JSHeapBroker::kDisabled);
if (data_->should_access_heap()) return true;
+ CHECK_NE(broker()->mode(), JSHeapBroker::kDisabled);
return data()->AsMap()->serialized_prototype();
}
@@ -4534,7 +4712,19 @@ bool PropertyCellRef::Serialize() const {
}
void FunctionTemplateInfoRef::SerializeCallCode() {
- if (data_->should_access_heap()) return;
+ if (data_->should_access_heap()) {
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ // CallHandlerInfo::data may still hold a serialized heap object, so we
+ // have to make the broker aware of it.
+ // TODO(v8:7790): Remove this case once ObjectRef is never serialized.
+ Handle<HeapObject> call_code(object()->call_code(kAcquireLoad),
+ broker()->isolate());
+ if (call_code->IsCallHandlerInfo()) {
+ broker()->GetOrCreateData(
+ Handle<CallHandlerInfo>::cast(call_code)->data());
+ }
+ return;
+ }
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
data()->AsFunctionTemplateInfo()->SerializeCallCode(broker());
}
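
The heap-broker hunks above add dictionary-mode variants of the property lookups while keeping the existing caching discipline: under SerializationPolicy::kAssumeSerialized a cache miss is reported and the heap is left untouched, whereas kSerializeIfNeeded reads the value once and memoizes it. A simplified sketch of that discipline, using toy types rather than the broker's ObjectData machinery:

// Sketch only: policy-controlled, memoized heap reads.
#include <iostream>
#include <optional>
#include <string>
#include <unordered_map>

enum class SerializationPolicy { kAssumeSerialized, kSerializeIfNeeded };

class PropertyCache {
 public:
  std::optional<std::string> Get(int index, SerializationPolicy policy) {
    auto it = cache_.find(index);
    if (it != cache_.end()) return it->second;
    if (policy == SerializationPolicy::kAssumeSerialized) {
      std::cerr << "missing knowledge about property " << index << "\n";
      return std::nullopt;  // caller must handle the miss
    }
    std::string value = ReadFromHeap(index);  // only allowed when serializing
    cache_.emplace(index, value);
    return value;
  }

 private:
  static std::string ReadFromHeap(int index) {
    return "value_" + std::to_string(index);
  }
  std::unordered_map<int, std::string> cache_;
};

int main() {
  PropertyCache cache;
  std::cout << !cache.Get(0, SerializationPolicy::kAssumeSerialized).has_value() << "\n";  // miss reported
  std::cout << *cache.Get(0, SerializationPolicy::kSerializeIfNeeded) << "\n";             // read and cached
  std::cout << *cache.Get(0, SerializationPolicy::kAssumeSerialized) << "\n";              // served from cache
}
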
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index cc86b1451cd..32b09dddbc8 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -19,7 +19,7 @@
#include "src/handles/persistent-handles.h"
#include "src/heap/local-heap.h"
#include "src/heap/parked-scope.h"
-#include "src/interpreter/bytecode-array-accessor.h"
+#include "src/interpreter/bytecode-array-iterator.h"
#include "src/objects/code-kind.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/function-kind.h"
@@ -251,6 +251,10 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
bool IsSerializedForCompilation(const SharedFunctionInfoRef& shared,
const FeedbackVectorRef& feedback) const;
+ bool IsMainThread() const {
+ return local_isolate() == nullptr || local_isolate()->is_main_thread();
+ }
+
LocalIsolate* local_isolate() const { return local_isolate_; }
// Return the corresponding canonical persistent handle for {object}. Create
@@ -315,10 +319,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
friend class ObjectData;
friend class PropertyCellData;
- bool IsMainThread() const {
- return local_isolate() == nullptr || local_isolate()->is_main_thread();
- }
-
// If this returns false, the object is guaranteed to be fully initialized and
// thus safe to read from a memory safety perspective. The converse does not
// necessarily hold.
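
js-heap-broker.h promotes IsMainThread() to the public interface so that refs such as SharedFunctionInfoRef can decide whether to read through the LocalIsolate or the main Isolate. A toy sketch of that dispatch, assuming a much-simplified broker and isolate model:

// Sketch only: choose the isolate for a heap read based on the thread.
#include <iostream>

struct LocalIsolate {
  bool is_main_thread;
};

class Broker {
 public:
  explicit Broker(LocalIsolate* local) : local_isolate_(local) {}
  bool IsMainThread() const {
    return local_isolate_ == nullptr || local_isolate_->is_main_thread;
  }
  const char* Read() const {
    // Background threads must go through the local isolate; the main thread
    // may use the full isolate directly.
    return IsMainThread() ? "read via isolate()" : "read via local_isolate()";
  }

 private:
  LocalIsolate* local_isolate_;
};

int main() {
  LocalIsolate background{false};
  std::cout << Broker(nullptr).Read() << "\n";      // main thread, no local isolate
  std::cout << Broker(&background).Read() << "\n";  // concurrent compilation thread
}
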
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index b38199bfffa..5777719107c 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -138,11 +138,15 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
}
Reduction JSInliningHeuristic::Reduce(Node* node) {
+#if V8_ENABLE_WEBASSEMBLY
if (mode() == kWasmOnly) {
- return (node->opcode() == IrOpcode::kJSWasmCall)
- ? inliner_.ReduceJSWasmCall(node)
- : NoChange();
+ if (node->opcode() == IrOpcode::kJSWasmCall) {
+ return inliner_.ReduceJSWasmCall(node);
+ }
+ return NoChange();
}
+#endif // V8_ENABLE_WEBASSEMBLY
+
DCHECK_EQ(mode(), kJSOnly);
if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();
@@ -152,7 +156,6 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
// Check if we already saw that {node} before, and if so, just skip it.
if (seen_.find(node->id()) != seen_.end()) return NoChange();
- seen_.insert(node->id());
// Check if the {node} is an appropriate candidate for inlining.
Candidate candidate = CollectFunctions(node, kMaxCallPolymorphism);
@@ -230,6 +233,14 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
return NoChange();
}
+ // Found a candidate. Insert it into the set of seen nodes so that we don't
+ // revisit it in the future. Note this insertion happens here and not earlier in
+ // order to make inlining decisions order-independent. A node may not be a
+ // candidate when first seen, but later reductions may turn it into a valid
+ // candidate. In that case, the node should be revisited by
+ // JSInliningHeuristic.
+ seen_.insert(node->id());
+
// Forcibly inline small functions here. In the case of polymorphic inlining
// candidate_is_small is set only when all functions are small.
if (candidate_is_small) {
@@ -670,7 +681,9 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
bool small_function) {
int const num_calls = candidate.num_functions;
Node* const node = candidate.node;
+#if V8_ENABLE_WEBASSEMBLY
DCHECK_NE(node->opcode(), IrOpcode::kJSWasmCall);
+#endif // V8_ENABLE_WEBASSEMBLY
if (num_calls == 1) {
Reduction const reduction = inliner_.ReduceJSCall(node);
if (reduction.Changed()) {
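
The inlining-heuristic change above moves the seen_ insertion to the point where a node is accepted as a candidate, so a node rejected on first sight can still be reconsidered once later reductions improve it. A toy reducer showing the effect of that ordering (the class below is invented for illustration):

// Sketch only: mark a node as seen only once it actually qualifies.
#include <iostream>
#include <set>

class ToyInliningHeuristic {
 public:
  bool Reduce(int node_id, bool is_candidate_now) {
    if (seen_.count(node_id)) return false;  // already decided, skip
    if (!is_candidate_now) return false;     // NOT recorded: may qualify later
    seen_.insert(node_id);                   // decided exactly once, here
    return true;                             // "inline it"
  }

 private:
  std::set<int> seen_;
};

int main() {
  ToyInliningHeuristic h;
  std::cout << h.Reduce(1, false) << "\n";  // 0: not a candidate yet, not remembered
  std::cout << h.Reduce(1, true) << "\n";   // 1: revisited later and inlined
  std::cout << h.Reduce(1, true) << "\n";   // 0: already handled
}
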
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 5da0c9c1818..6e64f2b6777 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -20,11 +20,14 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
-#include "src/compiler/wasm-compiler.h"
#include "src/execution/isolate-inl.h"
#include "src/objects/feedback-cell-inl.h"
#include "src/parsing/parse-info.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/compiler/wasm-compiler.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace internal {
namespace compiler {
@@ -81,6 +84,7 @@ class JSCallAccessor {
Node* call_;
};
+#if V8_ENABLE_WEBASSEMBLY
Reduction JSInliner::InlineJSWasmCall(Node* call, Node* new_target,
Node* context, Node* frame_state,
StartNode start, Node* end,
@@ -92,6 +96,7 @@ Reduction JSInliner::InlineJSWasmCall(Node* call, Node* new_target,
uncaught_subcalls,
static_cast<int>(n.Parameters().signature()->parameter_count()));
}
+#endif // V8_ENABLE_WEBASSEMBLY
Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
Node* frame_state, StartNode start, Node* end,
@@ -384,10 +389,12 @@ FeedbackCellRef JSInliner::DetermineCallContext(Node* node,
UNREACHABLE();
}
+#if V8_ENABLE_WEBASSEMBLY
Reduction JSInliner::ReduceJSWasmCall(Node* node) {
// Create the subgraph for the inlinee.
Node* start_node;
Node* end;
+ size_t subgraph_min_node_id;
{
Graph::SubgraphScope scope(graph());
@@ -404,9 +411,16 @@ Reduction JSInliner::ReduceJSWasmCall(Node* node) {
jsgraph(), n.context(), n.frame_state(),
wasm_call_params.signature());
JSWasmCallData js_wasm_call_data(wasm_call_params.signature());
+
+ // All the nodes inserted by the inlined subgraph will have
+ // id >= subgraph_min_node_id. We use this later to avoid wire nodes that
+ // are not inserted by the inlinee but were already part of the graph to the
+ // surrounding exception handler, if present.
+ subgraph_min_node_id = graph()->NodeCount();
+
BuildInlinedJSToWasmWrapper(
graph()->zone(), jsgraph(), wasm_call_params.signature(),
- wasm_call_params.module(), source_positions_,
+ wasm_call_params.module(), isolate(), source_positions_,
StubCallMode::kCallBuiltinPointer, wasm::WasmFeatures::FromFlags(),
&js_wasm_call_data, continuation_frame_state);
@@ -427,6 +441,9 @@ Reduction JSInliner::ReduceJSWasmCall(Node* node) {
// Find all uncaught 'calls' in the inlinee.
AllNodes inlined_nodes(local_zone_, end, graph());
for (Node* subnode : inlined_nodes.reachable) {
+ // Ignore nodes that are not part of the inlinee.
+ if (subnode->id() < subgraph_min_node_id) continue;
+
// Every possibly throwing node should get {IfSuccess} and {IfException}
// projections, unless there already is local exception handling.
if (subnode->op()->HasProperty(Operator::kNoThrow)) continue;
@@ -444,10 +461,13 @@ Reduction JSInliner::ReduceJSWasmCall(Node* node) {
return InlineJSWasmCall(node, new_target, context, frame_state, start, end,
exception_target, uncaught_subcalls);
}
+#endif // V8_ENABLE_WEBASSEMBLY
Reduction JSInliner::ReduceJSCall(Node* node) {
DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
+#if V8_ENABLE_WEBASSEMBLY
DCHECK_NE(node->opcode(), IrOpcode::kJSWasmCall);
+#endif // V8_ENABLE_WEBASSEMBLY
JSCallAccessor call(node);
// Determine the call target.
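
ReduceJSWasmCall above records graph()->NodeCount() before building the inlined wrapper and later skips reachable nodes whose id is below that watermark when wiring exception projections. The sketch below reproduces just the watermark idea, with simplified stand-ins for the Graph and Node types:

// Sketch only: a node-id watermark separates pre-existing nodes from nodes
// created by the inlinee.
#include <iostream>
#include <vector>

struct Graph {
  int NodeCount() const { return next_id; }
  int NewNode() { return next_id++; }
  int next_id = 0;
};

int main() {
  Graph graph;
  std::vector<int> pre_existing = {graph.NewNode(), graph.NewNode()};
  int subgraph_min_node_id = graph.NodeCount();  // watermark taken before inlining
  std::vector<int> inlinee = {graph.NewNode(), graph.NewNode()};

  std::vector<int> reachable = {pre_existing[0], inlinee[0], inlinee[1]};
  for (int id : reachable) {
    if (id < subgraph_min_node_id) continue;  // not inserted by the inlinee
    std::cout << "wire node " << id << " to the exception handler\n";
  }
}
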
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index e1e1bdfa0a8..a5b77c54eaa 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -41,7 +41,9 @@ class JSInliner final : public AdvancedReducer {
// using the above generic reducer interface of the inlining machinery.
Reduction ReduceJSCall(Node* node);
+#if V8_ENABLE_WEBASSEMBLY
Reduction ReduceJSWasmCall(Node* node);
+#endif // V8_ENABLE_WEBASSEMBLY
private:
Zone* zone() const { return local_zone_; }
@@ -73,10 +75,12 @@ class JSInliner final : public AdvancedReducer {
Node* exception_target,
const NodeVector& uncaught_subcalls, int argument_count);
+#if V8_ENABLE_WEBASSEMBLY
Reduction InlineJSWasmCall(Node* call, Node* new_target, Node* context,
Node* frame_state, StartNode start, Node* end,
Node* exception_target,
const NodeVector& uncaught_subcalls);
+#endif // V8_ENABLE_WEBASSEMBLY
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 2d105e55a83..3d9290a0bf9 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -424,7 +424,9 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
AccessMode::kLoad);
}
- if (access_info.IsInvalid()) return NoChange();
+ // TODO(v8:11457) Support dictionary mode holders here.
+ if (access_info.IsInvalid() || access_info.HasDictionaryHolder())
+ return NoChange();
access_info.RecordDependencies(dependencies());
PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
@@ -451,12 +453,12 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
return Changed(node).FollowedBy(ReduceJSOrdinaryHasInstance(node));
}
- if (access_info.IsDataConstant()) {
+ if (access_info.IsFastDataConstant()) {
Handle<JSObject> holder;
bool found_on_proto = access_info.holder().ToHandle(&holder);
JSObjectRef holder_ref =
found_on_proto ? JSObjectRef(broker(), holder) : receiver_ref;
- base::Optional<ObjectRef> constant = holder_ref.GetOwnDataProperty(
+ base::Optional<ObjectRef> constant = holder_ref.GetOwnFastDataProperty(
access_info.field_representation(), access_info.field_index());
if (!constant.has_value() || !constant->IsHeapObject() ||
!constant->AsHeapObject().map().is_callable())
@@ -554,7 +556,9 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
break;
}
map = map.prototype().map();
- if (!map.is_stable()) return kMayBeInPrototypeChain;
+ // TODO(v8:11457) Support dictionary mode prototypes here.
+ if (!map.is_stable() || map.is_dictionary_map())
+ return kMayBeInPrototypeChain;
if (map.oddball_type() == OddballType::kNull) {
all = false;
break;
@@ -741,7 +745,10 @@ Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
PropertyAccessInfo access_info =
access_info_factory.FinalizePropertyAccessInfosAsOne(access_infos,
AccessMode::kLoad);
- if (access_info.IsInvalid()) return inference.NoChange();
+
+ // TODO(v8:11457) Support dictionary mode prototypes here.
+ if (access_info.IsInvalid() || access_info.HasDictionaryHolder())
+ return inference.NoChange();
// Only optimize when {resolution} definitely doesn't have a "then" property.
if (!access_info.IsNotFound()) return inference.NoChange();
@@ -1951,8 +1958,10 @@ Reduction JSNativeContextSpecialization::ReduceElementLoadFromHeapConstant(
// Check whether we're accessing a known element on the {receiver} and can
// constant-fold the load.
NumberMatcher mkey(key);
- if (mkey.IsInteger() && mkey.IsInRange(0.0, kMaxUInt32 - 1.0)) {
- uint32_t index = static_cast<uint32_t>(mkey.ResolvedValue());
+ if (mkey.IsInteger() &&
+ mkey.IsInRange(0.0, static_cast<double>(JSObject::kMaxElementIndex))) {
+ STATIC_ASSERT(JSObject::kMaxElementIndex <= kMaxUInt32);
+ const uint32_t index = static_cast<uint32_t>(mkey.ResolvedValue());
base::Optional<ObjectRef> element;
if (receiver_ref.IsJSObject()) {
@@ -2215,8 +2224,17 @@ Node* JSNativeContextSpecialization::InlinePropertyGetterCall(
Node* frame_state, Node** effect, Node** control,
ZoneVector<Node*>* if_exceptions, PropertyAccessInfo const& access_info) {
ObjectRef constant(broker(), access_info.constant());
+
+ if (access_info.IsDictionaryProtoAccessorConstant()) {
+ // For fast mode holders we recorded dependencies in BuildPropertyLoad.
+ for (const Handle<Map> map : access_info.lookup_start_object_maps()) {
+ dependencies()->DependOnConstantInDictionaryPrototypeChain(
+ MapRef{broker(), map}, NameRef{broker(), access_info.name()},
+ constant, PropertyKind::kAccessor);
+ }
+ }
+
Node* target = jsgraph()->Constant(constant);
- FrameStateInfo const& frame_info = FrameStateInfoOf(frame_state->op());
// Introduce the call to the getter function.
Node* value;
if (constant.IsJSFunction()) {
@@ -2231,12 +2249,8 @@ Node* JSNativeContextSpecialization::InlinePropertyGetterCall(
? receiver
: jsgraph()->Constant(ObjectRef(
broker(), access_info.holder().ToHandleChecked()));
- SharedFunctionInfoRef shared_info(
- broker(), frame_info.shared_info().ToHandleChecked());
-
- value =
- InlineApiCall(receiver, holder, frame_state, nullptr, effect, control,
- shared_info, constant.AsFunctionTemplateInfo());
+ value = InlineApiCall(receiver, holder, frame_state, nullptr, effect,
+ control, constant.AsFunctionTemplateInfo());
}
// Remember to rewire the IfException edge if this is inside a try-block.
if (if_exceptions != nullptr) {
@@ -2256,7 +2270,6 @@ void JSNativeContextSpecialization::InlinePropertySetterCall(
PropertyAccessInfo const& access_info) {
ObjectRef constant(broker(), access_info.constant());
Node* target = jsgraph()->Constant(constant);
- FrameStateInfo const& frame_info = FrameStateInfoOf(frame_state->op());
// Introduce the call to the setter function.
if (constant.IsJSFunction()) {
Node* feedback = jsgraph()->UndefinedConstant();
@@ -2271,10 +2284,8 @@ void JSNativeContextSpecialization::InlinePropertySetterCall(
? receiver
: jsgraph()->Constant(ObjectRef(
broker(), access_info.holder().ToHandleChecked()));
- SharedFunctionInfoRef shared_info(
- broker(), frame_info.shared_info().ToHandleChecked());
InlineApiCall(receiver, holder, frame_state, value, effect, control,
- shared_info, constant.AsFunctionTemplateInfo());
+ constant.AsFunctionTemplateInfo());
}
// Remember to rewire the IfException edge if this is inside a try-block.
if (if_exceptions != nullptr) {
@@ -2289,8 +2300,7 @@ void JSNativeContextSpecialization::InlinePropertySetterCall(
Node* JSNativeContextSpecialization::InlineApiCall(
Node* receiver, Node* holder, Node* frame_state, Node* value, Node** effect,
- Node** control, SharedFunctionInfoRef const& shared_info,
- FunctionTemplateInfoRef const& function_template_info) {
+ Node** control, FunctionTemplateInfoRef const& function_template_info) {
if (!function_template_info.has_call_code()) {
return nullptr;
}
@@ -2348,7 +2358,8 @@ JSNativeContextSpecialization::BuildPropertyLoad(
ZoneVector<Node*>* if_exceptions, PropertyAccessInfo const& access_info) {
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
- if (access_info.holder().ToHandle(&holder)) {
+ if (access_info.holder().ToHandle(&holder) &&
+ !access_info.HasDictionaryHolder()) {
dependencies()->DependOnStablePrototypeChains(
access_info.lookup_start_object_maps(), kStartAtPrototype,
JSObjectRef(broker(), holder));
@@ -2358,7 +2369,8 @@ JSNativeContextSpecialization::BuildPropertyLoad(
Node* value;
if (access_info.IsNotFound()) {
value = jsgraph()->UndefinedConstant();
- } else if (access_info.IsAccessorConstant()) {
+ } else if (access_info.IsFastAccessorConstant() ||
+ access_info.IsDictionaryProtoAccessorConstant()) {
ConvertReceiverMode receiver_mode =
receiver == lookup_start_object
? ConvertReceiverMode::kNotNullOrUndefined
@@ -2376,10 +2388,15 @@ JSNativeContextSpecialization::BuildPropertyLoad(
DCHECK_EQ(receiver, lookup_start_object);
value = graph()->NewNode(simplified()->StringLength(), receiver);
} else {
- DCHECK(access_info.IsDataField() || access_info.IsDataConstant());
+ DCHECK(access_info.IsDataField() || access_info.IsFastDataConstant() ||
+ access_info.IsDictionaryProtoDataConstant());
PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
- value = access_builder.BuildLoadDataField(
- name, access_info, lookup_start_object, &effect, &control);
+ if (access_info.IsDictionaryProtoDataConstant()) {
+ value = access_builder.FoldLoadDictPrototypeConstant(access_info);
+ } else {
+ value = access_builder.BuildLoadDataField(
+ name, access_info, lookup_start_object, &effect, &control);
+ }
}
return ValueEffectControl(value, effect, control);
@@ -2388,6 +2405,9 @@ JSNativeContextSpecialization::BuildPropertyLoad(
JSNativeContextSpecialization::ValueEffectControl
JSNativeContextSpecialization::BuildPropertyTest(
Node* effect, Node* control, PropertyAccessInfo const& access_info) {
+ // TODO(v8:11457) Support property tests for dictionary mode prototypes.
+ DCHECK(!access_info.HasDictionaryHolder());
+
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
@@ -2443,11 +2463,11 @@ JSNativeContextSpecialization::BuildPropertyStore(
DCHECK(!access_info.IsNotFound());
// Generate the actual property access.
- if (access_info.IsAccessorConstant()) {
+ if (access_info.IsFastAccessorConstant()) {
InlinePropertySetterCall(receiver, value, context, frame_state, &effect,
&control, if_exceptions, access_info);
} else {
- DCHECK(access_info.IsDataField() || access_info.IsDataConstant());
+ DCHECK(access_info.IsDataField() || access_info.IsFastDataConstant());
DCHECK(access_mode == AccessMode::kStore ||
access_mode == AccessMode::kStoreInLiteral);
FieldIndex const field_index = access_info.field_index();
@@ -2462,7 +2482,7 @@ JSNativeContextSpecialization::BuildPropertyStore(
AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer()),
storage, effect, control);
}
- bool store_to_existing_constant_field = access_info.IsDataConstant() &&
+ bool store_to_existing_constant_field = access_info.IsFastDataConstant() &&
access_mode == AccessMode::kStore &&
!access_info.HasTransitionMap();
FieldAccess field_access = {
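
js-native-context-specialization now distinguishes fast holders from dictionary-mode prototype holders: fast data constants keep the stable-prototype-chain dependency, while dictionary-proto data constants are folded and guarded by DependOnConstantInDictionaryPrototypeChain. A compact sketch of that dispatch, with a toy enum in place of PropertyAccessInfo:

// Sketch only: branch the load strategy on the holder's property mode.
#include <iostream>

enum class AccessKind {
  kDataField,
  kFastDataConstant,
  kDictionaryProtoDataConstant,
};

const char* BuildPropertyLoad(AccessKind kind) {
  switch (kind) {
    case AccessKind::kDictionaryProtoDataConstant:
      return "fold constant + dictionary-prototype dependency";
    case AccessKind::kFastDataConstant:
    case AccessKind::kDataField:
      return "load field + stable-prototype-chain dependency";
  }
  return "unreachable";
}

int main() {
  std::cout << BuildPropertyLoad(AccessKind::kDictionaryProtoDataConstant) << "\n";
  std::cout << BuildPropertyLoad(AccessKind::kFastDataConstant) << "\n";
}
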
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index cc351abb2f0..b26adc49383 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -188,7 +188,6 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
PropertyAccessInfo const& access_info);
Node* InlineApiCall(Node* receiver, Node* holder, Node* frame_state,
Node* value, Node** effect, Node** control,
- SharedFunctionInfoRef const& shared_info,
FunctionTemplateInfoRef const& function_template_info);
// Construct the appropriate subgraph for element access.
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index e565f1dfce6..4f491429f48 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -676,6 +676,7 @@ ForInParameters const& ForInParametersOf(const Operator* op) {
return OpParameter<ForInParameters>(op);
}
+#if V8_ENABLE_WEBASSEMBLY
JSWasmCallParameters const& JSWasmCallParametersOf(const Operator* op) {
DCHECK_EQ(IrOpcode::kJSWasmCall, op->opcode());
return OpParameter<JSWasmCallParameters>(op);
@@ -719,6 +720,7 @@ Type JSWasmCallNode::TypeForWasmReturnType(const wasm::ValueType& type) {
UNREACHABLE();
}
}
+#endif // V8_ENABLE_WEBASSEMBLY
#define CACHED_OP_LIST(V) \
V(ToLength, Operator::kNoProperties, 1, 1) \
@@ -918,6 +920,7 @@ const Operator* JSOperatorBuilder::CallRuntime(const Runtime::Function* f,
parameters); // parameter
}
+#if V8_ENABLE_WEBASSEMBLY
const Operator* JSOperatorBuilder::CallWasm(
const wasm::WasmModule* wasm_module,
const wasm::FunctionSig* wasm_signature, FeedbackSource const& feedback) {
@@ -928,6 +931,7 @@ const Operator* JSOperatorBuilder::CallWasm(
parameters.input_count(), 1, 1, 1, 1, 2, // inputs/outputs
parameters); // parameter
}
+#endif // V8_ENABLE_WEBASSEMBLY
const Operator* JSOperatorBuilder::ConstructForwardVarargs(
size_t arity, uint32_t start_index) {
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 46258f3bb16..4e447149bc6 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -821,6 +821,7 @@ size_t hash_value(ForInParameters const&);
std::ostream& operator<<(std::ostream&, ForInParameters const&);
const ForInParameters& ForInParametersOf(const Operator* op);
+#if V8_ENABLE_WEBASSEMBLY
class JSWasmCallParameters {
public:
explicit JSWasmCallParameters(const wasm::WasmModule* module,
@@ -849,6 +850,7 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
JSWasmCallParameters const&);
size_t hash_value(JSWasmCallParameters const&);
bool operator==(JSWasmCallParameters const&, JSWasmCallParameters const&);
+#endif // V8_ENABLE_WEBASSEMBLY
int RegisterCountOf(Operator const* op) V8_WARN_UNUSED_RESULT;
@@ -959,9 +961,11 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
const Operator* CallRuntime(const Runtime::Function* function, size_t arity);
+#if V8_ENABLE_WEBASSEMBLY
const Operator* CallWasm(const wasm::WasmModule* wasm_module,
const wasm::FunctionSig* wasm_signature,
FeedbackSource const& feedback);
+#endif // V8_ENABLE_WEBASSEMBLY
const Operator* ConstructForwardVarargs(size_t arity, uint32_t start_index);
const Operator* Construct(uint32_t arity,
@@ -1285,8 +1289,11 @@ class JSCallOrConstructNode : public JSNodeWrapperBase {
node->opcode() == IrOpcode::kJSCallWithSpread ||
node->opcode() == IrOpcode::kJSConstruct ||
node->opcode() == IrOpcode::kJSConstructWithArrayLike ||
- node->opcode() == IrOpcode::kJSConstructWithSpread ||
- node->opcode() == IrOpcode::kJSWasmCall);
+ node->opcode() == IrOpcode::kJSConstructWithSpread
+#if V8_ENABLE_WEBASSEMBLY
+ || node->opcode() == IrOpcode::kJSWasmCall
+#endif // V8_ENABLE_WEBASSEMBLY
+ ); // NOLINT(whitespace/parens)
}
#define INPUTS(V) \
@@ -1394,6 +1401,7 @@ using JSCallNode = JSCallNodeBase<IrOpcode::kJSCall>;
using JSCallWithSpreadNode = JSCallNodeBase<IrOpcode::kJSCallWithSpread>;
using JSCallWithArrayLikeNode = JSCallNodeBase<IrOpcode::kJSCallWithArrayLike>;
+#if V8_ENABLE_WEBASSEMBLY
class JSWasmCallNode final : public JSCallOrConstructNode {
public:
explicit constexpr JSWasmCallNode(Node* node) : JSCallOrConstructNode(node) {
@@ -1422,6 +1430,7 @@ class JSWasmCallNode final : public JSCallOrConstructNode {
static Type TypeForWasmReturnType(const wasm::ValueType& type);
};
+#endif // V8_ENABLE_WEBASSEMBLY
template <int kOpcode>
class JSConstructNodeBase final : public JSCallOrConstructNode {
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 4f1565d0a96..b34661a80b4 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -17,6 +17,10 @@ namespace compiler {
namespace {
+// Offsets from callee to caller frame, in slots.
+constexpr int kFirstCallerSlotOffset = 1;
+constexpr int kNoCallerSlotOffset = 0;
+
inline LinkageLocation regloc(Register reg, MachineType type) {
return LinkageLocation::ForRegister(reg.code(), type);
}
@@ -39,6 +43,7 @@ std::ostream& operator<<(std::ostream& os, const CallDescriptor::Kind& k) {
case CallDescriptor::kCallAddress:
os << "Addr";
break;
+#if V8_ENABLE_WEBASSEMBLY
case CallDescriptor::kCallWasmCapiFunction:
os << "WasmExit";
break;
@@ -48,6 +53,7 @@ std::ostream& operator<<(std::ostream& os, const CallDescriptor::Kind& k) {
case CallDescriptor::kCallWasmImportWrapper:
os << "WasmImportWrapper";
break;
+#endif // V8_ENABLE_WEBASSEMBLY
case CallDescriptor::kCallBuiltinPointer:
os << "BuiltinPointer";
break;
@@ -59,7 +65,7 @@ std::ostream& operator<<(std::ostream& os, const CallDescriptor::Kind& k) {
std::ostream& operator<<(std::ostream& os, const CallDescriptor& d) {
// TODO(svenpanne) Output properties etc. and be less cryptic.
return os << d.kind() << ":" << d.debug_name() << ":r" << d.ReturnCount()
- << "s" << d.StackParameterCount() << "i" << d.InputCount() << "f"
+ << "s" << d.ParameterSlotCount() << "i" << d.InputCount() << "f"
<< d.FrameStateCount();
}
@@ -85,62 +91,51 @@ int CallDescriptor::GetStackParameterDelta(
// inputs to the TailCall node, since they already exist on the stack.
if (IsTailCallForTierUp()) return 0;
- int callee_slots_above_sp = GetOffsetToReturns();
- int tail_caller_slots_above_sp = tail_caller->GetOffsetToReturns();
+ // Add padding if necessary before computing the stack parameter delta.
+ int callee_slots_above_sp = AddArgumentPaddingSlots(GetOffsetToReturns());
+ int tail_caller_slots_above_sp =
+ AddArgumentPaddingSlots(tail_caller->GetOffsetToReturns());
int stack_param_delta = callee_slots_above_sp - tail_caller_slots_above_sp;
- if (ShouldPadArguments(stack_param_delta)) {
- if (callee_slots_above_sp % 2 != 0) {
- // The delta is odd due to the callee - we will need to add one slot
- // of padding.
- ++stack_param_delta;
- } else {
- DCHECK_NE(tail_caller_slots_above_sp % 2, 0);
- // The delta is odd because of the caller. We already have one slot of
- // padding that we can reuse for arguments, so we will need one fewer
- // slot.
- --stack_param_delta;
- }
- }
+ DCHECK(!ShouldPadArguments(stack_param_delta));
return stack_param_delta;
}
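The rewritten GetStackParameterDelta above pads both the callee's and the tail caller's argument areas before subtracting, so the resulting delta never needs further padding (which is what the new DCHECK asserts). A standalone arithmetic sketch of that rule, using a hypothetical even-slot padding rule in place of AddArgumentPaddingSlots; illustrative only, not V8 code:

#include <cassert>

// Hypothetical stand-in for AddArgumentPaddingSlots on targets that pad
// argument areas to an even number of slots.
int AddArgumentPaddingSlots(int slots) { return slots + (slots & 1); }

int GetStackParameterDelta(int callee_slots_above_sp,
                           int tail_caller_slots_above_sp) {
  // Pad both sides first; the difference is then always even, matching the
  // property the new DCHECK in the patch checks.
  return AddArgumentPaddingSlots(callee_slots_above_sp) -
         AddArgumentPaddingSlots(tail_caller_slots_above_sp);
}

int main() {
  // Callee keeps 3 slots above sp (padded to 4), the tail caller keeps 1
  // (padded to 2): the tail call needs 2 extra argument slots.
  assert(GetStackParameterDelta(3, 1) == 2);
  return 0;
}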
-int CallDescriptor::GetFirstUnusedStackSlot() const {
- int start_of_args = 0;
+int CallDescriptor::GetOffsetToFirstUnusedStackSlot() const {
+ int offset = kFirstCallerSlotOffset;
for (size_t i = 0; i < InputCount(); ++i) {
LinkageLocation operand = GetInputLocation(i);
if (!operand.IsRegister()) {
- // Reverse, since arguments have negative offsets in the frame.
- int reverse_location =
- -operand.GetLocation() + operand.GetSizeInPointers() - 1;
- DCHECK_GE(reverse_location, 0);
- start_of_args = std::max(start_of_args, reverse_location);
+ DCHECK(operand.IsCallerFrameSlot());
+ int slot_offset = -operand.GetLocation();
+ offset = std::max(offset, slot_offset + operand.GetSizeInPointers());
}
}
- return start_of_args;
+ return offset;
}
int CallDescriptor::GetOffsetToReturns() const {
- // If there are return stack slots, return the first slot of the last one.
- constexpr int kNoReturnSlot = std::numeric_limits<int>::max();
- int end_of_returns = kNoReturnSlot;
+ // Find the return slot with the least offset relative to the callee.
+ int offset = kNoCallerSlotOffset;
for (size_t i = 0; i < ReturnCount(); ++i) {
LinkageLocation operand = GetReturnLocation(i);
if (!operand.IsRegister()) {
- // Reverse, since returns have negative offsets in the frame.
- int reverse_location = -operand.GetLocation() - 1;
- DCHECK_GE(reverse_location, 0);
- end_of_returns = std::min(end_of_returns, reverse_location);
+ DCHECK(operand.IsCallerFrameSlot());
+ int slot_offset = -operand.GetLocation();
+ offset = std::min(offset, slot_offset);
}
}
- if (end_of_returns != kNoReturnSlot) return end_of_returns;
+ // If there was a return slot, return the offset minus 1 slot.
+ if (offset != kNoCallerSlotOffset) {
+ return offset - 1;
+ }
- // Otherwise, return the first unused slot before the parameters, with any
- // additional padding slot if it exists.
- end_of_returns = GetFirstUnusedStackSlot();
- if (ShouldPadArguments(end_of_returns)) end_of_returns++;
+ // Otherwise, return the first slot after the parameters area, including
+ // optional padding slots.
+ int last_argument_slot = GetOffsetToFirstUnusedStackSlot() - 1;
+ offset = AddArgumentPaddingSlots(last_argument_slot);
- DCHECK_EQ(end_of_returns == 0, StackParameterCount() == 0);
- return end_of_returns;
+ DCHECK_IMPLIES(offset == 0, ParameterSlotCount() == 0);
+ return offset;
}
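The two offset helpers above now share one convention: callee slot 0 holds the return address, parameter slots start at offset 1 (kFirstCallerSlotOffset), and return slots sit above the (possibly padded) parameter area. The standalone sketch below covers only the no-return-slots branch of GetOffsetToReturns and assumes a hypothetical even-slot padding rule; it is illustrative, not V8 code:

#include <cassert>

constexpr int kFirstCallerSlotOffset = 1;

// Hypothetical padding rule standing in for AddArgumentPaddingSlots.
int AddArgumentPaddingSlots(int slots) { return slots + (slots & 1); }

// First slot past the parameter area (slot 0 is the return address).
int OffsetToFirstUnusedStackSlot(int param_slot_count) {
  return kFirstCallerSlotOffset + param_slot_count;
}

// Offset returned when the call has no return slots.
int OffsetToReturnsNoReturnSlots(int param_slot_count) {
  int last_argument_slot = OffsetToFirstUnusedStackSlot(param_slot_count) - 1;
  return AddArgumentPaddingSlots(last_argument_slot);
}

int main() {
  assert(OffsetToFirstUnusedStackSlot(0) == 1);    // "+1" when no parameters
  assert(OffsetToReturnsNoReturnSlots(0) == 0);    // matches the DCHECK_IMPLIES
  assert(OffsetToReturnsNoReturnSlots(3) == 4);    // 3 params, padded to 4 slots
  return 0;
}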
int CallDescriptor::GetTaggedParameterSlots() const {
@@ -181,26 +176,34 @@ int CallDescriptor::CalculateFixedFrameSize(CodeKind code_kind) const {
case kCallJSFunction:
return StandardFrameConstants::kFixedSlotCount;
case kCallAddress:
+#if V8_ENABLE_WEBASSEMBLY
if (code_kind == CodeKind::C_WASM_ENTRY) {
return CWasmEntryFrameConstants::kFixedSlotCount;
}
+#endif // V8_ENABLE_WEBASSEMBLY
return CommonFrameConstants::kFixedSlotCountAboveFp +
CommonFrameConstants::kCPSlotCount;
case kCallCodeObject:
case kCallBuiltinPointer:
return TypedFrameConstants::kFixedSlotCount;
+#if V8_ENABLE_WEBASSEMBLY
case kCallWasmFunction:
case kCallWasmImportWrapper:
return WasmFrameConstants::kFixedSlotCount;
case kCallWasmCapiFunction:
return WasmExitFrameConstants::kFixedSlotCount;
+#endif // V8_ENABLE_WEBASSEMBLY
}
UNREACHABLE();
}
CallDescriptor* Linkage::ComputeIncoming(Zone* zone,
OptimizedCompilationInfo* info) {
+#if V8_ENABLE_WEBASSEMBLY
DCHECK(info->IsOptimizing() || info->IsWasm());
+#else
+ DCHECK(info->IsOptimizing());
+#endif // V8_ENABLE_WEBASSEMBLY
if (!info->closure().is_null()) {
// If we are compiling a JS function, use a JS call descriptor,
// plus the receiver.
@@ -475,10 +478,12 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
kind = CallDescriptor::kCallCodeObject;
target_type = MachineType::AnyTagged();
break;
+#if V8_ENABLE_WEBASSEMBLY
case StubCallMode::kCallWasmRuntimeStub:
kind = CallDescriptor::kCallWasmFunction;
target_type = MachineType::Pointer();
break;
+#endif // V8_ENABLE_WEBASSEMBLY
case StubCallMode::kCallBuiltinPointer:
kind = CallDescriptor::kCallBuiltinPointer;
target_type = MachineType::AnyTagged();
@@ -585,10 +590,12 @@ bool Linkage::ParameterHasSecondaryLocation(int index) const {
return IsTaggedReg(loc, kJSFunctionRegister) ||
IsTaggedReg(loc, kContextRegister);
}
+#if V8_ENABLE_WEBASSEMBLY
if (incoming_->IsWasmFunctionCall()) {
LinkageLocation loc = GetParameterLocation(index);
return IsTaggedReg(loc, kWasmInstanceRegister);
}
+#endif // V8_ENABLE_WEBASSEMBLY
return false;
}
@@ -596,7 +603,6 @@ LinkageLocation Linkage::GetParameterSecondaryLocation(int index) const {
// TODO(titzer): these constants are necessary due to offset/slot# mismatch
static const int kJSContextSlot = 2 + StandardFrameConstants::kCPSlotCount;
static const int kJSFunctionSlot = 3 + StandardFrameConstants::kCPSlotCount;
- static const int kWasmInstanceSlot = 3 + StandardFrameConstants::kCPSlotCount;
DCHECK(ParameterHasSecondaryLocation(index));
LinkageLocation loc = GetParameterLocation(index);
@@ -612,11 +618,14 @@ LinkageLocation Linkage::GetParameterSecondaryLocation(int index) const {
MachineType::AnyTagged());
}
}
+#if V8_ENABLE_WEBASSEMBLY
+ static const int kWasmInstanceSlot = 3 + StandardFrameConstants::kCPSlotCount;
if (incoming_->IsWasmFunctionCall()) {
DCHECK(IsTaggedReg(loc, kWasmInstanceRegister));
return LinkageLocation::ForCalleeFrameSlot(kWasmInstanceSlot,
MachineType::AnyTagged());
}
+#endif // V8_ENABLE_WEBASSEMBLY
UNREACHABLE();
return LinkageLocation::ForCalleeFrameSlot(0, MachineType::AnyTagged());
}
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 4aecb7c3a85..96c0d368eae 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -199,9 +199,11 @@ class V8_EXPORT_PRIVATE CallDescriptor final
kCallCodeObject, // target is a Code object
kCallJSFunction, // target is a JSFunction object
kCallAddress, // target is a machine pointer
+#if V8_ENABLE_WEBASSEMBLY // ↓ WebAssembly only
kCallWasmCapiFunction, // target is a Wasm C API function
kCallWasmFunction, // target is a wasm function
kCallWasmImportWrapper, // target is a wasm import wrapper
+#endif // ↑ WebAssembly only
kCallBuiltinPointer, // target is a builtin pointer
};
@@ -254,20 +256,20 @@ class V8_EXPORT_PRIVATE CallDescriptor final
using Flags = base::Flags<Flag>;
CallDescriptor(Kind kind, MachineType target_type, LinkageLocation target_loc,
- LocationSignature* location_sig, size_t stack_param_count,
+ LocationSignature* location_sig, size_t param_slot_count,
Operator::Properties properties,
RegList callee_saved_registers,
RegList callee_saved_fp_registers, Flags flags,
const char* debug_name = "",
StackArgumentOrder stack_order = StackArgumentOrder::kDefault,
const RegList allocatable_registers = 0,
- size_t stack_return_count = 0)
+ size_t return_slot_count = 0)
: kind_(kind),
target_type_(target_type),
target_loc_(target_loc),
location_sig_(location_sig),
- stack_param_count_(stack_param_count),
- stack_return_count_(stack_return_count),
+ param_slot_count_(param_slot_count),
+ return_slot_count_(return_slot_count),
properties_(properties),
callee_saved_registers_(callee_saved_registers),
callee_saved_fp_registers_(callee_saved_fp_registers),
@@ -288,6 +290,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final
// Returns {true} if this descriptor is a call to a JSFunction.
bool IsJSFunctionCall() const { return kind_ == kCallJSFunction; }
+#if V8_ENABLE_WEBASSEMBLY
// Returns {true} if this descriptor is a call to a WebAssembly function.
bool IsWasmFunctionCall() const { return kind_ == kCallWasmFunction; }
@@ -296,9 +299,14 @@ class V8_EXPORT_PRIVATE CallDescriptor final
// Returns {true} if this descriptor is a call to a Wasm C API function.
bool IsWasmCapiFunction() const { return kind_ == kCallWasmCapiFunction; }
+#endif // V8_ENABLE_WEBASSEMBLY
bool RequiresFrameAsIncoming() const {
- return IsCFunctionCall() || IsJSFunctionCall() || IsWasmFunctionCall();
+ if (IsCFunctionCall() || IsJSFunctionCall()) return true;
+#if V8_ENABLE_WEBASSEMBLY
+ if (IsWasmFunctionCall()) return true;
+#endif // V8_ENABLE_WEBASSEMBLY
+ return false;
}
// The number of return values from this call.
@@ -308,15 +316,15 @@ class V8_EXPORT_PRIVATE CallDescriptor final
size_t ParameterCount() const { return location_sig_->parameter_count(); }
// The number of stack parameter slots to the call.
- size_t StackParameterCount() const { return stack_param_count_; }
+ size_t ParameterSlotCount() const { return param_slot_count_; }
// The number of stack return value slots from the call.
- size_t StackReturnCount() const { return stack_return_count_; }
+ size_t ReturnSlotCount() const { return return_slot_count_; }
// The number of parameters to the JS function call.
size_t JSParameterCount() const {
DCHECK(IsJSFunctionCall());
- return stack_param_count_;
+ return param_slot_count_;
}
int GetStackIndexFromSlot(int slot_index) const {
@@ -324,7 +332,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final
case StackArgumentOrder::kDefault:
return -slot_index - 1;
case StackArgumentOrder::kJS:
- return slot_index + static_cast<int>(StackParameterCount());
+ return slot_index + static_cast<int>(ParameterSlotCount());
}
}
@@ -391,13 +399,15 @@ class V8_EXPORT_PRIVATE CallDescriptor final
int GetStackParameterDelta(const CallDescriptor* tail_caller) const;
- // Returns the first stack slot that is not used by the stack parameters,
- // which is the return slot area, or a padding slot for frame alignment.
- int GetFirstUnusedStackSlot() const;
+ // Returns the offset to the area below the parameter slots on the stack,
+ // relative to callee slot 0, the return address. If there are no parameter
+ // slots, returns +1.
+ int GetOffsetToFirstUnusedStackSlot() const;
- // If there are return stack slots, returns the first slot of the last one.
- // Otherwise, return the first unused slot before the parameters. This is the
- // slot where returns would go if there were any.
+ // Returns the offset to the area above the return slots on the stack,
+ // relative to callee slot 0, the return address. If there are no return
+ // slots, returns the offset to the lowest slot of the parameter area.
+ // If there are no parameter slots, returns 0.
int GetOffsetToReturns() const;
int GetTaggedParameterSlots() const;
@@ -426,8 +436,8 @@ class V8_EXPORT_PRIVATE CallDescriptor final
const MachineType target_type_;
const LinkageLocation target_loc_;
const LocationSignature* const location_sig_;
- const size_t stack_param_count_;
- const size_t stack_return_count_;
+ const size_t param_slot_count_;
+ const size_t return_slot_count_;
const Operator::Properties properties_;
const RegList callee_saved_registers_;
const RegList callee_saved_fp_registers_;
diff --git a/deps/v8/src/compiler/loop-analysis.cc b/deps/v8/src/compiler/loop-analysis.cc
index 582eebd8f5d..0d52e780049 100644
--- a/deps/v8/src/compiler/loop-analysis.cc
+++ b/deps/v8/src/compiler/loop-analysis.cc
@@ -39,7 +39,6 @@ struct TempLoopInfo {
LoopTree::Loop* loop;
};
-
// Encapsulation of the loop finding algorithm.
// -----------------------------------------------------------------------------
// Conceptually, the contents of a loop are those nodes that are "between" the
@@ -54,6 +53,8 @@ struct TempLoopInfo {
// 1 bit per loop per node per direction are required during the marking phase.
// To handle nested loops correctly, the algorithm must filter some reachability
// marks on edges into/out-of the loop header nodes.
+// Note: this algorithm assumes there are no unreachable loop header nodes
+// (including loop phis).
class LoopFinderImpl {
public:
LoopFinderImpl(Graph* graph, LoopTree* loop_tree, TickCounter* tick_counter,
@@ -542,6 +543,55 @@ LoopTree* LoopFinder::BuildLoopTree(Graph* graph, TickCounter* tick_counter,
return loop_tree;
}
+// static
+ZoneUnorderedSet<Node*>* LoopFinder::FindUnnestedLoopFromHeader(
+ Node* loop_header, Zone* zone, size_t max_size) {
+ auto* visited = zone->New<ZoneUnorderedSet<Node*>>(zone);
+
+ std::vector<Node*> queue;
+
+ DCHECK(loop_header->opcode() == IrOpcode::kLoop);
+
+ queue.push_back(loop_header);
+
+ while (!queue.empty()) {
+ Node* node = queue.back();
+ queue.pop_back();
+ // Terminate is not part of the loop, and neither are its uses.
+ if (node->opcode() == IrOpcode::kTerminate) {
+ DCHECK_EQ(node->InputAt(1), loop_header);
+ continue;
+ }
+ visited->insert(node);
+ if (visited->size() > max_size) return nullptr;
+ switch (node->opcode()) {
+ case IrOpcode::kLoopExit:
+ DCHECK_EQ(node->InputAt(1), loop_header);
+ // LoopExitValue/Effect uses are inside the loop. The rest are not.
+ for (Node* use : node->uses()) {
+ if (use->opcode() == IrOpcode::kLoopExitEffect ||
+ use->opcode() == IrOpcode::kLoopExitValue) {
+ if (visited->count(use) == 0) queue.push_back(use);
+ }
+ }
+ break;
+ case IrOpcode::kLoopExitEffect:
+ case IrOpcode::kLoopExitValue:
+ DCHECK_EQ(NodeProperties::GetControlInput(node)->InputAt(1),
+ loop_header);
+ // All uses are outside the loop, do nothing.
+ break;
+ default:
+ for (Node* use : node->uses()) {
+ if (visited->count(use) == 0) queue.push_back(use);
+ }
+ break;
+ }
+ }
+
+ return visited;
+}
+
bool LoopFinder::HasMarkedExits(LoopTree* loop_tree,
const LoopTree::Loop* loop) {
// Look for returns and if projections that are outside the loop but whose
@@ -608,34 +658,6 @@ void NodeCopier::Insert(Node* original, Node* copy) {
copies_->push_back(copy);
}
-void NodeCopier::CopyNodes(Graph* graph, Zone* tmp_zone_, Node* dead,
- NodeRange nodes,
- SourcePositionTable* source_positions,
- NodeOriginTable* node_origins) {
- // Copy all the nodes first.
- for (Node* original : nodes) {
- SourcePositionTable::Scope position(
- source_positions, source_positions->GetSourcePosition(original));
- NodeOriginTable::Scope origin_scope(node_origins, "copy nodes", original);
- node_map_.Set(original, copies_->size() + 1);
- copies_->push_back(original);
- for (uint32_t copy_index = 0; copy_index < copy_count_; copy_index++) {
- Node* copy = graph->CloneNode(original);
- copies_->push_back(copy);
- }
- }
-
- // Fix inputs of the copies.
- for (Node* original : nodes) {
- for (uint32_t copy_index = 0; copy_index < copy_count_; copy_index++) {
- Node* copy = map(original, copy_index);
- for (int i = 0; i < copy->InputCount(); i++) {
- copy->ReplaceInput(i, map(original->InputAt(i), copy_index));
- }
- }
- }
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/loop-analysis.h b/deps/v8/src/compiler/loop-analysis.h
index 3cce611be92..49db12fef3b 100644
--- a/deps/v8/src/compiler/loop-analysis.h
+++ b/deps/v8/src/compiler/loop-analysis.h
@@ -178,6 +178,17 @@ class V8_EXPORT_PRIVATE LoopFinder {
Zone* temp_zone);
static bool HasMarkedExits(LoopTree* loop_tree_, const LoopTree::Loop* loop);
+
+  // Find all nodes of a loop given its header node. Will exit early once the
+  // current loop size exceeds {max_size}. This is a very restricted version of
+ // BuildLoopTree.
+ // Assumptions:
+ // 1) All loop exits of the loop are marked with LoopExit, LoopExitEffect,
+ // and LoopExitValue nodes.
+ // 2) There are no nested loops within this loop.
+ static ZoneUnorderedSet<Node*>* FindUnnestedLoopFromHeader(Node* loop_header,
+ Zone* zone,
+ size_t max_size);
};
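A hypothetical caller of this helper might look like the fragment below (not part of the patch; it assumes the surrounding v8::internal::compiler types and that loop_header, tmp_zone and max_size already exist):

// Hypothetical usage sketch for FindUnnestedLoopFromHeader.
ZoneUnorderedSet<Node*>* loop_body =
    LoopFinder::FindUnnestedLoopFromHeader(loop_header, tmp_zone, max_size);
if (loop_body == nullptr) {
  // The traversal exceeded max_size; the loop is too large to process.
  return;
}
// The set now holds the loop header, its body, and the LoopExit /
// LoopExitEffect / LoopExitValue markers, but not the Terminate node or any
// node beyond the exits.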
// Copies a range of nodes any number of times.
@@ -205,9 +216,34 @@ class NodeCopier {
// Helper version of {Insert} for one copy.
void Insert(Node* original, Node* copy);
- void CopyNodes(Graph* graph, Zone* tmp_zone_, Node* dead, NodeRange nodes,
+ template <typename InputIterator>
+ void CopyNodes(Graph* graph, Zone* tmp_zone_, Node* dead,
+ base::iterator_range<InputIterator> nodes,
SourcePositionTable* source_positions,
- NodeOriginTable* node_origins);
+ NodeOriginTable* node_origins) {
+ // Copy all the nodes first.
+ for (Node* original : nodes) {
+ SourcePositionTable::Scope position(
+ source_positions, source_positions->GetSourcePosition(original));
+ NodeOriginTable::Scope origin_scope(node_origins, "copy nodes", original);
+ node_map_.Set(original, copies_->size() + 1);
+ copies_->push_back(original);
+ for (uint32_t copy_index = 0; copy_index < copy_count_; copy_index++) {
+ Node* copy = graph->CloneNode(original);
+ copies_->push_back(copy);
+ }
+ }
+
+ // Fix inputs of the copies.
+ for (Node* original : nodes) {
+ for (uint32_t copy_index = 0; copy_index < copy_count_; copy_index++) {
+ Node* copy = map(original, copy_index);
+ for (int i = 0; i < copy->InputCount(); i++) {
+ copy->ReplaceInput(i, map(original->InputAt(i), copy_index));
+ }
+ }
+ }
+ }
bool Marked(Node* node) { return node_map_.Get(node) > 0; }
diff --git a/deps/v8/src/compiler/loop-peeling.cc b/deps/v8/src/compiler/loop-peeling.cc
index e666f8c6428..cfc149f6399 100644
--- a/deps/v8/src/compiler/loop-peeling.cc
+++ b/deps/v8/src/compiler/loop-peeling.cc
@@ -236,9 +236,7 @@ void LoopPeeler::PeelInnerLoops(LoopTree::Loop* loop) {
Peel(loop);
}
-namespace {
-
-void EliminateLoopExit(Node* node) {
+void LoopPeeler::EliminateLoopExit(Node* node) {
DCHECK_EQ(IrOpcode::kLoopExit, node->opcode());
// The exit markers take the loop exit as input. We iterate over uses
// and remove all the markers from the graph.
@@ -260,8 +258,6 @@ void EliminateLoopExit(Node* node) {
node->Kill();
}
-} // namespace
-
void LoopPeeler::PeelInnerLoopsOfTree() {
for (LoopTree::Loop* loop : loop_tree_->outer_loops()) {
PeelInnerLoops(loop);
diff --git a/deps/v8/src/compiler/loop-peeling.h b/deps/v8/src/compiler/loop-peeling.h
index af7b5f6ce00..2ee1544d8eb 100644
--- a/deps/v8/src/compiler/loop-peeling.h
+++ b/deps/v8/src/compiler/loop-peeling.h
@@ -50,6 +50,7 @@ class V8_EXPORT_PRIVATE LoopPeeler {
void PeelInnerLoopsOfTree();
static void EliminateLoopExits(Graph* graph, Zone* tmp_zone);
+ static void EliminateLoopExit(Node* loop);
static const size_t kMaxPeeledNodes = 1000;
private:
diff --git a/deps/v8/src/compiler/loop-unrolling.cc b/deps/v8/src/compiler/loop-unrolling.cc
new file mode 100644
index 00000000000..973bb7af193
--- /dev/null
+++ b/deps/v8/src/compiler/loop-unrolling.cc
@@ -0,0 +1,220 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/loop-unrolling.h"
+
+#include "src/base/small-vector.h"
+#include "src/codegen/tick-counter.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/loop-analysis.h"
+#include "src/compiler/loop-peeling.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+void UnrollLoop(Node* loop_node, ZoneUnorderedSet<Node*>* loop, uint32_t depth,
+ Graph* graph, CommonOperatorBuilder* common, Zone* tmp_zone,
+ SourcePositionTable* source_positions,
+ NodeOriginTable* node_origins) {
+ DCHECK_EQ(loop_node->opcode(), IrOpcode::kLoop);
+
+ if (loop == nullptr) return;
+ // No back-jump to the loop header means this is not really a loop.
+ if (loop_node->InputCount() < 2) return;
+
+ uint32_t unrolling_count =
+ unrolling_count_heuristic(static_cast<uint32_t>(loop->size()), depth);
+ if (unrolling_count == 0) return;
+
+ uint32_t iteration_count = unrolling_count + 1;
+
+ uint32_t copied_size = static_cast<uint32_t>(loop->size()) * iteration_count;
+
+ NodeVector copies(tmp_zone);
+
+ NodeCopier copier(graph, copied_size, &copies, unrolling_count);
+ {
+ copier.CopyNodes(graph, tmp_zone, graph->NewNode(common->Dead()),
+ base::make_iterator_range(loop->begin(), loop->end()),
+ source_positions, node_origins);
+ }
+
+#define COPY(node, n) copier.map(node, n)
+#define FOREACH_COPY_INDEX(i) for (uint32_t i = 0; i < unrolling_count; i++)
+
+ for (Node* node : loop_node->uses()) {
+ switch (node->opcode()) {
+ case IrOpcode::kBranch: {
+ /*** Step 1: Remove stack checks from all but the first iteration of the
+ loop. ***/
+ Node* stack_check = node->InputAt(0);
+ if (stack_check->opcode() != IrOpcode::kStackPointerGreaterThan) {
+ break;
+ }
+ FOREACH_COPY_INDEX(i) {
+ COPY(node, i)->ReplaceInput(0,
+ graph->NewNode(common->Int32Constant(1)));
+ }
+ for (Node* use : stack_check->uses()) {
+ if (use->opcode() == IrOpcode::kEffectPhi) {
+ // We now need to remove stack check and the related function call
+ // from the effect chain.
+ // The effect chain looks like this (* stand for irrelevant nodes):
+ //
+ // {replacing_effect} (effect before stack check)
+ // * * | *
+ // | | | |
+ // ( LoadFromObject )
+ // | |
+ // {stack_check}
+ // | * | *
+ // | | | |
+ // | ( Call )
+ // | | *
+ // | | |
+ // {use}: EffectPhi (stack check effect that we need to replace)
+ DCHECK_EQ(use->opcode(), IrOpcode::kEffectPhi);
+ DCHECK_EQ(NodeProperties::GetEffectInput(use, 1)->opcode(),
+ IrOpcode::kCall);
+ DCHECK_EQ(NodeProperties::GetEffectInput(use), stack_check);
+ DCHECK_EQ(NodeProperties::GetEffectInput(
+ NodeProperties::GetEffectInput(use, 1)),
+ stack_check);
+ DCHECK_EQ(NodeProperties::GetEffectInput(stack_check)->opcode(),
+ IrOpcode::kLoadFromObject);
+ Node* replacing_effect = NodeProperties::GetEffectInput(
+ NodeProperties::GetEffectInput(stack_check));
+ FOREACH_COPY_INDEX(i) {
+ COPY(use, i)->ReplaceUses(COPY(replacing_effect, i));
+ }
+ }
+ }
+ break;
+ }
+
+ case IrOpcode::kLoopExit: {
+ /*** Step 2: Create merges for loop exits. ***/
+ if (node->InputAt(1) == loop_node) {
+ // Create a merge node from all iteration exits.
+ Node** merge_inputs = tmp_zone->NewArray<Node*>(iteration_count);
+ merge_inputs[0] = node;
+ for (uint32_t i = 1; i < iteration_count; i++) {
+ merge_inputs[i] = COPY(node, i - 1);
+ }
+ Node* merge_node = graph->NewNode(common->Merge(iteration_count),
+ iteration_count, merge_inputs);
+ // Replace all uses of the loop exit with the merge node.
+ for (Edge use_edge : node->use_edges()) {
+ Node* use = use_edge.from();
+ if (loop->count(use) == 1) {
+ // Uses within the loop will be LoopExitEffects and
+ // LoopExitValues. We need to create a phi from all loop
+ // iterations. Its merge will be the merge node for LoopExits.
+ const Operator* phi_operator;
+ if (use->opcode() == IrOpcode::kLoopExitEffect) {
+ phi_operator = common->EffectPhi(iteration_count);
+ } else {
+ DCHECK(use->opcode() == IrOpcode::kLoopExitValue);
+ phi_operator = common->Phi(
+ LoopExitValueRepresentationOf(use->op()), iteration_count);
+ }
+ Node** phi_inputs =
+ tmp_zone->NewArray<Node*>(iteration_count + 1);
+ phi_inputs[0] = use;
+ for (uint32_t i = 1; i < iteration_count; i++) {
+ phi_inputs[i] = COPY(use, i - 1);
+ }
+ phi_inputs[iteration_count] = merge_node;
+ Node* phi =
+ graph->NewNode(phi_operator, iteration_count + 1, phi_inputs);
+ use->ReplaceUses(phi);
+ // Repair phi which we just broke.
+ phi->ReplaceInput(0, use);
+ } else if (use != merge_node) {
+ // For uses outside the loop, simply redirect them to the merge.
+ use->ReplaceInput(use_edge.index(), merge_node);
+ }
+ }
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ /*** Step 3: Rewire the iterations of the loop. Each iteration should flow
+ into the next one, and the last should flow into the first. ***/
+
+ // 3a) Rewire control.
+
+ // We start at index=1 assuming that index=0 is the (non-recursive) loop
+ // entry.
+ for (int input_index = 1; input_index < loop_node->InputCount();
+ input_index++) {
+ Node* last_iteration_input =
+ COPY(loop_node, unrolling_count - 1)->InputAt(input_index);
+ for (uint32_t copy_index = unrolling_count - 1; copy_index > 0;
+ copy_index--) {
+ COPY(loop_node, copy_index)
+ ->ReplaceInput(input_index,
+ COPY(loop_node, copy_index - 1)->InputAt(input_index));
+ }
+ COPY(loop_node, 0)
+ ->ReplaceInput(input_index, loop_node->InputAt(input_index));
+ loop_node->ReplaceInput(input_index, last_iteration_input);
+ }
+ // The loop of each following iteration will become a merge. We need to remove
+ // its non-recursive input.
+ FOREACH_COPY_INDEX(i) {
+ COPY(loop_node, i)->RemoveInput(0);
+ NodeProperties::ChangeOp(COPY(loop_node, i),
+ common->Merge(loop_node->InputCount() - 1));
+ }
+
+ // 3b) Rewire phis and loop exits.
+ for (Node* use : loop_node->uses()) {
+ if (NodeProperties::IsPhi(use)) {
+ int count = use->opcode() == IrOpcode::kPhi
+ ? use->op()->ValueInputCount()
+ : use->op()->EffectInputCount();
+ // Phis depending on the loop header should take their input from the
+ // previous iteration instead.
+ for (int input_index = 1; input_index < count; input_index++) {
+ Node* last_iteration_input =
+ COPY(use, unrolling_count - 1)->InputAt(input_index);
+ for (uint32_t copy_index = unrolling_count - 1; copy_index > 0;
+ copy_index--) {
+ COPY(use, copy_index)
+ ->ReplaceInput(input_index,
+ COPY(use, copy_index - 1)->InputAt(input_index));
+ }
+ COPY(use, 0)->ReplaceInput(input_index, use->InputAt(input_index));
+ use->ReplaceInput(input_index, last_iteration_input);
+ }
+
+ // Phis in each following iteration should not depend on the
+ // (non-recursive) entry to the loop. Remove their first input.
+ FOREACH_COPY_INDEX(i) {
+ COPY(use, i)->RemoveInput(0);
+ NodeProperties::ChangeOp(
+ COPY(use, i), common->ResizeMergeOrPhi(use->op(), count - 1));
+ }
+ }
+
+ // Loop exits should point to the loop header.
+ if (use->opcode() == IrOpcode::kLoopExit) {
+ FOREACH_COPY_INDEX(i) { COPY(use, i)->ReplaceInput(1, loop_node); }
+ }
+ }
+}
+
+#undef COPY
+#undef FOREACH_COPY_INDEX
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/loop-unrolling.h b/deps/v8/src/compiler/loop-unrolling.h
new file mode 100644
index 00000000000..5aec6350f85
--- /dev/null
+++ b/deps/v8/src/compiler/loop-unrolling.h
@@ -0,0 +1,44 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_LOOP_UNROLLING_H_
+#define V8_COMPILER_LOOP_UNROLLING_H_
+
+// Loop unrolling is an optimization that copies the body of a loop and creates
+// a fresh loop, each iteration of which corresponds to 2 or more iterations
+// of the initial loop. For a high-level description of the algorithm see
+// docs.google.com/document/d/1AsUCqslMUB6fLdnGq0ZoPk2kn50jIJAWAL77lKXXP5g/
+
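In ordinary source-level terms (illustrative C++ only, unrelated to the graph-based pass implemented in this file), unrolling by a factor of 2 performs a transformation like this:

#include <cassert>

int main() {
  int a[5] = {1, 2, 3, 4, 5};
  int n = 5;

  // Before unrolling: one body per iteration.
  int sum = 0;
  for (int i = 0; i < n; i++) sum += a[i];

  // After unrolling by 2. The real pass copies the body inside the TurboFan
  // graph and keeps every copy's exit checks, so it needs no explicit
  // remainder loop like the one written out here.
  int unrolled_sum = 0;
  int i = 0;
  for (; i + 1 < n; i += 2) {
    unrolled_sum += a[i];
    unrolled_sum += a[i + 1];
  }
  for (; i < n; i++) unrolled_sum += a[i];  // leftover when n is odd

  assert(sum == unrolled_sum);
  return 0;
}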
+#include "src/compiler/common-operator.h"
+#include "src/compiler/loop-analysis.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+static constexpr uint32_t kMaximumUnnestedSize = 50;
+static constexpr uint32_t kMaximumUnrollingCount = 7;
+
+// A simple heuristic to decide how many times to unroll a loop. Favors small
+// and deeply nested loops.
+// TODO(manoskouk): Investigate how this can be improved.
+V8_INLINE uint32_t unrolling_count_heuristic(uint32_t size, uint32_t depth) {
+ return std::min((depth + 1) * kMaximumUnnestedSize / size,
+ kMaximumUnrollingCount);
+}
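A few worked values for this heuristic, as a standalone restatement of the formula and constants above (illustrative only):

#include <algorithm>
#include <cassert>
#include <cstdint>

constexpr uint32_t kMaximumUnnestedSize = 50;
constexpr uint32_t kMaximumUnrollingCount = 7;

uint32_t unrolling_count_heuristic(uint32_t size, uint32_t depth) {
  return std::min((depth + 1) * kMaximumUnnestedSize / size,
                  kMaximumUnrollingCount);
}

int main() {
  // 50-node loop at depth 0: (0+1)*50/50 = 1 extra copy of the body.
  assert(unrolling_count_heuristic(50, 0) == 1);
  // 10-node loop nested two levels deep: (2+1)*50/10 = 15, capped at 7.
  assert(unrolling_count_heuristic(10, 2) == 7);
  // 200-node loop at depth 0: 50/200 = 0, so UnrollLoop bails out early.
  assert(unrolling_count_heuristic(200, 0) == 0);
  return 0;
}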
+
+V8_INLINE uint32_t maximum_unrollable_size(uint32_t depth) {
+ return (depth + 1) * kMaximumUnnestedSize;
+}
+
+void UnrollLoop(Node* loop_node, ZoneUnorderedSet<Node*>* loop, uint32_t depth,
+ Graph* graph, CommonOperatorBuilder* common, Zone* tmp_zone,
+ SourcePositionTable* source_positions,
+ NodeOriginTable* node_origins);
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_LOOP_UNROLLING_H_
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index 16ff3ff9369..cabf23f4d55 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -122,6 +122,7 @@ class MachineRepresentationInferrer {
case IrOpcode::kWord32AtomicLoad:
case IrOpcode::kWord64AtomicLoad:
case IrOpcode::kLoad:
+ case IrOpcode::kLoadImmutable:
case IrOpcode::kProtectedLoad:
case IrOpcode::kPoisonedLoad:
representation_vector_[node->id()] = PromoteRepresentation(
@@ -270,6 +271,8 @@ class MachineRepresentationInferrer {
case IrOpcode::kRoundFloat64ToInt32:
case IrOpcode::kFloat64ExtractLowWord32:
case IrOpcode::kFloat64ExtractHighWord32:
+ case IrOpcode::kWord32Popcnt:
+ case IrOpcode::kI8x16BitMask:
MACHINE_UNOP_32_LIST(LABEL)
MACHINE_BINOP_32_LIST(LABEL) {
representation_vector_[node->id()] =
@@ -283,6 +286,9 @@ class MachineRepresentationInferrer {
case IrOpcode::kBitcastFloat64ToInt64:
case IrOpcode::kChangeFloat64ToInt64:
case IrOpcode::kChangeFloat64ToUint64:
+ case IrOpcode::kWord64Popcnt:
+ case IrOpcode::kWord64Ctz:
+ case IrOpcode::kWord64Clz:
MACHINE_BINOP_64_LIST(LABEL) {
representation_vector_[node->id()] =
MachineRepresentation::kWord64;
@@ -318,6 +324,8 @@ class MachineRepresentationInferrer {
break;
case IrOpcode::kI32x4ReplaceLane:
case IrOpcode::kI32x4Splat:
+ case IrOpcode::kI8x16Splat:
+ case IrOpcode::kI8x16Eq:
representation_vector_[node->id()] =
MachineRepresentation::kSimd128;
break;
@@ -376,6 +384,9 @@ class MachineRepresentationChecker {
case IrOpcode::kRoundInt64ToFloat32:
case IrOpcode::kRoundUint64ToFloat32:
case IrOpcode::kTruncateInt64ToInt32:
+ case IrOpcode::kWord64Ctz:
+ case IrOpcode::kWord64Clz:
+ case IrOpcode::kWord64Popcnt:
CheckValueInputForInt64Op(node, 0);
break;
case IrOpcode::kBitcastWordToTagged:
@@ -437,6 +448,7 @@ class MachineRepresentationChecker {
case IrOpcode::kI32x4ExtractLane:
case IrOpcode::kI16x8ExtractLaneU:
case IrOpcode::kI16x8ExtractLaneS:
+ case IrOpcode::kI8x16BitMask:
case IrOpcode::kI8x16ExtractLaneU:
case IrOpcode::kI8x16ExtractLaneS:
CheckValueInputRepresentationIs(node, 0,
@@ -448,8 +460,16 @@ class MachineRepresentationChecker {
CheckValueInputForInt32Op(node, 1);
break;
case IrOpcode::kI32x4Splat:
+ case IrOpcode::kI8x16Splat:
CheckValueInputForInt32Op(node, 0);
break;
+ case IrOpcode::kI8x16Eq:
+ CheckValueInputRepresentationIs(node, 0,
+ MachineRepresentation::kSimd128);
+ CheckValueInputRepresentationIs(node, 1,
+ MachineRepresentation::kSimd128);
+ break;
+
#define LABEL(opcode) case IrOpcode::k##opcode:
case IrOpcode::kChangeInt32ToTagged:
case IrOpcode::kChangeUint32ToTagged:
@@ -461,6 +481,7 @@ class MachineRepresentationChecker {
case IrOpcode::kBitcastWord32ToWord64:
case IrOpcode::kChangeInt32ToInt64:
case IrOpcode::kChangeUint32ToUint64:
+ case IrOpcode::kWord32Popcnt:
MACHINE_UNOP_32_LIST(LABEL) { CheckValueInputForInt32Op(node, 0); }
break;
case IrOpcode::kWord32Equal:
@@ -540,6 +561,8 @@ class MachineRepresentationChecker {
CheckValueInputIsTagged(node, 0);
break;
case IrOpcode::kLoad:
+ case IrOpcode::kUnalignedLoad:
+ case IrOpcode::kLoadImmutable:
case IrOpcode::kWord32AtomicLoad:
case IrOpcode::kWord32AtomicPairLoad:
case IrOpcode::kWord64AtomicLoad:
@@ -559,6 +582,7 @@ class MachineRepresentationChecker {
MachineRepresentation::kWord32);
V8_FALLTHROUGH;
case IrOpcode::kStore:
+ case IrOpcode::kUnalignedStore:
case IrOpcode::kWord32AtomicStore:
case IrOpcode::kWord32AtomicExchange:
case IrOpcode::kWord32AtomicAdd:
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 2220cdb82fc..5d61dfac6ab 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -285,6 +285,22 @@ Node* MachineOperatorReducer::TruncateInt64ToInt32(Node* value) {
return reduction.Changed() ? reduction.replacement() : node;
}
+namespace {
+bool ObjectsMayAlias(Node* a, Node* b) {
+ if (a != b) {
+ if (NodeProperties::IsFreshObject(b)) std::swap(a, b);
+ if (NodeProperties::IsFreshObject(a) &&
+ (NodeProperties::IsFreshObject(b) ||
+ b->opcode() == IrOpcode::kParameter ||
+ b->opcode() == IrOpcode::kLoadImmutable ||
+ IrOpcode::IsConstantOpcode(b->opcode()))) {
+ return false;
+ }
+ }
+ return true;
+}
+} // namespace
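The helper above encodes a simple non-aliasing rule: two distinct nodes cannot refer to the same object if one of them is a fresh allocation and the other is a fresh allocation, a parameter, an immutable load, or a constant. A standalone restatement with hypothetical names (Obj, MayAlias); this is not V8 code:

#include <cassert>

// "fresh"  = allocation in the current function (or a builtin known to
//            return a new object);
// "stable" = parameter, LoadImmutable, or constant node.
struct Obj {
  int id;  // stands in for node identity
  bool fresh;
  bool stable;
};

bool MayAlias(const Obj& a, const Obj& b) {
  if (a.id == b.id) return true;  // a node always aliases itself
  if ((a.fresh && (b.fresh || b.stable)) ||
      (b.fresh && (a.fresh || a.stable))) {
    return false;  // provably distinct objects
  }
  return true;  // otherwise we must assume they may alias
}

int main() {
  Obj alloc1{1, true, false}, alloc2{2, true, false}, param{3, false, true};
  assert(MayAlias(alloc1, alloc1));   // x == x
  assert(!MayAlias(alloc1, alloc2));  // two different fresh allocations
  assert(!MayAlias(alloc1, param));   // fresh object vs. incoming parameter
  return 0;
}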
+
// Perform constant folding and strength reduction on machine operators.
Reduction MachineOperatorReducer::Reduce(Node* node) {
switch (node->opcode()) {
@@ -340,6 +356,11 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
// TODO(turbofan): fold HeapConstant, ExternalReference, pointer compares
if (m.LeftEqualsRight()) return ReplaceBool(true); // x == x => true
+ // This is a workaround for not having escape analysis for wasm
+ // (machine-level) turbofan graphs.
+ if (!ObjectsMayAlias(m.left().node(), m.right().node())) {
+ return ReplaceBool(false);
+ }
break;
}
case IrOpcode::kInt32Add:
@@ -1930,6 +1951,10 @@ Reduction MachineOperatorReducer::ReduceWordNXor(Node* node) {
Reduction MachineOperatorReducer::ReduceWord32Xor(Node* node) {
DCHECK_EQ(IrOpcode::kWord32Xor, node->opcode());
+ Int32BinopMatcher m(node);
+ if (m.right().IsWord32Equal() && m.left().Is(1)) {
+ return Replace(Word32Equal(m.right().node(), Int32Constant(0)));
+ }
return ReduceWordNXor<Word32Adapter>(node);
}
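The new Word32Xor rule above rewrites 1 ^ (a == b) into (a == b) == 0, which is only valid because a Word32Equal node produces exactly 0 or 1. A tiny standalone check of that identity (illustrative, not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  // e models the result of a Word32Equal: it is always 0 or 1.
  for (uint32_t e : {0u, 1u}) {
    uint32_t xor_form = 1u ^ e;               // 1 ^ (a == b)
    uint32_t cmp_form = (e == 0u) ? 1u : 0u;  // (a == b) == 0
    assert(xor_form == cmp_form);
  }
  return 0;
}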
@@ -1967,6 +1992,11 @@ Reduction MachineOperatorReducer::ReduceWord32Equal(Node* node) {
return Changed(node);
}
}
+ // This is a workaround for not having escape analysis for wasm
+ // (machine-level) turbofan graphs.
+ if (!ObjectsMayAlias(m.left().node(), m.right().node())) {
+ return ReplaceBool(false);
+ }
return NoChange();
}
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index f90f9345a36..1a897a32032 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -125,7 +125,8 @@ LoadRepresentation LoadRepresentationOf(Operator const* op) {
IrOpcode::kWord64AtomicLoad == op->opcode() ||
IrOpcode::kWord32AtomicPairLoad == op->opcode() ||
IrOpcode::kPoisonedLoad == op->opcode() ||
- IrOpcode::kUnalignedLoad == op->opcode());
+ IrOpcode::kUnalignedLoad == op->opcode() ||
+ IrOpcode::kLoadImmutable == op->opcode());
return OpParameter<LoadRepresentation>(op);
}
@@ -405,7 +406,6 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(F32x4RecipApprox, Operator::kNoProperties, 1, 0, 1) \
V(F32x4RecipSqrtApprox, Operator::kNoProperties, 1, 0, 1) \
V(F32x4Add, Operator::kCommutative, 2, 0, 1) \
- V(F32x4AddHoriz, Operator::kNoProperties, 2, 0, 1) \
V(F32x4Sub, Operator::kNoProperties, 2, 0, 1) \
V(F32x4Mul, Operator::kCommutative, 2, 0, 1) \
V(F32x4Div, Operator::kNoProperties, 2, 0, 1) \
@@ -447,7 +447,6 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(I64x2ExtMulHighI32x4S, Operator::kCommutative, 2, 0, 1) \
V(I64x2ExtMulLowI32x4U, Operator::kCommutative, 2, 0, 1) \
V(I64x2ExtMulHighI32x4U, Operator::kCommutative, 2, 0, 1) \
- V(I64x2SignSelect, Operator::kNoProperties, 3, 0, 1) \
V(I32x4Splat, Operator::kNoProperties, 1, 0, 1) \
V(I32x4SConvertF32x4, Operator::kNoProperties, 1, 0, 1) \
V(I32x4SConvertI16x8Low, Operator::kNoProperties, 1, 0, 1) \
@@ -456,7 +455,6 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(I32x4Shl, Operator::kNoProperties, 2, 0, 1) \
V(I32x4ShrS, Operator::kNoProperties, 2, 0, 1) \
V(I32x4Add, Operator::kCommutative, 2, 0, 1) \
- V(I32x4AddHoriz, Operator::kNoProperties, 2, 0, 1) \
V(I32x4Sub, Operator::kNoProperties, 2, 0, 1) \
V(I32x4Mul, Operator::kCommutative, 2, 0, 1) \
V(I32x4MinS, Operator::kCommutative, 2, 0, 1) \
@@ -480,7 +478,6 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(I32x4ExtMulHighI16x8S, Operator::kCommutative, 2, 0, 1) \
V(I32x4ExtMulLowI16x8U, Operator::kCommutative, 2, 0, 1) \
V(I32x4ExtMulHighI16x8U, Operator::kCommutative, 2, 0, 1) \
- V(I32x4SignSelect, Operator::kNoProperties, 3, 0, 1) \
V(I32x4ExtAddPairwiseI16x8S, Operator::kNoProperties, 1, 0, 1) \
V(I32x4ExtAddPairwiseI16x8U, Operator::kNoProperties, 1, 0, 1) \
V(I32x4TruncSatF64x2SZero, Operator::kNoProperties, 1, 0, 1) \
@@ -494,7 +491,6 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(I16x8SConvertI32x4, Operator::kNoProperties, 2, 0, 1) \
V(I16x8Add, Operator::kCommutative, 2, 0, 1) \
V(I16x8AddSatS, Operator::kCommutative, 2, 0, 1) \
- V(I16x8AddHoriz, Operator::kNoProperties, 2, 0, 1) \
V(I16x8Sub, Operator::kNoProperties, 2, 0, 1) \
V(I16x8SubSatS, Operator::kNoProperties, 2, 0, 1) \
V(I16x8Mul, Operator::kCommutative, 2, 0, 1) \
@@ -522,7 +518,6 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(I16x8ExtMulHighI8x16S, Operator::kCommutative, 2, 0, 1) \
V(I16x8ExtMulLowI8x16U, Operator::kCommutative, 2, 0, 1) \
V(I16x8ExtMulHighI8x16U, Operator::kCommutative, 2, 0, 1) \
- V(I16x8SignSelect, Operator::kNoProperties, 3, 0, 1) \
V(I16x8ExtAddPairwiseI8x16S, Operator::kNoProperties, 1, 0, 1) \
V(I16x8ExtAddPairwiseI8x16U, Operator::kNoProperties, 1, 0, 1) \
V(I8x16Splat, Operator::kNoProperties, 1, 0, 1) \
@@ -534,7 +529,6 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(I8x16AddSatS, Operator::kCommutative, 2, 0, 1) \
V(I8x16Sub, Operator::kNoProperties, 2, 0, 1) \
V(I8x16SubSatS, Operator::kNoProperties, 2, 0, 1) \
- V(I8x16Mul, Operator::kCommutative, 2, 0, 1) \
V(I8x16MinS, Operator::kCommutative, 2, 0, 1) \
V(I8x16MaxS, Operator::kCommutative, 2, 0, 1) \
V(I8x16Eq, Operator::kCommutative, 2, 0, 1) \
@@ -553,9 +547,6 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(I8x16Popcnt, Operator::kNoProperties, 1, 0, 1) \
V(I8x16Abs, Operator::kNoProperties, 1, 0, 1) \
V(I8x16BitMask, Operator::kNoProperties, 1, 0, 1) \
- V(I8x16SignSelect, Operator::kNoProperties, 3, 0, 1) \
- V(S128Load, Operator::kNoProperties, 2, 0, 1) \
- V(S128Store, Operator::kNoProperties, 3, 0, 1) \
V(S128Zero, Operator::kNoProperties, 0, 0, 1) \
V(S128And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(S128Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
@@ -564,10 +555,10 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(S128Select, Operator::kNoProperties, 3, 0, 1) \
V(S128AndNot, Operator::kNoProperties, 2, 0, 1) \
V(V128AnyTrue, Operator::kNoProperties, 1, 0, 1) \
- V(V64x2AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(V32x4AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(V16x8AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(V8x16AllTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(I64x2AllTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(I32x4AllTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(I16x8AllTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(I8x16AllTrue, Operator::kNoProperties, 1, 0, 1) \
V(I8x16Swizzle, Operator::kNoProperties, 2, 0, 1)
// The format is:
@@ -591,7 +582,9 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(Float64RoundTruncate, Operator::kNoProperties, 1, 0, 1) \
V(Float64RoundTiesAway, Operator::kNoProperties, 1, 0, 1) \
V(Float32RoundTiesEven, Operator::kNoProperties, 1, 0, 1) \
- V(Float64RoundTiesEven, Operator::kNoProperties, 1, 0, 1)
+ V(Float64RoundTiesEven, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32Select, Operator::kNoProperties, 3, 0, 1) \
+ V(Float64Select, Operator::kNoProperties, 3, 0, 1)
// The format is:
// V(Name, properties, value_input_count, control_input_count, output_count)
@@ -812,21 +805,6 @@ struct MachineOperatorGlobalCache {
PURE_OPTIONAL_OP_LIST(PURE)
#undef PURE
- struct PrefetchTemporalOperator final : public Operator {
- PrefetchTemporalOperator()
- : Operator(IrOpcode::kPrefetchTemporal,
- Operator::kNoDeopt | Operator::kNoThrow, "PrefetchTemporal",
- 2, 1, 1, 0, 1, 0) {}
- };
- PrefetchTemporalOperator kPrefetchTemporal;
- struct PrefetchNonTemporalOperator final : public Operator {
- PrefetchNonTemporalOperator()
- : Operator(IrOpcode::kPrefetchNonTemporal,
- Operator::kNoDeopt | Operator::kNoThrow,
- "PrefetchNonTemporal", 2, 1, 1, 0, 1, 0) {}
- };
- PrefetchNonTemporalOperator kPrefetchNonTemporal;
-
#define OVERFLOW_OP(Name, properties) \
struct Name##Operator final : public Operator { \
Name##Operator() \
@@ -867,10 +845,18 @@ struct MachineOperatorGlobalCache {
Operator::kNoDeopt | Operator::kNoThrow, "ProtectedLoad", 2, 1, \
1, 1, 1, 0, MachineType::Type()) {} \
}; \
+ struct LoadImmutable##Type##Operator final \
+ : public Operator1<LoadRepresentation> { \
+ LoadImmutable##Type##Operator() \
+ : Operator1<LoadRepresentation>(IrOpcode::kLoadImmutable, \
+ Operator::kPure, "LoadImmutable", 2, \
+ 0, 0, 1, 0, 0, MachineType::Type()) {} \
+ }; \
Load##Type##Operator kLoad##Type; \
PoisonedLoad##Type##Operator kPoisonedLoad##Type; \
UnalignedLoad##Type##Operator kUnalignedLoad##Type; \
- ProtectedLoad##Type##Operator kProtectedLoad##Type;
+ ProtectedLoad##Type##Operator kProtectedLoad##Type; \
+ LoadImmutable##Type##Operator kLoadImmutable##Type;
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
@@ -1340,14 +1326,6 @@ const Operator* MachineOperatorBuilder::TruncateFloat32ToInt32(
PURE_OPTIONAL_OP_LIST(PURE)
#undef PURE
-const Operator* MachineOperatorBuilder::PrefetchTemporal() {
- return &cache_.kPrefetchTemporal;
-}
-
-const Operator* MachineOperatorBuilder::PrefetchNonTemporal() {
- return &cache_.kPrefetchNonTemporal;
-}
-
#define OVERFLOW_OP(Name, properties) \
const Operator* MachineOperatorBuilder::Name() { return &cache_.k##Name; }
OVERFLOW_OP_LIST(OVERFLOW_OP)
@@ -1363,6 +1341,22 @@ const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
UNREACHABLE();
}
+// Represents a load from a position in memory that is known to be immutable,
+// e.g. an immutable IsolateRoot or an immutable field of a WasmInstanceObject.
+// Because the returned value cannot change through the execution of a function,
+// LoadImmutable is a pure operator and does not have effect or control edges.
+// Requires that the memory in question has been initialized at function start
+// even through inlining.
+const Operator* MachineOperatorBuilder::LoadImmutable(LoadRepresentation rep) {
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kLoadImmutable##Type; \
+ }
+ MACHINE_TYPE_LIST(LOAD)
+#undef LOAD
+ UNREACHABLE();
+}
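Because the cached LoadImmutable operators above are created with Operator::kPure and no effect or control inputs (only the two value inputs for base and index), a graph builder supplies fewer inputs than for a plain Load. A hypothetical fragment, assuming existing graph and machine builders and base/index/effect/control nodes:

// Ordinary load: threaded through the effect chain and anchored to control.
Node* load = graph->NewNode(machine->Load(MachineType::TaggedPointer()),
                            base, index, effect, control);

// Immutable load: pure, so only the value inputs are supplied; scheduling
// and value numbering are free to hoist or deduplicate it.
Node* immutable_load = graph->NewNode(
    machine->LoadImmutable(MachineType::TaggedPointer()), base, index);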
+
const Operator* MachineOperatorBuilder::PoisonedLoad(LoadRepresentation rep) {
#define LOAD(Type) \
if (rep == MachineType::Type()) { \
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index c7985808451..87a2eb891f1 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -182,6 +182,7 @@ class S128ImmediateParameter {
explicit S128ImmediateParameter(const uint8_t immediate[16]) {
std::copy(immediate, immediate + 16, immediate_.begin());
}
+ S128ImmediateParameter() = default;
const std::array<uint8_t, 16>& immediate() const { return immediate_; }
const uint8_t* data() const { return immediate_.data(); }
uint8_t operator[](int x) const { return immediate_[x]; }
@@ -251,6 +252,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
kWord64Popcnt = 1u << 15,
kWord32ReverseBits = 1u << 16,
kWord64ReverseBits = 1u << 17,
+ kFloat32Select = 1u << 18,
+ kFloat64Select = 1u << 19,
kInt32AbsWithOverflow = 1u << 20,
kInt64AbsWithOverflow = 1u << 21,
kWord32Rol = 1u << 22,
@@ -262,7 +265,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
kFloat64RoundTiesAway | kFloat32RoundTiesEven | kFloat64RoundTiesEven |
kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt |
kWord32ReverseBits | kWord64ReverseBits | kInt32AbsWithOverflow |
- kInt64AbsWithOverflow | kWord32Rol | kWord64Rol | kSatConversionIsSafe
+ kInt64AbsWithOverflow | kWord32Rol | kWord64Rol | kSatConversionIsSafe |
+ kFloat32Select | kFloat64Select
};
using Flags = base::Flags<Flag, unsigned>;
@@ -558,6 +562,10 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const OptionalOperator Float32RoundTiesEven();
const OptionalOperator Float64RoundTiesEven();
+ // Floating point conditional selects.
+ const OptionalOperator Float32Select();
+ const OptionalOperator Float64Select();
+
// Floating point neg.
const Operator* Float32Neg();
const Operator* Float64Neg();
@@ -641,7 +649,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* F32x4RecipApprox();
const Operator* F32x4RecipSqrtApprox();
const Operator* F32x4Add();
- const Operator* F32x4AddHoriz();
const Operator* F32x4Sub();
const Operator* F32x4Mul();
const Operator* F32x4Div();
@@ -687,7 +694,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I64x2ExtMulHighI32x4S();
const Operator* I64x2ExtMulLowI32x4U();
const Operator* I64x2ExtMulHighI32x4U();
- const Operator* I64x2SignSelect();
const Operator* I32x4Splat();
const Operator* I32x4ExtractLane(int32_t);
@@ -699,7 +705,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I32x4Shl();
const Operator* I32x4ShrS();
const Operator* I32x4Add();
- const Operator* I32x4AddHoriz();
const Operator* I32x4Sub();
const Operator* I32x4Mul();
const Operator* I32x4MinS();
@@ -724,7 +729,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I32x4ExtMulHighI16x8S();
const Operator* I32x4ExtMulLowI16x8U();
const Operator* I32x4ExtMulHighI16x8U();
- const Operator* I32x4SignSelect();
const Operator* I32x4ExtAddPairwiseI16x8S();
const Operator* I32x4ExtAddPairwiseI16x8U();
const Operator* I32x4TruncSatF64x2SZero();
@@ -742,7 +746,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I16x8SConvertI32x4();
const Operator* I16x8Add();
const Operator* I16x8AddSatS();
- const Operator* I16x8AddHoriz();
const Operator* I16x8Sub();
const Operator* I16x8SubSatS();
const Operator* I16x8Mul();
@@ -771,7 +774,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I16x8ExtMulHighI8x16S();
const Operator* I16x8ExtMulLowI8x16U();
const Operator* I16x8ExtMulHighI8x16U();
- const Operator* I16x8SignSelect();
const Operator* I16x8ExtAddPairwiseI8x16S();
const Operator* I16x8ExtAddPairwiseI8x16U();
@@ -787,7 +789,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I8x16AddSatS();
const Operator* I8x16Sub();
const Operator* I8x16SubSatS();
- const Operator* I8x16Mul();
const Operator* I8x16MinS();
const Operator* I8x16MaxS();
const Operator* I8x16Eq();
@@ -807,10 +808,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I8x16Popcnt();
const Operator* I8x16Abs();
const Operator* I8x16BitMask();
- const Operator* I8x16SignSelect();
- const Operator* S128Load();
- const Operator* S128Store();
const Operator* S128Const(const uint8_t value[16]);
const Operator* S128Zero();
@@ -825,22 +823,20 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I8x16Shuffle(const uint8_t shuffle[16]);
const Operator* V128AnyTrue();
- const Operator* V64x2AllTrue();
- const Operator* V32x4AllTrue();
- const Operator* V16x8AllTrue();
- const Operator* V8x16AllTrue();
+ const Operator* I64x2AllTrue();
+ const Operator* I32x4AllTrue();
+ const Operator* I16x8AllTrue();
+ const Operator* I8x16AllTrue();
// load [base + index]
const Operator* Load(LoadRepresentation rep);
+ const Operator* LoadImmutable(LoadRepresentation rep);
const Operator* PoisonedLoad(LoadRepresentation rep);
const Operator* ProtectedLoad(LoadRepresentation rep);
const Operator* LoadTransform(MemoryAccessKind kind,
LoadTransformation transform);
- const Operator* PrefetchTemporal();
- const Operator* PrefetchNonTemporal();
-
// SIMD load: replace a specified lane with [base + index].
const Operator* LoadLane(MemoryAccessKind kind, LoadRepresentation rep,
uint8_t laneidx);
diff --git a/deps/v8/src/compiler/memory-lowering.cc b/deps/v8/src/compiler/memory-lowering.cc
index 21a0169f2e6..b8a75ee9502 100644
--- a/deps/v8/src/compiler/memory-lowering.cc
+++ b/deps/v8/src/compiler/memory-lowering.cc
@@ -292,7 +292,13 @@ Reduction MemoryLowering::ReduceAllocateRaw(
Reduction MemoryLowering::ReduceLoadFromObject(Node* node) {
DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
ObjectAccess const& access = ObjectAccessOf(node->op());
- NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ MachineRepresentation rep = access.machine_type.representation();
+ const Operator* load_op = ElementSizeInBytes(rep) > kTaggedSize &&
+ !machine()->UnalignedLoadSupported(
+ access.machine_type.representation())
+ ? machine()->UnalignedLoad(access.machine_type)
+ : machine()->Load(access.machine_type);
+ NodeProperties::ChangeOp(node, load_op);
return Changed(node);
}
@@ -387,9 +393,13 @@ Reduction MemoryLowering::ReduceStoreToObject(Node* node,
Node* value = node->InputAt(2);
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, access.write_barrier_kind);
- NodeProperties::ChangeOp(
- node, machine()->Store(StoreRepresentation(
- access.machine_type.representation(), write_barrier_kind)));
+ MachineRepresentation rep = access.machine_type.representation();
+ StoreRepresentation store_rep(rep, write_barrier_kind);
+ const Operator* store_op = ElementSizeInBytes(rep) > kTaggedSize &&
+ !machine()->UnalignedStoreSupported(rep)
+ ? machine()->UnalignedStore(rep)
+ : machine()->Store(store_rep);
+ NodeProperties::ChangeOp(node, store_op);
return Changed(node);
}
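Both lowerings above apply the same rule: if the accessed field is wider than a tagged slot (and may therefore not be naturally aligned) and the target cannot perform such an access with its ordinary load or store, emit the explicit Unaligned{Load,Store} operator so code generation can handle the access specially. A standalone restatement of the decision; illustrative only, and the value 8 for kTaggedSize is an assumption for the example:

#include <cassert>

enum class AccessOp { kPlain, kExplicitUnaligned };

AccessOp ChooseAccessOp(int element_size_in_bytes, int tagged_size,
                        bool unaligned_access_supported) {
  return (element_size_in_bytes > tagged_size && !unaligned_access_supported)
             ? AccessOp::kExplicitUnaligned
             : AccessOp::kPlain;
}

int main() {
  const int kTaggedSize = 8;  // assumption for this example
  // An 8-byte field fits in a tagged slot: keep the plain operator.
  assert(ChooseAccessOp(8, kTaggedSize, false) == AccessOp::kPlain);
  // A 16-byte field on a target without unaligned support: use the explicit
  // unaligned operator.
  assert(ChooseAccessOp(16, kTaggedSize, false) ==
         AccessOp::kExplicitUnaligned);
  return 0;
}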
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 0208b3ec5fe..1f36f25c298 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -32,9 +32,13 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kEffectPhi:
case IrOpcode::kIfException:
case IrOpcode::kLoad:
+ case IrOpcode::kLoadImmutable:
case IrOpcode::kLoadElement:
case IrOpcode::kLoadField:
case IrOpcode::kLoadFromObject:
+ case IrOpcode::kLoadLane:
+ case IrOpcode::kLoadTransform:
+ case IrOpcode::kMemoryBarrier:
case IrOpcode::kPoisonedLoad:
case IrOpcode::kProtectedLoad:
case IrOpcode::kProtectedStore:
@@ -47,6 +51,7 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kStore:
case IrOpcode::kStoreElement:
case IrOpcode::kStoreField:
+ case IrOpcode::kStoreLane:
case IrOpcode::kStoreToObject:
case IrOpcode::kTaggedPoisonOnSpeculation:
case IrOpcode::kUnalignedLoad:
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index a1e254d333e..e5554d3cba4 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -13,6 +13,7 @@
#include "src/codegen/external-reference.h"
#include "src/common/globals.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
#include "src/numbers/double.h"
@@ -169,6 +170,8 @@ using Int32Matcher = IntMatcher<int32_t, IrOpcode::kInt32Constant>;
using Uint32Matcher = IntMatcher<uint32_t, IrOpcode::kInt32Constant>;
using Int64Matcher = IntMatcher<int64_t, IrOpcode::kInt64Constant>;
using Uint64Matcher = IntMatcher<uint64_t, IrOpcode::kInt64Constant>;
+using V128ConstMatcher =
+ ValueMatcher<S128ImmediateParameter, IrOpcode::kS128Const>;
#if V8_HOST_ARCH_32_BIT
using IntPtrMatcher = Int32Matcher;
using UintPtrMatcher = Uint32Matcher;
@@ -732,6 +735,7 @@ struct BaseWithIndexAndDisplacementMatcher {
Node* from = use.from();
switch (from->opcode()) {
case IrOpcode::kLoad:
+ case IrOpcode::kLoadImmutable:
case IrOpcode::kPoisonedLoad:
case IrOpcode::kProtectedLoad:
case IrOpcode::kInt32Add:
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 3219c216c6f..206b32e0d7a 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -581,6 +581,36 @@ bool NodeProperties::AllValueInputsAreTyped(Node* node) {
return true;
}
+// static
+bool NodeProperties::IsFreshObject(Node* node) {
+ if (node->opcode() == IrOpcode::kAllocate ||
+ node->opcode() == IrOpcode::kAllocateRaw)
+ return true;
+#if V8_ENABLE_WEBASSEMBLY
+ if (node->opcode() == IrOpcode::kCall) {
+    // TODO(manoskouk): Currently, some wasm builtins are called in
+    // CallDescriptor::kCallWasmFunction mode. Make sure this is synced if the
+ // calling mechanism is refactored.
+ if (CallDescriptorOf(node->op())->kind() !=
+ CallDescriptor::kCallBuiltinPointer) {
+ return false;
+ }
+ NumberMatcher matcher(node->InputAt(0));
+ if (matcher.HasResolvedValue()) {
+ Builtins::Name callee =
+ static_cast<Builtins::Name>(matcher.ResolvedValue());
+ // Note: Make sure to only add builtins which are guaranteed to return a
+ // fresh object. E.g. kWasmAllocateFixedArray may return the canonical
+ // empty array, and kWasmAllocateRtt may return a cached rtt.
+ return callee == Builtins::kWasmAllocateArrayWithRtt ||
+ callee == Builtins::kWasmAllocateStructWithRtt ||
+ callee == Builtins::kWasmAllocateObjectWrapper ||
+ callee == Builtins::kWasmAllocatePair;
+ }
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ return false;
+}
// static
bool NodeProperties::IsInputRange(Edge edge, int first, int num) {
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index d08d328beec..bec18b1e59b 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -136,6 +136,10 @@ class V8_EXPORT_PRIVATE NodeProperties {
}
}
+ // Determines if {node} has an allocating opcode, or is a builtin known to
+ // return a fresh object.
+ static bool IsFreshObject(Node* node);
+
// ---------------------------------------------------------------------------
// Miscellaneous mutators.
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index 912c0966d19..f5ceb7f7e1c 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -314,15 +314,12 @@ void Node::ReplaceUses(Node* that) {
}
bool Node::OwnedBy(Node const* owner) const {
- unsigned mask = 0;
for (Use* use = first_use_; use; use = use->next) {
- if (use->from() == owner) {
- mask |= 1;
- } else {
+ if (use->from() != owner) {
return false;
}
}
- return mask == 1;
+ return first_use_ != nullptr;
}
bool Node::OwnedBy(Node const* owner1, Node const* owner2) const {
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index 117bea72125..37b45c403f1 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -69,14 +69,14 @@ class V8_EXPORT_PRIVATE Node final {
#endif
Node* InputAt(int index) const {
- CHECK_LE(0, index);
- CHECK_LT(index, InputCount());
+ DCHECK_LE(0, index);
+ DCHECK_LT(index, InputCount());
return *GetInputPtrConst(index);
}
void ReplaceInput(int index, Node* new_to) {
- CHECK_LE(0, index);
- CHECK_LT(index, InputCount());
+ DCHECK_LE(0, index);
+ DCHECK_LT(index, InputCount());
ZoneNodePtr* input_ptr = GetInputPtr(index);
Node* old_to = *input_ptr;
if (old_to != new_to) {
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index bd2011ada21..3e5314d8574 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -197,7 +197,7 @@
V(JSCallForwardVarargs) \
V(JSCallWithArrayLike) \
V(JSCallWithSpread) \
- V(JSWasmCall)
+ IF_WASM(V, JSWasmCall)
#define JS_CONSTRUCT_OP_LIST(V) \
V(JSConstructForwardVarargs) \
@@ -681,6 +681,7 @@
V(Comment) \
V(Load) \
V(PoisonedLoad) \
+ V(LoadImmutable) \
V(Store) \
V(StackSlot) \
V(Word32Popcnt) \
@@ -733,6 +734,8 @@
V(Float64ExtractHighWord32) \
V(Float64InsertLowWord32) \
V(Float64InsertHighWord32) \
+ V(Float32Select) \
+ V(Float64Select) \
V(TaggedPoisonOnSpeculation) \
V(Word32PoisonOnSpeculation) \
V(Word64PoisonOnSpeculation) \
@@ -797,7 +800,6 @@
V(F32x4RecipApprox) \
V(F32x4RecipSqrtApprox) \
V(F32x4Add) \
- V(F32x4AddHoriz) \
V(F32x4Sub) \
V(F32x4Mul) \
V(F32x4Div) \
@@ -844,7 +846,6 @@
V(I64x2ExtMulHighI32x4S) \
V(I64x2ExtMulLowI32x4U) \
V(I64x2ExtMulHighI32x4U) \
- V(I64x2SignSelect) \
V(I32x4Splat) \
V(I32x4ExtractLane) \
V(I32x4ReplaceLane) \
@@ -855,7 +856,6 @@
V(I32x4Shl) \
V(I32x4ShrS) \
V(I32x4Add) \
- V(I32x4AddHoriz) \
V(I32x4Sub) \
V(I32x4Mul) \
V(I32x4MinS) \
@@ -883,7 +883,6 @@
V(I32x4ExtMulHighI16x8S) \
V(I32x4ExtMulLowI16x8U) \
V(I32x4ExtMulHighI16x8U) \
- V(I32x4SignSelect) \
V(I32x4ExtAddPairwiseI16x8S) \
V(I32x4ExtAddPairwiseI16x8U) \
V(I32x4TruncSatF64x2SZero) \
@@ -900,7 +899,6 @@
V(I16x8SConvertI32x4) \
V(I16x8Add) \
V(I16x8AddSatS) \
- V(I16x8AddHoriz) \
V(I16x8Sub) \
V(I16x8SubSatS) \
V(I16x8Mul) \
@@ -932,7 +930,6 @@
V(I16x8ExtMulHighI8x16S) \
V(I16x8ExtMulLowI8x16U) \
V(I16x8ExtMulHighI8x16U) \
- V(I16x8SignSelect) \
V(I16x8ExtAddPairwiseI8x16S) \
V(I16x8ExtAddPairwiseI8x16U) \
V(I8x16Splat) \
@@ -947,7 +944,6 @@
V(I8x16AddSatS) \
V(I8x16Sub) \
V(I8x16SubSatS) \
- V(I8x16Mul) \
V(I8x16MinS) \
V(I8x16MaxS) \
V(I8x16Eq) \
@@ -970,9 +966,6 @@
V(I8x16Popcnt) \
V(I8x16Abs) \
V(I8x16BitMask) \
- V(I8x16SignSelect) \
- V(S128Load) \
- V(S128Store) \
V(S128Zero) \
V(S128Const) \
V(S128Not) \
@@ -984,13 +977,11 @@
V(I8x16Swizzle) \
V(I8x16Shuffle) \
V(V128AnyTrue) \
- V(V64x2AllTrue) \
- V(V32x4AllTrue) \
- V(V16x8AllTrue) \
- V(V8x16AllTrue) \
+ V(I64x2AllTrue) \
+ V(I32x4AllTrue) \
+ V(I16x8AllTrue) \
+ V(I8x16AllTrue) \
V(LoadTransform) \
- V(PrefetchTemporal) \
- V(PrefetchNonTemporal) \
V(LoadLane) \
V(StoreLane)
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index 8c72ae3c723..3827d48b6f2 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -224,7 +224,9 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSCall:
case IrOpcode::kJSCallWithArrayLike:
case IrOpcode::kJSCallWithSpread:
+#if V8_ENABLE_WEBASSEMBLY
case IrOpcode::kJSWasmCall:
+#endif // V8_ENABLE_WEBASSEMBLY
// Misc operations
case IrOpcode::kJSAsyncFunctionEnter:
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index c4e88db841e..f80f3064cef 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -56,6 +56,7 @@
#include "src/compiler/load-elimination.h"
#include "src/compiler/loop-analysis.h"
#include "src/compiler/loop-peeling.h"
+#include "src/compiler/loop-unrolling.h"
#include "src/compiler/loop-variable-optimizer.h"
#include "src/compiler/machine-graph-verifier.h"
#include "src/compiler/machine-operator-reducer.h"
@@ -79,7 +80,6 @@
#include "src/compiler/typer.h"
#include "src/compiler/value-numbering-reducer.h"
#include "src/compiler/verifier.h"
-#include "src/compiler/wasm-compiler.h"
#include "src/compiler/zone-stats.h"
#include "src/diagnostics/code-tracer.h"
#include "src/diagnostics/disassembler.h"
@@ -93,9 +93,13 @@
#include "src/tracing/traced-value.h"
#include "src/utils/ostreams.h"
#include "src/utils/utils.h"
+
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/compiler/wasm-compiler.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/wasm-engine.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -183,6 +187,7 @@ class PipelineData {
info_->zone()->New<CompilationDependencies>(broker_, info_->zone());
}
+#if V8_ENABLE_WEBASSEMBLY
// For WebAssembly compile entry point.
PipelineData(ZoneStats* zone_stats, wasm::WasmEngine* wasm_engine,
OptimizedCompilationInfo* info, MachineGraph* mcgraph,
@@ -213,7 +218,13 @@ class PipelineData {
register_allocation_zone_scope_(zone_stats_,
kRegisterAllocationZoneName),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
- assembler_options_(assembler_options) {}
+ assembler_options_(assembler_options) {
+ simplified_ = graph_zone_->New<SimplifiedOperatorBuilder>(graph_zone_);
+ javascript_ = graph_zone_->New<JSOperatorBuilder>(graph_zone_);
+ jsgraph_ = graph_zone_->New<JSGraph>(isolate_, graph_, common_, javascript_,
+ simplified_, machine_);
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
// For CodeStubAssembler and machine graph testing entry point.
PipelineData(ZoneStats* zone_stats, OptimizedCompilationInfo* info,
@@ -224,7 +235,9 @@ class PipelineData {
const AssemblerOptions& assembler_options,
const ProfileDataFromFile* profile_data)
: isolate_(isolate),
+#if V8_ENABLE_WEBASSEMBLY
wasm_engine_(isolate_->wasm_engine()),
+#endif // V8_ENABLE_WEBASSEMBLY
allocator_(allocator),
info_(info),
debug_name_(info_->GetDebugName()),
@@ -415,8 +428,10 @@ class PipelineData {
}
CodeTracer* GetCodeTracer() const {
- return wasm_engine_ == nullptr ? isolate_->GetCodeTracer()
- : wasm_engine_->GetCodeTracer();
+#if V8_ENABLE_WEBASSEMBLY
+ if (wasm_engine_) return wasm_engine_->GetCodeTracer();
+#endif // V8_ENABLE_WEBASSEMBLY
+ return isolate_->GetCodeTracer();
}
Typer* CreateTyper() {
@@ -584,7 +599,9 @@ class PipelineData {
private:
Isolate* const isolate_;
+#if V8_ENABLE_WEBASSEMBLY
wasm::WasmEngine* const wasm_engine_ = nullptr;
+#endif // V8_ENABLE_WEBASSEMBLY
AccountingAllocator* const allocator_;
OptimizedCompilationInfo* const info_;
std::unique_ptr<char[]> debug_name_;
@@ -1001,6 +1018,7 @@ PipelineStatistics* CreatePipelineStatistics(Handle<Script> script,
return pipeline_statistics;
}
+#if V8_ENABLE_WEBASSEMBLY
PipelineStatistics* CreatePipelineStatistics(
wasm::WasmEngine* wasm_engine, wasm::FunctionBody function_body,
const wasm::WasmModule* wasm_module, OptimizedCompilationInfo* info,
@@ -1042,6 +1060,7 @@ PipelineStatistics* CreatePipelineStatistics(
return pipeline_statistics;
}
+#endif // V8_ENABLE_WEBASSEMBLY
} // namespace
@@ -1288,125 +1307,6 @@ void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
code->set_can_have_weak_objects(true);
}
-class WasmHeapStubCompilationJob final : public OptimizedCompilationJob {
- public:
- WasmHeapStubCompilationJob(Isolate* isolate, wasm::WasmEngine* wasm_engine,
- CallDescriptor* call_descriptor,
- std::unique_ptr<Zone> zone, Graph* graph,
- CodeKind kind, std::unique_ptr<char[]> debug_name,
- const AssemblerOptions& options,
- SourcePositionTable* source_positions)
- // Note that the OptimizedCompilationInfo is not initialized at the time
- // we pass it to the CompilationJob constructor, but it is not
- // dereferenced there.
- : OptimizedCompilationJob(&info_, "TurboFan",
- CompilationJob::State::kReadyToExecute),
- debug_name_(std::move(debug_name)),
- info_(CStrVector(debug_name_.get()), graph->zone(), kind),
- call_descriptor_(call_descriptor),
- zone_stats_(zone->allocator()),
- zone_(std::move(zone)),
- graph_(graph),
- data_(&zone_stats_, &info_, isolate, wasm_engine->allocator(), graph_,
- nullptr, nullptr, source_positions,
- zone_->New<NodeOriginTable>(graph_), nullptr, options, nullptr),
- pipeline_(&data_),
- wasm_engine_(wasm_engine) {}
-
- WasmHeapStubCompilationJob(const WasmHeapStubCompilationJob&) = delete;
- WasmHeapStubCompilationJob& operator=(const WasmHeapStubCompilationJob&) =
- delete;
-
- protected:
- Status PrepareJobImpl(Isolate* isolate) final;
- Status ExecuteJobImpl(RuntimeCallStats* stats,
- LocalIsolate* local_isolate) final;
- Status FinalizeJobImpl(Isolate* isolate) final;
-
- private:
- std::unique_ptr<char[]> debug_name_;
- OptimizedCompilationInfo info_;
- CallDescriptor* call_descriptor_;
- ZoneStats zone_stats_;
- std::unique_ptr<Zone> zone_;
- Graph* graph_;
- PipelineData data_;
- PipelineImpl pipeline_;
- wasm::WasmEngine* wasm_engine_;
-};
-
-// static
-std::unique_ptr<OptimizedCompilationJob>
-Pipeline::NewWasmHeapStubCompilationJob(
- Isolate* isolate, wasm::WasmEngine* wasm_engine,
- CallDescriptor* call_descriptor, std::unique_ptr<Zone> zone, Graph* graph,
- CodeKind kind, std::unique_ptr<char[]> debug_name,
- const AssemblerOptions& options, SourcePositionTable* source_positions) {
- return std::make_unique<WasmHeapStubCompilationJob>(
- isolate, wasm_engine, call_descriptor, std::move(zone), graph, kind,
- std::move(debug_name), options, source_positions);
-}
-
-CompilationJob::Status WasmHeapStubCompilationJob::PrepareJobImpl(
- Isolate* isolate) {
- UNREACHABLE();
-}
-
-CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl(
- RuntimeCallStats* stats, LocalIsolate* local_isolate) {
- std::unique_ptr<PipelineStatistics> pipeline_statistics;
- if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
- pipeline_statistics.reset(new PipelineStatistics(
- &info_, wasm_engine_->GetOrCreateTurboStatistics(), &zone_stats_));
- pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen");
- }
- if (info_.trace_turbo_json() || info_.trace_turbo_graph()) {
- CodeTracer::StreamScope tracing_scope(data_.GetCodeTracer());
- tracing_scope.stream()
- << "---------------------------------------------------\n"
- << "Begin compiling method " << info_.GetDebugName().get()
- << " using TurboFan" << std::endl;
- }
- if (info_.trace_turbo_graph()) { // Simple textual RPO.
- StdoutStream{} << "-- wasm stub " << CodeKindToString(info_.code_kind())
- << " graph -- " << std::endl
- << AsRPO(*data_.graph());
- }
-
- if (info_.trace_turbo_json()) {
- TurboJsonFile json_of(&info_, std::ios_base::trunc);
- json_of << "{\"function\":\"" << info_.GetDebugName().get()
- << "\", \"source\":\"\",\n\"phases\":[";
- }
- pipeline_.RunPrintAndVerify("V8.WasmMachineCode", true);
- pipeline_.ComputeScheduledGraph();
- if (pipeline_.SelectInstructionsAndAssemble(call_descriptor_)) {
- return CompilationJob::SUCCEEDED;
- }
- return CompilationJob::FAILED;
-}
-
-CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl(
- Isolate* isolate) {
- Handle<Code> code;
- if (!pipeline_.FinalizeCode(call_descriptor_).ToHandle(&code)) {
- V8::FatalProcessOutOfMemory(isolate,
- "WasmHeapStubCompilationJob::FinalizeJobImpl");
- }
- if (pipeline_.CommitDependencies(code)) {
- info_.SetCode(code);
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_opt_code) {
- CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
- code->Disassemble(compilation_info()->GetDebugName().get(),
- tracing_scope.stream(), isolate);
- }
-#endif
- return SUCCEEDED;
- }
- return FAILED;
-}
-
template <typename Phase, typename... Args>
void PipelineImpl::Run(Args&&... args) {
PipelineRunScope scope(this->data_, Phase::phase_name(),
@@ -1521,6 +1421,7 @@ struct InliningPhase {
}
};
+#if V8_ENABLE_WEBASSEMBLY
struct WasmInliningPhase {
DECL_PIPELINE_PHASE_CONSTANTS(WasmInlining)
void Run(PipelineData* data, Zone* temp_zone) {
@@ -1544,6 +1445,7 @@ struct WasmInliningPhase {
graph_reducer.ReduceGraph();
}
};
+#endif // V8_ENABLE_WEBASSEMBLY
struct TyperPhase {
DECL_PIPELINE_PHASE_CONSTANTS(Typer)
@@ -1776,6 +1678,42 @@ struct LoopPeelingPhase {
}
};
+#if V8_ENABLE_WEBASSEMBLY
+struct WasmLoopUnrollingPhase {
+ DECL_PIPELINE_PHASE_CONSTANTS(WasmLoopUnrolling)
+
+ void Run(PipelineData* data, Zone* temp_zone,
+ std::vector<compiler::WasmLoopInfo>* loop_infos) {
+ for (WasmLoopInfo& loop_info : *loop_infos) {
+ if (loop_info.is_innermost) {
+ ZoneUnorderedSet<Node*>* loop = LoopFinder::FindUnnestedLoopFromHeader(
+ loop_info.header, temp_zone,
+ // Only discover the loop until its size is the maximum unrolled
+ // size for its depth.
+ maximum_unrollable_size(loop_info.nesting_depth));
+ UnrollLoop(loop_info.header, loop, loop_info.nesting_depth,
+ data->graph(), data->common(), temp_zone,
+ data->source_positions(), data->node_origins());
+ }
+ }
+
+ for (WasmLoopInfo& loop_info : *loop_infos) {
+ std::unordered_set<Node*> loop_exits;
+ // We collect exits into a set first because we are not allowed to mutate
+ // them while iterating uses().
+ for (Node* use : loop_info.header->uses()) {
+ if (use->opcode() == IrOpcode::kLoopExit) {
+ loop_exits.insert(use);
+ }
+ }
+ for (Node* use : loop_exits) {
+ LoopPeeler::EliminateLoopExit(use);
+ }
+ }
+ }
+};
+#endif // V8_ENABLE_WEBASSEMBLY
+
struct LoopExitEliminationPhase {
DECL_PIPELINE_PHASE_CONSTANTS(LoopExitElimination)
@@ -2033,6 +1971,19 @@ struct MachineOperatorOptimizationPhase {
}
};
+struct WasmBaseOptimizationPhase {
+ DECL_PIPELINE_PHASE_CONSTANTS(WasmBaseOptimization)
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->mcgraph()->Dead(), data->observe_node_manager());
+ ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
+ AddReducer(data, &graph_reducer, &value_numbering);
+ graph_reducer.ReduceGraph();
+ }
+};
+
struct DecompressionOptimizationPhase {
DECL_PIPELINE_PHASE_CONSTANTS(DecompressionOptimization)
@@ -2092,11 +2043,12 @@ struct ScheduledMachineLoweringPhase {
struct CsaEarlyOptimizationPhase {
DECL_PIPELINE_PHASE_CONSTANTS(CSAEarlyOptimization)
- void Run(PipelineData* data, Zone* temp_zone) {
+ void Run(PipelineData* data, Zone* temp_zone, bool allow_signalling_nan) {
GraphReducer graph_reducer(
temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(), data->observe_node_manager());
- MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
+ MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
+ allow_signalling_nan);
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
@@ -2120,7 +2072,7 @@ struct CsaEarlyOptimizationPhase {
struct CsaOptimizationPhase {
DECL_PIPELINE_PHASE_CONSTANTS(CSAOptimization)
- void Run(PipelineData* data, Zone* temp_zone) {
+ void Run(PipelineData* data, Zone* temp_zone, bool allow_signalling_nan) {
GraphReducer graph_reducer(
temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(), data->observe_node_manager());
@@ -2128,7 +2080,8 @@ struct CsaOptimizationPhase {
data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
- MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
+ MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
+ allow_signalling_nan);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->broker(), data->common(),
data->machine(), temp_zone);
@@ -2524,6 +2477,128 @@ struct VerifyGraphPhase {
#undef DECL_PIPELINE_PHASE_CONSTANTS
#undef DECL_PIPELINE_PHASE_CONSTANTS_HELPER
+#if V8_ENABLE_WEBASSEMBLY
+class WasmHeapStubCompilationJob final : public OptimizedCompilationJob {
+ public:
+ WasmHeapStubCompilationJob(Isolate* isolate, wasm::WasmEngine* wasm_engine,
+ CallDescriptor* call_descriptor,
+ std::unique_ptr<Zone> zone, Graph* graph,
+ CodeKind kind, std::unique_ptr<char[]> debug_name,
+ const AssemblerOptions& options,
+ SourcePositionTable* source_positions)
+ // Note that the OptimizedCompilationInfo is not initialized at the time
+ // we pass it to the CompilationJob constructor, but it is not
+ // dereferenced there.
+ : OptimizedCompilationJob(&info_, "TurboFan",
+ CompilationJob::State::kReadyToExecute),
+ debug_name_(std::move(debug_name)),
+ info_(CStrVector(debug_name_.get()), graph->zone(), kind),
+ call_descriptor_(call_descriptor),
+ zone_stats_(zone->allocator()),
+ zone_(std::move(zone)),
+ graph_(graph),
+ data_(&zone_stats_, &info_, isolate, wasm_engine->allocator(), graph_,
+ nullptr, nullptr, source_positions,
+ zone_->New<NodeOriginTable>(graph_), nullptr, options, nullptr),
+ pipeline_(&data_),
+ wasm_engine_(wasm_engine) {}
+
+ WasmHeapStubCompilationJob(const WasmHeapStubCompilationJob&) = delete;
+ WasmHeapStubCompilationJob& operator=(const WasmHeapStubCompilationJob&) =
+ delete;
+
+ protected:
+ Status PrepareJobImpl(Isolate* isolate) final;
+ Status ExecuteJobImpl(RuntimeCallStats* stats,
+ LocalIsolate* local_isolate) final;
+ Status FinalizeJobImpl(Isolate* isolate) final;
+
+ private:
+ std::unique_ptr<char[]> debug_name_;
+ OptimizedCompilationInfo info_;
+ CallDescriptor* call_descriptor_;
+ ZoneStats zone_stats_;
+ std::unique_ptr<Zone> zone_;
+ Graph* graph_;
+ PipelineData data_;
+ PipelineImpl pipeline_;
+ wasm::WasmEngine* wasm_engine_;
+};
+
+// static
+std::unique_ptr<OptimizedCompilationJob>
+Pipeline::NewWasmHeapStubCompilationJob(
+ Isolate* isolate, wasm::WasmEngine* wasm_engine,
+ CallDescriptor* call_descriptor, std::unique_ptr<Zone> zone, Graph* graph,
+ CodeKind kind, std::unique_ptr<char[]> debug_name,
+ const AssemblerOptions& options, SourcePositionTable* source_positions) {
+ return std::make_unique<WasmHeapStubCompilationJob>(
+ isolate, wasm_engine, call_descriptor, std::move(zone), graph, kind,
+ std::move(debug_name), options, source_positions);
+}
+
+CompilationJob::Status WasmHeapStubCompilationJob::PrepareJobImpl(
+ Isolate* isolate) {
+ UNREACHABLE();
+}
+
+CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl(
+ RuntimeCallStats* stats, LocalIsolate* local_isolate) {
+ std::unique_ptr<PipelineStatistics> pipeline_statistics;
+ if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
+ pipeline_statistics.reset(new PipelineStatistics(
+ &info_, wasm_engine_->GetOrCreateTurboStatistics(), &zone_stats_));
+ pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen");
+ }
+ if (info_.trace_turbo_json() || info_.trace_turbo_graph()) {
+ CodeTracer::StreamScope tracing_scope(data_.GetCodeTracer());
+ tracing_scope.stream()
+ << "---------------------------------------------------\n"
+ << "Begin compiling method " << info_.GetDebugName().get()
+ << " using TurboFan" << std::endl;
+ }
+ if (info_.trace_turbo_graph()) { // Simple textual RPO.
+ StdoutStream{} << "-- wasm stub " << CodeKindToString(info_.code_kind())
+ << " graph -- " << std::endl
+ << AsRPO(*data_.graph());
+ }
+
+ if (info_.trace_turbo_json()) {
+ TurboJsonFile json_of(&info_, std::ios_base::trunc);
+ json_of << "{\"function\":\"" << info_.GetDebugName().get()
+ << "\", \"source\":\"\",\n\"phases\":[";
+ }
+ pipeline_.RunPrintAndVerify("V8.WasmMachineCode", true);
+ pipeline_.Run<MemoryOptimizationPhase>();
+ pipeline_.ComputeScheduledGraph();
+ if (pipeline_.SelectInstructionsAndAssemble(call_descriptor_)) {
+ return CompilationJob::SUCCEEDED;
+ }
+ return CompilationJob::FAILED;
+}
+
+CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl(
+ Isolate* isolate) {
+ Handle<Code> code;
+ if (!pipeline_.FinalizeCode(call_descriptor_).ToHandle(&code)) {
+ V8::FatalProcessOutOfMemory(isolate,
+ "WasmHeapStubCompilationJob::FinalizeJobImpl");
+ }
+ if (pipeline_.CommitDependencies(code)) {
+ info_.SetCode(code);
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_opt_code) {
+ CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
+ code->Disassemble(compilation_info()->GetDebugName().get(),
+ tracing_scope.stream(), isolate);
+ }
+#endif
+ return SUCCEEDED;
+ }
+ return FAILED;
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
void PipelineImpl::RunPrintAndVerify(const char* phase, bool untyped) {
if (info()->trace_turbo_json() || info()->trace_turbo_graph()) {
Run<PrintGraphPhase>(phase);
@@ -2658,11 +2733,13 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
Run<SimplifiedLoweringPhase>(linkage);
RunPrintAndVerify(SimplifiedLoweringPhase::phase_name(), true);
+#if V8_ENABLE_WEBASSEMBLY
if (data->has_js_wasm_calls()) {
DCHECK(FLAG_turbo_inline_js_wasm_calls);
Run<WasmInliningPhase>();
RunPrintAndVerify(WasmInliningPhase::phase_name(), true);
}
+#endif // V8_ENABLE_WEBASSEMBLY
// From now on it is invalid to look at types on the nodes, because the types
// on the nodes might not make sense after representation selection due to the
@@ -2788,11 +2865,6 @@ bool PipelineImpl::OptimizeGraphForMidTier(Linkage* linkage) {
Run<ScheduledMachineLoweringPhase>();
RunPrintAndVerify(ScheduledMachineLoweringPhase::phase_name(), true);
- // The DecompressionOptimizationPhase updates node's operations but does not
- // otherwise rewrite the graph, thus it is safe to run on a scheduled graph.
- Run<DecompressionOptimizationPhase>();
- RunPrintAndVerify(DecompressionOptimizationPhase::phase_name(), true);
-
data->source_positions()->RemoveDecorator();
if (data->info()->trace_turbo_json()) {
data->node_origins()->RemoveDecorator();
@@ -2922,14 +2994,14 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
pipeline.Run<PrintGraphPhase>("V8.TFMachineCode");
}
- pipeline.Run<CsaEarlyOptimizationPhase>();
+ pipeline.Run<CsaEarlyOptimizationPhase>(true);
pipeline.RunPrintAndVerify(CsaEarlyOptimizationPhase::phase_name(), true);
// Optimize memory access and allocation operations.
pipeline.Run<MemoryOptimizationPhase>();
pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
- pipeline.Run<CsaOptimizationPhase>();
+ pipeline.Run<CsaOptimizationPhase>(true);
pipeline.RunPrintAndVerify(CsaOptimizationPhase::phase_name(), true);
pipeline.Run<DecompressionOptimizationPhase>();
@@ -2996,6 +3068,7 @@ std::ostream& operator<<(std::ostream& out, const BlockStartsAsJSON& s) {
return out;
}
+#if V8_ENABLE_WEBASSEMBLY
// static
wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
wasm::WasmEngine* wasm_engine, CallDescriptor* call_descriptor,
@@ -3043,6 +3116,10 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
}
pipeline.RunPrintAndVerify("V8.WasmNativeStubMachineCode", true);
+
+ pipeline.Run<MemoryOptimizationPhase>();
+ pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
+
pipeline.ComputeScheduledGraph();
Linkage linkage(call_descriptor);
@@ -3095,6 +3172,116 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
}
// static
+void Pipeline::GenerateCodeForWasmFunction(
+ OptimizedCompilationInfo* info, wasm::WasmEngine* wasm_engine,
+ MachineGraph* mcgraph, CallDescriptor* call_descriptor,
+ SourcePositionTable* source_positions, NodeOriginTable* node_origins,
+ wasm::FunctionBody function_body, const wasm::WasmModule* module,
+ int function_index, std::vector<compiler::WasmLoopInfo>* loop_info) {
+ ZoneStats zone_stats(wasm_engine->allocator());
+ std::unique_ptr<PipelineStatistics> pipeline_statistics(
+ CreatePipelineStatistics(wasm_engine, function_body, module, info,
+ &zone_stats));
+ // {instruction_buffer} must live longer than {PipelineData}, since
+ // {PipelineData} will reference the {instruction_buffer} via the
+ // {AssemblerBuffer} of the {Assembler} contained in the {CodeGenerator}.
+ std::unique_ptr<wasm::WasmInstructionBuffer> instruction_buffer =
+ wasm::WasmInstructionBuffer::New();
+ PipelineData data(&zone_stats, wasm_engine, info, mcgraph,
+ pipeline_statistics.get(), source_positions, node_origins,
+ WasmAssemblerOptions());
+
+ PipelineImpl pipeline(&data);
+
+ if (data.info()->trace_turbo_json() || data.info()->trace_turbo_graph()) {
+ CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
+ tracing_scope.stream()
+ << "---------------------------------------------------\n"
+ << "Begin compiling method " << data.info()->GetDebugName().get()
+ << " using TurboFan" << std::endl;
+ }
+
+ pipeline.RunPrintAndVerify("V8.WasmMachineCode", true);
+
+ data.BeginPhaseKind("V8.WasmOptimization");
+ if (FLAG_wasm_loop_unrolling) {
+ pipeline.Run<WasmLoopUnrollingPhase>(loop_info);
+ pipeline.RunPrintAndVerify(WasmLoopUnrollingPhase::phase_name(), true);
+ }
+ const bool is_asm_js = is_asmjs_module(module);
+
+ if (FLAG_wasm_opt || is_asm_js) {
+ pipeline.Run<CsaEarlyOptimizationPhase>(is_asm_js);
+ pipeline.RunPrintAndVerify(CsaEarlyOptimizationPhase::phase_name(), true);
+ } else {
+ pipeline.Run<WasmBaseOptimizationPhase>();
+ pipeline.RunPrintAndVerify(WasmBaseOptimizationPhase::phase_name(), true);
+ }
+
+ pipeline.Run<MemoryOptimizationPhase>();
+ pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
+
+ if (FLAG_turbo_splitting && !is_asm_js) {
+ data.info()->set_splitting();
+ }
+
+ if (data.node_origins()) {
+ data.node_origins()->RemoveDecorator();
+ }
+
+ pipeline.ComputeScheduledGraph();
+
+ Linkage linkage(call_descriptor);
+ if (!pipeline.SelectInstructions(&linkage)) return;
+ pipeline.AssembleCode(&linkage, instruction_buffer->CreateView());
+
+ auto result = std::make_unique<wasm::WasmCompilationResult>();
+ CodeGenerator* code_generator = pipeline.code_generator();
+ code_generator->tasm()->GetCode(
+ nullptr, &result->code_desc, code_generator->safepoint_table_builder(),
+ static_cast<int>(code_generator->GetHandlerTableOffset()));
+
+ result->instr_buffer = instruction_buffer->ReleaseBuffer();
+ result->frame_slot_count = code_generator->frame()->GetTotalFrameSlotCount();
+ result->tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
+ result->source_positions = code_generator->GetSourcePositionTable();
+ result->protected_instructions_data =
+ code_generator->GetProtectedInstructionsData();
+ result->result_tier = wasm::ExecutionTier::kTurbofan;
+
+ if (data.info()->trace_turbo_json()) {
+ TurboJsonFile json_of(data.info(), std::ios_base::app);
+ json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
+ << BlockStartsAsJSON{&code_generator->block_starts()}
+ << "\"data\":\"";
+#ifdef ENABLE_DISASSEMBLER
+ std::stringstream disassembler_stream;
+ Disassembler::Decode(
+ nullptr, &disassembler_stream, result->code_desc.buffer,
+ result->code_desc.buffer + result->code_desc.safepoint_table_offset,
+ CodeReference(&result->code_desc));
+ for (auto const c : disassembler_stream.str()) {
+ json_of << AsEscapedUC16ForJSON(c);
+ }
+#endif // ENABLE_DISASSEMBLER
+ json_of << "\"}\n]";
+ json_of << "\n}";
+ }
+
+ if (data.info()->trace_turbo_json() || data.info()->trace_turbo_graph()) {
+ CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
+ tracing_scope.stream()
+ << "---------------------------------------------------\n"
+ << "Finished compiling method " << data.info()->GetDebugName().get()
+ << " using TurboFan" << std::endl;
+ }
+
+ DCHECK(result->succeeded());
+ info->SetWasmCompilationResult(std::move(result));
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
+// static
MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
OptimizedCompilationInfo* info, Isolate* isolate,
std::unique_ptr<JSHeapBroker>* out_broker) {
@@ -3203,135 +3390,6 @@ std::unique_ptr<OptimizedCompilationJob> Pipeline::NewCompilationJob(
isolate, shared, function, osr_offset, osr_frame, code_kind);
}
-// static
-void Pipeline::GenerateCodeForWasmFunction(
- OptimizedCompilationInfo* info, wasm::WasmEngine* wasm_engine,
- MachineGraph* mcgraph, CallDescriptor* call_descriptor,
- SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- wasm::FunctionBody function_body, const wasm::WasmModule* module,
- int function_index) {
- ZoneStats zone_stats(wasm_engine->allocator());
- std::unique_ptr<PipelineStatistics> pipeline_statistics(
- CreatePipelineStatistics(wasm_engine, function_body, module, info,
- &zone_stats));
- // {instruction_buffer} must live longer than {PipelineData}, since
- // {PipelineData} will reference the {instruction_buffer} via the
- // {AssemblerBuffer} of the {Assembler} contained in the {CodeGenerator}.
- std::unique_ptr<wasm::WasmInstructionBuffer> instruction_buffer =
- wasm::WasmInstructionBuffer::New();
- PipelineData data(&zone_stats, wasm_engine, info, mcgraph,
- pipeline_statistics.get(), source_positions, node_origins,
- WasmAssemblerOptions());
-
- PipelineImpl pipeline(&data);
-
- if (data.info()->trace_turbo_json() || data.info()->trace_turbo_graph()) {
- CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
- tracing_scope.stream()
- << "---------------------------------------------------\n"
- << "Begin compiling method " << data.info()->GetDebugName().get()
- << " using TurboFan" << std::endl;
- }
-
- pipeline.RunPrintAndVerify("V8.WasmMachineCode", true);
-
- if (FLAG_wasm_loop_unrolling) {
- pipeline.Run<LoopExitEliminationPhase>();
- pipeline.RunPrintAndVerify("V8.LoopExitEliminationPhase", true);
- }
-
- data.BeginPhaseKind("V8.WasmOptimization");
- const bool is_asm_js = is_asmjs_module(module);
- if (FLAG_turbo_splitting && !is_asm_js) {
- data.info()->set_splitting();
- }
- if (FLAG_wasm_opt || is_asm_js) {
- PipelineRunScope scope(&data, "V8.WasmFullOptimization",
- RuntimeCallCounterId::kOptimizeWasmFullOptimization);
- GraphReducer graph_reducer(
- scope.zone(), data.graph(), &data.info()->tick_counter(), data.broker(),
- data.mcgraph()->Dead(), data.observe_node_manager());
- DeadCodeElimination dead_code_elimination(&graph_reducer, data.graph(),
- data.common(), scope.zone());
- ValueNumberingReducer value_numbering(scope.zone(), data.graph()->zone());
- const bool allow_signalling_nan = is_asm_js;
- MachineOperatorReducer machine_reducer(&graph_reducer, data.mcgraph(),
- allow_signalling_nan);
- CommonOperatorReducer common_reducer(&graph_reducer, data.graph(),
- data.broker(), data.common(),
- data.machine(), scope.zone());
- AddReducer(&data, &graph_reducer, &dead_code_elimination);
- AddReducer(&data, &graph_reducer, &machine_reducer);
- AddReducer(&data, &graph_reducer, &common_reducer);
- AddReducer(&data, &graph_reducer, &value_numbering);
- graph_reducer.ReduceGraph();
- } else {
- PipelineRunScope scope(&data, "V8.OptimizeWasmBaseOptimization",
- RuntimeCallCounterId::kOptimizeWasmBaseOptimization);
- GraphReducer graph_reducer(
- scope.zone(), data.graph(), &data.info()->tick_counter(), data.broker(),
- data.mcgraph()->Dead(), data.observe_node_manager());
- ValueNumberingReducer value_numbering(scope.zone(), data.graph()->zone());
- AddReducer(&data, &graph_reducer, &value_numbering);
- graph_reducer.ReduceGraph();
- }
- pipeline.RunPrintAndVerify("V8.WasmOptimization", true);
-
- if (data.node_origins()) {
- data.node_origins()->RemoveDecorator();
- }
-
- pipeline.ComputeScheduledGraph();
-
- Linkage linkage(call_descriptor);
- if (!pipeline.SelectInstructions(&linkage)) return;
- pipeline.AssembleCode(&linkage, instruction_buffer->CreateView());
-
- auto result = std::make_unique<wasm::WasmCompilationResult>();
- CodeGenerator* code_generator = pipeline.code_generator();
- code_generator->tasm()->GetCode(
- nullptr, &result->code_desc, code_generator->safepoint_table_builder(),
- static_cast<int>(code_generator->GetHandlerTableOffset()));
-
- result->instr_buffer = instruction_buffer->ReleaseBuffer();
- result->frame_slot_count = code_generator->frame()->GetTotalFrameSlotCount();
- result->tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
- result->source_positions = code_generator->GetSourcePositionTable();
- result->protected_instructions_data =
- code_generator->GetProtectedInstructionsData();
- result->result_tier = wasm::ExecutionTier::kTurbofan;
-
- if (data.info()->trace_turbo_json()) {
- TurboJsonFile json_of(data.info(), std::ios_base::app);
- json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
- << BlockStartsAsJSON{&code_generator->block_starts()}
- << "\"data\":\"";
-#ifdef ENABLE_DISASSEMBLER
- std::stringstream disassembler_stream;
- Disassembler::Decode(
- nullptr, &disassembler_stream, result->code_desc.buffer,
- result->code_desc.buffer + result->code_desc.safepoint_table_offset,
- CodeReference(&result->code_desc));
- for (auto const c : disassembler_stream.str()) {
- json_of << AsEscapedUC16ForJSON(c);
- }
-#endif // ENABLE_DISASSEMBLER
- json_of << "\"}\n]";
- json_of << "\n}";
- }
-
- if (data.info()->trace_turbo_json() || data.info()->trace_turbo_graph()) {
- CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
- tracing_scope.stream()
- << "---------------------------------------------------\n"
- << "Finished compiling method " << data.info()->GetDebugName().get()
- << " using TurboFan" << std::endl;
- }
-
- DCHECK(result->succeeded());
- info->SetWasmCompilationResult(std::move(result));
-}
-
bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
InstructionSequence* sequence,
bool use_mid_tier_register_allocator,
@@ -3415,8 +3473,10 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
}
// TODO(jgruber): The parameter is called is_stub but actually contains
// something different. Update either the name or its contents.
- const bool is_stub =
- !data->info()->IsOptimizing() && !data->info()->IsWasm();
+ bool is_stub = !data->info()->IsOptimizing();
+#if V8_ENABLE_WEBASSEMBLY
+ if (data->info()->IsWasm()) is_stub = false;
+#endif // V8_ENABLE_WEBASSEMBLY
Zone temp_zone(data->allocator(), kMachineGraphVerifierZoneName);
MachineGraphVerifier::Run(data->graph(), data->schedule(), linkage, is_stub,
data->debug_name(), &temp_zone);
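
The WasmLoopUnrollingPhase introduced above first gathers the kLoopExit uses into a set and only then eliminates them, because the use list may not be mutated while it is being iterated. A standalone sketch of that collect-then-mutate pattern over an ordinary container; the names here are illustrative, not V8 API:

#include <algorithm>
#include <cassert>
#include <unordered_set>
#include <vector>

// Removes every element equal to `needle`. Matches are collected into a
// separate set during the read-only pass; the container is only mutated
// afterwards, mirroring how loop exits are collected before elimination.
int RemoveMatching(std::vector<int>& uses, int needle) {
  std::unordered_set<int> to_remove;
  for (int use : uses) {
    if (use == needle) to_remove.insert(use);  // Collect, do not erase yet.
  }
  int removed = 0;
  for (int use : to_remove) {
    // Safe to mutate now that the first iteration has finished.
    uses.erase(std::remove(uses.begin(), uses.end(), use), uses.end());
    ++removed;
  }
  return removed;
}

int main() {
  std::vector<int> uses{1, 2, 2, 3};
  assert(RemoveMatching(uses, 2) == 1);  // One distinct match collected.
  assert(uses.size() == 2);              // Both copies of 2 were erased.
}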
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index db3aab46235..d07dcb5a722 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -41,6 +41,7 @@ class MachineGraph;
class NodeOriginTable;
class Schedule;
class SourcePositionTable;
+struct WasmLoopInfo;
class Pipeline : public AllStatic {
public:
@@ -57,7 +58,7 @@ class Pipeline : public AllStatic {
MachineGraph* mcgraph, CallDescriptor* call_descriptor,
SourcePositionTable* source_positions, NodeOriginTable* node_origins,
wasm::FunctionBody function_body, const wasm::WasmModule* module,
- int function_index);
+ int function_index, std::vector<compiler::WasmLoopInfo>* loop_infos);
// Run the pipeline on a machine graph and generate code.
static wasm::WasmCompilationResult GenerateCodeForWasmNativeStub(
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index a7eddbe826c..c5a70555a7d 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -148,10 +148,40 @@ MachineRepresentation PropertyAccessBuilder::ConvertRepresentation(
}
}
-Node* PropertyAccessBuilder::TryBuildLoadConstantDataField(
+Node* PropertyAccessBuilder::FoldLoadDictPrototypeConstant(
+ PropertyAccessInfo const& access_info) {
+ DCHECK(V8_DICT_PROPERTY_CONST_TRACKING_BOOL);
+ DCHECK(access_info.IsDictionaryProtoDataConstant());
+
+ JSObjectRef holder(broker(), access_info.holder().ToHandleChecked());
+ base::Optional<ObjectRef> value =
+ holder.GetOwnDictionaryProperty(access_info.dictionary_index());
+
+ for (Handle<Map> map : access_info.lookup_start_object_maps()) {
+ // Non-JSReceivers that passed AccessInfoFactory::ComputePropertyAccessInfo
+ // must have a different lookup start map.
+ if (!map->IsJSReceiverMap()) {
+ // Perform the implicit ToObject for primitives here.
+ // Implemented according to ES6 section 7.3.2 GetV (V, P).
+ Handle<JSFunction> constructor =
+ Map::GetConstructorFunction(
+ map, broker()->target_native_context().object())
+ .ToHandleChecked();
+ map = handle(constructor->initial_map(), isolate());
+ DCHECK(map->IsJSObjectMap());
+ }
+ dependencies()->DependOnConstantInDictionaryPrototypeChain(
+ MapRef{broker(), map}, NameRef{broker(), access_info.name()},
+ value.value(), PropertyKind::kData);
+ }
+
+ return jsgraph()->Constant(value.value());
+}
+
+Node* PropertyAccessBuilder::TryFoldLoadConstantDataField(
NameRef const& name, PropertyAccessInfo const& access_info,
Node* lookup_start_object) {
- if (!access_info.IsDataConstant()) return nullptr;
+ if (!access_info.IsFastDataConstant()) return nullptr;
// First, determine if we have a constant holder to load from.
Handle<JSObject> holder;
@@ -177,7 +207,7 @@ Node* PropertyAccessBuilder::TryBuildLoadConstantDataField(
}
JSObjectRef holder_ref(broker(), holder);
- base::Optional<ObjectRef> value = holder_ref.GetOwnDataProperty(
+ base::Optional<ObjectRef> value = holder_ref.GetOwnFastDataProperty(
access_info.field_representation(), access_info.field_index());
if (!value.has_value()) {
return nullptr;
@@ -272,9 +302,10 @@ Node* PropertyAccessBuilder::BuildMinimorphicLoadDataField(
Node* PropertyAccessBuilder::BuildLoadDataField(
NameRef const& name, PropertyAccessInfo const& access_info,
Node* lookup_start_object, Node** effect, Node** control) {
- DCHECK(access_info.IsDataField() || access_info.IsDataConstant());
- if (Node* value = TryBuildLoadConstantDataField(name, access_info,
- lookup_start_object)) {
+ DCHECK(access_info.IsDataField() || access_info.IsFastDataConstant());
+
+ if (Node* value = TryFoldLoadConstantDataField(name, access_info,
+ lookup_start_object)) {
return value;
}
diff --git a/deps/v8/src/compiler/property-access-builder.h b/deps/v8/src/compiler/property-access-builder.h
index 237f501dbb8..5f81f87e7c3 100644
--- a/deps/v8/src/compiler/property-access-builder.h
+++ b/deps/v8/src/compiler/property-access-builder.h
@@ -64,6 +64,10 @@ class PropertyAccessBuilder {
Node* lookup_start_object, Node** effect,
Node** control);
+ // Loads a constant value from a prototype object in dictionary mode and
+ // constant-folds it.
+ Node* FoldLoadDictPrototypeConstant(PropertyAccessInfo const& access_info);
+
// Builds the load for data-field access for minimorphic loads that use
// dynamic map checks. These cannot depend on any information from the maps.
Node* BuildMinimorphicLoadDataField(
@@ -82,9 +86,9 @@ class PropertyAccessBuilder {
CommonOperatorBuilder* common() const;
SimplifiedOperatorBuilder* simplified() const;
- Node* TryBuildLoadConstantDataField(NameRef const& name,
- PropertyAccessInfo const& access_info,
- Node* lookup_start_object);
+ Node* TryFoldLoadConstantDataField(NameRef const& name,
+ PropertyAccessInfo const& access_info,
+ Node* lookup_start_object);
// Returns a node with the holder for the property access described by
// {access_info}.
Node* ResolveHolder(PropertyAccessInfo const& access_info,
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index df12030c31d..f79776bad99 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -648,7 +648,7 @@ void RawMachineAssembler::PopAndReturn(Node* pop, Node* value) {
// be dropped in ADDITION to the 'pop' number of arguments).
// Additionally, in order to simplify assembly code, PopAndReturn is also
// not allowed in builtins with stub linkage and parameters on stack.
- CHECK_EQ(call_descriptor()->StackParameterCount(), 0);
+ CHECK_EQ(call_descriptor()->ParameterSlotCount(), 0);
Node* values[] = {pop, value};
Node* ret = MakeNode(common()->Return(1), 2, values);
schedule()->AddReturn(CurrentBlock(), ret);
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 907464f57e3..1dff0a7c0c0 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -539,6 +539,14 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* Word32PairSar(Node* low_word, Node* high_word, Node* shift) {
return AddNode(machine()->Word32PairSar(), low_word, high_word, shift);
}
+ Node* Word32Popcnt(Node* a) {
+ return AddNode(machine()->Word32Popcnt().op(), a);
+ }
+ Node* Word64Popcnt(Node* a) {
+ return AddNode(machine()->Word64Popcnt().op(), a);
+ }
+ Node* Word32Ctz(Node* a) { return AddNode(machine()->Word32Ctz().op(), a); }
+ Node* Word64Ctz(Node* a) { return AddNode(machine()->Word64Ctz().op(), a); }
Node* StackPointerGreaterThan(Node* value) {
return AddNode(
machine()->StackPointerGreaterThan(StackCheckKind::kCodeStubAssembler),
@@ -694,6 +702,12 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* Float64GreaterThanOrEqual(Node* a, Node* b) {
return Float64LessThanOrEqual(b, a);
}
+ Node* Float32Select(Node* condition, Node* b, Node* c) {
+ return AddNode(machine()->Float32Select().op(), condition, b, c);
+ }
+ Node* Float64Select(Node* condition, Node* b, Node* c) {
+ return AddNode(machine()->Float64Select().op(), condition, b, c);
+ }
// Conversions.
Node* BitcastTaggedToWord(Node* a) {
@@ -856,6 +870,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
}
// SIMD operations.
+ Node* S128Const(const uint8_t value[16]) {
+ return AddNode(machine()->S128Const(value));
+ }
Node* I64x2Splat(Node* a) { return AddNode(machine()->I64x2Splat(), a); }
Node* I64x2SplatI32Pair(Node* a, Node* b) {
return AddNode(machine()->I64x2SplatI32Pair(), a, b);
@@ -864,6 +881,12 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* I16x8Splat(Node* a) { return AddNode(machine()->I16x8Splat(), a); }
Node* I8x16Splat(Node* a) { return AddNode(machine()->I8x16Splat(), a); }
+ Node* I8x16BitMask(Node* a) { return AddNode(machine()->I8x16BitMask(), a); }
+
+ Node* I8x16Eq(Node* a, Node* b) {
+ return AddNode(machine()->I8x16Eq(), a, b);
+ }
+
// Stack operations.
Node* LoadFramePointer() { return AddNode(machine()->LoadFramePointer()); }
Node* LoadParentFramePointer() {
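
The RawMachineAssembler helpers added above wrap existing machine operators; their semantics are the usual ones: population count counts set bits, and a select picks one of two values from a condition without a branch. A standalone sketch of those two semantics (not the V8 operators themselves):

#include <cassert>
#include <cstdint>

// Count the set bits of a 32-bit word by clearing the lowest set bit per
// iteration; hardware popcnt instructions compute the same result directly.
unsigned Word32Popcnt(uint32_t x) {
  unsigned count = 0;
  while (x != 0) {
    x &= x - 1;  // Clear the lowest set bit.
    ++count;
  }
  return count;
}

// Branchless in spirit: pick one of two values based on a condition, which
// is what a conditional-select machine instruction does.
double Float64Select(bool condition, double if_true, double if_false) {
  return condition ? if_true : if_false;
}

int main() {
  assert(Word32Popcnt(0b1011u) == 3);
  assert(Float64Select(false, 1.5, 2.5) == 2.5);
}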
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
index 3dece629780..4556f772f11 100644
--- a/deps/v8/src/compiler/schedule.cc
+++ b/deps/v8/src/compiler/schedule.cc
@@ -104,7 +104,7 @@ BasicBlock* BasicBlock::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
return b1;
}
-void BasicBlock::Print() { StdoutStream{} << this; }
+void BasicBlock::Print() { StdoutStream{} << *this << "\n"; }
std::ostream& operator<<(std::ostream& os, const BasicBlock& block) {
os << "B" << block.id();
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.cc b/deps/v8/src/compiler/serializer-for-background-compilation.cc
index 5be9a7d7056..ad6879a02dd 100644
--- a/deps/v8/src/compiler/serializer-for-background-compilation.cc
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.cc
@@ -2260,16 +2260,12 @@ void SerializerForBackgroundCompilation::ProcessCallVarArgs(
void SerializerForBackgroundCompilation::ProcessApiCall(
Handle<SharedFunctionInfo> target, const HintsVector& arguments) {
- ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle(
- Builtins::kCallFunctionTemplate_CheckAccess));
- ObjectRef(broker(),
- broker()->isolate()->builtins()->builtin_handle(
- Builtins::kCallFunctionTemplate_CheckCompatibleReceiver));
- ObjectRef(
- broker(),
- broker()->isolate()->builtins()->builtin_handle(
- Builtins::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver));
-
+ for (const auto b :
+ {Builtins::kCallFunctionTemplate_CheckAccess,
+ Builtins::kCallFunctionTemplate_CheckCompatibleReceiver,
+ Builtins::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver}) {
+ ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle(b));
+ }
FunctionTemplateInfoRef target_template_info(
broker(),
broker()->CanonicalPersistentHandle(target->function_data(kAcquireLoad)));
@@ -2670,12 +2666,12 @@ PropertyAccessInfo SerializerForBackgroundCompilation::ProcessMapForRegExpTest(
SerializationPolicy::kSerializeIfNeeded);
Handle<JSObject> holder;
- if (ai_exec.IsDataConstant() && ai_exec.holder().ToHandle(&holder)) {
+ if (ai_exec.IsFastDataConstant() && ai_exec.holder().ToHandle(&holder)) {
// The property is on the prototype chain.
JSObjectRef holder_ref(broker(), holder);
- holder_ref.GetOwnDataProperty(ai_exec.field_representation(),
- ai_exec.field_index(),
- SerializationPolicy::kSerializeIfNeeded);
+ holder_ref.GetOwnFastDataProperty(ai_exec.field_representation(),
+ ai_exec.field_index(),
+ SerializationPolicy::kSerializeIfNeeded);
}
return ai_exec;
}
@@ -2689,12 +2685,12 @@ void SerializerForBackgroundCompilation::ProcessHintsForRegExpTest(
PropertyAccessInfo ai_exec =
ProcessMapForRegExpTest(MapRef(broker(), regexp_map));
Handle<JSObject> holder;
- if (ai_exec.IsDataConstant() && !ai_exec.holder().ToHandle(&holder)) {
+ if (ai_exec.IsFastDataConstant() && !ai_exec.holder().ToHandle(&holder)) {
// The property is on the object itself.
JSObjectRef holder_ref(broker(), regexp);
- holder_ref.GetOwnDataProperty(ai_exec.field_representation(),
- ai_exec.field_index(),
- SerializationPolicy::kSerializeIfNeeded);
+ holder_ref.GetOwnFastDataProperty(
+ ai_exec.field_representation(), ai_exec.field_index(),
+ SerializationPolicy::kSerializeIfNeeded);
}
}
@@ -3016,7 +3012,9 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
// For JSNativeContextSpecialization::InlinePropertySetterCall
// and InlinePropertyGetterCall.
- if (access_info.IsAccessorConstant() && !access_info.constant().is_null()) {
+ if ((access_info.IsFastAccessorConstant() ||
+ access_info.IsDictionaryProtoAccessorConstant()) &&
+ !access_info.constant().is_null()) {
if (access_info.constant()->IsJSFunction()) {
JSFunctionRef function(broker(), access_info.constant());
@@ -3062,8 +3060,10 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
switch (access_mode) {
case AccessMode::kLoad:
- // For PropertyAccessBuilder::TryBuildLoadConstantDataField
- if (access_info.IsDataConstant()) {
+ // For PropertyAccessBuilder::TryFoldLoadConstantDataField and
+ // PropertyAccessBuilder::FoldLoadDictPrototypeConstant
+ if (access_info.IsFastDataConstant() ||
+ access_info.IsDictionaryProtoDataConstant()) {
base::Optional<JSObjectRef> holder;
Handle<JSObject> prototype;
if (access_info.holder().ToHandle(&prototype)) {
@@ -3075,9 +3075,14 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
}
if (holder.has_value()) {
- base::Optional<ObjectRef> constant(holder->GetOwnDataProperty(
- access_info.field_representation(), access_info.field_index(),
- SerializationPolicy::kSerializeIfNeeded));
+ SerializationPolicy policy = SerializationPolicy::kSerializeIfNeeded;
+ base::Optional<ObjectRef> constant =
+ access_info.IsFastDataConstant()
+ ? holder->GetOwnFastDataProperty(
+ access_info.field_representation(),
+ access_info.field_index(), policy)
+ : holder->GetOwnDictionaryProperty(
+ access_info.dictionary_index(), policy);
if (constant.has_value()) {
result_hints->AddConstant(constant->object(), zone(), broker());
}
@@ -3087,7 +3092,7 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
case AccessMode::kStore:
case AccessMode::kStoreInLiteral:
// For MapInference (StoreField case).
- if (access_info.IsDataField() || access_info.IsDataConstant()) {
+ if (access_info.IsDataField() || access_info.IsFastDataConstant()) {
Handle<Map> transition_map;
if (access_info.transition_map().ToHandle(&transition_map)) {
MapRef map_ref(broker(), transition_map);
@@ -3324,6 +3329,7 @@ void SerializerForBackgroundCompilation::ProcessElementAccess(
if (key_ref.IsSmi() && key_ref.AsSmi() >= 0) {
base::Optional<ObjectRef> element;
if (receiver_ref.IsJSObject()) {
+ receiver_ref.AsJSObject().SerializeElements();
element = receiver_ref.AsJSObject().GetOwnConstantElement(
key_ref.AsSmi(), SerializationPolicy::kSerializeIfNeeded);
if (!element.has_value() && receiver_ref.IsJSArray()) {
@@ -3331,7 +3337,6 @@ void SerializerForBackgroundCompilation::ProcessElementAccess(
// cow-array we can exploit the fact that any future write to the
// element will replace the whole elements storage.
JSArrayRef array_ref = receiver_ref.AsJSArray();
- array_ref.SerializeElements();
array_ref.GetOwnCowElement(
array_ref.elements().value(), key_ref.AsSmi(),
SerializationPolicy::kSerializeIfNeeded);
@@ -3442,12 +3447,12 @@ void SerializerForBackgroundCompilation::ProcessConstantForInstanceOf(
if (access_info.IsNotFound()) {
ProcessConstantForOrdinaryHasInstance(constructor_heap_object,
walk_prototypes);
- } else if (access_info.IsDataConstant()) {
+ } else if (access_info.IsFastDataConstant()) {
Handle<JSObject> holder;
bool found_on_proto = access_info.holder().ToHandle(&holder);
JSObjectRef holder_ref = found_on_proto ? JSObjectRef(broker(), holder)
: constructor.AsJSObject();
- base::Optional<ObjectRef> constant = holder_ref.GetOwnDataProperty(
+ base::Optional<ObjectRef> constant = holder_ref.GetOwnFastDataProperty(
access_info.field_representation(), access_info.field_index(),
SerializationPolicy::kSerializeIfNeeded);
CHECK(constant.has_value());
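
The ProcessApiCall change above replaces three near-identical ObjectRef constructions with a single range-for over a braced list of builtin ids. A standalone sketch of the same de-duplication pattern, with Register and Kind as stand-ins rather than V8 API:

#include <cassert>
#include <initializer_list>
#include <vector>

enum class Kind { kCheckAccess, kCheckCompatibleReceiver, kCheckBoth };

std::vector<Kind> registered;
void Register(Kind kind) { registered.push_back(kind); }

int main() {
  // One statement covers all three variants instead of repeating the call.
  for (const auto kind : {Kind::kCheckAccess, Kind::kCheckCompatibleReceiver,
                          Kind::kCheckBoth}) {
    Register(kind);
  }
  assert(registered.size() == 3);
}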
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index 445898d882a..7c96393c4ce 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -60,14 +60,15 @@ int GetMaskForShift(Node* node) {
} // anonymous namespace
SimdScalarLowering::SimdScalarLowering(
- MachineGraph* mcgraph, Signature<MachineRepresentation>* signature)
+ MachineGraph* mcgraph, SimplifiedOperatorBuilder* simplified,
+ Signature<MachineRepresentation>* signature)
: mcgraph_(mcgraph),
+ simplified_(simplified),
state_(mcgraph->graph(), 3),
stack_(mcgraph_->zone()),
replacements_(nullptr),
signature_(signature),
- placeholder_(graph()->NewNode(common()->Parameter(-2, "placeholder"),
- graph()->start())),
+ placeholder_(graph()->NewNode(common()->Dead())),
parameter_count_after_lowering_(-1) {
DCHECK_NOT_NULL(graph());
DCHECK_NOT_NULL(graph()->end());
@@ -140,7 +141,6 @@ void SimdScalarLowering::LowerGraph() {
V(I32x4Shl) \
V(I32x4ShrS) \
V(I32x4Add) \
- V(I32x4AddHoriz) \
V(I32x4Sub) \
V(I32x4Mul) \
V(I32x4MinS) \
@@ -168,11 +168,11 @@ void SimdScalarLowering::LowerGraph() {
V(S128Not) \
V(S128AndNot) \
V(S128Select) \
- V(V64x2AllTrue) \
- V(V32x4AllTrue) \
- V(V16x8AllTrue) \
+ V(I64x2AllTrue) \
+ V(I32x4AllTrue) \
+ V(I16x8AllTrue) \
V(V128AnyTrue) \
- V(V8x16AllTrue) \
+ V(I8x16AllTrue) \
V(I32x4BitMask) \
V(I32x4ExtMulLowI16x8S) \
V(I32x4ExtMulLowI16x8U) \
@@ -211,7 +211,6 @@ void SimdScalarLowering::LowerGraph() {
V(F32x4RecipApprox) \
V(F32x4RecipSqrtApprox) \
V(F32x4Add) \
- V(F32x4AddHoriz) \
V(F32x4Sub) \
V(F32x4Mul) \
V(F32x4Div) \
@@ -251,7 +250,6 @@ void SimdScalarLowering::LowerGraph() {
V(I16x8SConvertI32x4) \
V(I16x8Add) \
V(I16x8AddSatS) \
- V(I16x8AddHoriz) \
V(I16x8Sub) \
V(I16x8SubSatS) \
V(I16x8Mul) \
@@ -296,7 +294,6 @@ void SimdScalarLowering::LowerGraph() {
V(I8x16AddSatS) \
V(I8x16Sub) \
V(I8x16SubSatS) \
- V(I8x16Mul) \
V(I8x16MinS) \
V(I8x16MaxS) \
V(I8x16ShrU) \
@@ -545,12 +542,22 @@ void SimdScalarLowering::GetIndexNodes(Node* index, Node** new_indices,
}
void SimdScalarLowering::LowerLoadOp(Node* node, SimdType type) {
- MachineRepresentation rep = LoadRepresentationOf(node->op()).representation();
+ MachineRepresentation rep =
+ node->opcode() == IrOpcode::kLoadFromObject
+ ? ObjectAccessOf(node->op()).machine_type.representation()
+ : LoadRepresentationOf(node->op()).representation();
const Operator* load_op;
switch (node->opcode()) {
case IrOpcode::kLoad:
load_op = machine()->Load(MachineTypeFrom(type));
break;
+ case IrOpcode::kLoadImmutable:
+ load_op = machine()->LoadImmutable(MachineTypeFrom(type));
+ break;
+ case IrOpcode::kLoadFromObject:
+ load_op = simplified()->LoadFromObject(
+ ObjectAccess(MachineTypeFrom(type), kNoWriteBarrier));
+ break;
case IrOpcode::kUnalignedLoad:
load_op = machine()->UnalignedLoad(MachineTypeFrom(type));
break;
@@ -736,6 +743,14 @@ void SimdScalarLowering::LowerStoreOp(Node* node) {
MachineTypeFrom(rep_type).representation(), write_barrier_kind));
break;
}
+ case IrOpcode::kStoreToObject: {
+ rep = ObjectAccessOf(node->op()).machine_type.representation();
+ WriteBarrierKind write_barrier_kind =
+ ObjectAccessOf(node->op()).write_barrier_kind;
+ store_op = simplified()->StoreToObject(
+ ObjectAccess(MachineTypeFrom(rep_type), write_barrier_kind));
+ break;
+ }
case IrOpcode::kUnalignedStore: {
rep = UnalignedStoreRepresentationOf(node->op());
store_op =
@@ -1456,8 +1471,10 @@ void SimdScalarLowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kLoad:
+ case IrOpcode::kLoadFromObject:
case IrOpcode::kUnalignedLoad:
- case IrOpcode::kProtectedLoad: {
+ case IrOpcode::kProtectedLoad:
+ case IrOpcode::kLoadImmutable: {
LowerLoadOp(node, rep_type);
break;
}
@@ -1466,6 +1483,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kStore:
+ case IrOpcode::kStoreToObject:
case IrOpcode::kUnalignedStore:
case IrOpcode::kProtectedStore: {
LowerStoreOp(node);
@@ -1633,14 +1651,6 @@ void SimdScalarLowering::LowerNode(Node* node) {
I32X4_BINOP_CASE(kS128Or, Word32Or)
I32X4_BINOP_CASE(kS128Xor, Word32Xor)
#undef I32X4_BINOP_CASE
- case IrOpcode::kI32x4AddHoriz: {
- LowerBinaryOp(node, rep_type, machine()->Int32Add(), false);
- break;
- }
- case IrOpcode::kI16x8AddHoriz: {
- LowerBinaryOpForSmallInt(node, rep_type, machine()->Int32Add(), false);
- break;
- }
case IrOpcode::kI16x8Add:
case IrOpcode::kI8x16Add: {
LowerBinaryOpForSmallInt(node, rep_type, machine()->Int32Add());
@@ -1651,8 +1661,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerBinaryOpForSmallInt(node, rep_type, machine()->Int32Sub());
break;
}
- case IrOpcode::kI16x8Mul:
- case IrOpcode::kI8x16Mul: {
+ case IrOpcode::kI16x8Mul: {
LowerBinaryOpForSmallInt(node, rep_type, machine()->Int32Mul());
break;
}
@@ -1889,10 +1898,6 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerShiftOp(node, rep_type);
break;
}
- case IrOpcode::kF32x4AddHoriz: {
- LowerBinaryOp(node, rep_type, machine()->Float32Add(), false);
- break;
- }
#define F32X4_BINOP_CASE(name) \
case IrOpcode::kF32x4##name: { \
LowerBinaryOp(node, rep_type, machine()->Float32##name()); \
@@ -2264,19 +2269,19 @@ void SimdScalarLowering::LowerNode(Node* node) {
ReplaceNode(node, rep_node, 1);
break;
}
- case IrOpcode::kV64x2AllTrue: {
+ case IrOpcode::kI64x2AllTrue: {
LowerAllTrueOp(node, SimdType::kInt64x2);
break;
}
- case IrOpcode::kV32x4AllTrue: {
+ case IrOpcode::kI32x4AllTrue: {
LowerAllTrueOp(node, SimdType::kInt32x4);
break;
}
- case IrOpcode::kV16x8AllTrue: {
+ case IrOpcode::kI16x8AllTrue: {
LowerAllTrueOp(node, SimdType::kInt16x8);
break;
}
- case IrOpcode::kV8x16AllTrue: {
+ case IrOpcode::kI8x16AllTrue: {
LowerAllTrueOp(node, SimdType::kInt8x16);
break;
}
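
The AllTrue opcodes renamed above (V64x2AllTrue and friends become I64x2AllTrue and so on) reduce, once the vector has been scalarized, to a check over the individual lanes. A standalone sketch of what AllTrue and AnyTrue compute on four 32-bit lanes, assuming the usual convention that a lane counts as true iff it is non-zero:

#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>

// AllTrue holds when every lane is non-zero.
bool AllTrue(const std::array<int32_t, 4>& lanes) {
  return std::all_of(lanes.begin(), lanes.end(),
                     [](int32_t lane) { return lane != 0; });
}

// AnyTrue holds when at least one lane is non-zero.
bool AnyTrue(const std::array<int32_t, 4>& lanes) {
  return std::any_of(lanes.begin(), lanes.end(),
                     [](int32_t lane) { return lane != 0; });
}

int main() {
  assert(AllTrue({1, 2, 3, 4}));
  assert(!AllTrue({1, 0, 3, 4}));
  assert(AnyTrue({0, 0, 0, 4}));
  assert(!AnyTrue({0, 0, 0, 0}));
}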
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.h b/deps/v8/src/compiler/simd-scalar-lowering.h
index 8143f308c4e..d67c389d16d 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.h
+++ b/deps/v8/src/compiler/simd-scalar-lowering.h
@@ -11,6 +11,7 @@
#include "src/compiler/machine-graph.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-marker.h"
+#include "src/compiler/simplified-operator.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -24,6 +25,7 @@ namespace compiler {
class SimdScalarLowering {
public:
SimdScalarLowering(MachineGraph* mcgraph,
+ SimplifiedOperatorBuilder* simplified,
Signature<MachineRepresentation>* signature);
void LowerGraph();
@@ -64,6 +66,7 @@ class SimdScalarLowering {
Graph* graph() const { return mcgraph_->graph(); }
MachineOperatorBuilder* machine() const { return mcgraph_->machine(); }
CommonOperatorBuilder* common() const { return mcgraph_->common(); }
+ SimplifiedOperatorBuilder* simplified() const { return simplified_; }
Signature<MachineRepresentation>* signature() const { return signature_; }
void LowerNode(Node* node);
@@ -131,6 +134,7 @@ class SimdScalarLowering {
Node* ExtendNode(Node* node, SimdType rep_type, bool is_signed);
MachineGraph* const mcgraph_;
+ SimplifiedOperatorBuilder* const simplified_;
NodeMarker<State> state_;
ZoneDeque<NodeState> stack_;
Replacement* replacements_;
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 73c3e57c216..d5389df6ac8 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -28,7 +28,10 @@
#include "src/numbers/conversions-inl.h"
#include "src/objects/objects.h"
#include "src/utils/address-map.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/value-type.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -1833,6 +1836,7 @@ class RepresentationSelector {
SetOutput<T>(node, MachineRepresentation::kTagged);
}
+#if V8_ENABLE_WEBASSEMBLY
static MachineType MachineTypeForWasmReturnType(wasm::ValueType type) {
switch (type.kind()) {
case wasm::kI32:
@@ -1931,6 +1935,7 @@ class RepresentationSelector {
// The actual lowering of JSWasmCall nodes happens later, in the subsequent
// "wasm-inlining" phase.
}
+#endif // V8_ENABLE_WEBASSEMBLY
// Dispatching routine for visiting the node {node} with the usage {use}.
// Depending on the operator, propagate new usage info to the inputs.
@@ -3854,9 +3859,11 @@ class RepresentationSelector {
case IrOpcode::kJSToObject:
case IrOpcode::kJSToString:
case IrOpcode::kJSParseInt:
+#if V8_ENABLE_WEBASSEMBLY
if (node->opcode() == IrOpcode::kJSWasmCall) {
return VisitJSWasmCall<T>(node, lowering);
}
+#endif // V8_ENABLE_WEBASSEMBLY
VisitInputs<T>(node);
// Assume the output is tagged.
return SetOutput<T>(node, MachineRepresentation::kTagged);
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index a2103139faf..0f182222364 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -989,6 +989,7 @@ Type Typer::Visitor::TypeCall(Node* node) { return Type::Any(); }
Type Typer::Visitor::TypeFastApiCall(Node* node) { return Type::Any(); }
+#if V8_ENABLE_WEBASSEMBLY
Type Typer::Visitor::TypeJSWasmCall(Node* node) {
const JSWasmCallParameters& op_params = JSWasmCallParametersOf(node->op());
const wasm::FunctionSig* wasm_signature = op_params.signature();
@@ -997,6 +998,7 @@ Type Typer::Visitor::TypeJSWasmCall(Node* node) {
}
return Type::Any();
}
+#endif // V8_ENABLE_WEBASSEMBLY
Type Typer::Visitor::TypeProjection(Node* node) {
Type const type = Operand(node, 0);
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index 2c6f05b44a1..236cff3cc68 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -255,6 +255,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case JS_WEAK_REF_TYPE:
case JS_WEAK_SET_TYPE:
case JS_PROMISE_TYPE:
+#if V8_ENABLE_WEBASSEMBLY
case WASM_ARRAY_TYPE:
case WASM_EXCEPTION_OBJECT_TYPE:
case WASM_GLOBAL_OBJECT_TYPE:
@@ -264,6 +265,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case WASM_STRUCT_TYPE:
case WASM_TABLE_OBJECT_TYPE:
case WASM_VALUE_OBJECT_TYPE:
+#endif // V8_ENABLE_WEBASSEMBLY
case WEAK_CELL_TYPE:
DCHECK(!map.is_callable());
DCHECK(!map.is_undetectable());
@@ -343,7 +345,9 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE:
case UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE:
case COVERAGE_INFO_TYPE:
+#if V8_ENABLE_WEBASSEMBLY
case WASM_TYPE_INFO_TYPE:
+#endif // V8_ENABLE_WEBASSEMBLY
return kOtherInternal;
// Remaining instance types are unsupported for now. If any of them do
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index f3cd4789e72..b5b3ab5a981 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -1617,15 +1617,18 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, Type::ExternalPointer()); // callee
CheckValueInputIs(node, 1, Type::Any()); // receiver
break;
+#if V8_ENABLE_WEBASSEMBLY
case IrOpcode::kJSWasmCall:
CHECK_GE(value_count, 3);
CheckTypeIs(node, Type::Any());
CheckValueInputIs(node, 0, Type::Any()); // callee
break;
+#endif // V8_ENABLE_WEBASSEMBLY
// Machine operators
// -----------------------
case IrOpcode::kLoad:
+ case IrOpcode::kLoadImmutable:
case IrOpcode::kPoisonedLoad:
case IrOpcode::kProtectedLoad:
case IrOpcode::kProtectedStore:
@@ -1788,6 +1791,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kFloat64ExtractHighWord32:
case IrOpcode::kFloat64InsertLowWord32:
case IrOpcode::kFloat64InsertHighWord32:
+ case IrOpcode::kFloat32Select:
+ case IrOpcode::kFloat64Select:
case IrOpcode::kInt32PairAdd:
case IrOpcode::kInt32PairSub:
case IrOpcode::kInt32PairMul:
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index f4e99169e48..1b858f5dfcb 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -53,6 +53,7 @@
#include "src/wasm/object-access.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-constants.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-module.h"
@@ -78,64 +79,32 @@ MachineType assert_size(int expected_size, MachineType type) {
(WasmInstanceObject::k##name##OffsetEnd - \
WasmInstanceObject::k##name##Offset + 1) // NOLINT(whitespace/indent)
-#define WASM_INSTANCE_OBJECT_OFFSET(name) \
- wasm::ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset)
-
-#define LOAD_INSTANCE_FIELD(name, type) \
- gasm_->Load(assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type), \
- instance_node_.get(), WASM_INSTANCE_OBJECT_OFFSET(name))
-
-#define LOAD_FULL_POINTER(base_pointer, byte_offset) \
- gasm_->Load(MachineType::Pointer(), base_pointer, byte_offset)
-
-#define LOAD_TAGGED_POINTER(base_pointer, byte_offset) \
- gasm_->Load(MachineType::TaggedPointer(), base_pointer, byte_offset)
-
-#define LOAD_TAGGED_ANY(base_pointer, byte_offset) \
- gasm_->Load(MachineType::AnyTagged(), base_pointer, byte_offset)
-
-#define LOAD_FIXED_ARRAY_SLOT(array_node, index, type) \
- gasm_->Load(type, array_node, \
- wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index))
-
-#define LOAD_FIXED_ARRAY_SLOT_SMI(array_node, index) \
- LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::TaggedSigned())
-
-#define LOAD_FIXED_ARRAY_SLOT_PTR(array_node, index) \
- LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::TaggedPointer())
-
-#define LOAD_FIXED_ARRAY_SLOT_ANY(array_node, index) \
- LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::AnyTagged())
-
-#define STORE_RAW(base, offset, val, rep, barrier) \
- STORE_RAW_NODE_OFFSET(base, Int32Constant(offset), val, rep, barrier)
-
-#define STORE_RAW_NODE_OFFSET(base, node_offset, val, rep, barrier) \
- gasm_->Store(StoreRepresentation(rep, barrier), base, node_offset, val)
-
-// This can be used to store tagged Smi values only.
-#define STORE_FIXED_ARRAY_SLOT_SMI(array_node, index, value) \
- STORE_RAW(array_node, \
- wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index), value, \
- MachineRepresentation::kTaggedSigned, kNoWriteBarrier)
-
-// This can be used to store any tagged (Smi and HeapObject) value.
-#define STORE_FIXED_ARRAY_SLOT_ANY(array_node, index, value) \
- STORE_RAW(array_node, \
- wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index), value, \
- MachineRepresentation::kTagged, kFullWriteBarrier)
-
-void EnsureEnd(MachineGraph* mcgraph) {
- Graph* g = mcgraph->graph();
- if (g->end() == nullptr) {
- g->SetEnd(g->NewNode(mcgraph->common()->End(0)));
- }
-}
-
-void MergeControlToEnd(MachineGraph* mcgraph, Node* node) {
- EnsureEnd(mcgraph);
- NodeProperties::MergeControlToEnd(mcgraph->graph(), mcgraph->common(), node);
-}
+#define LOAD_MUTABLE_INSTANCE_FIELD(name, type) \
+ gasm_->LoadFromObject( \
+ assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type), GetInstance(), \
+ wasm::ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset))
+
+// TODO(11510): Using LoadImmutable for tagged values causes registers to be
+// spilled and added to the safepoint table, resulting in large code size
+// regressions. A possible solution would be to not spill the register at all,
+// but rather reload the value from memory. This will require non-trivial
+// changes in the register allocator and instruction selector.
+#define LOAD_INSTANCE_FIELD(name, type) \
+ (CanBeTaggedOrCompressedPointer((type).representation()) \
+ ? LOAD_MUTABLE_INSTANCE_FIELD(name, type) \
+ : gasm_->LoadImmutable( \
+ assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type), \
+ GetInstance(), \
+ wasm::ObjectAccess::ToTagged( \
+ WasmInstanceObject::k##name##Offset)))
+
+#define LOAD_ROOT(root_name, factory_name) \
+ (use_js_isolate_and_params() \
+ ? graph()->NewNode(mcgraph()->common()->HeapConstant( \
+ isolate_->factory()->factory_name())) \
+ : gasm_->LoadImmutable( \
+ MachineType::Pointer(), BuildLoadIsolateRoot(), \
+ IsolateData::root_slot_offset(RootIndex::k##root_name)))
bool ContainsSimd(const wasm::FunctionSig* sig) {
for (auto type : sig->all()) {
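
The rewritten LOAD_INSTANCE_FIELD above picks between two load kinds: fields whose representation can be a tagged or compressed pointer stay on the mutable LoadFromObject path (see the TODO(11510) note), while other instance fields go through LoadImmutable so later phases may hoist or reuse the load; fields that genuinely change at runtime, such as the memory size, use LOAD_MUTABLE_INSTANCE_FIELD directly. A hedged sketch of that decision written out as a plain helper (the helper name and signature are illustrative only; the real code is the macro above):

Node* LoadInstanceField(WasmGraphAssembler* gasm, Node* instance,
                        int tagged_offset, MachineType type) {
  if (CanBeTaggedOrCompressedPointer(type.representation())) {
    // Tagged fields keep using the mutable load; LoadImmutable on tagged
    // values currently regresses code size (see TODO(11510) above).
    return gasm->LoadFromObject(type, instance, tagged_offset);
  }
  // Untagged fields treated as immutable: the value may be freely reused.
  return gasm->LoadImmutable(type, instance, tagged_offset);
}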
@@ -170,9 +139,10 @@ constexpr Builtins::Name WasmRuntimeStubIdToBuiltinName(
}
}
-CallDescriptor* GetBuiltinCallDescriptor(Builtins::Name name, Zone* zone,
- StubCallMode stub_mode,
- bool needs_frame_state = false) {
+CallDescriptor* GetBuiltinCallDescriptor(
+ Builtins::Name name, Zone* zone, StubCallMode stub_mode,
+ bool needs_frame_state = false,
+ Operator::Properties properties = Operator::kNoProperties) {
CallInterfaceDescriptor interface_descriptor =
Builtins::CallInterfaceDescriptorFor(name);
return Linkage::GetStubCallDescriptor(
@@ -181,17 +151,16 @@ CallDescriptor* GetBuiltinCallDescriptor(Builtins::Name name, Zone* zone,
interface_descriptor.GetStackParameterCount(), // stack parameter count
needs_frame_state ? CallDescriptor::kNeedsFrameState
: CallDescriptor::kNoFlags, // flags
- Operator::kNoProperties, // properties
+ properties, // properties
stub_mode); // stub call mode
}
-Node* GetBuiltinPointerTarget(MachineGraph* mcgraph,
- Builtins::Name builtin_id) {
- static_assert(std::is_same<Smi, BuiltinPtr>(), "BuiltinPtr must be Smi");
- return mcgraph->graph()->NewNode(
- mcgraph->common()->NumberConstant(builtin_id));
+ObjectAccess ObjectAccessForGCStores(wasm::ValueType type) {
+ return ObjectAccess(
+ MachineType::TypeForRepresentation(type.machine_representation(),
+ !type.is_packed()),
+ type.is_reference() ? kFullWriteBarrier : kNoWriteBarrier);
}
-
} // namespace
JSWasmCallData::JSWasmCallData(const wasm::FunctionSig* wasm_signature)
@@ -208,7 +177,7 @@ JSWasmCallData::JSWasmCallData(const wasm::FunctionSig* wasm_signature)
class WasmGraphAssembler : public GraphAssembler {
public:
WasmGraphAssembler(MachineGraph* mcgraph, Zone* zone)
- : GraphAssembler(mcgraph, zone) {}
+ : GraphAssembler(mcgraph, zone), simplified_(zone) {}
template <typename... Args>
Node* CallRuntimeStub(wasm::WasmCode::RuntimeStubId stub_id, Args*... args) {
@@ -223,21 +192,45 @@ class WasmGraphAssembler : public GraphAssembler {
}
template <typename... Args>
- Node* CallBuiltin(Builtins::Name name, Args*... args) {
- // We would like to use gasm_->Call() to implement this method,
- // but this doesn't work currently when we try to call it from functions
- // which set IfSuccess/IfFailure control paths (e.g. within Throw()).
- // TODO(manoskouk): Maybe clean this up at some point and unite with
- // CallRuntimeStub?
+ Node* CallBuiltin(Builtins::Name name, Operator::Properties properties,
+ Args*... args) {
auto* call_descriptor = GetBuiltinCallDescriptor(
- name, temp_zone(), StubCallMode::kCallBuiltinPointer);
- Node* call_target = GetBuiltinPointerTarget(mcgraph(), name);
- Node* call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
- call_target, args..., effect(), control());
- InitializeEffectControl(call, control());
- return call;
+ name, temp_zone(), StubCallMode::kCallBuiltinPointer, false,
+ properties);
+ Node* call_target = GetBuiltinPointerTarget(name);
+ return Call(call_descriptor, call_target, args...);
}
+ void EnsureEnd() {
+ if (graph()->end() == nullptr) {
+ graph()->SetEnd(graph()->NewNode(mcgraph()->common()->End(0)));
+ }
+ }
+
+ void MergeControlToEnd(Node* node) {
+ EnsureEnd();
+ NodeProperties::MergeControlToEnd(graph(), mcgraph()->common(), node);
+ }
+
+ void AssertFalse(Node* condition) {
+#if DEBUG
+ if (FLAG_debug_code) {
+ auto ok = MakeLabel();
+ GotoIfNot(condition, &ok);
+ EnsureEnd();
+ Unreachable();
+ Bind(&ok);
+ }
+#endif
+ }
+
+ Node* GetBuiltinPointerTarget(Builtins::Name builtin_id) {
+ static_assert(std::is_same<Smi, BuiltinPtr>(), "BuiltinPtr must be Smi");
+ return NumberConstant(builtin_id);
+ }
+
+ // Sets {true_node} and {false_node} to their corresponding Branch outputs.
+ // Returns the Branch node. Does not change control().
Node* Branch(Node* cond, Node** true_node, Node** false_node,
BranchHint hint) {
DCHECK_NOT_NULL(cond);
@@ -256,6 +249,36 @@ class WasmGraphAssembler : public GraphAssembler {
// Rule of thumb: if access to a given field in an object is required in
// at least two places, put a helper function here.
+ Node* LoadFromObject(MachineType type, Node* base, Node* offset) {
+ return AddNode(graph()->NewNode(
+ simplified_.LoadFromObject(ObjectAccess(type, kNoWriteBarrier)), base,
+ offset, effect(), control()));
+ }
+
+ Node* LoadFromObject(MachineType type, Node* base, int offset) {
+ return LoadFromObject(type, base, IntPtrConstant(offset));
+ }
+
+ Node* LoadImmutable(LoadRepresentation rep, Node* base, Node* offset) {
+ return AddNode(graph()->NewNode(mcgraph()->machine()->LoadImmutable(rep),
+ base, offset));
+ }
+
+ Node* LoadImmutable(LoadRepresentation rep, Node* base, int offset) {
+ return LoadImmutable(rep, base, IntPtrConstant(offset));
+ }
+
+ Node* StoreToObject(ObjectAccess access, Node* base, Node* offset,
+ Node* value) {
+ return AddNode(graph()->NewNode(simplified_.StoreToObject(access), base,
+ offset, value, effect(), control()));
+ }
+
+ Node* StoreToObject(ObjectAccess access, Node* base, int offset,
+ Node* value) {
+ return StoreToObject(access, base, IntPtrConstant(offset), value);
+ }
+
Node* IsI31(Node* object) {
if (COMPRESS_POINTERS_BOOL) {
return Word32Equal(Word32And(object, Int32Constant(kSmiTagMask)),
@@ -269,124 +292,129 @@ class WasmGraphAssembler : public GraphAssembler {
// Maps and their contents.
Node* LoadMap(Node* heap_object) {
- return Load(MachineType::TaggedPointer(), heap_object,
- wasm::ObjectAccess::ToTagged(HeapObject::kMapOffset));
+ return LoadFromObject(MachineType::TaggedPointer(), heap_object,
+ wasm::ObjectAccess::ToTagged(HeapObject::kMapOffset));
}
Node* LoadInstanceType(Node* map) {
- return Load(MachineType::Uint16(), map,
- wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset));
+ return LoadFromObject(
+ MachineType::Uint16(), map,
+ wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset));
}
Node* LoadWasmTypeInfo(Node* map) {
int offset = Map::kConstructorOrBackPointerOrNativeContextOffset;
- return Load(MachineType::TaggedPointer(), map,
- wasm::ObjectAccess::ToTagged(offset));
+ return LoadFromObject(MachineType::TaggedPointer(), map,
+ wasm::ObjectAccess::ToTagged(offset));
}
Node* LoadSupertypes(Node* wasm_type_info) {
- return Load(MachineType::TaggedPointer(), wasm_type_info,
- wasm::ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset));
+ return LoadFromObject(
+ MachineType::TaggedPointer(), wasm_type_info,
+ wasm::ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset));
}
// FixedArrays.
Node* LoadFixedArrayLengthAsSmi(Node* fixed_array) {
- return Load(MachineType::TaggedSigned(), fixed_array,
- wasm::ObjectAccess::ToTagged(FixedArray::kLengthOffset));
- }
- Node* LoadFixedArrayElement(Node* fixed_array, int index,
- MachineType type = MachineType::AnyTagged()) {
- return Load(type, fixed_array,
- wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index));
+ return LoadFromObject(
+ MachineType::TaggedSigned(), fixed_array,
+ wasm::ObjectAccess::ToTagged(FixedArray::kLengthOffset));
}
+
Node* LoadFixedArrayElement(Node* fixed_array, Node* index_intptr,
MachineType type = MachineType::AnyTagged()) {
Node* offset = IntAdd(
IntMul(index_intptr, IntPtrConstant(kTaggedSize)),
IntPtrConstant(wasm::ObjectAccess::ToTagged(FixedArray::kHeaderSize)));
- return Load(type, fixed_array, offset);
+ return LoadFromObject(type, fixed_array, offset);
+ }
+
+ Node* LoadFixedArrayElement(Node* array, int index, MachineType type) {
+ return LoadFromObject(
+ type, array,
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index));
+ }
+
+ Node* LoadFixedArrayElementSmi(Node* array, int index) {
+ return LoadFixedArrayElement(array, index, MachineType::TaggedSigned());
+ }
+
+ Node* LoadFixedArrayElementPtr(Node* array, int index) {
+ return LoadFixedArrayElement(array, index, MachineType::TaggedPointer());
+ }
+
+ Node* LoadFixedArrayElementAny(Node* array, int index) {
+ return LoadFixedArrayElement(array, index, MachineType::AnyTagged());
+ }
+
+ Node* StoreFixedArrayElement(Node* array, int index, Node* value,
+ ObjectAccess access) {
+ return StoreToObject(
+ access, array,
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index), value);
+ }
+
+ Node* StoreFixedArrayElementSmi(Node* array, int index, Node* value) {
+ return StoreFixedArrayElement(
+ array, index, value,
+ ObjectAccess(MachineType::TaggedSigned(), kNoWriteBarrier));
+ }
+
+ Node* StoreFixedArrayElementAny(Node* array, int index, Node* value) {
+ return StoreFixedArrayElement(
+ array, index, value,
+ ObjectAccess(MachineType::AnyTagged(), kFullWriteBarrier));
}
// Functions, SharedFunctionInfos, FunctionData.
Node* LoadSharedFunctionInfo(Node* js_function) {
- return Load(
+ return LoadFromObject(
MachineType::TaggedPointer(), js_function,
wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction());
}
Node* LoadContextFromJSFunction(Node* js_function) {
- return Load(MachineType::TaggedPointer(), js_function,
- wasm::ObjectAccess::ContextOffsetInTaggedJSFunction());
+ return LoadFromObject(
+ MachineType::TaggedPointer(), js_function,
+ wasm::ObjectAccess::ContextOffsetInTaggedJSFunction());
}
Node* LoadFunctionDataFromJSFunction(Node* js_function) {
Node* shared = LoadSharedFunctionInfo(js_function);
- return Load(
+ return LoadFromObject(
MachineType::TaggedPointer(), shared,
wasm::ObjectAccess::ToTagged(SharedFunctionInfo::kFunctionDataOffset));
}
Node* LoadExportedFunctionIndexAsSmi(Node* exported_function_data) {
- return Load(MachineType::TaggedSigned(), exported_function_data,
- wasm::ObjectAccess::ToTagged(
- WasmExportedFunctionData::kFunctionIndexOffset));
+ return LoadFromObject(MachineType::TaggedSigned(), exported_function_data,
+ wasm::ObjectAccess::ToTagged(
+ WasmExportedFunctionData::kFunctionIndexOffset));
}
Node* LoadExportedFunctionInstance(Node* exported_function_data) {
- return Load(MachineType::TaggedPointer(), exported_function_data,
- wasm::ObjectAccess::ToTagged(
- WasmExportedFunctionData::kInstanceOffset));
+ return LoadFromObject(MachineType::TaggedPointer(), exported_function_data,
+ wasm::ObjectAccess::ToTagged(
+ WasmExportedFunctionData::kInstanceOffset));
}
// JavaScript objects.
Node* LoadJSArrayElements(Node* js_array) {
- return Load(MachineType::AnyTagged(), js_array,
- wasm::ObjectAccess::ToTagged(JSObject::kElementsOffset));
+ return LoadFromObject(
+ MachineType::AnyTagged(), js_array,
+ wasm::ObjectAccess::ToTagged(JSObject::kElementsOffset));
}
// WasmGC objects.
- MachineType FieldType(const wasm::StructType* type, uint32_t field_index,
- bool is_signed) {
- return MachineType::TypeForRepresentation(
- type->field(field_index).machine_representation(), is_signed);
- }
-
Node* FieldOffset(const wasm::StructType* type, uint32_t field_index) {
return IntPtrConstant(wasm::ObjectAccess::ToTagged(
WasmStruct::kHeaderSize + type->field_offset(field_index)));
}
- // It's guaranteed that struct/array fields are aligned to min(field_size,
- // kTaggedSize), with the latter being 4 or 8 depending on platform and
- // pointer compression. So on our most common configurations, 8-byte types
- // must use unaligned loads/stores.
- Node* LoadWithTaggedAlignment(MachineType type, Node* base, Node* offset) {
- if (ElementSizeInBytes(type.representation()) > kTaggedSize) {
- return LoadUnaligned(type, base, offset);
- } else {
- return Load(type, base, offset);
- }
- }
-
- // Same alignment considerations as above.
- Node* StoreWithTaggedAlignment(Node* base, Node* offset, Node* value,
- wasm::ValueType type) {
- MachineRepresentation rep = type.machine_representation();
- if (ElementSizeInBytes(rep) > kTaggedSize) {
- return StoreUnaligned(rep, base, offset, value);
- } else {
- WriteBarrierKind write_barrier =
- type.is_reference_type() ? kPointerWriteBarrier : kNoWriteBarrier;
- StoreRepresentation store_rep(rep, write_barrier);
- return Store(store_rep, base, offset, value);
- }
- }
-
Node* StoreStructField(Node* struct_object, const wasm::StructType* type,
uint32_t field_index, Node* value) {
- return StoreWithTaggedAlignment(struct_object,
- FieldOffset(type, field_index), value,
- type->field(field_index));
+ return StoreToObject(ObjectAccessForGCStores(type->field(field_index)),
+ struct_object, FieldOffset(type, field_index), value);
}
Node* WasmArrayElementOffset(Node* index, wasm::ValueType element_type) {
@@ -396,8 +424,9 @@ class WasmGraphAssembler : public GraphAssembler {
}
Node* LoadWasmArrayLength(Node* array) {
- return Load(MachineType::Uint32(), array,
- wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset));
+ return LoadFromObject(
+ MachineType::Uint32(), array,
+ wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset));
}
Node* IsDataRefMap(Node* map) {
@@ -430,12 +459,17 @@ class WasmGraphAssembler : public GraphAssembler {
Node* instance_type = LoadInstanceType(map);
return Word32Equal(instance_type, Int32Constant(type));
}
+
+ SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+
+ private:
+ SimplifiedOperatorBuilder simplified_;
};
WasmGraphBuilder::WasmGraphBuilder(
wasm::CompilationEnv* env, Zone* zone, MachineGraph* mcgraph,
const wasm::FunctionSig* sig,
- compiler::SourcePositionTable* source_position_table)
+ compiler::SourcePositionTable* source_position_table, Isolate* isolate)
: gasm_(std::make_unique<WasmGraphAssembler>(mcgraph, zone)),
zone_(zone),
mcgraph_(mcgraph),
@@ -443,7 +477,8 @@ WasmGraphBuilder::WasmGraphBuilder(
has_simd_(ContainsSimd(sig)),
untrusted_code_mitigations_(FLAG_untrusted_code_mitigations),
sig_(sig),
- source_position_table_(source_position_table) {
+ source_position_table_(source_position_table),
+ isolate_(isolate) {
DCHECK_IMPLIES(use_trap_handler(), trap_handler::IsTrapHandlerEnabled());
DCHECK_NOT_NULL(mcgraph_);
}
@@ -452,25 +487,45 @@ WasmGraphBuilder::WasmGraphBuilder(
// available.
WasmGraphBuilder::~WasmGraphBuilder() = default;
-Node* WasmGraphBuilder::Start(unsigned params) {
+void WasmGraphBuilder::Start(unsigned params) {
Node* start = graph()->NewNode(mcgraph()->common()->Start(params));
graph()->SetStart(start);
- return start;
-}
-
-Node* WasmGraphBuilder::Param(unsigned index) {
- return gasm_->Parameter(index);
+ SetEffectControl(start);
+ // Initialize parameter nodes.
+ parameters_ = zone_->NewArray<Node*>(params);
+ for (unsigned i = 0; i < params; i++) {
+ parameters_[i] = nullptr;
+ }
+ // Initialize instance node.
+ instance_node_ =
+ use_js_isolate_and_params()
+ ? gasm_->LoadExportedFunctionInstance(
+ gasm_->LoadFunctionDataFromJSFunction(
+ Param(Linkage::kJSCallClosureParamIndex, "%closure")))
+ : Param(wasm::kWasmInstanceParameterIndex);
+}
+
+Node* WasmGraphBuilder::Param(int index, const char* debug_name) {
+ DCHECK_NOT_NULL(graph()->start());
+ // Turbofan allows negative parameter indices.
+ static constexpr int kMinParameterIndex = -1;
+ DCHECK_GE(index, kMinParameterIndex);
+ int array_index = index - kMinParameterIndex;
+ if (parameters_[array_index] == nullptr) {
+ parameters_[array_index] = graph()->NewNode(
+ mcgraph()->common()->Parameter(index, debug_name), graph()->start());
+ }
+ return parameters_[array_index];
}
Node* WasmGraphBuilder::Loop(Node* entry) {
return graph()->NewNode(mcgraph()->common()->Loop(1), entry);
}
-Node* WasmGraphBuilder::TerminateLoop(Node* effect, Node* control) {
+void WasmGraphBuilder::TerminateLoop(Node* effect, Node* control) {
Node* terminate =
graph()->NewNode(mcgraph()->common()->Terminate(), effect, control);
- MergeControlToEnd(mcgraph(), terminate);
- return terminate;
+ gasm_->MergeControlToEnd(terminate);
}
Node* WasmGraphBuilder::LoopExit(Node* loop_node) {
@@ -490,11 +545,10 @@ Node* WasmGraphBuilder::LoopExitValue(Node* value,
value, control());
}
-Node* WasmGraphBuilder::TerminateThrow(Node* effect, Node* control) {
+void WasmGraphBuilder::TerminateThrow(Node* effect, Node* control) {
Node* terminate =
graph()->NewNode(mcgraph()->common()->Throw(), effect, control);
- MergeControlToEnd(mcgraph(), terminate);
- return terminate;
+ gasm_->MergeControlToEnd(terminate);
}
bool WasmGraphBuilder::IsPhiWithMerge(Node* phi, Node* merge) {
@@ -556,22 +610,7 @@ Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects_and_control) {
effects_and_control);
}
-Node* WasmGraphBuilder::RefNull() {
- // Technically speaking, this does not generate a valid graph since the effect
- // of the last Load is not consumed.
- // TODO(manoskouk): Remove this code once we implement Load elimination
- // optimization for wasm.
- if (!ref_null_node_.is_set()) {
- Node* current_effect = effect();
- Node* current_control = control();
- SetEffectControl(mcgraph()->graph()->start());
- ref_null_node_.set(LOAD_FULL_POINTER(
- BuildLoadIsolateRoot(),
- IsolateData::root_slot_offset(RootIndex::kNullValue)));
- SetEffectControl(current_effect, current_control);
- }
- return ref_null_node_.get();
-}
+Node* WasmGraphBuilder::RefNull() { return LOAD_ROOT(NullValue, null_value); }
Node* WasmGraphBuilder::RefFunc(uint32_t function_index) {
return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmRefFunc,
@@ -589,12 +628,16 @@ Node* WasmGraphBuilder::NoContextConstant() {
return mcgraph()->IntPtrConstant(0);
}
+Node* WasmGraphBuilder::GetInstance() { return instance_node_.get(); }
+
Node* WasmGraphBuilder::BuildLoadIsolateRoot() {
- // The IsolateRoot is loaded from the instance node so that the generated
- // code is Isolate independent. This can be overridden by setting a specific
- // node in {isolate_root_node_} beforehand.
- if (isolate_root_node_.is_set()) return isolate_root_node_.get();
- return LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
+ if (use_js_isolate_and_params()) {
+ return mcgraph()->IntPtrConstant(isolate_->isolate_root());
+ } else {
+ // For wasm functions, the IsolateRoot is loaded from the instance node so
+ // that the generated code is Isolate independent.
+ return LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
+ }
}
Node* WasmGraphBuilder::Int32Constant(int32_t value) {
@@ -613,7 +656,7 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position) {
Node* limit_address =
LOAD_INSTANCE_FIELD(StackLimitAddress, MachineType::Pointer());
- Node* limit = gasm_->Load(MachineType::Pointer(), limit_address, 0);
+ Node* limit = gasm_->LoadFromObject(MachineType::Pointer(), limit_address, 0);
Node* check = SetEffect(graph()->NewNode(
mcgraph()->machine()->StackPointerGreaterThan(StackCheckKind::kWasm),
@@ -625,6 +668,12 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position) {
if (stack_check_call_operator_ == nullptr) {
// Build and cache the stack check call operator and the constant
// representing the stack check code.
+
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ stack_check_code_node_.set(mcgraph()->RelocatableIntPtrConstant(
+ wasm::WasmCode::kWasmStackGuard, RelocInfo::WASM_STUB_CALL));
+
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), // zone
NoContextDescriptor{}, // descriptor
@@ -632,10 +681,6 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position) {
CallDescriptor::kNoFlags, // flags
Operator::kNoProperties, // properties
StubCallMode::kCallWasmRuntimeStub); // stub call mode
- // A direct call to a wasm runtime stub defined in this module.
- // Just encode the stub index. This will be patched at relocation.
- stack_check_code_node_.set(mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kWasmStackGuard, RelocInfo::WASM_STUB_CALL));
stack_check_call_operator_ = mcgraph()->common()->Call(call_descriptor);
}
@@ -1256,6 +1301,28 @@ Node* WasmGraphBuilder::BranchExpectFalse(Node* cond, Node** true_node,
return gasm_->Branch(cond, true_node, false_node, BranchHint::kFalse);
}
+Node* WasmGraphBuilder::Select(Node *cond, Node* true_node,
+ Node* false_node, wasm::ValueType type) {
+ MachineOperatorBuilder* m = mcgraph()->machine();
+ wasm::ValueKind kind = type.kind();
+ // Lower to select if supported.
+ if (kind == wasm::kF32 && m->Float32Select().IsSupported()) {
+ return mcgraph()->graph()->NewNode(m->Float32Select().op(), cond,
+ true_node, false_node);
+ }
+ if (kind == wasm::kF64 && m->Float64Select().IsSupported()) {
+ return mcgraph()->graph()->NewNode(m->Float64Select().op(), cond,
+ true_node, false_node);
+ }
+ // Default to control-flow.
+ Node* controls[2];
+ BranchNoHint(cond, &controls[0], &controls[1]);
+ Node* merge = Merge(2, controls);
+ SetControl(merge);
+ Node* inputs[] = {true_node, false_node, merge};
+ return Phi(type, 2, inputs);
+}
+
TrapId WasmGraphBuilder::GetTrapIdForTrap(wasm::TrapReason reason) {
// TODO(wasm): "!env_" should not happen when compiling an actual wasm
// function.
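
The new WasmGraphBuilder::Select above emits a single machine select (Float32Select/Float64Select) when the target reports support and otherwise falls back to a Branch/Merge/Phi diamond; either way both arms are already evaluated, so this is a value select rather than short-circuiting control flow, and the machine form simply avoids a branch on targets with conditional-select instructions. A standalone sketch of the value semantics (illustrative C++, not V8 code):

#include <cstdint>

// What both the machine select and the branch+phi fallback compute.
double SelectF64(int32_t cond, double if_true, double if_false) {
  return cond != 0 ? if_true : if_false;
}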
@@ -1280,58 +1347,54 @@ TrapId WasmGraphBuilder::GetTrapIdForTrap(wasm::TrapReason reason) {
}
}
-Node* WasmGraphBuilder::TrapIfTrue(wasm::TrapReason reason, Node* cond,
- wasm::WasmCodePosition position) {
+void WasmGraphBuilder::TrapIfTrue(wasm::TrapReason reason, Node* cond,
+ wasm::WasmCodePosition position) {
TrapId trap_id = GetTrapIdForTrap(reason);
Node* node = SetControl(graph()->NewNode(mcgraph()->common()->TrapIf(trap_id),
cond, effect(), control()));
SetSourcePosition(node, position);
- return node;
}
-Node* WasmGraphBuilder::TrapIfFalse(wasm::TrapReason reason, Node* cond,
- wasm::WasmCodePosition position) {
+void WasmGraphBuilder::TrapIfFalse(wasm::TrapReason reason, Node* cond,
+ wasm::WasmCodePosition position) {
TrapId trap_id = GetTrapIdForTrap(reason);
Node* node = SetControl(graph()->NewNode(
mcgraph()->common()->TrapUnless(trap_id), cond, effect(), control()));
SetSourcePosition(node, position);
- return node;
}
// Add a check that traps if {node} is equal to {val}.
-Node* WasmGraphBuilder::TrapIfEq32(wasm::TrapReason reason, Node* node,
- int32_t val,
- wasm::WasmCodePosition position) {
+void WasmGraphBuilder::TrapIfEq32(wasm::TrapReason reason, Node* node,
+ int32_t val,
+ wasm::WasmCodePosition position) {
Int32Matcher m(node);
- if (m.HasResolvedValue() && !m.Is(val)) return graph()->start();
+ if (m.HasResolvedValue() && !m.Is(val)) return;
if (val == 0) {
- return TrapIfFalse(reason, node, position);
+ TrapIfFalse(reason, node, position);
} else {
- return TrapIfTrue(reason, gasm_->Word32Equal(node, Int32Constant(val)),
- position);
+ TrapIfTrue(reason, gasm_->Word32Equal(node, Int32Constant(val)), position);
}
}
// Add a check that traps if {node} is zero.
-Node* WasmGraphBuilder::ZeroCheck32(wasm::TrapReason reason, Node* node,
- wasm::WasmCodePosition position) {
- return TrapIfEq32(reason, node, 0, position);
+void WasmGraphBuilder::ZeroCheck32(wasm::TrapReason reason, Node* node,
+ wasm::WasmCodePosition position) {
+ TrapIfEq32(reason, node, 0, position);
}
// Add a check that traps if {node} is equal to {val}.
-Node* WasmGraphBuilder::TrapIfEq64(wasm::TrapReason reason, Node* node,
- int64_t val,
- wasm::WasmCodePosition position) {
+void WasmGraphBuilder::TrapIfEq64(wasm::TrapReason reason, Node* node,
+ int64_t val,
+ wasm::WasmCodePosition position) {
Int64Matcher m(node);
- if (m.HasResolvedValue() && !m.Is(val)) return graph()->start();
- return TrapIfTrue(reason, gasm_->Word64Equal(node, Int64Constant(val)),
- position);
+ if (m.HasResolvedValue() && !m.Is(val)) return;
+ TrapIfTrue(reason, gasm_->Word64Equal(node, Int64Constant(val)), position);
}
// Add a check that traps if {node} is zero.
-Node* WasmGraphBuilder::ZeroCheck64(wasm::TrapReason reason, Node* node,
- wasm::WasmCodePosition position) {
- return TrapIfEq64(reason, node, 0, position);
+void WasmGraphBuilder::ZeroCheck64(wasm::TrapReason reason, Node* node,
+ wasm::WasmCodePosition position) {
+ TrapIfEq64(reason, node, 0, position);
}
Node* WasmGraphBuilder::Switch(unsigned count, Node* key) {
@@ -1365,15 +1428,15 @@ Node* WasmGraphBuilder::Return(Vector<Node*> vals) {
Node* ret = graph()->NewNode(mcgraph()->common()->Return(count), count + 3,
buf.data());
- MergeControlToEnd(mcgraph(), ret);
+ gasm_->MergeControlToEnd(ret);
return ret;
}
-Node* WasmGraphBuilder::Trap(wasm::TrapReason reason,
- wasm::WasmCodePosition position) {
+void WasmGraphBuilder::Trap(wasm::TrapReason reason,
+ wasm::WasmCodePosition position) {
TrapIfFalse(reason, Int32Constant(0), position);
- Return(Vector<Node*>{});
- return nullptr;
+ // Connect control to end via a Throw() node.
+ TerminateThrow(effect(), control());
}
Node* WasmGraphBuilder::MaskShiftCount32(Node* node) {
@@ -2142,7 +2205,7 @@ Node* WasmGraphBuilder::BuildCFuncInstruction(ExternalReference ref,
Node* function = gasm_->ExternalConstant(ref);
BuildCCall(&sig, function, stack_slot);
- return gasm_->Load(type, stack_slot, 0);
+ return gasm_->LoadFromObject(type, stack_slot, 0);
}
Node* WasmGraphBuilder::BuildF32SConvertI64(Node* input) {
@@ -2184,13 +2247,12 @@ Node* WasmGraphBuilder::BuildIntToFloatConversionInstruction(
MachineSignature sig(0, 1, sig_types);
Node* function = gasm_->ExternalConstant(ref);
BuildCCall(&sig, function, stack_slot);
- return gasm_->Load(result_type, stack_slot, 0);
+ return gasm_->LoadFromObject(result_type, stack_slot, 0);
}
namespace {
-ExternalReference convert_ccall_ref(WasmGraphBuilder* builder,
- wasm::WasmOpcode opcode) {
+ExternalReference convert_ccall_ref(wasm::WasmOpcode opcode) {
switch (opcode) {
case wasm::kExprI64SConvertF32:
case wasm::kExprI64SConvertSatF32:
@@ -2216,7 +2278,7 @@ Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input,
wasm::WasmOpcode opcode) {
const MachineType int_ty = IntConvertType(opcode);
const MachineType float_ty = FloatConvertType(opcode);
- ExternalReference call_ref = convert_ccall_ref(this, opcode);
+ ExternalReference call_ref = convert_ccall_ref(opcode);
int stack_slot_size = std::max(ElementSizeInBytes(int_ty.representation()),
ElementSizeInBytes(float_ty.representation()));
Node* stack_slot =
@@ -2230,7 +2292,7 @@ Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input,
Node* overflow = BuildCCall(&sig, function, stack_slot);
if (IsTrappingConvertOp(opcode)) {
ZeroCheck32(wasm::kTrapFloatUnrepresentable, overflow, position);
- return gasm_->Load(int_ty, stack_slot, 0);
+ return gasm_->LoadFromObject(int_ty, stack_slot, 0);
}
Node* test = Binop(wasm::kExprI32Eq, overflow, Int32Constant(0), position);
Diamond tl_d(graph(), mcgraph()->common(), test, BranchHint::kFalse);
@@ -2243,7 +2305,7 @@ Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input,
sat_d.Nest(nan_d, false);
Node* sat_val =
sat_d.Phi(int_ty.representation(), Min(this, int_ty), Max(this, int_ty));
- Node* load = gasm_->Load(int_ty, stack_slot, 0);
+ Node* load = gasm_->LoadFromObject(int_ty, stack_slot, 0);
Node* nan_val =
nan_d.Phi(int_ty.representation(), Zero(this, int_ty), sat_val);
return tl_d.Phi(int_ty.representation(), nan_val, load);
@@ -2251,7 +2313,28 @@ Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input,
Node* WasmGraphBuilder::MemoryGrow(Node* input) {
needs_stack_check_ = true;
- return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmMemoryGrow, input);
+ if (!env_->module->is_memory64) {
+ // For 32-bit memories, just call the builtin.
+ return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmMemoryGrow, input);
+ }
+
+ // If the input is not a positive int32, growing will always fail
+ // (growing negative or requesting >= 256 TB).
+ Node* old_effect = effect();
+ Diamond is_32_bit(graph(), mcgraph()->common(),
+ gasm_->Uint64LessThanOrEqual(input, Int64Constant(kMaxInt)),
+ BranchHint::kTrue);
+ is_32_bit.Chain(control());
+
+ SetControl(is_32_bit.if_true);
+
+ Node* grow_result = gasm_->ChangeInt32ToInt64(gasm_->CallRuntimeStub(
+ wasm::WasmCode::kWasmMemoryGrow, gasm_->TruncateInt64ToInt32(input)));
+
+ Node* diamond_result = is_32_bit.Phi(MachineRepresentation::kWord64,
+ grow_result, gasm_->Int64Constant(-1));
+ SetEffectControl(is_32_bit.EffectPhi(effect(), old_effect), is_32_bit.merge);
+ return diamond_result;
}
Node* WasmGraphBuilder::Throw(uint32_t exception_index,
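
MemoryGrow above now handles 64-bit memories: the grow delta is a 64-bit value, but the runtime stub still takes a 32-bit count, so any request above kMaxInt is answered with -1 (failure) without calling the stub, and a successful 32-bit result is widened back to 64 bits. A standalone sketch of that contract (illustrative C++, not V8 code; grow32 stands in for the runtime stub):

#include <cstdint>
#include <limits>

int64_t MemoryGrow64(uint64_t delta_pages, int32_t (*grow32)(int32_t)) {
  // Requests that do not fit in a positive int32 can never succeed.
  if (delta_pages >
      static_cast<uint64_t>(std::numeric_limits<int32_t>::max())) {
    return -1;
  }
  // Delegate to the 32-bit grow path and widen the result (failure stays -1).
  return static_cast<int64_t>(grow32(static_cast<int32_t>(delta_pages)));
}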
@@ -2307,12 +2390,12 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
case wasm::kOptRef:
case wasm::kRtt:
case wasm::kRttWithDepth:
- STORE_FIXED_ARRAY_SLOT_ANY(values_array, index, value);
+ gasm_->StoreFixedArrayElementAny(values_array, index, value);
++index;
break;
case wasm::kI8:
case wasm::kI16:
- case wasm::kStmt:
+ case wasm::kVoid:
case wasm::kBottom:
UNREACHABLE();
}
@@ -2332,22 +2415,22 @@ void WasmGraphBuilder::BuildEncodeException32BitValue(Node* values_array,
Node* value) {
Node* upper_halfword_as_smi =
BuildChangeUint31ToSmi(gasm_->Word32Shr(value, Int32Constant(16)));
- STORE_FIXED_ARRAY_SLOT_SMI(values_array, *index, upper_halfword_as_smi);
+ gasm_->StoreFixedArrayElementSmi(values_array, *index, upper_halfword_as_smi);
++(*index);
Node* lower_halfword_as_smi =
BuildChangeUint31ToSmi(gasm_->Word32And(value, Int32Constant(0xFFFFu)));
- STORE_FIXED_ARRAY_SLOT_SMI(values_array, *index, lower_halfword_as_smi);
+ gasm_->StoreFixedArrayElementSmi(values_array, *index, lower_halfword_as_smi);
++(*index);
}
Node* WasmGraphBuilder::BuildDecodeException32BitValue(Node* values_array,
uint32_t* index) {
- Node* upper =
- BuildChangeSmiToInt32(LOAD_FIXED_ARRAY_SLOT_SMI(values_array, *index));
+ Node* upper = BuildChangeSmiToInt32(
+ gasm_->LoadFixedArrayElementSmi(values_array, *index));
(*index)++;
upper = gasm_->Word32Shl(upper, Int32Constant(16));
- Node* lower =
- BuildChangeSmiToInt32(LOAD_FIXED_ARRAY_SLOT_SMI(values_array, *index));
+ Node* lower = BuildChangeSmiToInt32(
+ gasm_->LoadFixedArrayElementSmi(values_array, *index));
(*index)++;
Node* value = gasm_->Word32Or(upper, lower);
return value;
@@ -2379,16 +2462,15 @@ Node* WasmGraphBuilder::ExceptionTagEqual(Node* caught_tag,
Node* WasmGraphBuilder::LoadExceptionTagFromTable(uint32_t exception_index) {
Node* exceptions_table =
LOAD_INSTANCE_FIELD(ExceptionsTable, MachineType::TaggedPointer());
- Node* tag = LOAD_FIXED_ARRAY_SLOT_PTR(exceptions_table, exception_index);
+ Node* tag =
+ gasm_->LoadFixedArrayElementPtr(exceptions_table, exception_index);
return tag;
}
Node* WasmGraphBuilder::GetExceptionTag(Node* except_obj) {
return gasm_->CallBuiltin(
- Builtins::kWasmGetOwnProperty, except_obj,
- LOAD_FULL_POINTER(
- BuildLoadIsolateRoot(),
- IsolateData::root_slot_offset(RootIndex::kwasm_exception_tag_symbol)),
+ Builtins::kWasmGetOwnProperty, Operator::kEliminatable, except_obj,
+ LOAD_ROOT(wasm_exception_tag_symbol, wasm_exception_tag_symbol),
LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
}
@@ -2396,10 +2478,8 @@ Node* WasmGraphBuilder::GetExceptionValues(Node* except_obj,
const wasm::WasmException* exception,
Vector<Node*> values) {
Node* values_array = gasm_->CallBuiltin(
- Builtins::kWasmGetOwnProperty, except_obj,
- LOAD_FULL_POINTER(BuildLoadIsolateRoot(),
- IsolateData::root_slot_offset(
- RootIndex::kwasm_exception_values_symbol)),
+ Builtins::kWasmGetOwnProperty, Operator::kEliminatable, except_obj,
+ LOAD_ROOT(wasm_exception_values_symbol, wasm_exception_values_symbol),
LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
uint32_t index = 0;
const wasm::WasmExceptionSig* sig = exception->sig;
@@ -2441,12 +2521,12 @@ Node* WasmGraphBuilder::GetExceptionValues(Node* except_obj,
case wasm::kOptRef:
case wasm::kRtt:
case wasm::kRttWithDepth:
- value = LOAD_FIXED_ARRAY_SLOT_ANY(values_array, index);
+ value = gasm_->LoadFixedArrayElementAny(values_array, index);
++index;
break;
case wasm::kI8:
case wasm::kI16:
- case wasm::kStmt:
+ case wasm::kVoid:
case wasm::kBottom:
UNREACHABLE();
}
@@ -2743,7 +2823,7 @@ Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,
ZeroCheck32(trap_zero, call, position);
TrapIfEq32(wasm::kTrapDivUnrepresentable, call, -1, position);
- return gasm_->Load(result_type, stack_slot, 0);
+ return gasm_->LoadFromObject(result_type, stack_slot, 0);
}
template <typename... Args>
@@ -2765,8 +2845,7 @@ Node* WasmGraphBuilder::BuildCallNode(const wasm::FunctionSig* sig,
Node* instance_node, const Operator* op,
Node* frame_state) {
if (instance_node == nullptr) {
- DCHECK_NOT_NULL(instance_node_);
- instance_node = instance_node_.get();
+ instance_node = GetInstance();
}
needs_stack_check_ = true;
const size_t params = sig->parameter_count();
@@ -2809,6 +2888,8 @@ Node* WasmGraphBuilder::BuildWasmCall(const wasm::FunctionSig* sig,
const Operator* op = mcgraph()->common()->Call(call_descriptor);
Node* call =
BuildCallNode(sig, args, position, instance_node, op, frame_state);
+ // TODO(manoskouk): Don't always set control if we ever add properties to wasm
+ // calls.
SetControl(call);
size_t ret_count = sig->return_count();
@@ -2838,7 +2919,9 @@ Node* WasmGraphBuilder::BuildWasmReturnCall(const wasm::FunctionSig* sig,
const Operator* op = mcgraph()->common()->TailCall(call_descriptor);
Node* call = BuildCallNode(sig, args, position, instance_node, op);
- MergeControlToEnd(mcgraph(), call);
+ // TODO(manoskouk): {call} will not always be a control node if we ever add
+ // properties to wasm calls.
+ gasm_->MergeControlToEnd(call);
return call;
}
@@ -2848,28 +2931,8 @@ Node* WasmGraphBuilder::BuildImportCall(const wasm::FunctionSig* sig,
wasm::WasmCodePosition position,
int func_index,
IsReturnCall continuation) {
- // Load the imported function refs array from the instance.
- Node* imported_function_refs =
- LOAD_INSTANCE_FIELD(ImportedFunctionRefs, MachineType::TaggedPointer());
- Node* ref_node =
- LOAD_FIXED_ARRAY_SLOT_PTR(imported_function_refs, func_index);
-
- // Load the target from the imported_targets array at a known offset.
- Node* imported_targets =
- LOAD_INSTANCE_FIELD(ImportedFunctionTargets, MachineType::Pointer());
- Node* target_node = gasm_->Load(MachineType::Pointer(), imported_targets,
- func_index * kSystemPointerSize);
- args[0] = target_node;
- const UseRetpoline use_retpoline =
- untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline;
-
- switch (continuation) {
- case kCallContinues:
- return BuildWasmCall(sig, args, rets, position, ref_node, use_retpoline);
- case kReturnCall:
- DCHECK(rets.empty());
- return BuildWasmReturnCall(sig, args, position, ref_node, use_retpoline);
- }
+ return BuildImportCall(sig, args, rets, position,
+ gasm_->Uint32Constant(func_index), continuation);
}
Node* WasmGraphBuilder::BuildImportCall(const wasm::FunctionSig* sig,
@@ -2891,8 +2954,8 @@ Node* WasmGraphBuilder::BuildImportCall(const wasm::FunctionSig* sig,
func_index_intptr, gasm_->IntPtrConstant(kSystemPointerSize));
Node* imported_targets =
LOAD_INSTANCE_FIELD(ImportedFunctionTargets, MachineType::Pointer());
- Node* target_node = gasm_->Load(MachineType::Pointer(), imported_targets,
- func_index_times_pointersize);
+ Node* target_node = gasm_->LoadFromObject(
+ MachineType::Pointer(), imported_targets, func_index_times_pointersize);
args[0] = target_node;
const UseRetpoline use_retpoline =
untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline;
@@ -2938,34 +3001,34 @@ void WasmGraphBuilder::LoadIndirectFunctionTable(uint32_t table_index,
Node** ift_targets,
Node** ift_instances) {
if (table_index == 0) {
- *ift_size =
- LOAD_INSTANCE_FIELD(IndirectFunctionTableSize, MachineType::Uint32());
- *ift_sig_ids = LOAD_INSTANCE_FIELD(IndirectFunctionTableSigIds,
- MachineType::Pointer());
- *ift_targets = LOAD_INSTANCE_FIELD(IndirectFunctionTableTargets,
- MachineType::Pointer());
- *ift_instances = LOAD_INSTANCE_FIELD(IndirectFunctionTableRefs,
- MachineType::TaggedPointer());
+ *ift_size = LOAD_MUTABLE_INSTANCE_FIELD(IndirectFunctionTableSize,
+ MachineType::Uint32());
+ *ift_sig_ids = LOAD_MUTABLE_INSTANCE_FIELD(IndirectFunctionTableSigIds,
+ MachineType::Pointer());
+ *ift_targets = LOAD_MUTABLE_INSTANCE_FIELD(IndirectFunctionTableTargets,
+ MachineType::Pointer());
+ *ift_instances = LOAD_MUTABLE_INSTANCE_FIELD(IndirectFunctionTableRefs,
+ MachineType::TaggedPointer());
return;
}
- Node* ift_tables =
- LOAD_INSTANCE_FIELD(IndirectFunctionTables, MachineType::TaggedPointer());
- Node* ift_table = LOAD_FIXED_ARRAY_SLOT_ANY(ift_tables, table_index);
+ Node* ift_tables = LOAD_MUTABLE_INSTANCE_FIELD(IndirectFunctionTables,
+ MachineType::TaggedPointer());
+ Node* ift_table = gasm_->LoadFixedArrayElementAny(ift_tables, table_index);
- *ift_size = gasm_->Load(
+ *ift_size = gasm_->LoadFromObject(
MachineType::Int32(), ift_table,
wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kSizeOffset));
- *ift_sig_ids = gasm_->Load(
+ *ift_sig_ids = gasm_->LoadFromObject(
MachineType::Pointer(), ift_table,
wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kSigIdsOffset));
- *ift_targets = gasm_->Load(
+ *ift_targets = gasm_->LoadFromObject(
MachineType::Pointer(), ift_table,
wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kTargetsOffset));
- *ift_instances = gasm_->Load(
+ *ift_instances = gasm_->LoadFromObject(
MachineType::TaggedPointer(), ift_table,
wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kRefsOffset));
}
@@ -3005,28 +3068,35 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
key = gasm_->Word32And(key, mask);
}
- Node* int32_scaled_key =
- Uint32ToUintptr(gasm_->Word32Shl(key, Int32Constant(2)));
-
- Node* loaded_sig =
- gasm_->Load(MachineType::Int32(), ift_sig_ids, int32_scaled_key);
- // Check that the dynamic type of the function is a subtype of its static
- // (table) type. Currently, the only subtyping between function types is
- // $t <: funcref for all $t: function_type.
- // TODO(7748): Expand this with function subtyping.
- const bool needs_typechecking =
- env_->module->tables[table_index].type == wasm::kWasmFuncRef;
- if (needs_typechecking) {
- int32_t expected_sig_id = env_->module->canonicalized_type_ids[sig_index];
- Node* sig_match =
- gasm_->Word32Equal(loaded_sig, Int32Constant(expected_sig_id));
- TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
- } else {
- // We still have to check that the entry is initialized.
- // TODO(9495): Skip this check for non-nullable tables when they are
- // allowed.
- Node* function_is_null = gasm_->Word32Equal(loaded_sig, Int32Constant(-1));
- TrapIfTrue(wasm::kTrapNullDereference, function_is_null, position);
+ const wasm::ValueType table_type = env_->module->tables[table_index].type;
+ // Check that the table entry is not null and that the type of the function is
+ // a subtype of the function type declared at the call site. In the absence of
+ // function subtyping, the latter can only happen if the table type is (ref
+ // null? func). Also, subtyping reduces to normalized signature equality
+ // checking.
+ // TODO(7748): Expand this with function subtyping once we have that.
+ const bool needs_signature_check =
+ table_type.is_reference_to(wasm::HeapType::kFunc) ||
+ table_type.is_nullable();
+ if (needs_signature_check) {
+ Node* int32_scaled_key =
+ Uint32ToUintptr(gasm_->Word32Shl(key, Int32Constant(2)));
+
+ Node* loaded_sig = gasm_->LoadFromObject(MachineType::Int32(), ift_sig_ids,
+ int32_scaled_key);
+
+ if (table_type.is_reference_to(wasm::HeapType::kFunc)) {
+ int32_t expected_sig_id = env_->module->canonicalized_type_ids[sig_index];
+ Node* sig_match =
+ gasm_->Word32Equal(loaded_sig, Int32Constant(expected_sig_id));
+ TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
+ } else {
+ // If the table entries are nullable, we still have to check that the
+ // entry is initialized.
+ Node* function_is_null =
+ gasm_->Word32Equal(loaded_sig, Int32Constant(-1));
+ TrapIfTrue(wasm::kTrapNullDereference, function_is_null, position);
+ }
}
Node* key_intptr = Uint32ToUintptr(key);
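
The rewritten call_indirect check above first decides whether any check is needed at all: tables typed exactly as funcref require a signature comparison against the canonicalized signature id, while nullable tables of other function types still require a null check, where -1 in the signature-id table marks an uninitialized entry. A standalone sketch of that decision (illustrative C++, not V8 code; the two traps are reduced to return codes):

#include <cstdint>

enum class IndirectCallCheck { kOk, kSigMismatchTrap, kNullDereferenceTrap };

IndirectCallCheck CheckIndirectCallTarget(int32_t loaded_sig,
                                          bool table_is_funcref,
                                          bool table_is_nullable,
                                          int32_t expected_sig) {
  if (table_is_funcref) {
    // A null entry (-1) also fails here, since no real signature id is -1.
    return loaded_sig == expected_sig ? IndirectCallCheck::kOk
                                      : IndirectCallCheck::kSigMismatchTrap;
  }
  if (table_is_nullable) {
    return loaded_sig == -1 ? IndirectCallCheck::kNullDereferenceTrap
                            : IndirectCallCheck::kOk;
  }
  return IndirectCallCheck::kOk;  // non-nullable, non-funcref: nothing to check
}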
@@ -3037,8 +3107,8 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
Node* intptr_scaled_key =
gasm_->IntMul(key_intptr, gasm_->IntPtrConstant(kSystemPointerSize));
- Node* target =
- gasm_->Load(MachineType::Pointer(), ift_targets, intptr_scaled_key);
+ Node* target = gasm_->LoadFromObject(MachineType::Pointer(), ift_targets,
+ intptr_scaled_key);
args[0] = target;
const UseRetpoline use_retpoline =
@@ -3056,10 +3126,10 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
Node* WasmGraphBuilder::BuildLoadJumpTableOffsetFromExportedFunctionData(
Node* function_data) {
- Node* jump_table_offset_smi =
- gasm_->Load(MachineType::TaggedSigned(), function_data,
- wasm::ObjectAccess::ToTagged(
- WasmExportedFunctionData::kJumpTableOffsetOffset));
+ Node* jump_table_offset_smi = gasm_->LoadFromObject(
+ MachineType::TaggedSigned(), function_data,
+ wasm::ObjectAccess::ToTagged(
+ WasmExportedFunctionData::kJumpTableOffsetOffset));
return BuildChangeSmiToIntPtr(jump_table_offset_smi);
}
@@ -3095,11 +3165,11 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index, Vector<Node*> args,
auto imported_label = gasm_->MakeLabel();
- // Check if callee is a locally defined or imported function it its module.
- Node* imported_function_refs =
- gasm_->Load(MachineType::TaggedPointer(), callee_instance,
- wasm::ObjectAccess::ToTagged(
- WasmInstanceObject::kImportedFunctionRefsOffset));
+ // Check if callee is a locally defined or imported function in its module.
+ Node* imported_function_refs = gasm_->LoadFromObject(
+ MachineType::TaggedPointer(), callee_instance,
+ wasm::ObjectAccess::ToTagged(
+ WasmInstanceObject::kImportedFunctionRefsOffset));
Node* imported_functions_num =
gasm_->LoadFixedArrayLengthAsSmi(imported_function_refs);
gasm_->GotoIf(gasm_->SmiLessThan(function_index, imported_functions_num),
@@ -3107,9 +3177,9 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index, Vector<Node*> args,
{
// Function locally defined in module.
Node* jump_table_start =
- gasm_->Load(MachineType::Pointer(), callee_instance,
- wasm::ObjectAccess::ToTagged(
- WasmInstanceObject::kJumpTableStartOffset));
+ gasm_->LoadFromObject(MachineType::Pointer(), callee_instance,
+ wasm::ObjectAccess::ToTagged(
+ WasmInstanceObject::kJumpTableStartOffset));
Node* jump_table_offset =
BuildLoadJumpTableOffsetFromExportedFunctionData(function_data);
Node* jump_table_slot =
@@ -3128,15 +3198,15 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index, Vector<Node*> args,
imported_function_refs, function_index_intptr,
MachineType::TaggedPointer());
- Node* imported_function_targets =
- gasm_->Load(MachineType::Pointer(), callee_instance,
- wasm::ObjectAccess::ToTagged(
- WasmInstanceObject::kImportedFunctionTargetsOffset));
+ Node* imported_function_targets = gasm_->LoadFromObject(
+ MachineType::Pointer(), callee_instance,
+ wasm::ObjectAccess::ToTagged(
+ WasmInstanceObject::kImportedFunctionTargetsOffset));
- Node* target_node =
- gasm_->Load(MachineType::Pointer(), imported_function_targets,
- gasm_->IntMul(function_index_intptr,
- gasm_->IntPtrConstant(kSystemPointerSize)));
+ Node* target_node = gasm_->LoadFromObject(
+ MachineType::Pointer(), imported_function_targets,
+ gasm_->IntMul(function_index_intptr,
+ gasm_->IntPtrConstant(kSystemPointerSize)));
gasm_->Goto(&end_label, target_node, imported_instance);
}
@@ -3149,21 +3219,22 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index, Vector<Node*> args,
// (current WasmInstanceObject, function_data->callable()).
gasm_->Bind(&js_label);
- Node* wrapper_code =
- gasm_->Load(MachineType::TaggedPointer(), function_data,
- wasm::ObjectAccess::ToTagged(
- WasmJSFunctionData::kWasmToJsWrapperCodeOffset));
+ Node* wrapper_code = gasm_->LoadFromObject(
+ MachineType::TaggedPointer(), function_data,
+ wasm::ObjectAccess::ToTagged(
+ WasmJSFunctionData::kWasmToJsWrapperCodeOffset));
Node* call_target = gasm_->IntAdd(
wrapper_code,
gasm_->IntPtrConstant(wasm::ObjectAccess::ToTagged(Code::kHeaderSize)));
- Node* callable = gasm_->Load(
+ Node* callable = gasm_->LoadFromObject(
MachineType::TaggedPointer(), function_data,
wasm::ObjectAccess::ToTagged(WasmJSFunctionData::kCallableOffset));
// TODO(manoskouk): Find an elegant way to avoid allocating this pair for
// every call.
- Node* function_instance_node = gasm_->CallBuiltin(
- Builtins::kWasmAllocatePair, instance_node_.get(), callable);
+ Node* function_instance_node =
+ gasm_->CallBuiltin(Builtins::kWasmAllocatePair, Operator::kEliminatable,
+ GetInstance(), callable);
gasm_->Goto(&end_label, call_target, function_instance_node);
}
@@ -3226,13 +3297,10 @@ Node* WasmGraphBuilder::ReturnCallIndirect(uint32_t table_index,
kReturnCall);
}
-Node* WasmGraphBuilder::BrOnNull(Node* ref_object, Node** null_node,
- Node** non_null_node) {
+void WasmGraphBuilder::BrOnNull(Node* ref_object, Node** null_node,
+ Node** non_null_node) {
BranchExpectFalse(gasm_->WordEqual(ref_object, RefNull()), null_node,
non_null_node);
- // Return value is not used, but we need it for compatibility
- // with graph-builder-interface.
- return nullptr;
}
Node* WasmGraphBuilder::BuildI32Rol(Node* left, Node* right) {
@@ -3329,15 +3397,14 @@ Node* WasmGraphBuilder::BuildConvertUint32ToSmiWithSaturation(Node* value,
void WasmGraphBuilder::InitInstanceCache(
WasmInstanceCacheNodes* instance_cache) {
- DCHECK_NOT_NULL(instance_node_);
// Load the memory start.
instance_cache->mem_start =
- LOAD_INSTANCE_FIELD(MemoryStart, MachineType::UintPtr());
+ LOAD_MUTABLE_INSTANCE_FIELD(MemoryStart, MachineType::UintPtr());
// Load the memory size.
instance_cache->mem_size =
- LOAD_INSTANCE_FIELD(MemorySize, MachineType::UintPtr());
+ LOAD_MUTABLE_INSTANCE_FIELD(MemorySize, MachineType::UintPtr());
if (untrusted_code_mitigations_) {
// Load the memory mask.
@@ -3455,42 +3522,22 @@ void WasmGraphBuilder::SetEffectControl(Node* effect, Node* control) {
}
Node* WasmGraphBuilder::GetImportedMutableGlobals() {
- if (imported_mutable_globals_ == nullptr) {
- // Load imported_mutable_globals_ from the instance object at runtime.
- imported_mutable_globals_ =
- LOAD_INSTANCE_FIELD(ImportedMutableGlobals, MachineType::UintPtr());
- }
- return imported_mutable_globals_.get();
+ return LOAD_INSTANCE_FIELD(ImportedMutableGlobals, MachineType::UintPtr());
}
void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type,
const wasm::WasmGlobal& global,
Node** base_node,
Node** offset_node) {
- DCHECK_NOT_NULL(instance_node_);
if (global.mutability && global.imported) {
- *base_node =
- gasm_->Load(MachineType::UintPtr(), GetImportedMutableGlobals(),
- Int32Constant(global.index * sizeof(Address)));
+ *base_node = gasm_->LoadFromObject(
+ MachineType::UintPtr(), GetImportedMutableGlobals(),
+ Int32Constant(global.index * sizeof(Address)));
*offset_node = Int32Constant(0);
} else {
- if (globals_start_ == nullptr) {
- // Load globals_start from the instance object at runtime.
- // TODO(wasm): we currently generate only one load of the {globals_start}
- // start per graph, which means it can be placed anywhere by the
- // scheduler. This is legal because the globals_start should never change.
- // However, in some cases (e.g. if the instance object is already in a
- // register), it is slightly more efficient to reload this value from the
- // instance object. Since this depends on register allocation, it is not
- // possible to express in the graph, and would essentially constitute a
- // "mem2reg" optimization in TurboFan.
- globals_start_ = graph()->NewNode(
- mcgraph()->machine()->Load(MachineType::UintPtr()),
- instance_node_.get(),
- Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(GlobalsStart)),
- graph()->start(), graph()->start());
- }
- *base_node = globals_start_.get();
+ Node* globals_start =
+ LOAD_INSTANCE_FIELD(GlobalsStart, MachineType::UintPtr());
+ *base_node = globals_start;
*offset_node = Int32Constant(global.offset);
if (mem_type == MachineType::Simd128() && global.offset != 0) {
@@ -3506,12 +3553,13 @@ void WasmGraphBuilder::GetBaseAndOffsetForImportedMutableExternRefGlobal(
// Load the base from the ImportedMutableGlobalsBuffer of the instance.
Node* buffers = LOAD_INSTANCE_FIELD(ImportedMutableGlobalsBuffers,
MachineType::TaggedPointer());
- *base = LOAD_FIXED_ARRAY_SLOT_ANY(buffers, global.index);
+ *base = gasm_->LoadFixedArrayElementAny(buffers, global.index);
// For the offset we need the index of the global in the buffer, and then
// calculate the actual offset from the index. Load the index from the
// ImportedMutableGlobals array of the instance.
- Node* index = gasm_->Load(MachineType::UintPtr(), GetImportedMutableGlobals(),
+ Node* index =
+ gasm_->LoadFromObject(MachineType::UintPtr(), GetImportedMutableGlobals(),
Int32Constant(global.index * sizeof(Address)));
// From the index, calculate the actual offset in the FixedArray. This
@@ -3562,8 +3610,9 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
DCHECK_EQ(1, fun->result_size);
auto centry_id =
Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit;
- Node* centry_stub = LOAD_FULL_POINTER(
- isolate_root, IsolateData::builtin_slot_offset(centry_id));
+ Node* centry_stub =
+ gasm_->LoadFromObject(MachineType::Pointer(), isolate_root,
+ IsolateData::builtin_slot_offset(centry_id));
// TODO(titzer): allow arbitrary number of runtime arguments
// At the moment we only allow 5 parameters. If more parameters are needed,
// increase this constant accordingly.
@@ -3594,16 +3643,16 @@ Node* WasmGraphBuilder::BuildCallToRuntime(Runtime::FunctionId f,
Node* WasmGraphBuilder::GlobalGet(uint32_t index) {
const wasm::WasmGlobal& global = env_->module->globals[index];
- if (global.type.is_reference_type()) {
+ if (global.type.is_reference()) {
if (global.mutability && global.imported) {
Node* base = nullptr;
Node* offset = nullptr;
GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &base, &offset);
- return gasm_->Load(MachineType::AnyTagged(), base, offset);
+ return gasm_->LoadFromObject(MachineType::AnyTagged(), base, offset);
}
Node* globals_buffer =
LOAD_INSTANCE_FIELD(TaggedGlobalsBuffer, MachineType::TaggedPointer());
- return LOAD_FIXED_ARRAY_SLOT_ANY(globals_buffer, global.offset);
+ return gasm_->LoadFixedArrayElementAny(globals_buffer, global.offset);
}
MachineType mem_type = global.type.machine_type();
@@ -3613,6 +3662,8 @@ Node* WasmGraphBuilder::GlobalGet(uint32_t index) {
Node* base = nullptr;
Node* offset = nullptr;
GetGlobalBaseAndOffset(mem_type, global, &base, &offset);
+ // TODO(manoskouk): Cannot use LoadFromObject here due to
+ // GetGlobalBaseAndOffset pointer arithmetic.
Node* result = gasm_->Load(mem_type, base, offset);
#if defined(V8_TARGET_BIG_ENDIAN)
result = BuildChangeEndiannessLoad(result, mem_type, global.type);
@@ -3620,20 +3671,23 @@ Node* WasmGraphBuilder::GlobalGet(uint32_t index) {
return result;
}
-Node* WasmGraphBuilder::GlobalSet(uint32_t index, Node* val) {
+void WasmGraphBuilder::GlobalSet(uint32_t index, Node* val) {
const wasm::WasmGlobal& global = env_->module->globals[index];
- if (global.type.is_reference_type()) {
+ if (global.type.is_reference()) {
if (global.mutability && global.imported) {
Node* base = nullptr;
Node* offset = nullptr;
GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &base, &offset);
- return STORE_RAW_NODE_OFFSET(
- base, offset, val, MachineRepresentation::kTagged, kFullWriteBarrier);
+ gasm_->StoreToObject(
+ ObjectAccess(MachineType::AnyTagged(), kFullWriteBarrier), base,
+ offset, val);
+ return;
}
Node* globals_buffer =
LOAD_INSTANCE_FIELD(TaggedGlobalsBuffer, MachineType::TaggedPointer());
- return STORE_FIXED_ARRAY_SLOT_ANY(globals_buffer, global.offset, val);
+ gasm_->StoreFixedArrayElementAny(globals_buffer, global.offset, val);
+ return;
}
MachineType mem_type = global.type.machine_type();
@@ -3648,8 +3702,9 @@ Node* WasmGraphBuilder::GlobalSet(uint32_t index, Node* val) {
#if defined(V8_TARGET_BIG_ENDIAN)
val = BuildChangeEndiannessStore(val, mem_type.representation(), global.type);
#endif
-
- return gasm_->Store(store_rep, base, offset, val);
+ // TODO(manoskouk): Cannot use StoreToObject here due to
+ // GetGlobalBaseAndOffset pointer arithmetic.
+ gasm_->Store(store_rep, base, offset, val);
}
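
On big-endian targets the store above first routes {val} through BuildChangeEndiannessStore. As a purely illustrative standalone sketch of that kind of conversion (a 32-bit byte swap in plain C++, not the V8 helper itself):

    #include <cstdint>
    #include <cstdio>

    // Standalone sketch: the byte reordering a big-endian target needs
    // before storing a 32-bit little-endian wasm value (illustration
    // only; not the BuildChangeEndiannessStore implementation).
    static uint32_t ByteSwap32(uint32_t v) {
      return ((v & 0x000000FFu) << 24) | ((v & 0x0000FF00u) << 8) |
             ((v & 0x00FF0000u) >> 8) | ((v & 0xFF000000u) >> 24);
    }

    int main() {
      std::printf("%08x\n", ByteSwap32(0x11223344u));  // prints 44332211
      return 0;
    }
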
Node* WasmGraphBuilder::TableGet(uint32_t table_index, Node* index,
@@ -3658,10 +3713,10 @@ Node* WasmGraphBuilder::TableGet(uint32_t table_index, Node* index,
gasm_->IntPtrConstant(table_index), index);
}
-Node* WasmGraphBuilder::TableSet(uint32_t table_index, Node* index, Node* val,
- wasm::WasmCodePosition position) {
- return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmTableSet,
- gasm_->IntPtrConstant(table_index), index, val);
+void WasmGraphBuilder::TableSet(uint32_t table_index, Node* index, Node* val,
+ wasm::WasmCodePosition position) {
+ gasm_->CallRuntimeStub(wasm::WasmCode::kWasmTableSet,
+ gasm_->IntPtrConstant(table_index), index, val);
}
Node* WasmGraphBuilder::CheckBoundsAndAlignment(
@@ -3815,14 +3870,13 @@ const Operator* WasmGraphBuilder::GetSafeStoreOperator(int offset,
return mcgraph()->machine()->UnalignedStore(store_rep);
}
-Node* WasmGraphBuilder::TraceFunctionEntry(wasm::WasmCodePosition position) {
+void WasmGraphBuilder::TraceFunctionEntry(wasm::WasmCodePosition position) {
Node* call = BuildCallToRuntime(Runtime::kWasmTraceEnter, nullptr, 0);
SetSourcePosition(call, position);
- return call;
}
-Node* WasmGraphBuilder::TraceFunctionExit(Vector<Node*> vals,
- wasm::WasmCodePosition position) {
+void WasmGraphBuilder::TraceFunctionExit(Vector<Node*> vals,
+ wasm::WasmCodePosition position) {
Node* info = gasm_->IntPtrConstant(0);
size_t num_returns = vals.size();
if (num_returns == 1) {
@@ -3837,13 +3891,12 @@ Node* WasmGraphBuilder::TraceFunctionExit(Vector<Node*> vals,
Node* call = BuildCallToRuntime(Runtime::kWasmTraceExit, &info, 1);
SetSourcePosition(call, position);
- return call;
}
-Node* WasmGraphBuilder::TraceMemoryOperation(bool is_store,
- MachineRepresentation rep,
- Node* index, uintptr_t offset,
- wasm::WasmCodePosition position) {
+void WasmGraphBuilder::TraceMemoryOperation(bool is_store,
+ MachineRepresentation rep,
+ Node* index, uintptr_t offset,
+ wasm::WasmCodePosition position) {
int kAlign = 4; // Ensure that the LSB is 0, such that this looks like a Smi.
TNode<RawPtrT> info =
gasm_->StackSlot(sizeof(wasm::MemoryTracingInfo), kAlign);
@@ -3865,7 +3918,6 @@ Node* WasmGraphBuilder::TraceMemoryOperation(bool is_store,
Node* call =
BuildCallToRuntime(Runtime::kWasmTraceMemory, args, arraysize(args));
SetSourcePosition(call, position);
- return call;
}
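
The kAlign = 4 for the MemoryTracingInfo stack slot keeps the slot address 4-byte aligned, so its least-significant bit is 0 and the raw pointer can be handed to the runtime where a Smi is expected. A minimal standalone check of that property, assuming the usual scheme where a Smi carries a 0 tag bit in the LSB:

    #include <cassert>
    #include <cstdint>

    // Standalone sketch: a 4-byte-aligned address has a clear low bit,
    // so it passes a Smi tag check (Smi tag == 0 in the LSB).
    static bool LooksLikeSmi(uintptr_t raw) { return (raw & 1) == 0; }

    int main() {
      alignas(4) static int32_t slot[2] = {0, 0};
      assert(LooksLikeSmi(reinterpret_cast<uintptr_t>(&slot[0])));
      return 0;
    }
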
namespace {
@@ -4115,17 +4167,6 @@ Node* WasmGraphBuilder::LoadTransform(wasm::ValueType type, MachineType memtype,
return load;
}
-Node* WasmGraphBuilder::Prefetch(Node* index, uint64_t offset,
- uint32_t alignment, bool temporal) {
- uintptr_t capped_offset = static_cast<uintptr_t>(offset);
- const Operator* prefetchOp =
- temporal ? mcgraph()->machine()->PrefetchTemporal()
- : mcgraph()->machine()->PrefetchNonTemporal();
- Node* prefetch = SetEffect(graph()->NewNode(
- prefetchOp, MemBuffer(capped_offset), index, effect(), control()));
- return prefetch;
-}
-
Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
Node* index, uint64_t offset,
uint32_t alignment,
@@ -4177,12 +4218,11 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
return load;
}
-Node* WasmGraphBuilder::StoreLane(MachineRepresentation mem_rep, Node* index,
- uint64_t offset, uint32_t alignment,
- Node* val, uint8_t laneidx,
- wasm::WasmCodePosition position,
- wasm::ValueType type) {
- Node* store;
+void WasmGraphBuilder::StoreLane(MachineRepresentation mem_rep, Node* index,
+ uint64_t offset, uint32_t alignment, Node* val,
+ uint8_t laneidx,
+ wasm::WasmCodePosition position,
+ wasm::ValueType type) {
has_simd_ = true;
index = BoundsCheckMem(i::ElementSizeInBytes(mem_rep), index, offset,
position, kCanOmitBoundsCheck);
@@ -4206,13 +4246,13 @@ Node* WasmGraphBuilder::StoreLane(MachineRepresentation mem_rep, Node* index,
} else {
UNREACHABLE();
}
- store = StoreMem(mem_rep, index, offset, alignment, output, position, type);
+ StoreMem(mem_rep, index, offset, alignment, output, position, type);
#else
MachineType memtype = MachineType(mem_rep, MachineSemantic::kNone);
MemoryAccessKind load_kind =
GetMemoryAccessKind(mcgraph(), memtype, use_trap_handler());
- store = SetEffect(graph()->NewNode(
+ Node* store = SetEffect(graph()->NewNode(
mcgraph()->machine()->StoreLane(load_kind, mem_rep, laneidx),
MemBuffer(capped_offset), index, val, effect(), control()));
@@ -4223,16 +4263,12 @@ Node* WasmGraphBuilder::StoreLane(MachineRepresentation mem_rep, Node* index,
if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(true, mem_rep, index, capped_offset, position);
}
-
- return store;
}
-Node* WasmGraphBuilder::StoreMem(MachineRepresentation mem_rep, Node* index,
- uint64_t offset, uint32_t alignment, Node* val,
- wasm::WasmCodePosition position,
- wasm::ValueType type) {
- Node* store;
-
+void WasmGraphBuilder::StoreMem(MachineRepresentation mem_rep, Node* index,
+ uint64_t offset, uint32_t alignment, Node* val,
+ wasm::WasmCodePosition position,
+ wasm::ValueType type) {
if (mem_rep == MachineRepresentation::kSimd128) {
has_simd_ = true;
}
@@ -4249,46 +4285,25 @@ Node* WasmGraphBuilder::StoreMem(MachineRepresentation mem_rep, Node* index,
if (mem_rep == MachineRepresentation::kWord8 ||
mcgraph()->machine()->UnalignedStoreSupported(mem_rep)) {
if (use_trap_handler()) {
- store =
+ Node* store =
gasm_->ProtectedStore(mem_rep, MemBuffer(capped_offset), index, val);
SetSourcePosition(store, position);
} else {
- store = gasm_->Store(StoreRepresentation{mem_rep, kNoWriteBarrier},
- MemBuffer(capped_offset), index, val);
+ gasm_->Store(StoreRepresentation{mem_rep, kNoWriteBarrier},
+ MemBuffer(capped_offset), index, val);
}
} else {
// TODO(eholk): Support unaligned stores with trap handlers.
DCHECK(!use_trap_handler());
UnalignedStoreRepresentation rep(mem_rep);
- store = gasm_->StoreUnaligned(rep, MemBuffer(capped_offset), index, val);
+ gasm_->StoreUnaligned(rep, MemBuffer(capped_offset), index, val);
}
if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(true, mem_rep, index, capped_offset, position);
}
-
- return store;
}
-namespace {
-Node* GetAsmJsOOBValue(MachineRepresentation rep, MachineGraph* mcgraph) {
- switch (rep) {
- case MachineRepresentation::kWord8:
- case MachineRepresentation::kWord16:
- case MachineRepresentation::kWord32:
- return mcgraph->Int32Constant(0);
- case MachineRepresentation::kWord64:
- return mcgraph->Int64Constant(0);
- case MachineRepresentation::kFloat32:
- return mcgraph->Float32Constant(std::numeric_limits<float>::quiet_NaN());
- case MachineRepresentation::kFloat64:
- return mcgraph->Float64Constant(std::numeric_limits<double>::quiet_NaN());
- default:
- UNREACHABLE();
- }
-}
-} // namespace
-
Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
DCHECK_NOT_NULL(instance_cache_);
Node* mem_start = instance_cache_->mem_start;
@@ -4317,8 +4332,28 @@ Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
Node* load = graph()->NewNode(mcgraph()->machine()->Load(type), mem_start,
index, effect(), bounds_check.if_true);
SetEffectControl(bounds_check.EffectPhi(load, effect()), bounds_check.merge);
- return bounds_check.Phi(type.representation(), load,
- GetAsmJsOOBValue(type.representation(), mcgraph()));
+
+ Node* oob_value;
+ switch (type.representation()) {
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ oob_value = Int32Constant(0);
+ break;
+ case MachineRepresentation::kWord64:
+ oob_value = Int64Constant(0);
+ break;
+ case MachineRepresentation::kFloat32:
+ oob_value = Float32Constant(std::numeric_limits<float>::quiet_NaN());
+ break;
+ case MachineRepresentation::kFloat64:
+ oob_value = Float64Constant(std::numeric_limits<double>::quiet_NaN());
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ return bounds_check.Phi(type.representation(), load, oob_value);
}
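
With the GetAsmJsOOBValue helper deleted above, the out-of-bounds result of an asm.js load is now picked inline: integer representations yield 0, float representations yield a quiet NaN. The same selection as a standalone C++ sketch (plain values instead of TurboFan constant nodes):

    #include <cstdint>
    #include <limits>
    #include <variant>

    enum class Rep { kWord32, kWord64, kFloat32, kFloat64 };
    using OobValue = std::variant<int32_t, int64_t, float, double>;

    // Standalone sketch: the default value an out-of-bounds asm.js load
    // produces, per machine representation (mirrors the switch above).
    OobValue AsmJsOobValue(Rep rep) {
      switch (rep) {
        case Rep::kWord32:
          return int32_t{0};
        case Rep::kWord64:
          return int64_t{0};
        case Rep::kFloat32:
          return std::numeric_limits<float>::quiet_NaN();
        case Rep::kFloat64:
          return std::numeric_limits<double>::quiet_NaN();
      }
      return int32_t{0};  // unreachable for a well-formed Rep
    }
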
Node* WasmGraphBuilder::Uint32ToUintptr(Node* node) {
@@ -4489,7 +4524,8 @@ CallDescriptor* WasmGraphBuilder::GetI64AtomicWaitCallDescriptor() {
void WasmGraphBuilder::LowerInt64(Signature<MachineRepresentation>* sig) {
if (mcgraph()->machine()->Is64()) return;
Int64Lowering r(mcgraph()->graph(), mcgraph()->machine(), mcgraph()->common(),
- mcgraph()->zone(), sig, std::move(lowering_special_case_));
+ gasm_->simplified(), mcgraph()->zone(), sig,
+ std::move(lowering_special_case_));
r.LowerGraph();
}
@@ -4497,12 +4533,6 @@ void WasmGraphBuilder::LowerInt64(CallOrigin origin) {
LowerInt64(CreateMachineSignature(mcgraph()->zone(), sig_, origin));
}
-void WasmGraphBuilder::SimdScalarLoweringForTesting() {
- SimdScalarLowering(mcgraph(), CreateMachineSignature(mcgraph()->zone(), sig_,
- kCalledFromWasm))
- .LowerGraph();
-}
-
void WasmGraphBuilder::SetSourcePosition(Node* node,
wasm::WasmCodePosition position) {
DCHECK_NE(position, wasm::kNoCodePosition);
@@ -4630,9 +4660,6 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprF32x4Add:
return graph()->NewNode(mcgraph()->machine()->F32x4Add(), inputs[0],
inputs[1]);
- case wasm::kExprF32x4AddHoriz:
- return graph()->NewNode(mcgraph()->machine()->F32x4AddHoriz(), inputs[0],
- inputs[1]);
case wasm::kExprF32x4Sub:
return graph()->NewNode(mcgraph()->machine()->F32x4Sub(), inputs[0],
inputs[1]);
@@ -4772,9 +4799,6 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI64x2ExtMulHighI32x4U:
return graph()->NewNode(mcgraph()->machine()->I64x2ExtMulHighI32x4U(),
inputs[0], inputs[1]);
- case wasm::kExprI64x2SignSelect:
- return graph()->NewNode(mcgraph()->machine()->I64x2SignSelect(),
- inputs[0], inputs[1], inputs[2]);
case wasm::kExprI32x4Splat:
return graph()->NewNode(mcgraph()->machine()->I32x4Splat(), inputs[0]);
case wasm::kExprI32x4SConvertF32x4:
@@ -4800,9 +4824,6 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI32x4Add:
return graph()->NewNode(mcgraph()->machine()->I32x4Add(), inputs[0],
inputs[1]);
- case wasm::kExprI32x4AddHoriz:
- return graph()->NewNode(mcgraph()->machine()->I32x4AddHoriz(), inputs[0],
- inputs[1]);
case wasm::kExprI32x4Sub:
return graph()->NewNode(mcgraph()->machine()->I32x4Sub(), inputs[0],
inputs[1]);
@@ -4879,9 +4900,6 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI32x4ExtMulHighI16x8U:
return graph()->NewNode(mcgraph()->machine()->I32x4ExtMulHighI16x8U(),
inputs[0], inputs[1]);
- case wasm::kExprI32x4SignSelect:
- return graph()->NewNode(mcgraph()->machine()->I32x4SignSelect(),
- inputs[0], inputs[1], inputs[2]);
case wasm::kExprI32x4ExtAddPairwiseI16x8S:
return graph()->NewNode(mcgraph()->machine()->I32x4ExtAddPairwiseI16x8S(),
inputs[0]);
@@ -4919,9 +4937,6 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI16x8AddSatS:
return graph()->NewNode(mcgraph()->machine()->I16x8AddSatS(), inputs[0],
inputs[1]);
- case wasm::kExprI16x8AddHoriz:
- return graph()->NewNode(mcgraph()->machine()->I16x8AddHoriz(), inputs[0],
- inputs[1]);
case wasm::kExprI16x8Sub:
return graph()->NewNode(mcgraph()->machine()->I16x8Sub(), inputs[0],
inputs[1]);
@@ -5013,9 +5028,6 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI16x8ExtMulHighI8x16U:
return graph()->NewNode(mcgraph()->machine()->I16x8ExtMulHighI8x16U(),
inputs[0], inputs[1]);
- case wasm::kExprI16x8SignSelect:
- return graph()->NewNode(mcgraph()->machine()->I16x8SignSelect(),
- inputs[0], inputs[1], inputs[2]);
case wasm::kExprI16x8ExtAddPairwiseI8x16S:
return graph()->NewNode(mcgraph()->machine()->I16x8ExtAddPairwiseI8x16S(),
inputs[0]);
@@ -5047,9 +5059,6 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI8x16SubSatS:
return graph()->NewNode(mcgraph()->machine()->I8x16SubSatS(), inputs[0],
inputs[1]);
- case wasm::kExprI8x16Mul:
- return graph()->NewNode(mcgraph()->machine()->I8x16Mul(), inputs[0],
- inputs[1]);
case wasm::kExprI8x16MinS:
return graph()->NewNode(mcgraph()->machine()->I8x16MinS(), inputs[0],
inputs[1]);
@@ -5113,9 +5122,6 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
return graph()->NewNode(mcgraph()->machine()->I8x16Abs(), inputs[0]);
case wasm::kExprI8x16BitMask:
return graph()->NewNode(mcgraph()->machine()->I8x16BitMask(), inputs[0]);
- case wasm::kExprI8x16SignSelect:
- return graph()->NewNode(mcgraph()->machine()->I8x16SignSelect(),
- inputs[0], inputs[1], inputs[2]);
case wasm::kExprS128And:
return graph()->NewNode(mcgraph()->machine()->S128And(), inputs[0],
inputs[1]);
@@ -5133,16 +5139,16 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprS128AndNot:
return graph()->NewNode(mcgraph()->machine()->S128AndNot(), inputs[0],
inputs[1]);
- case wasm::kExprV64x2AllTrue:
- return graph()->NewNode(mcgraph()->machine()->V64x2AllTrue(), inputs[0]);
- case wasm::kExprV32x4AllTrue:
- return graph()->NewNode(mcgraph()->machine()->V32x4AllTrue(), inputs[0]);
- case wasm::kExprV16x8AllTrue:
- return graph()->NewNode(mcgraph()->machine()->V16x8AllTrue(), inputs[0]);
+ case wasm::kExprI64x2AllTrue:
+ return graph()->NewNode(mcgraph()->machine()->I64x2AllTrue(), inputs[0]);
+ case wasm::kExprI32x4AllTrue:
+ return graph()->NewNode(mcgraph()->machine()->I32x4AllTrue(), inputs[0]);
+ case wasm::kExprI16x8AllTrue:
+ return graph()->NewNode(mcgraph()->machine()->I16x8AllTrue(), inputs[0]);
case wasm::kExprV128AnyTrue:
return graph()->NewNode(mcgraph()->machine()->V128AnyTrue(), inputs[0]);
- case wasm::kExprV8x16AllTrue:
- return graph()->NewNode(mcgraph()->machine()->V8x16AllTrue(), inputs[0]);
+ case wasm::kExprI8x16AllTrue:
+ return graph()->NewNode(mcgraph()->machine()->I8x16AllTrue(), inputs[0]);
case wasm::kExprI8x16Swizzle:
return graph()->NewNode(mcgraph()->machine()->I8x16Swizzle(), inputs[0],
inputs[1]);
@@ -5402,14 +5408,14 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
}
}
-Node* WasmGraphBuilder::AtomicFence() {
- return SetEffect(graph()->NewNode(mcgraph()->machine()->MemBarrier(),
- effect(), control()));
+void WasmGraphBuilder::AtomicFence() {
+ SetEffect(graph()->NewNode(mcgraph()->machine()->MemBarrier(), effect(),
+ control()));
}
-Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
- Node* src, Node* size,
- wasm::WasmCodePosition position) {
+void WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
+ Node* src, Node* size,
+ wasm::WasmCodePosition position) {
// The data segment index must be in bounds since it is required by
// validation.
DCHECK_LT(data_segment_index, env_->module->num_declared_data_segments);
@@ -5418,7 +5424,7 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
gasm_->ExternalConstant(ExternalReference::wasm_memory_init());
Node* stack_slot = StoreArgsInStackSlot(
- {{MachineType::PointerRepresentation(), instance_node_.get()},
+ {{MachineType::PointerRepresentation(), GetInstance()},
{MachineRepresentation::kWord32, dst},
{MachineRepresentation::kWord32, src},
{MachineRepresentation::kWord32,
@@ -5427,22 +5433,20 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
MachineSignature sig(1, 1, sig_types);
- Node* call = SetEffect(BuildCCall(&sig, function, stack_slot));
- return TrapIfFalse(wasm::kTrapMemOutOfBounds, call, position);
+ Node* call = BuildCCall(&sig, function, stack_slot);
+ TrapIfFalse(wasm::kTrapMemOutOfBounds, call, position);
}
-Node* WasmGraphBuilder::DataDrop(uint32_t data_segment_index,
- wasm::WasmCodePosition position) {
+void WasmGraphBuilder::DataDrop(uint32_t data_segment_index,
+ wasm::WasmCodePosition position) {
DCHECK_LT(data_segment_index, env_->module->num_declared_data_segments);
Node* seg_size_array =
LOAD_INSTANCE_FIELD(DataSegmentSizes, MachineType::Pointer());
STATIC_ASSERT(wasm::kV8MaxWasmDataSegments <= kMaxUInt32 >> 2);
- auto store_rep =
- StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier);
- return gasm_->Store(store_rep, seg_size_array,
- mcgraph()->IntPtrConstant(data_segment_index << 2),
- Int32Constant(0));
+ auto access = ObjectAccess(MachineType::Int32(), kNoWriteBarrier);
+ gasm_->StoreToObject(access, seg_size_array, data_segment_index << 2,
+ Int32Constant(0));
}
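
DataDrop now writes a 32-bit zero into the DataSegmentSizes array via StoreToObject; the byte offset is data_segment_index << 2 because every entry is a uint32. A tiny standalone sketch of that index-to-byte-offset step:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Standalone sketch: dropping data segment {index} zeroes the
    // index-th uint32 in a flat sizes buffer; the byte offset is
    // index << 2, i.e. index * sizeof(uint32_t).
    void DropDataSegment(uint8_t* seg_size_array, uint32_t index) {
      const uint32_t zero = 0;
      std::memcpy(seg_size_array + (index << 2), &zero, sizeof(zero));
    }

    int main() {
      uint8_t sizes[4 * sizeof(uint32_t)];
      std::memset(sizes, 0xFF, sizeof(sizes));
      DropDataSegment(sizes, 1);
      uint32_t second;
      std::memcpy(&second, sizes + 4, sizeof(second));
      assert(second == 0);
      return 0;
    }
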
Node* WasmGraphBuilder::StoreArgsInStackSlot(
@@ -5466,51 +5470,51 @@ Node* WasmGraphBuilder::StoreArgsInStackSlot(
return stack_slot;
}
-Node* WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size,
- wasm::WasmCodePosition position) {
+void WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size,
+ wasm::WasmCodePosition position) {
Node* function =
gasm_->ExternalConstant(ExternalReference::wasm_memory_copy());
Node* stack_slot = StoreArgsInStackSlot(
- {{MachineType::PointerRepresentation(), instance_node_.get()},
+ {{MachineType::PointerRepresentation(), GetInstance()},
{MachineRepresentation::kWord32, dst},
{MachineRepresentation::kWord32, src},
{MachineRepresentation::kWord32, size}});
MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
MachineSignature sig(1, 1, sig_types);
- Node* call = SetEffect(BuildCCall(&sig, function, stack_slot));
- return TrapIfFalse(wasm::kTrapMemOutOfBounds, call, position);
+ Node* call = BuildCCall(&sig, function, stack_slot);
+ TrapIfFalse(wasm::kTrapMemOutOfBounds, call, position);
}
-Node* WasmGraphBuilder::MemoryFill(Node* dst, Node* value, Node* size,
- wasm::WasmCodePosition position) {
+void WasmGraphBuilder::MemoryFill(Node* dst, Node* value, Node* size,
+ wasm::WasmCodePosition position) {
Node* function =
gasm_->ExternalConstant(ExternalReference::wasm_memory_fill());
Node* stack_slot = StoreArgsInStackSlot(
- {{MachineType::PointerRepresentation(), instance_node_.get()},
+ {{MachineType::PointerRepresentation(), GetInstance()},
{MachineRepresentation::kWord32, dst},
{MachineRepresentation::kWord32, value},
{MachineRepresentation::kWord32, size}});
MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
MachineSignature sig(1, 1, sig_types);
- Node* call = SetEffect(BuildCCall(&sig, function, stack_slot));
- return TrapIfFalse(wasm::kTrapMemOutOfBounds, call, position);
+ Node* call = BuildCCall(&sig, function, stack_slot);
+ TrapIfFalse(wasm::kTrapMemOutOfBounds, call, position);
}
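
MemoryInit, MemoryCopy and MemoryFill share one shape: pack the instance pointer and the 32-bit arguments into a stack slot, call an out-of-line C function through an external reference, and trap with kTrapMemOutOfBounds when it reports failure. A rough standalone sketch of that flow; the struct layout and helper below are made up for illustration and are not the actual wasm external-reference helpers:

    #include <cstdint>
    #include <stdexcept>

    struct PackedArgs {         // sketch of the stack-slot layout above
      void* instance;           // pointer-sized instance argument
      uint32_t dst, src, size;  // three kWord32 arguments
    };

    // Hypothetical stand-in for the out-of-line C helper: returns 1 on
    // success, 0 if the copy would be out of bounds.
    static int MemoryCopyHelper(const PackedArgs& args, uint64_t mem_size) {
      return uint64_t{args.dst} + args.size <= mem_size &&
             uint64_t{args.src} + args.size <= mem_size;
    }

    void MemoryCopySketch(void* instance, uint32_t dst, uint32_t src,
                          uint32_t size, uint64_t mem_size) {
      PackedArgs args{instance, dst, src, size};        // StoreArgsInStackSlot
      if (!MemoryCopyHelper(args, mem_size)) {          // BuildCCall
        throw std::runtime_error("kTrapMemOutOfBounds");  // TrapIfFalse
      }
    }
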
-Node* WasmGraphBuilder::TableInit(uint32_t table_index,
- uint32_t elem_segment_index, Node* dst,
- Node* src, Node* size,
- wasm::WasmCodePosition position) {
- return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmTableInit, dst, src, size,
- gasm_->NumberConstant(table_index),
- gasm_->NumberConstant(elem_segment_index));
+void WasmGraphBuilder::TableInit(uint32_t table_index,
+ uint32_t elem_segment_index, Node* dst,
+ Node* src, Node* size,
+ wasm::WasmCodePosition position) {
+ gasm_->CallRuntimeStub(wasm::WasmCode::kWasmTableInit, dst, src, size,
+ gasm_->NumberConstant(table_index),
+ gasm_->NumberConstant(elem_segment_index));
}
-Node* WasmGraphBuilder::ElemDrop(uint32_t elem_segment_index,
- wasm::WasmCodePosition position) {
+void WasmGraphBuilder::ElemDrop(uint32_t elem_segment_index,
+ wasm::WasmCodePosition position) {
// The elem segment index must be in bounds since it is required by
// validation.
DCHECK_LT(elem_segment_index, env_->module->elem_segments.size());
@@ -5519,57 +5523,52 @@ Node* WasmGraphBuilder::ElemDrop(uint32_t elem_segment_index,
LOAD_INSTANCE_FIELD(DroppedElemSegments, MachineType::Pointer());
auto store_rep =
StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier);
- return gasm_->Store(store_rep, dropped_elem_segments, elem_segment_index,
- Int32Constant(1));
+ gasm_->Store(store_rep, dropped_elem_segments, elem_segment_index,
+ Int32Constant(1));
}
-Node* WasmGraphBuilder::TableCopy(uint32_t table_dst_index,
- uint32_t table_src_index, Node* dst,
- Node* src, Node* size,
- wasm::WasmCodePosition position) {
- return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmTableCopy, dst, src, size,
- gasm_->NumberConstant(table_dst_index),
- gasm_->NumberConstant(table_src_index));
+void WasmGraphBuilder::TableCopy(uint32_t table_dst_index,
+ uint32_t table_src_index, Node* dst, Node* src,
+ Node* size, wasm::WasmCodePosition position) {
+ gasm_->CallRuntimeStub(wasm::WasmCode::kWasmTableCopy, dst, src, size,
+ gasm_->NumberConstant(table_dst_index),
+ gasm_->NumberConstant(table_src_index));
}
Node* WasmGraphBuilder::TableGrow(uint32_t table_index, Node* value,
Node* delta) {
- Node* args[] = {
- gasm_->NumberConstant(table_index), value,
- BuildConvertUint32ToSmiWithSaturation(delta, FLAG_wasm_max_table_size)};
- Node* result =
- BuildCallToRuntime(Runtime::kWasmTableGrow, args, arraysize(args));
- return BuildChangeSmiToInt32(result);
+ return BuildChangeSmiToInt32(gasm_->CallRuntimeStub(
+ wasm::WasmCode::kWasmTableGrow,
+ graph()->NewNode(mcgraph()->common()->NumberConstant(table_index)), delta,
+ value));
}
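
TableGrow now goes through the kWasmTableGrow runtime stub and converts its Smi result with BuildChangeSmiToInt32. A standalone sketch of that change of view, assuming a 31-bit Smi encoding with a 0 tag bit in the LSB (the real layout varies with word size and pointer compression):

    #include <cassert>
    #include <cstdint>

    // Standalone sketch of 31-bit Smi tagging/untagging (tag bit 0 in
    // the LSB). Illustration of the Smi <-> int32 change of view only.
    static int32_t Int32ToSmi(int32_t value) {
      return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
    }
    static int32_t SmiToInt32(int32_t smi) { return smi >> 1; }

    int main() {
      int32_t smi = Int32ToSmi(42);
      assert((smi & 1) == 0);        // tag bit stays clear
      assert(SmiToInt32(smi) == 42);
      return 0;
    }
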
Node* WasmGraphBuilder::TableSize(uint32_t table_index) {
Node* tables = LOAD_INSTANCE_FIELD(Tables, MachineType::TaggedPointer());
- Node* table = LOAD_FIXED_ARRAY_SLOT_ANY(tables, table_index);
+ Node* table = gasm_->LoadFixedArrayElementAny(tables, table_index);
int length_field_size = WasmTableObject::kCurrentLengthOffsetEnd -
WasmTableObject::kCurrentLengthOffset + 1;
- Node* length_smi = gasm_->Load(
+ Node* length_smi = gasm_->LoadFromObject(
assert_size(length_field_size, MachineType::TaggedSigned()), table,
wasm::ObjectAccess::ToTagged(WasmTableObject::kCurrentLengthOffset));
return BuildChangeSmiToInt32(length_smi);
}
-Node* WasmGraphBuilder::TableFill(uint32_t table_index, Node* start,
- Node* value, Node* count) {
- Node* args[] = {
- gasm_->NumberConstant(table_index),
- BuildConvertUint32ToSmiWithSaturation(start, FLAG_wasm_max_table_size),
- value,
- BuildConvertUint32ToSmiWithSaturation(count, FLAG_wasm_max_table_size)};
-
- return BuildCallToRuntime(Runtime::kWasmTableFill, args, arraysize(args));
+void WasmGraphBuilder::TableFill(uint32_t table_index, Node* start, Node* value,
+ Node* count) {
+ gasm_->CallRuntimeStub(
+ wasm::WasmCode::kWasmTableFill,
+ graph()->NewNode(mcgraph()->common()->NumberConstant(table_index)), start,
+ count, value);
}
Node* WasmGraphBuilder::StructNewWithRtt(uint32_t struct_index,
const wasm::StructType* type,
Node* rtt, Vector<Node*> fields) {
- Node* s = gasm_->CallBuiltin(Builtins::kWasmAllocateStructWithRtt, rtt);
+ Node* s = gasm_->CallBuiltin(Builtins::kWasmAllocateStructWithRtt,
+ Operator::kEliminatable, rtt);
for (uint32_t i = 0; i < type->field_count(); i++) {
gasm_->StoreStructField(s, type, i, fields[i]);
}
@@ -5586,9 +5585,9 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
length, gasm_->Uint32Constant(wasm::kV8MaxWasmArrayLength)),
position);
wasm::ValueType element_type = type->element_type();
- Node* a =
- gasm_->CallBuiltin(Builtins::kWasmAllocateArrayWithRtt, rtt, length,
- Int32Constant(element_type.element_size_bytes()));
+ Node* a = gasm_->CallBuiltin(
+ Builtins::kWasmAllocateArrayWithRtt, Operator::kEliminatable, rtt, length,
+ Int32Constant(element_type.element_size_bytes()));
auto loop = gasm_->MakeLoopLabel(MachineRepresentation::kWord32);
auto done = gasm_->MakeLabel();
Node* start_offset =
@@ -5597,15 +5596,15 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
Node* end_offset =
gasm_->Int32Add(start_offset, gasm_->Int32Mul(element_size, length));
// Loops need the graph's end to have been set up.
- EnsureEnd(mcgraph());
+ gasm_->EnsureEnd();
gasm_->Goto(&loop, start_offset);
gasm_->Bind(&loop);
{
Node* offset = loop.PhiAt(0);
Node* check = gasm_->Uint32LessThan(offset, end_offset);
gasm_->GotoIfNot(check, &done);
- gasm_->StoreWithTaggedAlignment(a, offset, initial_value,
- type->element_type());
+ gasm_->StoreToObject(ObjectAccessForGCStores(type->element_type()), a,
+ offset, initial_value);
offset = gasm_->Int32Add(offset, element_size);
gasm_->Goto(&loop, offset);
}
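
The initialization loop above walks byte offsets from the first element to end_offset, storing the initial value and advancing by the element size on each iteration. The same control shape over a plain byte buffer, as a standalone sketch:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Standalone sketch: walk byte offsets from the first element to
    // end_offset, storing the initial value at each step (same loop
    // shape as the graph built above, over a plain buffer).
    void FillElements(uint8_t* base, uint32_t start_offset, uint32_t length,
                      uint32_t element_size, const void* initial_value) {
      const uint32_t end_offset = start_offset + element_size * length;
      for (uint32_t offset = start_offset; offset < end_offset;
           offset += element_size) {
        std::memcpy(base + offset, initial_value, element_size);
      }
    }

    int main() {
      std::vector<uint8_t> storage(8 + 4 * sizeof(int32_t));  // 8-byte "header"
      const int32_t init = -1;
      FillElements(storage.data(), 8, 4, sizeof(int32_t), &init);
      return 0;
    }
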
@@ -5616,26 +5615,14 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
Node* WasmGraphBuilder::RttCanon(uint32_t type_index) {
Node* maps_list =
LOAD_INSTANCE_FIELD(ManagedObjectMaps, MachineType::TaggedPointer());
- return LOAD_FIXED_ARRAY_SLOT_PTR(maps_list, type_index);
+ return gasm_->LoadFixedArrayElementPtr(maps_list, type_index);
}
Node* WasmGraphBuilder::RttSub(uint32_t type_index, Node* parent_rtt) {
- return gasm_->CallBuiltin(Builtins::kWasmAllocateRtt,
+ return gasm_->CallBuiltin(Builtins::kWasmAllocateRtt, Operator::kEliminatable,
Int32Constant(type_index), parent_rtt);
}
-void AssertFalse(MachineGraph* mcgraph, GraphAssembler* gasm, Node* condition) {
-#if DEBUG
- if (FLAG_debug_code) {
- auto ok = gasm->MakeLabel();
- gasm->GotoIfNot(condition, &ok);
- EnsureEnd(mcgraph);
- gasm->Unreachable();
- gasm->Bind(&ok);
- }
-#endif
-}
-
WasmGraphBuilder::Callbacks WasmGraphBuilder::TestCallbacks(
GraphAssemblerLabel<1>* label) {
return {// succeed_if
@@ -5760,7 +5747,7 @@ void WasmGraphBuilder::FuncCheck(Node* object, bool object_can_be_null,
BranchHint::kTrue);
}
-Node* WasmGraphBuilder::BrOnCastAbs(
+void WasmGraphBuilder::BrOnCastAbs(
Node** match_control, Node** match_effect, Node** no_match_control,
Node** no_match_effect, std::function<void(Callbacks)> type_checker) {
SmallNodeVector no_match_controls, no_match_effects, match_controls,
@@ -5786,10 +5773,6 @@ Node* WasmGraphBuilder::BrOnCastAbs(
// EffectPhis need their control dependency as an additional input.
no_match_effects.emplace_back(*no_match_control);
*no_match_effect = EffectPhi(count, no_match_effects.data());
-
- // Return value is not used, but we need it for compatibility
- // with graph-builder-interface.
- return nullptr;
}
Node* WasmGraphBuilder::RefTest(Node* object, Node* rtt,
@@ -5811,15 +5794,15 @@ Node* WasmGraphBuilder::RefCast(Node* object, Node* rtt,
return object;
}
-Node* WasmGraphBuilder::BrOnCast(Node* object, Node* rtt,
- ObjectReferenceKnowledge config,
- Node** match_control, Node** match_effect,
- Node** no_match_control,
- Node** no_match_effect) {
- return BrOnCastAbs(match_control, match_effect, no_match_control,
- no_match_effect, [=](Callbacks callbacks) -> void {
- return TypeCheck(object, rtt, config, false, callbacks);
- });
+void WasmGraphBuilder::BrOnCast(Node* object, Node* rtt,
+ ObjectReferenceKnowledge config,
+ Node** match_control, Node** match_effect,
+ Node** no_match_control,
+ Node** no_match_effect) {
+ BrOnCastAbs(match_control, match_effect, no_match_control, no_match_effect,
+ [=](Callbacks callbacks) -> void {
+ return TypeCheck(object, rtt, config, false, callbacks);
+ });
}
Node* WasmGraphBuilder::RefIsData(Node* object, bool object_can_be_null) {
@@ -5839,16 +5822,15 @@ Node* WasmGraphBuilder::RefAsData(Node* object, bool object_can_be_null,
return object;
}
-Node* WasmGraphBuilder::BrOnData(Node* object, Node* /*rtt*/,
- ObjectReferenceKnowledge config,
- Node** match_control, Node** match_effect,
- Node** no_match_control,
- Node** no_match_effect) {
- return BrOnCastAbs(match_control, match_effect, no_match_control,
- no_match_effect, [=](Callbacks callbacks) -> void {
- return DataCheck(object, config.object_can_be_null,
- callbacks);
- });
+void WasmGraphBuilder::BrOnData(Node* object, Node* /*rtt*/,
+ ObjectReferenceKnowledge config,
+ Node** match_control, Node** match_effect,
+ Node** no_match_control,
+ Node** no_match_effect) {
+ BrOnCastAbs(match_control, match_effect, no_match_control, no_match_effect,
+ [=](Callbacks callbacks) -> void {
+ return DataCheck(object, config.object_can_be_null, callbacks);
+ });
}
Node* WasmGraphBuilder::RefIsFunc(Node* object, bool object_can_be_null) {
@@ -5868,16 +5850,15 @@ Node* WasmGraphBuilder::RefAsFunc(Node* object, bool object_can_be_null,
return object;
}
-Node* WasmGraphBuilder::BrOnFunc(Node* object, Node* /*rtt*/,
- ObjectReferenceKnowledge config,
- Node** match_control, Node** match_effect,
- Node** no_match_control,
- Node** no_match_effect) {
- return BrOnCastAbs(match_control, match_effect, no_match_control,
- no_match_effect, [=](Callbacks callbacks) -> void {
- return FuncCheck(object, config.object_can_be_null,
- callbacks);
- });
+void WasmGraphBuilder::BrOnFunc(Node* object, Node* /*rtt*/,
+ ObjectReferenceKnowledge config,
+ Node** match_control, Node** match_effect,
+ Node** no_match_control,
+ Node** no_match_effect) {
+ BrOnCastAbs(match_control, match_effect, no_match_control, no_match_effect,
+ [=](Callbacks callbacks) -> void {
+ return FuncCheck(object, config.object_can_be_null, callbacks);
+ });
}
Node* WasmGraphBuilder::RefIsI31(Node* object) { return gasm_->IsI31(object); }
@@ -5888,20 +5869,17 @@ Node* WasmGraphBuilder::RefAsI31(Node* object,
return object;
}
-Node* WasmGraphBuilder::BrOnI31(Node* object, Node* /* rtt */,
- ObjectReferenceKnowledge /* config */,
- Node** match_control, Node** match_effect,
- Node** no_match_control,
- Node** no_match_effect) {
+void WasmGraphBuilder::BrOnI31(Node* object, Node* /* rtt */,
+ ObjectReferenceKnowledge /* config */,
+ Node** match_control, Node** match_effect,
+ Node** no_match_control,
+ Node** no_match_effect) {
gasm_->Branch(gasm_->IsI31(object), match_control, no_match_control,
BranchHint::kTrue);
SetControl(*no_match_control);
*match_effect = effect();
*no_match_effect = effect();
-
- // Unused return value, needed for typing of BUILD in graph-builder-interface.
- return nullptr;
}
Node* WasmGraphBuilder::StructGet(Node* struct_object,
@@ -5913,23 +5891,24 @@ Node* WasmGraphBuilder::StructGet(Node* struct_object,
TrapIfTrue(wasm::kTrapNullDereference,
gasm_->WordEqual(struct_object, RefNull()), position);
}
- MachineType machine_type =
- gasm_->FieldType(struct_type, field_index, is_signed);
+ // It is not enough to invoke ValueType::machine_type(), because the
+ // signedness has to be determined by {is_signed}.
+ MachineType machine_type = MachineType::TypeForRepresentation(
+ struct_type->field(field_index).machine_representation(), is_signed);
Node* offset = gasm_->FieldOffset(struct_type, field_index);
- return gasm_->LoadWithTaggedAlignment(machine_type, struct_object, offset);
+ return gasm_->LoadFromObject(machine_type, struct_object, offset);
}
-Node* WasmGraphBuilder::StructSet(Node* struct_object,
- const wasm::StructType* struct_type,
- uint32_t field_index, Node* field_value,
- CheckForNull null_check,
- wasm::WasmCodePosition position) {
+void WasmGraphBuilder::StructSet(Node* struct_object,
+ const wasm::StructType* struct_type,
+ uint32_t field_index, Node* field_value,
+ CheckForNull null_check,
+ wasm::WasmCodePosition position) {
if (null_check == kWithNullCheck) {
TrapIfTrue(wasm::kTrapNullDereference,
gasm_->WordEqual(struct_object, RefNull()), position);
}
- return gasm_->StoreStructField(struct_object, struct_type, field_index,
- field_value);
+ gasm_->StoreStructField(struct_object, struct_type, field_index, field_value);
}
void WasmGraphBuilder::BoundsCheck(Node* array, Node* index,
@@ -5951,21 +5930,21 @@ Node* WasmGraphBuilder::ArrayGet(Node* array_object,
MachineType machine_type = MachineType::TypeForRepresentation(
type->element_type().machine_representation(), is_signed);
Node* offset = gasm_->WasmArrayElementOffset(index, type->element_type());
- return gasm_->LoadWithTaggedAlignment(machine_type, array_object, offset);
+ return gasm_->LoadFromObject(machine_type, array_object, offset);
}
-Node* WasmGraphBuilder::ArraySet(Node* array_object,
- const wasm::ArrayType* type, Node* index,
- Node* value, CheckForNull null_check,
- wasm::WasmCodePosition position) {
+void WasmGraphBuilder::ArraySet(Node* array_object, const wasm::ArrayType* type,
+ Node* index, Node* value,
+ CheckForNull null_check,
+ wasm::WasmCodePosition position) {
if (null_check == kWithNullCheck) {
TrapIfTrue(wasm::kTrapNullDereference,
gasm_->WordEqual(array_object, RefNull()), position);
}
BoundsCheck(array_object, index, position);
Node* offset = gasm_->WasmArrayElementOffset(index, type->element_type());
- return gasm_->StoreWithTaggedAlignment(array_object, offset, value,
- type->element_type());
+ gasm_->StoreToObject(ObjectAccessForGCStores(type->element_type()),
+ array_object, offset, value);
}
Node* WasmGraphBuilder::ArrayLen(Node* array_object, CheckForNull null_check,
@@ -6040,14 +6019,16 @@ void WasmGraphBuilder::RemoveBytecodePositionDecorator() {
namespace {
+// A non-null {isolate} signifies that the generated code is treated as being in
+// a JS frame for functions like BuildIsolateRoot().
class WasmWrapperGraphBuilder : public WasmGraphBuilder {
public:
WasmWrapperGraphBuilder(Zone* zone, MachineGraph* mcgraph,
const wasm::FunctionSig* sig,
- const wasm::WasmModule* module,
+ const wasm::WasmModule* module, Isolate* isolate,
compiler::SourcePositionTable* spt,
StubCallMode stub_mode, wasm::WasmFeatures features)
- : WasmGraphBuilder(nullptr, zone, mcgraph, sig, spt),
+ : WasmGraphBuilder(nullptr, zone, mcgraph, sig, spt, isolate),
module_(module),
stub_mode_(stub_mode),
enabled_features_(features) {}
@@ -6083,20 +6064,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return (stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
? mcgraph()->RelocatableIntPtrConstant(wasm_stub,
RelocInfo::WASM_STUB_CALL)
- : GetBuiltinPointerTarget(mcgraph(), builtin_id);
+ : gasm_->GetBuiltinPointerTarget(builtin_id);
}
- Node* BuildLoadUndefinedValueFromInstance() {
- if (undefined_value_node_ == nullptr) {
- Node* isolate_root =
- LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
- undefined_value_node_ = gasm_->Load(
- MachineType::Pointer(), isolate_root,
- Int32Constant(
- IsolateData::root_slot_offset(RootIndex::kUndefinedValue)));
- }
- return undefined_value_node_.get();
- }
+ Node* UndefinedValue() { return LOAD_ROOT(UndefinedValue, undefined_value); }
Node* BuildChangeInt32ToNumber(Node* value) {
// We expect most integers at runtime to be Smis, so it is important for
@@ -6239,53 +6210,66 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
switch (type.kind()) {
case wasm::kI32:
return BuildChangeInt32ToNumber(node);
- case wasm::kS128:
- UNREACHABLE();
- case wasm::kI64: {
+ case wasm::kI64:
return BuildChangeInt64ToBigInt(node);
- }
case wasm::kF32:
return BuildChangeFloat32ToNumber(node);
case wasm::kF64:
return BuildChangeFloat64ToNumber(node);
case wasm::kRef:
- case wasm::kOptRef: {
- uint32_t representation = type.heap_representation();
- if (representation == wasm::HeapType::kExtern ||
- representation == wasm::HeapType::kFunc) {
- return node;
- }
- if (representation == wasm::HeapType::kData) {
- // TODO(7748): Update this when JS interop is settled.
- return BuildAllocateObjectWrapper(node);
- }
- if (representation == wasm::HeapType::kAny) {
- // Only wrap {node} if it is an array or struct.
- // TODO(7748): Update this when JS interop is settled.
- auto done = gasm_->MakeLabel(MachineRepresentation::kTaggedPointer);
- gasm_->GotoIfNot(gasm_->IsDataRefMap(gasm_->LoadMap(node)), &done,
- node);
- Node* wrapped = BuildAllocateObjectWrapper(node);
- gasm_->Goto(&done, wrapped);
- gasm_->Bind(&done);
- return done.PhiAt(0);
- }
- if (type.has_index() && module_->has_signature(type.ref_index())) {
- // Typed function
- return node;
+ case wasm::kOptRef:
+ switch (type.heap_representation()) {
+ case wasm::HeapType::kExtern:
+ case wasm::HeapType::kFunc:
+ return node;
+ case wasm::HeapType::kData:
+ case wasm::HeapType::kEq:
+ case wasm::HeapType::kI31:
+ // TODO(7748): Update this when JS interop is settled.
+ if (type.kind() == wasm::kOptRef) {
+ auto done =
+ gasm_->MakeLabel(MachineRepresentation::kTaggedPointer);
+ // Do not wrap {null}.
+ gasm_->GotoIf(gasm_->WordEqual(node, RefNull()), &done, node);
+ gasm_->Goto(&done, BuildAllocateObjectWrapper(node));
+ gasm_->Bind(&done);
+ return done.PhiAt(0);
+ } else {
+ return BuildAllocateObjectWrapper(node);
+ }
+ case wasm::HeapType::kAny: {
+ // Only wrap {node} if it is an array/struct/i31, i.e., do not wrap
+ // functions and null.
+ // TODO(7748): Update this when JS interop is settled.
+ auto done = gasm_->MakeLabel(MachineRepresentation::kTaggedPointer);
+ gasm_->GotoIf(IsSmi(node), &done, BuildAllocateObjectWrapper(node));
+ // This includes the case where {node == null}.
+ gasm_->GotoIfNot(gasm_->IsDataRefMap(gasm_->LoadMap(node)), &done,
+ node);
+ gasm_->Goto(&done, BuildAllocateObjectWrapper(node));
+ gasm_->Bind(&done);
+ return done.PhiAt(0);
+ }
+ default:
+ DCHECK(type.has_index());
+ if (module_->has_signature(type.ref_index())) {
+ // Typed function
+ return node;
+ }
+ // If this is reached, then IsJSCompatibleSignature() is too
+ // permissive.
+ // TODO(7748): Figure out a JS interop story for arrays and structs.
+ UNREACHABLE();
}
- // If this is reached, then IsJSCompatibleSignature() is too permissive.
- // TODO(7748): Figure out a JS interop story for arrays and structs.
- UNREACHABLE();
- }
case wasm::kRtt:
case wasm::kRttWithDepth:
- // TODO(7748): Figure out what to do for RTTs.
- UNIMPLEMENTED();
case wasm::kI8:
case wasm::kI16:
- case wasm::kStmt:
+ case wasm::kS128:
+ case wasm::kVoid:
case wasm::kBottom:
+ // If this is reached, then IsJSCompatibleSignature() is too permissive.
+ // TODO(7748): Figure out what to do for RTTs.
UNREACHABLE();
}
}
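
In the switch above, the kI32 case goes through BuildChangeInt32ToNumber, which prefers producing a Smi and falls back to a HeapNumber only when the value does not fit. A standalone sketch of that range check, assuming 31-bit Smis:

    #include <cstdint>
    #include <cstdio>

    // Standalone sketch: can an int32 be represented as a 31-bit Smi?
    // Most runtime integers can, which is why the Smi path is tried first.
    static bool FitsInSmi31(int32_t value) {
      return value >= -(1 << 30) && value <= (1 << 30) - 1;
    }

    int main() {
      std::printf("%d %d\n", FitsInSmi31(1000), FitsInSmi31(INT32_MAX));
      // prints "1 0": 1000 becomes a Smi, INT32_MAX needs a HeapNumber.
      return 0;
    }
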
@@ -6295,29 +6279,28 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// once we have a proper WasmGC <-> JS interaction story.
Node* BuildAllocateObjectWrapper(Node* input) {
return gasm_->CallBuiltin(
- Builtins::kWasmAllocateObjectWrapper, input,
+ Builtins::kWasmAllocateObjectWrapper, Operator::kEliminatable, input,
LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
}
- enum UnpackFailureBehavior : bool { kReturnInput, kReturnNull };
-
- Node* BuildUnpackObjectWrapper(Node* input, UnpackFailureBehavior failure) {
+ // Assumes {input} has been checked for validity against the target wasm type.
+ // Returns the value of the property associated with
+ // {wasm_wrapped_object_symbol} in {input}, or {input} itself if the property
+ // is not found.
+ Node* BuildUnpackObjectWrapper(Node* input) {
Node* obj = gasm_->CallBuiltin(
- Builtins::kWasmGetOwnProperty, input,
- LOAD_FULL_POINTER(BuildLoadIsolateRoot(),
- IsolateData::root_slot_offset(
- RootIndex::kwasm_wrapped_object_symbol)),
+ Builtins::kWasmGetOwnProperty, Operator::kEliminatable, input,
+ LOAD_ROOT(wasm_wrapped_object_symbol, wasm_wrapped_object_symbol),
LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
// Invalid object wrappers (i.e. any other JS object that doesn't have the
// magic hidden property) will return {undefined}. Map that back to
// {input}.
- Node* undefined = BuildLoadUndefinedValueFromInstance();
+ Node* undefined = UndefinedValue();
Node* is_undefined = gasm_->WordEqual(obj, undefined);
Diamond check(graph(), mcgraph()->common(), is_undefined,
BranchHint::kFalse);
check.Chain(control());
- return check.Phi(MachineRepresentation::kTagged,
- failure == kReturnInput ? input : RefNull(), obj);
+ return check.Phi(MachineRepresentation::kTagged, input, obj);
}
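
The unpack path looks up the hidden wasm_wrapped_object_symbol property: if the lookup yields undefined, {input} was not a wrapper and is passed through unchanged; otherwise the wrapped wasm object is returned. A rough stand-in for that flow, using a lookup table in place of the symbol-keyed property (purely illustrative, not V8's object model):

    #include <unordered_map>

    struct TaggedObject {};  // stand-in for a tagged JS value

    // Hypothetical wrapper table standing in for the hidden
    // wasm_wrapped_object_symbol property on wrapper objects.
    using WrapperTable = std::unordered_map<const TaggedObject*, TaggedObject*>;

    // Standalone sketch: return the wrapped wasm object if {input} is a
    // wrapper, otherwise pass {input} through (the "undefined" lookup case).
    TaggedObject* UnpackObjectWrapper(TaggedObject* input,
                                      const WrapperTable& wrappers) {
      auto it = wrappers.find(input);
      return it == wrappers.end() ? input : it->second;
    }
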
Node* BuildChangeInt64ToBigInt(Node* input) {
@@ -6361,12 +6344,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
wasm::ValueType type) {
// Make sure ValueType fits in a Smi.
STATIC_ASSERT(wasm::ValueType::kLastUsedBit + 1 <= kSmiValueSize);
- Node* inputs[] = {instance_node_.get(), input,
+ Node* inputs[] = {GetInstance(), input,
mcgraph()->IntPtrConstant(
IntToSmi(static_cast<int>(type.raw_bit_field())))};
- Node* check = BuildChangeSmiToInt32(SetEffect(BuildCallToRuntimeWithContext(
- Runtime::kWasmIsValidRefValue, js_context, inputs, 3)));
+ Node* check = BuildChangeSmiToInt32(BuildCallToRuntimeWithContext(
+ Runtime::kWasmIsValidRefValue, js_context, inputs, 3));
Diamond type_check(graph(), mcgraph()->common(), check, BranchHint::kTrue);
type_check.Chain(control());
@@ -6389,21 +6372,20 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
case wasm::HeapType::kExtern:
return input;
case wasm::HeapType::kAny:
- // If this is a wrapper for arrays/structs, unpack it.
+ // If this is a wrapper for arrays/structs/i31s, unpack it.
// TODO(7748): Update this when JS interop has settled.
- return BuildUnpackObjectWrapper(input, kReturnInput);
+ return BuildUnpackObjectWrapper(input);
case wasm::HeapType::kFunc:
BuildCheckValidRefValue(input, js_context, type);
return input;
case wasm::HeapType::kData:
- // TODO(7748): Update this when JS interop has settled.
- BuildCheckValidRefValue(input, js_context, type);
- return BuildUnpackObjectWrapper(input, kReturnNull);
case wasm::HeapType::kEq:
case wasm::HeapType::kI31:
- // If this is reached, then IsJSCompatibleSignature() is too
- // permissive.
- UNREACHABLE();
+ // TODO(7748): Update this when JS interop has settled.
+ BuildCheckValidRefValue(input, js_context, type);
+ // This will just return {input} if the object is not wrapped, i.e.
+ // if it is null (given the check just above).
+ return BuildUnpackObjectWrapper(input);
default:
if (module_->has_signature(type.ref_index())) {
BuildCheckValidRefValue(input, js_context, type);
@@ -6428,15 +6410,16 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// i64 values can only come from BigInt.
return BuildChangeBigIntToInt64(input, js_context, frame_state);
- case wasm::kRtt: // TODO(7748): Implement.
+ case wasm::kRtt:
case wasm::kRttWithDepth:
case wasm::kS128:
case wasm::kI8:
case wasm::kI16:
case wasm::kBottom:
- case wasm::kStmt:
+ case wasm::kVoid:
+ // If this is reached, then IsJSCompatibleSignature() is too permissive.
+ // TODO(7748): Figure out what to do for RTTs.
UNREACHABLE();
- break;
}
}
@@ -6449,8 +6432,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
Node* HeapNumberToFloat64(Node* input) {
- return gasm_->Load(MachineType::Float64(), input,
- wasm::ObjectAccess::ToTagged(HeapNumber::kValueOffset));
+ return gasm_->LoadFromObject(
+ MachineType::Float64(), input,
+ wasm::ObjectAccess::ToTagged(HeapNumber::kValueOffset));
}
Node* FromJSFast(Node* input, wasm::ValueType type) {
@@ -6488,7 +6472,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
case wasm::kI8:
case wasm::kI16:
case wasm::kBottom:
- case wasm::kStmt:
+ case wasm::kVoid:
UNREACHABLE();
break;
}
@@ -6497,8 +6481,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
void BuildModifyThreadInWasmFlagHelper(Node* thread_in_wasm_flag_address,
bool new_value) {
if (FLAG_debug_code) {
- Node* flag_value =
- gasm_->Load(MachineType::Pointer(), thread_in_wasm_flag_address, 0);
+ Node* flag_value = gasm_->LoadFromObject(MachineType::Pointer(),
+ thread_in_wasm_flag_address, 0);
Node* check =
gasm_->Word32Equal(flag_value, Int32Constant(new_value ? 0 : 1));
@@ -6518,9 +6502,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
flag_check.merge);
}
- gasm_->Store(
- StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier),
- thread_in_wasm_flag_address, 0, Int32Constant(new_value ? 1 : 0));
+ gasm_->StoreToObject(ObjectAccess(MachineType::Int32(), kNoWriteBarrier),
+ thread_in_wasm_flag_address, 0,
+ Int32Constant(new_value ? 1 : 0));
}
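
The helper above stores 0 or 1 into the per-thread thread-in-wasm flag and, under --debug-code, first verifies that the flag currently holds the opposite value. The same invariant as a plain C++ sketch:

    #include <cassert>
    #include <cstdint>

    // Standalone sketch: flip the thread-in-wasm flag and, like the
    // FLAG_debug_code path above, check that it held the opposite value.
    void ModifyThreadInWasmFlag(int32_t* flag_address, bool new_value) {
      assert(*flag_address == (new_value ? 0 : 1));
      *flag_address = new_value ? 1 : 0;
    }

    int main() {
      int32_t thread_in_wasm_flag = 0;
      ModifyThreadInWasmFlag(&thread_in_wasm_flag, true);   // entering wasm
      ModifyThreadInWasmFlag(&thread_in_wasm_flag, false);  // leaving wasm
      return 0;
    }
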
void BuildModifyThreadInWasmFlag(bool new_value) {
@@ -6528,8 +6512,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* isolate_root = BuildLoadIsolateRoot();
Node* thread_in_wasm_flag_address =
- gasm_->Load(MachineType::Pointer(), isolate_root,
- Isolate::thread_in_wasm_flag_address_offset());
+ gasm_->LoadFromObject(MachineType::Pointer(), isolate_root,
+ Isolate::thread_in_wasm_flag_address_offset());
BuildModifyThreadInWasmFlagHelper(thread_in_wasm_flag_address, new_value);
}
@@ -6544,8 +6528,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* isolate_root = wasm_wrapper_graph_builder_->BuildLoadIsolateRoot();
thread_in_wasm_flag_address_ =
- gasm->Load(MachineType::Pointer(), isolate_root,
- Isolate::thread_in_wasm_flag_address_offset());
+ gasm->LoadFromObject(MachineType::Pointer(), isolate_root,
+ Isolate::thread_in_wasm_flag_address_offset());
wasm_wrapper_graph_builder_->BuildModifyThreadInWasmFlagHelper(
thread_in_wasm_flag_address_, true);
@@ -6567,8 +6551,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* iterable, Node* context) {
Node* length = BuildChangeUint31ToSmi(
mcgraph()->Uint32Constant(static_cast<uint32_t>(sig->return_count())));
- return gasm_->CallBuiltin(Builtins::kIterableToFixedArrayForWasm, iterable,
- length, context);
+ return gasm_->CallBuiltin(Builtins::kIterableToFixedArrayForWasm,
+ Operator::kEliminatable, iterable, length,
+ context);
}
// Generate a call to the AllocateJSArray builtin.
@@ -6577,8 +6562,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// we make sure this is true based on statically known limits.
STATIC_ASSERT(wasm::kV8MaxWasmFunctionMultiReturns <=
JSArray::kInitialMaxFastElementArray);
- return SetControl(gasm_->CallBuiltin(Builtins::kWasmAllocateJSArray,
- array_length, context));
+ return gasm_->CallBuiltin(Builtins::kWasmAllocateJSArray,
+ Operator::kEliminatable, array_length, context);
}
Node* BuildCallAndReturn(bool is_import, Node* js_context,
@@ -6620,13 +6605,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* jsval;
if (sig_->return_count() == 0) {
- // We do not use {BuildLoadUndefinedValueFromInstance} here because it
- // would create an invalid graph.
- Node* isolate_root =
- LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
- jsval = gasm_->Load(
- MachineType::Pointer(), isolate_root,
- IsolateData::root_slot_offset(RootIndex::kUndefinedValue));
+ jsval = UndefinedValue();
} else if (sig_->return_count() == 1) {
jsval = js_wasm_call_data && !js_wasm_call_data->result_needs_conversion()
? rets[0]
@@ -6641,7 +6620,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
for (int i = 0; i < return_count; ++i) {
Node* value = ToJS(rets[i], sig_->GetReturn(i));
- STORE_FIXED_ARRAY_SLOT_ANY(fixed_array, i, value);
+ gasm_->StoreFixedArrayElementAny(fixed_array, i, value);
}
}
return jsval;
@@ -6661,7 +6640,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
case wasm::kI8:
case wasm::kI16:
case wasm::kBottom:
- case wasm::kStmt:
+ case wasm::kVoid:
return false;
case wasm::kI32:
case wasm::kF32:
@@ -6691,12 +6670,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
case wasm::kF64: {
auto done = gasm_->MakeLabel();
gasm_->GotoIf(IsSmi(input), &done);
- Node* map =
- gasm_->Load(MachineType::TaggedPointer(), input,
- wasm::ObjectAccess::ToTagged(HeapObject::kMapOffset));
- Node* heap_number_map = LOAD_FULL_POINTER(
- BuildLoadIsolateRoot(),
- IsolateData::root_slot_offset(RootIndex::kHeapNumberMap));
+ Node* map = gasm_->LoadFromObject(
+ MachineType::TaggedPointer(), input,
+ wasm::ObjectAccess::ToTagged(HeapObject::kMapOffset));
+ Node* heap_number_map = LOAD_ROOT(HeapNumberMap, heap_number_map);
Node* is_heap_number = gasm_->WordEqual(heap_number_map, map);
gasm_->GotoIf(is_heap_number, &done);
gasm_->Goto(slow_path);
@@ -6712,7 +6689,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
case wasm::kI8:
case wasm::kI16:
case wasm::kBottom:
- case wasm::kStmt:
+ case wasm::kVoid:
UNREACHABLE();
break;
}
@@ -6724,25 +6701,13 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
const int wasm_param_count = static_cast<int>(sig_->parameter_count());
// Build the start and the JS parameter nodes.
- SetEffectControl(Start(wasm_param_count + 5));
+ Start(wasm_param_count + 5);
// Create the js_closure and js_context parameters.
- Node* js_closure =
- graph()->NewNode(mcgraph()->common()->Parameter(
- Linkage::kJSCallClosureParamIndex, "%closure"),
- graph()->start());
- Node* js_context = graph()->NewNode(
- mcgraph()->common()->Parameter(
- Linkage::GetJSCallContextParamIndex(wasm_param_count + 1),
- "%context"),
- graph()->start());
-
- // Create the instance_node node to pass as parameter. It is loaded from
- // an actual reference to an instance or a placeholder reference,
- // called {WasmExportedFunction} via the {WasmExportedFunctionData}
- // structure.
+ Node* js_closure = Param(Linkage::kJSCallClosureParamIndex, "%closure");
+ Node* js_context = Param(
+ Linkage::GetJSCallContextParamIndex(wasm_param_count + 1), "%context");
Node* function_data = gasm_->LoadFunctionDataFromJSFunction(js_closure);
- instance_node_.set(gasm_->LoadExportedFunctionInstance(function_data));
if (!wasm::IsJSCompatibleSignature(sig_, module_, enabled_features_)) {
// Throw a TypeError. Use the js_context of the calling javascript
@@ -6830,9 +6795,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* undefined_node) {
// Check function strict bit.
Node* shared_function_info = gasm_->LoadSharedFunctionInfo(callable_node);
- Node* flags =
- gasm_->Load(MachineType::Int32(), shared_function_info,
- wasm::ObjectAccess::FlagsOffsetInSharedFunctionInfo());
+ Node* flags = gasm_->LoadFromObject(
+ MachineType::Int32(), shared_function_info,
+ wasm::ObjectAccess::FlagsOffsetInSharedFunctionInfo());
Node* strict_check =
Binop(wasm::kExprI32And, flags,
Int32Constant(SharedFunctionInfo::IsNativeBit::kMask |
@@ -6843,8 +6808,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
BranchHint::kNone);
Node* old_effect = effect();
SetControl(strict_d.if_false);
- Node* global_proxy =
- LOAD_FIXED_ARRAY_SLOT_PTR(native_context, Context::GLOBAL_PROXY_INDEX);
+ Node* global_proxy = gasm_->LoadFixedArrayElementPtr(
+ native_context, Context::GLOBAL_PROXY_INDEX);
SetEffectControl(strict_d.EffectPhi(old_effect, global_proxy),
strict_d.merge);
return strict_d.Phi(MachineRepresentation::kTagged, undefined_node,
@@ -6855,9 +6820,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
int wasm_count = static_cast<int>(sig_->parameter_count());
// Build the start and the parameter nodes.
- SetEffectControl(Start(wasm_count + 4));
-
- instance_node_.set(Param(wasm::kWasmInstanceParameterIndex));
+ Start(wasm_count + 4);
Node* native_context =
LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer());
@@ -6875,7 +6838,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// The callable is passed as the last parameter, after Wasm arguments.
Node* callable_node = Param(wasm_count + 1);
- Node* undefined_node = BuildLoadUndefinedValueFromInstance();
+ Node* undefined_node = UndefinedValue();
Node* call = nullptr;
@@ -6953,7 +6916,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
base::SmallVector<Node*, 16> args(wasm_count + 7);
int pos = 0;
args[pos++] =
- GetBuiltinPointerTarget(mcgraph(), Builtins::kCall_ReceiverIsAny);
+ gasm_->GetBuiltinPointerTarget(Builtins::kCall_ReceiverIsAny);
args[pos++] = callable_node;
args[pos++] = Int32Constant(wasm_count); // argument count
args[pos++] = undefined_node; // receiver
@@ -6998,7 +6961,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
BuildMultiReturnFixedArrayFromIterable(sig_, call, native_context);
base::SmallVector<Node*, 8> wasm_values(sig_->return_count());
for (unsigned i = 0; i < sig_->return_count(); ++i) {
- wasm_values[i] = FromJS(LOAD_FIXED_ARRAY_SLOT_ANY(fixed_array, i),
+ wasm_values[i] = FromJS(gasm_->LoadFixedArrayElementAny(fixed_array, i),
native_context, sig_->GetReturn(i));
}
BuildModifyThreadInWasmFlag(true);
@@ -7049,8 +7012,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
BuildModifyThreadInWasmFlag(false);
Node* isolate_root = BuildLoadIsolateRoot();
Node* fp_value = graph()->NewNode(mcgraph()->machine()->LoadFramePointer());
- STORE_RAW(isolate_root, Isolate::c_entry_fp_offset(), fp_value,
- MachineType::PointerRepresentation(), kNoWriteBarrier);
+ gasm_->Store(StoreRepresentation(MachineType::PointerRepresentation(),
+ kNoWriteBarrier),
+ isolate_root, Isolate::c_entry_fp_offset(), fp_value);
// TODO(jkummerow): Load the address from the {host_data}, and cache
// wrappers per signature.
@@ -7106,22 +7070,16 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
if (ContainsInt64(sig_)) LowerInt64(kCalledFromWasm);
}
- void BuildJSToJSWrapper(Isolate* isolate) {
+ void BuildJSToJSWrapper() {
int wasm_count = static_cast<int>(sig_->parameter_count());
// Build the start and the parameter nodes.
int param_count = 1 /* closure */ + 1 /* receiver */ + wasm_count +
1 /* new.target */ + 1 /* #arg */ + 1 /* context */;
- SetEffectControl(Start(param_count));
+ Start(param_count);
Node* closure = Param(Linkage::kJSCallClosureParamIndex);
Node* context = Param(Linkage::GetJSCallContextParamIndex(wasm_count + 1));
- // Since JS-to-JS wrappers are specific to one Isolate, it is OK to embed
- // values (for undefined and root) directly into the instruction stream.
- isolate_root_node_ = mcgraph()->IntPtrConstant(isolate->isolate_root());
- undefined_value_node_ = graph()->NewNode(mcgraph()->common()->HeapConstant(
- isolate->factory()->undefined_value()));
-
// Throw a TypeError if the signature is incompatible with JavaScript.
if (!wasm::IsJSCompatibleSignature(sig_, module_, enabled_features_)) {
BuildCallToRuntimeWithContext(Runtime::kWasmThrowJSTypeError, context,
@@ -7132,18 +7090,17 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Load the original callable from the closure.
Node* func_data = gasm_->LoadFunctionDataFromJSFunction(closure);
- Node* callable = LOAD_TAGGED_ANY(
- func_data,
+ Node* callable = gasm_->LoadFromObject(
+ MachineType::AnyTagged(), func_data,
wasm::ObjectAccess::ToTagged(WasmJSFunctionData::kCallableOffset));
// Call the underlying closure.
base::SmallVector<Node*, 16> args(wasm_count + 7);
int pos = 0;
- args[pos++] =
- GetBuiltinPointerTarget(mcgraph(), Builtins::kCall_ReceiverIsAny);
+ args[pos++] = gasm_->GetBuiltinPointerTarget(Builtins::kCall_ReceiverIsAny);
args[pos++] = callable;
- args[pos++] = Int32Constant(wasm_count); // argument count
- args[pos++] = BuildLoadUndefinedValueFromInstance(); // receiver
+ args[pos++] = Int32Constant(wasm_count); // argument count
+ args[pos++] = UndefinedValue(); // receiver
auto call_descriptor = Linkage::GetStubCallDescriptor(
graph()->zone(), CallTrampolineDescriptor{}, wasm_count + 1,
@@ -7167,7 +7124,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Convert return JS values to wasm numbers and back to JS values.
Node* jsval;
if (sig_->return_count() == 0) {
- jsval = BuildLoadUndefinedValueFromInstance();
+ jsval = UndefinedValue();
} else if (sig_->return_count() == 1) {
jsval = ToJS(FromJS(call, context, sig_->GetReturn()), sig_->GetReturn());
} else {
@@ -7179,9 +7136,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* result_fixed_array = gasm_->LoadJSArrayElements(jsval);
for (unsigned i = 0; i < sig_->return_count(); ++i) {
const auto& type = sig_->GetReturn(i);
- Node* elem = LOAD_FIXED_ARRAY_SLOT_ANY(fixed_array, i);
+ Node* elem = gasm_->LoadFixedArrayElementAny(fixed_array, i);
Node* cast = ToJS(FromJS(elem, context, type), type);
- STORE_FIXED_ARRAY_SLOT_ANY(result_fixed_array, i, cast);
+ gasm_->StoreFixedArrayElementAny(result_fixed_array, i, cast);
}
}
Return(jsval);
@@ -7189,7 +7146,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
void BuildCWasmEntry() {
// +1 offset for first parameter index being -1.
- SetEffectControl(Start(CWasmEntryParameters::kNumParameters + 1));
+ Start(CWasmEntryParameters::kNumParameters + 1);
Node* code_entry = Param(CWasmEntryParameters::kCodeEntry);
Node* object_ref = Param(CWasmEntryParameters::kObjectRef);
@@ -7197,9 +7154,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* c_entry_fp = Param(CWasmEntryParameters::kCEntryFp);
Node* fp_value = graph()->NewNode(mcgraph()->machine()->LoadFramePointer());
- STORE_RAW(fp_value, TypedFrameConstants::kFirstPushedFrameValueOffset,
- c_entry_fp, MachineType::PointerRepresentation(),
- kNoWriteBarrier);
+ gasm_->Store(StoreRepresentation(MachineType::PointerRepresentation(),
+ kNoWriteBarrier),
+ fp_value, TypedFrameConstants::kFirstPushedFrameValueOffset,
+ c_entry_fp);
int wasm_arg_count = static_cast<int>(sig_->parameter_count());
base::SmallVector<Node*, 16> args(wasm_arg_count + 4);
@@ -7265,7 +7223,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
};
Signature<MachineRepresentation> c_entry_sig(1, 4, sig_reps);
Int64Lowering r(mcgraph()->graph(), mcgraph()->machine(),
- mcgraph()->common(), mcgraph()->zone(), &c_entry_sig);
+ mcgraph()->common(), gasm_->simplified(),
+ mcgraph()->zone(), &c_entry_sig);
r.LowerGraph();
}
}
@@ -7273,7 +7232,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
private:
const wasm::WasmModule* module_;
StubCallMode stub_mode_;
- SetOncePointer<Node> undefined_value_node_;
SetOncePointer<const Operator> int32_to_heapnumber_operator_;
SetOncePointer<const Operator> tagged_non_smi_to_int32_operator_;
SetOncePointer<const Operator> float32_to_number_operator_;
@@ -7288,11 +7246,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
void BuildInlinedJSToWasmWrapper(
Zone* zone, MachineGraph* mcgraph, const wasm::FunctionSig* signature,
- const wasm::WasmModule* module, compiler::SourcePositionTable* spt,
- StubCallMode stub_mode, wasm::WasmFeatures features,
- const JSWasmCallData* js_wasm_call_data, Node* frame_state) {
- WasmWrapperGraphBuilder builder(zone, mcgraph, signature, module, spt,
- stub_mode, features);
+ const wasm::WasmModule* module, Isolate* isolate,
+ compiler::SourcePositionTable* spt, StubCallMode stub_mode,
+ wasm::WasmFeatures features, const JSWasmCallData* js_wasm_call_data,
+ Node* frame_state) {
+ WasmWrapperGraphBuilder builder(zone, mcgraph, signature, module, isolate,
+ spt, stub_mode, features);
builder.BuildJSToWasmWrapper(false, js_wasm_call_data, frame_state);
}
@@ -7313,8 +7272,8 @@ std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
InstructionSelector::AlignmentRequirements());
MachineGraph* mcgraph = zone->New<MachineGraph>(graph, common, machine);
- WasmWrapperGraphBuilder builder(zone.get(), mcgraph, sig, module, nullptr,
- StubCallMode::kCallBuiltinPointer,
+ WasmWrapperGraphBuilder builder(zone.get(), mcgraph, sig, module, isolate,
+ nullptr, StubCallMode::kCallBuiltinPointer,
enabled_features);
builder.BuildJSToWasmWrapper(is_import);
@@ -7525,9 +7484,7 @@ wasm::WasmCompilationResult CompileWasmMathIntrinsic(
source_positions);
// Set up the graph start.
- Node* start = builder.Start(static_cast<int>(sig->parameter_count() + 1 + 1));
- builder.SetEffectControl(start);
- builder.set_instance_node(builder.Param(wasm::kWasmInstanceParameterIndex));
+ builder.Start(static_cast<int>(sig->parameter_count() + 1 + 1));
// Generate either a unop or a binop.
Node* node = nullptr;
@@ -7593,7 +7550,7 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper(
source_positions ? zone.New<SourcePositionTable>(graph) : nullptr;
WasmWrapperGraphBuilder builder(
- &zone, mcgraph, sig, env->module, source_position_table,
+ &zone, mcgraph, sig, env->module, nullptr, source_position_table,
StubCallMode::kCallWasmRuntimeStub, env->enabled_features);
builder.BuildWasmToJSWrapper(kind, expected_arity);
@@ -7638,16 +7595,14 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine* wasm_engine,
InstructionSelector::AlignmentRequirements()));
WasmWrapperGraphBuilder builder(
- &zone, mcgraph, sig, native_module->module(), source_positions,
+ &zone, mcgraph, sig, native_module->module(), nullptr, source_positions,
StubCallMode::kCallWasmRuntimeStub, native_module->enabled_features());
// Set up the graph start.
int param_count = static_cast<int>(sig->parameter_count()) +
1 /* offset for first parameter index being -1 */ +
1 /* Wasm instance */ + 1 /* kExtraCallableParam */;
- Node* start = builder.Start(param_count);
- builder.SetEffectControl(start);
- builder.set_instance_node(builder.Param(wasm::kWasmInstanceParameterIndex));
+ builder.Start(param_count);
builder.BuildCapiCallWrapper(address);
// Run the compiler pipeline to generate machine code.
@@ -7689,7 +7644,7 @@ MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
MachineGraph* mcgraph = zone->New<MachineGraph>(graph, common, machine);
WasmWrapperGraphBuilder builder(zone.get(), mcgraph, sig, nullptr, nullptr,
- StubCallMode::kCallWasmRuntimeStub,
+ nullptr, StubCallMode::kCallWasmRuntimeStub,
wasm::WasmFeatures::FromIsolate(isolate));
builder.BuildWasmToJSWrapper(kind, expected_arity);
@@ -7736,10 +7691,10 @@ MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
InstructionSelector::AlignmentRequirements());
MachineGraph* mcgraph = zone->New<MachineGraph>(graph, common, machine);
- WasmWrapperGraphBuilder builder(zone.get(), mcgraph, sig, module, nullptr,
- StubCallMode::kCallBuiltinPointer,
+ WasmWrapperGraphBuilder builder(zone.get(), mcgraph, sig, module, isolate,
+ nullptr, StubCallMode::kCallBuiltinPointer,
wasm::WasmFeatures::FromIsolate(isolate));
- builder.BuildJSToJSWrapper(isolate);
+ builder.BuildJSToJSWrapper();
int wasm_count = static_cast<int>(sig->parameter_count());
CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
@@ -7783,7 +7738,7 @@ Handle<Code> CompileCWasmEntry(Isolate* isolate, const wasm::FunctionSig* sig,
MachineGraph* mcgraph = zone->New<MachineGraph>(graph, common, machine);
WasmWrapperGraphBuilder builder(zone.get(), mcgraph, sig, module, nullptr,
- StubCallMode::kCallBuiltinPointer,
+ nullptr, StubCallMode::kCallBuiltinPointer,
wasm::WasmFeatures::FromIsolate(isolate));
builder.BuildCWasmEntry();
@@ -7829,14 +7784,15 @@ bool BuildGraphForWasmFunction(AccountingAllocator* allocator,
const wasm::FunctionBody& func_body,
int func_index, wasm::WasmFeatures* detected,
MachineGraph* mcgraph,
+ std::vector<compiler::WasmLoopInfo>* loop_infos,
NodeOriginTable* node_origins,
SourcePositionTable* source_positions) {
// Create a TF graph during decoding.
WasmGraphBuilder builder(env, mcgraph->zone(), mcgraph, func_body.sig,
source_positions);
- wasm::VoidResult graph_construction_result =
- wasm::BuildTFGraph(allocator, env->enabled_features, env->module,
- &builder, detected, func_body, node_origins);
+ wasm::VoidResult graph_construction_result = wasm::BuildTFGraph(
+ allocator, env->enabled_features, env->module, &builder, detected,
+ func_body, loop_infos, node_origins);
if (graph_construction_result.failed()) {
if (FLAG_trace_wasm_compiler) {
StdoutStream{} << "Compilation failed: "
@@ -7852,7 +7808,8 @@ bool BuildGraphForWasmFunction(AccountingAllocator* allocator,
WasmGraphBuilder::kCalledFromWasm);
if (builder.has_simd() &&
(!CpuFeatures::SupportsWasmSimd128() || env->lower_simd)) {
- SimdScalarLowering(mcgraph, sig).LowerGraph();
+ SimplifiedOperatorBuilder simplified(mcgraph->zone());
+ SimdScalarLowering(mcgraph, &simplified, sig).LowerGraph();
// SimdScalarLowering changes all v128 to 4 i32, so update the machine
// signature for the call to LowerInt64.
@@ -7943,9 +7900,12 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
: nullptr;
SourcePositionTable* source_positions =
mcgraph->zone()->New<SourcePositionTable>(mcgraph->graph());
+
+ std::vector<WasmLoopInfo> loop_infos;
+
if (!BuildGraphForWasmFunction(wasm_engine->allocator(), env, func_body,
- func_index, detected, mcgraph, node_origins,
- source_positions)) {
+ func_index, detected, mcgraph, &loop_infos,
+ node_origins, source_positions)) {
return wasm::WasmCompilationResult{};
}
@@ -7966,7 +7926,7 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
Pipeline::GenerateCodeForWasmFunction(
&info, wasm_engine, mcgraph, call_descriptor, source_positions,
- node_origins, func_body, env->module, func_index);
+ node_origins, func_body, env->module, func_index, &loop_infos);
if (counters) {
counters->wasm_compile_function_peak_memory_bytes()->AddSample(
@@ -7984,8 +7944,9 @@ class LinkageLocationAllocator {
public:
template <size_t kNumGpRegs, size_t kNumFpRegs>
constexpr LinkageLocationAllocator(const Register (&gp)[kNumGpRegs],
- const DoubleRegister (&fp)[kNumFpRegs])
- : allocator_(wasm::LinkageAllocator(gp, fp)) {}
+ const DoubleRegister (&fp)[kNumFpRegs],
+ int slot_offset)
+ : allocator_(wasm::LinkageAllocator(gp, fp)), slot_offset_(slot_offset) {}
LinkageLocation Next(MachineRepresentation rep) {
MachineType type = MachineType::TypeForRepresentation(rep);
@@ -7999,15 +7960,19 @@ class LinkageLocationAllocator {
return LinkageLocation::ForRegister(reg_code, type);
}
// Cannot use register; use stack slot.
- int index = -1 - allocator_.NextStackSlot(rep);
+ int index = -1 - (slot_offset_ + allocator_.NextStackSlot(rep));
return LinkageLocation::ForCallerFrameSlot(index, type);
}
- void SetStackOffset(int offset) { allocator_.SetStackOffset(offset); }
int NumStackSlots() const { return allocator_.NumStackSlots(); }
+ void EndSlotArea() { allocator_.EndSlotArea(); }
private:
wasm::LinkageAllocator allocator_;
+ // Since params and returns are in different stack frames, we must allocate
+ // them separately. Parameter slots don't need an offset, but return slots
+ // must be offset to just past the parameter slots, using this |slot_offset_|.
+ int slot_offset_;
};
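For readers following the slot arithmetic, here is a minimal self-contained sketch (hypothetical values; only the index formula is taken from the allocator above) of how |slot_offset_| keeps return slots from colliding with parameter slots:

// Sketch of the index computation in LinkageLocationAllocator::Next.
// Mirrors: int index = -1 - (slot_offset_ + allocator_.NextStackSlot(rep));
#include <cassert>

constexpr int CallerFrameSlot(int slot_offset, int next_stack_slot) {
  return -1 - (slot_offset + next_stack_slot);
}

int main() {
  const int parameter_slots = 3;  // padded parameter stack slot count (hypothetical)
  // Parameters are allocated with slot_offset == 0 ...
  assert(CallerFrameSlot(0, 0) == -1);                // first parameter slot
  assert(CallerFrameSlot(0, 2) == -3);                // last parameter slot
  // ... and returns with slot_offset == parameter_slots, so their indices
  // start just past the parameter area, as the old SetStackOffset() call did.
  assert(CallerFrameSlot(parameter_slots, 0) == -4);  // first return slot
  return 0;
}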
} // namespace
@@ -8025,8 +7990,8 @@ CallDescriptor* GetWasmCallDescriptor(
fsig->parameter_count() + extra_params);
// Add register and/or stack parameter(s).
- LinkageLocationAllocator params(wasm::kGpParamRegisters,
- wasm::kFpParamRegisters);
+ LinkageLocationAllocator params(
+ wasm::kGpParamRegisters, wasm::kFpParamRegisters, 0 /* no slot offset */);
// The instance object.
locations.AddParam(params.Next(MachineRepresentation::kTaggedPointer));
@@ -8043,6 +8008,10 @@ CallDescriptor* GetWasmCallDescriptor(
auto l = params.Next(param);
locations.AddParamAt(i + param_offset, l);
}
+
+ // End the untagged area, so tagged slots come after.
+ params.EndSlotArea();
+
for (size_t i = 0; i < parameter_count; i++) {
MachineRepresentation param = fsig->GetParam(i).machine_representation();
// Skip untagged parameters.
@@ -8058,22 +8027,20 @@ CallDescriptor* GetWasmCallDescriptor(
kJSFunctionRegister.code(), MachineType::TaggedPointer()));
}
+ int parameter_slots = AddArgumentPaddingSlots(params.NumStackSlots());
+
// Add return location(s).
LinkageLocationAllocator rets(wasm::kGpReturnRegisters,
- wasm::kFpReturnRegisters);
-
- int parameter_slots = params.NumStackSlots();
- if (ShouldPadArguments(parameter_slots)) parameter_slots++;
-
- rets.SetStackOffset(parameter_slots);
+ wasm::kFpReturnRegisters, parameter_slots);
const int return_count = static_cast<int>(locations.return_count_);
for (int i = 0; i < return_count; i++) {
MachineRepresentation ret = fsig->GetReturn(i).machine_representation();
- auto l = rets.Next(ret);
- locations.AddReturn(l);
+ locations.AddReturn(rets.Next(ret));
}
+ int return_slots = rets.NumStackSlots();
+
const RegList kCalleeSaveRegisters = 0;
const RegList kCalleeSaveFPRegisters = 0;
@@ -8100,7 +8067,7 @@ CallDescriptor* GetWasmCallDescriptor(
target_type, // target MachineType
target_loc, // target location
locations.Build(), // location_sig
- parameter_slots, // stack_parameter_count
+ parameter_slots, // parameter slot count
compiler::Operator::kNoProperties, // properties
kCalleeSaveRegisters, // callee-saved registers
kCalleeSaveFPRegisters, // callee-saved fp regs
@@ -8108,7 +8075,7 @@ CallDescriptor* GetWasmCallDescriptor(
"wasm-call", // debug name
StackArgumentOrder::kDefault, // order of the arguments in the stack
0, // allocatable registers
- rets.NumStackSlots() - parameter_slots); // stack_return_count
+ return_slots); // return slot count
}
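A short worked example of the new accounting (hypothetical slot counts; the relationship follows from the old and new code above): previously the returns allocator was seeded with SetStackOffset(parameter_slots), so its NumStackSlots() covered the parameter area as well and the descriptor stored NumStackSlots() - parameter_slots; with |slot_offset_| only biasing the indices, rets.NumStackSlots() is already the return slot count.

// Hypothetical slot counts illustrating why the explicit subtraction is gone.
#include <cassert>

int main() {
  const int parameter_slots = 3;   // AddArgumentPaddingSlots(params.NumStackSlots())
  const int raw_return_slots = 2;  // stack slots consumed by the returns

  // Old scheme: returns counted from parameter_slots upward.
  const int old_stack_return_count =
      (parameter_slots + raw_return_slots) - parameter_slots;

  // New scheme: the offset shifts indices, not the count.
  const int new_return_slots = raw_return_slots;

  assert(old_stack_return_count == new_return_slots);  // both equal 2
  return 0;
}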
namespace {
@@ -8141,8 +8108,9 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith(
(call_descriptor->GetInputLocation(call_descriptor->InputCount() - 1) ==
LinkageLocation::ForRegister(kJSFunctionRegister.code(),
MachineType::TaggedPointer()));
- LinkageLocationAllocator params(wasm::kGpParamRegisters,
- wasm::kFpParamRegisters);
+ LinkageLocationAllocator params(
+ wasm::kGpParamRegisters, wasm::kFpParamRegisters, 0 /* no slot offset */);
+
for (size_t i = 0, e = call_descriptor->ParameterCount() -
(has_callable_param ? 1 : 0);
i < e; i++) {
@@ -8160,9 +8128,11 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith(
kJSFunctionRegister.code(), MachineType::TaggedPointer()));
}
+ int parameter_slots = AddArgumentPaddingSlots(params.NumStackSlots());
+
LinkageLocationAllocator rets(wasm::kGpReturnRegisters,
- wasm::kFpReturnRegisters);
- rets.SetStackOffset(params.NumStackSlots());
+ wasm::kFpReturnRegisters, parameter_slots);
+
for (size_t i = 0; i < call_descriptor->ReturnCount(); i++) {
if (call_descriptor->GetReturnType(i) == input_type) {
for (size_t j = 0; j < num_replacements; j++) {
@@ -8174,20 +8144,22 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith(
}
}
- return zone->New<CallDescriptor>( // --
- call_descriptor->kind(), // kind
- call_descriptor->GetInputType(0), // target MachineType
- call_descriptor->GetInputLocation(0), // target location
- locations.Build(), // location_sig
- params.NumStackSlots(), // stack_parameter_count
- call_descriptor->properties(), // properties
- call_descriptor->CalleeSavedRegisters(), // callee-saved registers
- call_descriptor->CalleeSavedFPRegisters(), // callee-saved fp regs
- call_descriptor->flags(), // flags
- call_descriptor->debug_name(), // debug name
- call_descriptor->GetStackArgumentOrder(), // stack order
- call_descriptor->AllocatableRegisters(), // allocatable registers
- rets.NumStackSlots() - params.NumStackSlots()); // stack_return_count
+ int return_slots = rets.NumStackSlots();
+
+ return zone->New<CallDescriptor>( // --
+ call_descriptor->kind(), // kind
+ call_descriptor->GetInputType(0), // target MachineType
+ call_descriptor->GetInputLocation(0), // target location
+ locations.Build(), // location_sig
+ parameter_slots, // parameter slot count
+ call_descriptor->properties(), // properties
+ call_descriptor->CalleeSavedRegisters(), // callee-saved registers
+ call_descriptor->CalleeSavedFPRegisters(), // callee-saved fp regs
+ call_descriptor->flags(), // flags
+ call_descriptor->debug_name(), // debug name
+ call_descriptor->GetStackArgumentOrder(), // stack order
+ call_descriptor->AllocatableRegisters(), // allocatable registers
+ return_slots); // return slot count
}
} // namespace
@@ -8223,18 +8195,9 @@ AssemblerOptions WasmStubAssemblerOptions() {
#undef FATAL_UNSUPPORTED_OPCODE
#undef WASM_INSTANCE_OBJECT_SIZE
-#undef WASM_INSTANCE_OBJECT_OFFSET
#undef LOAD_INSTANCE_FIELD
-#undef LOAD_TAGGED_POINTER
-#undef LOAD_TAGGED_ANY
-#undef LOAD_FIXED_ARRAY_SLOT
-#undef LOAD_FIXED_ARRAY_SLOT_SMI
-#undef LOAD_FIXED_ARRAY_SLOT_PTR
-#undef LOAD_FIXED_ARRAY_SLOT_ANY
-#undef STORE_RAW
-#undef STORE_RAW_NODE_OFFSET
-#undef STORE_FIXED_ARRAY_SLOT_SMI
-#undef STORE_FIXED_ARRAY_SLOT_ANY
+#undef LOAD_MUTABLE_INSTANCE_FIELD
+#undef LOAD_ROOT
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index e6614f1c67f..a77e76a05d9 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_COMPILER_WASM_COMPILER_H_
#define V8_COMPILER_WASM_COMPILER_H_
@@ -177,6 +181,17 @@ struct WasmInstanceCacheNodes {
Node* mem_mask;
};
+struct WasmLoopInfo {
+ Node* header;
+ uint32_t nesting_depth;
+ bool is_innermost;
+
+ WasmLoopInfo(Node* header, uint32_t nesting_depth, bool is_innermost)
+ : header(header),
+ nesting_depth(nesting_depth),
+ is_innermost(is_innermost) {}
+};
+
// Abstracts details of building TurboFan graph nodes for wasm to separate
// the wasm decoder from the internal details of TurboFan.
class WasmGraphBuilder {
@@ -206,21 +221,22 @@ class WasmGraphBuilder {
V8_EXPORT_PRIVATE WasmGraphBuilder(
wasm::CompilationEnv* env, Zone* zone, MachineGraph* mcgraph,
const wasm::FunctionSig* sig,
- compiler::SourcePositionTable* spt = nullptr);
+ compiler::SourcePositionTable* spt = nullptr)
+ : WasmGraphBuilder(env, zone, mcgraph, sig, spt, nullptr) {}
V8_EXPORT_PRIVATE ~WasmGraphBuilder();
//-----------------------------------------------------------------------
// Operations independent of {control} or {effect}.
//-----------------------------------------------------------------------
- Node* Start(unsigned params);
- Node* Param(unsigned index);
+ void Start(unsigned params);
+ Node* Param(int index, const char* debug_name = nullptr);
Node* Loop(Node* entry);
- Node* TerminateLoop(Node* effect, Node* control);
+ void TerminateLoop(Node* effect, Node* control);
Node* LoopExit(Node* loop_node);
// Assumes current control() is the corresponding loop exit.
Node* LoopExitValue(Node* value, MachineRepresentation representation);
- Node* TerminateThrow(Node* effect, Node* control);
+ void TerminateThrow(Node* effect, Node* control);
Node* Merge(unsigned count, Node** controls);
template <typename... Nodes>
Node* Merge(Node* fst, Nodes*... args);
@@ -266,18 +282,21 @@ class WasmGraphBuilder {
Node* BranchNoHint(Node* cond, Node** true_node, Node** false_node);
Node* BranchExpectFalse(Node* cond, Node** true_node, Node** false_node);
- Node* TrapIfTrue(wasm::TrapReason reason, Node* cond,
+ void TrapIfTrue(wasm::TrapReason reason, Node* cond,
+ wasm::WasmCodePosition position);
+ void TrapIfFalse(wasm::TrapReason reason, Node* cond,
wasm::WasmCodePosition position);
- Node* TrapIfFalse(wasm::TrapReason reason, Node* cond,
- wasm::WasmCodePosition position);
- Node* TrapIfEq32(wasm::TrapReason reason, Node* node, int32_t val,
+ Node* Select(Node* cond, Node* true_node, Node* false_node,
+ wasm::ValueType type);
+
+ void TrapIfEq32(wasm::TrapReason reason, Node* node, int32_t val,
+ wasm::WasmCodePosition position);
+ void ZeroCheck32(wasm::TrapReason reason, Node* node,
wasm::WasmCodePosition position);
- Node* ZeroCheck32(wasm::TrapReason reason, Node* node,
- wasm::WasmCodePosition position);
- Node* TrapIfEq64(wasm::TrapReason reason, Node* node, int64_t val,
+ void TrapIfEq64(wasm::TrapReason reason, Node* node, int64_t val,
+ wasm::WasmCodePosition position);
+ void ZeroCheck64(wasm::TrapReason reason, Node* node,
wasm::WasmCodePosition position);
- Node* ZeroCheck64(wasm::TrapReason reason, Node* node,
- wasm::WasmCodePosition position);
Node* Switch(unsigned count, Node* key);
Node* IfValue(int32_t value, Node* sw);
@@ -289,10 +308,10 @@ class WasmGraphBuilder {
return Return(ArrayVector(arr));
}
- Node* TraceFunctionEntry(wasm::WasmCodePosition position);
- Node* TraceFunctionExit(Vector<Node*> vals, wasm::WasmCodePosition position);
+ void TraceFunctionEntry(wasm::WasmCodePosition position);
+ void TraceFunctionExit(Vector<Node*> vals, wasm::WasmCodePosition position);
- Node* Trap(wasm::TrapReason reason, wasm::WasmCodePosition position);
+ void Trap(wasm::TrapReason reason, wasm::WasmCodePosition position);
Node* CallDirect(uint32_t index, Vector<Node*> args, Vector<Node*> rets,
wasm::WasmCodePosition position);
@@ -309,26 +328,22 @@ class WasmGraphBuilder {
Node* ReturnCallRef(uint32_t sig_index, Vector<Node*> args,
CheckForNull null_check, wasm::WasmCodePosition position);
- // Return value is not expected to be used,
- // but we need it for compatibility with graph-builder-interface.
- Node* BrOnNull(Node* ref_object, Node** non_null_node, Node** null_node);
+ void BrOnNull(Node* ref_object, Node** non_null_node, Node** null_node);
Node* Invert(Node* node);
Node* GlobalGet(uint32_t index);
- Node* GlobalSet(uint32_t index, Node* val);
+ void GlobalSet(uint32_t index, Node* val);
Node* TableGet(uint32_t table_index, Node* index,
wasm::WasmCodePosition position);
- Node* TableSet(uint32_t table_index, Node* index, Node* val,
- wasm::WasmCodePosition position);
+ void TableSet(uint32_t table_index, Node* index, Node* val,
+ wasm::WasmCodePosition position);
//-----------------------------------------------------------------------
// Operations that concern the linear memory.
//-----------------------------------------------------------------------
Node* CurrentMemoryPages();
- Node* TraceMemoryOperation(bool is_store, MachineRepresentation, Node* index,
- uintptr_t offset, wasm::WasmCodePosition);
- Node* Prefetch(Node* index, uint64_t offset, uint32_t alignment,
- bool temporal);
+ void TraceMemoryOperation(bool is_store, MachineRepresentation, Node* index,
+ uintptr_t offset, wasm::WasmCodePosition);
Node* LoadMem(wasm::ValueType type, MachineType memtype, Node* index,
uint64_t offset, uint32_t alignment,
wasm::WasmCodePosition position);
@@ -345,18 +360,14 @@ class WasmGraphBuilder {
Node* LoadLane(wasm::ValueType type, MachineType memtype, Node* value,
Node* index, uint64_t offset, uint32_t alignment,
uint8_t laneidx, wasm::WasmCodePosition position);
- Node* StoreMem(MachineRepresentation mem_rep, Node* index, uint64_t offset,
- uint32_t alignment, Node* val, wasm::WasmCodePosition position,
- wasm::ValueType type);
- Node* StoreLane(MachineRepresentation mem_rep, Node* index, uint64_t offset,
- uint32_t alignment, Node* val, uint8_t laneidx,
- wasm::WasmCodePosition position, wasm::ValueType type);
+ void StoreMem(MachineRepresentation mem_rep, Node* index, uint64_t offset,
+ uint32_t alignment, Node* val, wasm::WasmCodePosition position,
+ wasm::ValueType type);
+ void StoreLane(MachineRepresentation mem_rep, Node* index, uint64_t offset,
+ uint32_t alignment, Node* val, uint8_t laneidx,
+ wasm::WasmCodePosition position, wasm::ValueType type);
static void PrintDebugName(Node* node);
- void set_instance_node(Node* instance_node) {
- this->instance_node_ = instance_node;
- }
-
Node* effect();
Node* control();
Node* SetEffect(Node* node);
@@ -397,8 +408,6 @@ class WasmGraphBuilder {
V8_EXPORT_PRIVATE void LowerInt64(Signature<MachineRepresentation>* sig);
V8_EXPORT_PRIVATE void LowerInt64(CallOrigin origin);
- V8_EXPORT_PRIVATE void SimdScalarLoweringForTesting();
-
void SetSourcePosition(Node* node, wasm::WasmCodePosition position);
Node* S128Zero();
@@ -415,33 +424,33 @@ class WasmGraphBuilder {
Node* AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
uint32_t alignment, uint64_t offset,
wasm::WasmCodePosition position);
- Node* AtomicFence();
+ void AtomicFence();
- Node* MemoryInit(uint32_t data_segment_index, Node* dst, Node* src,
- Node* size, wasm::WasmCodePosition position);
- Node* MemoryCopy(Node* dst, Node* src, Node* size,
- wasm::WasmCodePosition position);
- Node* DataDrop(uint32_t data_segment_index, wasm::WasmCodePosition position);
- Node* MemoryFill(Node* dst, Node* fill, Node* size,
- wasm::WasmCodePosition position);
+ void MemoryInit(uint32_t data_segment_index, Node* dst, Node* src, Node* size,
+ wasm::WasmCodePosition position);
+ void MemoryCopy(Node* dst, Node* src, Node* size,
+ wasm::WasmCodePosition position);
+ void DataDrop(uint32_t data_segment_index, wasm::WasmCodePosition position);
+ void MemoryFill(Node* dst, Node* fill, Node* size,
+ wasm::WasmCodePosition position);
- Node* TableInit(uint32_t table_index, uint32_t elem_segment_index, Node* dst,
- Node* src, Node* size, wasm::WasmCodePosition position);
- Node* ElemDrop(uint32_t elem_segment_index, wasm::WasmCodePosition position);
- Node* TableCopy(uint32_t table_dst_index, uint32_t table_src_index, Node* dst,
- Node* src, Node* size, wasm::WasmCodePosition position);
+ void TableInit(uint32_t table_index, uint32_t elem_segment_index, Node* dst,
+ Node* src, Node* size, wasm::WasmCodePosition position);
+ void ElemDrop(uint32_t elem_segment_index, wasm::WasmCodePosition position);
+ void TableCopy(uint32_t table_dst_index, uint32_t table_src_index, Node* dst,
+ Node* src, Node* size, wasm::WasmCodePosition position);
Node* TableGrow(uint32_t table_index, Node* value, Node* delta);
Node* TableSize(uint32_t table_index);
- Node* TableFill(uint32_t table_index, Node* start, Node* value, Node* count);
+ void TableFill(uint32_t table_index, Node* start, Node* value, Node* count);
Node* StructNewWithRtt(uint32_t struct_index, const wasm::StructType* type,
Node* rtt, Vector<Node*> fields);
Node* StructGet(Node* struct_object, const wasm::StructType* struct_type,
uint32_t field_index, CheckForNull null_check, bool is_signed,
wasm::WasmCodePosition position);
- Node* StructSet(Node* struct_object, const wasm::StructType* struct_type,
- uint32_t field_index, Node* value, CheckForNull null_check,
- wasm::WasmCodePosition position);
+ void StructSet(Node* struct_object, const wasm::StructType* struct_type,
+ uint32_t field_index, Node* value, CheckForNull null_check,
+ wasm::WasmCodePosition position);
Node* ArrayNewWithRtt(uint32_t array_index, const wasm::ArrayType* type,
Node* length, Node* initial_value, Node* rtt,
wasm::WasmCodePosition position);
@@ -449,9 +458,9 @@ class WasmGraphBuilder {
Node* ArrayGet(Node* array_object, const wasm::ArrayType* type, Node* index,
CheckForNull null_check, bool is_signed,
wasm::WasmCodePosition position);
- Node* ArraySet(Node* array_object, const wasm::ArrayType* type, Node* index,
- Node* value, CheckForNull null_check,
- wasm::WasmCodePosition position);
+ void ArraySet(Node* array_object, const wasm::ArrayType* type, Node* index,
+ Node* value, CheckForNull null_check,
+ wasm::WasmCodePosition position);
Node* ArrayLen(Node* array_object, CheckForNull null_check,
wasm::WasmCodePosition position);
Node* I31New(Node* input);
@@ -463,26 +472,26 @@ class WasmGraphBuilder {
Node* RefTest(Node* object, Node* rtt, ObjectReferenceKnowledge config);
Node* RefCast(Node* object, Node* rtt, ObjectReferenceKnowledge config,
wasm::WasmCodePosition position);
- Node* BrOnCast(Node* object, Node* rtt, ObjectReferenceKnowledge config,
- Node** match_control, Node** match_effect,
- Node** no_match_control, Node** no_match_effect);
+ void BrOnCast(Node* object, Node* rtt, ObjectReferenceKnowledge config,
+ Node** match_control, Node** match_effect,
+ Node** no_match_control, Node** no_match_effect);
Node* RefIsData(Node* object, bool object_can_be_null);
Node* RefAsData(Node* object, bool object_can_be_null,
wasm::WasmCodePosition position);
- Node* BrOnData(Node* object, Node* rtt, ObjectReferenceKnowledge config,
- Node** match_control, Node** match_effect,
- Node** no_match_control, Node** no_match_effect);
+ void BrOnData(Node* object, Node* rtt, ObjectReferenceKnowledge config,
+ Node** match_control, Node** match_effect,
+ Node** no_match_control, Node** no_match_effect);
Node* RefIsFunc(Node* object, bool object_can_be_null);
Node* RefAsFunc(Node* object, bool object_can_be_null,
wasm::WasmCodePosition position);
- Node* BrOnFunc(Node* object, Node* rtt, ObjectReferenceKnowledge config,
- Node** match_control, Node** match_effect,
- Node** no_match_control, Node** no_match_effect);
- Node* RefIsI31(Node* object);
- Node* RefAsI31(Node* object, wasm::WasmCodePosition position);
- Node* BrOnI31(Node* object, Node* rtt, ObjectReferenceKnowledge config,
+ void BrOnFunc(Node* object, Node* rtt, ObjectReferenceKnowledge config,
Node** match_control, Node** match_effect,
Node** no_match_control, Node** no_match_effect);
+ Node* RefIsI31(Node* object);
+ Node* RefAsI31(Node* object, wasm::WasmCodePosition position);
+ void BrOnI31(Node* object, Node* rtt, ObjectReferenceKnowledge config,
+ Node** match_control, Node** match_effect,
+ Node** no_match_control, Node** no_match_effect);
bool has_simd() const { return has_simd_; }
@@ -499,8 +508,15 @@ class WasmGraphBuilder {
void RemoveBytecodePositionDecorator();
protected:
+ V8_EXPORT_PRIVATE WasmGraphBuilder(wasm::CompilationEnv* env, Zone* zone,
+ MachineGraph* mcgraph,
+ const wasm::FunctionSig* sig,
+ compiler::SourcePositionTable* spt,
+ Isolate* isolate);
+
Node* NoContextConstant();
+ Node* GetInstance();
Node* BuildLoadIsolateRoot();
// MemBuffer is only called with valid offsets (after bounds checking), so the
@@ -518,9 +534,9 @@ class WasmGraphBuilder {
const Operator* GetSafeLoadOperator(int offset, wasm::ValueType type);
const Operator* GetSafeStoreOperator(int offset, wasm::ValueType type);
Node* BuildChangeEndiannessStore(Node* node, MachineRepresentation rep,
- wasm::ValueType wasmtype = wasm::kWasmStmt);
+ wasm::ValueType wasmtype = wasm::kWasmVoid);
Node* BuildChangeEndiannessLoad(Node* node, MachineType type,
- wasm::ValueType wasmtype = wasm::kWasmStmt);
+ wasm::ValueType wasmtype = wasm::kWasmVoid);
Node* MaskShiftCount32(Node* node);
Node* MaskShiftCount64(Node* node);
@@ -650,9 +666,9 @@ class WasmGraphBuilder {
void DataCheck(Node* object, bool object_can_be_null, Callbacks callbacks);
void FuncCheck(Node* object, bool object_can_be_null, Callbacks callbacks);
- Node* BrOnCastAbs(Node** match_control, Node** match_effect,
- Node** no_match_control, Node** no_match_effect,
- std::function<void(Callbacks)> type_checker);
+ void BrOnCastAbs(Node** match_control, Node** match_effect,
+ Node** no_match_control, Node** no_match_effect,
+ std::function<void(Callbacks)> type_checker);
// Asm.js specific functionality.
Node* BuildI32AsmjsSConvertF32(Node* input);
@@ -712,16 +728,14 @@ class WasmGraphBuilder {
MachineGraph* const mcgraph_;
wasm::CompilationEnv* const env_;
+ Node** parameters_;
+
WasmInstanceCacheNodes* instance_cache_ = nullptr;
- SetOncePointer<Node> instance_node_;
- SetOncePointer<Node> ref_null_node_;
- SetOncePointer<Node> globals_start_;
- SetOncePointer<Node> imported_mutable_globals_;
SetOncePointer<Node> stack_check_code_node_;
- SetOncePointer<Node> isolate_root_node_;
SetOncePointer<const Operator> stack_check_call_operator_;
+ bool use_js_isolate_and_params() const { return isolate_ != nullptr; }
bool has_simd_ = false;
bool needs_stack_check_ = false;
const bool untrusted_code_mitigations_ = true;
@@ -731,6 +745,8 @@ class WasmGraphBuilder {
compiler::WasmDecorator* decorator_ = nullptr;
compiler::SourcePositionTable* const source_position_table_ = nullptr;
+ Isolate* const isolate_;
+ SetOncePointer<Node> instance_node_;
std::unique_ptr<Int64LoweringSpecialCase> lowering_special_case_;
CallDescriptor* i32_atomic_wait_descriptor_ = nullptr;
@@ -741,9 +757,10 @@ enum WasmCallKind { kWasmFunction, kWasmImportWrapper, kWasmCapiFunction };
V8_EXPORT_PRIVATE void BuildInlinedJSToWasmWrapper(
Zone* zone, MachineGraph* mcgraph, const wasm::FunctionSig* signature,
- const wasm::WasmModule* module, compiler::SourcePositionTable* spt,
- StubCallMode stub_mode, wasm::WasmFeatures features,
- const JSWasmCallData* js_wasm_call_data, Node* frame_state);
+ const wasm::WasmModule* module, Isolate* isolate,
+ compiler::SourcePositionTable* spt, StubCallMode stub_mode,
+ wasm::WasmFeatures features, const JSWasmCallData* js_wasm_call_data,
+ Node* frame_state);
V8_EXPORT_PRIVATE CallDescriptor* GetWasmCallDescriptor(
Zone* zone, const wasm::FunctionSig* signature,
diff --git a/deps/v8/src/d8/OWNERS b/deps/v8/src/d8/OWNERS
index 03aa62dda98..a96bac9f5da 100644
--- a/deps/v8/src/d8/OWNERS
+++ b/deps/v8/src/d8/OWNERS
@@ -1,6 +1,5 @@
-binji@chromium.org
-bmeurer@chromium.org
cbruni@chromium.org
clemensb@chromium.org
+marja@chromium.org
ulan@chromium.org
verwaest@chromium.org
diff --git a/deps/v8/src/d8/d8-test.cc b/deps/v8/src/d8/d8-test.cc
new file mode 100644
index 00000000000..e5eb5e419bf
--- /dev/null
+++ b/deps/v8/src/d8/d8-test.cc
@@ -0,0 +1,230 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/d8/d8.h"
+
+#include "include/v8-fast-api-calls.h"
+
+// This file exposes a d8.test.fast_c_api object, which adds a testing facility
+// for writing mjsunit tests that exercise fast API calls. The fast_c_api object
+// contains an `add_all` method with the following signature:
+// double add_all(bool /*should_fallback*/, int32_t, uint32_t,
+// int64_t, uint64_t, float, double), that is wired as a "fast API" method.
+// The fast_c_api object also supports querying the number of fast/slow calls
+// and resetting these counters.
+
+// Make sure to sync the following with src/compiler/globals.h.
+#if defined(V8_TARGET_ARCH_X64)
+#define V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
+#endif
+
+namespace v8 {
+namespace {
+class FastCApiObject {
+ public:
+ static double AddAllFastCallback(ApiObject receiver, bool should_fallback,
+ int32_t arg_i32, uint32_t arg_u32,
+ int64_t arg_i64, uint64_t arg_u64,
+ float arg_f32, double arg_f64,
+ FastApiCallbackOptions& options) {
+ Value* receiver_value = reinterpret_cast<Value*>(&receiver);
+ CHECK(receiver_value->IsObject());
+ FastCApiObject* self = UnwrapObject(Object::Cast(receiver_value));
+ self->fast_call_count_++;
+
+ if (should_fallback) {
+ options.fallback = 1;
+ return 0;
+ }
+
+ return static_cast<double>(arg_i32) + static_cast<double>(arg_u32) +
+ static_cast<double>(arg_i64) + static_cast<double>(arg_u64) +
+ static_cast<double>(arg_f32) + arg_f64;
+ }
+ static void AddAllSlowCallback(const FunctionCallbackInfo<Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+
+ FastCApiObject* self = UnwrapObject(*args.This());
+ self->slow_call_count_++;
+
+ HandleScope handle_scope(isolate);
+
+ double sum = 0;
+ if (args.Length() > 1) {
+ sum += args[1]->Int32Value(isolate->GetCurrentContext()).FromJust();
+ }
+ if (args.Length() > 2) {
+ sum += args[2]->Uint32Value(isolate->GetCurrentContext()).FromJust();
+ }
+ if (args.Length() > 3) {
+ sum += args[3]->IntegerValue(isolate->GetCurrentContext()).FromJust();
+ }
+ if (args.Length() > 4) {
+ sum += args[4]->IntegerValue(isolate->GetCurrentContext()).FromJust();
+ }
+ if (args.Length() > 5) {
+ sum += args[5]->NumberValue(isolate->GetCurrentContext()).FromJust();
+ } else {
+ sum += std::numeric_limits<double>::quiet_NaN();
+ }
+ if (args.Length() > 6) {
+ sum += args[6]->NumberValue(isolate->GetCurrentContext()).FromJust();
+ } else {
+ sum += std::numeric_limits<double>::quiet_NaN();
+ }
+
+ args.GetReturnValue().Set(Number::New(isolate, sum));
+ }
+
+ static int Add32BitIntFastCallback(ApiObject receiver, bool should_fallback,
+ int32_t arg_i32, uint32_t arg_u32,
+ FastApiCallbackOptions& options) {
+ Value* receiver_value = reinterpret_cast<Value*>(&receiver);
+ CHECK(receiver_value->IsObject());
+ FastCApiObject* self = UnwrapObject(Object::Cast(receiver_value));
+ self->fast_call_count_++;
+
+ if (should_fallback) {
+ options.fallback = 1;
+ return 0;
+ }
+
+ return arg_i32 + arg_u32;
+ }
+ static void Add32BitIntSlowCallback(const FunctionCallbackInfo<Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+
+ FastCApiObject* self = UnwrapObject(*args.This());
+ self->slow_call_count_++;
+
+ HandleScope handle_scope(isolate);
+
+ double sum = 0;
+ if (args.Length() > 1) {
+ sum += args[1]->Int32Value(isolate->GetCurrentContext()).FromJust();
+ }
+ if (args.Length() > 2) {
+ sum += args[2]->Uint32Value(isolate->GetCurrentContext()).FromJust();
+ }
+
+ args.GetReturnValue().Set(Number::New(isolate, sum));
+ }
+
+ static void FastCallCount(const FunctionCallbackInfo<Value>& args) {
+ FastCApiObject* self = UnwrapObject(*args.This());
+ args.GetReturnValue().Set(
+ Number::New(args.GetIsolate(), self->fast_call_count()));
+ }
+ static void SlowCallCount(const FunctionCallbackInfo<Value>& args) {
+ FastCApiObject* self = UnwrapObject(*args.This());
+ args.GetReturnValue().Set(
+ Number::New(args.GetIsolate(), self->slow_call_count()));
+ }
+ static void ResetCounts(const FunctionCallbackInfo<Value>& args) {
+ FastCApiObject* self = UnwrapObject(*args.This());
+ self->reset_counts();
+ args.GetReturnValue().Set(Undefined(args.GetIsolate()));
+ }
+ static void SupportsFPParams(const FunctionCallbackInfo<Value>& info) {
+ FastCApiObject* self = UnwrapObject(*info.This());
+ info.GetReturnValue().Set(self->supports_fp_params_);
+ }
+
+ int fast_call_count() const { return fast_call_count_; }
+ int slow_call_count() const { return slow_call_count_; }
+ void reset_counts() {
+ fast_call_count_ = 0;
+ slow_call_count_ = 0;
+ }
+
+ static const int kV8WrapperObjectIndex = 1;
+
+ private:
+ static FastCApiObject* UnwrapObject(Object* object) {
+ i::Address addr = *reinterpret_cast<i::Address*>(object);
+ auto instance_type = i::Internals::GetInstanceType(addr);
+ if (instance_type != i::Internals::kJSObjectType &&
+ instance_type != i::Internals::kJSApiObjectType &&
+ instance_type != i::Internals::kJSSpecialApiObjectType) {
+ return nullptr;
+ }
+ FastCApiObject* wrapped = reinterpret_cast<FastCApiObject*>(
+ object->GetAlignedPointerFromInternalField(kV8WrapperObjectIndex));
+ CHECK_NOT_NULL(wrapped);
+ return wrapped;
+ }
+ int fast_call_count_ = 0, slow_call_count_ = 0;
+#ifdef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
+ bool supports_fp_params_ = true;
+#else // V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
+ bool supports_fp_params_ = false;
+#endif // V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
+};
+
+// The object is statically initialized for simplicity; typically the embedder
+// will take care of managing the lifetime of their C++ objects.
+thread_local FastCApiObject kFastCApiObject;
+} // namespace
+
+// TODO(mslekova): Rename the fast_c_api helper to FastCAPI.
+void CreateObject(const FunctionCallbackInfo<Value>& info) {
+ if (!info.IsConstructCall()) {
+ info.GetIsolate()->ThrowException(
+ v8::Exception::Error(String::NewFromUtf8Literal(
+ info.GetIsolate(),
+ "FastCAPI helper must be constructed with new.")));
+ return;
+ }
+ Local<Object> api_object = info.Holder();
+ api_object->SetAlignedPointerInInternalField(
+ FastCApiObject::kV8WrapperObjectIndex,
+ reinterpret_cast<void*>(&kFastCApiObject));
+ api_object->SetAccessorProperty(
+ String::NewFromUtf8Literal(info.GetIsolate(), "supports_fp_params"),
+ FunctionTemplate::New(info.GetIsolate(), FastCApiObject::SupportsFPParams)
+ ->GetFunction(api_object->GetCreationContext().ToLocalChecked())
+ .ToLocalChecked());
+}
+
+Local<FunctionTemplate> Shell::CreateTestFastCApiTemplate(Isolate* isolate) {
+ Local<FunctionTemplate> api_obj_ctor =
+ FunctionTemplate::New(isolate, CreateObject);
+ Local<Signature> signature = Signature::New(isolate, api_obj_ctor);
+ {
+ CFunction add_all_c_func =
+ CFunction::Make(FastCApiObject::AddAllFastCallback);
+ api_obj_ctor->PrototypeTemplate()->Set(
+ isolate, "add_all",
+ FunctionTemplate::New(isolate, FastCApiObject::AddAllSlowCallback,
+ Local<Value>(), signature, 1,
+ ConstructorBehavior::kThrow,
+ SideEffectType::kHasSideEffect, &add_all_c_func));
+ CFunction add_32bit_int_c_func =
+ CFunction::Make(FastCApiObject::Add32BitIntFastCallback);
+ api_obj_ctor->PrototypeTemplate()->Set(
+ isolate, "add_32bit_int",
+ FunctionTemplate::New(
+ isolate, FastCApiObject::Add32BitIntSlowCallback, Local<Value>(),
+ signature, 1, ConstructorBehavior::kThrow,
+ SideEffectType::kHasSideEffect, &add_32bit_int_c_func));
+ api_obj_ctor->PrototypeTemplate()->Set(
+ isolate, "fast_call_count",
+ FunctionTemplate::New(isolate, FastCApiObject::FastCallCount,
+ Local<Value>(), signature));
+ api_obj_ctor->PrototypeTemplate()->Set(
+ isolate, "slow_call_count",
+ FunctionTemplate::New(isolate, FastCApiObject::SlowCallCount,
+ Local<Value>(), signature));
+ api_obj_ctor->PrototypeTemplate()->Set(
+ isolate, "reset_counts",
+ FunctionTemplate::New(isolate, FastCApiObject::ResetCounts,
+ Local<Value>(), signature));
+ }
+ api_obj_ctor->InstanceTemplate()->SetInternalFieldCount(
+ FastCApiObject::kV8WrapperObjectIndex + 1);
+
+ return api_obj_ctor;
+}
+
+} // namespace v8
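As a compact illustration of the wiring pattern used throughout the file above (a sketch only; MyFastAdd, MySlowAdd, and MakeAddTemplate are hypothetical names, while CFunction::Make and the FunctionTemplate::New overload are the ones exercised in d8-test.cc):

// Minimal sketch: pair a fast API callback with its mandatory slow fallback.
#include "include/v8-fast-api-calls.h"
#include "include/v8.h"

// Fast path: plain C++ arguments, no handles, no heap allocation.
int MyFastAdd(v8::ApiObject receiver, int32_t a, int32_t b,
              v8::FastApiCallbackOptions& options) {
  return a + b;
}

// Slow path: the regular API callback, always required as a fallback.
void MySlowAdd(const v8::FunctionCallbackInfo<v8::Value>& args) {
  v8::Isolate* isolate = args.GetIsolate();
  v8::Local<v8::Context> context = isolate->GetCurrentContext();
  int32_t a = args[0]->Int32Value(context).FromMaybe(0);
  int32_t b = args[1]->Int32Value(context).FromMaybe(0);
  args.GetReturnValue().Set(v8::Number::New(isolate, a + b));
}

v8::Local<v8::FunctionTemplate> MakeAddTemplate(v8::Isolate* isolate) {
  v8::CFunction fast_add = v8::CFunction::Make(MyFastAdd);
  // TurboFan may replace calls to MySlowAdd with direct calls to MyFastAdd
  // when --turbo-fast-api-calls is enabled.
  return v8::FunctionTemplate::New(
      isolate, MySlowAdd, v8::Local<v8::Value>(), v8::Local<v8::Signature>(),
      /*length=*/2, v8::ConstructorBehavior::kThrow,
      v8::SideEffectType::kHasSideEffect, &fast_add);
}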
diff --git a/deps/v8/src/d8/d8.cc b/deps/v8/src/d8/d8.cc
index 63bcdf871f1..d13e424bb15 100644
--- a/deps/v8/src/d8/d8.cc
+++ b/deps/v8/src/d8/d8.cc
@@ -59,7 +59,7 @@
#include "src/trap-handler/trap-handler.h"
#include "src/utils/ostreams.h"
#include "src/utils/utils.h"
-#include "src/wasm/wasm-engine.h"
+#include "src/web-snapshot/web-snapshot.h"
#ifdef V8_FUZZILLI
#include "src/d8/cov.h"
@@ -724,6 +724,27 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
if (!HandleUnhandledPromiseRejections(isolate)) success = false;
}
data->realm_current_ = data->realm_switch_;
+
+ if (options.web_snapshot_config) {
+ std::vector<std::string> exports;
+ if (!ReadLines(options.web_snapshot_config, exports)) {
+ Throw(isolate, "Web snapshots: unable to read config");
+ CHECK(try_catch.HasCaught());
+ ReportException(isolate, &try_catch);
+ return false;
+ }
+
+ i::WebSnapshotSerializer serializer(isolate);
+ i::WebSnapshotData snapshot_data;
+ if (serializer.TakeSnapshot(context, exports, snapshot_data)) {
+ DCHECK_NOT_NULL(snapshot_data.buffer);
+ WriteChars("web.snap", snapshot_data.buffer, snapshot_data.buffer_size);
+ } else {
+ CHECK(try_catch.HasCaught());
+ ReportException(isolate, &try_catch);
+ return false;
+ }
+ }
}
Local<Value> result;
if (!maybe_result.ToLocal(&result)) {
@@ -1234,10 +1255,6 @@ void Shell::DoHostImportModuleDynamically(void* import_data) {
Local<Value> module_namespace = root_module->GetModuleNamespace();
if (i::FLAG_harmony_top_level_await) {
Local<Promise> result_promise(result.As<Promise>());
- if (result_promise->State() == Promise::kRejected) {
- resolver->Reject(realm, result_promise->Result()).ToChecked();
- return;
- }
// Setup callbacks, and then chain them to the result promise.
// ModuleResolutionData will be deleted by the callbacks.
@@ -1326,6 +1343,37 @@ bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) {
return true;
}
+bool Shell::ExecuteWebSnapshot(Isolate* isolate, const char* file_name) {
+ HandleScope handle_scope(isolate);
+
+ PerIsolateData* data = PerIsolateData::Get(isolate);
+ Local<Context> realm = data->realms_[data->realm_current_].Get(isolate);
+ Context::Scope context_scope(realm);
+
+ std::string absolute_path = NormalizePath(file_name, GetWorkingDirectory());
+
+ TryCatch try_catch(isolate);
+ try_catch.SetVerbose(true);
+ int length = 0;
+ std::unique_ptr<uint8_t[]> snapshot_data(
+ reinterpret_cast<uint8_t*>(ReadChars(absolute_path.c_str(), &length)));
+ if (length == 0) {
+ Throw(isolate, "Error reading the web snapshot");
+ DCHECK(try_catch.HasCaught());
+ ReportException(isolate, &try_catch);
+ return false;
+ }
+ i::WebSnapshotDeserializer deserializer(isolate);
+ if (!deserializer.UseWebSnapshot(snapshot_data.get(),
+ static_cast<size_t>(length))) {
+ DCHECK(try_catch.HasCaught());
+ ReportException(isolate, &try_catch);
+ return false;
+ }
+ DCHECK(!try_catch.HasCaught());
+ return true;
+}
+
PerIsolateData::PerIsolateData(Isolate* isolate)
: isolate_(isolate), realms_(nullptr) {
isolate->SetData(0, this);
@@ -1763,6 +1811,92 @@ void Shell::LogGetAndStop(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(result);
}
+void Shell::TestVerifySourcePositions(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ // Check if the argument is a valid function.
+ if (args.Length() != 1) {
+ Throw(isolate, "Expected function as single argument.");
+ return;
+ }
+ auto arg_handle = Utils::OpenHandle(*args[0]);
+ if (!arg_handle->IsHeapObject() || !i::Handle<i::HeapObject>::cast(arg_handle)
+ ->IsJSFunctionOrBoundFunction()) {
+ Throw(isolate, "Expected function as single argument.");
+ return;
+ }
+
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ HandleScope handle_scope(isolate);
+
+ auto callable = i::Handle<i::JSFunctionOrBoundFunction>::cast(arg_handle);
+ while (callable->IsJSBoundFunction()) {
+ auto bound_function = i::Handle<i::JSBoundFunction>::cast(callable);
+ auto bound_target = bound_function->bound_target_function();
+ callable =
+ handle(i::JSFunctionOrBoundFunction::cast(bound_target), i_isolate);
+ }
+
+ i::Handle<i::JSFunction> function = i::Handle<i::JSFunction>::cast(callable);
+ if (!function->shared().HasBytecodeArray()) {
+ Throw(isolate, "Function has no BytecodeArray attached.");
+ return;
+ }
+ i::Handle<i::BytecodeArray> bytecodes =
+ handle(function->shared().GetBytecodeArray(i_isolate), i_isolate);
+ i::interpreter::BytecodeArrayIterator bytecode_iterator(bytecodes);
+ bool has_baseline = function->shared().HasBaselineData();
+ i::Handle<i::ByteArray> bytecode_offsets;
+ std::unique_ptr<i::baseline::BytecodeOffsetIterator> offset_iterator;
+ if (has_baseline) {
+ bytecode_offsets =
+ handle(i::ByteArray::cast(
+ function->shared().GetCode().bytecode_offset_table()),
+ i_isolate);
+ offset_iterator = std::make_unique<i::baseline::BytecodeOffsetIterator>(
+ bytecode_offsets, bytecodes);
+ // A freshly initiated BytecodeOffsetIterator points to the prologue.
+ DCHECK_EQ(offset_iterator->current_pc_start_offset(), 0);
+ DCHECK_EQ(offset_iterator->current_bytecode_offset(),
+ i::kFunctionEntryBytecodeOffset);
+ offset_iterator->Advance();
+ }
+ while (!bytecode_iterator.done()) {
+ if (has_baseline) {
+ if (offset_iterator->current_bytecode_offset() !=
+ bytecode_iterator.current_offset()) {
+ Throw(isolate, "Baseline bytecode offset mismatch.");
+ return;
+ }
+ // Check that we map every address to this bytecode correctly.
+ // The start address is exclusive and the end address inclusive.
+ for (i::Address pc = offset_iterator->current_pc_start_offset() + 1;
+ pc <= offset_iterator->current_pc_end_offset(); ++pc) {
+ i::baseline::BytecodeOffsetIterator pc_lookup(bytecode_offsets,
+ bytecodes);
+ pc_lookup.AdvanceToPCOffset(pc);
+ if (pc_lookup.current_bytecode_offset() !=
+ bytecode_iterator.current_offset()) {
+ Throw(isolate, "Baseline bytecode offset mismatch for PC lookup.");
+ return;
+ }
+ }
+ }
+ bytecode_iterator.Advance();
+ if (has_baseline && !bytecode_iterator.done()) {
+ if (offset_iterator->done()) {
+ Throw(isolate, "Missing bytecode(s) in baseline offset mapping.");
+ return;
+ }
+ offset_iterator->Advance();
+ }
+ }
+ if (has_baseline && !offset_iterator->done()) {
+ Throw(isolate, "Excess offsets in baseline offset mapping.");
+ return;
+ }
+}
+
// async_hooks.createHook() registers functions to be called for different
// lifetime events of each async operation.
void Shell::AsyncHooksCreateHook(
@@ -1792,20 +1926,6 @@ void Shell::AsyncHooksTriggerAsyncId(
PerIsolateData::Get(isolate)->GetAsyncHooks()->GetTriggerAsyncId()));
}
-void Shell::SetPromiseHooks(const v8::FunctionCallbackInfo<v8::Value>& args) {
- Isolate* isolate = args.GetIsolate();
- Local<Context> context = isolate->GetCurrentContext();
- HandleScope handle_scope(isolate);
-
- context->SetPromiseHooks(
- args[0]->IsFunction() ? args[0].As<Function>() : Local<Function>(),
- args[1]->IsFunction() ? args[1].As<Function>() : Local<Function>(),
- args[2]->IsFunction() ? args[2].As<Function>() : Local<Function>(),
- args[3]->IsFunction() ? args[3].As<Function>() : Local<Function>());
-
- args.GetReturnValue().Set(v8::Undefined(isolate));
-}
-
void WriteToFile(FILE* file, const v8::FunctionCallbackInfo<v8::Value>& args) {
for (int i = 0; i < args.Length(); i++) {
HandleScope handle_scope(args.GetIsolate());
@@ -2242,7 +2362,7 @@ void Shell::Fuzzilli(const v8::FunctionCallbackInfo<v8::Value>& args) {
.FromMaybe(0);
switch (arg) {
case 0:
- V8_IMMEDIATE_CRASH();
+ IMMEDIATE_CRASH();
break;
case 1:
CHECK(0);
@@ -2546,6 +2666,7 @@ Local<ObjectTemplate> Shell::CreateTestRunnerTemplate(Isolate* isolate) {
// installed on the global object can be hidden with the --omit-quit flag
// (e.g. on asan bots).
test_template->Set(isolate, "quit", FunctionTemplate::New(isolate, Quit));
+
return test_template;
}
@@ -2597,12 +2718,20 @@ Local<ObjectTemplate> Shell::CreateD8Template(Isolate* isolate) {
d8_template->Set(isolate, "log", log_template);
}
{
- Local<ObjectTemplate> promise_template = ObjectTemplate::New(isolate);
- promise_template->Set(
- isolate, "setHooks",
- FunctionTemplate::New(isolate, SetPromiseHooks, Local<Value>(),
- Local<Signature>(), 4));
- d8_template->Set(isolate, "promise", promise_template);
+ Local<ObjectTemplate> test_template = ObjectTemplate::New(isolate);
+ test_template->Set(
+ isolate, "verifySourcePositions",
+ FunctionTemplate::New(isolate, TestVerifySourcePositions));
+ // Correctness fuzzing will attempt to compare results of tests with and
+ // without turbo_fast_api_calls, so we don't expose the fast_c_api
+ // constructor when --correctness_fuzzer_suppressions is on.
+ if (i::FLAG_turbo_fast_api_calls &&
+ !i::FLAG_correctness_fuzzer_suppressions) {
+ test_template->Set(isolate, "fast_c_api",
+ Shell::CreateTestFastCApiTemplate(isolate));
+ }
+
+ d8_template->Set(isolate, "test", test_template);
}
return d8_template;
}
@@ -3010,9 +3139,9 @@ static FILE* FOpen(const char* path, const char* mode) {
#endif
}
-static char* ReadChars(const char* name, int* size_out) {
- if (Shell::options.read_from_tcp_port >= 0) {
- return Shell::ReadCharsFromTcpPort(name, size_out);
+char* Shell::ReadChars(const char* name, int* size_out) {
+ if (options.read_from_tcp_port >= 0) {
+ return ReadCharsFromTcpPort(name, size_out);
}
FILE* file = FOpen(name, "rb");
@@ -3037,6 +3166,20 @@ static char* ReadChars(const char* name, int* size_out) {
return chars;
}
+bool Shell::ReadLines(const char* name, std::vector<std::string>& lines) {
+ int length;
+ const char* data = reinterpret_cast<const char*>(ReadChars(name, &length));
+ if (data == nullptr) {
+ return false;
+ }
+ std::stringstream stream(data);
+ std::string line;
+ while (std::getline(stream, line, '\n')) {
+ lines.emplace_back(line);
+ }
+ return true;
+}
+
void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
static_assert(sizeof(char) == sizeof(uint8_t),
"char and uint8_t should both have 1 byte");
@@ -3087,6 +3230,13 @@ Local<String> Shell::ReadFile(Isolate* isolate, const char* name) {
return result;
}
+void Shell::WriteChars(const char* name, uint8_t* buffer, size_t buffer_size) {
+ FILE* file = base::Fopen(name, "w");
+ if (file == nullptr) return;
+ fwrite(buffer, 1, buffer_size, file);
+ base::Fclose(file);
+}
+
void Shell::RunShell(Isolate* isolate) {
HandleScope outer_scope(isolate);
v8::Local<v8::Context> context =
@@ -3344,6 +3494,15 @@ bool SourceGroup::Execute(Isolate* isolate) {
break;
}
continue;
+ } else if (strcmp(arg, "--web-snapshot") == 0 && i + 1 < end_offset_) {
+ // Treat the next file as a web snapshot.
+ arg = argv_[++i];
+ Shell::set_script_executed();
+ if (!Shell::ExecuteWebSnapshot(isolate, arg)) {
+ success = false;
+ break;
+ }
+ continue;
} else if (arg[0] == '-') {
// Ignore other options. They have been parsed already.
continue;
@@ -3896,6 +4055,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
options.cpu_profiler = true;
options.cpu_profiler_print = true;
argv[i] = nullptr;
+ } else if (strncmp(argv[i], "--web-snapshot-config=", 22) == 0) {
+ options.web_snapshot_config = argv[i] + 22;
+ argv[i] = nullptr;
#ifdef V8_FUZZILLI
} else if (strcmp(argv[i], "--no-fuzzilli-enable-builtins-coverage") == 0) {
options.fuzzilli_enable_builtins_coverage = false;
@@ -3907,10 +4069,11 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--fuzzy-module-file-extensions") == 0) {
options.fuzzy_module_file_extensions = true;
argv[i] = nullptr;
-#ifdef V8_ENABLE_SYSTEM_INSTRUMENTATION
+#if defined(V8_OS_WIN) && defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
} else if (strcmp(argv[i], "--enable-system-instrumentation") == 0) {
options.enable_system_instrumentation = true;
options.trace_enabled = true;
+ i::FLAG_interpreted_frames_native_stack = true;
argv[i] = nullptr;
#endif
}
@@ -3923,10 +4086,12 @@ bool Shell::SetOptions(int argc, char* argv[]) {
const char* usage =
"Synopsis:\n"
" shell [options] [--shell] [<file>...]\n"
- " d8 [options] [-e <string>] [--shell] [[--module] <file>...]\n\n"
+ " d8 [options] [-e <string>] [--shell] [[--module|--web-snapshot]"
+ " <file>...]\n\n"
" -e execute a string in V8\n"
" --shell run an interactive JavaScript shell\n"
- " --module execute a file as a JavaScript module\n\n";
+ " --module execute a file as a JavaScript module\n"
+ " --web-snapshot execute a file as a web snapshot\n\n";
using HelpOptions = i::FlagList::HelpOptions;
i::FLAG_abort_on_contradictory_flags = true;
i::FlagList::SetFlagsFromCommandLine(&argc, argv, true,
@@ -3948,8 +4113,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
current->End(i);
current++;
current->Begin(argv, i + 1);
- } else if (strcmp(str, "--module") == 0) {
- // Pass on to SourceGroup, which understands this option.
+ } else if (strcmp(str, "--module") == 0 ||
+ strcmp(str, "--web-snapshot") == 0) {
+ // Pass on to SourceGroup, which understands these options.
} else if (strncmp(str, "--", 2) == 0) {
if (!i::FLAG_correctness_fuzzer_suppressions) {
printf("Warning: unknown flag %s.\nTry --help for options\n", str);
@@ -4488,13 +4654,16 @@ int Shell::Main(int argc, char* argv[]) {
std::unique_ptr<platform::tracing::TracingController> tracing;
if (options.trace_enabled && !i::FLAG_verify_predictable) {
tracing = std::make_unique<platform::tracing::TracingController>();
- const char* trace_path =
- options.trace_path ? options.trace_path : "v8_trace.json";
- trace_file.open(trace_path);
- if (!trace_file.good()) {
- printf("Cannot open trace file '%s' for writing: %s.\n", trace_path,
- strerror(errno));
- return 1;
+
+ if (!options.enable_system_instrumentation) {
+ const char* trace_path =
+ options.trace_path ? options.trace_path : "v8_trace.json";
+ trace_file.open(trace_path);
+ if (!trace_file.good()) {
+ printf("Cannot open trace file '%s' for writing: %s.\n", trace_path,
+ strerror(errno));
+ return 1;
+ }
}
#ifdef V8_USE_PERFETTO
@@ -4598,12 +4767,14 @@ int Shell::Main(int argc, char* argv[]) {
create_params.add_histogram_sample_callback = AddHistogramSample;
}
+#if V8_ENABLE_WEBASSEMBLY
if (V8_TRAP_HANDLER_SUPPORTED && i::FLAG_wasm_trap_handler) {
constexpr bool use_default_trap_handler = true;
if (!v8::V8::EnableWebAssemblyTrapHandler(use_default_trap_handler)) {
FATAL("Could not register trap handler");
}
}
+#endif // V8_ENABLE_WEBASSEMBLY
Isolate* isolate = Isolate::New(create_params);
diff --git a/deps/v8/src/d8/d8.h b/deps/v8/src/d8/d8.h
index 6ea9681d699..d5f13817d35 100644
--- a/deps/v8/src/d8/d8.h
+++ b/deps/v8/src/d8/d8.h
@@ -401,6 +401,8 @@ class ShellOptions {
"fuzzy-module-file-extensions", true};
DisallowReassignment<bool> enable_system_instrumentation = {
"enable-system-instrumentation", false};
+ DisallowReassignment<const char*> web_snapshot_config = {
+ "web-snapshot-config", nullptr};
};
class Shell : public i::AllStatic {
@@ -420,6 +422,7 @@ class Shell : public i::AllStatic {
ReportExceptions report_exceptions,
ProcessMessageQueue process_message_queue);
static bool ExecuteModule(Isolate* isolate, const char* file_name);
+ static bool ExecuteWebSnapshot(Isolate* isolate, const char* file_name);
static void ReportException(Isolate* isolate, Local<Message> message,
Local<Value> exception);
static void ReportException(Isolate* isolate, TryCatch* try_catch);
@@ -470,6 +473,8 @@ class Shell : public i::AllStatic {
const PropertyCallbackInfo<void>& info);
static void LogGetAndStop(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void TestVerifySourcePositions(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
static void AsyncHooksCreateHook(
const v8::FunctionCallbackInfo<v8::Value>& args);
@@ -478,8 +483,6 @@ class Shell : public i::AllStatic {
static void AsyncHooksTriggerAsyncId(
const v8::FunctionCallbackInfo<v8::Value>& args);
- static void SetPromiseHooks(const v8::FunctionCallbackInfo<v8::Value>& args);
-
static void Print(const v8::FunctionCallbackInfo<v8::Value>& args);
static void PrintErr(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Write(const v8::FunctionCallbackInfo<v8::Value>& args);
@@ -489,11 +492,14 @@ class Shell : public i::AllStatic {
static void Quit(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Version(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Read(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static char* ReadChars(const char* name, int* size_out);
+ static bool ReadLines(const char* name, std::vector<std::string>& lines);
static void ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args);
static Local<String> ReadFromStdin(Isolate* isolate);
static void ReadLine(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(ReadFromStdin(args.GetIsolate()));
}
+ static void WriteChars(const char* name, uint8_t* buffer, size_t buffer_size);
static void Load(const v8::FunctionCallbackInfo<v8::Value>& args);
static void SetTimeout(const v8::FunctionCallbackInfo<v8::Value>& args);
static void WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args);
@@ -626,6 +632,7 @@ class Shell : public i::AllStatic {
static Local<ObjectTemplate> CreatePerformanceTemplate(Isolate* isolate);
static Local<ObjectTemplate> CreateRealmTemplate(Isolate* isolate);
static Local<ObjectTemplate> CreateD8Template(Isolate* isolate);
+ static Local<FunctionTemplate> CreateTestFastCApiTemplate(Isolate* isolate);
static MaybeLocal<Context> CreateRealm(
const v8::FunctionCallbackInfo<v8::Value>& args, int index,
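
The declarations added above give the shell small file helpers (ReadChars, ReadLines, WriteChars) used by the web-snapshot support. A self-contained sketch of a ReadLines-style helper; the signature mirrors the declaration above, while the body is only an illustrative assumption, not the d8 implementation:

#include <fstream>
#include <string>
#include <vector>

// Read a text file line by line; returns false if the file cannot be opened.
bool ReadLines(const char* name, std::vector<std::string>& lines) {
  std::ifstream in(name);
  if (!in.is_open()) return false;
  for (std::string line; std::getline(in, line);) {
    lines.push_back(line);
  }
  return true;
}
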
diff --git a/deps/v8/src/debug/OWNERS b/deps/v8/src/debug/OWNERS
index 5b933520393..c4ba5afaeaf 100644
--- a/deps/v8/src/debug/OWNERS
+++ b/deps/v8/src/debug/OWNERS
@@ -1,6 +1,8 @@
bmeurer@chromium.org
jgruber@chromium.org
-mvstanton@chromium.org
+kimanh@chromium.org
+leese@chromium.org
+pfaffe@chromium.org
szuend@chromium.org
verwaest@chromium.org
yangguo@chromium.org
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index ab5df9b3c9f..4f317fcc89b 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -10,7 +10,6 @@
#include "src/common/globals.h"
#include "src/debug/debug-frames.h"
#include "src/debug/debug-scopes.h"
-#include "src/debug/debug-wasm-objects.h"
#include "src/debug/debug.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
@@ -19,6 +18,10 @@
#include "src/objects/contexts.h"
#include "src/snapshot/snapshot.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/debug/debug-wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace internal {
@@ -77,26 +80,8 @@ MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
// Get the frame where the debugging is performed.
StackTraceFrameIterator it(isolate, frame_id);
- if (it.is_javascript()) {
- JavaScriptFrame* frame = it.javascript_frame();
- // This is not a lot different than DebugEvaluate::Global, except that
- // variables accessible by the function we are evaluating from are
- // materialized and included on top of the native context. Changes to
- // the materialized object are written back afterwards.
- // Note that the native context is taken from the original context chain,
- // which may not be the current native context of the isolate.
- ContextBuilder context_builder(isolate, frame, inlined_jsframe_index);
- if (isolate->has_pending_exception()) return {};
-
- Handle<Context> context = context_builder.evaluation_context();
- Handle<JSObject> receiver(context->global_proxy(), isolate);
- MaybeHandle<Object> maybe_result =
- Evaluate(isolate, context_builder.outer_info(), context, receiver,
- source, throw_on_side_effect);
- if (!maybe_result.is_null()) context_builder.UpdateValues();
- return maybe_result;
- } else {
- CHECK(it.is_wasm());
+#if V8_ENABLE_WEBASSEMBLY
+ if (it.is_wasm()) {
WasmFrame* frame = WasmFrame::cast(it.frame());
Handle<SharedFunctionInfo> outer_info(
isolate->native_context()->empty_function().shared(), isolate);
@@ -108,6 +93,26 @@ MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
return Evaluate(isolate, outer_info, context, context_extension, source,
throw_on_side_effect);
}
+#endif // V8_ENABLE_WEBASSEMBLY
+
+ CHECK(it.is_javascript());
+ JavaScriptFrame* frame = it.javascript_frame();
+ // This is not a lot different than DebugEvaluate::Global, except that
+ // variables accessible by the function we are evaluating from are
+ // materialized and included on top of the native context. Changes to
+ // the materialized object are written back afterwards.
+ // Note that the native context is taken from the original context chain,
+ // which may not be the current native context of the isolate.
+ ContextBuilder context_builder(isolate, frame, inlined_jsframe_index);
+ if (isolate->has_pending_exception()) return {};
+
+ Handle<Context> context = context_builder.evaluation_context();
+ Handle<JSObject> receiver(context->global_proxy(), isolate);
+ MaybeHandle<Object> maybe_result =
+ Evaluate(isolate, context_builder.outer_info(), context, receiver, source,
+ throw_on_side_effect);
+ if (!maybe_result.is_null()) context_builder.UpdateValues();
+ return maybe_result;
}
MaybeHandle<Object> DebugEvaluate::WithTopmostArguments(Isolate* isolate,
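
The debug-evaluate.cc change above turns the JavaScript/Wasm if-else into a guarded early return: the Wasm branch sits inside V8_ENABLE_WEBASSEMBLY, and the JavaScript path runs unconditionally, so builds without WebAssembly compile only the common path. A generic sketch of that restructuring pattern, with FEATURE_X and the handler functions as placeholders:

#include <string>

#define FEATURE_X 1  // set to 0 to compile only the common path

std::string HandleFeatureFrame() { return "feature-specific path"; }
std::string HandleCommonFrame() { return "common path"; }

std::string Evaluate(bool is_feature_frame) {
#if FEATURE_X
  // Guarded early return: this branch, and everything only it depends on,
  // disappears when the feature is compiled out.
  if (is_feature_frame) return HandleFeatureFrame();
#endif  // FEATURE_X
  // The common path is compiled in every configuration.
  return HandleCommonFrame();
}
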
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index 8a3e4acb886..2e5c9443cab 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -6,7 +6,6 @@
#include "src/builtins/accessors.h"
#include "src/execution/frames-inl.h"
-#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
namespace internal {
@@ -30,9 +29,13 @@ FrameInspector::FrameInspector(CommonFrame* frame, int inlined_frame_index,
function_ = summary.AsJavaScript().function();
}
+#if V8_ENABLE_WEBASSEMBLY
JavaScriptFrame* js_frame =
frame->is_java_script() ? javascript_frame() : nullptr;
DCHECK(js_frame || frame->is_wasm());
+#else
+ JavaScriptFrame* js_frame = javascript_frame();
+#endif // V8_ENABLE_WEBASSEMBLY
is_optimized_ = frame_->is_optimized();
// Calculate the deoptimized frame.
@@ -67,7 +70,9 @@ Handle<Object> FrameInspector::GetContext() {
: handle(frame_->context(), isolate_);
}
+#if V8_ENABLE_WEBASSEMBLY
bool FrameInspector::IsWasm() { return frame_->is_wasm(); }
+#endif // V8_ENABLE_WEBASSEMBLY
bool FrameInspector::IsJavaScript() { return frame_->is_java_script(); }
diff --git a/deps/v8/src/debug/debug-frames.h b/deps/v8/src/debug/debug-frames.h
index 5d21d0a22de..03f670e4995 100644
--- a/deps/v8/src/debug/debug-frames.h
+++ b/deps/v8/src/debug/debug-frames.h
@@ -38,7 +38,9 @@ class FrameInspector {
Handle<String> GetFunctionName() { return function_name_; }
+#if V8_ENABLE_WEBASSEMBLY
bool IsWasm();
+#endif // V8_ENABLE_WEBASSEMBLY
bool IsJavaScript();
JavaScriptFrame* javascript_frame();
diff --git a/deps/v8/src/debug/debug-interface.cc b/deps/v8/src/debug/debug-interface.cc
index a53f912625b..bf74d379b6a 100644
--- a/deps/v8/src/debug/debug-interface.cc
+++ b/deps/v8/src/debug/debug-interface.cc
@@ -9,12 +9,16 @@
#include "src/debug/debug-evaluate.h"
#include "src/debug/debug-property-iterator.h"
#include "src/debug/debug-type-profile.h"
-#include "src/debug/debug-wasm-objects-inl.h"
#include "src/debug/debug.h"
#include "src/execution/vm-state-inl.h"
#include "src/objects/js-generator-inl.h"
#include "src/regexp/regexp-stack.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/debug/debug-wasm-objects-inl.h"
+#include "src/wasm/wasm-engine.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
// Has to be the last include (doesn't have include guards):
#include "src/api/api-macros.h"
@@ -283,7 +287,9 @@ int Script::ColumnOffset() const {
std::vector<int> Script::LineEnds() const {
i::Handle<i::Script> script = Utils::OpenHandle(this);
- if (script->type() == i::Script::TYPE_WASM) return std::vector<int>();
+#if V8_ENABLE_WEBASSEMBLY
+ if (script->type() == i::Script::TYPE_WASM) return {};
+#endif // V8_ENABLE_WEBASSEMBLY
i::Isolate* isolate = script->GetIsolate();
i::HandleScope scope(isolate);
@@ -348,9 +354,11 @@ MaybeLocal<String> Script::Source() const {
handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
}
+#if V8_ENABLE_WEBASSEMBLY
bool Script::IsWasm() const {
return Utils::OpenHandle(this)->type() == i::Script::TYPE_WASM;
}
+#endif // V8_ENABLE_WEBASSEMBLY
bool Script::IsModule() const {
return Utils::OpenHandle(this)->origin_options().IsModule();
@@ -374,11 +382,13 @@ bool Script::GetPossibleBreakpoints(
std::vector<BreakLocation>* locations) const {
CHECK(!start.IsEmpty());
i::Handle<i::Script> script = Utils::OpenHandle(this);
+#if V8_ENABLE_WEBASSEMBLY
if (script->type() == i::Script::TYPE_WASM) {
i::wasm::NativeModule* native_module = script->wasm_native_module();
return i::WasmScript::GetPossibleBreakpoints(native_module, start, end,
locations);
}
+#endif // V8_ENABLE_WEBASSEMBLY
i::Isolate* isolate = script->GetIsolate();
i::Script::InitLineEnds(isolate, script);
@@ -424,10 +434,12 @@ bool Script::GetPossibleBreakpoints(
int Script::GetSourceOffset(const Location& location) const {
i::Handle<i::Script> script = Utils::OpenHandle(this);
+#if V8_ENABLE_WEBASSEMBLY
if (script->type() == i::Script::TYPE_WASM) {
DCHECK_EQ(0, location.GetLineNumber());
return location.GetColumnNumber();
}
+#endif // V8_ENABLE_WEBASSEMBLY
int line = std::max(location.GetLineNumber() - script->line_offset(), 0);
int column = location.GetColumnNumber();
@@ -479,11 +491,13 @@ bool Script::SetBreakpoint(Local<String> condition, Location* location,
bool Script::SetBreakpointOnScriptEntry(BreakpointId* id) const {
i::Handle<i::Script> script = Utils::OpenHandle(this);
i::Isolate* isolate = script->GetIsolate();
+#if V8_ENABLE_WEBASSEMBLY
if (script->type() == i::Script::TYPE_WASM) {
int position = i::WasmScript::kOnEntryBreakpointPosition;
return isolate->debug()->SetBreakPointForScript(
script, isolate->factory()->empty_string(), &position, id);
}
+#endif // V8_ENABLE_WEBASSEMBLY
i::SharedFunctionInfo::ScriptIterator it(isolate, *script);
for (i::SharedFunctionInfo sfi = it.Next(); !sfi.is_null(); sfi = it.Next()) {
if (sfi.is_toplevel()) {
@@ -494,11 +508,13 @@ bool Script::SetBreakpointOnScriptEntry(BreakpointId* id) const {
return false;
}
+#if V8_ENABLE_WEBASSEMBLY
void Script::RemoveWasmBreakpoint(BreakpointId id) {
i::Handle<i::Script> script = Utils::OpenHandle(this);
i::Isolate* isolate = script->GetIsolate();
isolate->debug()->RemoveBreakpointForWasmScript(script, id);
}
+#endif // V8_ENABLE_WEBASSEMBLY
void RemoveBreakpoint(Isolate* v8_isolate, BreakpointId id) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
@@ -516,6 +532,7 @@ void ForceGarbageCollection(
isolate->LowMemoryNotification();
}
+#if V8_ENABLE_WEBASSEMBLY
WasmScript* WasmScript::Cast(Script* script) {
CHECK(script->IsWasm());
return static_cast<WasmScript*>(script);
@@ -632,6 +649,7 @@ int WasmScript::CodeOffset() const {
module->code.offset() != 0);
return module->code.offset();
}
+#endif // V8_ENABLE_WEBASSEMBLY
Location::Location(int line_number, int column_number)
: line_number_(line_number),
@@ -664,14 +682,18 @@ void GetLoadedScripts(Isolate* v8_isolate,
i::Script::Iterator iterator(isolate);
for (i::Script script = iterator.Next(); !script.is_null();
script = iterator.Next()) {
- if (script.type() == i::Script::TYPE_NORMAL ||
- script.type() == i::Script::TYPE_WASM) {
- if (script.HasValidSource()) {
- i::HandleScope handle_scope(isolate);
- i::Handle<i::Script> script_handle(script, isolate);
- scripts.Append(ToApiHandle<Script>(script_handle));
- }
+#if V8_ENABLE_WEBASSEMBLY
+ if (script.type() != i::Script::TYPE_NORMAL &&
+ script.type() != i::Script::TYPE_WASM) {
+ continue;
}
+#else
+ if (script.type() != i::Script::TYPE_NORMAL) continue;
+#endif // V8_ENABLE_WEBASSEMBLY
+ if (!script.HasValidSource()) continue;
+ i::HandleScope handle_scope(isolate);
+ i::Handle<i::Script> script_handle(script, isolate);
+ scripts.Append(ToApiHandle<Script>(script_handle));
}
}
}
@@ -698,6 +720,7 @@ MaybeLocal<UnboundScript> CompileInspectorScript(Isolate* v8_isolate,
RETURN_ESCAPED(ToApiHandle<UnboundScript>(result));
}
+#if V8_ENABLE_WEBASSEMBLY
void TierDownAllModulesPerIsolate(Isolate* v8_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
isolate->wasm_engine()->TierDownAllModulesPerIsolate(isolate);
@@ -707,6 +730,7 @@ void TierUpAllModulesPerIsolate(Isolate* v8_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
isolate->wasm_engine()->TierUpAllModulesPerIsolate(isolate);
}
+#endif // V8_ENABLE_WEBASSEMBLY
void SetDebugDelegate(Isolate* v8_isolate, DebugDelegate* delegate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
@@ -746,12 +770,27 @@ void AccessorPair::CheckCast(Value* that) {
"Value is not a v8::debug::AccessorPair");
}
+#if V8_ENABLE_WEBASSEMBLY
void WasmValueObject::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(obj->IsWasmValueObject(), "v8::debug::WasmValueObject::Cast",
"Value is not a v8::debug::WasmValueObject");
}
+bool WasmValueObject::IsWasmValueObject(Local<Value> that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(*that);
+ return obj->IsWasmValueObject();
+}
+
+Local<String> WasmValueObject::type() const {
+ i::Handle<i::WasmValueObject> object =
+ i::Handle<i::WasmValueObject>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = object->GetIsolate();
+ i::Handle<i::String> type(object->type(), isolate);
+ return Utils::ToLocal(type);
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
Local<Function> GetBuiltin(Isolate* v8_isolate, Builtin builtin) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
@@ -1114,11 +1153,6 @@ bool AccessorPair::IsAccessorPair(Local<Value> that) {
return obj->IsAccessorPair();
}
-bool WasmValueObject::IsWasmValueObject(Local<Value> that) {
- i::Handle<i::Object> obj = Utils::OpenHandle(*that);
- return obj->IsWasmValueObject();
-}
-
MaybeLocal<Message> GetMessageFromPromise(Local<Promise> p) {
i::Handle<i::JSPromise> promise = Utils::OpenHandle(*p);
i::Isolate* isolate = promise->GetIsolate();
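
The GetLoadedScripts rewrite above replaces nested conditions with guard clauses: scripts of the wrong type or without a valid source are skipped via continue, and only the survivors are appended. A minimal sketch of the same guard-clause filtering over placeholder types (ScriptInfo is not a V8 type):

#include <string>
#include <vector>

struct ScriptInfo {
  bool is_normal;
  bool has_valid_source;
  std::string name;
};

// Collect only scripts that pass every guard; each rejection is an early
// `continue`, which keeps the accepted path unindented.
std::vector<std::string> CollectLoadedScripts(
    const std::vector<ScriptInfo>& scripts) {
  std::vector<std::string> result;
  for (const ScriptInfo& script : scripts) {
    if (!script.is_normal) continue;
    if (!script.has_valid_source) continue;
    result.push_back(script.name);
  }
  return result;
}
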
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index f04a91be325..66c7f3997e8 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -154,7 +154,6 @@ class V8_EXPORT_PRIVATE Script {
MaybeLocal<String> SourceMappingURL() const;
Maybe<int> ContextId() const;
MaybeLocal<String> Source() const;
- bool IsWasm() const;
bool IsModule() const;
bool GetPossibleBreakpoints(
const debug::Location& start, const debug::Location& end,
@@ -166,10 +165,14 @@ class V8_EXPORT_PRIVATE Script {
LiveEditResult* result) const;
bool SetBreakpoint(v8::Local<v8::String> condition, debug::Location* location,
BreakpointId* id) const;
+#if V8_ENABLE_WEBASSEMBLY
+ bool IsWasm() const;
void RemoveWasmBreakpoint(BreakpointId id);
+#endif // V8_ENABLE_WEBASSEMBLY
bool SetBreakpointOnScriptEntry(BreakpointId* id) const;
};
+#if V8_ENABLE_WEBASSEMBLY
// Specialization for wasm Scripts.
class WasmScript : public Script {
public:
@@ -190,6 +193,7 @@ class WasmScript : public Script {
int CodeOffset() const;
int CodeLength() const;
};
+#endif // V8_ENABLE_WEBASSEMBLY
V8_EXPORT_PRIVATE void GetLoadedScripts(
Isolate* isolate,
@@ -228,8 +232,10 @@ class DebugDelegate {
V8_EXPORT_PRIVATE void SetDebugDelegate(Isolate* isolate,
DebugDelegate* listener);
+#if V8_ENABLE_WEBASSEMBLY
V8_EXPORT_PRIVATE void TierDownAllModulesPerIsolate(Isolate* isolate);
V8_EXPORT_PRIVATE void TierUpAllModulesPerIsolate(Isolate* isolate);
+#endif // V8_ENABLE_WEBASSEMBLY
class AsyncEventDelegate {
public:
@@ -616,15 +622,24 @@ class PropertyIterator {
virtual bool is_array_index() = 0;
};
+#if V8_ENABLE_WEBASSEMBLY
class V8_EXPORT_PRIVATE WasmValueObject : public v8::Object {
public:
WasmValueObject() = delete;
static bool IsWasmValueObject(v8::Local<v8::Value> obj);
- V8_INLINE static WasmValueObject* Cast(v8::Value* obj);
+ static WasmValueObject* Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<WasmValueObject*>(value);
+ }
+
+ v8::Local<v8::String> type() const;
private:
static void CheckCast(v8::Value* obj);
};
+#endif // V8_ENABLE_WEBASSEMBLY
AccessorPair* AccessorPair::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
@@ -633,13 +648,6 @@ AccessorPair* AccessorPair::Cast(v8::Value* value) {
return static_cast<AccessorPair*>(value);
}
-WasmValueObject* WasmValueObject::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<WasmValueObject*>(value);
-}
-
MaybeLocal<Message> GetMessageFromPromise(Local<Promise> promise);
} // namespace debug
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index 30b46b19875..03d49fac5ca 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -35,8 +35,10 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
}
context_ = Handle<Context>::cast(frame_inspector->GetContext());
+#if V8_ENABLE_WEBASSEMBLY
// We should not instantiate a ScopeIterator for wasm frames.
DCHECK_NE(Script::TYPE_WASM, frame_inspector->GetScript()->type());
+#endif // V8_ENABLE_WEBASSEMBLY
TryParseAndRetrieveScopes(strategy);
}
@@ -740,8 +742,7 @@ void ScopeIterator::VisitModuleScope(const Visitor& visitor) const {
if (VisitContextLocals(visitor, scope_info, context_, ScopeTypeModule))
return;
- int count_index = scope_info->ModuleVariableCountIndex();
- int module_variable_count = Smi::cast(scope_info->get(count_index)).value();
+ int module_variable_count = scope_info->ModuleVariableCount();
Handle<SourceTextModule> module(context_->module(), isolate_);
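
The one-line change above swaps a raw Smi read at ModuleVariableCountIndex() for the named ModuleVariableCount() accessor. A small sketch of the same idea, hiding slot arithmetic behind a typed accessor; ScopeInfoLike and its slot index are placeholders, not V8's layout:

#include <cstdint>
#include <utility>
#include <vector>

class ScopeInfoLike {
 public:
  explicit ScopeInfoLike(std::vector<int64_t> slots)
      : slots_(std::move(slots)) {}

  // Callers ask for the count by name instead of indexing and decoding the
  // underlying slot themselves.
  int ModuleVariableCount() const {
    return static_cast<int>(slots_[kModuleVariableCountIndex]);
  }

 private:
  static constexpr int kModuleVariableCountIndex = 0;  // placeholder index
  std::vector<int64_t> slots_;
};
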
diff --git a/deps/v8/src/debug/debug-stack-trace-iterator.cc b/deps/v8/src/debug/debug-stack-trace-iterator.cc
index 1d7e37029d9..9904f781f92 100644
--- a/deps/v8/src/debug/debug-stack-trace-iterator.cc
+++ b/deps/v8/src/debug/debug-stack-trace-iterator.cc
@@ -7,12 +7,15 @@
#include "src/api/api-inl.h"
#include "src/debug/debug-evaluate.h"
#include "src/debug/debug-scope-iterator.h"
-#include "src/debug/debug-wasm-objects.h"
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/debug/debug-wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
std::unique_ptr<debug::StackTraceIterator> debug::StackTraceIterator::Create(
@@ -119,9 +122,11 @@ v8::MaybeLocal<v8::Value> DebugStackTraceIterator::GetReceiver() const {
v8::Local<v8::Value> DebugStackTraceIterator::GetReturnValue() const {
CHECK(!Done());
+#if V8_ENABLE_WEBASSEMBLY
if (frame_inspector_ && frame_inspector_->IsWasm()) {
return v8::Local<v8::Value>();
}
+#endif // V8_ENABLE_WEBASSEMBLY
CHECK_NOT_NULL(iterator_.frame());
bool is_optimized = iterator_.frame()->is_optimized();
if (is_optimized || !is_top_frame_ ||
@@ -159,16 +164,19 @@ v8::Local<v8::Function> DebugStackTraceIterator::GetFunction() const {
std::unique_ptr<v8::debug::ScopeIterator>
DebugStackTraceIterator::GetScopeIterator() const {
DCHECK(!Done());
- CommonFrame* frame = iterator_.frame();
- if (frame->is_wasm()) {
- return GetWasmScopeIterator(WasmFrame::cast(frame));
+#if V8_ENABLE_WEBASSEMBLY
+ if (iterator_.frame()->is_wasm()) {
+ return GetWasmScopeIterator(WasmFrame::cast(iterator_.frame()));
}
+#endif // V8_ENABLE_WEBASSEMBLY
return std::make_unique<DebugScopeIterator>(isolate_, frame_inspector_.get());
}
bool DebugStackTraceIterator::Restart() {
DCHECK(!Done());
+#if V8_ENABLE_WEBASSEMBLY
if (iterator_.is_wasm()) return false;
+#endif // V8_ENABLE_WEBASSEMBLY
return LiveEdit::RestartFrame(iterator_.javascript_frame());
}
diff --git a/deps/v8/src/debug/debug-wasm-objects-inl.h b/deps/v8/src/debug/debug-wasm-objects-inl.h
index 77ca8c9a0bb..18c6b4e9d42 100644
--- a/deps/v8/src/debug/debug-wasm-objects-inl.h
+++ b/deps/v8/src/debug/debug-wasm-objects-inl.h
@@ -20,6 +20,7 @@ OBJECT_CONSTRUCTORS_IMPL(WasmValueObject, JSObject)
CAST_ACCESSOR(WasmValueObject)
+ACCESSORS(WasmValueObject, type, String, kTypeOffset)
ACCESSORS(WasmValueObject, value, Object, kValueOffset)
} // namespace internal
diff --git a/deps/v8/src/debug/debug-wasm-objects.cc b/deps/v8/src/debug/debug-wasm-objects.cc
index c3560a4d15d..070221f433d 100644
--- a/deps/v8/src/debug/debug-wasm-objects.cc
+++ b/deps/v8/src/debug/debug-wasm-objects.cc
@@ -76,16 +76,16 @@ enum DebugProxyId {
kContextProxy,
kLocalsProxy,
kStackProxy,
- kLastProxyId = kStackProxy,
+ kStructProxy,
+ kArrayProxy,
+ kLastProxyId = kArrayProxy,
kNumProxies = kLastProxyId + 1,
kNumInstanceProxies = kLastInstanceProxyId + 1
};
-constexpr int kFirstWasmValueMapIndex = kNumProxies;
-constexpr int kLastWasmValueMapIndex =
- kFirstWasmValueMapIndex + WasmValueObject::kNumTypes - 1;
-constexpr int kNumDebugMaps = kLastWasmValueMapIndex + 1;
+constexpr int kWasmValueMapIndex = kNumProxies;
+constexpr int kNumDebugMaps = kWasmValueMapIndex + 1;
Handle<FixedArray> GetOrCreateDebugMaps(Isolate* isolate) {
Handle<FixedArray> maps = isolate->wasm_debug_maps();
@@ -98,11 +98,14 @@ Handle<FixedArray> GetOrCreateDebugMaps(Isolate* isolate) {
// Creates a Map for the given debug proxy |id| using the |create_template_fn|
// on-demand and caches this map in the global object. The map is derived from
-// the FunctionTemplate returned by |create_template_fn| and has it's prototype
-// set to |null| and is marked non-extensible.
+// the FunctionTemplate returned by |create_template_fn| and has its prototype
+// set to |null| and is marked non-extensible (by default).
+// TODO(bmeurer): remove the extensibility opt-out and replace it with a proper
+// way to add non-intercepted named properties.
Handle<Map> GetOrCreateDebugProxyMap(
Isolate* isolate, DebugProxyId id,
- v8::Local<v8::FunctionTemplate> (*create_template_fn)(v8::Isolate*)) {
+ v8::Local<v8::FunctionTemplate> (*create_template_fn)(v8::Isolate*),
+ bool make_non_extensible = true) {
auto maps = GetOrCreateDebugMaps(isolate);
CHECK_LE(kNumProxies, maps->length());
if (!maps->is_the_hole(isolate, id)) {
@@ -113,7 +116,9 @@ Handle<Map> GetOrCreateDebugProxyMap(
.ToHandleChecked();
auto map = JSFunction::GetDerivedMap(isolate, fun, fun).ToHandleChecked();
Map::SetPrototype(isolate, map, isolate->factory()->null_value());
- map->set_is_extensible(false);
+ if (make_non_extensible) {
+ map->set_is_extensible(false);
+ }
maps->set(id, *map);
return map;
}
@@ -124,9 +129,10 @@ template <typename T, DebugProxyId id, typename Provider>
struct IndexedDebugProxy {
static constexpr DebugProxyId kId = id;
- static Handle<JSObject> Create(Isolate* isolate, Handle<Provider> provider) {
- auto object_map =
- GetOrCreateDebugProxyMap(isolate, kId, &T::CreateTemplate);
+ static Handle<JSObject> Create(Isolate* isolate, Handle<Provider> provider,
+ bool make_map_non_extensible = true) {
+ auto object_map = GetOrCreateDebugProxyMap(isolate, kId, &T::CreateTemplate,
+ make_map_non_extensible);
auto object = isolate->factory()->NewJSObjectFromMap(object_map);
object->SetEmbedderField(kProviderField, *provider);
return object;
@@ -340,9 +346,12 @@ struct GlobalsProxy : NamedDebugProxy<GlobalsProxy, kGlobalsProxy> {
static Handle<Object> Get(Isolate* isolate,
Handle<WasmInstanceObject> instance,
uint32_t index) {
+ Handle<WasmModuleObject> module(instance->module_object(), isolate);
return WasmValueObject::New(
- isolate, WasmInstanceObject::GetGlobalValue(
- instance, instance->module()->globals[index]));
+ isolate,
+ WasmInstanceObject::GetGlobalValue(instance,
+ instance->module()->globals[index]),
+ module);
}
static Handle<String> GetName(Isolate* isolate,
@@ -420,10 +429,14 @@ struct LocalsProxy : NamedDebugProxy<LocalsProxy, kLocalsProxy, FixedArray> {
int count = debug_info->GetNumLocals(frame->pc());
auto function = debug_info->GetFunctionAtAddress(frame->pc());
auto values = isolate->factory()->NewFixedArray(count + 2);
+ Handle<WasmModuleObject> module_object(
+ frame->wasm_instance().module_object(), isolate);
for (int i = 0; i < count; ++i) {
auto value = WasmValueObject::New(
- isolate, debug_info->GetLocalValue(i, frame->pc(), frame->fp(),
- frame->callee_fp()));
+ isolate,
+ debug_info->GetLocalValue(i, frame->pc(), frame->fp(),
+ frame->callee_fp(), isolate),
+ module_object);
values->set(i, *value);
}
values->set(count + 0, frame->wasm_instance().module_object());
@@ -467,10 +480,14 @@ struct StackProxy : IndexedDebugProxy<StackProxy, kStackProxy, FixedArray> {
frame->wasm_instance().module_object().native_module()->GetDebugInfo();
int count = debug_info->GetStackDepth(frame->pc());
auto values = isolate->factory()->NewFixedArray(count);
+ Handle<WasmModuleObject> module_object(
+ frame->wasm_instance().module_object(), isolate);
for (int i = 0; i < count; ++i) {
auto value = WasmValueObject::New(
- isolate, debug_info->GetStackValue(i, frame->pc(), frame->fp(),
- frame->callee_fp()));
+ isolate,
+ debug_info->GetStackValue(i, frame->pc(), frame->fp(),
+ frame->callee_fp(), isolate),
+ module_object);
values->set(i, *value);
}
return IndexedDebugProxy::Create(isolate, values);
@@ -715,7 +732,10 @@ class DebugWasmScopeIterator final : public debug::ScopeIterator {
return Utils::ToLocal(LocalsProxy::Create(frame_));
}
case debug::ScopeIterator::ScopeTypeWasmExpressionStack: {
- return Utils::ToLocal(StackProxy::Create(frame_));
+ auto object = isolate->factory()->NewJSObjectWithNullProto();
+ auto stack = StackProxy::Create(frame_);
+ JSObject::AddProperty(isolate, object, "stack", stack, FROZEN);
+ return Utils::ToLocal(object);
}
default:
UNREACHABLE();
@@ -753,53 +773,75 @@ Handle<String> WasmSimd128ToString(Isolate* isolate, wasm::Simd128 s128) {
return isolate->factory()->NewStringFromAsciiChecked(buffer.data());
}
-Handle<String> Type2String(Isolate* isolate, WasmValueObject::Type type) {
- switch (type) {
- case WasmValueObject::kExternRef:
- return isolate->factory()->InternalizeString(
- StaticCharVector("externref"));
- case WasmValueObject::kF32:
- return isolate->factory()->InternalizeString(StaticCharVector("f32"));
- case WasmValueObject::kF64:
- return isolate->factory()->InternalizeString(StaticCharVector("f64"));
- case WasmValueObject::kI32:
- return isolate->factory()->InternalizeString(StaticCharVector("i32"));
- case WasmValueObject::kI64:
- return isolate->factory()->InternalizeString(StaticCharVector("i64"));
- case WasmValueObject::kV128:
- return isolate->factory()->InternalizeString(StaticCharVector("v128"));
- case WasmValueObject::kNumTypes:
- break;
+Handle<String> GetRefTypeName(Isolate* isolate, wasm::ValueType type,
+ wasm::NativeModule* module) {
+ const char* nullable = type.kind() == wasm::kOptRef ? " null" : "";
+ EmbeddedVector<char, 64> type_name;
+ size_t len;
+ if (type.heap_type().is_generic()) {
+ const char* generic_name = "";
+ wasm::HeapType::Representation heap_rep = type.heap_representation();
+ switch (heap_rep) {
+ case wasm::HeapType::kFunc:
+ generic_name = "func";
+ break;
+ case wasm::HeapType::kExtern:
+ generic_name = "extern";
+ break;
+ case wasm::HeapType::kEq:
+ generic_name = "eq";
+ break;
+ case wasm::HeapType::kI31:
+ generic_name = "i31";
+ break;
+ case wasm::HeapType::kData:
+ generic_name = "data";
+ break;
+ case wasm::HeapType::kAny:
+ generic_name = "any";
+ break;
+ default:
+ UNREACHABLE();
+ }
+ len = SNPrintF(type_name, "(ref%s %s)", nullable, generic_name);
+ } else {
+ int type_index = type.ref_index();
+ wasm::ModuleWireBytes module_wire_bytes(module->wire_bytes());
+ Vector<const char> name_vec = module_wire_bytes.GetNameOrNull(
+ module->GetDebugInfo()->GetTypeName(type_index));
+ if (name_vec.empty()) {
+ len = SNPrintF(type_name, "(ref%s $type%u)", nullable, type_index);
+ } else {
+ len = SNPrintF(type_name, "(ref%s $", nullable);
+ Vector<char> suffix = type_name.SubVector(len, type_name.size());
+ StrNCpy(suffix, name_vec.data(), name_vec.size());
+ len += std::min(suffix.size(), name_vec.size());
+ if (len < type_name.size()) {
+ type_name[len] = ')';
+ len++;
+ }
+ }
}
- UNREACHABLE();
+ return isolate->factory()->InternalizeString(type_name.SubVector(0, len));
}
} // namespace
// static
-Handle<WasmValueObject> WasmValueObject::New(Isolate* isolate, Type type,
+Handle<WasmValueObject> WasmValueObject::New(Isolate* isolate,
+ Handle<String> type,
Handle<Object> value) {
- int map_index = kFirstWasmValueMapIndex + type;
- DCHECK_LE(kFirstWasmValueMapIndex, map_index);
- DCHECK_LE(map_index, kLastWasmValueMapIndex);
auto maps = GetOrCreateDebugMaps(isolate);
- if (maps->is_the_hole(isolate, map_index)) {
- auto type_name = Type2String(isolate, type);
- auto shared = isolate->factory()->NewSharedFunctionInfoForBuiltin(
- type_name, Builtins::kIllegal);
- shared->set_language_mode(LanguageMode::kStrict);
- auto constructor =
- Factory::JSFunctionBuilder{isolate, shared, isolate->native_context()}
- .set_map(isolate->strict_function_map())
- .Build();
+ if (maps->is_the_hole(isolate, kWasmValueMapIndex)) {
Handle<Map> map = isolate->factory()->NewMap(
WASM_VALUE_OBJECT_TYPE, WasmValueObject::kSize,
- TERMINAL_FAST_ELEMENTS_KIND, 1);
+ TERMINAL_FAST_ELEMENTS_KIND, 2);
Map::EnsureDescriptorSlack(isolate, map, 2);
{ // type
- Descriptor d = Descriptor::DataConstant(
+ Descriptor d = Descriptor::DataField(
+ isolate,
isolate->factory()->InternalizeString(StaticCharVector("type")),
- type_name, FROZEN);
+ WasmValueObject::kTypeIndex, FROZEN, Representation::Tagged());
map->AppendDescriptor(isolate, &d);
}
{ // value
@@ -809,36 +851,195 @@ Handle<WasmValueObject> WasmValueObject::New(Isolate* isolate, Type type,
WasmValueObject::kValueIndex, FROZEN, Representation::Tagged());
map->AppendDescriptor(isolate, &d);
}
- map->set_constructor_or_back_pointer(*constructor);
map->set_is_extensible(false);
- maps->set(map_index, *map);
+ maps->set(kWasmValueMapIndex, *map);
}
- Handle<Map> value_map(Map::cast(maps->get(map_index)), isolate);
+ Handle<Map> value_map =
+ handle(Map::cast(maps->get(kWasmValueMapIndex)), isolate);
Handle<WasmValueObject> object = Handle<WasmValueObject>::cast(
isolate->factory()->NewJSObjectFromMap(value_map));
+ object->set_type(*type);
object->set_value(*value);
return object;
}
+// This class implements a proxy for a single inspectable Wasm struct.
+struct StructProxy : NamedDebugProxy<StructProxy, kStructProxy, FixedArray> {
+ static constexpr char const* kClassName = "Struct";
+
+ static const int kObjectIndex = 0;
+ static const int kModuleIndex = 1;
+ static const int kTypeIndexIndex = 2;
+ static const int kLength = 3;
+
+ static Handle<JSObject> Create(Isolate* isolate, const wasm::WasmValue& value,
+ Handle<WasmModuleObject> module) {
+ Handle<FixedArray> data = isolate->factory()->NewFixedArray(kLength);
+ data->set(kObjectIndex, *value.to_ref());
+ data->set(kModuleIndex, *module);
+ int struct_type_index = value.type().ref_index();
+ data->set(kTypeIndexIndex, Smi::FromInt(struct_type_index));
+ return NamedDebugProxy::Create(isolate, data);
+ }
+
+ static uint32_t Count(Isolate* isolate, Handle<FixedArray> data) {
+ return WasmStruct::cast(data->get(kObjectIndex)).type()->field_count();
+ }
+
+ static Handle<Object> Get(Isolate* isolate, Handle<FixedArray> data,
+ uint32_t index) {
+ Handle<WasmStruct> obj(WasmStruct::cast(data->get(kObjectIndex)), isolate);
+ Handle<WasmModuleObject> module(
+ WasmModuleObject::cast(data->get(kModuleIndex)), isolate);
+ return WasmValueObject::New(isolate, obj->GetFieldValue(index), module);
+ }
+
+ static Handle<String> GetName(Isolate* isolate, Handle<FixedArray> data,
+ uint32_t index) {
+ wasm::NativeModule* native_module =
+ WasmModuleObject::cast(data->get(kModuleIndex)).native_module();
+ int struct_type_index = Smi::ToInt(Smi::cast(data->get(kTypeIndexIndex)));
+ wasm::ModuleWireBytes module_wire_bytes(native_module->wire_bytes());
+ Vector<const char> name_vec = module_wire_bytes.GetNameOrNull(
+ native_module->GetDebugInfo()->GetFieldName(struct_type_index, index));
+ return GetNameOrDefault(
+ isolate,
+ name_vec.empty() ? MaybeHandle<String>()
+ : isolate->factory()->NewStringFromUtf8(name_vec),
+ "$field", index);
+ }
+};
+
+// This class implements a proxy for a single inspectable Wasm array.
+struct ArrayProxy : IndexedDebugProxy<ArrayProxy, kArrayProxy, FixedArray> {
+ static constexpr char const* kClassName = "Array";
+
+ static const int kObjectIndex = 0;
+ static const int kModuleIndex = 1;
+ static const int kLength = 2;
+
+ static Handle<JSObject> Create(Isolate* isolate, const wasm::WasmValue& value,
+ Handle<WasmModuleObject> module) {
+ Handle<FixedArray> data = isolate->factory()->NewFixedArray(kLength);
+ data->set(kObjectIndex, *value.to_ref());
+ data->set(kModuleIndex, *module);
+ Handle<JSObject> proxy = IndexedDebugProxy::Create(
+ isolate, data, false /* leave map extensible */);
+ uint32_t length = WasmArray::cast(*value.to_ref()).length();
+ Handle<Object> length_obj = isolate->factory()->NewNumberFromUint(length);
+ Object::SetProperty(isolate, proxy, isolate->factory()->length_string(),
+ length_obj, StoreOrigin::kNamed,
+ Just(ShouldThrow::kThrowOnError))
+ .Check();
+ return proxy;
+ }
+
+ static v8::Local<v8::FunctionTemplate> CreateTemplate(v8::Isolate* isolate) {
+ Local<v8::FunctionTemplate> templ =
+ IndexedDebugProxy::CreateTemplate(isolate);
+ templ->InstanceTemplate()->Set(isolate, "length",
+ v8::Number::New(isolate, 0));
+ return templ;
+ }
+
+ static uint32_t Count(Isolate* isolate, Handle<FixedArray> data) {
+ return WasmArray::cast(data->get(kObjectIndex)).length();
+ }
+
+ static Handle<Object> Get(Isolate* isolate, Handle<FixedArray> data,
+ uint32_t index) {
+ Handle<WasmArray> array(WasmArray::cast(data->get(kObjectIndex)), isolate);
+ Handle<WasmModuleObject> module(
+ WasmModuleObject::cast(data->get(kModuleIndex)), isolate);
+ return WasmValueObject::New(isolate, array->GetElement(index), module);
+ }
+};
+
// static
-Handle<WasmValueObject> WasmValueObject::New(Isolate* isolate,
- const wasm::WasmValue& value) {
+Handle<WasmValueObject> WasmValueObject::New(
+ Isolate* isolate, const wasm::WasmValue& value,
+ Handle<WasmModuleObject> module_object) {
+ Handle<String> t;
+ Handle<Object> v;
switch (value.type().kind()) {
- case wasm::kF32:
- return New(isolate, kF32, isolate->factory()->NewNumber(value.to_f32()));
- case wasm::kF64:
- return New(isolate, kF64, isolate->factory()->NewNumber(value.to_f64()));
- case wasm::kI32:
- return New(isolate, kI32, isolate->factory()->NewNumber(value.to_i32()));
- case wasm::kI64:
- return New(isolate, kI64, BigInt::FromInt64(isolate, value.to_i64()));
- case wasm::kRef:
- return New(isolate, kExternRef, value.to_externref());
- case wasm::kS128:
- return New(isolate, kV128, WasmSimd128ToString(isolate, value.to_s128()));
- default:
+ case wasm::kI8: {
+ // This can't be reached for most "top-level" things, only via nested
+ // calls for struct/array fields.
+ t = isolate->factory()->InternalizeString(StaticCharVector("i8"));
+ v = isolate->factory()->NewNumber(value.to_i8_unchecked());
+ break;
+ }
+ case wasm::kI16: {
+ // This can't be reached for most "top-level" things, only via nested
+ // calls for struct/array fields.
+ t = isolate->factory()->InternalizeString(StaticCharVector("i16"));
+ v = isolate->factory()->NewNumber(value.to_i16_unchecked());
+ break;
+ }
+ case wasm::kI32: {
+ t = isolate->factory()->InternalizeString(StaticCharVector("i32"));
+ v = isolate->factory()->NewNumberFromInt(value.to_i32_unchecked());
+ break;
+ }
+ case wasm::kI64: {
+ t = isolate->factory()->InternalizeString(StaticCharVector("i64"));
+ v = BigInt::FromInt64(isolate, value.to_i64_unchecked());
+ break;
+ }
+ case wasm::kF32: {
+ t = isolate->factory()->InternalizeString(StaticCharVector("f32"));
+ v = isolate->factory()->NewNumber(value.to_f32_unchecked());
+ break;
+ }
+ case wasm::kF64: {
+ t = isolate->factory()->InternalizeString(StaticCharVector("f64"));
+ v = isolate->factory()->NewNumber(value.to_f64_unchecked());
+ break;
+ }
+ case wasm::kS128: {
+ t = isolate->factory()->InternalizeString(StaticCharVector("v128"));
+ v = WasmSimd128ToString(isolate, value.to_s128_unchecked());
+ break;
+ }
+ case wasm::kOptRef:
+ if (value.type().is_reference_to(wasm::HeapType::kExtern)) {
+ t = isolate->factory()->InternalizeString(
+ StaticCharVector("externref"));
+ v = value.to_ref();
+ break;
+ }
+ V8_FALLTHROUGH;
+ case wasm::kRef: {
+ t = GetRefTypeName(isolate, value.type(), module_object->native_module());
+ Handle<Object> ref = value.to_ref();
+ if (ref->IsWasmStruct()) {
+ v = StructProxy::Create(isolate, value, module_object);
+ } else if (ref->IsWasmArray()) {
+ v = ArrayProxy::Create(isolate, value, module_object);
+ } else if (ref->IsJSFunction() || ref->IsSmi() || ref->IsNull()) {
+ v = ref;
+ } else {
+ // Fail gracefully.
+ EmbeddedVector<char, 64> error;
+ int len = SNPrintF(error, "unimplemented object type: %d",
+ HeapObject::cast(*ref).map().instance_type());
+ v = isolate->factory()->InternalizeString(error.SubVector(0, len));
+ }
+ break;
+ }
+ case wasm::kRtt:
+ case wasm::kRttWithDepth: {
+ // TODO(7748): Expose RTTs to DevTools.
+ t = isolate->factory()->InternalizeString(StaticCharVector("rtt"));
+ v = isolate->factory()->InternalizeString(
+ StaticCharVector("(unimplemented)"));
+ break;
+ }
+ case wasm::kVoid:
+ case wasm::kBottom:
UNREACHABLE();
}
+ return New(isolate, t, v);
}
Handle<JSObject> GetWasmDebugProxy(WasmFrame* frame) {
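
GetRefTypeName above renders Wasm reference types as strings such as "(ref null extern)", "(ref $name)" or the "(ref null $typeN)" fallback when the module carries no name for the type index. A standalone sketch of the named and index-fallback cases using snprintf into a fixed buffer; the generic heap types are omitted and all names here are placeholders:

#include <algorithm>
#include <cstdio>
#include <string>

// Render a Wasm-style reference-type name. `nullable` adds the " null" infix,
// `type_name` is the user-level name if the module has one, and `type_index`
// feeds the "$typeN" fallback otherwise.
std::string RefTypeName(bool nullable, const std::string& type_name,
                        unsigned type_index) {
  char buffer[64];
  const char* null_infix = nullable ? " null" : "";
  int len =
      type_name.empty()
          ? std::snprintf(buffer, sizeof(buffer), "(ref%s $type%u)",
                          null_infix, type_index)
          : std::snprintf(buffer, sizeof(buffer), "(ref%s $%s)", null_infix,
                          type_name.c_str());
  if (len < 0) return {};
  // snprintf reports the would-be length; clamp to what actually fits.
  return std::string(buffer, std::min<size_t>(len, sizeof(buffer) - 1));
}
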
diff --git a/deps/v8/src/debug/debug-wasm-objects.h b/deps/v8/src/debug/debug-wasm-objects.h
index 98c9e6db164..6eb075b9b6a 100644
--- a/deps/v8/src/debug/debug-wasm-objects.h
+++ b/deps/v8/src/debug/debug-wasm-objects.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_DEBUG_DEBUG_WASM_OBJECTS_H_
#define V8_DEBUG_DEBUG_WASM_OBJECTS_H_
@@ -32,6 +36,7 @@ class WasmValueObject : public JSObject {
public:
DECL_CAST(WasmValueObject)
+ DECL_ACCESSORS(type, String)
DECL_ACCESSORS(value, Object)
// Dispatched behavior.
@@ -40,20 +45,21 @@ class WasmValueObject : public JSObject {
// Layout description.
#define WASM_VALUE_FIELDS(V) \
+ V(kTypeOffset, kTaggedSize) \
V(kValueOffset, kTaggedSize) \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, WASM_VALUE_FIELDS)
#undef WASM_VALUE_FIELDS
// Indices of in-object properties.
- static constexpr int kValueIndex = 0;
-
- enum Type { kExternRef, kF32, kF64, kI32, kI64, kV128, kNumTypes };
+ static constexpr int kTypeIndex = 0;
+ static constexpr int kValueIndex = 1;
- static Handle<WasmValueObject> New(Isolate* isolate, Type type,
+ static Handle<WasmValueObject> New(Isolate* isolate, Handle<String> type,
Handle<Object> value);
static Handle<WasmValueObject> New(Isolate* isolate,
- const wasm::WasmValue& value);
+ const wasm::WasmValue& value,
+ Handle<WasmModuleObject> module);
OBJECT_CONSTRUCTORS(WasmValueObject, JSObject);
};
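
The layout change above puts a tagged "type" slot in front of the existing "value" slot, so the object now carries two in-object fields after the JSObject header. A sketch of the running-sum offsets that the WASM_VALUE_FIELDS list effectively encodes; the concrete sizes below are placeholder values, not V8's:

// Placeholder sizes; V8 derives the real ones from the target architecture.
constexpr int kTaggedSize = 8;
constexpr int kHeaderSize = 16;

// Running-sum layout: each field starts where the previous one ended.
constexpr int kTypeOffset = kHeaderSize;                 // "type" slot
constexpr int kValueOffset = kTypeOffset + kTaggedSize;  // "value" slot
constexpr int kSize = kValueOffset + kTaggedSize;        // total object size

static_assert(kSize == kHeaderSize + 2 * kTaggedSize,
              "two tagged in-object fields after the header");
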
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 1e2e18170d5..0b873f7c8c3 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -27,7 +27,6 @@
#include "src/handles/global-handles.h"
#include "src/heap/heap-inl.h" // For NextDebuggingId.
#include "src/init/bootstrapper.h"
-#include "src/interpreter/bytecode-array-accessor.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/interpreter.h"
#include "src/logging/counters.h"
@@ -36,9 +35,13 @@
#include "src/objects/js-generator-inl.h"
#include "src/objects/js-promise-inl.h"
#include "src/objects/slots.h"
+#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-debug.h"
#include "src/wasm/wasm-objects-inl.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -217,29 +220,6 @@ BreakIterator::BreakIterator(Handle<DebugInfo> debug_info)
}
int BreakIterator::BreakIndexFromPosition(int source_position) {
- // TODO(crbug.com/901819): When there's no exact match, we
- // should always pick the first match (in execution order)
- // to ensure that when setting a breakpoint on a line, we
- // really break as early as possible in that line. With
- // generators that's currently broken because of the way
- // the implicit yield is handled, this will be fixed in
- // a follow up CL.
- if (IsGeneratorFunction(debug_info_->shared().kind()) ||
- IsModule(debug_info_->shared().kind())) {
- int distance = kMaxInt;
- int closest_break = break_index();
- while (!Done()) {
- int next_position = position();
- if (source_position <= next_position &&
- next_position - source_position < distance) {
- closest_break = break_index();
- distance = next_position - source_position;
- if (distance == 0) break;
- }
- Next();
- }
- return closest_break;
- }
int first_break = break_index();
bool first = true;
while (!Done()) {
@@ -313,7 +293,7 @@ void BreakIterator::SetDebugBreak() {
DCHECK(debug_break_type >= DEBUG_BREAK_SLOT);
Handle<BytecodeArray> bytecode_array(debug_info_->DebugBytecodeArray(),
isolate());
- interpreter::BytecodeArrayAccessor(bytecode_array, code_offset())
+ interpreter::BytecodeArrayIterator(bytecode_array, code_offset())
.ApplyDebugBreak();
}
@@ -338,12 +318,12 @@ BreakLocation BreakIterator::GetBreakLocation() {
// bytecode array, and we'll read the actual generator object off the
// interpreter stack frame in GetGeneratorObjectForSuspendedFrame.
BytecodeArray bytecode_array = debug_info_->OriginalBytecodeArray();
- interpreter::BytecodeArrayAccessor accessor(
+ interpreter::BytecodeArrayIterator iterator(
handle(bytecode_array, isolate()), code_offset());
- DCHECK_EQ(accessor.current_bytecode(),
+ DCHECK_EQ(iterator.current_bytecode(),
interpreter::Bytecode::kSuspendGenerator);
- interpreter::Register generator_obj_reg = accessor.GetRegisterOperand(0);
+ interpreter::Register generator_obj_reg = iterator.GetRegisterOperand(0);
generator_object_reg_index = generator_obj_reg.index();
}
return BreakLocation(code, type, code_offset(), position_,
@@ -586,7 +566,6 @@ bool Debug::IsMutedAtCurrentLocation(JavaScriptFrame* frame) {
// break location, we also do not trigger one for debugger statements, nor
// an exception event on exception at this location.
FrameSummary summary = FrameSummary::GetTop(frame);
- DCHECK(!summary.IsWasm());
Handle<JSFunction> function = summary.AsJavaScript().function();
if (!function->shared().HasBreakInfo()) return false;
Handle<DebugInfo> debug_info(function->shared().GetDebugInfo(), isolate_);
@@ -668,20 +647,31 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
*id = ++thread_local_.last_breakpoint_id_;
Handle<BreakPoint> break_point =
isolate_->factory()->NewBreakPoint(*id, condition);
+#if V8_ENABLE_WEBASSEMBLY
if (script->type() == Script::TYPE_WASM) {
RecordWasmScriptWithBreakpoints(script);
return WasmScript::SetBreakPoint(script, source_position, break_point);
}
+#endif // V8_ENABLE_WEBASSEMBLY
HandleScope scope(isolate_);
- // Obtain shared function info for the function.
+ // Obtain shared function info for the innermost function containing this
+ // position.
Handle<Object> result =
- FindSharedFunctionInfoInScript(script, *source_position);
+ FindInnermostContainingFunctionInfo(script, *source_position);
if (result->IsUndefined(isolate_)) return false;
- // Set the breakpoint in the function.
auto shared = Handle<SharedFunctionInfo>::cast(result);
+ if (!EnsureBreakInfo(shared)) return false;
+ PrepareFunctionForDebugExecution(shared);
+
+ // Find the nested shared function info that is closest to the position within
+ // the containing function.
+ shared = FindClosestSharedFunctionInfoFromPosition(*source_position, script,
+ shared);
+
+ // Set the breakpoint in the function.
return SetBreakpoint(shared, break_point, source_position);
}
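
The rewritten SetBreakPointForScript above first resolves the innermost function containing the requested position and then, among nested functions, picks the breakable position closest at or after it. A simplified sketch of that closest-at-or-after selection; Candidate and ClosestCandidate are placeholders standing in for the SharedFunctionInfo/DebugInfo machinery:

#include <climits>
#include <vector>

struct Candidate {
  int first_breakable;  // first breakable position at or after the request
  int id;
};

// Pick the candidate whose breakable position is closest to `position`
// without preceding it; `fallback` is returned when nothing qualifies.
int ClosestCandidate(const std::vector<Candidate>& candidates, int position,
                     int fallback) {
  int best_id = fallback;
  int best_position = INT_MAX;
  for (const Candidate& c : candidates) {
    if (c.first_breakable < position) continue;         // before the request
    if (c.first_breakable >= best_position) continue;   // not an improvement
    best_position = c.first_breakable;
    best_id = c.id;
    if (best_position == position) break;  // cannot beat an exact match
  }
  return best_id;
}
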
@@ -774,6 +764,7 @@ bool Debug::SetBreakpointForFunction(Handle<SharedFunctionInfo> shared,
Handle<BreakPoint> breakpoint =
isolate_->factory()->NewBreakPoint(*id, condition);
int source_position = 0;
+#if V8_ENABLE_WEBASSEMBLY
// Handle wasm function.
if (shared->HasWasmExportedFunctionData()) {
int func_index = shared->wasm_exported_function_data().function_index();
@@ -784,6 +775,7 @@ bool Debug::SetBreakpointForFunction(Handle<SharedFunctionInfo> shared,
return WasmScript::SetBreakPointOnFirstBreakableForFunction(
script, func_index, breakpoint);
}
+#endif // V8_ENABLE_WEBASSEMBLY
return SetBreakpoint(shared, breakpoint, &source_position);
}
@@ -793,6 +785,7 @@ void Debug::RemoveBreakpoint(int id) {
ClearBreakPoint(breakpoint);
}
+#if V8_ENABLE_WEBASSEMBLY
void Debug::RemoveBreakpointForWasmScript(Handle<Script> script, int id) {
if (script->type() == Script::TYPE_WASM) {
WasmScript::ClearBreakPointById(script, id);
@@ -826,6 +819,7 @@ void Debug::RecordWasmScriptWithBreakpoints(Handle<Script> script) {
isolate_->global_handles()->Create(*new_list);
}
}
+#endif // V8_ENABLE_WEBASSEMBLY
// Clear out all the debug break code.
void Debug::ClearAllBreakPoints() {
@@ -833,6 +827,7 @@ void Debug::ClearAllBreakPoints() {
ClearBreakPoints(info);
info->ClearBreakInfo(isolate_);
});
+#if V8_ENABLE_WEBASSEMBLY
// Clear all wasm breakpoints.
if (!wasm_scripts_with_breakpoints_.is_null()) {
DisallowGarbageCollection no_gc;
@@ -849,6 +844,7 @@ void Debug::ClearAllBreakPoints() {
}
wasm_scripts_with_breakpoints_ = Handle<WeakArrayList>{};
}
+#endif // V8_ENABLE_WEBASSEMBLY
}
void Debug::FloodWithOneShot(Handle<SharedFunctionInfo> shared,
@@ -1093,6 +1089,7 @@ void Debug::PrepareStep(StepAction step_action) {
thread_local_.last_frame_count_ = current_frame_count;
// No longer perform the current async step.
clear_suspended_generator();
+#if V8_ENABLE_WEBASSEMBLY
} else if (frame->is_wasm() && step_action != StepOut) {
// Handle stepping in wasm.
WasmFrame* wasm_frame = WasmFrame::cast(frame);
@@ -1106,6 +1103,7 @@ void Debug::PrepareStep(StepAction step_action) {
// instead.
step_action = StepOut;
UpdateHookOnFunctionCall();
+#endif // V8_ENABLE_WEBASSEMBLY
}
switch (step_action) {
@@ -1128,6 +1126,7 @@ void Debug::PrepareStep(StepAction step_action) {
// and deoptimize every frame along the way.
bool in_current_frame = true;
for (; !frames_it.done(); frames_it.Advance()) {
+#if V8_ENABLE_WEBASSEMBLY
if (frames_it.frame()->is_wasm()) {
if (in_current_frame) {
in_current_frame = false;
@@ -1139,6 +1138,7 @@ void Debug::PrepareStep(StepAction step_action) {
debug_info->PrepareStepOutTo(wasm_frame);
return;
}
+#endif // V8_ENABLE_WEBASSEMBLY
JavaScriptFrame* frame = JavaScriptFrame::cast(frames_it.frame());
if (last_step_action() == StepIn) {
// Deoptimize frame to ensure calls are checked for step-in.
@@ -1240,9 +1240,9 @@ class DiscardBaselineCodeVisitor : public ThreadVisitor {
void VisitThread(Isolate* isolate, ThreadLocalTop* top) override {
bool deopt_all = shared_ == SharedFunctionInfo();
for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
+ if (!deopt_all && it.frame()->function().shared() != shared_) continue;
if (it.frame()->type() == StackFrame::BASELINE) {
BaselineFrame* frame = BaselineFrame::cast(it.frame());
- if (!deopt_all && frame->function().shared() != shared_) continue;
int bytecode_offset = frame->GetBytecodeOffset();
Address* pc_addr = frame->pc_address();
Address advance = BUILTIN_CODE(isolate, InterpreterEnterBytecodeAdvance)
@@ -1250,6 +1250,27 @@ class DiscardBaselineCodeVisitor : public ThreadVisitor {
PointerAuthentication::ReplacePC(pc_addr, advance, kSystemPointerSize);
InterpretedFrame::cast(it.Reframe())
->PatchBytecodeOffset(bytecode_offset);
+ } else if (it.frame()->type() == StackFrame::INTERPRETED) {
+ // Check if the PC is a baseline entry trampoline. If it is, replace it
+ // with the corresponding interpreter entry trampoline.
+ // This is the case if a baseline function was inlined into a function
+ // we deoptimized in the debugger and are stepping into it.
+ JavaScriptFrame* frame = it.frame();
+ Address pc = frame->pc();
+ Builtins::Name builtin_index =
+ InstructionStream::TryLookupCode(isolate, pc);
+ if (builtin_index == Builtins::kBaselineEnterAtBytecode ||
+ builtin_index == Builtins::kBaselineEnterAtNextBytecode) {
+ Address* pc_addr = frame->pc_address();
+ Builtins::Name advance =
+ builtin_index == Builtins::kBaselineEnterAtBytecode
+ ? Builtins::kInterpreterEnterBytecodeDispatch
+ : Builtins::kInterpreterEnterBytecodeAdvance;
+ Address advance_pc =
+ isolate->builtins()->builtin(advance).InstructionStart();
+ PointerAuthentication::ReplacePC(pc_addr, advance_pc,
+ kSystemPointerSize);
+ }
}
}
}
@@ -1379,12 +1400,7 @@ void Debug::InstallDebugBreakTrampoline() {
Handle<Code> trampoline = BUILTIN_CODE(isolate_, DebugBreakTrampoline);
std::vector<Handle<JSFunction>> needs_compile;
- using AccessorPairWithContext =
- std::pair<Handle<AccessorPair>, Handle<NativeContext>>;
- std::vector<AccessorPairWithContext> needs_instantiate;
{
- // Deduplicate {needs_instantiate} by recording all collected AccessorPairs.
- std::set<AccessorPair> recorded;
HeapObjectIterator iterator(isolate_->heap());
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next()) {
@@ -1401,58 +1417,10 @@ void Debug::InstallDebugBreakTrampoline() {
} else {
fun.set_code(*trampoline);
}
- } else if (obj.IsJSObject()) {
- JSObject object = JSObject::cast(obj);
- DescriptorArray descriptors =
- object.map().instance_descriptors(kRelaxedLoad);
-
- for (InternalIndex i : object.map().IterateOwnDescriptors()) {
- if (descriptors.GetDetails(i).kind() == PropertyKind::kAccessor) {
- Object value = descriptors.GetStrongValue(i);
- if (!value.IsAccessorPair()) continue;
-
- AccessorPair accessor_pair = AccessorPair::cast(value);
- if (!accessor_pair.getter().IsFunctionTemplateInfo() &&
- !accessor_pair.setter().IsFunctionTemplateInfo()) {
- continue;
- }
- if (recorded.find(accessor_pair) != recorded.end()) continue;
-
- needs_instantiate.emplace_back(
- handle(accessor_pair, isolate_),
- object.GetCreationContext().ToHandleChecked());
- recorded.insert(accessor_pair);
- }
- }
}
}
}
- // Forcibly instantiate all lazy accessor pairs to make sure that they
- // properly hit the debug break trampoline.
- for (AccessorPairWithContext tuple : needs_instantiate) {
- Handle<AccessorPair> accessor_pair = tuple.first;
- Handle<NativeContext> native_context = tuple.second;
- if (accessor_pair->getter().IsFunctionTemplateInfo()) {
- Handle<JSFunction> fun =
- ApiNatives::InstantiateFunction(
- isolate_, native_context,
- handle(FunctionTemplateInfo::cast(accessor_pair->getter()),
- isolate_))
- .ToHandleChecked();
- accessor_pair->set_getter(*fun);
- }
- if (accessor_pair->setter().IsFunctionTemplateInfo()) {
- Handle<JSFunction> fun =
- ApiNatives::InstantiateFunction(
- isolate_, native_context,
- handle(FunctionTemplateInfo::cast(accessor_pair->setter()),
- isolate_))
- .ToHandleChecked();
- accessor_pair->set_setter(*fun);
- }
- }
-
// By overwriting the function code with DebugBreakTrampoline, which tailcalls
// to shared code, we bypass CompileLazy. Perform CompileLazy here instead.
for (Handle<JSFunction> fun : needs_compile) {
@@ -1483,6 +1451,24 @@ void FindBreakablePositions(Handle<DebugInfo> debug_info, int start_position,
BreakIterator it(debug_info);
GetBreakablePositions(&it, start_position, end_position, locations);
}
+
+bool CompileTopLevel(Isolate* isolate, Handle<Script> script) {
+ UnoptimizedCompileState compile_state(isolate);
+ UnoptimizedCompileFlags flags =
+ UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
+ ParseInfo parse_info(isolate, flags, &compile_state);
+ IsCompiledScope is_compiled_scope;
+ const MaybeHandle<SharedFunctionInfo> maybe_result =
+ Compiler::CompileToplevel(&parse_info, script, isolate,
+ &is_compiled_scope);
+ if (maybe_result.is_null()) {
+ if (isolate->has_pending_exception()) {
+ isolate->clear_pending_exception();
+ }
+ return false;
+ }
+ return true;
+}
} // namespace
bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
@@ -1490,7 +1476,7 @@ bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
std::vector<BreakLocation>* locations) {
if (restrict_to_function) {
Handle<Object> result =
- FindSharedFunctionInfoInScript(script, start_position);
+ FindInnermostContainingFunctionInfo(script, start_position);
if (result->IsUndefined(isolate_)) return false;
// Make sure the function has set up the debug info.
@@ -1504,51 +1490,18 @@ bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
return true;
}
- while (true) {
- HandleScope scope(isolate_);
- std::vector<Handle<SharedFunctionInfo>> candidates;
- std::vector<IsCompiledScope> compiled_scopes;
- SharedFunctionInfo::ScriptIterator iterator(isolate_, *script);
- for (SharedFunctionInfo info = iterator.Next(); !info.is_null();
- info = iterator.Next()) {
- if (info.EndPosition() < start_position ||
- info.StartPosition() >= end_position) {
- continue;
- }
- if (!info.IsSubjectToDebugging()) continue;
- if (!info.is_compiled() && !info.allows_lazy_compilation()) continue;
- candidates.push_back(i::handle(info, isolate_));
- }
-
- bool was_compiled = false;
- for (const auto& candidate : candidates) {
- IsCompiledScope is_compiled_scope(candidate->is_compiled_scope(isolate_));
- if (!is_compiled_scope.is_compiled()) {
- // Code that cannot be compiled lazily are internal and not debuggable.
- DCHECK(candidate->allows_lazy_compilation());
- if (!Compiler::Compile(isolate_, candidate, Compiler::CLEAR_EXCEPTION,
- &is_compiled_scope)) {
- return false;
- } else {
- was_compiled = true;
- }
- }
- DCHECK(is_compiled_scope.is_compiled());
- compiled_scopes.push_back(is_compiled_scope);
- if (!EnsureBreakInfo(candidate)) return false;
- PrepareFunctionForDebugExecution(candidate);
- }
- if (was_compiled) continue;
-
- for (const auto& candidate : candidates) {
- CHECK(candidate->HasBreakInfo());
- Handle<DebugInfo> debug_info(candidate->GetDebugInfo(), isolate_);
- FindBreakablePositions(debug_info, start_position, end_position,
- locations);
- }
- return true;
+ HandleScope scope(isolate_);
+ std::vector<Handle<SharedFunctionInfo>> candidates;
+ if (!FindSharedFunctionInfosIntersectingRange(script, start_position,
+ end_position, &candidates)) {
+ return false;
}
- UNREACHABLE();
+ for (const auto& candidate : candidates) {
+ CHECK(candidate->HasBreakInfo());
+ Handle<DebugInfo> debug_info(candidate->GetDebugInfo(), isolate_);
+ FindBreakablePositions(debug_info, start_position, end_position, locations);
+ }
+ return true;
}
class SharedFunctionInfoFinder {
@@ -1614,17 +1567,119 @@ SharedFunctionInfo FindSharedFunctionInfoCandidate(int position,
}
} // namespace
+Handle<SharedFunctionInfo> Debug::FindClosestSharedFunctionInfoFromPosition(
+ int position, Handle<Script> script,
+ Handle<SharedFunctionInfo> outer_shared) {
+ CHECK(outer_shared->HasBreakInfo());
+ int closest_position = FindBreakablePosition(
+ Handle<DebugInfo>(outer_shared->GetDebugInfo(), isolate_), position);
+ Handle<SharedFunctionInfo> closest_candidate = outer_shared;
+ if (closest_position == position) return outer_shared;
+
+ const int start_position = outer_shared->StartPosition();
+ const int end_position = outer_shared->EndPosition();
+ if (start_position == end_position) return outer_shared;
+
+ if (closest_position == 0) closest_position = end_position;
+ std::vector<Handle<SharedFunctionInfo>> candidates;
+ // Find all shared function infos of functions that are intersecting from
+ // the requested position until the end of the enclosing function.
+ if (!FindSharedFunctionInfosIntersectingRange(
+ script, position, closest_position, &candidates)) {
+ return outer_shared;
+ }
+
+ for (auto candidate : candidates) {
+ CHECK(candidate->HasBreakInfo());
+ Handle<DebugInfo> debug_info(candidate->GetDebugInfo(), isolate_);
+ const int candidate_position = FindBreakablePosition(debug_info, position);
+ if (candidate_position >= position &&
+ candidate_position < closest_position) {
+ closest_position = candidate_position;
+ closest_candidate = candidate;
+ }
+ if (closest_position == position) break;
+ }
+ return closest_candidate;
+}
+
+bool Debug::FindSharedFunctionInfosIntersectingRange(
+ Handle<Script> script, int start_position, int end_position,
+ std::vector<Handle<SharedFunctionInfo>>* intersecting_shared) {
+ bool candidateSubsumesRange = false;
+ bool triedTopLevelCompile = false;
+
+ while (true) {
+ std::vector<Handle<SharedFunctionInfo>> candidates;
+ std::vector<IsCompiledScope> compiled_scopes;
+ {
+ DisallowGarbageCollection no_gc;
+ SharedFunctionInfo::ScriptIterator iterator(isolate_, *script);
+ for (SharedFunctionInfo info = iterator.Next(); !info.is_null();
+ info = iterator.Next()) {
+ if (info.EndPosition() < start_position ||
+ info.StartPosition() >= end_position) {
+ continue;
+ }
+ candidateSubsumesRange |= info.StartPosition() <= start_position &&
+ info.EndPosition() >= end_position;
+ if (!info.IsSubjectToDebugging()) continue;
+ if (!info.is_compiled() && !info.allows_lazy_compilation()) continue;
+ candidates.push_back(i::handle(info, isolate_));
+ }
+ }
+
+ if (!triedTopLevelCompile && !candidateSubsumesRange &&
+ script->shared_function_infos().length() > 0) {
+ MaybeObject maybeToplevel = script->shared_function_infos().Get(0);
+ HeapObject heap_object;
+ const bool topLevelInfoExists =
+ maybeToplevel->GetHeapObject(&heap_object) &&
+ !heap_object.IsUndefined();
+ if (!topLevelInfoExists) {
+ triedTopLevelCompile = true;
+ const bool success = CompileTopLevel(isolate_, script);
+ if (!success) return false;
+ continue;
+ }
+ }
+
+ bool was_compiled = false;
+ for (const auto& candidate : candidates) {
+ IsCompiledScope is_compiled_scope(candidate->is_compiled_scope(isolate_));
+ if (!is_compiled_scope.is_compiled()) {
+        // Code that cannot be compiled lazily is internal and not debuggable.
+ DCHECK(candidate->allows_lazy_compilation());
+ if (!Compiler::Compile(isolate_, candidate, Compiler::CLEAR_EXCEPTION,
+ &is_compiled_scope)) {
+ return false;
+ } else {
+ was_compiled = true;
+ }
+ }
+ DCHECK(is_compiled_scope.is_compiled());
+ compiled_scopes.push_back(is_compiled_scope);
+ if (!EnsureBreakInfo(candidate)) return false;
+ PrepareFunctionForDebugExecution(candidate);
+ }
+ if (was_compiled) continue;
+ *intersecting_shared = std::move(candidates);
+ return true;
+ }
+ UNREACHABLE();
+}
+
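For reference, the candidate loop in FindSharedFunctionInfosIntersectingRange above keeps a SharedFunctionInfo when its [StartPosition, EndPosition] span overlaps the queried [start_position, end_position) range, and candidateSubsumesRange records whether some candidate covers the whole query. A minimal standalone sketch of those two predicates, with plain ints standing in for the position accessors (the names below are illustrative, not V8 API):

#include <cassert>

// Overlap test mirroring the skip condition in the candidate loop:
// a candidate is skipped when EndPosition() < start or StartPosition() >= end.
bool Intersects(int candidate_start, int candidate_end,
                int query_start, int query_end) {
  return !(candidate_end < query_start || candidate_start >= query_end);
}

// Containment test mirroring the candidateSubsumesRange update.
bool Subsumes(int candidate_start, int candidate_end,
              int query_start, int query_end) {
  return candidate_start <= query_start && candidate_end >= query_end;
}

int main() {
  assert(Intersects(0, 10, 5, 15));   // partial overlap is kept
  assert(!Intersects(0, 4, 5, 15));   // ends before the query starts
  assert(Subsumes(0, 20, 5, 15));     // covers the whole query range
  return 0;
}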
// We need to find a SFI for a literal that may not have been compiled yet,
// and there may not be a JSFunction referencing it. Find the SFI closest to
// the given position, compile it to reveal possible inner SFIs and repeat.
// While we are at it, also ensure code with debug break slots so that we do
// not have to compile a SFI without JSFunction, which is painful for those that
// cannot be compiled without context (need to find outer compilable SFI etc.)
-Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
- int position) {
+Handle<Object> Debug::FindInnermostContainingFunctionInfo(Handle<Script> script,
+ int position) {
for (int iteration = 0;; iteration++) {
// Go through all shared function infos associated with this script to
- // find the inner most function containing this position.
+ // find the innermost function containing this position.
// If there is no shared function info for this script at all, there is
// no point in looking for it by walking the heap.
@@ -1633,23 +1688,12 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
{
shared = FindSharedFunctionInfoCandidate(position, script, isolate_);
if (shared.is_null()) {
+ if (iteration > 0) break;
// It might be that the shared function info is not available as the
// top level functions are removed due to the GC. Try to recompile
// the top level functions.
- UnoptimizedCompileState compile_state(isolate_);
- UnoptimizedCompileFlags flags =
- UnoptimizedCompileFlags::ForScriptCompile(isolate_, *script);
- ParseInfo parse_info(isolate_, flags, &compile_state);
- IsCompiledScope is_compiled_scope;
- const MaybeHandle<SharedFunctionInfo> maybe_result =
- Compiler::CompileToplevel(&parse_info, script, isolate_,
- &is_compiled_scope);
- if (maybe_result.is_null()) {
- if (isolate_->has_pending_exception()) {
- isolate_->clear_pending_exception();
- }
- break;
- }
+ const bool success = CompileTopLevel(isolate_, script);
+ if (!success) break;
continue;
}
// We found it if it's already compiled.
@@ -1661,7 +1705,6 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
// be no JSFunction referencing it. We can anticipate creating a debug
// info while bypassing PrepareFunctionForDebugExecution.
if (iteration > 1) {
- AllowGarbageCollection allow_before_return;
CreateBreakInfo(shared_handle);
}
return shared_handle;
@@ -1914,7 +1957,9 @@ bool Debug::IsExceptionBlackboxed(bool uncaught) {
// Uncaught exception is blackboxed if all current frames are blackboxed,
// caught exception if top frame is blackboxed.
StackTraceFrameIterator it(isolate_);
+#if V8_ENABLE_WEBASSEMBLY
while (!it.done() && it.is_wasm()) it.Advance();
+#endif // V8_ENABLE_WEBASSEMBLY
bool is_top_frame_blackboxed =
!it.done() ? IsFrameBlackboxed(it.javascript_frame()) : true;
if (!uncaught || !is_top_frame_blackboxed) return is_top_frame_blackboxed;
@@ -2136,9 +2181,13 @@ void Debug::ProcessCompileEvent(bool has_compile_error, Handle<Script> script) {
// inspector to filter scripts by native context.
script->set_context_data(isolate_->native_context()->debug_context_id());
if (ignore_events()) return;
+#if V8_ENABLE_WEBASSEMBLY
if (!script->IsUserJavaScript() && script->type() != i::Script::TYPE_WASM) {
return;
}
+#else
+ if (!script->IsUserJavaScript()) return;
+#endif // V8_ENABLE_WEBASSEMBLY
if (!debug_delegate_) return;
SuppressDebug while_processing(this);
DebugScope debug_scope(this);
@@ -2535,17 +2584,17 @@ bool Debug::PerformSideEffectCheckAtBytecode(InterpretedFrame* frame) {
SharedFunctionInfo shared = frame->function().shared();
BytecodeArray bytecode_array = shared.GetBytecodeArray(isolate_);
int offset = frame->GetBytecodeOffset();
- interpreter::BytecodeArrayAccessor bytecode_accessor(
+ interpreter::BytecodeArrayIterator bytecode_iterator(
handle(bytecode_array, isolate_), offset);
- Bytecode bytecode = bytecode_accessor.current_bytecode();
+ Bytecode bytecode = bytecode_iterator.current_bytecode();
interpreter::Register reg;
switch (bytecode) {
case Bytecode::kStaCurrentContextSlot:
reg = interpreter::Register::current_context();
break;
default:
- reg = bytecode_accessor.GetRegisterOperand(0);
+ reg = bytecode_iterator.GetRegisterOperand(0);
break;
}
Handle<Object> object =
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index 9462f701253..86c067c0357 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -245,9 +245,11 @@ class V8_EXPORT_PRIVATE Debug {
bool SetBreakpointForFunction(Handle<SharedFunctionInfo> shared,
Handle<String> condition, int* id);
void RemoveBreakpoint(int id);
+#if V8_ENABLE_WEBASSEMBLY
void RemoveBreakpointForWasmScript(Handle<Script> script, int id);
void RecordWasmScriptWithBreakpoints(Handle<Script> script);
+#endif // V8_ENABLE_WEBASSEMBLY
// Find breakpoints from the debug info and the break location and check
// whether they are hit. Return an empty handle if not, or a FixedArray with
@@ -292,8 +294,16 @@ class V8_EXPORT_PRIVATE Debug {
void RemoveAllCoverageInfos();
// This function is used in FunctionNameUsing* tests.
- Handle<Object> FindSharedFunctionInfoInScript(Handle<Script> script,
- int position);
+ Handle<Object> FindInnermostContainingFunctionInfo(Handle<Script> script,
+ int position);
+
+ Handle<SharedFunctionInfo> FindClosestSharedFunctionInfoFromPosition(
+ int position, Handle<Script> script,
+ Handle<SharedFunctionInfo> outer_shared);
+
+ bool FindSharedFunctionInfosIntersectingRange(
+ Handle<Script> script, int start_position, int end_position,
+ std::vector<Handle<SharedFunctionInfo>>* candidates);
static Handle<Object> GetSourceBreakLocations(
Isolate* isolate, Handle<SharedFunctionInfo> shared);
@@ -554,8 +564,10 @@ class V8_EXPORT_PRIVATE Debug {
// Storage location for registers when handling debug break calls
ThreadLocal thread_local_;
+#if V8_ENABLE_WEBASSEMBLY
// This is a global handle, lazily initialized.
Handle<WeakArrayList> wasm_scripts_with_breakpoints_;
+#endif // V8_ENABLE_WEBASSEMBLY
Isolate* isolate_;
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index 17d9cb240f4..294f0e1f7bb 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -1116,6 +1116,13 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
return;
}
+ // Patching a script means that the bytecode on the stack may no longer
+ // correspond to the bytecode of the JSFunction for that frame. As a result
+ // it is no longer safe to flush bytecode since we might flush the new
+ // bytecode for a JSFunction that is on the stack with an old bytecode, which
+ // breaks the invariant that any JSFunction active on the stack is compiled.
+ isolate->set_disable_bytecode_flushing(true);
+
std::map<int, int> start_position_to_unchanged_id;
for (const auto& mapping : unchanged) {
FunctionData* data = nullptr;
diff --git a/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc b/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc
index 2c08595f84a..9e608e9ed0b 100644
--- a/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc
+++ b/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc
@@ -400,7 +400,7 @@ bool WasmModuleDebug::GetWasmValue(const wasm::WasmValue& wasm_value,
case wasm::kWasmS128.kind():
return StoreValue(wasm_value.to_s128(), buffer, buffer_size, size);
- case wasm::kWasmStmt.kind():
+ case wasm::kWasmVoid.kind():
case wasm::kWasmExternRef.kind():
case wasm::kWasmBottom.kind():
default:
diff --git a/deps/v8/src/deoptimizer/OWNERS b/deps/v8/src/deoptimizer/OWNERS
index 137347321a8..197225fa9cc 100644
--- a/deps/v8/src/deoptimizer/OWNERS
+++ b/deps/v8/src/deoptimizer/OWNERS
@@ -1,4 +1,3 @@
jgruber@chromium.org
neis@chromium.org
nicohartmann@chromium.org
-sigurds@chromium.org
diff --git a/deps/v8/src/deoptimizer/deoptimizer-cfi-builtins.cc b/deps/v8/src/deoptimizer/deoptimizer-cfi-builtins.cc
index e1c08e4a99f..0b7472dbea6 100644
--- a/deps/v8/src/deoptimizer/deoptimizer-cfi-builtins.cc
+++ b/deps/v8/src/deoptimizer/deoptimizer-cfi-builtins.cc
@@ -14,6 +14,8 @@ void Builtins_ContinueToJavaScriptBuiltinWithResult();
void Builtins_ContinueToJavaScriptBuiltin();
void construct_stub_create_deopt_addr();
void construct_stub_invoke_deopt_addr();
+void Builtins_BaselineEnterAtBytecode();
+void Builtins_BaselineEnterAtNextBytecode();
typedef void (*function_ptr)();
}
@@ -30,6 +32,8 @@ constexpr function_ptr builtins[] = {
&Builtins_ContinueToJavaScriptBuiltin,
&construct_stub_create_deopt_addr,
&construct_stub_invoke_deopt_addr,
+ &Builtins_BaselineEnterAtBytecode,
+ &Builtins_BaselineEnterAtNextBytecode,
};
bool Deoptimizer::IsValidReturnAddress(Address address) {
diff --git a/deps/v8/src/deoptimizer/deoptimizer.cc b/deps/v8/src/deoptimizer/deoptimizer.cc
index 340dede2295..7c4562dbd56 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer/deoptimizer.cc
@@ -20,7 +20,10 @@
#include "src/objects/js-function-inl.h"
#include "src/objects/oddball.h"
#include "src/snapshot/embedded/embedded-data.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-linkage.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
@@ -178,7 +181,7 @@ Code Deoptimizer::FindDeoptimizingCode(Address addr) {
while (!element.IsUndefined(isolate)) {
Code code = Code::cast(element);
CHECK(CodeKindCanDeoptimize(code.kind()));
- if (code.contains(addr)) return code;
+ if (code.contains(isolate, addr)) return code;
element = code.next_code_link();
}
}
@@ -259,7 +262,8 @@ class ActivationsFinder : public ThreadVisitor {
code.marked_for_deoptimization()) {
codes_->erase(code);
// Obtain the trampoline to the deoptimizer call.
- SafepointEntry safepoint = code.GetSafepointEntry(it.frame()->pc());
+ SafepointEntry safepoint =
+ code.GetSafepointEntry(isolate, it.frame()->pc());
int trampoline_pc = safepoint.trampoline_pc();
DCHECK_IMPLIES(code == topmost_, safe_to_deopt_);
STATIC_ASSERT(SafepointEntry::kNoTrampolinePC == -1);
@@ -305,7 +309,8 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(NativeContext native_context) {
JSFunction function =
static_cast<OptimizedFrame*>(it.frame())->function();
TraceFoundActivation(isolate, function);
- SafepointEntry safepoint = code.GetSafepointEntry(it.frame()->pc());
+ SafepointEntry safepoint =
+ code.GetSafepointEntry(isolate, it.frame()->pc());
// Turbofan deopt is checked when we are patching addresses on stack.
bool safe_if_deopt_triggered = safepoint.has_deoptimization_index();
@@ -659,11 +664,10 @@ Builtins::Name Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind kind) {
bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr,
DeoptimizeKind* type_out) {
- Code maybe_code = InstructionStream::TryLookupCode(isolate, addr);
- if (maybe_code.is_null()) return false;
+ Builtins::Name builtin = InstructionStream::TryLookupCode(isolate, addr);
+ if (!Builtins::IsBuiltinId(builtin)) return false;
- Code code = maybe_code;
- switch (code.builtin_index()) {
+ switch (builtin) {
case Builtins::kDeoptimizationEntry_Eager:
*type_out = DeoptimizeKind::kEager;
return true;
@@ -937,7 +941,9 @@ void Deoptimizer::DoComputeOutputFrames() {
DoComputeConstructStubFrame(translated_frame, frame_index);
break;
case TranslatedFrame::kBuiltinContinuation:
+#if V8_ENABLE_WEBASSEMBLY
case TranslatedFrame::kJSToWasmBuiltinContinuation:
+#endif // V8_ENABLE_WEBASSEMBLY
DoComputeBuiltinContinuation(translated_frame, frame_index,
BuiltinContinuationMode::STUB);
break;
@@ -962,6 +968,10 @@ void Deoptimizer::DoComputeOutputFrames() {
FrameDescription* topmost = output_[count - 1];
topmost->GetRegisterValues()->SetRegister(kRootRegister.code(),
isolate()->isolate_root());
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ topmost->GetRegisterValues()->SetRegister(kPointerCageBaseRegister.code(),
+ isolate()->isolate_root());
+#endif
// Print some helpful diagnostic information.
if (verbose_tracing_enabled()) {
@@ -981,11 +991,25 @@ void Deoptimizer::DoComputeOutputFrames() {
stack_guard->real_jslimit() - kStackLimitSlackForDeoptimizationInBytes);
}
+namespace {
+
+// Get the dispatch builtin for unoptimized frames.
+Builtins::Name DispatchBuiltinFor(bool is_baseline, bool advance_bc) {
+ if (is_baseline) {
+ return advance_bc ? Builtins::kBaselineEnterAtNextBytecode
+ : Builtins::kBaselineEnterAtBytecode;
+ } else {
+ return advance_bc ? Builtins::kInterpreterEnterBytecodeAdvance
+ : Builtins::kInterpreterEnterBytecodeDispatch;
+ }
+}
+
+} // namespace
+
void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
int frame_index,
bool goto_catch_handler) {
SharedFunctionInfo shared = translated_frame->raw_shared_info();
-
TranslatedFrame::iterator value_iterator = translated_frame->begin();
const bool is_bottommost = (0 == frame_index);
const bool is_topmost = (output_count_ - 1 == frame_index);
@@ -1010,15 +1034,10 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
const uint32_t output_frame_size = frame_info.frame_size_in_bytes();
TranslatedFrame::iterator function_iterator = value_iterator++;
- if (verbose_tracing_enabled()) {
- PrintF(trace_scope()->file(), " translating unoptimized frame ");
- std::unique_ptr<char[]> name = shared.DebugNameCStr();
- PrintF(trace_scope()->file(), "%s", name.get());
- PrintF(trace_scope()->file(),
- " => bytecode_offset=%d, variable_frame_size=%d, frame_size=%d%s\n",
- real_bytecode_offset, frame_info.frame_size_in_bytes_without_fixed(),
- output_frame_size, goto_catch_handler ? " (throw)" : "");
- }
+
+ BytecodeArray bytecode_array =
+ shared.HasBreakInfo() ? shared.GetDebugInfo().DebugBytecodeArray()
+ : shared.GetBytecodeArray(isolate());
// Allocate and store the output frame description.
FrameDescription* output_frame = new (output_frame_size)
@@ -1029,6 +1048,34 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
CHECK_NULL(output_[frame_index]);
output_[frame_index] = output_frame;
+ // Compute this frame's PC and state.
+ // For interpreted frames, the PC will be a special builtin that
+ // continues the bytecode dispatch. Note that non-topmost and lazy-style
+ // bailout handlers also advance the bytecode offset before dispatch, hence
+ // simulating what normal handlers do upon completion of the operation.
+ // For baseline frames, the PC will be a builtin to convert the interpreter
+ // frame to a baseline frame before continuing execution of baseline code.
+ // We can't directly continue into baseline code, because of CFI.
+ Builtins* builtins = isolate_->builtins();
+ const bool advance_bc =
+ (!is_topmost || (deopt_kind_ == DeoptimizeKind::kLazy)) &&
+ !goto_catch_handler;
+ const bool is_baseline = shared.HasBaselineData();
+ Code dispatch_builtin =
+ builtins->builtin(DispatchBuiltinFor(is_baseline, advance_bc));
+
+ if (verbose_tracing_enabled()) {
+ PrintF(trace_scope()->file(), " translating %s frame ",
+ is_baseline ? "baseline" : "interpreted");
+ std::unique_ptr<char[]> name = shared.DebugNameCStr();
+ PrintF(trace_scope()->file(), "%s", name.get());
+ PrintF(trace_scope()->file(), " => bytecode_offset=%d, ",
+ real_bytecode_offset);
+ PrintF(trace_scope()->file(), "variable_frame_size=%d, frame_size=%d%s\n",
+ frame_info.frame_size_in_bytes_without_fixed(), output_frame_size,
+ goto_catch_handler ? " (throw)" : "");
+ }
+
// The top address of the frame is computed from the previous frame's top and
// this frame's size.
const intptr_t top_address =
@@ -1038,8 +1085,10 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
// Compute the incoming parameter translation.
ReadOnlyRoots roots(isolate());
- if (should_pad_arguments && ShouldPadArguments(parameters_count)) {
- frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
+ if (should_pad_arguments) {
+ for (int i = 0; i < ArgumentPaddingSlots(parameters_count); ++i) {
+ frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
+ }
}
// Note: parameters_count includes the receiver.
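This hunk, like several below, replaces the boolean ShouldPadArguments / kPadArguments checks with loops over ArgumentPaddingSlots(count), i.e. a padding-slot count rather than a yes/no flag. A rough, hypothetical sketch of what such a helper computes on a target whose frames must stay 16-byte aligned with 8-byte slots (the constants and function name below are illustrative assumptions, not V8's actual definition):

#include <cassert>

// Hypothetical illustration: an odd number of 8-byte slots leaves the frame
// misaligned on a 16-byte boundary, so one the_hole padding slot is pushed;
// an even count needs none. Targets without that requirement would return 0.
constexpr int kSlotSize = 8;
constexpr int kFrameAlignment = 16;

int ArgumentPaddingSlotsSketch(int slot_count) {
  return (slot_count * kSlotSize) % kFrameAlignment == 0 ? 0 : 1;
}

int main() {
  assert(ArgumentPaddingSlotsSketch(2) == 0);  // already aligned
  assert(ArgumentPaddingSlotsSketch(3) == 1);  // one padding slot restores alignment
  return 0;
}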
@@ -1133,9 +1182,6 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
frame_writer.PushRawValue(argc, "actual argument count\n");
// Set the bytecode array pointer.
- Object bytecode_array = shared.HasBreakInfo()
- ? shared.GetDebugInfo().DebugBytecodeArray()
- : shared.GetBytecodeArray(isolate());
frame_writer.PushRawObject(bytecode_array, "bytecode array\n");
// The bytecode offset was mentioned explicitly in the BEGIN_FRAME.
@@ -1191,7 +1237,7 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
// Translate the accumulator register (depending on frame position).
if (is_topmost) {
- if (kPadArguments) {
+ for (int i = 0; i < ArgumentPaddingSlots(1); ++i) {
frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
}
// For topmost frame, put the accumulator on the stack. The
@@ -1225,26 +1271,16 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
CHECK_EQ(translated_frame->end(), value_iterator);
CHECK_EQ(0u, frame_writer.top_offset());
- // Compute this frame's PC and state. The PC will be a special builtin that
- // continues the bytecode dispatch. Note that non-topmost and lazy-style
- // bailout handlers also advance the bytecode offset before dispatch, hence
- // simulating what normal handlers do upon completion of the operation.
- Builtins* builtins = isolate_->builtins();
- Code dispatch_builtin =
- (!is_topmost || (deopt_kind_ == DeoptimizeKind::kLazy)) &&
- !goto_catch_handler
- ? builtins->builtin(Builtins::kInterpreterEnterBytecodeAdvance)
- : builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
+ const intptr_t pc =
+ static_cast<intptr_t>(dispatch_builtin.InstructionStart());
if (is_topmost) {
// Only the pc of the topmost frame needs to be signed since it is
// authenticated at the end of the DeoptimizationEntry builtin.
const intptr_t top_most_pc = PointerAuthentication::SignAndCheckPC(
- static_cast<intptr_t>(dispatch_builtin.InstructionStart()),
- frame_writer.frame()->GetTop());
+ pc, frame_writer.frame()->GetTop());
output_frame->SetPc(top_most_pc);
} else {
- output_frame->SetPc(
- static_cast<intptr_t>(dispatch_builtin.InstructionStart()));
+ output_frame->SetPc(pc);
}
// Update constant pool.
@@ -1295,11 +1331,10 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
argument_count_without_receiver - formal_parameter_count;
// The number of pushed arguments is the maximum of the actual argument count
// and the formal parameter count + the receiver.
- const bool should_pad_args = ShouldPadArguments(
+ const int padding = ArgumentPaddingSlots(
std::max(argument_count_without_receiver, formal_parameter_count) + 1);
const int output_frame_size =
- std::max(0, extra_argument_count * kSystemPointerSize) +
- (should_pad_args ? kSystemPointerSize : 0);
+ (std::max(0, extra_argument_count) + padding) * kSystemPointerSize;
if (verbose_tracing_enabled()) {
PrintF(trace_scope_->file(),
" translating arguments adaptor => variable_size=%d\n",
@@ -1322,7 +1357,7 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
FrameWriter frame_writer(this, output_frame, verbose_trace_scope());
ReadOnlyRoots roots(isolate());
- if (should_pad_args) {
+ for (int i = 0; i < padding; ++i) {
frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
}
@@ -1384,7 +1419,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
output_frame->SetTop(top_address);
ReadOnlyRoots roots(isolate());
- if (ShouldPadArguments(parameters_count)) {
+ for (int i = 0; i < ArgumentPaddingSlots(parameters_count); ++i) {
frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
}
@@ -1450,7 +1485,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
frame_writer.PushTranslatedValue(receiver_iterator, debug_hint);
if (is_topmost) {
- if (kPadArguments) {
+ for (int i = 0; i < ArgumentPaddingSlots(1); ++i) {
frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
}
// Ensure the result is restored back when we return to the stub.
@@ -1557,10 +1592,11 @@ Builtins::Name Deoptimizer::TrampolineForBuiltinContinuation(
UNREACHABLE();
}
-TranslatedValue Deoptimizer::TranslatedValueForWasmReturnType(
- base::Optional<wasm::ValueKind> wasm_call_return_type) {
- if (wasm_call_return_type) {
- switch (wasm_call_return_type.value()) {
+#if V8_ENABLE_WEBASSEMBLY
+TranslatedValue Deoptimizer::TranslatedValueForWasmReturnKind(
+ base::Optional<wasm::ValueKind> wasm_call_return_kind) {
+ if (wasm_call_return_kind) {
+ switch (wasm_call_return_kind.value()) {
case wasm::kI32:
return TranslatedValue::NewInt32(
&translated_state_,
@@ -1586,6 +1622,7 @@ TranslatedValue Deoptimizer::TranslatedValueForWasmReturnType(
return TranslatedValue::NewTagged(&translated_state_,
ReadOnlyRoots(isolate()).undefined_value());
}
+#endif // V8_ENABLE_WEBASSEMBLY
// BuiltinContinuationFrames capture the machine state that is expected as input
// to a builtin, including both input register values and stack parameters. When
@@ -1650,7 +1687,9 @@ void Deoptimizer::DoComputeBuiltinContinuation(
BuiltinContinuationMode mode) {
TranslatedFrame::iterator result_iterator = translated_frame->end();
- bool is_js_to_wasm_builtin_continuation =
+ bool is_js_to_wasm_builtin_continuation = false;
+#if V8_ENABLE_WEBASSEMBLY
+ is_js_to_wasm_builtin_continuation =
translated_frame->kind() == TranslatedFrame::kJSToWasmBuiltinContinuation;
if (is_js_to_wasm_builtin_continuation) {
// For JSToWasmBuiltinContinuations, add a TranslatedValue with the result
@@ -1658,10 +1697,11 @@ void Deoptimizer::DoComputeBuiltinContinuation(
// This TranslatedValue will be written in the output frame in place of the
// hole and we'll use ContinueToCodeStubBuiltin in place of
// ContinueToCodeStubBuiltinWithResult.
- TranslatedValue result = TranslatedValueForWasmReturnType(
- translated_frame->wasm_call_return_type());
+ TranslatedValue result = TranslatedValueForWasmReturnKind(
+ translated_frame->wasm_call_return_kind());
translated_frame->Add(result);
}
+#endif // V8_ENABLE_WEBASSEMBLY
TranslatedFrame::iterator value_iterator = translated_frame->begin();
@@ -1734,7 +1774,8 @@ void Deoptimizer::DoComputeBuiltinContinuation(
++value_iterator;
ReadOnlyRoots roots(isolate());
- if (ShouldPadArguments(frame_info.stack_parameter_count())) {
+ const int padding = ArgumentPaddingSlots(frame_info.stack_parameter_count());
+ for (int i = 0; i < padding; ++i) {
frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
}
@@ -1889,7 +1930,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
}
if (is_topmost) {
- if (kPadArguments) {
+ for (int i = 0; i < ArgumentPaddingSlots(1); ++i) {
frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
}
diff --git a/deps/v8/src/deoptimizer/deoptimizer.h b/deps/v8/src/deoptimizer/deoptimizer.h
index ced3eeab44a..61a40d3556a 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.h
+++ b/deps/v8/src/deoptimizer/deoptimizer.h
@@ -15,6 +15,10 @@
#include "src/diagnostics/code-tracer.h"
#include "src/objects/js-function.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/value-type.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace internal {
@@ -164,8 +168,10 @@ class Deoptimizer : public Malloced {
static Builtins::Name TrampolineForBuiltinContinuation(
BuiltinContinuationMode mode, bool must_handle_result);
- TranslatedValue TranslatedValueForWasmReturnType(
- base::Optional<wasm::ValueKind> wasm_call_return_type);
+#if V8_ENABLE_WEBASSEMBLY
+ TranslatedValue TranslatedValueForWasmReturnKind(
+ base::Optional<wasm::ValueKind> wasm_call_return_kind);
+#endif // V8_ENABLE_WEBASSEMBLY
void DoComputeBuiltinContinuation(TranslatedFrame* translated_frame,
int frame_index,
diff --git a/deps/v8/src/deoptimizer/frame-description.h b/deps/v8/src/deoptimizer/frame-description.h
index bc5c6219ef3..f7e79aec6c7 100644
--- a/deps/v8/src/deoptimizer/frame-description.h
+++ b/deps/v8/src/deoptimizer/frame-description.h
@@ -112,7 +112,9 @@ class FrameDescription {
unsigned GetLastArgumentSlotOffset(bool pad_arguments = true) {
int parameter_slots = parameter_count();
- if (pad_arguments && ShouldPadArguments(parameter_slots)) parameter_slots++;
+ if (pad_arguments) {
+ parameter_slots = AddArgumentPaddingSlots(parameter_slots);
+ }
return GetFrameSize() - parameter_slots * kSystemPointerSize;
}
diff --git a/deps/v8/src/deoptimizer/translated-state.cc b/deps/v8/src/deoptimizer/translated-state.cc
index cb423d09e8a..02c473d22b1 100644
--- a/deps/v8/src/deoptimizer/translated-state.cc
+++ b/deps/v8/src/deoptimizer/translated-state.cc
@@ -98,6 +98,7 @@ void TranslationArrayPrintSingleFrame(std::ostream& os,
break;
}
+#if V8_ENABLE_WEBASSEMBLY
case TranslationOpcode::JS_TO_WASM_BUILTIN_CONTINUATION_FRAME: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 4);
int bailout_id = iterator.Next();
@@ -111,6 +112,7 @@ void TranslationArrayPrintSingleFrame(std::ostream& os,
<< "}";
break;
}
+#endif // V8_ENABLE_WEBASSEMBLY
case TranslationOpcode::ARGUMENTS_ADAPTOR_FRAME: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 2);
@@ -265,19 +267,6 @@ void TranslationArrayPrintSingleFrame(std::ostream& os,
}
}
-namespace {
-
-// Decodes the return type of a Wasm function as the integer value of
-// wasm::ValueKind, or kNoWasmReturnType if the function returns void.
-base::Optional<wasm::ValueKind> DecodeWasmReturnType(int code) {
- if (code != kNoWasmReturnType) {
- return {static_cast<wasm::ValueKind>(code)};
- }
- return {};
-}
-
-} // namespace
-
// static
TranslatedValue TranslatedValue::NewDeferredObject(TranslatedState* container,
int length,
@@ -656,14 +645,16 @@ TranslatedFrame TranslatedFrame::BuiltinContinuationFrame(
return frame;
}
+#if V8_ENABLE_WEBASSEMBLY
TranslatedFrame TranslatedFrame::JSToWasmBuiltinContinuationFrame(
BytecodeOffset bytecode_offset, SharedFunctionInfo shared_info, int height,
- base::Optional<wasm::ValueKind> return_type) {
+ base::Optional<wasm::ValueKind> return_kind) {
TranslatedFrame frame(kJSToWasmBuiltinContinuation, shared_info, height);
frame.bytecode_offset_ = bytecode_offset;
- frame.return_type_ = return_type;
+ frame.return_kind_ = return_kind;
return frame;
}
+#endif // V8_ENABLE_WEBASSEMBLY
TranslatedFrame TranslatedFrame::JavaScriptBuiltinContinuationFrame(
BytecodeOffset bytecode_offset, SharedFunctionInfo shared_info,
@@ -711,7 +702,9 @@ int TranslatedFrame::GetValueCount() {
case kConstructStub:
case kBuiltinContinuation:
+#if V8_ENABLE_WEBASSEMBLY
case kJSToWasmBuiltinContinuation:
+#endif // V8_ENABLE_WEBASSEMBLY
case kJavaScriptBuiltinContinuation:
case kJavaScriptBuiltinContinuationWithCatch: {
static constexpr int kTheContext = 1;
@@ -805,13 +798,17 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
shared_info, height);
}
+#if V8_ENABLE_WEBASSEMBLY
case TranslationOpcode::JS_TO_WASM_BUILTIN_CONTINUATION_FRAME: {
BytecodeOffset bailout_id = BytecodeOffset(iterator->Next());
SharedFunctionInfo shared_info =
SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
int height = iterator->Next();
- base::Optional<wasm::ValueKind> return_type =
- DecodeWasmReturnType(iterator->Next());
+ int return_kind_code = iterator->Next();
+ base::Optional<wasm::ValueKind> return_kind;
+ if (return_kind_code != kNoWasmReturnKind) {
+ return_kind = static_cast<wasm::ValueKind>(return_kind_code);
+ }
if (trace_file != nullptr) {
std::unique_ptr<char[]> name = shared_info.DebugNameCStr();
PrintF(trace_file, " reading JS to Wasm builtin continuation frame %s",
@@ -819,11 +816,12 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
PrintF(trace_file,
" => bailout_id=%d, height=%d return_type=%d; inputs:\n",
bailout_id.ToInt(), height,
- return_type.has_value() ? return_type.value() : -1);
+ return_kind.has_value() ? return_kind.value() : -1);
}
return TranslatedFrame::JSToWasmBuiltinContinuationFrame(
- bailout_id, shared_info, height, return_type);
+ bailout_id, shared_info, height, return_kind);
}
+#endif // V8_ENABLE_WEBASSEMBLY
case TranslationOpcode::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME: {
BytecodeOffset bytecode_offset = BytecodeOffset(iterator->Next());
@@ -980,7 +978,9 @@ int TranslatedState::CreateNextTranslatedValue(
case TranslationOpcode::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME:
case TranslationOpcode::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME:
case TranslationOpcode::BUILTIN_CONTINUATION_FRAME:
+#if V8_ENABLE_WEBASSEMBLY
case TranslationOpcode::JS_TO_WASM_BUILTIN_CONTINUATION_FRAME:
+#endif // V8_ENABLE_WEBASSEMBLY
case TranslationOpcode::UPDATE_FEEDBACK:
// Peeled off before getting here.
break;
@@ -1275,8 +1275,7 @@ int TranslatedState::CreateNextTranslatedValue(
Address TranslatedState::DecompressIfNeeded(intptr_t value) {
if (COMPRESS_POINTERS_BOOL) {
- return DecompressTaggedAny(isolate()->isolate_root(),
- static_cast<uint32_t>(value));
+ return DecompressTaggedAny(isolate(), static_cast<uint32_t>(value));
} else {
return value;
}
@@ -1765,7 +1764,7 @@ void TranslatedState::EnsurePropertiesAllocatedAndMarked(
properties_slot->set_storage(object_storage);
// Set markers for out-of-object properties.
- Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(isolate()),
isolate());
for (InternalIndex i : map->IterateOwnDescriptors()) {
FieldIndex index = FieldIndex::ForDescriptor(*map, i);
@@ -1799,7 +1798,7 @@ void TranslatedState::EnsureJSObjectAllocated(TranslatedValue* slot,
Handle<ByteArray> object_storage = AllocateStorageFor(slot);
// Now we handle the interesting (JSObject) case.
- Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(isolate()),
isolate());
// Set markers for in-object properties.
diff --git a/deps/v8/src/deoptimizer/translated-state.h b/deps/v8/src/deoptimizer/translated-state.h
index a818979ad71..799cb5b18c2 100644
--- a/deps/v8/src/deoptimizer/translated-state.h
+++ b/deps/v8/src/deoptimizer/translated-state.h
@@ -13,7 +13,10 @@
#include "src/objects/heap-object.h"
#include "src/objects/shared-function-info.h"
#include "src/utils/boxed-float.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/value-type.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -177,7 +180,9 @@ class TranslatedFrame {
kArgumentsAdaptor,
kConstructStub,
kBuiltinContinuation,
+#if V8_ENABLE_WEBASSEMBLY
kJSToWasmBuiltinContinuation,
+#endif // V8_ENABLE_WEBASSEMBLY
kJavaScriptBuiltinContinuation,
kJavaScriptBuiltinContinuationWithCatch,
kInvalid
@@ -252,11 +257,13 @@ class TranslatedFrame {
reference front() { return values_.front(); }
const_reference front() const { return values_.front(); }
+#if V8_ENABLE_WEBASSEMBLY
// Only for Kind == kJSToWasmBuiltinContinuation
- base::Optional<wasm::ValueKind> wasm_call_return_type() const {
+ base::Optional<wasm::ValueKind> wasm_call_return_kind() const {
DCHECK_EQ(kind(), kJSToWasmBuiltinContinuation);
- return return_type_;
+ return return_kind_;
}
+#endif // V8_ENABLE_WEBASSEMBLY
private:
friend class TranslatedState;
@@ -276,9 +283,11 @@ class TranslatedFrame {
int height);
static TranslatedFrame BuiltinContinuationFrame(
BytecodeOffset bailout_id, SharedFunctionInfo shared_info, int height);
+#if V8_ENABLE_WEBASSEMBLY
static TranslatedFrame JSToWasmBuiltinContinuationFrame(
BytecodeOffset bailout_id, SharedFunctionInfo shared_info, int height,
base::Optional<wasm::ValueKind> return_type);
+#endif // V8_ENABLE_WEBASSEMBLY
static TranslatedFrame JavaScriptBuiltinContinuationFrame(
BytecodeOffset bailout_id, SharedFunctionInfo shared_info, int height);
static TranslatedFrame JavaScriptBuiltinContinuationWithCatchFrame(
@@ -316,8 +325,10 @@ class TranslatedFrame {
ValuesContainer values_;
+#if V8_ENABLE_WEBASSEMBLY
// Only for Kind == kJSToWasmBuiltinContinuation
- base::Optional<wasm::ValueKind> return_type_;
+ base::Optional<wasm::ValueKind> return_kind_;
+#endif // V8_ENABLE_WEBASSEMBLY
};
// Auxiliary class for translating deoptimization values.
@@ -459,8 +470,8 @@ class TranslatedState {
FeedbackSlot feedback_slot_;
};
-// Return type encoding for a Wasm function returning void.
-const int kNoWasmReturnType = -1;
+// Return kind encoding for a Wasm function returning void.
+const int kNoWasmReturnKind = -1;
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/deoptimizer/translation-array.cc b/deps/v8/src/deoptimizer/translation-array.cc
index 0e1ee34b400..e3d5244c4f0 100644
--- a/deps/v8/src/deoptimizer/translation-array.cc
+++ b/deps/v8/src/deoptimizer/translation-array.cc
@@ -4,6 +4,7 @@
#include "src/deoptimizer/translation-array.h"
+#include "src/base/vlq.h"
#include "src/deoptimizer/translated-state.h"
#include "src/objects/fixed-array-inl.h"
#include "third_party/zlib/google/compression_utils_portable.h"
@@ -21,13 +22,6 @@ constexpr int kCompressedDataOffset =
kUncompressedSizeOffset + kUncompressedSizeSize;
constexpr int kTranslationArrayElementSize = kInt32Size;
-// Encodes the return type of a Wasm function as the integer value of
-// wasm::ValueKind, or kNoWasmReturnType if the function returns void.
-int EncodeWasmReturnType(base::Optional<wasm::ValueKind> return_type) {
- return return_type ? static_cast<int>(return_type.value())
- : kNoWasmReturnType;
-}
-
} // namespace
TranslationArrayIterator::TranslationArrayIterator(TranslationArray buffer,
@@ -56,19 +50,9 @@ int32_t TranslationArrayIterator::Next() {
if (V8_UNLIKELY(FLAG_turbo_compress_translation_arrays)) {
return uncompressed_contents_[index_++];
} else {
- // Run through the bytes until we reach one with a least significant
- // bit of zero (marks the end).
- uint32_t bits = 0;
- for (int i = 0; true; i += 7) {
- DCHECK(HasNext());
- uint8_t next = buffer_.get(index_++);
- bits |= (next >> 1) << i;
- if ((next & 1) == 0) break;
- }
- // The bits encode the sign in the least significant bit.
- bool is_negative = (bits & 1) == 1;
- int32_t result = bits >> 1;
- return is_negative ? -result : result;
+ int32_t value = base::VLQDecode(buffer_.GetDataStartAddress(), &index_);
+ DCHECK_LE(index_, buffer_.length());
+ return value;
}
}
@@ -84,19 +68,7 @@ void TranslationArrayBuilder::Add(int32_t value) {
if (V8_UNLIKELY(FLAG_turbo_compress_translation_arrays)) {
contents_for_compression_.push_back(value);
} else {
- // This wouldn't handle kMinInt correctly if it ever encountered it.
- DCHECK_NE(value, kMinInt);
- // Encode the sign bit in the least significant bit.
- bool is_negative = (value < 0);
- uint32_t bits = (static_cast<uint32_t>(is_negative ? -value : value) << 1) |
- static_cast<uint32_t>(is_negative);
- // Encode the individual bytes using the least significant bit of
- // each byte to indicate whether or not more bytes follow.
- do {
- uint32_t next = bits >> 7;
- contents_.push_back(((bits << 1) & 0xFF) | (next != 0));
- bits = next;
- } while (bits != 0);
+ base::VLQEncode(&contents_, value);
}
}
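The hand-rolled loops removed in the two hunks above implement the variable-length quantity (VLQ) encoding that TranslationArrayIterator::Next and TranslationArrayBuilder::Add now obtain from base::VLQDecode / base::VLQEncode in the newly included src/base/vlq.h. As a standalone reference for that scheme (sign folded into bit 0 of the magnitude, then 7 payload bits per byte with each byte's low bit as a continuation flag), here is a minimal sketch; the function names and std::vector buffer are illustrative, not the signatures in src/base/vlq.h:

#include <cassert>
#include <cstdint>
#include <vector>

// Encode: fold the sign into bit 0 of the magnitude, then emit 7 payload bits
// per byte; each byte's least significant bit says whether more bytes follow.
// Like the removed code, this does not handle INT32_MIN.
void VLQEncodeSketch(std::vector<uint8_t>* out, int32_t value) {
  bool is_negative = value < 0;
  uint32_t bits = (static_cast<uint32_t>(is_negative ? -value : value) << 1) |
                  static_cast<uint32_t>(is_negative);
  do {
    uint32_t next = bits >> 7;
    out->push_back(((bits << 1) & 0xFF) | (next != 0));
    bits = next;
  } while (bits != 0);
}

// Decode: reassemble the 7-bit groups until a byte with a clear low bit is
// seen, then undo the sign folding.
int32_t VLQDecodeSketch(const std::vector<uint8_t>& in, size_t* index) {
  uint32_t bits = 0;
  for (int shift = 0; true; shift += 7) {
    uint8_t byte = in[(*index)++];
    bits |= static_cast<uint32_t>(byte >> 1) << shift;
    if ((byte & 1) == 0) break;  // continuation flag clear: last byte
  }
  bool is_negative = (bits & 1) == 1;
  int32_t magnitude = static_cast<int32_t>(bits >> 1);
  return is_negative ? -magnitude : magnitude;
}

int main() {
  std::vector<uint8_t> buffer;
  VLQEncodeSketch(&buffer, -300);
  VLQEncodeSketch(&buffer, 42);
  size_t index = 0;
  assert(VLQDecodeSketch(buffer, &index) == -300);
  assert(VLQDecodeSketch(buffer, &index) == 42);
  return 0;
}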
@@ -144,17 +116,19 @@ void TranslationArrayBuilder::BeginBuiltinContinuationFrame(
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 3);
}
+#if V8_ENABLE_WEBASSEMBLY
void TranslationArrayBuilder::BeginJSToWasmBuiltinContinuationFrame(
BytecodeOffset bytecode_offset, int literal_id, unsigned height,
- base::Optional<wasm::ValueKind> return_type) {
+ base::Optional<wasm::ValueKind> return_kind) {
auto opcode = TranslationOpcode::JS_TO_WASM_BUILTIN_CONTINUATION_FRAME;
Add(opcode);
Add(bytecode_offset.ToInt());
Add(literal_id);
Add(height);
- Add(EncodeWasmReturnType(return_type));
+ Add(return_kind ? static_cast<int>(return_kind.value()) : kNoWasmReturnKind);
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 4);
}
+#endif // V8_ENABLE_WEBASSEMBLY
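Together with the decoding hunk in translated-state.cc above, the convention is that a present return kind is stored as its integer value and a void Wasm call (no return kind) is stored as kNoWasmReturnKind, i.e. -1. A tiny round-trip sketch of that convention, with std::optional<int> standing in for base::Optional<wasm::ValueKind> (names are illustrative):

#include <cassert>
#include <optional>

// Sentinel mirroring kNoWasmReturnKind in translated-state.h.
constexpr int kNoReturnKind = -1;

// Writer side: a present kind becomes its integer value, absence becomes -1.
int EncodeReturnKind(std::optional<int> kind) {
  return kind.has_value() ? kind.value() : kNoReturnKind;
}

// Reader side: -1 maps back to "no return value" (a void Wasm function).
std::optional<int> DecodeReturnKind(int code) {
  if (code == kNoReturnKind) return std::nullopt;
  return code;
}

int main() {
  assert(!DecodeReturnKind(EncodeReturnKind(std::nullopt)).has_value());
  assert(DecodeReturnKind(EncodeReturnKind(7)) == 7);
  return 0;
}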
void TranslationArrayBuilder::BeginJavaScriptBuiltinContinuationFrame(
BytecodeOffset bytecode_offset, int literal_id, unsigned height) {
diff --git a/deps/v8/src/deoptimizer/translation-array.h b/deps/v8/src/deoptimizer/translation-array.h
index db6be0f87b5..c8b2e485cb9 100644
--- a/deps/v8/src/deoptimizer/translation-array.h
+++ b/deps/v8/src/deoptimizer/translation-array.h
@@ -8,9 +8,12 @@
#include "src/codegen/register-arch.h"
#include "src/deoptimizer/translation-opcode.h"
#include "src/objects/fixed-array.h"
-#include "src/wasm/value-type.h"
#include "src/zone/zone-containers.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/value-type.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace internal {
@@ -68,9 +71,11 @@ class TranslationArrayBuilder {
unsigned height);
void BeginBuiltinContinuationFrame(BytecodeOffset bailout_id, int literal_id,
unsigned height);
+#if V8_ENABLE_WEBASSEMBLY
void BeginJSToWasmBuiltinContinuationFrame(
BytecodeOffset bailout_id, int literal_id, unsigned height,
- base::Optional<wasm::ValueKind> return_type);
+ base::Optional<wasm::ValueKind> return_kind);
+#endif // V8_ENABLE_WEBASSEMBLY
void BeginJavaScriptBuiltinContinuationFrame(BytecodeOffset bailout_id,
int literal_id, unsigned height);
void BeginJavaScriptBuiltinContinuationWithCatchFrame(
diff --git a/deps/v8/src/deoptimizer/translation-opcode.h b/deps/v8/src/deoptimizer/translation-opcode.h
index a91a948d8eb..d3032bc726b 100644
--- a/deps/v8/src/deoptimizer/translation-opcode.h
+++ b/deps/v8/src/deoptimizer/translation-opcode.h
@@ -5,6 +5,8 @@
#ifndef V8_DEOPTIMIZER_TRANSLATION_OPCODE_H_
#define V8_DEOPTIMIZER_TRANSLATION_OPCODE_H_
+#include "src/base/macros.h"
+
namespace v8 {
namespace internal {
@@ -31,7 +33,7 @@ namespace internal {
V(INTERPRETED_FRAME, 5) \
V(JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME, 3) \
V(JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME, 3) \
- V(JS_TO_WASM_BUILTIN_CONTINUATION_FRAME, 4) \
+ IF_WASM(V, JS_TO_WASM_BUILTIN_CONTINUATION_FRAME, 4) \
V(LITERAL, 1) \
V(REGISTER, 1) \
V(STACK_SLOT, 1) \
diff --git a/deps/v8/src/diagnostics/arm64/disasm-arm64.cc b/deps/v8/src/diagnostics/arm64/disasm-arm64.cc
index 456f0b5f662..2b8b84e25f5 100644
--- a/deps/v8/src/diagnostics/arm64/disasm-arm64.cc
+++ b/deps/v8/src/diagnostics/arm64/disasm-arm64.cc
@@ -4268,19 +4268,12 @@ int DisassemblingDecoder::SubstitutePrefetchField(Instruction* instr,
USE(format);
int prefetch_mode = instr->PrefetchMode();
- const std::array<std::string, 3> hints = {"ld", "li", "st"};
- unsigned hint = instr->PrefetchHint();
- unsigned target = instr->PrefetchTarget() + 1;
- if (hint >= hints.size() || target > 3) {
- std::bitset<5> prefetch_mode(instr->ImmPrefetchOperation());
- AppendToOutput("#0b%s", prefetch_mode.to_string().c_str());
- } else {
- const char* ks = (prefetch_mode & 1) ? "strm" : "keep";
-
- AppendToOutput("p%sl%d%s", hints[hint].c_str(), target, ks);
- }
+ const char* ls = (prefetch_mode & 0x10) ? "st" : "ld";
+ int level = (prefetch_mode >> 1) + 1;
+ const char* ks = (prefetch_mode & 1) ? "strm" : "keep";
+ AppendToOutput("p%sl%d%s", ls, level, ks);
return 6;
}
diff --git a/deps/v8/src/diagnostics/disassembler.cc b/deps/v8/src/diagnostics/disassembler.cc
index 70b958871dc..6e6ecb3f034 100644
--- a/deps/v8/src/diagnostics/disassembler.cc
+++ b/deps/v8/src/diagnostics/disassembler.cc
@@ -23,8 +23,11 @@
#include "src/snapshot/embedded/embedded-data.h"
#include "src/strings/string-stream.h"
#include "src/utils/vector.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -94,6 +97,7 @@ const char* V8NameConverter::NameOfAddress(byte* pc) const {
return v8_buffer_.begin();
}
+#if V8_ENABLE_WEBASSEMBLY
wasm::WasmCodeRefScope wasm_code_ref_scope;
wasm::WasmCode* wasm_code =
isolate_ ? isolate_->wasm_engine()->code_manager()->LookupCode(
@@ -104,6 +108,7 @@ const char* V8NameConverter::NameOfAddress(byte* pc) const {
wasm::GetWasmCodeKindAsString(wasm_code->kind()));
return v8_buffer_.begin();
}
+#endif // V8_ENABLE_WEBASSEMBLY
}
return disasm::NameConverter::NameOfAddress(pc);
@@ -247,12 +252,14 @@ static void PrintRelocInfo(StringBuilder* out, Isolate* isolate,
} else {
out->AddFormatted(" %s", CodeKindToString(kind));
}
+#if V8_ENABLE_WEBASSEMBLY
} else if (RelocInfo::IsWasmStubCall(rmode) && host.is_wasm_code()) {
// Host is isolate-independent, try wasm native module instead.
const char* runtime_stub_name = GetRuntimeStubName(
host.as_wasm_code()->native_module()->GetRuntimeStubId(
relocinfo->wasm_stub_call_address()));
out->AddFormatted(" ;; wasm stub: %s", runtime_stub_name);
+#endif // V8_ENABLE_WEBASSEMBLY
} else if (RelocInfo::IsRuntimeEntry(rmode) && isolate != nullptr) {
// A runtime entry relocinfo might be a deoptimization bailout.
Address addr = relocinfo->target_address();
@@ -294,7 +301,8 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
while (pc < end) {
// First decode instruction so that we know its length.
byte* prev_pc = pc;
- if (constants > 0) {
+ bool decoding_constant_pool = constants > 0;
+ if (decoding_constant_pool) {
SNPrintF(
decode_buffer, "%08x constant",
base::ReadUnalignedValue<int32_t>(reinterpret_cast<Address>(pc)));
@@ -384,7 +392,11 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
// If this is a constant pool load and we haven't found any RelocInfo
// already, check if we can find some RelocInfo for the target address in
// the constant pool.
- if (pcs.empty() && !code.is_null()) {
+ // Make sure we're also not currently in the middle of decoding a constant
+    // pool itself, rather than a constant pool load. Since it can store any
+ // bytes, a constant could accidentally match with the bit-pattern checked
+ // by IsInConstantPool() below.
+ if (pcs.empty() && !code.is_null() && !decoding_constant_pool) {
RelocInfo dummy_rinfo(reinterpret_cast<Address>(prev_pc), RelocInfo::NONE,
0, Code());
if (dummy_rinfo.IsInConstantPool()) {
diff --git a/deps/v8/src/diagnostics/objects-debug.cc b/deps/v8/src/diagnostics/objects-debug.cc
index bf841db010a..b48df9385ae 100644
--- a/deps/v8/src/diagnostics/objects-debug.cc
+++ b/deps/v8/src/diagnostics/objects-debug.cc
@@ -5,7 +5,6 @@
#include "src/codegen/assembler-inl.h"
#include "src/common/globals.h"
#include "src/date/date.h"
-#include "src/debug/debug-wasm-objects-inl.h"
#include "src/diagnostics/disasm.h"
#include "src/diagnostics/disassembler.h"
#include "src/heap/combined-heap.h"
@@ -30,6 +29,7 @@
#include "src/objects/function-kind.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/instance-type.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/objects.h"
@@ -75,9 +75,13 @@
#include "src/objects/transitions-inl.h"
#include "src/regexp/regexp.h"
#include "src/utils/ostreams.h"
-#include "src/wasm/wasm-objects-inl.h"
#include "torque-generated/class-verifiers.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/debug/debug-wasm-objects-inl.h"
+#include "src/wasm/wasm-objects-inl.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace internal {
@@ -226,12 +230,14 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
case JS_TYPED_ARRAY_PROTOTYPE_TYPE:
JSObject::cast(*this).JSObjectVerify(isolate);
break;
+#if V8_ENABLE_WEBASSEMBLY
case WASM_INSTANCE_OBJECT_TYPE:
WasmInstanceObject::cast(*this).WasmInstanceObjectVerify(isolate);
break;
case WASM_VALUE_OBJECT_TYPE:
WasmValueObject::cast(*this).WasmValueObjectVerify(isolate);
break;
+#endif // V8_ENABLE_WEBASSEMBLY
case JS_SET_KEY_VALUE_ITERATOR_TYPE:
case JS_SET_VALUE_ITERATOR_TYPE:
JSSetIterator::cast(*this).JSSetIteratorVerify(isolate);
@@ -319,11 +325,11 @@ void BytecodeArray::BytecodeArrayVerify(Isolate* isolate) {
USE_TORQUE_VERIFIER(JSReceiver)
-bool JSObject::ElementsAreSafeToExamine(IsolateRoot isolate) const {
+bool JSObject::ElementsAreSafeToExamine(PtrComprCageBase cage_base) const {
// If a GC was caused while constructing this object, the elements
// pointer may point to a one pointer filler map.
- return elements(isolate) !=
- GetReadOnlyRoots(isolate).one_pointer_filler_map();
+ return elements(cage_base) !=
+ GetReadOnlyRoots(cage_base).one_pointer_filler_map();
}
namespace {
@@ -387,7 +393,7 @@ void JSObject::JSObjectVerify(Isolate* isolate) {
int delta = actual_unused_property_fields - map().UnusedPropertyFields();
CHECK_EQ(0, delta % JSObject::kFieldsAdded);
}
- DescriptorArray descriptors = map().instance_descriptors(kRelaxedLoad);
+ DescriptorArray descriptors = map().instance_descriptors(isolate);
bool is_transitionable_fast_elements_kind =
IsTransitionableFastElementsKind(map().elements_kind());
@@ -457,13 +463,13 @@ void Map::MapVerify(Isolate* isolate) {
// Root maps must not have descriptors in the descriptor array that do not
// belong to the map.
CHECK_EQ(NumberOfOwnDescriptors(),
- instance_descriptors(kRelaxedLoad).number_of_descriptors());
+ instance_descriptors(isolate).number_of_descriptors());
} else {
// If there is a parent map it must be non-stable.
Map parent = Map::cast(GetBackPointer());
CHECK(!parent.is_stable());
- DescriptorArray descriptors = instance_descriptors(kRelaxedLoad);
- if (descriptors == parent.instance_descriptors(kRelaxedLoad)) {
+ DescriptorArray descriptors = instance_descriptors(isolate);
+ if (descriptors == parent.instance_descriptors(isolate)) {
if (NumberOfOwnDescriptors() == parent.NumberOfOwnDescriptors() + 1) {
// Descriptors sharing through property transitions takes over
// ownership from the parent map.
@@ -481,7 +487,7 @@ void Map::MapVerify(Isolate* isolate) {
}
}
}
- SLOW_DCHECK(instance_descriptors(kRelaxedLoad).IsSortedNoDuplicates());
+ SLOW_DCHECK(instance_descriptors(isolate).IsSortedNoDuplicates());
DisallowGarbageCollection no_gc;
SLOW_DCHECK(
TransitionsAccessor(isolate, *this, &no_gc).IsSortedNoDuplicates());
@@ -495,7 +501,7 @@ void Map::MapVerify(Isolate* isolate) {
CHECK(!has_named_interceptor());
CHECK(!is_dictionary_map());
CHECK(!is_access_check_needed());
- DescriptorArray const descriptors = instance_descriptors(kRelaxedLoad);
+ DescriptorArray const descriptors = instance_descriptors(isolate);
for (InternalIndex i : IterateOwnDescriptors()) {
CHECK(!descriptors.GetKey(i).IsInterestingSymbol());
}
@@ -519,7 +525,7 @@ void Map::DictionaryMapVerify(Isolate* isolate) {
CHECK(is_dictionary_map());
CHECK_EQ(kInvalidEnumCacheSentinel, EnumLength());
CHECK_EQ(ReadOnlyRoots(isolate).empty_descriptor_array(),
- instance_descriptors(kRelaxedLoad));
+ instance_descriptors(isolate));
CHECK_EQ(0, UnusedPropertyFields());
CHECK_EQ(Map::GetVisitorId(*this), visitor_id());
}
@@ -578,20 +584,6 @@ void Context::ContextVerify(Isolate* isolate) {
}
}
-void ScopeInfo::ScopeInfoVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::ScopeInfoVerify(*this, isolate);
-
- // Make sure that the FixedArray-style length matches the length that we would
- // compute based on the Torque indexed fields.
- CHECK_EQ(FixedArray::SizeFor(length()), AllocatedSize());
-
- // Code that treats ScopeInfo like a FixedArray expects all values to be
- // tagged.
- for (int i = 0; i < length(); ++i) {
- Object::VerifyPointer(isolate, get(isolate, i));
- }
-}
-
void NativeContext::NativeContextVerify(Isolate* isolate) {
ContextVerify(isolate);
CHECK_EQ(length(), NativeContext::NATIVE_CONTEXT_SLOTS);
@@ -851,11 +843,15 @@ void SharedFunctionInfo::SharedFunctionInfoVerify(ReadOnlyRoots roots) {
CHECK_NE(value, roots.empty_scope_info());
}
- CHECK(HasWasmExportedFunctionData() || IsApiFunction() ||
- HasBytecodeArray() || HasAsmWasmData() || HasBuiltinId() ||
+#if V8_ENABLE_WEBASSEMBLY
+ bool is_wasm = HasWasmExportedFunctionData() || HasAsmWasmData() ||
+ HasWasmJSFunctionData() || HasWasmCapiFunctionData();
+#else
+ bool is_wasm = false;
+#endif // V8_ENABLE_WEBASSEMBLY
+ CHECK(is_wasm || IsApiFunction() || HasBytecodeArray() || HasBuiltinId() ||
HasUncompiledDataWithPreparseData() ||
- HasUncompiledDataWithoutPreparseData() || HasWasmJSFunctionData() ||
- HasWasmCapiFunctionData());
+ HasUncompiledDataWithoutPreparseData());
{
auto script = script_or_debug_info(kAcquireLoad);
@@ -1599,6 +1595,7 @@ void ObjectBoilerplateDescription::ObjectBoilerplateDescriptionVerify(
}
}
+#if V8_ENABLE_WEBASSEMBLY
USE_TORQUE_VERIFIER(AsmWasmData)
void WasmInstanceObject::WasmInstanceObjectVerify(Isolate* isolate) {
@@ -1638,6 +1635,11 @@ USE_TORQUE_VERIFIER(WasmGlobalObject)
USE_TORQUE_VERIFIER(WasmExceptionObject)
+USE_TORQUE_VERIFIER(WasmJSFunctionData)
+
+USE_TORQUE_VERIFIER(WasmIndirectFunctionTable)
+#endif // V8_ENABLE_WEBASSEMBLY
+
void DataHandler::DataHandlerVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::DataHandlerVerify(*this, isolate);
CHECK_IMPLIES(!smi_handler().IsSmi(),
@@ -1673,10 +1675,6 @@ void CallHandlerInfo::CallHandlerInfoVerify(Isolate* isolate) {
.next_call_side_effect_free_call_handler_info_map());
}
-USE_TORQUE_VERIFIER(WasmJSFunctionData)
-
-USE_TORQUE_VERIFIER(WasmIndirectFunctionTable)
-
void AllocationSite::AllocationSiteVerify(Isolate* isolate) {
CHECK(IsAllocationSite());
CHECK(dependent_code().IsDependentCode());
@@ -1728,12 +1726,14 @@ USE_TORQUE_VERIFIER(InterpreterData)
void StackFrameInfo::StackFrameInfoVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::StackFrameInfoVerify(*this, isolate);
+#if V8_ENABLE_WEBASSEMBLY
CHECK_IMPLIES(IsAsmJsWasm(), IsWasm());
CHECK_IMPLIES(IsWasm(), receiver_or_instance().IsWasmInstanceObject());
CHECK_IMPLIES(IsWasm(), function().IsSmi());
CHECK_IMPLIES(!IsWasm(), function().IsJSFunction());
CHECK_IMPLIES(IsAsync(), !IsWasm());
CHECK_IMPLIES(IsConstructor(), !IsWasm());
+#endif // V8_ENABLE_WEBASSEMBLY
}
#endif // VERIFY_HEAP
@@ -1754,8 +1754,8 @@ void JSObject::IncrementSpillStatistics(Isolate* isolate,
info->number_of_slow_used_properties_ += dict.NumberOfElements();
info->number_of_slow_unused_properties_ +=
dict.Capacity() - dict.NumberOfElements();
- } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- OrderedNameDictionary dict = property_dictionary_ordered();
+ } else if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ SwissNameDictionary dict = property_dictionary_swiss();
info->number_of_slow_used_properties_ += dict.NumberOfElements();
info->number_of_slow_unused_properties_ +=
dict.Capacity() - dict.NumberOfElements();
diff --git a/deps/v8/src/diagnostics/objects-printer.cc b/deps/v8/src/diagnostics/objects-printer.cc
index df848422c76..aa1713e3b37 100644
--- a/deps/v8/src/diagnostics/objects-printer.cc
+++ b/deps/v8/src/diagnostics/objects-printer.cc
@@ -6,9 +6,9 @@
#include <memory>
#include "src/common/globals.h"
-#include "src/debug/debug-wasm-objects-inl.h"
#include "src/diagnostics/disasm.h"
#include "src/diagnostics/disassembler.h"
+#include "src/execution/isolate-utils-inl.h"
#include "src/heap/heap-inl.h" // For InOldSpace.
#include "src/heap/heap-write-barrier-inl.h" // For GetIsolateFromWritableObj.
#include "src/init/bootstrapper.h"
@@ -18,9 +18,13 @@
#include "src/regexp/regexp.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/utils/ostreams.h"
+
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/debug/debug-wasm-objects-inl.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects-inl.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -70,6 +74,13 @@ void PrintDictionaryContents(std::ostream& os, T dict) {
DisallowGarbageCollection no_gc;
ReadOnlyRoots roots = dict.GetReadOnlyRoots();
+ if (dict.Capacity() == 0) {
+ return;
+ }
+
+ Isolate* isolate = GetIsolateFromWritableObject(dict);
+ // IterateEntries for SwissNameDictionary needs to create a handle.
+ HandleScope scope(isolate);
for (InternalIndex i : dict.IterateEntries()) {
Object k;
if (!dict.ToKey(roots, i, &k)) continue;
@@ -172,12 +183,14 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_TYPED_ARRAY_PROTOTYPE_TYPE:
JSObject::cast(*this).JSObjectPrint(os);
break;
+#if V8_ENABLE_WEBASSEMBLY
case WASM_INSTANCE_OBJECT_TYPE:
WasmInstanceObject::cast(*this).WasmInstanceObjectPrint(os);
break;
case WASM_VALUE_OBJECT_TYPE:
WasmValueObject::cast(*this).WasmValueObjectPrint(os);
break;
+#endif // V8_ENABLE_WEBASSEMBLY
case CODE_TYPE:
Code::cast(*this).CodePrint(os);
break;
@@ -267,7 +280,7 @@ void FreeSpace::FreeSpacePrint(std::ostream& os) { // NOLINT
bool JSObject::PrintProperties(std::ostream& os) { // NOLINT
if (HasFastProperties()) {
- DescriptorArray descs = map().instance_descriptors(kRelaxedLoad);
+ DescriptorArray descs = map().instance_descriptors(GetIsolate());
int nof_inobject_properties = map().GetInObjectProperties();
for (InternalIndex i : map().IterateOwnDescriptors()) {
os << "\n ";
@@ -302,8 +315,8 @@ bool JSObject::PrintProperties(std::ostream& os) { // NOLINT
} else if (IsJSGlobalObject()) {
PrintDictionaryContents(
os, JSGlobalObject::cast(*this).global_dictionary(kAcquireLoad));
- } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- PrintDictionaryContents(os, property_dictionary_ordered());
+ } else if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ PrintDictionaryContents(os, property_dictionary_swiss());
} else {
PrintDictionaryContents(os, property_dictionary());
}
@@ -455,13 +468,13 @@ void PrintSloppyArgumentElements(std::ostream& os, ElementsKind kind,
}
}
-void PrintEmbedderData(IsolateRoot isolate, std::ostream& os,
+void PrintEmbedderData(PtrComprCageBase cage_base, std::ostream& os,
EmbedderDataSlot slot) {
DisallowGarbageCollection no_gc;
Object value = slot.load_tagged();
os << Brief(value);
void* raw_pointer;
- if (slot.ToAlignedPointer(isolate, &raw_pointer)) {
+ if (slot.ToAlignedPointer(cage_base, &raw_pointer)) {
os << ", aligned pointer: " << raw_pointer;
}
}
@@ -566,11 +579,11 @@ static void JSObjectPrintBody(std::ostream& os,
}
int embedder_fields = obj.GetEmbedderFieldCount();
if (embedder_fields > 0) {
- IsolateRoot isolate = GetIsolateForPtrCompr(obj);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(obj);
os << " - embedder fields = {";
for (int i = 0; i < embedder_fields; i++) {
os << "\n ";
- PrintEmbedderData(isolate, os, EmbedderDataSlot(obj, i));
+ PrintEmbedderData(cage_base, os, EmbedderDataSlot(obj, i));
}
os << "\n }\n";
}
@@ -749,14 +762,14 @@ void ObjectBoilerplateDescription::ObjectBoilerplateDescriptionPrint(
}
void EmbedderDataArray::EmbedderDataArrayPrint(std::ostream& os) {
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
PrintHeader(os, "EmbedderDataArray");
os << "\n - length: " << length();
EmbedderDataSlot start(*this, 0);
EmbedderDataSlot end(*this, length());
for (EmbedderDataSlot slot = start; slot < end; ++slot) {
os << "\n ";
- PrintEmbedderData(isolate, os, slot);
+ PrintEmbedderData(cage_base, os, slot);
}
os << "\n";
}
@@ -1474,6 +1487,7 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
os << "\n - bytecode: " << shared().GetBytecodeArray(isolate);
}
}
+#if V8_ENABLE_WEBASSEMBLY
if (WasmExportedFunction::IsWasmExportedFunction(*this)) {
WasmExportedFunction function = WasmExportedFunction::cast(*this);
os << "\n - Wasm instance: " << Brief(function.instance());
@@ -1483,6 +1497,7 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
WasmJSFunction function = WasmJSFunction::cast(*this);
os << "\n - Wasm wrapper around: " << Brief(function.GetCallable());
}
+#endif // V8_ENABLE_WEBASSEMBLY
shared().PrintSourceCode(os);
JSObjectPrintBody(os, *this);
os << " - feedback vector: ";
@@ -1783,6 +1798,7 @@ void RegExpBoilerplateDescription::RegExpBoilerplateDescriptionPrint(
os << "\n";
}
+#if V8_ENABLE_WEBASSEMBLY
void AsmWasmData::AsmWasmDataPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "AsmWasmData");
os << "\n - native module: " << Brief(managed_native_module());
@@ -1827,7 +1843,7 @@ void WasmStruct::WasmStructPrint(std::ostream& os) { // NOLINT
case wasm::kRtt:
case wasm::kRttWithDepth:
case wasm::kBottom:
- case wasm::kStmt:
+ case wasm::kVoid:
os << "UNIMPLEMENTED"; // TODO(7748): Implement.
break;
}
@@ -1867,7 +1883,7 @@ void WasmArray::WasmArrayPrint(std::ostream& os) { // NOLINT
case wasm::kRtt:
case wasm::kRttWithDepth:
case wasm::kBottom:
- case wasm::kStmt:
+ case wasm::kVoid:
os << "\n Printing elements of this type is unimplemented, sorry";
// TODO(7748): Implement.
break;
@@ -1963,15 +1979,9 @@ void WasmTableObject::WasmTableObjectPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void WasmValueObject::WasmValueObjectPrint(std::ostream& os) { // NOLINT
- PrintHeader(os, "WasmValueObject");
- os << "\n - value: " << Brief(value());
- os << "\n";
-}
-
void WasmGlobalObject::WasmGlobalObjectPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "WasmGlobalObject");
- if (type().is_reference_type()) {
+ if (type().is_reference()) {
os << "\n - tagged_buffer: " << Brief(tagged_buffer());
} else {
os << "\n - untagged_buffer: " << Brief(untagged_buffer());
@@ -1979,7 +1989,7 @@ void WasmGlobalObject::WasmGlobalObjectPrint(std::ostream& os) { // NOLINT
os << "\n - offset: " << offset();
os << "\n - raw_type: " << raw_type();
os << "\n - is_mutable: " << is_mutable();
- os << "\n - type: " << type().kind();
+ os << "\n - type: " << type();
os << "\n - is_mutable: " << is_mutable();
os << "\n";
}
@@ -2000,6 +2010,27 @@ void WasmExceptionObject::WasmExceptionObjectPrint(
os << "\n";
}
+void WasmIndirectFunctionTable::WasmIndirectFunctionTablePrint(
+ std::ostream& os) {
+ PrintHeader(os, "WasmIndirectFunctionTable");
+ os << "\n - size: " << size();
+ os << "\n - sig_ids: " << static_cast<void*>(sig_ids());
+ os << "\n - targets: " << static_cast<void*>(targets());
+ if (has_managed_native_allocations()) {
+ os << "\n - managed_native_allocations: "
+ << Brief(managed_native_allocations());
+ }
+ os << "\n - refs: " << Brief(refs());
+ os << "\n";
+}
+
+void WasmValueObject::WasmValueObjectPrint(std::ostream& os) { // NOLINT
+ PrintHeader(os, "WasmValueObject");
+ os << "\n - value: " << Brief(value());
+ os << "\n";
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
void LoadHandler::LoadHandlerPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "LoadHandler");
// TODO(ishell): implement printing based on handler kind
@@ -2067,21 +2098,7 @@ void FunctionTemplateInfo::FunctionTemplateInfoPrint(
os << "\n - undetectable: " << (undetectable() ? "true" : "false");
os << "\n - need_access_check: " << (needs_access_check() ? "true" : "false");
os << "\n - instantiated: " << (instantiated() ? "true" : "false");
- os << "\n - rare_data: " << Brief(rare_data());
- os << "\n";
-}
-
-void WasmIndirectFunctionTable::WasmIndirectFunctionTablePrint(
- std::ostream& os) {
- PrintHeader(os, "WasmIndirectFunctionTable");
- os << "\n - size: " << size();
- os << "\n - sig_ids: " << static_cast<void*>(sig_ids());
- os << "\n - targets: " << static_cast<void*>(targets());
- if (has_managed_native_allocations()) {
- os << "\n - managed_native_allocations: "
- << Brief(managed_native_allocations());
- }
- os << "\n - refs: " << Brief(refs());
+ os << "\n - rare_data: " << Brief(rare_data(kAcquireLoad));
os << "\n";
}
@@ -2142,11 +2159,15 @@ void Script::ScriptPrint(std::ostream& os) { // NOLINT
os << "\n - context data: " << Brief(context_data());
os << "\n - compilation type: " << compilation_type();
os << "\n - line ends: " << Brief(line_ends());
- if (type() == TYPE_WASM) {
+ bool is_wasm = false;
+#if V8_ENABLE_WEBASSEMBLY
+ if ((is_wasm = (type() == TYPE_WASM))) {
if (has_wasm_breakpoint_infos()) {
os << "\n - wasm_breakpoint_infos: " << Brief(wasm_breakpoint_infos());
}
- } else {
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ if (!is_wasm) {
if (has_eval_from_shared()) {
os << "\n - eval from shared: " << Brief(eval_from_shared());
}
@@ -2267,18 +2288,13 @@ void JSSegments::JSSegmentsPrint(std::ostream& os) { // NOLINT
namespace {
void PrintScopeInfoList(ScopeInfo scope_info, std::ostream& os,
- const char* list_name, int nof_internal_slots,
- int start, int length) {
+ const char* list_name, int length) {
if (length <= 0) return;
- int end = start + length;
os << "\n - " << list_name;
- if (nof_internal_slots > 0) {
- os << " " << start << "-" << end << " [internal slots]";
- }
os << " {\n";
- for (int i = nof_internal_slots; start < end; ++i, ++start) {
+ for (int i = 0; i < length; ++i) {
os << " - " << i << ": ";
- String::cast(scope_info.get(start)).ShortPrint(os);
+ scope_info.context_local_names(i).ShortPrint(os);
os << "\n";
}
os << " }";
@@ -2287,8 +2303,8 @@ void PrintScopeInfoList(ScopeInfo scope_info, std::ostream& os,
void ScopeInfo::ScopeInfoPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "ScopeInfo");
- if (length() == 0) {
- os << "\n - length = 0\n";
+ if (IsEmpty()) {
+ os << "\n - empty\n";
return;
}
int flags = Flags();
@@ -2335,8 +2351,7 @@ void ScopeInfo::ScopeInfoPrint(std::ostream& os) { // NOLINT
}
os << "\n - length: " << length();
if (length() > 0) {
- PrintScopeInfoList(*this, os, "context slots", Context::MIN_CONTEXT_SLOTS,
- ContextLocalNamesIndex(), ContextLocalCount());
+ PrintScopeInfoList(*this, os, "context slots", ContextLocalCount());
// TODO(neis): Print module stuff if present.
}
os << "\n";
@@ -2462,7 +2477,7 @@ int Name::NameShortPrint(Vector<char> str) {
void Map::PrintMapDetails(std::ostream& os) {
DisallowGarbageCollection no_gc;
this->MapPrint(os);
- instance_descriptors(kRelaxedLoad).PrintDescriptors(os);
+ instance_descriptors().PrintDescriptors(os);
}
void Map::MapPrint(std::ostream& os) { // NOLINT
@@ -2516,7 +2531,7 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
os << "\n - prototype_validity cell: " << Brief(prototype_validity_cell());
os << "\n - instance descriptors " << (owns_descriptors() ? "(own) " : "")
<< "#" << NumberOfOwnDescriptors() << ": "
- << Brief(instance_descriptors(kRelaxedLoad));
+ << Brief(instance_descriptors());
// Read-only maps can't have transitions, which is fortunate because we need
// the isolate to iterate over the transitions.
@@ -2628,7 +2643,7 @@ void TransitionsAccessor::PrintOneTransition(std::ostream& os, Name key,
DCHECK(!IsSpecialTransition(roots, key));
os << "(transition to ";
InternalIndex descriptor = target.LastAdded();
- DescriptorArray descriptors = target.instance_descriptors(kRelaxedLoad);
+ DescriptorArray descriptors = target.instance_descriptors();
descriptors.PrintDescriptorDetails(os, descriptor,
PropertyDetails::kForTransitions);
os << ")";
@@ -2706,7 +2721,7 @@ void TransitionsAccessor::PrintTransitionTree(
DCHECK(!IsSpecialTransition(ReadOnlyRoots(isolate_), key));
os << "to ";
InternalIndex descriptor = target.LastAdded();
- DescriptorArray descriptors = target.instance_descriptors(kRelaxedLoad);
+ DescriptorArray descriptors = target.instance_descriptors(isolate_);
descriptors.PrintDescriptorDetails(os, descriptor,
PropertyDetails::kForTransitions);
}
@@ -2732,12 +2747,11 @@ namespace {
inline i::Object GetObjectFromRaw(void* object) {
i::Address object_ptr = reinterpret_cast<i::Address>(object);
#ifdef V8_COMPRESS_POINTERS
- if (RoundDown<i::kPtrComprIsolateRootAlignment>(object_ptr) ==
- i::kNullAddress) {
+ if (RoundDown<i::kPtrComprCageBaseAlignment>(object_ptr) == i::kNullAddress) {
// Try to decompress pointer.
i::Isolate* isolate = i::Isolate::Current();
- object_ptr = i::DecompressTaggedAny(isolate->isolate_root(),
- static_cast<i::Tagged_t>(object_ptr));
+ object_ptr =
+ i::DecompressTaggedAny(isolate, static_cast<i::Tagged_t>(object_ptr));
}
#endif
return i::Object(object_ptr);
@@ -2760,6 +2774,7 @@ V8_EXPORT_PRIVATE extern void _v8_internal_Print_Code(void* object) {
i::Address address = reinterpret_cast<i::Address>(object);
i::Isolate* isolate = i::Isolate::Current();
+#if V8_ENABLE_WEBASSEMBLY
{
i::wasm::WasmCodeRefScope scope;
i::wasm::WasmCode* wasm_code =
@@ -2770,6 +2785,7 @@ V8_EXPORT_PRIVATE extern void _v8_internal_Print_Code(void* object) {
return;
}
}
+#endif // V8_ENABLE_WEBASSEMBLY
if (!isolate->heap()->InSpaceSlow(address, i::CODE_SPACE) &&
!isolate->heap()->InSpaceSlow(address, i::CODE_LO_SPACE) &&
diff --git a/deps/v8/src/diagnostics/perf-jit.cc b/deps/v8/src/diagnostics/perf-jit.cc
index 5a2650131fc..dea33400480 100644
--- a/deps/v8/src/diagnostics/perf-jit.cc
+++ b/deps/v8/src/diagnostics/perf-jit.cc
@@ -46,7 +46,10 @@
#include "src/objects/shared-function-info.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/utils/ostreams.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -247,6 +250,7 @@ void PerfJitLogger::LogRecordedBuffer(
length);
}
+#if V8_ENABLE_WEBASSEMBLY
void PerfJitLogger::LogRecordedBuffer(const wasm::WasmCode* code,
const char* name, int length) {
base::LockGuard<base::RecursiveMutex> guard_file(file_mutex_.Pointer());
@@ -260,6 +264,7 @@ void PerfJitLogger::LogRecordedBuffer(const wasm::WasmCode* code,
WriteJitCodeLoadEntry(code->instructions().begin(),
code->instructions().length(), name, length);
}
+#endif // V8_ENABLE_WEBASSEMBLY
void PerfJitLogger::WriteJitCodeLoadEntry(const uint8_t* code_pointer,
uint32_t code_size, const char* name,
@@ -340,12 +345,7 @@ void PerfJitLogger::LogWriteDebugInfo(Handle<Code> code,
Handle<SharedFunctionInfo> shared) {
DisallowGarbageCollection no_gc;
// TODO(v8:11429,cbruni): add proper baseline source position iterator
- bool is_baseline = code->kind() == CodeKind::BASELINE;
- ByteArray source_position_table = code->SourcePositionTable();
- if (is_baseline) {
- source_position_table =
- shared->GetBytecodeArray(shared->GetIsolate()).SourcePositionTable();
- }
+ ByteArray source_position_table = code->SourcePositionTable(*shared);
// Compute the entry count and get the name of the script.
uint32_t entry_count = 0;
for (SourcePositionTableIterator iterator(source_position_table);
@@ -406,6 +406,7 @@ void PerfJitLogger::LogWriteDebugInfo(Handle<Code> code,
LogWriteBytes(padding_bytes, padding);
}
+#if V8_ENABLE_WEBASSEMBLY
void PerfJitLogger::LogWriteDebugInfo(const wasm::WasmCode* code) {
wasm::WasmModuleSourceMap* source_map =
code->native_module()->GetWasmSourceMap();
@@ -472,6 +473,7 @@ void PerfJitLogger::LogWriteDebugInfo(const wasm::WasmCode* code) {
char padding_bytes[8] = {0};
LogWriteBytes(padding_bytes, padding);
}
+#endif // V8_ENABLE_WEBASSEMBLY
void PerfJitLogger::LogWriteUnwindingInfo(Code code) {
PerfJitCodeUnwindingInfo unwinding_info_header;
diff --git a/deps/v8/src/diagnostics/perf-jit.h b/deps/v8/src/diagnostics/perf-jit.h
index 1c9112299ed..746f9f7c857 100644
--- a/deps/v8/src/diagnostics/perf-jit.h
+++ b/deps/v8/src/diagnostics/perf-jit.h
@@ -58,8 +58,10 @@ class PerfJitLogger : public CodeEventLogger {
void LogRecordedBuffer(Handle<AbstractCode> code,
MaybeHandle<SharedFunctionInfo> maybe_shared,
const char* name, int length) override;
+#if V8_ENABLE_WEBASSEMBLY
void LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) override;
+#endif // V8_ENABLE_WEBASSEMBLY
// Extension added to V8 log file name to get the low-level log name.
static const char kFilenameFormatString[];
@@ -75,7 +77,9 @@ class PerfJitLogger : public CodeEventLogger {
void LogWriteBytes(const char* bytes, int size);
void LogWriteHeader();
void LogWriteDebugInfo(Handle<Code> code, Handle<SharedFunctionInfo> shared);
+#if V8_ENABLE_WEBASSEMBLY
void LogWriteDebugInfo(const wasm::WasmCode* code);
+#endif // V8_ENABLE_WEBASSEMBLY
void LogWriteUnwindingInfo(Code code);
static const uint32_t kElfMachIA32 = 3;
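
The same V8_ENABLE_WEBASSEMBLY guard pattern recurs throughout this patch: wasm-only includes, member declarations, and call sites are compiled out entirely when WebAssembly support is disabled, rather than stubbed out at runtime. A minimal standalone sketch of the pattern (illustrative types, not V8's own) for reference:

    #include <cstdint>

    namespace wasm { struct WasmCode { const uint8_t* start; int size; }; }

    class ExampleLogger {
     public:
      // Always available.
      void LogCode(const uint8_t* start, int size) {}
    #if V8_ENABLE_WEBASSEMBLY
      // Exists only in builds with WebAssembly enabled; every caller must be
      // guarded the same way, or the build without wasm fails to compile.
      void LogWasmCode(const wasm::WasmCode* code) {
        LogCode(code->start, code->size);
      }
    #endif  // V8_ENABLE_WEBASSEMBLY
    };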
diff --git a/deps/v8/src/diagnostics/ppc/disasm-ppc.cc b/deps/v8/src/diagnostics/ppc/disasm-ppc.cc
index 37f8ddc0407..9757c8901bf 100644
--- a/deps/v8/src/diagnostics/ppc/disasm-ppc.cc
+++ b/deps/v8/src/diagnostics/ppc/disasm-ppc.cc
@@ -265,6 +265,11 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
return 6;
}
+ case 'F': { // FXM
+ uint8_t value = instr->Bits(19, 12);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+ return 3;
+ }
case 'U': { // UIM
int32_t value = instr->Bits(20, 16);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
@@ -413,6 +418,16 @@ void Decoder::DecodeExt0(Instruction* instr) {
PPC_VA_OPCODE_A_FORM_LIST(DECODE_VA_A_FORM__INSTRUCTIONS)
#undef DECODE_VA_A_FORM__INSTRUCTIONS
}
+ switch (EXT0 | (instr->BitField(9, 0))) {
+// TODO(miladfarca): Fix RC indicator.
+#define DECODE_VC_FORM__INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: { \
+ Format(instr, #name " 'Vt, 'Va, 'Vb"); \
+ return; \
+ }
+ PPC_VC_OPCODE_LIST(DECODE_VC_FORM__INSTRUCTIONS)
+#undef DECODE_VC_FORM__INSTRUCTIONS
+ }
switch (EXT0 | (instr->BitField(10, 0))) {
#define DECODE_VX_A_FORM__INSTRUCTIONS(name, opcode_name, opcode_value) \
case opcode_name: { \
@@ -790,7 +805,7 @@ void Decoder::DecodeExt2(Instruction* instr) {
}
// ?? are all of these xo_form?
- switch (EXT2 | (instr->BitField(9, 1))) {
+ switch (EXT2 | (instr->BitField(10, 1))) {
case CMP: {
#if V8_TARGET_ARCH_PPC64
if (instr->Bit(21)) {
@@ -1056,6 +1071,14 @@ void Decoder::DecodeExt2(Instruction* instr) {
Format(instr, "mtvsrwz 'Xt, 'ra");
return;
}
+ case LDBRX: {
+ Format(instr, "ldbrx 'rt, 'ra, 'rb");
+ return;
+ }
+ case MTCRF: {
+ Format(instr, "mtcrf 'FXM, 'rs");
+ return;
+ }
#endif
}
@@ -1254,6 +1277,12 @@ void Decoder::DecodeExt5(Instruction* instr) {
}
void Decoder::DecodeExt6(Instruction* instr) {
+ switch (EXT6 | (instr->BitField(10, 2))) {
+ case XXBRQ: {
+ Format(instr, "xxbrq 'Xt, 'Xb");
+ return;
+ }
+ }
switch (EXT6 | (instr->BitField(10, 1))) {
case XXSPLTIB: {
Format(instr, "xxspltib 'Xt, 'IMM8");
diff --git a/deps/v8/src/diagnostics/system-jit-metadata-win.h b/deps/v8/src/diagnostics/system-jit-metadata-win.h
new file mode 100644
index 00000000000..37678c25185
--- /dev/null
+++ b/deps/v8/src/diagnostics/system-jit-metadata-win.h
@@ -0,0 +1,243 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DIAGNOSTICS_SYSTEM_JIT_METADATA_WIN_H_
+#define V8_DIAGNOSTICS_SYSTEM_JIT_METADATA_WIN_H_
+
+#include <Windows.h>
+#include <TraceLoggingProvider.h>
+#include <evntprov.h>
+#include <evntrace.h> // defines TRACE_LEVEL_* and EVENT_TRACE_TYPE_*
+
+#include <cstdint>
+#include <string>
+#include <unordered_set>
+#include <utility>
+
+namespace v8 {
+namespace internal {
+namespace ETWJITInterface {
+
+/*******************************************************************************
+Helper templates to create tightly packed metadata of the format expected by the
+ETW data structures.
+*******************************************************************************/
+
+// All "manifest-free" events should go to channel 11 by default
+const uint8_t kManifestFreeChannel = 11;
+
+// Number of metadescriptors. Use this to find out the index of the field
+// descriptors in the descriptors_array
+const uint8_t kMetaDescriptorsCount = 2;
+
+// Filtering keyword to find JScript stack-walking events
+constexpr uint64_t kJScriptRuntimeKeyword = 1;
+
+constexpr uint16_t kSourceLoadEventID = 41;
+constexpr uint16_t kMethodLoadEventID = 9;
+
+// Structure to treat a string literal, or char[], as a constexpr byte sequence
+template <size_t count>
+struct str_bytes {
+ template <std::size_t... idx>
+ constexpr str_bytes(char const (&str)[count], std::index_sequence<idx...>)
+ : bytes{str[idx]...}, size(count) {}
+
+ // Concatenate two str_bytes
+ template <std::size_t count1, std::size_t count2, std::size_t... idx1,
+ std::size_t... idx2>
+ constexpr str_bytes(const str_bytes<count1>& s1, std::index_sequence<idx1...>,
+ const str_bytes<count2>& s2, std::index_sequence<idx2...>)
+ : bytes{s1.bytes[idx1]..., s2.bytes[idx2]...}, size(count) {}
+
+ char bytes[count]; // NOLINT
+ size_t size;
+};
+
+// Specialization for 0 (base case when joining fields)
+template <>
+struct str_bytes<0> {
+ constexpr str_bytes() : bytes{}, size(0) {}
+ char bytes[1]; // MSVC doesn't like an array of 0 bytes
+ size_t size;
+};
+
+// Factory function to simplify creating a str_bytes from a string literal
+template <size_t count, typename idx = std::make_index_sequence<count>>
+constexpr auto MakeStrBytes(char const (&s)[count]) {
+ return str_bytes<count>{s, idx{}};
+}
+
+// Concatenates two str_bytes into one
+template <std::size_t size1, std::size_t size2>
+constexpr auto JoinBytes(const str_bytes<size1>& str1,
+ const str_bytes<size2>& str2) {
+ auto idx1 = std::make_index_sequence<size1>();
+ auto idx2 = std::make_index_sequence<size2>();
+ return str_bytes<size1 + size2>{str1, idx1, str2, idx2};
+}
+
+// Creates an str_bytes which is the field name suffixed with the field type
+template <size_t count>
+constexpr auto Field(char const (&s)[count], uint8_t type) {
+ auto field_name = MakeStrBytes(s);
+ const char type_arr[1] = {static_cast<char>(type)};
+ return JoinBytes(field_name, MakeStrBytes(type_arr));
+}
+
+// Creates the ETW event metadata header, which consists of a uint16_t
+// representing the total size, and a tag byte (always 0x00 currently).
+constexpr auto Header(size_t size) {
+ size_t total_size = size + 3; // total_size includes the 2 byte size + tag
+ const char header_bytes[3] = {static_cast<char>(total_size & 0xFF),
+ static_cast<char>(total_size >> 8 & 0xFF),
+ '\0'};
+ return MakeStrBytes(header_bytes);
+}
+
+// The JoinFields implementations below are a set of overloads for constructing
+// a str_bytes representing the concatenated fields from a parameter pack.
+
+// Empty case needed for events with no fields.
+constexpr auto JoinFields() { return str_bytes<0>{}; }
+
+// Only one field, or base case when multiple fields.
+template <typename T1>
+constexpr auto JoinFields(T1 field) {
+ return field;
+}
+
+// Join two or more fields together.
+template <typename T1, typename T2, typename... Ts>
+constexpr auto JoinFields(T1 field1, T2 field2, Ts... args) {
+ auto bytes = JoinBytes(field1, field2);
+ return JoinFields(bytes, args...);
+}
+
+// Creates a constexpr char[] representing the fields for an ETW event.
+// Declare the variable as `constexpr static auto` and provide the event name,
+// followed by a series of `Field` invocations for each field.
+//
+// Example:
+// constexpr static auto event_fields = EventFields("my1stEvent",
+// Field("MyIntVal", kTypeInt32),
+// Field("MyMsg", kTypeAnsiStr),
+// Field("Address", kTypePointer));
+template <std::size_t count, typename... Ts>
+constexpr auto EventFields(char const (&name)[count], Ts... field_args) {
+ auto name_bytes = MakeStrBytes(name);
+ auto fields = JoinFields(field_args...);
+ auto data = JoinBytes(name_bytes, fields);
+
+ auto header = Header(data.size);
+ return JoinBytes(header, data);
+}
+
+constexpr auto EventMetadata(uint16_t id, uint64_t keywords) {
+ return EVENT_DESCRIPTOR{id,
+ 0, // Version
+ kManifestFreeChannel,
+ TRACE_LEVEL_INFORMATION, // Level
+ EVENT_TRACE_TYPE_START, // Opcode
+ 0, // Task
+ keywords};
+}
+
+void SetMetaDescriptors(EVENT_DATA_DESCRIPTOR* data_descriptor,
+ UINT16 const UNALIGNED* traits, const void* metadata,
+ size_t size) {
+ // The first descriptor is the provider traits (just the name currently)
+ uint16_t traits_size = *reinterpret_cast<const uint16_t*>(traits);
+ EventDataDescCreate(data_descriptor, traits, traits_size);
+ data_descriptor->Type = EVENT_DATA_DESCRIPTOR_TYPE_PROVIDER_METADATA;
+ ++data_descriptor;
+
+ // The second descriptor contains the data to describe the field layout
+ EventDataDescCreate(data_descriptor, metadata, static_cast<ULONG>(size));
+ data_descriptor->Type = EVENT_DATA_DESCRIPTOR_TYPE_EVENT_METADATA;
+}
+
+// Base case, no fields left to set
+inline void SetFieldDescriptors(EVENT_DATA_DESCRIPTOR* data_descriptors) {}
+
+// Need to declare all the base overloads in advance, as their bodies may become
+// a point of reference for any of the overloads, and only overloads that have
+// been seen will be candidates.
+template <typename... Ts>
+void SetFieldDescriptors(EVENT_DATA_DESCRIPTOR* data_descriptors,
+ const std::wstring& value, const Ts&... rest);
+template <typename... Ts>
+void SetFieldDescriptors(EVENT_DATA_DESCRIPTOR* data_descriptors,
+ const std::string& value, const Ts&... rest);
+template <typename... Ts>
+void SetFieldDescriptors(EVENT_DATA_DESCRIPTOR* data_descriptors,
+ const char* value, const Ts&... rest);
+
+// One or more fields to set
+template <typename T, typename... Ts>
+void SetFieldDescriptors(EVENT_DATA_DESCRIPTOR* data_descriptors,
+ const T& value, const Ts&... rest) {
+ EventDataDescCreate(data_descriptors, &value, sizeof(value));
+ SetFieldDescriptors(++data_descriptors, rest...);
+}
+
+// Specialize for strings
+template <typename... Ts>
+void SetFieldDescriptors(EVENT_DATA_DESCRIPTOR* data_descriptors,
+ const std::wstring& value, const Ts&... rest) {
+ EventDataDescCreate(data_descriptors, value.data(),
+ static_cast<ULONG>(value.size() * 2 + 2));
+ SetFieldDescriptors(++data_descriptors, rest...);
+}
+
+template <typename... Ts>
+void SetFieldDescriptors(EVENT_DATA_DESCRIPTOR* data_descriptors,
+ const std::string& value, const Ts&... rest) {
+ EventDataDescCreate(data_descriptors, value.data(),
+ static_cast<ULONG>(value.size() + 1));
+ SetFieldDescriptors(++data_descriptors, rest...);
+}
+
+template <typename... Ts>
+void SetFieldDescriptors(EVENT_DATA_DESCRIPTOR* data_descriptors,
+ const char* value, const Ts&... rest) {
+ ULONG size = static_cast<ULONG>(strlen(value) + 1);
+ EventDataDescCreate(data_descriptors, value, size);
+ SetFieldDescriptors(++data_descriptors, rest...);
+}
+
+// This function does the actual writing of the event via the Win32 API
+inline ULONG LogEvent(uint64_t regHandle,
+ const EVENT_DESCRIPTOR* event_descriptor,
+ EVENT_DATA_DESCRIPTOR* data_descriptor,
+ ULONG desc_count) {
+ if (regHandle == 0) return ERROR_SUCCESS;
+ return EventWriteTransfer(regHandle, event_descriptor, NULL /* ActivityId */,
+ NULL /* RelatedActivityId */, desc_count,
+ data_descriptor);
+}
+
+// This template is called by the provider implementation
+template <typename T, typename... Fs>
+void LogEventData(const TraceLoggingHProvider provider,
+ const EVENT_DESCRIPTOR* event_descriptor, T* meta,
+ const Fs&... fields) {
+ const size_t descriptor_count = sizeof...(fields) + kMetaDescriptorsCount;
+ EVENT_DATA_DESCRIPTOR descriptors[sizeof...(fields) + kMetaDescriptorsCount];
+
+ SetMetaDescriptors(descriptors, provider->ProviderMetadataPtr, meta->bytes,
+ meta->size);
+
+ EVENT_DATA_DESCRIPTOR* data_descriptors = descriptors + kMetaDescriptorsCount;
+ SetFieldDescriptors(data_descriptors, fields...);
+
+ LogEvent(provider->RegHandle, event_descriptor, descriptors,
+ descriptor_count);
+}
+
+} // namespace ETWJITInterface
+} // namespace internal
+} // namespace v8
+
+#endif // V8_DIAGNOSTICS_SYSTEM_JIT_METADATA_WIN_H_
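
The constexpr string-packing machinery above is terse, so here is a minimal standalone sketch of the same index-sequence idea (simplified types, not the actual str_bytes/Field helpers): a field name and a type byte are concatenated into one tightly packed constexpr byte array, which is what Field() and EventFields() assemble into the ETW metadata blob.

    #include <cstddef>
    #include <cstdio>
    #include <utility>

    // Packs string literals into one constexpr byte array via index sequences.
    template <std::size_t N>
    struct Bytes {
      char data[N];
      template <std::size_t... I>
      constexpr Bytes(const char (&s)[N], std::index_sequence<I...>)
          : data{s[I]...} {}
      template <std::size_t N1, std::size_t N2, std::size_t... I1,
                std::size_t... I2>
      constexpr Bytes(const Bytes<N1>& a, std::index_sequence<I1...>,
                      const Bytes<N2>& b, std::index_sequence<I2...>)
          : data{a.data[I1]..., b.data[I2]...} {}
    };

    template <std::size_t N>
    constexpr Bytes<N> Make(const char (&s)[N]) {
      return Bytes<N>(s, std::make_index_sequence<N>{});
    }

    template <std::size_t N1, std::size_t N2>
    constexpr Bytes<N1 + N2> Join(const Bytes<N1>& a, const Bytes<N2>& b) {
      return Bytes<N1 + N2>(a, std::make_index_sequence<N1>{}, b,
                            std::make_index_sequence<N2>{});
    }

    int main() {
      // Analogous to Field("Url", <type byte>): NUL-terminated name, then tag.
      constexpr auto field = Join(Make("Url"), Make("\x01"));
      static_assert(sizeof(field.data) == 6, "U r l NUL 0x01 NUL");
      for (char c : field.data)
        std::printf("%02x ", static_cast<unsigned char>(c));
      std::printf("\n");
    }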
diff --git a/deps/v8/src/diagnostics/system-jit-win.cc b/deps/v8/src/diagnostics/system-jit-win.cc
new file mode 100644
index 00000000000..49200219c26
--- /dev/null
+++ b/deps/v8/src/diagnostics/system-jit-win.cc
@@ -0,0 +1,108 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/diagnostics/system-jit-win.h"
+
+#include "src/base/lazy-instance.h"
+#include "src/base/logging.h"
+#include "src/diagnostics/system-jit-metadata-win.h"
+#include "src/libplatform/tracing/recorder.h"
+
+#if defined(__clang__)
+#pragma clang diagnostic ignored "-Wc++98-compat-extra-semi"
+#endif
+
+namespace v8 {
+namespace internal {
+namespace ETWJITInterface {
+
+TRACELOGGING_DECLARE_PROVIDER(g_v8Provider);
+
+TRACELOGGING_DEFINE_PROVIDER(g_v8Provider, "V8.js", (V8_ETW_GUID));
+
+using ScriptMapType = std::unordered_set<int>;
+static base::LazyInstance<ScriptMapType>::type script_map =
+ LAZY_INSTANCE_INITIALIZER;
+
+void Register() {
+ DCHECK(!TraceLoggingProviderEnabled(g_v8Provider, 0, 0));
+ TraceLoggingRegister(g_v8Provider);
+}
+
+void Unregister() {
+ if (g_v8Provider) {
+ TraceLoggingUnregister(g_v8Provider);
+ }
+}
+
+void EventHandler(const JitCodeEvent* event) {
+ if (event->code_type != v8::JitCodeEvent::CodeType::JIT_CODE) return;
+ if (event->type != v8::JitCodeEvent::EventType::CODE_ADDED) return;
+
+ int name_len = static_cast<int>(event->name.len);
+ // Note: event->name.str is not null terminated.
+ std::wstring method_name(name_len + 1, '\0');
+ MultiByteToWideChar(
+ CP_UTF8, 0, event->name.str, name_len,
+      // const_cast needed when building as C++14 (data() is non-const only in C++17 and later).
+ const_cast<LPWSTR>(method_name.data()),
+ static_cast<int>(method_name.size()));
+
+ v8::Isolate* script_context = event->isolate;
+ auto script = event->script;
+ int script_id = 0;
+ if (!script.IsEmpty()) {
+    // If this is the first time we see this source file, log a SourceLoad event.
+ script_id = script->GetId();
+ if (script_map.Pointer()->find(script_id) == script_map.Pointer()->end()) {
+ script_map.Pointer()->insert(script_id);
+
+ auto script_name = script->GetScriptName();
+ std::wstring wstr_name(0, L'\0');
+ if (script_name->IsString()) {
+ auto v8str_name = script_name.As<v8::String>();
+ wstr_name.resize(v8str_name->Length());
+      // On Windows wchar_t == uint16_t. const_cast needed for C++14.
+ uint16_t* wstr_data = const_cast<uint16_t*>(
+ reinterpret_cast<const uint16_t*>(wstr_name.data()));
+ v8str_name->Write(event->isolate, wstr_data);
+ }
+
+ constexpr static auto source_load_event_meta =
+ EventMetadata(kSourceLoadEventID, kJScriptRuntimeKeyword);
+ constexpr static auto source_load_event_fields = EventFields(
+ "SourceLoad", Field("SourceID", TlgInUINT64),
+ Field("ScriptContextID", TlgInPOINTER),
+ Field("SourceFlags", TlgInUINT32), Field("Url", TlgInUNICODESTRING));
+ LogEventData(g_v8Provider, &source_load_event_meta,
+ &source_load_event_fields, (uint64_t)script_id,
+ script_context,
+ (uint32_t)0, // SourceFlags
+ wstr_name);
+ }
+ }
+
+ constexpr static auto method_load_event_meta =
+ EventMetadata(kMethodLoadEventID, kJScriptRuntimeKeyword);
+ constexpr static auto method_load_event_fields = EventFields(
+ "MethodLoad", Field("ScriptContextID", TlgInPOINTER),
+ Field("MethodStartAddress", TlgInPOINTER),
+ Field("MethodSize", TlgInUINT64), Field("MethodID", TlgInUINT32),
+ Field("MethodFlags", TlgInUINT16),
+ Field("MethodAddressRangeID", TlgInUINT16),
+ Field("SourceID", TlgInUINT64), Field("Line", TlgInUINT32),
+ Field("Column", TlgInUINT32), Field("MethodName", TlgInUNICODESTRING));
+
+ LogEventData(g_v8Provider, &method_load_event_meta, &method_load_event_fields,
+ script_context, event->code_start, (uint64_t)event->code_len,
+ (uint32_t)0, // MethodId
+ (uint16_t)0, // MethodFlags
+ (uint16_t)0, // MethodAddressRangeId
+ (uint64_t)script_id, (uint32_t)0, (uint32_t)0, // Line & Column
+ method_name);
+}
+
+} // namespace ETWJITInterface
+} // namespace internal
+} // namespace v8
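
EventHandler above has the v8::JitCodeEventHandler signature. As a hypothetical embedder-side fragment (not part of this patch, and assuming the public v8::Isolate::SetJitCodeEventHandler API), a handler of this shape would be installed roughly like so:

    #include "include/v8.h"

    // Hypothetical handler with the same shape as ETWJITInterface::EventHandler.
    void MyJitHandler(const v8::JitCodeEvent* event) {
      if (event->type != v8::JitCodeEvent::CODE_ADDED) return;
      // event->name.str is not NUL-terminated; event->name.len gives its length.
    }

    void InstallJitEventHandler(v8::Isolate* isolate) {
      // kJitCodeEventEnumExisting replays already-compiled code as CODE_ADDED
      // events so that nothing JIT-compiled before this call is missed.
      isolate->SetJitCodeEventHandler(v8::kJitCodeEventEnumExisting,
                                      MyJitHandler);
    }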
diff --git a/deps/v8/src/diagnostics/system-jit-win.h b/deps/v8/src/diagnostics/system-jit-win.h
new file mode 100644
index 00000000000..dffd34df6c6
--- /dev/null
+++ b/deps/v8/src/diagnostics/system-jit-win.h
@@ -0,0 +1,20 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DIAGNOSTICS_SYSTEM_JIT_WIN_H_
+#define V8_DIAGNOSTICS_SYSTEM_JIT_WIN_H_
+
+#include "include/v8.h"
+
+namespace v8 {
+namespace internal {
+namespace ETWJITInterface {
+void Register();
+void Unregister();
+void EventHandler(const v8::JitCodeEvent* event);
+} // namespace ETWJITInterface
+} // namespace internal
+} // namespace v8
+
+#endif // V8_DIAGNOSTICS_SYSTEM_JIT_WIN_H_
diff --git a/deps/v8/src/diagnostics/unwinding-info-win64.cc b/deps/v8/src/diagnostics/unwinding-info-win64.cc
index 54978eeb749..9a5f7069e75 100644
--- a/deps/v8/src/diagnostics/unwinding-info-win64.cc
+++ b/deps/v8/src/diagnostics/unwinding-info-win64.cc
@@ -17,37 +17,6 @@
#error "Unsupported OS"
#endif // V8_OS_WIN_X64
-// Forward declaration to keep this independent of Win8
-NTSYSAPI
-DWORD
-NTAPI
-RtlAddGrowableFunctionTable(
- _Out_ PVOID* DynamicTable,
- _In_reads_(MaximumEntryCount) PRUNTIME_FUNCTION FunctionTable,
- _In_ DWORD EntryCount,
- _In_ DWORD MaximumEntryCount,
- _In_ ULONG_PTR RangeBase,
- _In_ ULONG_PTR RangeEnd
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlGrowFunctionTable(
- _Inout_ PVOID DynamicTable,
- _In_ DWORD NewEntryCount
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlDeleteGrowableFunctionTable(
- _In_ PVOID DynamicTable
- );
-
-
namespace v8 {
namespace internal {
namespace win64_unwindinfo {
diff --git a/deps/v8/src/execution/OWNERS b/deps/v8/src/execution/OWNERS
index 1e89f1e7502..1a987f65e7e 100644
--- a/deps/v8/src/execution/OWNERS
+++ b/deps/v8/src/execution/OWNERS
@@ -1,11 +1,9 @@
-bmeurer@chromium.org
ishell@chromium.org
jgruber@chromium.org
jkummerow@chromium.org
mythria@chromium.org
delphick@chromium.org
-petermarshall@chromium.org
-szuend@chromium.org
verwaest@chromium.org
+victorgomes@chromium.org
per-file futex-emulation.*=marja@chromium.org
diff --git a/deps/v8/src/execution/arm/frame-constants-arm.h b/deps/v8/src/execution/arm/frame-constants-arm.h
index b4c4e013b77..2e3b1ed665e 100644
--- a/deps/v8/src/execution/arm/frame-constants-arm.h
+++ b/deps/v8/src/execution/arm/frame-constants-arm.h
@@ -76,16 +76,18 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
// registers (see liftoff-assembler-defs.h).
class WasmDebugBreakFrameConstants : public TypedFrameConstants {
public:
- // {r0, r1, r2, r3, r4, r5, r6, r8, r9}
- static constexpr uint32_t kPushedGpRegs = 0b1101111111;
- // {d0 .. d12}
- static constexpr int kFirstPushedFpReg = 0;
- static constexpr int kLastPushedFpReg = 12;
+ // r10: root, r11: fp, r12: ip, r13: sp, r14: lr, r15: pc.
+ static constexpr RegList kPushedGpRegs =
+ Register::ListOf(r0, r1, r2, r3, r4, r5, r6, r7, r8, r9);
+
+ // d13: zero, d14-d15: scratch
+ static constexpr RegList kPushedFpRegs = LowDwVfpRegister::ListOf(
+ d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12);
static constexpr int kNumPushedGpRegisters =
base::bits::CountPopulation(kPushedGpRegs);
static constexpr int kNumPushedFpRegisters =
- kLastPushedFpReg - kFirstPushedFpReg + 1;
+ base::bits::CountPopulation(kPushedFpRegs);
static constexpr int kLastPushedGpRegisterOffset =
-TypedFrameConstants::kFixedFrameSizeFromFp -
@@ -102,10 +104,10 @@ class WasmDebugBreakFrameConstants : public TypedFrameConstants {
}
static int GetPushedFpRegisterOffset(int reg_code) {
- DCHECK_LE(kFirstPushedFpReg, reg_code);
- DCHECK_GE(kLastPushedFpReg, reg_code);
+ DCHECK_NE(0, kPushedFpRegs & (1 << reg_code));
+ uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1);
return kLastPushedFpRegisterOffset +
- (reg_code - kFirstPushedFpReg) * kDoubleSize;
+ base::bits::CountPopulation(lower_regs) * kDoubleSize;
}
};
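
Replacing the contiguous {kFirstPushedFpReg..kLastPushedFpReg} range with a RegList bitmask changes how a register's spill-slot offset is located. A standalone sketch of that lookup (illustrative constants, not the real frame layout values): the slot index of a register is the popcount of the pushed-register mask restricted to codes below its own.

    #include <bitset>
    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t kPushedFpRegs = 0x1FFF;  // d0..d12, as in the arm list above
    constexpr int kDoubleSize = 8;
    constexpr int kLastPushedFpRegisterOffset = -13 * kDoubleSize;  // illustrative only

    int PushedFpRegisterOffset(int reg_code) {
      // Count how many pushed registers have a lower code than reg_code; that
      // popcount is the register's slot index above the lowest pushed slot.
      uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1);
      int slot = static_cast<int>(std::bitset<32>(lower_regs).count());
      return kLastPushedFpRegisterOffset + slot * kDoubleSize;
    }

    int main() {
      std::printf("d0 -> %d, d5 -> %d\n", PushedFpRegisterOffset(0),
                  PushedFpRegisterOffset(5));
    }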
diff --git a/deps/v8/src/execution/arm/simulator-arm.cc b/deps/v8/src/execution/arm/simulator-arm.cc
index a013deb418b..ee4e9af4621 100644
--- a/deps/v8/src/execution/arm/simulator-arm.cc
+++ b/deps/v8/src/execution/arm/simulator-arm.cc
@@ -4277,13 +4277,6 @@ void PairwiseAddLong(Simulator* simulator, int Vd, int Vm) {
simulator->set_neon_register<WideType, SIZE>(Vd, dst);
}
-template <typename T, int SIZE = kSimd128Size>
-void RoundingAverageUnsigned(Simulator* simulator, int Vd, int Vm, int Vn) {
- static_assert(std::is_unsigned<T>::value,
- "Implemented only for unsigned types.");
- Binop<T>(simulator, Vd, Vm, Vn, base::RoundingAverageUnsigned<T>);
-}
-
template <typename NarrowType, typename WideType>
void MultiplyLong(Simulator* simulator, int Vd, int Vn, int Vm) {
DCHECK_EQ(sizeof(WideType), 2 * sizeof(NarrowType));
@@ -5034,6 +5027,11 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
}
}
set_neon_register(Vd, src1);
+ } else if (!u && opc == 1 && sz == 3 && q && op1) {
+ // vorn, Qd, Qm, Qn.
+ // NeonSize does not matter.
+ Binop<uint32_t>(this, Vd, Vm, Vn,
+ [](uint32_t x, uint32_t y) { return x | (~y); });
} else if (!u && opc == 1 && sz == 0 && q && op1) {
// vand Qd, Qm, Qn.
uint32_t src1[4], src2[4];
@@ -5314,13 +5312,13 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
switch (size) {
case Neon8:
- RoundingAverageUnsigned<uint8_t>(this, Vd, Vm, Vn);
+ Binop<uint8_t>(this, Vd, Vm, Vn, RoundingAverageUnsigned<uint8_t>);
break;
case Neon16:
- RoundingAverageUnsigned<uint16_t>(this, Vd, Vm, Vn);
+ Binop<uint16_t>(this, Vd, Vm, Vn, RoundingAverageUnsigned<uint16_t>);
break;
case Neon32:
- RoundingAverageUnsigned<uint32_t>(this, Vd, Vm, Vn);
+ Binop<uint32_t>(this, Vd, Vm, Vn, RoundingAverageUnsigned<uint32_t>);
break;
default:
UNREACHABLE();
diff --git a/deps/v8/src/execution/arm64/frame-constants-arm64.h b/deps/v8/src/execution/arm64/frame-constants-arm64.h
index a01c15d3482..8810586360f 100644
--- a/deps/v8/src/execution/arm64/frame-constants-arm64.h
+++ b/deps/v8/src/execution/arm64/frame-constants-arm64.h
@@ -7,6 +7,9 @@
#include "src/base/bits.h"
#include "src/base/macros.h"
+#include "src/codegen/arm64/register-arm64.h"
+#include "src/codegen/register.h"
+#include "src/codegen/reglist.h"
#include "src/common/globals.h"
#include "src/execution/frame-constants.h"
@@ -93,15 +96,23 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
// registers (see liftoff-assembler-defs.h).
class WasmDebugBreakFrameConstants : public TypedFrameConstants {
public:
- // {x0 .. x28} \ {x16, x17, x18, x26, x27}
- static constexpr uint32_t kPushedGpRegs =
- (1 << 29) - 1 - (1 << 16) - (1 << 17) - (1 << 18) - (1 << 26) - (1 << 27);
- // {d0 .. d29}; {d15} is not used, but we still keep it for alignment reasons
- // (the frame size needs to be a multiple of 16).
- static constexpr uint32_t kPushedFpRegs = (1 << 30) - 1;
+ // x16: ip0, x17: ip1, x18: platform register, x26: root, x28: base, x29: fp,
+ // x30: lr, x31: xzr.
+ static constexpr RegList kPushedGpRegs = CPURegister::ListOf(
+ x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x19,
+ x20, x21, x22, x23, x24, x25, x27);
+
+ // We push FpRegs as 128-bit SIMD registers, so 16-byte frame alignment
+ // is guaranteed regardless of register count.
+ static constexpr RegList kPushedFpRegs = CPURegister::ListOf(
+ d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12, d13, d14, d16, d17,
+ d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29);
static constexpr int kNumPushedGpRegisters =
base::bits::CountPopulation(kPushedGpRegs);
+ static_assert(kNumPushedGpRegisters % 2 == 0,
+ "stack frames need to be 16-byte aligned");
+
static constexpr int kNumPushedFpRegisters =
base::bits::CountPopulation(kPushedFpRegs);
diff --git a/deps/v8/src/execution/arm64/simulator-arm64.cc b/deps/v8/src/execution/arm64/simulator-arm64.cc
index 78ea638e0db..23a03848ad5 100644
--- a/deps/v8/src/execution/arm64/simulator-arm64.cc
+++ b/deps/v8/src/execution/arm64/simulator-arm64.cc
@@ -1800,17 +1800,14 @@ void Simulator::LoadStoreHelper(Instruction* instr, int64_t offset,
unsigned addr_reg = instr->Rn();
uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode);
uintptr_t stack = 0;
- LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreMask));
{
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
if (instr->IsLoad()) {
local_monitor_.NotifyLoad();
- } else if (instr->IsStore()) {
+ } else {
local_monitor_.NotifyStore();
GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_processor_);
- } else {
- DCHECK_EQ(op, PRFM);
}
}
@@ -1829,6 +1826,7 @@ void Simulator::LoadStoreHelper(Instruction* instr, int64_t offset,
stack = sp();
}
+ LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreMask));
switch (op) {
// Use _no_log variants to suppress the register trace (LOG_REGS,
// LOG_VREGS). We will print a more detailed log.
@@ -1903,10 +1901,6 @@ void Simulator::LoadStoreHelper(Instruction* instr, int64_t offset,
MemoryWrite<qreg_t>(address, qreg(srcdst));
break;
- // Do nothing for prefetch.
- case PRFM:
- break;
-
default:
UNIMPLEMENTED();
}
@@ -1922,7 +1916,7 @@ void Simulator::LoadStoreHelper(Instruction* instr, int64_t offset,
} else {
LogRead(address, srcdst, GetPrintRegisterFormatForSize(access_size));
}
- } else if (instr->IsStore()) {
+ } else {
if ((op == STR_s) || (op == STR_d)) {
LogVWrite(address, srcdst, GetPrintRegisterFormatForSizeFP(access_size));
} else if ((op == STR_b) || (op == STR_h) || (op == STR_q)) {
@@ -1930,8 +1924,6 @@ void Simulator::LoadStoreHelper(Instruction* instr, int64_t offset,
} else {
LogWrite(address, srcdst, GetPrintRegisterFormatForSize(access_size));
}
- } else {
- DCHECK_EQ(op, PRFM);
}
// Handle the writeback for loads after the load to ensure safe pop
diff --git a/deps/v8/src/execution/execution.cc b/deps/v8/src/execution/execution.cc
index 3da4cbdbaff..7866b406d90 100644
--- a/deps/v8/src/execution/execution.cc
+++ b/deps/v8/src/execution/execution.cc
@@ -5,12 +5,16 @@
#include "src/execution/execution.h"
#include "src/api/api-inl.h"
-#include "src/compiler/wasm-compiler.h" // Only for static asserts.
+#include "src/debug/debug.h"
#include "src/execution/frames.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/vm-state-inl.h"
#include "src/logging/counters.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/compiler/wasm-compiler.h" // Only for static asserts.
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace internal {
@@ -517,6 +521,7 @@ STATIC_ASSERT(offsetof(StackHandlerMarker, padding) ==
StackHandlerConstants::kPaddingOffset);
STATIC_ASSERT(sizeof(StackHandlerMarker) == StackHandlerConstants::kSize);
+#if V8_ENABLE_WEBASSEMBLY
void Execution::CallWasm(Isolate* isolate, Handle<Code> wrapper_code,
Address wasm_call_target, Handle<Object> object_ref,
Address packed_args) {
@@ -570,6 +575,7 @@ void Execution::CallWasm(Isolate* isolate, Handle<Code> wrapper_code,
}
*isolate->c_entry_fp_address() = saved_c_entry_fp;
}
+#endif // V8_ENABLE_WEBASSEMBLY
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/execution.h b/deps/v8/src/execution/execution.h
index f1f175521ed..ff42ada550a 100644
--- a/deps/v8/src/execution/execution.h
+++ b/deps/v8/src/execution/execution.h
@@ -60,6 +60,7 @@ class Execution final : public AllStatic {
Isolate* isolate, MicrotaskQueue* microtask_queue,
MaybeHandle<Object>* exception_out);
+#if V8_ENABLE_WEBASSEMBLY
// Call a Wasm function identified by {wasm_call_target} through the
// provided {wrapper_code}, which must match the function's signature.
// Upon return, either isolate->has_pending_exception() is true, or
@@ -69,6 +70,7 @@ class Execution final : public AllStatic {
Address wasm_call_target,
Handle<Object> object_ref,
Address packed_args);
+#endif // V8_ENABLE_WEBASSEMBLY
};
} // namespace internal
diff --git a/deps/v8/src/execution/frame-constants.h b/deps/v8/src/execution/frame-constants.h
index 6903ae0032b..10cdff5c0fc 100644
--- a/deps/v8/src/execution/frame-constants.h
+++ b/deps/v8/src/execution/frame-constants.h
@@ -216,6 +216,7 @@ class ConstructFrameConstants : public TypedFrameConstants {
DEFINE_TYPED_FRAME_SIZES(5);
};
+#if V8_ENABLE_WEBASSEMBLY
class CWasmEntryFrameConstants : public TypedFrameConstants {
public:
// FP-relative:
@@ -236,6 +237,7 @@ class WasmExitFrameConstants : public WasmFrameConstants {
static const int kCallingPCOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
DEFINE_TYPED_FRAME_SIZES(2);
};
+#endif // V8_ENABLE_WEBASSEMBLY
class BuiltinContinuationFrameConstants : public TypedFrameConstants {
public:
diff --git a/deps/v8/src/execution/frames-inl.h b/deps/v8/src/execution/frames-inl.h
index a5d60f825f0..ba2a7bce9a0 100644
--- a/deps/v8/src/execution/frames-inl.h
+++ b/deps/v8/src/execution/frames-inl.h
@@ -163,7 +163,7 @@ inline Address CommonFrame::caller_fp() const {
}
inline Address CommonFrame::caller_pc() const {
- return base::Memory<Address>(ComputePCAddress(fp()));
+ return ReadPC(reinterpret_cast<Address*>(ComputePCAddress(fp())));
}
inline Address CommonFrame::ComputePCAddress(Address fp) {
@@ -222,6 +222,7 @@ inline BaselineFrame::BaselineFrame(StackFrameIteratorBase* iterator)
inline BuiltinFrame::BuiltinFrame(StackFrameIteratorBase* iterator)
: TypedFrameWithJSLinkage(iterator) {}
+#if V8_ENABLE_WEBASSEMBLY
inline WasmFrame::WasmFrame(StackFrameIteratorBase* iterator)
: TypedFrame(iterator) {}
@@ -244,6 +245,7 @@ inline CWasmEntryFrame::CWasmEntryFrame(StackFrameIteratorBase* iterator)
inline WasmCompileLazyFrame::WasmCompileLazyFrame(
StackFrameIteratorBase* iterator)
: TypedFrame(iterator) {}
+#endif // V8_ENABLE_WEBASSEMBLY
inline InternalFrame::InternalFrame(StackFrameIteratorBase* iterator)
: TypedFrame(iterator) {}
@@ -287,7 +289,11 @@ inline JavaScriptFrame* JavaScriptFrameIterator::Reframe() {
inline CommonFrame* StackTraceFrameIterator::frame() const {
StackFrame* frame = iterator_.frame();
+#if V8_ENABLE_WEBASSEMBLY
DCHECK(frame->is_java_script() || frame->is_wasm());
+#else
+ DCHECK(frame->is_java_script());
+#endif // V8_ENABLE_WEBASSEMBLY
return static_cast<CommonFrame*>(frame);
}
@@ -300,7 +306,9 @@ bool StackTraceFrameIterator::is_javascript() const {
return frame()->is_java_script();
}
+#if V8_ENABLE_WEBASSEMBLY
bool StackTraceFrameIterator::is_wasm() const { return frame()->is_wasm(); }
+#endif // V8_ENABLE_WEBASSEMBLY
JavaScriptFrame* StackTraceFrameIterator::javascript_frame() const {
return JavaScriptFrame::cast(frame());
@@ -308,9 +316,14 @@ JavaScriptFrame* StackTraceFrameIterator::javascript_frame() const {
inline StackFrame* SafeStackFrameIterator::frame() const {
DCHECK(!done());
+#if V8_ENABLE_WEBASSEMBLY
DCHECK(frame_->is_java_script() || frame_->is_exit() ||
frame_->is_builtin_exit() || frame_->is_wasm() ||
frame_->is_wasm_to_js() || frame_->is_js_to_wasm());
+#else
+ DCHECK(frame_->is_java_script() || frame_->is_exit() ||
+ frame_->is_builtin_exit());
+#endif // V8_ENABLE_WEBASSEMBLY
return frame_;
}
diff --git a/deps/v8/src/execution/frames.cc b/deps/v8/src/execution/frames.cc
index 6ee597572ef..65d34a56519 100644
--- a/deps/v8/src/execution/frames.cc
+++ b/deps/v8/src/execution/frames.cc
@@ -24,10 +24,13 @@
#include "src/objects/visitors.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/strings/string-stream.h"
+#include "src/zone/zone-containers.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects-inl.h"
-#include "src/zone/zone-containers.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -60,12 +63,12 @@ class StackHandlerIterator {
: limit_(frame->fp()), handler_(handler) {
// Make sure the handler has already been unwound to this frame.
DCHECK(frame->sp() <= AddressOf(handler));
+#if V8_ENABLE_WEBASSEMBLY
// For CWasmEntry frames, the handler was registered by the last C++
// frame (Execution::CallWasm), so even though its address is already
// beyond the limit, we know we always want to unwind one handler.
- if (frame->type() == StackFrame::C_WASM_ENTRY) {
- handler_ = handler_->next();
- }
+ if (frame->is_c_wasm_entry()) handler_ = handler_->next();
+#endif // V8_ENABLE_WEBASSEMBLY
}
StackHandler* handler() const { return handler_; }
@@ -210,7 +213,10 @@ bool StackTraceFrameIterator::IsValidFrame(StackFrame* frame) const {
return js_frame->function().shared().IsSubjectToDebugging();
}
// Apart from JavaScript frames, only Wasm frames are valid.
- return frame->is_wasm();
+#if V8_ENABLE_WEBASSEMBLY
+ if (frame->is_wasm()) return true;
+#endif // V8_ENABLE_WEBASSEMBLY
+ return false;
}
// -------------------------------------------------------------------------
@@ -219,16 +225,13 @@ namespace {
bool IsInterpreterFramePc(Isolate* isolate, Address pc,
StackFrame::State* state) {
- Code interpreter_entry_trampoline =
- isolate->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
- Code interpreter_bytecode_advance =
- isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeAdvance);
- Code interpreter_bytecode_dispatch =
- isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
-
- if (interpreter_entry_trampoline.contains(pc) ||
- interpreter_bytecode_advance.contains(pc) ||
- interpreter_bytecode_dispatch.contains(pc)) {
+ Builtins::Name builtin_index = InstructionStream::TryLookupCode(isolate, pc);
+ if (builtin_index != Builtins::kNoBuiltinId &&
+ (builtin_index == Builtins::kInterpreterEntryTrampoline ||
+ builtin_index == Builtins::kInterpreterEnterBytecodeAdvance ||
+ builtin_index == Builtins::kInterpreterEnterBytecodeDispatch ||
+ builtin_index == Builtins::kBaselineEnterAtBytecode ||
+ builtin_index == Builtins::kBaselineEnterAtNextBytecode)) {
return true;
} else if (FLAG_interpreted_frames_native_stack) {
intptr_t marker = Memory<intptr_t>(
@@ -245,7 +248,7 @@ bool IsInterpreterFramePc(Isolate* isolate, Address pc,
} else if (!isolate->heap()->InSpaceSlow(pc, CODE_SPACE)) {
return false;
}
- interpreter_entry_trampoline =
+ Code interpreter_entry_trampoline =
isolate->heap()->GcSafeFindCodeForInnerPointer(pc);
return interpreter_entry_trampoline.is_interpreter_trampoline_builtin();
} else {
@@ -261,7 +264,7 @@ bool SafeStackFrameIterator::IsNoFrameBytecodeHandlerPc(Isolate* isolate,
// Return false for builds with non-embedded bytecode handlers.
if (Isolate::CurrentEmbeddedBlobCode() == nullptr) return false;
- EmbeddedData d = EmbeddedData::FromBlob();
+ EmbeddedData d = EmbeddedData::FromBlob(isolate);
if (pc < d.InstructionStartOfBytecodeHandlers() ||
pc >= d.InstructionEndOfBytecodeHandlers()) {
// Not a bytecode handler pc address.
@@ -471,10 +474,13 @@ void SafeStackFrameIterator::Advance() {
last_callback_scope = external_callback_scope_;
external_callback_scope_ = external_callback_scope_->previous();
}
- if (frame_->is_java_script() || frame_->is_wasm() ||
- frame_->is_wasm_to_js() || frame_->is_js_to_wasm()) {
+ if (frame_->is_java_script()) break;
+#if V8_ENABLE_WEBASSEMBLY
+ if (frame_->is_wasm() || frame_->is_wasm_to_js() ||
+ frame_->is_js_to_wasm()) {
break;
}
+#endif // V8_ENABLE_WEBASSEMBLY
if (frame_->is_exit() || frame_->is_builtin_exit()) {
// Some of the EXIT frames may have ExternalCallbackScope allocated on
// top of them. In that case the scope corresponds to the first EXIT
@@ -500,23 +506,22 @@ Code GetContainingCode(Isolate* isolate, Address pc) {
Code StackFrame::LookupCode() const {
Code result = GetContainingCode(isolate(), pc());
- DCHECK_GE(pc(), result.InstructionStart());
- DCHECK_LT(pc(), result.InstructionEnd());
+ DCHECK_GE(pc(), result.InstructionStart(isolate(), pc()));
+ DCHECK_LT(pc(), result.InstructionEnd(isolate(), pc()));
return result;
}
void StackFrame::IteratePc(RootVisitor* v, Address* pc_address,
- Address* constant_pool_address, Code holder) {
+ Address* constant_pool_address, Code holder) const {
Address old_pc = ReadPC(pc_address);
DCHECK(ReadOnlyHeap::Contains(holder) ||
holder.GetHeap()->GcSafeCodeContains(holder, old_pc));
- unsigned pc_offset =
- static_cast<unsigned>(old_pc - holder.InstructionStart());
+ unsigned pc_offset = holder.GetOffsetFromInstructionStart(isolate_, old_pc);
Object code = holder;
- v->VisitRootPointer(Root::kTop, nullptr, FullObjectSlot(&code));
+ v->VisitRootPointer(Root::kStackRoots, nullptr, FullObjectSlot(&code));
if (code == holder) return;
holder = Code::unchecked_cast(code);
- Address pc = holder.InstructionStart() + pc_offset;
+ Address pc = holder.InstructionStart(isolate_, old_pc) + pc_offset;
// TODO(v8:10026): avoid replacing a signed pointer.
PointerAuthentication::ReplacePC(pc_address, pc, kSystemPointerSize);
if (FLAG_enable_embedded_constant_pool && constant_pool_address) {
@@ -561,12 +566,13 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
}
}
} else {
+#if V8_ENABLE_WEBASSEMBLY
// If the {pc} does not point into WebAssembly code we can rely on the
// returned {wasm_code} to be null and fall back to {GetContainingCode}.
wasm::WasmCodeRefScope code_ref_scope;
- wasm::WasmCode* wasm_code =
- iterator->isolate()->wasm_engine()->code_manager()->LookupCode(pc);
- if (wasm_code != nullptr) {
+ if (wasm::WasmCode* wasm_code =
+ iterator->isolate()->wasm_engine()->code_manager()->LookupCode(
+ pc)) {
switch (wasm_code->kind()) {
case wasm::WasmCode::kFunction:
return WASM;
@@ -577,52 +583,58 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
default:
UNREACHABLE();
}
- } else {
- // Look up the code object to figure out the type of the stack frame.
- Code code_obj = GetContainingCode(iterator->isolate(), pc);
- if (!code_obj.is_null()) {
- switch (code_obj.kind()) {
- case CodeKind::BUILTIN:
- if (StackFrame::IsTypeMarker(marker)) break;
- if (code_obj.is_interpreter_trampoline_builtin()) {
- return INTERPRETED;
- }
- if (code_obj.is_baseline_leave_frame_builtin()) {
- return BASELINE;
- }
- if (code_obj.is_turbofanned()) {
- // TODO(bmeurer): We treat frames for BUILTIN Code objects as
- // OptimizedFrame for now (all the builtins with JavaScript
- // linkage are actually generated with TurboFan currently, so
- // this is sound).
- return OPTIMIZED;
- }
- return BUILTIN;
- case CodeKind::TURBOFAN:
- case CodeKind::NATIVE_CONTEXT_INDEPENDENT:
- case CodeKind::TURBOPROP:
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+
+ // Look up the code object to figure out the type of the stack frame.
+ Code code_obj = GetContainingCode(iterator->isolate(), pc);
+ if (!code_obj.is_null()) {
+ switch (code_obj.kind()) {
+ case CodeKind::BUILTIN:
+ if (StackFrame::IsTypeMarker(marker)) break;
+ if (code_obj.is_interpreter_trampoline_builtin() ||
+ // Frames for baseline entry trampolines on the stack are still
+ // interpreted frames.
+ code_obj.is_baseline_trampoline_builtin()) {
+ return INTERPRETED;
+ }
+ if (code_obj.is_baseline_leave_frame_builtin()) {
+ return BASELINE;
+ }
+ if (code_obj.is_turbofanned()) {
+ // TODO(bmeurer): We treat frames for BUILTIN Code objects as
+ // OptimizedFrame for now (all the builtins with JavaScript
+ // linkage are actually generated with TurboFan currently, so
+ // this is sound).
return OPTIMIZED;
- case CodeKind::BASELINE:
- return Type::BASELINE;
- case CodeKind::JS_TO_WASM_FUNCTION:
- return JS_TO_WASM;
- case CodeKind::JS_TO_JS_FUNCTION:
- return STUB;
- case CodeKind::C_WASM_ENTRY:
- return C_WASM_ENTRY;
- case CodeKind::WASM_TO_JS_FUNCTION:
- return WASM_TO_JS;
- case CodeKind::WASM_FUNCTION:
- case CodeKind::WASM_TO_CAPI_FUNCTION:
- // Never appear as on-heap {Code} objects.
- UNREACHABLE();
- default:
- // All other types should have an explicit marker
- break;
- }
- } else {
- return NATIVE;
+ }
+ return BUILTIN;
+ case CodeKind::TURBOFAN:
+ case CodeKind::NATIVE_CONTEXT_INDEPENDENT:
+ case CodeKind::TURBOPROP:
+ return OPTIMIZED;
+ case CodeKind::BASELINE:
+ return Type::BASELINE;
+#if V8_ENABLE_WEBASSEMBLY
+ case CodeKind::JS_TO_WASM_FUNCTION:
+ return JS_TO_WASM;
+ case CodeKind::JS_TO_JS_FUNCTION:
+ return STUB;
+ case CodeKind::C_WASM_ENTRY:
+ return C_WASM_ENTRY;
+ case CodeKind::WASM_TO_JS_FUNCTION:
+ return WASM_TO_JS;
+ case CodeKind::WASM_FUNCTION:
+ case CodeKind::WASM_TO_CAPI_FUNCTION:
+ // Never appear as on-heap {Code} objects.
+ UNREACHABLE();
+#endif // V8_ENABLE_WEBASSEMBLY
+ default:
+ // All other types should have an explicit marker
+ break;
}
+ } else {
+ return NATIVE;
}
}
DCHECK(StackFrame::IsTypeMarker(marker));
@@ -638,12 +650,14 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
case STUB:
case INTERNAL:
case CONSTRUCT:
+#if V8_ENABLE_WEBASSEMBLY
case WASM_TO_JS:
case WASM:
case WASM_COMPILE_LAZY:
case WASM_EXIT:
case WASM_DEBUG_BREAK:
case JS_TO_WASM:
+#endif // V8_ENABLE_WEBASSEMBLY
return candidate;
case OPTIMIZED:
case INTERPRETED:
@@ -694,11 +708,13 @@ StackFrame::Type EntryFrame::GetCallerState(State* state) const {
return ExitFrame::GetStateForFramePointer(fp, state);
}
+#if V8_ENABLE_WEBASSEMBLY
StackFrame::Type CWasmEntryFrame::GetCallerState(State* state) const {
const int offset = CWasmEntryFrameConstants::kCEntryFPOffset;
Address fp = Memory<Address>(this->fp() + offset);
return ExitFrame::GetStateForFramePointer(fp, state);
}
+#endif // V8_ENABLE_WEBASSEMBLY
Code ConstructEntryFrame::unchecked_code() const {
return isolate()->heap()->builtin(Builtins::kJSConstructEntry);
@@ -726,8 +742,12 @@ void ExitFrame::Iterate(RootVisitor* v) const {
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
if (fp == 0) return NONE;
StackFrame::Type type = ComputeFrameType(fp);
- Address sp = (type == WASM_EXIT) ? WasmExitFrame::ComputeStackPointer(fp)
- : ExitFrame::ComputeStackPointer(fp);
+#if V8_ENABLE_WEBASSEMBLY
+ Address sp = type == WASM_EXIT ? WasmExitFrame::ComputeStackPointer(fp)
+ : ExitFrame::ComputeStackPointer(fp);
+#else
+ Address sp = ExitFrame::ComputeStackPointer(fp);
+#endif // V8_ENABLE_WEBASSEMBLY
FillState(fp, sp, state);
DCHECK_NE(*state->pc_address, kNullAddress);
return type;
@@ -746,12 +766,16 @@ StackFrame::Type ExitFrame::ComputeFrameType(Address fp) {
intptr_t marker_int = bit_cast<intptr_t>(marker);
StackFrame::Type frame_type = static_cast<StackFrame::Type>(marker_int >> 1);
- if (frame_type == EXIT || frame_type == BUILTIN_EXIT ||
- frame_type == WASM_EXIT) {
- return frame_type;
+ switch (frame_type) {
+ case BUILTIN_EXIT:
+ return BUILTIN_EXIT;
+#if V8_ENABLE_WEBASSEMBLY
+ case WASM_EXIT:
+ return WASM_EXIT;
+#endif // V8_ENABLE_WEBASSEMBLY
+ default:
+ return EXIT;
}
-
- return EXIT;
}
Address ExitFrame::ComputeStackPointer(Address fp) {
@@ -760,6 +784,7 @@ Address ExitFrame::ComputeStackPointer(Address fp) {
return Memory<Address>(fp + ExitFrameConstants::kSPOffset);
}
+#if V8_ENABLE_WEBASSEMBLY
Address WasmExitFrame::ComputeStackPointer(Address fp) {
// For WASM_EXIT frames, {sp} is only needed for finding the PC slot,
// everything else is handled via safepoint information.
@@ -768,6 +793,7 @@ Address WasmExitFrame::ComputeStackPointer(Address fp) {
fp + WasmExitFrameConstants::kCallingPCOffset);
return sp;
}
+#endif // V8_ENABLE_WEBASSEMBLY
void ExitFrame::FillState(Address fp, Address sp, State* state) {
state->sp = sp;
@@ -875,9 +901,9 @@ Object CommonFrame::context() const {
}
int CommonFrame::position() const {
- AbstractCode code = AbstractCode::cast(LookupCode());
- int code_offset = static_cast<int>(pc() - code.InstructionStart());
- return code.SourcePosition(code_offset);
+ Code code = LookupCode();
+ int code_offset = code.GetOffsetFromInstructionStart(isolate(), pc());
+ return AbstractCode::cast(code).SourcePosition(code_offset);
}
int CommonFrame::ComputeExpressionsCount() const {
@@ -911,14 +937,17 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
// Find the code and compute the safepoint information.
Address inner_pointer = pc();
- const wasm::WasmCode* wasm_code =
- isolate()->wasm_engine()->code_manager()->LookupCode(inner_pointer);
SafepointEntry safepoint_entry;
- uint32_t stack_slots;
+ uint32_t stack_slots = 0;
Code code;
bool has_tagged_outgoing_params = false;
uint32_t tagged_parameter_slots = 0;
- if (wasm_code != nullptr) {
+ bool is_wasm = false;
+
+#if V8_ENABLE_WEBASSEMBLY
+ if (auto* wasm_code =
+ isolate()->wasm_engine()->code_manager()->LookupCode(inner_pointer)) {
+ is_wasm = true;
SafepointTable table(wasm_code);
safepoint_entry = table.FindEntry(inner_pointer);
stack_slots = wasm_code->stack_slots();
@@ -926,30 +955,36 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
wasm_code->kind() != wasm::WasmCode::kFunction &&
wasm_code->kind() != wasm::WasmCode::kWasmToCapiWrapper;
tagged_parameter_slots = wasm_code->tagged_parameter_slots();
- } else {
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+
+ if (!is_wasm) {
InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* entry =
isolate()->inner_pointer_to_code_cache()->GetCacheEntry(inner_pointer);
if (!entry->safepoint_entry.is_valid()) {
- entry->safepoint_entry = entry->code.GetSafepointEntry(inner_pointer);
+ entry->safepoint_entry =
+ entry->code.GetSafepointEntry(isolate(), inner_pointer);
DCHECK(entry->safepoint_entry.is_valid());
} else {
DCHECK(entry->safepoint_entry.Equals(
- entry->code.GetSafepointEntry(inner_pointer)));
+ entry->code.GetSafepointEntry(isolate(), inner_pointer)));
}
code = entry->code;
safepoint_entry = entry->safepoint_entry;
stack_slots = code.stack_slots();
+ has_tagged_outgoing_params = code.has_tagged_outgoing_params();
+
+#if V8_ENABLE_WEBASSEMBLY
// With inlined JS-to-Wasm calls, we can be in an OptimizedFrame and
// directly call a Wasm function from JavaScript. In this case the
// parameters we pass to the callee are not tagged.
wasm::WasmCode* wasm_callee =
isolate()->wasm_engine()->code_manager()->LookupCode(callee_pc());
bool is_wasm_call = (wasm_callee != nullptr);
-
- has_tagged_outgoing_params =
- !is_wasm_call && code.has_tagged_outgoing_params();
+ if (is_wasm_call) has_tagged_outgoing_params = false;
+#endif // V8_ENABLE_WEBASSEMBLY
}
uint32_t slot_space = stack_slots * kSystemPointerSize;
@@ -971,11 +1006,14 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
case STUB:
case INTERNAL:
case CONSTRUCT:
+#if V8_ENABLE_WEBASSEMBLY
case JS_TO_WASM:
case C_WASM_ENTRY:
case WASM_DEBUG_BREAK:
+#endif // V8_ENABLE_WEBASSEMBLY
frame_header_size = TypedFrameConstants::kFixedFrameSizeFromFp;
break;
+#if V8_ENABLE_WEBASSEMBLY
case WASM_TO_JS:
case WASM:
case WASM_COMPILE_LAZY:
@@ -989,6 +1027,7 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
"WasmExitFrame has one slot more than WasmFrame");
frame_header_size = WasmFrameConstants::kFixedFrameSizeFromFp;
break;
+#endif // V8_ENABLE_WEBASSEMBLY
case OPTIMIZED:
case INTERPRETED:
case BASELINE:
@@ -1016,7 +1055,7 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
// Visit the rest of the parameters if they are tagged.
if (has_tagged_outgoing_params) {
- v->VisitRootPointers(Root::kTop, nullptr, parameters_base,
+ v->VisitRootPointers(Root::kStackRoots, nullptr, parameters_base,
parameters_limit);
}
@@ -1042,7 +1081,7 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
DecompressTaggedPointer(isolate(), compressed_value);
}
#endif
- v->VisitRootPointer(Root::kTop, nullptr, spill_slot);
+ v->VisitRootPointer(Root::kStackRoots, nullptr, spill_slot);
}
}
@@ -1055,7 +1094,7 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
FullObjectSlot tagged_parameter_limit =
tagged_parameter_base + tagged_parameter_slots;
- v->VisitRootPointers(Root::kTop, nullptr, tagged_parameter_base,
+ v->VisitRootPointers(Root::kStackRoots, nullptr, tagged_parameter_base,
tagged_parameter_limit);
}
@@ -1073,7 +1112,7 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
// untagged, we don't need to visit it.
frame_header_base += 1;
}
- v->VisitRootPointers(Root::kTop, nullptr, frame_header_base,
+ v->VisitRootPointers(Root::kStackRoots, nullptr, frame_header_base,
frame_header_limit);
}
@@ -1086,7 +1125,7 @@ int StubFrame::LookupExceptionHandlerInTable() {
DCHECK(code.is_turbofanned());
DCHECK_EQ(code.kind(), CodeKind::BUILTIN);
HandlerTable table(code);
- int pc_offset = static_cast<int>(pc() - code.InstructionStart());
+ int pc_offset = code.GetOffsetFromInstructionStart(isolate(), pc());
return table.LookupReturn(pc_offset);
}
@@ -1149,7 +1188,7 @@ void CommonFrameWithJSLinkage::Summarize(
std::vector<FrameSummary>* functions) const {
DCHECK(functions->empty());
Code code = LookupCode();
- int offset = static_cast<int>(pc() - code.InstructionStart());
+ int offset = code.GetOffsetFromInstructionStart(isolate(), pc());
Handle<AbstractCode> abstract_code(AbstractCode::cast(code), isolate());
Handle<FixedArray> params = GetParameters();
FrameSummary::JavaScriptFrameSummary summary(
@@ -1237,7 +1276,7 @@ void JavaScriptFrame::PrintTop(Isolate* isolate, FILE* file, bool print_args,
code_offset = iframe->GetBytecodeOffset();
} else {
Code code = frame->unchecked_code();
- code_offset = static_cast<int>(frame->pc() - code.InstructionStart());
+ code_offset = code.GetOffsetFromInstructionStart(isolate, frame->pc());
}
PrintFunctionAndOffset(function, function.abstract_code(isolate),
code_offset, file, print_line_number);
@@ -1413,6 +1452,7 @@ Handle<Context> FrameSummary::JavaScriptFrameSummary::native_context() const {
return handle(function_->context().native_context(), isolate());
}
+#if V8_ENABLE_WEBASSEMBLY
FrameSummary::WasmFrameSummary::WasmFrameSummary(
Isolate* isolate, Handle<WasmInstanceObject> instance, wasm::WasmCode* code,
int code_offset, bool at_to_number_conversion)
@@ -1455,6 +1495,7 @@ Handle<String> FrameSummary::WasmFrameSummary::FunctionName() const {
Handle<Context> FrameSummary::WasmFrameSummary::native_context() const {
return handle(wasm_instance()->native_context(), isolate());
}
+#endif // V8_ENABLE_WEBASSEMBLY
FrameSummary::~FrameSummary() {
#define FRAME_SUMMARY_DESTR(kind, type, field, desc) \
@@ -1495,6 +1536,7 @@ FrameSummary FrameSummary::Get(const CommonFrame* frame, int index) {
return frames[index];
}
+#if V8_ENABLE_WEBASSEMBLY
#define FRAME_SUMMARY_DISPATCH(ret, name) \
ret FrameSummary::name() const { \
switch (base_.kind()) { \
@@ -1506,6 +1548,13 @@ FrameSummary FrameSummary::Get(const CommonFrame* frame, int index) {
UNREACHABLE(); \
} \
}
+#else
+#define FRAME_SUMMARY_DISPATCH(ret, name) \
+ ret FrameSummary::name() const { \
+ DCHECK_EQ(JAVA_SCRIPT, base_.kind()); \
+ return java_script_summary_.name(); \
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
FRAME_SUMMARY_DISPATCH(Handle<Object>, receiver)
FRAME_SUMMARY_DISPATCH(int, code_offset)
@@ -1608,14 +1657,14 @@ int OptimizedFrame::LookupExceptionHandlerInTable(
DCHECK_NULL(prediction);
Code code = LookupCode();
HandlerTable table(code);
- int pc_offset = static_cast<int>(pc() - code.InstructionStart());
+ int pc_offset = code.GetOffsetFromInstructionStart(isolate(), pc());
DCHECK_NULL(data); // Data is not used and will not return a value.
// When the return pc has been replaced by a trampoline there won't be
// a handler for this trampoline. Thus we need to use the return pc that
// _used to be_ on the stack to get the right ExceptionHandler.
if (CodeKindCanDeoptimize(code.kind()) && code.marked_for_deoptimization()) {
- SafepointTable safepoints(code);
+ SafepointTable safepoints(isolate(), pc(), code);
pc_offset = safepoints.find_return_pc(pc_offset);
}
return table.LookupReturn(pc_offset);
@@ -1631,13 +1680,13 @@ DeoptimizationData OptimizedFrame::GetDeoptimizationData(
// The code object may have been replaced by lazy deoptimization. Fall
// back to a slow search in this case to find the original optimized
// code object.
- if (!code.contains(pc())) {
+ if (!code.contains(isolate(), pc())) {
code = isolate()->heap()->GcSafeFindCodeForInnerPointer(pc());
}
DCHECK(!code.is_null());
DCHECK(CodeKindCanDeoptimize(code.kind()));
- SafepointEntry safepoint_entry = code.GetSafepointEntry(pc());
+ SafepointEntry safepoint_entry = code.GetSafepointEntry(isolate(), pc());
if (safepoint_entry.has_deoptimization_index()) {
*deopt_index = safepoint_entry.deoptimization_index();
return DeoptimizationData::cast(code.deoptimization_data());
@@ -1784,11 +1833,13 @@ void InterpretedFrame::PatchBytecodeArray(BytecodeArray bytecode_array) {
}
int BaselineFrame::GetBytecodeOffset() const {
- return LookupCode().GetBytecodeOffsetForBaselinePC(this->pc());
+ return LookupCode().GetBytecodeOffsetForBaselinePC(this->pc(),
+ GetBytecodeArray());
}
intptr_t BaselineFrame::GetPCForBytecodeOffset(int bytecode_offset) const {
- return LookupCode().GetBaselinePCForBytecodeOffset(bytecode_offset);
+ return LookupCode().GetBaselineStartPCForBytecodeOffset(bytecode_offset,
+ GetBytecodeArray());
}
void BaselineFrame::PatchContext(Context value) {
@@ -1806,8 +1857,10 @@ int BuiltinFrame::ComputeParametersCount() const {
return Smi::ToInt(Object(base::Memory<Address>(fp() + offset)));
}
+#if V8_ENABLE_WEBASSEMBLY
void WasmFrame::Print(StringStream* accumulator, PrintMode mode,
int index) const {
+ wasm::WasmCodeRefScope code_ref_scope;
PrintIndex(accumulator, mode, index);
accumulator->Add("WASM [");
accumulator->PrintName(script().name());
@@ -1920,8 +1973,24 @@ int WasmFrame::LookupExceptionHandlerInTable() {
}
void WasmDebugBreakFrame::Iterate(RootVisitor* v) const {
- // Nothing to iterate here. This will change once we support references in
- // Liftoff.
+ DCHECK(caller_pc());
+ wasm::WasmCode* code =
+ isolate()->wasm_engine()->code_manager()->LookupCode(caller_pc());
+ DCHECK(code);
+ SafepointTable table(code);
+ SafepointEntry safepoint_entry = table.FindEntry(caller_pc());
+ if (!safepoint_entry.has_register_bits()) return;
+ uint32_t register_bits = safepoint_entry.register_bits();
+
+ while (register_bits != 0) {
+ int reg_code = base::bits::CountTrailingZeros(register_bits);
+ register_bits &= ~(1 << reg_code);
+ FullObjectSlot spill_slot(&Memory<Address>(
+ fp() +
+ WasmDebugBreakFrameConstants::GetPushedGpRegisterOffset(reg_code)));
+
+ v->VisitRootPointer(Root::kStackRoots, nullptr, spill_slot);
+ }
}
void WasmDebugBreakFrame::Print(StringStream* accumulator, PrintMode mode,
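
The Iterate loop added above visits one spill slot per set bit in the safepoint's register mask. A minimal, self-contained sketch of that bit-peeling idiom (plain C++, not V8 code; CountTrailingZeros here stands in for base::bits::CountTrailingZeros):

#include <cstdint>
#include <cstdio>

// Portable stand-in for a count-trailing-zeros intrinsic. Only called with a
// non-zero argument, exactly as in the loop above.
int CountTrailingZeros(uint32_t x) {
  int n = 0;
  while ((x & 1u) == 0) {
    x >>= 1;
    ++n;
  }
  return n;
}

int main() {
  uint32_t register_bits = 0b11000111;  // e.g. a mask of pushed GP registers
  while (register_bits != 0) {
    int reg_code = CountTrailingZeros(register_bits);
    register_bits &= ~(1u << reg_code);  // clear the bit we just handled
    std::printf("visit spill slot for register code %d\n", reg_code);
  }
  return 0;
}
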
@@ -1962,7 +2031,8 @@ void JsToWasmFrame::Iterate(RootVisitor* v) const {
FullObjectSlot spill_slot_base(&Memory<Address>(sp()));
FullObjectSlot spill_slot_limit(
&Memory<Address>(sp() + scan_count * kSystemPointerSize));
- v->VisitRootPointers(Root::kTop, nullptr, spill_slot_base, spill_slot_limit);
+ v->VisitRootPointers(Root::kStackRoots, nullptr, spill_slot_base,
+ spill_slot_limit);
}
WasmInstanceObject WasmCompileLazyFrame::wasm_instance() const {
@@ -1978,9 +2048,10 @@ void WasmCompileLazyFrame::Iterate(RootVisitor* v) const {
const int header_size = WasmCompileLazyFrameConstants::kFixedFrameSizeFromFp;
FullObjectSlot base(&Memory<Address>(sp()));
FullObjectSlot limit(&Memory<Address>(fp() - header_size));
- v->VisitRootPointers(Root::kTop, nullptr, base, limit);
- v->VisitRootPointer(Root::kTop, nullptr, wasm_instance_slot());
+ v->VisitRootPointers(Root::kStackRoots, nullptr, base, limit);
+ v->VisitRootPointer(Root::kStackRoots, nullptr, wasm_instance_slot());
}
+#endif // V8_ENABLE_WEBASSEMBLY
namespace {
@@ -2122,14 +2193,14 @@ void CommonFrame::IterateExpressions(RootVisitor* v) const {
FullObjectSlot base(&Memory<Address>(sp()));
FullObjectSlot limit(&Memory<Address>(fp() + last_object_offset) + 1);
if (StackFrame::IsTypeMarker(marker)) {
- v->VisitRootPointers(Root::kTop, nullptr, base, limit);
+ v->VisitRootPointers(Root::kStackRoots, nullptr, base, limit);
} else {
// The frame contains the actual argument count (intptr) that should not be
// visited.
FullObjectSlot argc(
&Memory<Address>(fp() + StandardFrameConstants::kArgCOffset));
- v->VisitRootPointers(Root::kTop, nullptr, base, argc);
- v->VisitRootPointers(Root::kTop, nullptr, argc + 1, limit);
+ v->VisitRootPointers(Root::kStackRoots, nullptr, base, argc);
+ v->VisitRootPointers(Root::kStackRoots, nullptr, argc + 1, limit);
}
}
@@ -2154,10 +2225,13 @@ void InternalFrame::Iterate(RootVisitor* v) const {
namespace {
+// Predictably converts PC to uint32 by calculating the offset of the PC from
+// the embedded builtins start or from the respective MemoryChunk.
uint32_t PcAddressForHashing(Isolate* isolate, Address address) {
- if (InstructionStream::PcIsOffHeap(isolate, address)) {
- // Ensure that we get predictable hashes for addresses in embedded code.
- return EmbeddedData::FromBlob(isolate).AddressForHashing(address);
+ uint32_t hashable_address;
+ if (InstructionStream::TryGetAddressForHashing(isolate, address,
+ &hashable_address)) {
+ return hashable_address;
}
return ObjectAddressForHashing(address);
}
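
The rewritten PcAddressForHashing prefers an offset-based value so the hash stays stable under ASLR. A hedged sketch of that fallback shape, with hypothetical names (BlobRegion, kChunkSize) that are illustrative only, not V8 APIs:

#include <cstdint>

// Hypothetical descriptor for the embedded-builtins code blob.
struct BlobRegion {
  uintptr_t start;
  uintptr_t size;
};

uint32_t AddressForHashing(const BlobRegion& blob, uintptr_t address) {
  if (address >= blob.start && address < blob.start + blob.size) {
    // Inside the blob: the blob-relative offset is stable across runs.
    return static_cast<uint32_t>(address - blob.start);
  }
  // Otherwise use the offset within an (assumed) power-of-two sized chunk,
  // mirroring the "offset from the respective MemoryChunk" idea above.
  constexpr uintptr_t kChunkSize = 256 * 1024;
  return static_cast<uint32_t>(address & (kChunkSize - 1));
}
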
@@ -2194,13 +2268,11 @@ InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) {
namespace {
-int ArgumentPaddingSlots(int arg_count) {
- return ShouldPadArguments(arg_count) ? 1 : 0;
-}
-
// Some architectures need to push padding together with the TOS register
// in order to maintain stack alignment.
-constexpr int TopOfStackRegisterPaddingSlots() { return kPadArguments ? 1 : 0; }
+constexpr int TopOfStackRegisterPaddingSlots() {
+ return ArgumentPaddingSlots(1);
+}
bool BuiltinContinuationModeIsWithCatch(BuiltinContinuationMode mode) {
switch (mode) {
@@ -2247,6 +2319,13 @@ UnoptimizedFrameInfo::UnoptimizedFrameInfo(int parameters_count_with_receiver,
frame_size_in_bytes_ = frame_size_in_bytes_without_fixed_ + fixed_frame_size;
}
+// static
+uint32_t UnoptimizedFrameInfo::GetStackSizeForAdditionalArguments(
+ int parameters_count) {
+ return (parameters_count + ArgumentPaddingSlots(parameters_count)) *
+ kSystemPointerSize;
+}
+
ConstructStubFrameInfo::ConstructStubFrameInfo(int translation_height,
bool is_topmost,
FrameInfoKind frame_info_kind) {
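
The new GetStackSizeForAdditionalArguments rounds the argument count up by the architecture's padding rule before scaling by the pointer size. A small sketch under stated assumptions (64-bit pointers, odd counts padded by one slot to keep 16-byte alignment; the real ArgumentPaddingSlots rule is per-architecture):

#include <cstdint>

constexpr int kSystemPointerSize = 8;  // assumption: 64-bit target

// Assumed padding rule for a platform with kPadArguments enabled.
constexpr int ArgumentPaddingSlots(int arg_count) {
  return (arg_count % 2 != 0) ? 1 : 0;
}

constexpr uint32_t GetStackSizeForAdditionalArguments(int parameters_count) {
  return (parameters_count + ArgumentPaddingSlots(parameters_count)) *
         kSystemPointerSize;
}

static_assert(GetStackSizeForAdditionalArguments(3) == 32,
              "3 argument slots plus 1 padding slot");
static_assert(GetStackSizeForAdditionalArguments(4) == 32,
              "4 argument slots, no padding");
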
diff --git a/deps/v8/src/execution/frames.h b/deps/v8/src/execution/frames.h
index eef201914b1..32157b4cc12 100644
--- a/deps/v8/src/execution/frames.h
+++ b/deps/v8/src/execution/frames.h
@@ -96,13 +96,13 @@ class StackHandler {
V(ENTRY, EntryFrame) \
V(CONSTRUCT_ENTRY, ConstructEntryFrame) \
V(EXIT, ExitFrame) \
- V(WASM, WasmFrame) \
- V(WASM_TO_JS, WasmToJsFrame) \
- V(JS_TO_WASM, JsToWasmFrame) \
- V(WASM_DEBUG_BREAK, WasmDebugBreakFrame) \
- V(C_WASM_ENTRY, CWasmEntryFrame) \
- V(WASM_EXIT, WasmExitFrame) \
- V(WASM_COMPILE_LAZY, WasmCompileLazyFrame) \
+ IF_WASM(V, WASM, WasmFrame) \
+ IF_WASM(V, WASM_TO_JS, WasmToJsFrame) \
+ IF_WASM(V, JS_TO_WASM, JsToWasmFrame) \
+ IF_WASM(V, WASM_DEBUG_BREAK, WasmDebugBreakFrame) \
+ IF_WASM(V, C_WASM_ENTRY, CWasmEntryFrame) \
+ IF_WASM(V, WASM_EXIT, WasmExitFrame) \
+ IF_WASM(V, WASM_COMPILE_LAZY, WasmCompileLazyFrame) \
V(INTERPRETED, InterpretedFrame) \
V(BASELINE, BaselineFrame) \
V(OPTIMIZED, OptimizedFrame) \
@@ -216,9 +216,14 @@ class StackFrame {
}
bool is_interpreted() const { return type() == INTERPRETED; }
bool is_baseline() const { return type() == BASELINE; }
+#if V8_ENABLE_WEBASSEMBLY
bool is_wasm() const { return this->type() == WASM; }
+ bool is_c_wasm_entry() const { return type() == C_WASM_ENTRY; }
bool is_wasm_compile_lazy() const { return type() == WASM_COMPILE_LAZY; }
bool is_wasm_debug_break() const { return type() == WASM_DEBUG_BREAK; }
+ bool is_wasm_to_js() const { return type() == WASM_TO_JS; }
+ bool is_js_to_wasm() const { return type() == JS_TO_WASM; }
+#endif // V8_ENABLE_WEBASSEMBLY
bool is_builtin() const { return type() == BUILTIN; }
bool is_internal() const { return type() == INTERNAL; }
bool is_builtin_continuation() const {
@@ -239,8 +244,6 @@ class StackFrame {
return t >= INTERPRETED && t <= OPTIMIZED;
}
bool is_java_script() const { return IsJavaScript(type()); }
- bool is_wasm_to_js() const { return type() == WASM_TO_JS; }
- bool is_js_to_wasm() const { return type() == JS_TO_WASM; }
// Accessors.
Address sp() const { return state_.sp; }
@@ -283,8 +286,8 @@ class StackFrame {
V8_EXPORT_PRIVATE Code LookupCode() const;
virtual void Iterate(RootVisitor* v) const = 0;
- static void IteratePc(RootVisitor* v, Address* pc_address,
- Address* constant_pool_address, Code holder);
+ void IteratePc(RootVisitor* v, Address* pc_address,
+ Address* constant_pool_address, Code holder) const;
// Sets a callback function for return-address rewriting profilers
// to resolve the location of a return address to the location of the
@@ -348,7 +351,7 @@ class V8_EXPORT_PRIVATE FrameSummary {
// Subclasses for the different summary kinds:
#define FRAME_SUMMARY_VARIANTS(F) \
F(JAVA_SCRIPT, JavaScriptFrameSummary, java_script_summary_, JavaScript) \
- F(WASM, WasmFrameSummary, wasm_summary_, Wasm)
+ IF_WASM(F, WASM, WasmFrameSummary, wasm_summary_, Wasm)
#define FRAME_SUMMARY_KIND(kind, type, field, desc) kind,
enum Kind { FRAME_SUMMARY_VARIANTS(FRAME_SUMMARY_KIND) };
@@ -398,6 +401,7 @@ class V8_EXPORT_PRIVATE FrameSummary {
Handle<FixedArray> parameters_;
};
+#if V8_ENABLE_WEBASSEMBLY
class WasmFrameSummary : public FrameSummaryBase {
public:
WasmFrameSummary(Isolate*, Handle<WasmInstanceObject>, wasm::WasmCode*,
@@ -424,6 +428,7 @@ class V8_EXPORT_PRIVATE FrameSummary {
wasm::WasmCode* const code_;
int code_offset_;
};
+#endif // V8_ENABLE_WEBASSEMBLY
#define FRAME_SUMMARY_CONS(kind, type, field, desc) \
FrameSummary(type summ) : field(summ) {} // NOLINT
@@ -943,6 +948,7 @@ class BuiltinFrame final : public TypedFrameWithJSLinkage {
friend class StackFrameIteratorBase;
};
+#if V8_ENABLE_WEBASSEMBLY
class WasmFrame : public TypedFrame {
public:
Type type() const override { return WASM; }
@@ -1074,6 +1080,7 @@ class WasmCompileLazyFrame : public TypedFrame {
private:
friend class StackFrameIteratorBase;
};
+#endif // V8_ENABLE_WEBASSEMBLY
class InternalFrame : public TypedFrame {
public:
@@ -1266,7 +1273,9 @@ class V8_EXPORT_PRIVATE StackTraceFrameIterator {
inline CommonFrame* Reframe();
inline bool is_javascript() const;
+#if V8_ENABLE_WEBASSEMBLY
inline bool is_wasm() const;
+#endif // V8_ENABLE_WEBASSEMBLY
inline JavaScriptFrame* javascript_frame() const;
private:
@@ -1354,6 +1363,8 @@ class UnoptimizedFrameInfo {
FrameInfoKind::kConservative};
}
+ static uint32_t GetStackSizeForAdditionalArguments(int parameters_count);
+
uint32_t register_stack_slot_count() const {
return register_stack_slot_count_;
}
diff --git a/deps/v8/src/execution/futex-emulation.cc b/deps/v8/src/execution/futex-emulation.cc
index f93eaacf03c..2206b98c9f0 100644
--- a/deps/v8/src/execution/futex-emulation.cc
+++ b/deps/v8/src/execution/futex-emulation.cc
@@ -517,13 +517,6 @@ FutexWaitListNode::FutexWaitListNode(
Utils::ToLocal(Handle<Context>::cast(native_context));
native_context_.Reset(v8_isolate, local_native_context);
native_context_.SetWeak();
-
- // Add the Promise into the NativeContext's atomics_waitasync_promises set, so
- // that the list keeps it alive.
- Handle<OrderedHashSet> promises(native_context->atomics_waitasync_promises(),
- isolate);
- promises = OrderedHashSet::Add(isolate, promises, promise).ToHandleChecked();
- native_context->set_atomics_waitasync_promises(*promises);
}
template <typename T>
@@ -531,83 +524,112 @@ Object FutexEmulation::WaitAsync(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer,
size_t addr, T value, bool use_timeout,
int64_t rel_timeout_ns) {
- DCHECK(FLAG_harmony_atomics_waitasync);
base::TimeDelta rel_timeout =
base::TimeDelta::FromNanoseconds(rel_timeout_ns);
Factory* factory = isolate->factory();
Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
+ Handle<JSObject> promise_capability = factory->NewJSPromise();
- std::shared_ptr<BackingStore> backing_store = array_buffer->GetBackingStore();
+ enum { kNotEqual, kTimedOut, kAsync } result_kind;
+ {
+ // 16. Perform EnterCriticalSection(WL).
+ NoGarbageCollectionMutexGuard lock_guard(g_mutex.Pointer());
+
+ std::shared_ptr<BackingStore> backing_store =
+ array_buffer->GetBackingStore();
+
+ // 17. Let w be ! AtomicLoad(typedArray, i).
+ std::atomic<T>* p = reinterpret_cast<std::atomic<T>*>(
+ static_cast<int8_t*>(backing_store->buffer_start()) + addr);
+ if (p->load() != value) {
+ result_kind = kNotEqual;
+ } else if (use_timeout && rel_timeout_ns == 0) {
+ result_kind = kTimedOut;
+ } else {
+ result_kind = kAsync;
- // 17. Let w be ! AtomicLoad(typedArray, i).
- std::atomic<T>* p = reinterpret_cast<std::atomic<T>*>(
- static_cast<int8_t*>(backing_store->buffer_start()) + addr);
- if (p->load() != value) {
- // 18. If v is not equal to w, then
- // a. Perform LeaveCriticalSection(WL).
- // ...
- // c. Perform ! CreateDataPropertyOrThrow(resultObject, "async", false).
- // d. Perform ! CreateDataPropertyOrThrow(resultObject, "value",
- // "not-equal").
- // e. Return resultObject.
- CHECK(
- JSReceiver::CreateDataProperty(isolate, result, factory->async_string(),
- factory->false_value(), Just(kDontThrow))
- .FromJust());
- CHECK(JSReceiver::CreateDataProperty(
- isolate, result, factory->value_string(),
- factory->not_equal_string(), Just(kDontThrow))
- .FromJust());
- return *result;
- }
-
- if (use_timeout && rel_timeout_ns == 0) {
- // 19. If t is 0 and mode is async, then
- // ...
- // b. Perform LeaveCriticalSection(WL).
- // c. Perform ! CreateDataPropertyOrThrow(resultObject, "async", false).
- // d. Perform ! CreateDataPropertyOrThrow(resultObject, "value",
- // "timed-out").
- // e. Return resultObject.
- CHECK(
- JSReceiver::CreateDataProperty(isolate, result, factory->async_string(),
- factory->false_value(), Just(kDontThrow))
- .FromJust());
- CHECK(JSReceiver::CreateDataProperty(
- isolate, result, factory->value_string(),
- factory->timed_out_string(), Just(kDontThrow))
- .FromJust());
- return *result;
+ FutexWaitListNode* node = new FutexWaitListNode(
+ backing_store, addr, promise_capability, isolate);
+
+ if (use_timeout) {
+ node->async_timeout_time_ = base::TimeTicks::Now() + rel_timeout;
+ auto task = std::make_unique<AsyncWaiterTimeoutTask>(
+ node->cancelable_task_manager_, node);
+ node->timeout_task_id_ = task->id();
+ node->task_runner_->PostNonNestableDelayedTask(
+ std::move(task), rel_timeout.InSecondsF());
+ }
+
+ g_wait_list.Pointer()->AddNode(node);
+ }
+
+ // Leaving the block collapses the following steps:
+ // 18.a. Perform LeaveCriticalSection(WL).
+ // 19.b. Perform LeaveCriticalSection(WL).
+ // 24. Perform LeaveCriticalSection(WL).
}
- Handle<JSObject> promise_capability = factory->NewJSPromise();
- FutexWaitListNode* node =
- new FutexWaitListNode(backing_store, addr, promise_capability, isolate);
+ switch (result_kind) {
+ case kNotEqual:
+ // 18. If v is not equal to w, then
+ // ...
+ // c. Perform ! CreateDataPropertyOrThrow(resultObject, "async", false).
+ // d. Perform ! CreateDataPropertyOrThrow(resultObject, "value",
+ // "not-equal").
+ // e. Return resultObject.
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, result, factory->async_string(),
+ factory->false_value(), Just(kDontThrow))
+ .FromJust());
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, result, factory->value_string(),
+ factory->not_equal_string(), Just(kDontThrow))
+ .FromJust());
+ break;
- {
- NoGarbageCollectionMutexGuard lock_guard(g_mutex.Pointer());
- g_wait_list.Pointer()->AddNode(node);
+ case kTimedOut:
+ // 19. If t is 0 and mode is async, then
+ // ...
+ // c. Perform ! CreateDataPropertyOrThrow(resultObject, "async", false).
+ // d. Perform ! CreateDataPropertyOrThrow(resultObject, "value",
+ // "timed-out").
+ // e. Return resultObject.
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, result, factory->async_string(),
+ factory->false_value(), Just(kDontThrow))
+ .FromJust());
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, result, factory->value_string(),
+ factory->timed_out_string(), Just(kDontThrow))
+ .FromJust());
+ break;
+
+ case kAsync:
+ // Add the Promise into the NativeContext's atomics_waitasync_promises
+ // set, so that the list keeps it alive.
+ Handle<NativeContext> native_context(isolate->native_context());
+ Handle<OrderedHashSet> promises(
+ native_context->atomics_waitasync_promises(), isolate);
+ promises = OrderedHashSet::Add(isolate, promises, promise_capability)
+ .ToHandleChecked();
+ native_context->set_atomics_waitasync_promises(*promises);
+
+ // 26. Perform ! CreateDataPropertyOrThrow(resultObject, "async", true).
+ // 27. Perform ! CreateDataPropertyOrThrow(resultObject, "value",
+ // promiseCapability.[[Promise]]).
+ // 28. Return resultObject.
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, result, factory->async_string(), factory->true_value(),
+ Just(kDontThrow))
+ .FromJust());
+ CHECK(JSReceiver::CreateDataProperty(isolate, result,
+ factory->value_string(),
+ promise_capability, Just(kDontThrow))
+ .FromJust());
+ break;
}
- if (use_timeout) {
- node->async_timeout_time_ = base::TimeTicks::Now() + rel_timeout;
- auto task = std::make_unique<AsyncWaiterTimeoutTask>(
- node->cancelable_task_manager_, node);
- node->timeout_task_id_ = task->id();
- node->task_runner_->PostNonNestableDelayedTask(std::move(task),
- rel_timeout.InSecondsF());
- }
-
- // 26. Perform ! CreateDataPropertyOrThrow(resultObject, "async", true).
- // 27. Perform ! CreateDataPropertyOrThrow(resultObject, "value",
- // promiseCapability.[[Promise]]).
- // 28. Return resultObject.
- CHECK(JSReceiver::CreateDataProperty(isolate, result, factory->async_string(),
- factory->true_value(), Just(kDontThrow))
- .FromJust());
- CHECK(JSReceiver::CreateDataProperty(isolate, result, factory->value_string(),
- promise_capability, Just(kDontThrow))
- .FromJust());
+
return *result;
}
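
The restructured WaitAsync decides the outcome while holding the critical section and only builds the result object after the lock is released. A generic sketch of that lock-then-materialize shape, using plain standard-library types rather than V8's handles and factory:

#include <atomic>
#include <mutex>
#include <string>

enum class Outcome { kNotEqual, kTimedOut, kAsync };

std::mutex g_mutex;

// Decide the outcome under the lock; do the (potentially slow or allocating)
// result construction only after releasing it.
std::string WaitAsyncSketch(std::atomic<int>& cell, int expected,
                            bool use_timeout, long long timeout_ns) {
  Outcome outcome;
  {
    std::lock_guard<std::mutex> lock(g_mutex);
    if (cell.load() != expected) {
      outcome = Outcome::kNotEqual;
    } else if (use_timeout && timeout_ns == 0) {
      outcome = Outcome::kTimedOut;
    } else {
      outcome = Outcome::kAsync;  // real code would enqueue a waiter node here
    }
  }
  switch (outcome) {
    case Outcome::kNotEqual: return "not-equal";
    case Outcome::kTimedOut: return "timed-out";
    case Outcome::kAsync:    return "async";
  }
  return "";  // unreachable
}
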
@@ -706,7 +728,6 @@ void FutexEmulation::CleanupAsyncWaiterPromise(FutexWaitListNode* node) {
// This function must run in the main thread of node's Isolate. This function
// may allocate memory. To avoid deadlocks, we shouldn't be holding g_mutex.
- DCHECK(FLAG_harmony_atomics_waitasync);
DCHECK(node->IsAsync());
Isolate* isolate = node->isolate_for_async_waiters_;
@@ -737,7 +758,6 @@ void FutexEmulation::CleanupAsyncWaiterPromise(FutexWaitListNode* node) {
void FutexEmulation::ResolveAsyncWaiterPromise(FutexWaitListNode* node) {
// This function must run in the main thread of node's Isolate.
- DCHECK(FLAG_harmony_atomics_waitasync);
auto v8_isolate =
reinterpret_cast<v8::Isolate*>(node->isolate_for_async_waiters_);
@@ -779,7 +799,6 @@ void FutexEmulation::ResolveAsyncWaiterPromise(FutexWaitListNode* node) {
void FutexEmulation::ResolveAsyncWaiterPromises(Isolate* isolate) {
// This function must run in the main thread of isolate.
- DCHECK(FLAG_harmony_atomics_waitasync);
FutexWaitListNode* node;
{
@@ -813,7 +832,6 @@ void FutexEmulation::ResolveAsyncWaiterPromises(Isolate* isolate) {
void FutexEmulation::HandleAsyncWaiterTimeout(FutexWaitListNode* node) {
// This function must run in the main thread of node's Isolate.
- DCHECK(FLAG_harmony_atomics_waitasync);
DCHECK(node->IsAsync());
{
@@ -966,7 +984,6 @@ void FutexWaitList::VerifyNode(FutexWaitListNode* node, FutexWaitListNode* head,
}
if (node->async_timeout_time_ != base::TimeTicks()) {
- DCHECK(FLAG_harmony_atomics_waitasync);
DCHECK(node->IsAsync());
}
diff --git a/deps/v8/src/execution/ia32/frame-constants-ia32.h b/deps/v8/src/execution/ia32/frame-constants-ia32.h
index dde0611f275..45c7355979c 100644
--- a/deps/v8/src/execution/ia32/frame-constants-ia32.h
+++ b/deps/v8/src/execution/ia32/frame-constants-ia32.h
@@ -7,6 +7,7 @@
#include "src/base/bits.h"
#include "src/base/macros.h"
+#include "src/codegen/ia32/register-ia32.h"
#include "src/execution/frame-constants.h"
namespace v8 {
@@ -51,10 +52,13 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
// registers (see liftoff-assembler-defs.h).
class WasmDebugBreakFrameConstants : public TypedFrameConstants {
public:
- // {eax, ecx, edx, esi, edi}
- static constexpr uint32_t kPushedGpRegs = 0b11000111;
- // {xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6}
- static constexpr uint32_t kPushedFpRegs = 0b01111111;
+ // Omit ebx, which is the root register.
+ static constexpr RegList kPushedGpRegs =
+ Register::ListOf(eax, ecx, edx, esi, edi);
+
+ // Omit xmm7, which is the kScratchDoubleReg.
+ static constexpr RegList kPushedFpRegs =
+ DoubleRegister::ListOf(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6);
static constexpr int kNumPushedGpRegisters =
base::bits::CountPopulation(kPushedGpRegs);
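
The switch from a raw bitmask literal to Register::ListOf keeps the same encoding, since each register code maps to a single bit. A worked check, assuming the standard ia32 register codes (eax=0, ecx=1, edx=2, ebx=3, esp=4, ebp=5, esi=6, edi=7) and a ListOf-style helper:

#include <cstdint>

using RegList = uint32_t;

constexpr RegList Bit(int code) { return RegList{1} << code; }

// ListOf-style helper: OR together the single-bit masks of the given codes.
template <typename... Codes>
constexpr RegList ListOf(Codes... codes) {
  return (RegList{0} | ... | Bit(codes));
}

constexpr int eax = 0, ecx = 1, edx = 2, esi = 6, edi = 7;

static_assert(ListOf(eax, ecx, edx, esi, edi) == 0b11000111,
              "same mask as the old literal");
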
diff --git a/deps/v8/src/execution/isolate-inl.h b/deps/v8/src/execution/isolate-inl.h
index 28beea58ee6..96ea770e65b 100644
--- a/deps/v8/src/execution/isolate-inl.h
+++ b/deps/v8/src/execution/isolate-inl.h
@@ -7,6 +7,7 @@
#include "src/execution/isolate.h"
#include "src/objects/cell-inl.h"
+#include "src/objects/contexts-inl.h"
#include "src/objects/js-function.h"
#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
diff --git a/deps/v8/src/execution/isolate-utils-inl.h b/deps/v8/src/execution/isolate-utils-inl.h
index 2cc66a473cb..f199b525aab 100644
--- a/deps/v8/src/execution/isolate-utils-inl.h
+++ b/deps/v8/src/execution/isolate-utils-inl.h
@@ -13,26 +13,36 @@
namespace v8 {
namespace internal {
-inline constexpr IsolateRoot GetIsolateForPtrComprFromOnHeapAddress(
- Address address) {
-#ifdef V8_COMPRESS_POINTERS
- return IsolateRoot(GetIsolateRootAddress(address));
-#else
- return IsolateRoot();
-#endif // V8_COMPRESS_POINTERS
+#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
+
+// Aliases for GetPtrComprCageBase when
+// V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE. Each Isolate has its own cage, whose
+// base address is also the Isolate root.
+V8_INLINE constexpr Address GetIsolateRootAddress(Address on_heap_addr) {
+ return GetPtrComprCageBaseAddress(on_heap_addr);
}
-inline IsolateRoot GetIsolateForPtrCompr(HeapObject object) {
- return GetIsolateForPtrComprFromOnHeapAddress(object.ptr());
+V8_INLINE Address GetIsolateRootAddress(PtrComprCageBase cage_base) {
+ return cage_base.address();
+}
+
+#else
+
+V8_INLINE Address GetIsolateRootAddress(Address on_heap_addr) { UNREACHABLE(); }
+
+V8_INLINE Address GetIsolateRootAddress(PtrComprCageBase cage_base) {
+ UNREACHABLE();
}
+#endif // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
+
V8_INLINE Heap* GetHeapFromWritableObject(HeapObject object) {
// Avoid using the below GetIsolateFromWritableObject because we want to be
// able to get the heap, but not the isolate, for off-thread objects.
#if defined V8_ENABLE_THIRD_PARTY_HEAP
return Heap::GetIsolateFromWritableObject(object)->heap();
-#elif defined V8_COMPRESS_POINTERS
+#elif defined V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
Isolate* isolate =
Isolate::FromRootAddress(GetIsolateRootAddress(object.ptr()));
DCHECK_NOT_NULL(isolate);
@@ -47,7 +57,7 @@ V8_INLINE Heap* GetHeapFromWritableObject(HeapObject object) {
V8_INLINE Isolate* GetIsolateFromWritableObject(HeapObject object) {
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
return Heap::GetIsolateFromWritableObject(object);
-#elif defined V8_COMPRESS_POINTERS
+#elif defined V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
Isolate* isolate =
Isolate::FromRootAddress(GetIsolateRootAddress(object.ptr()));
DCHECK_NOT_NULL(isolate);
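
With V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE, the cage base doubles as the Isolate root, so it can be recovered from any on-heap address by alignment masking. A hedged sketch assuming a 4 GiB cage aligned to its own size on a 64-bit target (names illustrative only):

#include <cstdint>

constexpr uint64_t kCageBaseAlignment = uint64_t{4} * 1024 * 1024 * 1024;

// Round an on-heap address down to the containing cage's base address.
constexpr uint64_t GetCageBaseAddress(uint64_t on_heap_addr) {
  return on_heap_addr & ~(kCageBaseAlignment - 1);
}

static_assert(GetCageBaseAddress(0x00007f3a12345678u) == 0x00007f3a00000000u,
              "the low 32 bits are cleared");
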
diff --git a/deps/v8/src/execution/isolate-utils.h b/deps/v8/src/execution/isolate-utils.h
index 2204b2cd965..c41788d9452 100644
--- a/deps/v8/src/execution/isolate-utils.h
+++ b/deps/v8/src/execution/isolate-utils.h
@@ -10,11 +10,12 @@
namespace v8 {
namespace internal {
-// Computes isolate from any read only or writable heap object. The resulting
-// value is intended to be used only as a hoisted computation of isolate root
-// inside trivial accessors for optmizing value decompression.
-// When pointer compression is disabled this function always returns nullptr.
-V8_INLINE IsolateRoot GetIsolateForPtrCompr(HeapObject object);
+// Computes the pointer compression cage base from any read only or writable
+// heap object. The resulting value is intended to be used only as a hoisted
+// computation of cage base inside trivial accessors for optimizing value
+// decompression. When pointer compression is disabled this function always
+// returns nullptr.
+V8_INLINE PtrComprCageBase GetPtrComprCageBase(HeapObject object);
V8_INLINE Heap* GetHeapFromWritableObject(HeapObject object);
diff --git a/deps/v8/src/execution/isolate.cc b/deps/v8/src/execution/isolate.cc
index 7fa6d7774da..6f133ece117 100644
--- a/deps/v8/src/execution/isolate.cc
+++ b/deps/v8/src/execution/isolate.cc
@@ -65,6 +65,7 @@
#include "src/objects/elements.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-generator-inl.h"
#include "src/objects/js-weak-refs-inl.h"
@@ -80,7 +81,7 @@
#include "src/profiler/tracing-cpu-profiler.h"
#include "src/regexp/regexp-stack.h"
#include "src/snapshot/embedded/embedded-data.h"
-#include "src/snapshot/embedded/embedded-file-writer.h"
+#include "src/snapshot/embedded/embedded-file-writer-interface.h"
#include "src/snapshot/read-only-deserializer.h"
#include "src/snapshot/startup-deserializer.h"
#include "src/strings/string-builder-inl.h"
@@ -91,16 +92,19 @@
#include "src/utils/address-map.h"
#include "src/utils/ostreams.h"
#include "src/utils/version.h"
-#include "src/wasm/wasm-code-manager.h"
-#include "src/wasm/wasm-engine.h"
-#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects.h"
#include "src/zone/accounting-allocator.h"
#include "src/zone/type-stats.h"
#ifdef V8_INTL_SUPPORT
#include "unicode/uobject.h"
#endif // V8_INTL_SUPPORT
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-engine.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
#if defined(V8_OS_WIN64)
#include "src/diagnostics/unwinding-info-win64.h"
#endif // V8_OS_WIN64
@@ -503,22 +507,23 @@ void Isolate::IterateThread(ThreadVisitor* v, char* t) {
void Isolate::Iterate(RootVisitor* v, ThreadLocalTop* thread) {
// Visit the roots from the top for a given thread.
- v->VisitRootPointer(Root::kTop, nullptr,
+ v->VisitRootPointer(Root::kStackRoots, nullptr,
FullObjectSlot(&thread->pending_exception_));
- v->VisitRootPointer(Root::kTop, nullptr,
+ v->VisitRootPointer(Root::kStackRoots, nullptr,
FullObjectSlot(&thread->pending_message_obj_));
- v->VisitRootPointer(Root::kTop, nullptr, FullObjectSlot(&thread->context_));
- v->VisitRootPointer(Root::kTop, nullptr,
+ v->VisitRootPointer(Root::kStackRoots, nullptr,
+ FullObjectSlot(&thread->context_));
+ v->VisitRootPointer(Root::kStackRoots, nullptr,
FullObjectSlot(&thread->scheduled_exception_));
for (v8::TryCatch* block = thread->try_catch_handler_; block != nullptr;
block = block->next_) {
// TODO(3770): Make TryCatch::exception_ an Address (and message_obj_ too).
v->VisitRootPointer(
- Root::kTop, nullptr,
+ Root::kStackRoots, nullptr,
FullObjectSlot(reinterpret_cast<Address>(&(block->exception_))));
v->VisitRootPointer(
- Root::kTop, nullptr,
+ Root::kStackRoots, nullptr,
FullObjectSlot(reinterpret_cast<Address>(&(block->message_obj_))));
}
@@ -528,7 +533,9 @@ void Isolate::Iterate(RootVisitor* v, ThreadLocalTop* thread) {
#endif
// Iterate over pointers on native execution stack.
+#if V8_ENABLE_WEBASSEMBLY
wasm::WasmCodeRefScope wasm_code_ref_scope;
+#endif // V8_ENABLE_WEBASSEMBLY
for (StackFrameIterator it(this, thread); !it.done(); it.Advance()) {
it.frame()->Iterate(v);
}
@@ -695,6 +702,7 @@ class StackTraceBuilder {
summary.code_offset(), flags, summary.parameters());
}
+#if V8_ENABLE_WEBASSEMBLY
void AppendWasmFrame(FrameSummary::WasmFrameSummary const& summary) {
if (summary.code()->kind() != wasm::WasmCode::kFunction) return;
Handle<WasmInstanceObject> instance = summary.wasm_instance();
@@ -714,6 +722,7 @@ class StackTraceBuilder {
summary.code_offset(), flags,
isolate_->factory()->empty_fixed_array());
}
+#endif // V8_ENABLE_WEBASSEMBLY
void AppendBuiltinExitFrame(BuiltinExitFrame* exit_frame) {
Handle<JSFunction> function(exit_frame->function(), isolate_);
@@ -729,7 +738,7 @@ class StackTraceBuilder {
Handle<Object> receiver(exit_frame->receiver(), isolate_);
Handle<Code> code(exit_frame->LookupCode(), isolate_);
const int offset =
- static_cast<int>(exit_frame->pc() - code->InstructionStart());
+ code->GetOffsetFromInstructionStart(isolate_, exit_frame->pc());
int flags = 0;
if (IsStrictFrame(function)) flags |= StackFrameInfo::kIsStrict;
@@ -1001,7 +1010,10 @@ Handle<FixedArray> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
TRACE_EVENT_BEGIN1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
"CaptureStackTrace", "maxFrameCount", options.limit);
+#if V8_ENABLE_WEBASSEMBLY
wasm::WasmCodeRefScope code_ref_scope;
+#endif // V8_ENABLE_WEBASSEMBLY
+
StackTraceBuilder builder(isolate, options.skip_mode, options.limit, caller,
options.filter_mode);
@@ -1018,7 +1030,10 @@ Handle<FixedArray> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
case StackFrame::INTERPRETED:
case StackFrame::BASELINE:
case StackFrame::BUILTIN:
- case StackFrame::WASM: {
+#if V8_ENABLE_WEBASSEMBLY
+ case StackFrame::WASM:
+#endif // V8_ENABLE_WEBASSEMBLY
+ {
// A standard frame may include many summarized frames (due to
// inlining).
std::vector<FrameSummary> frames;
@@ -1036,12 +1051,14 @@ Handle<FixedArray> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
//=========================================================
auto const& java_script = summary.AsJavaScript();
builder.AppendJavaScriptFrame(java_script);
+#if V8_ENABLE_WEBASSEMBLY
} else if (summary.IsWasm()) {
//=========================================================
// Handle a Wasm frame.
//=========================================================
auto const& wasm = summary.AsWasm();
builder.AppendWasmFrame(wasm);
+#endif // V8_ENABLE_WEBASSEMBLY
}
}
break;
@@ -1276,7 +1293,6 @@ static void PrintFrames(Isolate* isolate, StringStream* accumulator,
void Isolate::PrintStack(StringStream* accumulator, PrintStackMode mode) {
HandleScope scope(this);
- wasm::WasmCodeRefScope wasm_code_ref_scope;
DCHECK(accumulator->IsMentionedObjectCacheClear(this));
// Avoid printing anything if there are no frames.
@@ -1689,7 +1705,6 @@ Object Isolate::UnwindAndFindHandler() {
// Special handling of termination exceptions, uncatchable by JavaScript and
// Wasm code, we unwind the handlers until the top ENTRY handler is found.
bool catchable_by_js = is_catchable_by_javascript(exception);
- bool catchable_by_wasm = is_catchable_by_wasm(exception);
// Compute handler and stack unwinding information by performing a full walk
// over the stack and dispatching according to the frame type.
@@ -1711,18 +1726,19 @@ Object Isolate::UnwindAndFindHandler() {
// Gather information from the handler.
Code code = frame->LookupCode();
HandlerTable table(code);
- return FoundHandler(Context(), code.InstructionStart(),
+ return FoundHandler(Context(), code.InstructionStart(this, frame->pc()),
table.LookupReturn(0), code.constant_pool(),
handler->address() + StackHandlerConstants::kSize,
0);
}
+#if V8_ENABLE_WEBASSEMBLY
case StackFrame::C_WASM_ENTRY: {
StackHandler* handler = frame->top_handler();
thread_local_top()->handler_ = handler->next_address();
Code code = frame->LookupCode();
HandlerTable table(code);
- Address instruction_start = code.InstructionStart();
+ Address instruction_start = code.InstructionStart(this, frame->pc());
int return_offset = static_cast<int>(frame->pc() - instruction_start);
int handler_offset = table.LookupReturn(return_offset);
DCHECK_NE(-1, handler_offset);
@@ -1736,7 +1752,7 @@ Object Isolate::UnwindAndFindHandler() {
}
case StackFrame::WASM: {
- if (!catchable_by_wasm) break;
+ if (!is_catchable_by_wasm(exception)) break;
// For WebAssembly frames we perform a lookup in the handler table.
// This code ref scope is here to avoid a check failure when looking up
@@ -1768,6 +1784,7 @@ Object Isolate::UnwindAndFindHandler() {
DCHECK(FLAG_wasm_lazy_validation);
break;
}
+#endif // V8_ENABLE_WEBASSEMBLY
case StackFrame::OPTIMIZED: {
// For optimized frames we perform a lookup in the handler table.
@@ -1793,18 +1810,19 @@ Object Isolate::UnwindAndFindHandler() {
set_deoptimizer_lazy_throw(true);
}
- return FoundHandler(Context(), code.InstructionStart(), offset,
- code.constant_pool(), return_sp, frame->fp());
+ return FoundHandler(Context(), code.InstructionStart(this, frame->pc()),
+ offset, code.constant_pool(), return_sp,
+ frame->fp());
}
case StackFrame::STUB: {
// Some stubs are able to handle exceptions.
if (!catchable_by_js) break;
StubFrame* stub_frame = static_cast<StubFrame*>(frame);
-#ifdef DEBUG
+#if defined(DEBUG) && V8_ENABLE_WEBASSEMBLY
wasm::WasmCodeRefScope code_ref_scope;
DCHECK_NULL(wasm_engine()->code_manager()->LookupCode(frame->pc()));
-#endif // DEBUG
+#endif // defined(DEBUG) && V8_ENABLE_WEBASSEMBLY
Code code = stub_frame->LookupCode();
if (!code.IsCode() || code.kind() != CodeKind::BUILTIN ||
!code.has_handler_table() || !code.is_turbofanned()) {
@@ -1820,8 +1838,9 @@ Object Isolate::UnwindAndFindHandler() {
StandardFrameConstants::kFixedFrameSizeAboveFp -
code.stack_slots() * kSystemPointerSize;
- return FoundHandler(Context(), code.InstructionStart(), offset,
- code.constant_pool(), return_sp, frame->fp());
+ return FoundHandler(Context(), code.InstructionStart(this, frame->pc()),
+ offset, code.constant_pool(), return_sp,
+ frame->fp());
}
case StackFrame::INTERPRETED:
@@ -1859,8 +1878,9 @@ Object Isolate::UnwindAndFindHandler() {
// Patch the context register directly on the frame, so that we don't
// need to have a context read + write in the baseline code.
sp_frame->PatchContext(context);
- return FoundHandler(Context(), code.InstructionStart(), pc_offset,
- code.constant_pool(), return_sp, sp_frame->fp());
+ return FoundHandler(
+ Context(), code.InstructionStart(this, sp_frame->sp()), pc_offset,
+ code.constant_pool(), return_sp, sp_frame->fp());
} else {
InterpretedFrame::cast(js_frame)->PatchBytecodeOffset(
static_cast<int>(offset));
@@ -2113,7 +2133,9 @@ bool Isolate::ComputeLocation(MessageLocation* target) {
// baseline code. For optimized code this will use the deoptimization
// information to get canonical location information.
std::vector<FrameSummary> frames;
+#if V8_ENABLE_WEBASSEMBLY
wasm::WasmCodeRefScope code_ref_scope;
+#endif // V8_ENABLE_WEBASSEMBLY
frame->Summarize(&frames);
FrameSummary& summary = frames.back();
Handle<SharedFunctionInfo> shared;
@@ -2529,19 +2551,27 @@ void Isolate::SetAbortOnUncaughtExceptionCallback(
}
bool Isolate::IsWasmSimdEnabled(Handle<Context> context) {
+#if V8_ENABLE_WEBASSEMBLY
if (wasm_simd_enabled_callback()) {
v8::Local<v8::Context> api_context = v8::Utils::ToLocal(context);
return wasm_simd_enabled_callback()(api_context);
}
return FLAG_experimental_wasm_simd;
+#else
+ return false;
+#endif // V8_ENABLE_WEBASSEMBLY
}
bool Isolate::AreWasmExceptionsEnabled(Handle<Context> context) {
+#if V8_ENABLE_WEBASSEMBLY
if (wasm_exceptions_enabled_callback()) {
v8::Local<v8::Context> api_context = v8::Utils::ToLocal(context);
return wasm_exceptions_enabled_callback()(api_context);
}
return FLAG_experimental_wasm_eh;
+#else
+ return false;
+#endif // V8_ENABLE_WEBASSEMBLY
}
Handle<Context> Isolate::GetIncumbentContext() {
@@ -2636,12 +2666,23 @@ void Isolate::UnregisterManagedPtrDestructor(ManagedPtrDestructor* destructor) {
destructor->next_ = nullptr;
}
+#if V8_ENABLE_WEBASSEMBLY
void Isolate::SetWasmEngine(std::shared_ptr<wasm::WasmEngine> engine) {
DCHECK_NULL(wasm_engine_); // Only call once before {Init}.
wasm_engine_ = std::move(engine);
wasm_engine_->AddIsolate(this);
}
+void Isolate::AddSharedWasmMemory(Handle<WasmMemoryObject> memory_object) {
+ HandleScope scope(this);
+ Handle<WeakArrayList> shared_wasm_memories =
+ factory()->shared_wasm_memories();
+ shared_wasm_memories = WeakArrayList::AddToEnd(
+ this, shared_wasm_memories, MaybeObjectHandle::Weak(memory_object));
+ heap()->set_shared_wasm_memories(*shared_wasm_memories);
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
// NOLINTNEXTLINE
Isolate::PerIsolateThreadData::~PerIsolateThreadData() {
#if defined(USE_SIMULATOR)
@@ -2821,8 +2862,8 @@ Isolate* Isolate::New() {
// Construct Isolate object in the allocated memory.
void* isolate_ptr = isolate_allocator->isolate_memory();
Isolate* isolate = new (isolate_ptr) Isolate(std::move(isolate_allocator));
-#ifdef V8_COMPRESS_POINTERS
- DCHECK(IsAligned(isolate->isolate_root(), kPtrComprIsolateRootAlignment));
+#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
+ DCHECK(IsAligned(isolate->isolate_root(), kPtrComprCageBaseAlignment));
#endif
#ifdef DEBUG
@@ -2991,16 +3032,18 @@ void Isolate::Deinit() {
debug()->Unload();
+#if V8_ENABLE_WEBASSEMBLY
wasm_engine()->DeleteCompileJobsOnIsolate(this);
+ BackingStore::RemoveSharedWasmMemoryObjects(this);
+#endif // V8_ENABLE_WEBASSEMBLY
+
if (concurrent_recompilation_enabled()) {
optimizing_compile_dispatcher_->Stop();
delete optimizing_compile_dispatcher_;
optimizing_compile_dispatcher_ = nullptr;
}
- BackingStore::RemoveSharedWasmMemoryObjects(this);
-
// Help sweeper threads complete sweeping to stop faster.
heap_.mark_compact_collector()->DrainSweepingWorklists();
heap_.mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
@@ -3054,10 +3097,12 @@ void Isolate::Deinit() {
FILE* logfile = logger_->TearDownAndGetLogFile();
if (logfile != nullptr) base::Fclose(logfile);
+#if V8_ENABLE_WEBASSEMBLY
if (wasm_engine_) {
wasm_engine_->RemoveIsolate(this);
wasm_engine_.reset();
}
+#endif // V8_ENABLE_WEBASSEMBLY
TearDownEmbeddedBlob();
@@ -3245,7 +3290,7 @@ void CreateOffHeapTrampolines(Isolate* isolate) {
HandleScope scope(isolate);
Builtins* builtins = isolate->builtins();
- EmbeddedData d = EmbeddedData::FromBlob();
+ EmbeddedData d = EmbeddedData::FromBlob(isolate);
STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
for (int i = 0; i < Builtins::builtin_count; i++) {
@@ -3333,15 +3378,32 @@ void Isolate::CreateAndSetEmbeddedBlob() {
SetStickyEmbeddedBlob(code, code_size, data, data_size);
}
+ MaybeRemapEmbeddedBuiltinsIntoCodeRange();
+
CreateOffHeapTrampolines(this);
}
+void Isolate::MaybeRemapEmbeddedBuiltinsIntoCodeRange() {
+ if (!is_short_builtin_calls_enabled() || !RequiresCodeRange()) return;
+
+ CHECK_NOT_NULL(embedded_blob_code_);
+ CHECK_NE(embedded_blob_code_size_, 0);
+
+ embedded_blob_code_ = heap_.RemapEmbeddedBuiltinsIntoCodeRange(
+ embedded_blob_code_, embedded_blob_code_size_);
+ CHECK_NOT_NULL(embedded_blob_code_);
+ // The un-embedded code blob is already a part of the registered code range
+ // so it's not necessary to register it again.
+}
+
void Isolate::TearDownEmbeddedBlob() {
// Nothing to do in case the blob is embedded into the binary or unset.
if (StickyEmbeddedBlobCode() == nullptr) return;
- CHECK_EQ(embedded_blob_code(), StickyEmbeddedBlobCode());
- CHECK_EQ(embedded_blob_data(), StickyEmbeddedBlobData());
+ if (!is_short_builtin_calls_enabled()) {
+ CHECK_EQ(embedded_blob_code(), StickyEmbeddedBlobCode());
+ CHECK_EQ(embedded_blob_data(), StickyEmbeddedBlobData());
+ }
CHECK_EQ(CurrentEmbeddedBlobCode(), StickyEmbeddedBlobCode());
CHECK_EQ(CurrentEmbeddedBlobData(), StickyEmbeddedBlobData());
@@ -3350,8 +3412,10 @@ void Isolate::TearDownEmbeddedBlob() {
if (current_embedded_blob_refs_ == 0 && enable_embedded_blob_refcounting_) {
// We own the embedded blob and are the last holder. Free it.
InstructionStream::FreeOffHeapInstructionStream(
- const_cast<uint8_t*>(embedded_blob_code()), embedded_blob_code_size(),
- const_cast<uint8_t*>(embedded_blob_data()), embedded_blob_data_size());
+ const_cast<uint8_t*>(CurrentEmbeddedBlobCode()),
+ embedded_blob_code_size(),
+ const_cast<uint8_t*>(CurrentEmbeddedBlobData()),
+ embedded_blob_data_size());
ClearEmbeddedBlob();
}
}
@@ -3481,17 +3545,26 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
ReadOnlyHeap::SetUp(this, read_only_snapshot_data, can_rehash);
heap_.SetUpSpaces();
+ if (V8_SHORT_BUILTIN_CALLS_BOOL && FLAG_short_builtin_calls) {
+ // Check if the system has more than 4GB of physical memory by comparing
+ // the old space size with the respective threshold value.
+ is_short_builtin_calls_enabled_ =
+ heap_.MaxOldGenerationSize() >= kShortBuiltinCallsOldSpaceSizeThreshold;
+ }
+
// Create LocalIsolate/LocalHeap for the main thread and set state to Running.
main_thread_local_isolate_.reset(new LocalIsolate(this, ThreadKind::kMain));
main_thread_local_heap()->Unpark();
isolate_data_.external_reference_table()->Init(this);
+#if V8_ENABLE_WEBASSEMBLY
// Setup the wasm engine.
if (wasm_engine_ == nullptr) {
SetWasmEngine(wasm::WasmEngine::GetWasmEngine());
}
DCHECK_NOT_NULL(wasm_engine_);
+#endif // V8_ENABLE_WEBASSEMBLY
if (setup_delegate_ == nullptr) {
setup_delegate_ = new SetupIsolateDelegate(create_heap_objects);
@@ -3542,6 +3615,7 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
CreateAndSetEmbeddedBlob();
} else {
setup_delegate_->SetupBuiltins(this);
+ MaybeRemapEmbeddedBuiltinsIntoCodeRange();
}
// Initialize custom memcopy and memmove functions (must happen after
@@ -3764,11 +3838,13 @@ void Isolate::DumpAndResetStats() {
delete turbo_statistics_;
turbo_statistics_ = nullptr;
}
+#if V8_ENABLE_WEBASSEMBLY
// TODO(7424): There is no public API for the {WasmEngine} yet. So for now we
// just dump and reset the engines statistics together with the Isolate.
if (FLAG_turbo_stats_wasm) {
wasm_engine()->DumpAndResetTurboStatistics();
}
+#endif // V8_ENABLE_WEBASSEMBLY
if (V8_UNLIKELY(TracingFlags::runtime_stats.load(std::memory_order_relaxed) ==
v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE)) {
counters()->worker_thread_runtime_call_stats()->AddToMainTable(
@@ -4045,23 +4121,19 @@ void Isolate::FireCallCompletedCallback(MicrotaskQueue* microtask_queue) {
}
}
-void Isolate::UpdatePromiseHookProtector() {
- if (Protectors::IsPromiseHookIntact(this)) {
+void Isolate::PromiseHookStateUpdated() {
+ bool promise_hook_or_async_event_delegate =
+ promise_hook_ || async_event_delegate_;
+ bool promise_hook_or_debug_is_active_or_async_event_delegate =
+ promise_hook_or_async_event_delegate || debug()->is_active();
+ if (promise_hook_or_debug_is_active_or_async_event_delegate &&
+ Protectors::IsPromiseHookIntact(this)) {
HandleScope scope(this);
Protectors::InvalidatePromiseHook(this);
}
-}
-
-void Isolate::PromiseHookStateUpdated() {
- promise_hook_flags_ =
- (promise_hook_flags_ & PromiseHookFields::HasContextPromiseHook::kMask) |
- PromiseHookFields::HasIsolatePromiseHook::encode(promise_hook_) |
- PromiseHookFields::HasAsyncEventDelegate::encode(async_event_delegate_) |
- PromiseHookFields::IsDebugActive::encode(debug()->is_active());
-
- if (promise_hook_flags_ != 0) {
- UpdatePromiseHookProtector();
- }
+ promise_hook_or_async_event_delegate_ = promise_hook_or_async_event_delegate;
+ promise_hook_or_debug_is_active_or_async_event_delegate_ =
+ promise_hook_or_debug_is_active_or_async_event_delegate;
}
namespace {
@@ -4361,30 +4433,17 @@ void Isolate::SetPromiseHook(PromiseHook hook) {
PromiseHookStateUpdated();
}
-void Isolate::RunAllPromiseHooks(PromiseHookType type,
- Handle<JSPromise> promise,
- Handle<Object> parent) {
- if (HasContextPromiseHooks()) {
- native_context()->RunPromiseHook(type, promise, parent);
- }
- if (HasIsolatePromiseHooks() || HasAsyncEventDelegate()) {
- RunPromiseHook(type, promise, parent);
- }
-}
-
void Isolate::RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
Handle<Object> parent) {
RunPromiseHookForAsyncEventDelegate(type, promise);
- if (!HasIsolatePromiseHooks()) return;
- DCHECK(promise_hook_ != nullptr);
+ if (promise_hook_ == nullptr) return;
promise_hook_(type, v8::Utils::PromiseToLocal(promise),
v8::Utils::ToLocal(parent));
}
void Isolate::RunPromiseHookForAsyncEventDelegate(PromiseHookType type,
Handle<JSPromise> promise) {
- if (!HasAsyncEventDelegate()) return;
- DCHECK(async_event_delegate_ != nullptr);
+ if (!async_event_delegate_) return;
switch (type) {
case PromiseHookType::kResolve:
return;
@@ -4515,15 +4574,6 @@ void Isolate::AddDetachedContext(Handle<Context> context) {
heap()->set_detached_contexts(*detached_contexts);
}
-void Isolate::AddSharedWasmMemory(Handle<WasmMemoryObject> memory_object) {
- HandleScope scope(this);
- Handle<WeakArrayList> shared_wasm_memories =
- factory()->shared_wasm_memories();
- shared_wasm_memories = WeakArrayList::AddToEnd(
- this, shared_wasm_memories, MaybeObjectHandle::Weak(memory_object));
- heap()->set_shared_wasm_memories(*shared_wasm_memories);
-}
-
void Isolate::CheckDetachedContextsAfterGC() {
HandleScope scope(this);
Handle<WeakArrayList> detached_contexts = factory()->detached_contexts();
@@ -4567,6 +4617,11 @@ double Isolate::LoadStartTimeMs() {
return load_start_time_ms_;
}
+void Isolate::UpdateLoadStartTime() {
+ base::MutexGuard guard(&rail_mutex_);
+ load_start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
+}
+
void Isolate::SetRAILMode(RAILMode rail_mode) {
RAILMode old_rail_mode = rail_mode_.load();
if (old_rail_mode != PERFORMANCE_LOAD && rail_mode == PERFORMANCE_LOAD) {
diff --git a/deps/v8/src/execution/isolate.h b/deps/v8/src/execution/isolate.h
index e08195674fd..61934d56190 100644
--- a/deps/v8/src/execution/isolate.h
+++ b/deps/v8/src/execution/isolate.h
@@ -465,7 +465,9 @@ using DebugObjectCache = std::vector<Handle<HeapObject>>;
V(bool, detailed_source_positions_for_profiling, FLAG_detailed_line_info) \
V(int, embedder_wrapper_type_index, -1) \
V(int, embedder_wrapper_object_index, -1) \
- V(compiler::NodeObserver*, node_observer, nullptr)
+ V(compiler::NodeObserver*, node_observer, nullptr) \
+ /* Used in combination with --script-run-delay-once */ \
+ V(bool, did_run_script_delay, false)
#define THREAD_LOCAL_TOP_ACCESSOR(type, name) \
inline void set_##name(type v) { thread_local_top()->name##_ = v; } \
@@ -631,24 +633,31 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// Mutex for serializing access to break control structures.
base::RecursiveMutex* break_access() { return &break_access_; }
- // Shared mutex for allowing concurrent read/writes to FeedbackVectors.
+ // Shared mutex for allowing thread-safe concurrent reads of FeedbackVectors.
base::SharedMutex* feedback_vector_access() {
return &feedback_vector_access_;
}
- // Shared mutex for allowing concurrent read/writes to Strings.
- base::SharedMutex* string_access() { return &string_access_; }
+ // Shared mutex for allowing thread-safe concurrent reads of
+ // InternalizedStrings.
+ base::SharedMutex* internalized_string_access() {
+ return &internalized_string_access_;
+ }
- // Shared mutex for allowing concurrent read/writes to TransitionArrays.
- base::SharedMutex* transition_array_access() {
- return &transition_array_access_;
+ // Shared mutex for allowing thread-safe concurrent reads of TransitionArrays
+ // of kind kFullTransitionArray.
+ base::SharedMutex* full_transition_array_access() {
+ return &full_transition_array_access_;
}
- // Shared mutex for allowing concurrent read/writes to SharedFunctionInfos.
+ // Shared mutex for allowing thread-safe concurrent reads of
+ // SharedFunctionInfos.
base::SharedMutex* shared_function_info_access() {
return &shared_function_info_access_;
}
+ base::SharedMutex* map_updater_access() { return &map_updater_access_; }
+
// The isolate's string table.
StringTable* string_table() { return string_table_.get(); }
@@ -1428,21 +1437,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
}
#endif
- void SetHasContextPromiseHooks(bool context_promise_hook) {
- promise_hook_flags_ = PromiseHookFields::HasContextPromiseHook::update(
- promise_hook_flags_, context_promise_hook);
- PromiseHookStateUpdated();
- }
-
- bool HasContextPromiseHooks() const {
- return PromiseHookFields::HasContextPromiseHook::decode(
- promise_hook_flags_);
- }
-
- Address promise_hook_flags_address() {
- return reinterpret_cast<Address>(&promise_hook_flags_);
- }
-
Address promise_hook_address() {
return reinterpret_cast<Address>(&promise_hook_);
}
@@ -1451,6 +1445,15 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
return reinterpret_cast<Address>(&async_event_delegate_);
}
+ Address promise_hook_or_async_event_delegate_address() {
+ return reinterpret_cast<Address>(&promise_hook_or_async_event_delegate_);
+ }
+
+ Address promise_hook_or_debug_is_active_or_async_event_delegate_address() {
+ return reinterpret_cast<Address>(
+ &promise_hook_or_debug_is_active_or_async_event_delegate_);
+ }
+
Address handle_scope_implementer_address() {
return reinterpret_cast<Address>(&handle_scope_implementer_);
}
@@ -1466,16 +1469,11 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void SetPromiseHook(PromiseHook hook);
void RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
Handle<Object> parent);
- void RunAllPromiseHooks(PromiseHookType type, Handle<JSPromise> promise,
- Handle<Object> parent);
- void UpdatePromiseHookProtector();
void PromiseHookStateUpdated();
void AddDetachedContext(Handle<Context> context);
void CheckDetachedContextsAfterGC();
- void AddSharedWasmMemory(Handle<WasmMemoryObject> memory_object);
-
std::vector<Object>* startup_object_cache() { return &startup_object_cache_; }
bool IsGeneratingEmbeddedBuiltins() const {
@@ -1504,6 +1502,11 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
const uint8_t* embedded_blob_data() const;
uint32_t embedded_blob_data_size() const;
+ // Returns true if short builtin calls optimization is enabled for the Isolate.
+ bool is_short_builtin_calls_enabled() const {
+ return V8_SHORT_BUILTIN_CALLS_BOOL && is_short_builtin_calls_enabled_;
+ }
+
void set_array_buffer_allocator(v8::ArrayBuffer::Allocator* allocator) {
array_buffer_allocator_ = allocator;
}
@@ -1623,6 +1626,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
double LoadStartTimeMs();
+ void UpdateLoadStartTime();
+
void IsolateInForegroundNotification();
void IsolateInBackgroundNotification();
@@ -1651,9 +1656,13 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
elements_deletion_counter_ = value;
}
+#if V8_ENABLE_WEBASSEMBLY
wasm::WasmEngine* wasm_engine() const { return wasm_engine_.get(); }
void SetWasmEngine(std::shared_ptr<wasm::WasmEngine> engine);
+ void AddSharedWasmMemory(Handle<WasmMemoryObject> memory_object);
+#endif // V8_ENABLE_WEBASSEMBLY
+
const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope() const {
return top_backup_incumbent_scope_;
}
@@ -1706,13 +1715,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
}
#endif
- struct PromiseHookFields {
- using HasContextPromiseHook = base::BitField<bool, 0, 1>;
- using HasIsolatePromiseHook = HasContextPromiseHook::Next<bool, 1>;
- using HasAsyncEventDelegate = HasIsolatePromiseHook::Next<bool, 1>;
- using IsDebugActive = HasAsyncEventDelegate::Next<bool, 1>;
- };
-
private:
explicit Isolate(std::unique_ptr<IsolateAllocator> isolate_allocator);
~Isolate();
@@ -1796,16 +1798,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void RunPromiseHookForAsyncEventDelegate(PromiseHookType type,
Handle<JSPromise> promise);
- bool HasIsolatePromiseHooks() const {
- return PromiseHookFields::HasIsolatePromiseHook::decode(
- promise_hook_flags_);
- }
-
- bool HasAsyncEventDelegate() const {
- return PromiseHookFields::HasAsyncEventDelegate::decode(
- promise_hook_flags_);
- }
-
const char* RAILModeName(RAILMode rail_mode) const {
switch (rail_mode) {
case PERFORMANCE_RESPONSE:
@@ -1847,9 +1839,10 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
std::shared_ptr<Counters> async_counters_;
base::RecursiveMutex break_access_;
base::SharedMutex feedback_vector_access_;
- base::SharedMutex string_access_;
- base::SharedMutex transition_array_access_;
+ base::SharedMutex internalized_string_access_;
+ base::SharedMutex full_transition_array_access_;
base::SharedMutex shared_function_info_access_;
+ base::SharedMutex map_updater_access_;
Logger* logger_ = nullptr;
StubCache* load_stub_cache_ = nullptr;
StubCache* store_stub_cache_ = nullptr;
@@ -1937,9 +1930,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// True if this isolate was initialized from a snapshot.
bool initialized_from_snapshot_ = false;
- // TODO(ishell): remove
- // True if ES2015 tail call elimination feature is enabled.
- bool is_tail_call_elimination_enabled_ = true;
+ // True if short builtin calls optimization is enabled.
+ bool is_short_builtin_calls_enabled_ = false;
// True if the isolate is in background. This flag is used
// to prioritize between memory usage and latency.
@@ -2037,8 +2029,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void InitializeDefaultEmbeddedBlob();
void CreateAndSetEmbeddedBlob();
+ void MaybeRemapEmbeddedBuiltinsIntoCodeRange();
void TearDownEmbeddedBlob();
-
void SetEmbeddedBlob(const uint8_t* code, uint32_t code_size,
const uint8_t* data, uint32_t data_size);
void ClearEmbeddedBlob();
@@ -2058,7 +2050,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
debug::ConsoleDelegate* console_delegate_ = nullptr;
debug::AsyncEventDelegate* async_event_delegate_ = nullptr;
- uint32_t promise_hook_flags_ = 0;
+ bool promise_hook_or_async_event_delegate_ = false;
+ bool promise_hook_or_debug_is_active_or_async_event_delegate_ = false;
int async_task_count_ = 0;
std::unique_ptr<LocalIsolate> main_thread_local_isolate_;
@@ -2075,7 +2068,9 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
size_t elements_deletion_counter_ = 0;
+#if V8_ENABLE_WEBASSEMBLY
std::shared_ptr<wasm::WasmEngine> wasm_engine_;
+#endif // V8_ENABLE_WEBASSEMBLY
std::unique_ptr<TracingCpuProfilerImpl> tracing_cpu_profiler_;
@@ -2219,13 +2214,21 @@ class StackLimitCheck {
Isolate* isolate_;
};
-#define STACK_CHECK(isolate, result_value) \
- do { \
- StackLimitCheck stack_check(isolate); \
- if (stack_check.HasOverflowed()) { \
- isolate->StackOverflow(); \
- return result_value; \
- } \
+// This macro may be used in a context that disallows JS execution.
+// That is why it checks only for a stack overflow and termination.
+#define STACK_CHECK(isolate, result_value) \
+ do { \
+ StackLimitCheck stack_check(isolate); \
+ if (stack_check.InterruptRequested()) { \
+ if (stack_check.HasOverflowed()) { \
+ isolate->StackOverflow(); \
+ return result_value; \
+ } \
+ if (isolate->stack_guard()->HasTerminationRequest()) { \
+ isolate->TerminateExecution(); \
+ return result_value; \
+ } \
+ } \
} while (false)
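The reworked STACK_CHECK above only does further work when an interrupt has actually been requested, and then distinguishes a stack overflow from a termination request so the macro stays safe in contexts where JS must not run. A minimal standalone sketch of that control flow (the types and names below are illustrative stand-ins, not V8 APIs):

#include <cstdio>

// Illustrative stand-in for the isolate's stack guard state; not V8 code.
struct GuardState {
  bool interrupt_requested = false;
  bool stack_overflowed = false;
  bool termination_requested = false;
};

// Mirrors the macro's shape: fast path when no interrupt is pending,
// otherwise report overflow or termination and bail out of the caller.
bool CheckedStep(const GuardState& guard) {
  if (guard.interrupt_requested) {
    if (guard.stack_overflowed) {
      std::puts("stack overflow -> return failure value");
      return false;
    }
    if (guard.termination_requested) {
      std::puts("termination requested -> return failure value");
      return false;
    }
  }
  return true;  // No relevant interrupt; keep executing.
}

int main() {
  GuardState quiet;                     // fast path, nothing pending
  GuardState dying{true, false, true};  // termination pending
  std::printf("%d %d\n", CheckedStep(quiet), CheckedStep(dying));  // prints "1 0"
  return 0;
}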
class StackTraceFailureMessage {
diff --git a/deps/v8/src/execution/local-isolate.cc b/deps/v8/src/execution/local-isolate.cc
index 77733907f82..20a4344cfae 100644
--- a/deps/v8/src/execution/local-isolate.cc
+++ b/deps/v8/src/execution/local-isolate.cc
@@ -12,7 +12,8 @@
namespace v8 {
namespace internal {
-LocalIsolate::LocalIsolate(Isolate* isolate, ThreadKind kind)
+LocalIsolate::LocalIsolate(Isolate* isolate, ThreadKind kind,
+ RuntimeCallStats* runtime_call_stats)
: HiddenLocalFactory(isolate),
heap_(isolate->heap(), kind),
isolate_(isolate),
@@ -20,7 +21,8 @@ LocalIsolate::LocalIsolate(Isolate* isolate, ThreadKind kind)
thread_id_(ThreadId::Current()),
stack_limit_(kind == ThreadKind::kMain
? isolate->stack_guard()->real_climit()
- : GetCurrentStackPosition() - FLAG_stack_size * KB) {}
+ : GetCurrentStackPosition() - FLAG_stack_size * KB),
+ runtime_call_stats_(runtime_call_stats) {}
LocalIsolate::~LocalIsolate() = default;
diff --git a/deps/v8/src/execution/local-isolate.h b/deps/v8/src/execution/local-isolate.h
index c55f8dc65e8..91192d5f0f1 100644
--- a/deps/v8/src/execution/local-isolate.h
+++ b/deps/v8/src/execution/local-isolate.h
@@ -19,6 +19,7 @@ namespace internal {
class Isolate;
class LocalLogger;
+class RuntimeCallStats;
// HiddenLocalFactory parallels Isolate's HiddenFactory
class V8_EXPORT_PRIVATE HiddenLocalFactory : private LocalFactory {
@@ -37,7 +38,8 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
public:
using HandleScopeType = LocalHandleScope;
- explicit LocalIsolate(Isolate* isolate, ThreadKind kind);
+ explicit LocalIsolate(Isolate* isolate, ThreadKind kind,
+ RuntimeCallStats* runtime_call_stats = nullptr);
~LocalIsolate();
// Kinda sketchy.
@@ -53,7 +55,9 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
inline Object root(RootIndex index) const;
StringTable* string_table() const { return isolate_->string_table(); }
- base::SharedMutex* string_access() { return isolate_->string_access(); }
+ base::SharedMutex* internalized_string_access() {
+ return isolate_->internalized_string_access();
+ }
v8::internal::LocalFactory* factory() {
// Upcast to the privately inherited base-class using c-style casts to avoid
@@ -82,6 +86,7 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
LocalLogger* logger() const { return logger_.get(); }
ThreadId thread_id() const { return thread_id_; }
Address stack_limit() const { return stack_limit_; }
+ RuntimeCallStats* runtime_call_stats() const { return runtime_call_stats_; }
bool is_main_thread() const { return heap_.is_main_thread(); }
@@ -99,6 +104,8 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
std::unique_ptr<LocalLogger> logger_;
ThreadId const thread_id_;
Address const stack_limit_;
+
+ RuntimeCallStats* runtime_call_stats_;
};
template <base::MutexSharedType kIsShared>
diff --git a/deps/v8/src/execution/messages.cc b/deps/v8/src/execution/messages.cc
index b0ac8222989..a8b7ad23ca4 100644
--- a/deps/v8/src/execution/messages.cc
+++ b/deps/v8/src/execution/messages.cc
@@ -23,8 +23,6 @@
#include "src/parsing/parsing.h"
#include "src/roots/roots.h"
#include "src/strings/string-builder-inl.h"
-#include "src/wasm/wasm-code-manager.h"
-#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -368,8 +366,6 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
RETURN_ON_EXCEPTION(isolate, AppendErrorString(isolate, error, &builder),
Object);
- wasm::WasmCodeRefScope wasm_code_ref_scope;
-
for (int i = 0; i < elems->length(); ++i) {
builder.AppendCString("\n at ");
diff --git a/deps/v8/src/execution/ppc/frame-constants-ppc.h b/deps/v8/src/execution/ppc/frame-constants-ppc.h
index 71596c561d5..7e9ca5c766b 100644
--- a/deps/v8/src/execution/ppc/frame-constants-ppc.h
+++ b/deps/v8/src/execution/ppc/frame-constants-ppc.h
@@ -7,6 +7,7 @@
#include "src/base/bits.h"
#include "src/base/macros.h"
+#include "src/codegen/ppc/register-ppc.h"
#include "src/execution/frame-constants.h"
namespace v8 {
@@ -42,10 +43,11 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
// registers (see liftoff-assembler-defs.h).
class WasmDebugBreakFrameConstants : public TypedFrameConstants {
public:
- // {r3, r4, r5, r6, r7, r8, r9, r10, r11}
- static constexpr uint32_t kPushedGpRegs = 0b111111111000;
- // {d0 .. d12}
- static constexpr uint32_t kPushedFpRegs = 0b1111111111111;
+ static constexpr RegList kPushedGpRegs =
+ Register::ListOf(r3, r4, r5, r6, r7, r8, r9, r10, r11);
+
+ static constexpr RegList kPushedFpRegs = DoubleRegister::ListOf(
+ d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12);
static constexpr int kNumPushedGpRegisters =
base::bits::CountPopulation(kPushedGpRegs);
diff --git a/deps/v8/src/execution/ppc/simulator-ppc.cc b/deps/v8/src/execution/ppc/simulator-ppc.cc
index 50a47de9aaa..c812af360ea 100644
--- a/deps/v8/src/execution/ppc/simulator-ppc.cc
+++ b/deps/v8/src/execution/ppc/simulator-ppc.cc
@@ -13,6 +13,7 @@
#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
+#include "src/base/overflowing-math.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/wrappers.h"
#include "src/codegen/assembler.h"
@@ -1326,6 +1327,15 @@ void Simulator::SetCR0(intptr_t result, bool setSO) {
condition_reg_ = (condition_reg_ & ~0xF0000000) | bf;
}
+void Simulator::SetCR6(bool true_for_all, bool false_for_all) {
+ int32_t clear_cr6_mask = 0xFFFFFF0F;
+ if (true_for_all) {
+ condition_reg_ = (condition_reg_ & clear_cr6_mask) | 0x80;
+ } else if (false_for_all) {
+ condition_reg_ = (condition_reg_ & clear_cr6_mask) | 0x20;
+ }
+}
+
void Simulator::ExecuteBranchConditional(Instruction* instr, BCType type) {
int bo = instr->Bits(25, 21) << 21;
int condition_bit = instr->Bits(20, 16);
@@ -1378,6 +1388,129 @@ void Simulator::ExecuteBranchConditional(Instruction* instr, BCType type) {
}
}
+// Vector instruction helpers.
+#define GET_ADDRESS(a, b, a_val, b_val) \
+ intptr_t a_val = a == 0 ? 0 : get_register(a); \
+ intptr_t b_val = get_register(b);
+#define DECODE_VX_INSTRUCTION(d, a, b, source_or_target) \
+ int d = instr->R##source_or_target##Value(); \
+ int a = instr->RAValue(); \
+ int b = instr->RBValue();
+#define FOR_EACH_LANE(i, type) \
+ for (uint32_t i = 0; i < kSimd128Size / sizeof(type); i++)
+template <typename A, typename T, typename Operation>
+void VectorCompareOp(Simulator* sim, Instruction* instr, bool is_fp,
+ Operation op) {
+ DECODE_VX_INSTRUCTION(t, a, b, T)
+ bool true_for_all = true, false_for_all = true;
+ FOR_EACH_LANE(i, A) {
+ A a_val = sim->get_simd_register_by_lane<A>(a, i);
+ A b_val = sim->get_simd_register_by_lane<A>(b, i);
+ T t_val = 0;
+ bool is_not_nan = is_fp ? !isnan(a_val) && !isnan(b_val) : true;
+ if (is_not_nan && op(a_val, b_val)) {
+ false_for_all = false;
+ t_val = -1; // Set all bits to 1 indicating true.
+ } else {
+ true_for_all = false;
+ }
+ sim->set_simd_register_by_lane<T>(t, i, t_val);
+ }
+ if (instr->Bit(10)) { // RC bit set.
+ sim->SetCR6(true_for_all, false_for_all);
+ }
+}
+
+template <typename S, typename T>
+void VectorConverFromFPSaturate(Simulator* sim, Instruction* instr, T min_val,
+ T max_val, bool even_lane_result = false) {
+ int t = instr->RTValue();
+ int b = instr->RBValue();
+ FOR_EACH_LANE(i, S) {
+ T t_val;
+ double b_val = static_cast<double>(sim->get_simd_register_by_lane<S>(b, i));
+ if (isnan(b_val)) {
+ t_val = min_val;
+ } else {
+ // Round Towards Zero.
+ b_val = std::trunc(b_val);
+ if (b_val < min_val) {
+ t_val = min_val;
+ } else if (b_val > max_val) {
+ t_val = max_val;
+ } else {
+ t_val = static_cast<T>(b_val);
+ }
+ }
+ sim->set_simd_register_by_lane<T>(t, even_lane_result ? 2 * i : i, t_val);
+ }
+}
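VectorConverFromFPSaturate above truncates toward zero, clamps to the destination range, and maps NaN to min_val. The same semantics, written as a small self-contained function for one concrete instantiation (int32 destination; this is only an illustration of the helper's behaviour, not simulator code):

#include <cassert>
#include <cmath>
#include <cstdint>

// NaN -> minimum, otherwise truncate toward zero and clamp to the int32 range,
// mirroring the saturating conversion helper above.
int32_t SaturateDoubleToInt32(double v) {
  if (std::isnan(v)) return INT32_MIN;
  v = std::trunc(v);
  if (v < static_cast<double>(INT32_MIN)) return INT32_MIN;
  if (v > static_cast<double>(INT32_MAX)) return INT32_MAX;
  return static_cast<int32_t>(v);
}

int main() {
  assert(SaturateDoubleToInt32(1.9) == 1);            // truncation, not rounding
  assert(SaturateDoubleToInt32(-1.9) == -1);
  assert(SaturateDoubleToInt32(3e10) == INT32_MAX);   // clamped high
  assert(SaturateDoubleToInt32(-3e10) == INT32_MIN);  // clamped low
  assert(SaturateDoubleToInt32(NAN) == INT32_MIN);    // NaN takes min_val
  return 0;
}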
+
+template <typename S, typename T>
+void VectorPackSaturate(Simulator* sim, Instruction* instr, S min_val,
+ S max_val) {
+ DECODE_VX_INSTRUCTION(t, a, b, T)
+ int src = a;
+ int count = 0;
+ S value = 0;
+ // Set up a temp array to avoid overwriting dst mid-loop.
+ T temps[kSimd128Size / sizeof(T)] = {0};
+ for (size_t i = 0; i < kSimd128Size / sizeof(T); i++, count++) {
+ if (count == kSimd128Size / sizeof(S)) {
+ src = b;
+ count = 0;
+ }
+ value = sim->get_simd_register_by_lane<S>(src, count);
+ if (value > max_val) {
+ value = max_val;
+ } else if (value < min_val) {
+ value = min_val;
+ }
+ temps[i] = static_cast<T>(value);
+ }
+ FOR_EACH_LANE(i, T) { sim->set_simd_register_by_lane<T>(t, i, temps[i]); }
+}
+
+template <typename T>
+T VSXFPMin(T x, T y) {
+ // Handle NaN.
+ // TODO(miladfarca): include the payload of src1.
+ if (std::isnan(x) && std::isnan(y)) return NAN;
+ // Handle +0 and -0.
+ if (std::signbit(x) < std::signbit(y)) return y;
+ if (std::signbit(y) < std::signbit(x)) return x;
+ return std::fmin(x, y);
+}
+
+template <typename T>
+T VSXFPMax(T x, T y) {
+ // Handle NaN.
+ // TODO(miladfarca): include the payload of src1.
+ if (std::isnan(x) && std::isnan(y)) return NAN;
+ // Handle +0 and -0.
+ if (std::signbit(x) < std::signbit(y)) return x;
+ if (std::signbit(y) < std::signbit(x)) return y;
+ return std::fmax(x, y);
+}
+
+float VMXFPMin(float x, float y) {
+ // Handle NaN.
+ if (std::isnan(x) || std::isnan(y)) return NAN;
+ // Handle +0 and -0.
+ if (std::signbit(x) < std::signbit(y)) return y;
+ if (std::signbit(y) < std::signbit(x)) return x;
+ return x < y ? x : y;
+}
+
+float VMXFPMax(float x, float y) {
+ // Handle NaN.
+ if (std::isnan(x) || std::isnan(y)) return NAN;
+ // Handle +0 and -0.
+ if (std::signbit(x) < std::signbit(y)) return x;
+ if (std::signbit(y) < std::signbit(x)) return y;
+ return x > y ? x : y;
+}
+
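The ±0 handling in the min/max helpers above relies on std::signbit: -0.0 and +0.0 compare equal, but their sign bits differ, so the explicit checks guarantee that min(+0, -0) is -0 and max(+0, -0) is +0 regardless of how std::fmin/std::fmax would break the tie. A standalone copy of the min variant's logic as a quick check (illustrative only):

#include <cassert>
#include <cmath>

// Same NaN and signed-zero handling as VSXFPMin above, copied out for
// demonstration; the real helper lives in the simulator.
double SketchFPMin(double x, double y) {
  if (std::isnan(x) && std::isnan(y)) return NAN;
  if (std::signbit(x) < std::signbit(y)) return y;  // y carries the sign bit
  if (std::signbit(y) < std::signbit(x)) return x;  // x carries the sign bit
  return std::fmin(x, y);
}

int main() {
  assert(std::signbit(SketchFPMin(+0.0, -0.0)));  // -0 wins the minimum
  assert(std::signbit(SketchFPMin(-0.0, +0.0)));  // in either operand order
  assert(SketchFPMin(1.0, 2.0) == 1.0);           // ordinary values unaffected
  return 0;
}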
void Simulator::ExecuteGeneric(Instruction* instr) {
uint32_t opcode = instr->OpcodeBase();
switch (opcode) {
@@ -2297,10 +2430,17 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
break;
}
case MFVSRD: {
- DCHECK(!instr->Bit(0));
int frt = instr->RTValue();
int ra = instr->RAValue();
- int64_t frt_val = get_d_register(frt);
+ int64_t frt_val;
+ if (!instr->Bit(0)) {
+ // if double reg (TX=0).
+ frt_val = get_d_register(frt);
+ } else {
+ // if simd reg (TX=1).
+ DCHECK_EQ(instr->Bit(0), 1);
+ frt_val = get_simd_register_by_lane<int64_t>(frt, 0);
+ }
set_register(ra, frt_val);
break;
}
@@ -2313,11 +2453,28 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
break;
}
case MTVSRD: {
- DCHECK(!instr->Bit(0));
int frt = instr->RTValue();
int ra = instr->RAValue();
int64_t ra_val = get_register(ra);
- set_d_register(frt, ra_val);
+ if (!instr->Bit(0)) {
+ // if double reg (TX=0).
+ set_d_register(frt, ra_val);
+ } else {
+ // if simd reg (TX=1).
+ DCHECK_EQ(instr->Bit(0), 1);
+ set_simd_register_by_lane<int64_t>(frt, 0,
+ static_cast<int64_t>(ra_val));
+ }
+ break;
+ }
+ case MTVSRDD: {
+ int xt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ set_simd_register_by_lane<int64_t>(
+ xt, 0, static_cast<int64_t>(get_register(ra)));
+ set_simd_register_by_lane<int64_t>(
+ xt, 1, static_cast<int64_t>(get_register(rb)));
break;
}
case MTVSRWA: {
@@ -2780,6 +2937,16 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
break;
}
+ case LDBRX: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ intptr_t rb_val = get_register(rb);
+ intptr_t result = __builtin_bswap64(ReadDW(ra_val + rb_val));
+ set_register(rt, result);
+ break;
+ }
case STDX:
case STDUX: {
int rs = instr->RSValue();
@@ -3712,7 +3879,1033 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
set_d_register_from_double(frt, frt_val);
return;
}
-
+ case MTCRF: {
+ int rs = instr->RSValue();
+ uint32_t rs_val = static_cast<int32_t>(get_register(rs));
+ uint8_t fxm = instr->Bits(19, 12);
+ uint8_t bit_mask = 0x80;
+ const int field_bit_count = 4;
+ const int max_field_index = 7;
+ uint32_t result = 0;
+ for (int i = 0; i <= max_field_index; i++) {
+ result <<= field_bit_count;
+ uint32_t source = condition_reg_;
+ if ((bit_mask & fxm) != 0) {
+ // take it from rs.
+ source = rs_val;
+ }
+ result |= ((source << i * field_bit_count) >> i * field_bit_count) >>
+ (max_field_index - i) * field_bit_count;
+ bit_mask >>= 1;
+ }
+ condition_reg_ = result;
+ break;
+ }
+ // Vector instructions.
+ case LVX: {
+ DECODE_VX_INSTRUCTION(vrt, ra, rb, T)
+ GET_ADDRESS(ra, rb, ra_val, rb_val)
+ intptr_t addr = (ra_val + rb_val) & 0xFFFFFFFFFFFFFFF0;
+ simdr_t* ptr = reinterpret_cast<simdr_t*>(addr);
+ set_simd_register(vrt, *ptr);
+ break;
+ }
+ case STVX: {
+ DECODE_VX_INSTRUCTION(vrs, ra, rb, S)
+ GET_ADDRESS(ra, rb, ra_val, rb_val)
+ __int128 vrs_val =
+ *(reinterpret_cast<__int128*>(get_simd_register(vrs).int8));
+ WriteQW((ra_val + rb_val) & 0xFFFFFFFFFFFFFFF0, vrs_val);
+ break;
+ }
+ case LXVD: {
+ DECODE_VX_INSTRUCTION(xt, ra, rb, T)
+ GET_ADDRESS(ra, rb, ra_val, rb_val)
+ set_simd_register_by_lane<int64_t>(xt, 0, ReadDW(ra_val + rb_val));
+ set_simd_register_by_lane<int64_t>(
+ xt, 1, ReadDW(ra_val + rb_val + kSystemPointerSize));
+ break;
+ }
+ case STXVD: {
+ DECODE_VX_INSTRUCTION(xs, ra, rb, S)
+ GET_ADDRESS(ra, rb, ra_val, rb_val)
+ WriteDW(ra_val + rb_val, get_simd_register_by_lane<int64_t>(xs, 0));
+ WriteDW(ra_val + rb_val + kSystemPointerSize,
+ get_simd_register_by_lane<int64_t>(xs, 1));
+ break;
+ }
+ case LXSIBZX: {
+ DECODE_VX_INSTRUCTION(xt, ra, rb, T)
+ GET_ADDRESS(ra, rb, ra_val, rb_val)
+ set_simd_register_by_lane<uint64_t>(xt, 0, ReadBU(ra_val + rb_val));
+ break;
+ }
+ case LXSIHZX: {
+ DECODE_VX_INSTRUCTION(xt, ra, rb, T)
+ GET_ADDRESS(ra, rb, ra_val, rb_val)
+ set_simd_register_by_lane<uint64_t>(xt, 0, ReadHU(ra_val + rb_val));
+ break;
+ }
+ case LXSIWZX: {
+ DECODE_VX_INSTRUCTION(xt, ra, rb, T)
+ GET_ADDRESS(ra, rb, ra_val, rb_val)
+ set_simd_register_by_lane<uint64_t>(xt, 0, ReadWU(ra_val + rb_val));
+ break;
+ }
+ case LXSDX: {
+ DECODE_VX_INSTRUCTION(xt, ra, rb, T)
+ GET_ADDRESS(ra, rb, ra_val, rb_val)
+ set_simd_register_by_lane<int64_t>(xt, 0, ReadDW(ra_val + rb_val));
+ break;
+ }
+ case STXSIBX: {
+ DECODE_VX_INSTRUCTION(xs, ra, rb, S)
+ GET_ADDRESS(ra, rb, ra_val, rb_val)
+ WriteB(ra_val + rb_val, get_simd_register_by_lane<int8_t>(xs, 7));
+ break;
+ }
+ case STXSIHX: {
+ DECODE_VX_INSTRUCTION(xs, ra, rb, S)
+ GET_ADDRESS(ra, rb, ra_val, rb_val)
+ WriteH(ra_val + rb_val, get_simd_register_by_lane<int16_t>(xs, 3));
+ break;
+ }
+ case STXSIWX: {
+ DECODE_VX_INSTRUCTION(xs, ra, rb, S)
+ GET_ADDRESS(ra, rb, ra_val, rb_val)
+ WriteW(ra_val + rb_val, get_simd_register_by_lane<int32_t>(xs, 1));
+ break;
+ }
+ case STXSDX: {
+ DECODE_VX_INSTRUCTION(xs, ra, rb, S)
+ GET_ADDRESS(ra, rb, ra_val, rb_val)
+ WriteDW(ra_val + rb_val, get_simd_register_by_lane<int64_t>(xs, 0));
+ break;
+ }
+#define VSPLT(type) \
+ uint32_t uim = instr->Bits(20, 16); \
+ int vrt = instr->RTValue(); \
+ int vrb = instr->RBValue(); \
+ type value = get_simd_register_by_lane<type>(vrb, uim); \
+ FOR_EACH_LANE(i, type) { set_simd_register_by_lane<type>(vrt, i, value); }
+ case VSPLTW: {
+ VSPLT(int32_t)
+ break;
+ }
+ case VSPLTH: {
+ VSPLT(int16_t)
+ break;
+ }
+ case VSPLTB: {
+ VSPLT(int8_t)
+ break;
+ }
+ case XXSPLTIB: {
+ int8_t imm8 = instr->Bits(18, 11);
+ int t = instr->RTValue();
+ FOR_EACH_LANE(i, int8_t) {
+ set_simd_register_by_lane<int8_t>(t, i, imm8);
+ }
+ break;
+ }
+#undef VSPLT
+#define VINSERT(type, element) \
+ uint32_t uim = static_cast<uint32_t>(instr->Bits(20, 16)) / sizeof(type); \
+ int vrt = instr->RTValue(); \
+ int vrb = instr->RBValue(); \
+ set_simd_register_by_lane<type>( \
+ vrt, uim, get_simd_register_by_lane<type>(vrb, element));
+ case VINSERTD: {
+ VINSERT(int64_t, 0)
+ break;
+ }
+ case VINSERTW: {
+ VINSERT(int32_t, 1)
+ break;
+ }
+ case VINSERTH: {
+ VINSERT(int16_t, 3)
+ break;
+ }
+ case VINSERTB: {
+ VINSERT(int8_t, 7)
+ break;
+ }
+#undef VINSERT
+#define VEXTRACT(type, element) \
+ uint32_t uim = static_cast<uint32_t>(instr->Bits(20, 16)) / sizeof(type); \
+ int vrt = instr->RTValue(); \
+ int vrb = instr->RBValue(); \
+ type val = get_simd_register_by_lane<type>(vrb, uim); \
+ set_simd_register_by_lane<uint64_t>(vrt, 0, 0); \
+ set_simd_register_by_lane<uint64_t>(vrt, 1, 0); \
+ set_simd_register_by_lane<type>(vrt, element, val);
+ case VEXTRACTD: {
+ VEXTRACT(uint64_t, 0)
+ break;
+ }
+ case VEXTRACTUW: {
+ VEXTRACT(uint32_t, 1)
+ break;
+ }
+ case VEXTRACTUH: {
+ VEXTRACT(uint16_t, 3)
+ break;
+ }
+ case VEXTRACTUB: {
+ VEXTRACT(uint8_t, 7)
+ break;
+ }
+#undef VEXTRACT
+#define VECTOR_LOGICAL_OP(expr) \
+ DECODE_VX_INSTRUCTION(t, a, b, T) \
+ FOR_EACH_LANE(i, int64_t) { \
+ int64_t a_val = get_simd_register_by_lane<int64_t>(a, i); \
+ int64_t b_val = get_simd_register_by_lane<int64_t>(b, i); \
+ set_simd_register_by_lane<int64_t>(t, i, expr); \
+ }
+ case VAND: {
+ VECTOR_LOGICAL_OP(a_val & b_val)
+ break;
+ }
+ case VANDC: {
+ VECTOR_LOGICAL_OP(a_val & (~b_val))
+ break;
+ }
+ case VOR: {
+ VECTOR_LOGICAL_OP(a_val | b_val)
+ break;
+ }
+ case VNOR: {
+ VECTOR_LOGICAL_OP(~(a_val | b_val))
+ break;
+ }
+ case VXOR: {
+ VECTOR_LOGICAL_OP(a_val ^ b_val)
+ break;
+ }
+#undef VECTOR_LOGICAL_OP
+#define VECTOR_ARITHMETIC_OP(type, op) \
+ DECODE_VX_INSTRUCTION(t, a, b, T) \
+ FOR_EACH_LANE(i, type) { \
+ set_simd_register_by_lane<type>( \
+ t, i, \
+ get_simd_register_by_lane<type>(a, i) \
+ op get_simd_register_by_lane<type>(b, i)); \
+ }
+ case XVADDDP: {
+ VECTOR_ARITHMETIC_OP(double, +)
+ break;
+ }
+ case XVSUBDP: {
+ VECTOR_ARITHMETIC_OP(double, -)
+ break;
+ }
+ case XVMULDP: {
+ VECTOR_ARITHMETIC_OP(double, *)
+ break;
+ }
+ case XVDIVDP: {
+ VECTOR_ARITHMETIC_OP(double, /)
+ break;
+ }
+ case VADDFP: {
+ VECTOR_ARITHMETIC_OP(float, +)
+ break;
+ }
+ case VSUBFP: {
+ VECTOR_ARITHMETIC_OP(float, -)
+ break;
+ }
+ case XVMULSP: {
+ VECTOR_ARITHMETIC_OP(float, *)
+ break;
+ }
+ case XVDIVSP: {
+ VECTOR_ARITHMETIC_OP(float, /)
+ break;
+ }
+ case VADDUDM: {
+ VECTOR_ARITHMETIC_OP(int64_t, +)
+ break;
+ }
+ case VSUBUDM: {
+ VECTOR_ARITHMETIC_OP(int64_t, -)
+ break;
+ }
+ case VADDUWM: {
+ VECTOR_ARITHMETIC_OP(int32_t, +)
+ break;
+ }
+ case VSUBUWM: {
+ VECTOR_ARITHMETIC_OP(int32_t, -)
+ break;
+ }
+ case VMULUWM: {
+ VECTOR_ARITHMETIC_OP(int32_t, *)
+ break;
+ }
+ case VADDUHM: {
+ VECTOR_ARITHMETIC_OP(int16_t, +)
+ break;
+ }
+ case VSUBUHM: {
+ VECTOR_ARITHMETIC_OP(int16_t, -)
+ break;
+ }
+ case VADDUBM: {
+ VECTOR_ARITHMETIC_OP(int8_t, +)
+ break;
+ }
+ case VSUBUBM: {
+ VECTOR_ARITHMETIC_OP(int8_t, -)
+ break;
+ }
+#define VECTOR_MULTIPLY_EVEN_ODD(input_type, result_type, is_odd) \
+ DECODE_VX_INSTRUCTION(t, a, b, T) \
+ size_t i = 0, j = 0, k = 0; \
+ size_t lane_size = sizeof(input_type); \
+ if (is_odd) { \
+ i = 1; \
+ j = lane_size; \
+ } \
+ for (; j < kSimd128Size; i += 2, j += lane_size * 2, k++) { \
+ result_type src0 = \
+ static_cast<result_type>(get_simd_register_by_lane<input_type>(a, i)); \
+ result_type src1 = \
+ static_cast<result_type>(get_simd_register_by_lane<input_type>(b, i)); \
+ set_simd_register_by_lane<result_type>(t, k, src0 * src1); \
+ }
+ case VMULEUB: {
+ VECTOR_MULTIPLY_EVEN_ODD(uint8_t, uint16_t, false)
+ break;
+ }
+ case VMULESB: {
+ VECTOR_MULTIPLY_EVEN_ODD(int8_t, int16_t, false)
+ break;
+ }
+ case VMULOUB: {
+ VECTOR_MULTIPLY_EVEN_ODD(uint8_t, uint16_t, true)
+ break;
+ }
+ case VMULOSB: {
+ VECTOR_MULTIPLY_EVEN_ODD(int8_t, int16_t, true)
+ break;
+ }
+ case VMULEUH: {
+ VECTOR_MULTIPLY_EVEN_ODD(uint16_t, uint32_t, false)
+ break;
+ }
+ case VMULESH: {
+ VECTOR_MULTIPLY_EVEN_ODD(int16_t, int32_t, false)
+ break;
+ }
+ case VMULOUH: {
+ VECTOR_MULTIPLY_EVEN_ODD(uint16_t, uint32_t, true)
+ break;
+ }
+ case VMULOSH: {
+ VECTOR_MULTIPLY_EVEN_ODD(int16_t, int32_t, true)
+ break;
+ }
+ case VMULEUW: {
+ VECTOR_MULTIPLY_EVEN_ODD(uint32_t, uint64_t, false)
+ break;
+ }
+ case VMULESW: {
+ VECTOR_MULTIPLY_EVEN_ODD(int32_t, int64_t, false)
+ break;
+ }
+ case VMULOUW: {
+ VECTOR_MULTIPLY_EVEN_ODD(uint32_t, uint64_t, true)
+ break;
+ }
+ case VMULOSW: {
+ VECTOR_MULTIPLY_EVEN_ODD(int32_t, int64_t, true)
+ break;
+ }
+#undef VECTOR_MULTIPLY_EVEN_ODD
+#define VECTOR_MERGE(type, is_low_side) \
+ DECODE_VX_INSTRUCTION(t, a, b, T) \
+ constexpr size_t index_limit = (kSimd128Size / sizeof(type)) / 2; \
+ for (size_t i = 0, source_index = is_low_side ? i + index_limit : i; \
+ i < index_limit; i++, source_index++) { \
+ set_simd_register_by_lane<type>( \
+ t, 2 * i, get_simd_register_by_lane<type>(a, source_index)); \
+ set_simd_register_by_lane<type>( \
+ t, (2 * i) + 1, get_simd_register_by_lane<type>(b, source_index)); \
+ }
+ case VMRGLW: {
+ VECTOR_MERGE(int32_t, true)
+ break;
+ }
+ case VMRGHW: {
+ VECTOR_MERGE(int32_t, false)
+ break;
+ }
+ case VMRGLH: {
+ VECTOR_MERGE(int16_t, true)
+ break;
+ }
+ case VMRGHH: {
+ VECTOR_MERGE(int16_t, false)
+ break;
+ }
+#undef VECTOR_MERGE
+#undef VECTOR_ARITHMETIC_OP
+#define VECTOR_MIN_MAX_OP(type, op) \
+ DECODE_VX_INSTRUCTION(t, a, b, T) \
+ FOR_EACH_LANE(i, type) { \
+ type a_val = get_simd_register_by_lane<type>(a, i); \
+ type b_val = get_simd_register_by_lane<type>(b, i); \
+ set_simd_register_by_lane<type>(t, i, a_val op b_val ? a_val : b_val); \
+ }
+ case XVMINDP: {
+ DECODE_VX_INSTRUCTION(t, a, b, T)
+ FOR_EACH_LANE(i, double) {
+ double a_val = get_simd_register_by_lane<double>(a, i);
+ double b_val = get_simd_register_by_lane<double>(b, i);
+ set_simd_register_by_lane<double>(t, i, VSXFPMin<double>(a_val, b_val));
+ }
+ break;
+ }
+ case XVMAXDP: {
+ DECODE_VX_INSTRUCTION(t, a, b, T)
+ FOR_EACH_LANE(i, double) {
+ double a_val = get_simd_register_by_lane<double>(a, i);
+ double b_val = get_simd_register_by_lane<double>(b, i);
+ set_simd_register_by_lane<double>(t, i, VSXFPMax<double>(a_val, b_val));
+ }
+ break;
+ }
+ case VMINFP: {
+ DECODE_VX_INSTRUCTION(t, a, b, T)
+ FOR_EACH_LANE(i, float) {
+ float a_val = get_simd_register_by_lane<float>(a, i);
+ float b_val = get_simd_register_by_lane<float>(b, i);
+ set_simd_register_by_lane<float>(t, i, VMXFPMin(a_val, b_val));
+ }
+ break;
+ }
+ case VMAXFP: {
+ DECODE_VX_INSTRUCTION(t, a, b, T)
+ FOR_EACH_LANE(i, float) {
+ float a_val = get_simd_register_by_lane<float>(a, i);
+ float b_val = get_simd_register_by_lane<float>(b, i);
+ set_simd_register_by_lane<float>(t, i, VMXFPMax(a_val, b_val));
+ }
+ break;
+ }
+ case VMINSD: {
+ VECTOR_MIN_MAX_OP(int64_t, <)
+ break;
+ }
+ case VMINUD: {
+ VECTOR_MIN_MAX_OP(uint64_t, <)
+ break;
+ }
+ case VMINSW: {
+ VECTOR_MIN_MAX_OP(int32_t, <)
+ break;
+ }
+ case VMINUW: {
+ VECTOR_MIN_MAX_OP(uint32_t, <)
+ break;
+ }
+ case VMINSH: {
+ VECTOR_MIN_MAX_OP(int16_t, <)
+ break;
+ }
+ case VMINUH: {
+ VECTOR_MIN_MAX_OP(uint16_t, <)
+ break;
+ }
+ case VMINSB: {
+ VECTOR_MIN_MAX_OP(int8_t, <)
+ break;
+ }
+ case VMINUB: {
+ VECTOR_MIN_MAX_OP(uint8_t, <)
+ break;
+ }
+ case VMAXSD: {
+ VECTOR_MIN_MAX_OP(int64_t, >)
+ break;
+ }
+ case VMAXUD: {
+ VECTOR_MIN_MAX_OP(uint64_t, >)
+ break;
+ }
+ case VMAXSW: {
+ VECTOR_MIN_MAX_OP(int32_t, >)
+ break;
+ }
+ case VMAXUW: {
+ VECTOR_MIN_MAX_OP(uint32_t, >)
+ break;
+ }
+ case VMAXSH: {
+ VECTOR_MIN_MAX_OP(int16_t, >)
+ break;
+ }
+ case VMAXUH: {
+ VECTOR_MIN_MAX_OP(uint16_t, >)
+ break;
+ }
+ case VMAXSB: {
+ VECTOR_MIN_MAX_OP(int8_t, >)
+ break;
+ }
+ case VMAXUB: {
+ VECTOR_MIN_MAX_OP(uint8_t, >)
+ break;
+ }
+#undef VECTOR_MIN_MAX_OP
+#define VECTOR_SHIFT_OP(type, op, mask) \
+ DECODE_VX_INSTRUCTION(t, a, b, T) \
+ FOR_EACH_LANE(i, type) { \
+ set_simd_register_by_lane<type>( \
+ t, i, \
+ get_simd_register_by_lane<type>(a, i) \
+ op(get_simd_register_by_lane<type>(b, i) & mask)); \
+ }
+ case VSLD: {
+ VECTOR_SHIFT_OP(int64_t, <<, 0x3f)
+ break;
+ }
+ case VSRAD: {
+ VECTOR_SHIFT_OP(int64_t, >>, 0x3f)
+ break;
+ }
+ case VSRD: {
+ VECTOR_SHIFT_OP(uint64_t, >>, 0x3f)
+ break;
+ }
+ case VSLW: {
+ VECTOR_SHIFT_OP(int32_t, <<, 0x1f)
+ break;
+ }
+ case VSRAW: {
+ VECTOR_SHIFT_OP(int32_t, >>, 0x1f)
+ break;
+ }
+ case VSRW: {
+ VECTOR_SHIFT_OP(uint32_t, >>, 0x1f)
+ break;
+ }
+ case VSLH: {
+ VECTOR_SHIFT_OP(int16_t, <<, 0xf)
+ break;
+ }
+ case VSRAH: {
+ VECTOR_SHIFT_OP(int16_t, >>, 0xf)
+ break;
+ }
+ case VSRH: {
+ VECTOR_SHIFT_OP(uint16_t, >>, 0xf)
+ break;
+ }
+ case VSLB: {
+ VECTOR_SHIFT_OP(int8_t, <<, 0x7)
+ break;
+ }
+ case VSRAB: {
+ VECTOR_SHIFT_OP(int8_t, >>, 0x7)
+ break;
+ }
+ case VSRB: {
+ VECTOR_SHIFT_OP(uint8_t, >>, 0x7)
+ break;
+ }
+#undef VECTOR_SHIFT_OP
+#define VECTOR_COMPARE_OP(type_in, type_out, is_fp, op) \
+ VectorCompareOp<type_in, type_out>( \
+ this, instr, is_fp, [](type_in a, type_in b) { return a op b; });
+ case XVCMPEQDP: {
+ VECTOR_COMPARE_OP(double, int64_t, true, ==)
+ break;
+ }
+ case XVCMPGEDP: {
+ VECTOR_COMPARE_OP(double, int64_t, true, >=)
+ break;
+ }
+ case XVCMPGTDP: {
+ VECTOR_COMPARE_OP(double, int64_t, true, >)
+ break;
+ }
+ case XVCMPEQSP: {
+ VECTOR_COMPARE_OP(float, int32_t, true, ==)
+ break;
+ }
+ case XVCMPGESP: {
+ VECTOR_COMPARE_OP(float, int32_t, true, >=)
+ break;
+ }
+ case XVCMPGTSP: {
+ VECTOR_COMPARE_OP(float, int32_t, true, >)
+ break;
+ }
+ case VCMPEQUD: {
+ VECTOR_COMPARE_OP(uint64_t, int64_t, false, ==)
+ break;
+ }
+ case VCMPGTSD: {
+ VECTOR_COMPARE_OP(int64_t, int64_t, false, >)
+ break;
+ }
+ case VCMPGTUD: {
+ VECTOR_COMPARE_OP(uint64_t, int64_t, false, >)
+ break;
+ }
+ case VCMPEQUW: {
+ VECTOR_COMPARE_OP(uint32_t, int32_t, false, ==)
+ break;
+ }
+ case VCMPGTSW: {
+ VECTOR_COMPARE_OP(int32_t, int32_t, false, >)
+ break;
+ }
+ case VCMPGTUW: {
+ VECTOR_COMPARE_OP(uint32_t, int32_t, false, >)
+ break;
+ }
+ case VCMPEQUH: {
+ VECTOR_COMPARE_OP(uint16_t, int16_t, false, ==)
+ break;
+ }
+ case VCMPGTSH: {
+ VECTOR_COMPARE_OP(int16_t, int16_t, false, >)
+ break;
+ }
+ case VCMPGTUH: {
+ VECTOR_COMPARE_OP(uint16_t, int16_t, false, >)
+ break;
+ }
+ case VCMPEQUB: {
+ VECTOR_COMPARE_OP(uint8_t, int8_t, false, ==)
+ break;
+ }
+ case VCMPGTSB: {
+ VECTOR_COMPARE_OP(int8_t, int8_t, false, >)
+ break;
+ }
+ case VCMPGTUB: {
+ VECTOR_COMPARE_OP(uint8_t, int8_t, false, >)
+ break;
+ }
+#undef VECTOR_COMPARE_OP
+ case XVCVSPSXWS: {
+ VectorConverFromFPSaturate<float, int32_t>(this, instr, kMinInt, kMaxInt);
+ break;
+ }
+ case XVCVSPUXWS: {
+ VectorConverFromFPSaturate<float, uint32_t>(this, instr, 0, kMaxUInt32);
+ break;
+ }
+ case XVCVDPSXWS: {
+ VectorConverFromFPSaturate<double, int32_t>(this, instr, kMinInt, kMaxInt,
+ true);
+ break;
+ }
+ case XVCVDPUXWS: {
+ VectorConverFromFPSaturate<double, uint32_t>(this, instr, 0, kMaxUInt32,
+ true);
+ break;
+ }
+ case XVCVSXWSP: {
+ int t = instr->RTValue();
+ int b = instr->RBValue();
+ FOR_EACH_LANE(i, int32_t) {
+ int32_t b_val = get_simd_register_by_lane<int32_t>(b, i);
+ set_simd_register_by_lane<float>(t, i, static_cast<float>(b_val));
+ }
+ break;
+ }
+ case XVCVUXWSP: {
+ int t = instr->RTValue();
+ int b = instr->RBValue();
+ FOR_EACH_LANE(i, uint32_t) {
+ uint32_t b_val = get_simd_register_by_lane<uint32_t>(b, i);
+ set_simd_register_by_lane<float>(t, i, static_cast<float>(b_val));
+ }
+ break;
+ }
+ case XVCVSXDDP: {
+ int t = instr->RTValue();
+ int b = instr->RBValue();
+ FOR_EACH_LANE(i, int64_t) {
+ int64_t b_val = get_simd_register_by_lane<int64_t>(b, i);
+ set_simd_register_by_lane<double>(t, i, static_cast<double>(b_val));
+ }
+ break;
+ }
+ case XVCVUXDDP: {
+ int t = instr->RTValue();
+ int b = instr->RBValue();
+ FOR_EACH_LANE(i, uint64_t) {
+ uint64_t b_val = get_simd_register_by_lane<uint64_t>(b, i);
+ set_simd_register_by_lane<double>(t, i, static_cast<double>(b_val));
+ }
+ break;
+ }
+ case XVCVSPDP: {
+ int t = instr->RTValue();
+ int b = instr->RBValue();
+ FOR_EACH_LANE(i, double) {
+ float b_val = get_simd_register_by_lane<float>(b, 2 * i);
+ set_simd_register_by_lane<double>(t, i, static_cast<double>(b_val));
+ }
+ break;
+ }
+ case XVCVDPSP: {
+ int t = instr->RTValue();
+ int b = instr->RBValue();
+ FOR_EACH_LANE(i, double) {
+ double b_val = get_simd_register_by_lane<double>(b, i);
+ set_simd_register_by_lane<float>(t, 2 * i, static_cast<float>(b_val));
+ }
+ break;
+ }
+#define VECTOR_UNPACK(S, D, if_high_side) \
+ int t = instr->RTValue(); \
+ int b = instr->RBValue(); \
+ constexpr size_t kItemCount = kSimd128Size / sizeof(D); \
+ D temps[kItemCount] = {0}; \
+ /* Avoid overwriting src if src and dst are the same register. */ \
+ FOR_EACH_LANE(i, D) { \
+ temps[i] = get_simd_register_by_lane<S>(b, i, if_high_side); \
+ } \
+ FOR_EACH_LANE(i, D) { \
+ set_simd_register_by_lane<D>(t, i, temps[i], if_high_side); \
+ }
+ case VUPKHSB: {
+ VECTOR_UNPACK(int8_t, int16_t, true)
+ break;
+ }
+ case VUPKHSH: {
+ VECTOR_UNPACK(int16_t, int32_t, true)
+ break;
+ }
+ case VUPKHSW: {
+ VECTOR_UNPACK(int32_t, int64_t, true)
+ break;
+ }
+ case VUPKLSB: {
+ VECTOR_UNPACK(int8_t, int16_t, false)
+ break;
+ }
+ case VUPKLSH: {
+ VECTOR_UNPACK(int16_t, int32_t, false)
+ break;
+ }
+ case VUPKLSW: {
+ VECTOR_UNPACK(int32_t, int64_t, false)
+ break;
+ }
+#undef VECTOR_UNPACK
+ case VPKSWSS: {
+ VectorPackSaturate<int32_t, int16_t>(this, instr, kMinInt16, kMaxInt16);
+ break;
+ }
+ case VPKSWUS: {
+ VectorPackSaturate<int32_t, uint16_t>(this, instr, 0, kMaxUInt16);
+ break;
+ }
+ case VPKSHSS: {
+ VectorPackSaturate<int16_t, int8_t>(this, instr, kMinInt8, kMaxInt8);
+ break;
+ }
+ case VPKSHUS: {
+ VectorPackSaturate<int16_t, uint8_t>(this, instr, 0, kMaxUInt8);
+ break;
+ }
+#define VECTOR_ADD_SUB_SATURATE(intermediate_type, result_type, op, min_val, \
+ max_val) \
+ DECODE_VX_INSTRUCTION(t, a, b, T) \
+ FOR_EACH_LANE(i, result_type) { \
+ intermediate_type a_val = static_cast<intermediate_type>( \
+ get_simd_register_by_lane<result_type>(a, i)); \
+ intermediate_type b_val = static_cast<intermediate_type>( \
+ get_simd_register_by_lane<result_type>(b, i)); \
+ intermediate_type t_val = a_val op b_val; \
+ if (t_val > max_val) \
+ t_val = max_val; \
+ else if (t_val < min_val) \
+ t_val = min_val; \
+ set_simd_register_by_lane<result_type>(t, i, \
+ static_cast<result_type>(t_val)); \
+ }
+ case VADDSHS: {
+ VECTOR_ADD_SUB_SATURATE(int32_t, int16_t, +, kMinInt16, kMaxInt16)
+ break;
+ }
+ case VSUBSHS: {
+ VECTOR_ADD_SUB_SATURATE(int32_t, int16_t, -, kMinInt16, kMaxInt16)
+ break;
+ }
+ case VADDUHS: {
+ VECTOR_ADD_SUB_SATURATE(int32_t, uint16_t, +, 0, kMaxUInt16)
+ break;
+ }
+ case VSUBUHS: {
+ VECTOR_ADD_SUB_SATURATE(int32_t, uint16_t, -, 0, kMaxUInt16)
+ break;
+ }
+ case VADDSBS: {
+ VECTOR_ADD_SUB_SATURATE(int16_t, int8_t, +, kMinInt8, kMaxInt8)
+ break;
+ }
+ case VSUBSBS: {
+ VECTOR_ADD_SUB_SATURATE(int16_t, int8_t, -, kMinInt8, kMaxInt8)
+ break;
+ }
+ case VADDUBS: {
+ VECTOR_ADD_SUB_SATURATE(int16_t, uint8_t, +, 0, kMaxUInt8)
+ break;
+ }
+ case VSUBUBS: {
+ VECTOR_ADD_SUB_SATURATE(int16_t, uint8_t, -, 0, kMaxUInt8)
+ break;
+ }
+#undef VECTOR_ADD_SUB_SATURATE
+#define VECTOR_FP_ROUNDING(type, op) \
+ int t = instr->RTValue(); \
+ int b = instr->RBValue(); \
+ FOR_EACH_LANE(i, type) { \
+ type b_val = get_simd_register_by_lane<type>(b, i); \
+ set_simd_register_by_lane<type>(t, i, std::op(b_val)); \
+ }
+ case XVRDPIP: {
+ VECTOR_FP_ROUNDING(double, ceil)
+ break;
+ }
+ case XVRDPIM: {
+ VECTOR_FP_ROUNDING(double, floor)
+ break;
+ }
+ case XVRDPIZ: {
+ VECTOR_FP_ROUNDING(double, trunc)
+ break;
+ }
+ case XVRDPI: {
+ VECTOR_FP_ROUNDING(double, nearbyint)
+ break;
+ }
+ case XVRSPIP: {
+ VECTOR_FP_ROUNDING(float, ceilf)
+ break;
+ }
+ case XVRSPIM: {
+ VECTOR_FP_ROUNDING(float, floorf)
+ break;
+ }
+ case XVRSPIZ: {
+ VECTOR_FP_ROUNDING(float, truncf)
+ break;
+ }
+ case XVRSPI: {
+ VECTOR_FP_ROUNDING(float, nearbyintf)
+ break;
+ }
+#undef VECTOR_FP_ROUNDING
+ case VSEL: {
+ int vrt = instr->RTValue();
+ int vra = instr->RAValue();
+ int vrb = instr->RBValue();
+ int vrc = instr->RCValue();
+ FOR_EACH_LANE(i, int64_t) {
+ int64_t vra_val = get_simd_register_by_lane<int64_t>(vra, i);
+ int64_t vrb_val = get_simd_register_by_lane<int64_t>(vrb, i);
+ int64_t mask = get_simd_register_by_lane<int64_t>(vrc, i);
+ int64_t temp = vra_val ^ vrb_val;
+ temp = temp & mask;
+ set_simd_register_by_lane<int64_t>(vrt, i, temp ^ vra_val);
+ }
+ break;
+ }
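The VSEL case above encodes a branch-free bitwise select: ((a ^ b) & mask) ^ a takes each bit from b where the mask bit is 1 and from a where it is 0. A small standalone check of that identity (not tied to the simulator registers):

#include <cassert>
#include <cstdint>

// Branch-free select: mask bit 1 -> bit from b, mask bit 0 -> bit from a.
uint32_t BitSelect(uint32_t a, uint32_t b, uint32_t mask) {
  return ((a ^ b) & mask) ^ a;
}

int main() {
  assert(BitSelect(0xFF00FF00u, 0x00FF00FFu, 0x0F0F0F0Fu) == 0xF00FF00Fu);
  assert(BitSelect(0x12345678u, 0xABCDEF01u, 0x00000000u) == 0x12345678u);  // all from a
  assert(BitSelect(0x12345678u, 0xABCDEF01u, 0xFFFFFFFFu) == 0xABCDEF01u);  // all from b
  return 0;
}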
+ case VPERM: {
+ int vrt = instr->RTValue();
+ int vra = instr->RAValue();
+ int vrb = instr->RBValue();
+ int vrc = instr->RCValue();
+ int8_t temp[kSimd128Size] = {0};
+ FOR_EACH_LANE(i, int8_t) {
+ int8_t lane_num = get_simd_register_by_lane<int8_t>(vrc, i);
+ // Get the five least significant bits.
+ lane_num = (lane_num << 3) >> 3;
+ int reg = vra;
+ if (lane_num >= kSimd128Size) {
+ lane_num = lane_num - kSimd128Size;
+ reg = vrb;
+ }
+ temp[i] = get_simd_register_by_lane<int8_t>(reg, lane_num);
+ }
+ FOR_EACH_LANE(i, int8_t) {
+ set_simd_register_by_lane<int8_t>(vrt, i, temp[i]);
+ }
+ break;
+ }
+ case VBPERMQ: {
+ DECODE_VX_INSTRUCTION(t, a, b, T)
+ uint16_t result_bits = 0;
+ unsigned __int128 src_bits =
+ *(reinterpret_cast<__int128*>(get_simd_register(a).int8));
+ for (int i = 0; i < kSimd128Size; i++) {
+ result_bits <<= 1;
+ uint8_t selected_bit_index = get_simd_register_by_lane<uint8_t>(b, i);
+ if (selected_bit_index < (kSimd128Size * kBitsPerByte)) {
+ unsigned __int128 bit_value = (src_bits << selected_bit_index) >>
+ (kSimd128Size * kBitsPerByte - 1);
+ result_bits |= bit_value;
+ }
+ }
+ set_simd_register_by_lane<uint64_t>(t, 0, 0);
+ set_simd_register_by_lane<uint64_t>(t, 1, 0);
+ set_simd_register_by_lane<uint16_t>(t, 3, result_bits);
+ break;
+ }
+#define VECTOR_FP_QF(type, sign) \
+ DECODE_VX_INSTRUCTION(t, a, b, T) \
+ FOR_EACH_LANE(i, type) { \
+ type a_val = get_simd_register_by_lane<type>(a, i); \
+ type b_val = get_simd_register_by_lane<type>(b, i); \
+ type t_val = get_simd_register_by_lane<type>(t, i); \
+ type result = sign * ((sign * b_val) + (a_val * t_val)); \
+ if (isinf(a_val)) result = a_val; \
+ if (isinf(b_val)) result = b_val; \
+ if (isinf(t_val)) result = t_val; \
+ set_simd_register_by_lane<type>(t, i, result); \
+ }
+ case XVMADDMDP: {
+ VECTOR_FP_QF(double, +1)
+ break;
+ }
+ case XVNMSUBMDP: {
+ VECTOR_FP_QF(double, -1)
+ break;
+ }
+ case XVMADDMSP: {
+ VECTOR_FP_QF(float, +1)
+ break;
+ }
+ case XVNMSUBMSP: {
+ VECTOR_FP_QF(float, -1)
+ break;
+ }
+#undef VECTOR_FP_QF
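With sign = +1, the VECTOR_FP_QF expansion above computes a fused multiply-add a_val * t_val + b_val; with sign = -1 it computes the negated form -(a_val * t_val - b_val), i.e. b_val - a_val * t_val, with infinities passed through. A standalone arithmetic check of that sign trick (illustrative only, ignoring the infinity handling):

#include <cassert>

// sign = +1 -> a*t + b, sign = -1 -> b - a*t, matching the macro's
// sign * ((sign * b) + (a * t)) formulation.
double SignedMulAdd(double a, double b, double t, int sign) {
  return sign * ((sign * b) + (a * t));
}

int main() {
  assert(SignedMulAdd(2.0, 3.0, 4.0, +1) == 11.0);  // 2*4 + 3
  assert(SignedMulAdd(2.0, 3.0, 4.0, -1) == -5.0);  // 3 - 2*4
  return 0;
}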
+ case VMHRADDSHS: {
+ int vrt = instr->RTValue();
+ int vra = instr->RAValue();
+ int vrb = instr->RBValue();
+ int vrc = instr->RCValue();
+ FOR_EACH_LANE(i, int16_t) {
+ int16_t vra_val = get_simd_register_by_lane<int16_t>(vra, i);
+ int16_t vrb_val = get_simd_register_by_lane<int16_t>(vrb, i);
+ int16_t vrc_val = get_simd_register_by_lane<int16_t>(vrc, i);
+ int32_t temp = vra_val * vrb_val;
+ temp = (temp + 0x00004000) >> 15;
+ temp += vrc_val;
+ if (temp > kMaxInt16)
+ temp = kMaxInt16;
+ else if (temp < kMinInt16)
+ temp = kMinInt16;
+ set_simd_register_by_lane<int16_t>(vrt, i, static_cast<int16_t>(temp));
+ }
+ break;
+ }
+ case VMSUMSHM: {
+ int vrt = instr->RTValue();
+ int vra = instr->RAValue();
+ int vrb = instr->RBValue();
+ int vrc = instr->RCValue();
+ FOR_EACH_LANE(i, int32_t) {
+ int16_t vra_1_val = get_simd_register_by_lane<int16_t>(vra, 2 * i);
+ int16_t vra_2_val =
+ get_simd_register_by_lane<int16_t>(vra, (2 * i) + 1);
+ int16_t vrb_1_val = get_simd_register_by_lane<int16_t>(vrb, 2 * i);
+ int16_t vrb_2_val =
+ get_simd_register_by_lane<int16_t>(vrb, (2 * i) + 1);
+ int32_t vrc_val = get_simd_register_by_lane<int32_t>(vrc, i);
+ int32_t temp1 = vra_1_val * vrb_1_val, temp2 = vra_2_val * vrb_2_val;
+ temp1 = temp1 + temp2 + vrc_val;
+ set_simd_register_by_lane<int32_t>(vrt, i, temp1);
+ }
+ break;
+ }
+#define VECTOR_UNARY_OP(type, op) \
+ int t = instr->RTValue(); \
+ int b = instr->RBValue(); \
+ FOR_EACH_LANE(i, type) { \
+ set_simd_register_by_lane<type>( \
+ t, i, op(get_simd_register_by_lane<type>(b, i))); \
+ }
+ case XVABSDP: {
+ VECTOR_UNARY_OP(double, std::abs)
+ break;
+ }
+ case XVNEGDP: {
+ VECTOR_UNARY_OP(double, -)
+ break;
+ }
+ case XVSQRTDP: {
+ VECTOR_UNARY_OP(double, std::sqrt)
+ break;
+ }
+ case XVABSSP: {
+ VECTOR_UNARY_OP(float, std::abs)
+ break;
+ }
+ case XVNEGSP: {
+ VECTOR_UNARY_OP(float, -)
+ break;
+ }
+ case XVSQRTSP: {
+ VECTOR_UNARY_OP(float, std::sqrt)
+ break;
+ }
+ case XVRESP: {
+ VECTOR_UNARY_OP(float, base::Recip)
+ break;
+ }
+ case XVRSQRTESP: {
+ VECTOR_UNARY_OP(float, base::RecipSqrt)
+ break;
+ }
+#undef VECTOR_UNARY_OP
+#define VECTOR_ROUNDING_AVERAGE(intermediate_type, result_type) \
+ DECODE_VX_INSTRUCTION(t, a, b, T) \
+ FOR_EACH_LANE(i, result_type) { \
+ intermediate_type a_val = static_cast<intermediate_type>( \
+ get_simd_register_by_lane<result_type>(a, i)); \
+ intermediate_type b_val = static_cast<intermediate_type>( \
+ get_simd_register_by_lane<result_type>(b, i)); \
+ intermediate_type t_val = ((a_val + b_val) + 1) >> 1; \
+ set_simd_register_by_lane<result_type>(t, i, \
+ static_cast<result_type>(t_val)); \
+ }
+ case VAVGUH: {
+ VECTOR_ROUNDING_AVERAGE(uint32_t, uint16_t)
+ break;
+ }
+ case VAVGUB: {
+ VECTOR_ROUNDING_AVERAGE(uint16_t, uint8_t)
+ break;
+ }
+#undef VECTOR_ROUNDING_AVERAGE
+ case VPOPCNTB: {
+ int t = instr->RTValue();
+ int b = instr->RBValue();
+ FOR_EACH_LANE(i, uint8_t) {
+ set_simd_register_by_lane<uint8_t>(
+ t, i,
+ base::bits::CountPopulation(
+ get_simd_register_by_lane<uint8_t>(b, i)));
+ }
+ break;
+ }
+#undef FOR_EACH_LANE
+#undef DECODE_VX_INSTRUCTION
+#undef GET_ADDRESS
default: {
UNIMPLEMENTED();
break;
diff --git a/deps/v8/src/execution/ppc/simulator-ppc.h b/deps/v8/src/execution/ppc/simulator-ppc.h
index 83b61091d76..bacd844be05 100644
--- a/deps/v8/src/execution/ppc/simulator-ppc.h
+++ b/deps/v8/src/execution/ppc/simulator-ppc.h
@@ -126,7 +126,42 @@ class Simulator : public SimulatorBase {
d29,
d30,
d31,
- kNumFPRs = 32
+ kNumFPRs = 32,
+ // PPC Simd registers are a separate set from Floating Point registers. Refer
+ // to register-ppc.h for more details.
+ v0 = 0,
+ v1,
+ v2,
+ v3,
+ v4,
+ v5,
+ v6,
+ v7,
+ v8,
+ v9,
+ v10,
+ v11,
+ v12,
+ v13,
+ v14,
+ v15,
+ v16,
+ v17,
+ v18,
+ v19,
+ v20,
+ v21,
+ v22,
+ v23,
+ v24,
+ v25,
+ v26,
+ v27,
+ v28,
+ v29,
+ v30,
+ v31,
+ kNumSIMDRs = 32
};
explicit Simulator(Isolate* isolate);
@@ -204,7 +239,6 @@ class Simulator : public SimulatorBase {
// below (bad_lr, end_sim_pc).
bool has_bad_pc() const;
- private:
enum special_values {
// Known bad pc value to ensure that the simulator does not execute
// without being properly setup.
@@ -287,10 +321,12 @@ class Simulator : public SimulatorBase {
}
}
-#define RW_VAR_LIST(V) \
- V(DWU, uint64_t) \
- V(DW, int64_t) \
- V(WU, uint32_t) \
+#define RW_VAR_LIST(V) \
+ V(QWU, unsigned __int128) \
+ V(QW, __int128) \
+ V(DWU, uint64_t) \
+ V(DW, int64_t) \
+ V(WU, uint32_t) \
V(W, int32_t) V(HU, uint16_t) V(H, int16_t) V(BU, uint8_t) V(B, int8_t)
#define GENERATE_RW_FUNC(size, type) \
@@ -304,6 +340,7 @@ class Simulator : public SimulatorBase {
void Trace(Instruction* instr);
void SetCR0(intptr_t result, bool setSO = false);
+ void SetCR6(bool true_for_all, bool false_for_all);
void ExecuteBranchConditional(Instruction* instr, BCType type);
void ExecuteGeneric(Instruction* instr);
@@ -342,6 +379,64 @@ class Simulator : public SimulatorBase {
int64_t fp_registers_[kNumFPRs];
+ // Simd registers.
+ union simdr_t {
+ int8_t int8[16];
+ uint8_t uint8[16];
+ int16_t int16[8];
+ uint16_t uint16[8];
+ int32_t int32[4];
+ uint32_t uint32[4];
+ int64_t int64[2];
+ uint64_t uint64[2];
+ float f32[4];
+ double f64[2];
+ };
+ simdr_t simd_registers_[kNumSIMDRs];
+
+ // Vector register lane numbers on IBM machines are reversed compared to
+ // x64. For example, doing an I32x4 extract_lane with lane number 0 on x64
+ // will be equal to lane number 3 on IBM machines. Vector registers are only
+ // used for compiling Wasm code at the moment. To keep the Wasm
+ // simulation accurate, we need to make sure accessing a lane is correctly
+ // simulated and as such we reverse the lane number on the getters and setters
+ // below. We need to be careful when getting/setting values on the Low or High
+ // side of a simulated register. In the simulation, "Low" is equal to the MSB
+ // and "High" is equal to the LSB in memory. "force_ibm_lane_numbering" could
+ // be used to disable automatic lane number reversal and help with accessing
+ // the Low or High side of a simulated register.
+ template <class T>
+ T get_simd_register_by_lane(int reg, int lane,
+ bool force_ibm_lane_numbering = true) {
+ if (force_ibm_lane_numbering) {
+ lane = (kSimd128Size / sizeof(T)) - 1 - lane;
+ }
+ CHECK_LE(lane, kSimd128Size / sizeof(T));
+ CHECK_LT(reg, kNumSIMDRs);
+ CHECK_GE(lane, 0);
+ CHECK_GE(reg, 0);
+ return (reinterpret_cast<T*>(&simd_registers_[reg]))[lane];
+ }
+
+ template <class T>
+ void set_simd_register_by_lane(int reg, int lane, const T& value,
+ bool force_ibm_lane_numbering = true) {
+ if (force_ibm_lane_numbering) {
+ lane = (kSimd128Size / sizeof(T)) - 1 - lane;
+ }
+ CHECK_LE(lane, kSimd128Size / sizeof(T));
+ CHECK_LT(reg, kNumSIMDRs);
+ CHECK_GE(lane, 0);
+ CHECK_GE(reg, 0);
+ (reinterpret_cast<T*>(&simd_registers_[reg]))[lane] = value;
+ }
+
+ simdr_t get_simd_register(int reg) { return simd_registers_[reg]; }
+
+ void set_simd_register(int reg, const simdr_t& value) {
+ simd_registers_[reg] = value;
+ }
+
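The lane reversal described in the comment above amounts to indexing from the opposite end of the 128-bit register. A minimal compile-time sketch of that index mapping (standalone; the 16-byte register size is assumed here, matching kSimd128Size in V8):

#include <cstddef>

constexpr size_t kSimd128Bytes = 16;

// x64 lane i of an N-lane vector corresponds to IBM lane (N - 1 - i),
// which is exactly what the accessors above compute before indexing.
constexpr size_t IbmLane(size_t x64_lane, size_t lane_bytes) {
  return (kSimd128Bytes / lane_bytes) - 1 - x64_lane;
}

static_assert(IbmLane(0, 4) == 3, "I32x4 lane 0 on x64 is lane 3 on IBM");
static_assert(IbmLane(3, 4) == 0, "and the mapping is its own inverse");
static_assert(IbmLane(0, 8) == 1, "I64x2 lane 0 maps to lane 1");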
// Simulator support.
char* stack_;
static const size_t stack_protection_size_ = 256 * kSystemPointerSize;
diff --git a/deps/v8/src/execution/s390/frame-constants-s390.h b/deps/v8/src/execution/s390/frame-constants-s390.h
index 9b8bbec9a1f..fc47b9e9954 100644
--- a/deps/v8/src/execution/s390/frame-constants-s390.h
+++ b/deps/v8/src/execution/s390/frame-constants-s390.h
@@ -7,6 +7,7 @@
#include "src/base/bits.h"
#include "src/base/macros.h"
+#include "src/codegen/s390/register-s390.h"
#include "src/execution/frame-constants.h"
namespace v8 {
@@ -45,10 +46,11 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
// registers (see liftoff-assembler-defs.h).
class WasmDebugBreakFrameConstants : public TypedFrameConstants {
public:
- // {r2, r3, r4, r5, r6, r7, r8}
- static constexpr uint32_t kPushedGpRegs = 0b111111100;
- // {d0 .. d12}
- static constexpr uint32_t kPushedFpRegs = 0b1111111111111;
+ static constexpr RegList kPushedGpRegs =
+ Register::ListOf(r2, r3, r4, r5, r6, r7, r8, cp);
+
+ static constexpr RegList kPushedFpRegs = DoubleRegister::ListOf(
+ d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12);
static constexpr int kNumPushedGpRegisters =
base::bits::CountPopulation(kPushedGpRegs);
diff --git a/deps/v8/src/execution/s390/simulator-s390.cc b/deps/v8/src/execution/s390/simulator-s390.cc
index 30ad95c47b6..435082a3b9d 100644
--- a/deps/v8/src/execution/s390/simulator-s390.cc
+++ b/deps/v8/src/execution/s390/simulator-s390.cc
@@ -3591,7 +3591,7 @@ EVALUATE(VCDLG) {
break; \
}
EVALUATE(VCGD) {
- DCHECK_OPCODE(VCDG);
+ DCHECK_OPCODE(VCGD);
DECODE_VRR_A_INSTRUCTION(r1, r2, m5, m4, m3);
USE(m4);
switch (m3) {
diff --git a/deps/v8/src/execution/s390/simulator-s390.h b/deps/v8/src/execution/s390/simulator-s390.h
index 6420c822731..4c1b0a49244 100644
--- a/deps/v8/src/execution/s390/simulator-s390.h
+++ b/deps/v8/src/execution/s390/simulator-s390.h
@@ -399,12 +399,10 @@ class Simulator : public SimulatorBase {
static constexpr fpr_t fp_zero = {{0}};
- fpr_t get_simd_register(int reg) {
- return get_simd_register_by_lane<fpr_t>(reg, 0);
- }
+ fpr_t get_simd_register(int reg) { return fp_registers_[reg]; }
- void set_simd_register(int reg, const fpr_t& v) {
- set_simd_register_by_lane(reg, 0, v);
+ void set_simd_register(int reg, const fpr_t& value) {
+ fp_registers_[reg] = value;
}
// Vector register lane numbers on IBM machines are reversed compared to
diff --git a/deps/v8/src/execution/stack-guard.cc b/deps/v8/src/execution/stack-guard.cc
index 941532e40db..dd32f58b98c 100644
--- a/deps/v8/src/execution/stack-guard.cc
+++ b/deps/v8/src/execution/stack-guard.cc
@@ -13,7 +13,10 @@
#include "src/objects/backing-store.h"
#include "src/roots/roots-inl.h"
#include "src/utils/memcopy.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-engine.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -157,6 +160,16 @@ void StackGuard::ClearInterrupt(InterruptFlag flag) {
if (!has_pending_interrupts(access)) reset_limits(access);
}
+bool StackGuard::HasTerminationRequest() {
+ ExecutionAccess access(isolate_);
+ if ((thread_local_.interrupt_flags_ & TERMINATE_EXECUTION) != 0) {
+ thread_local_.interrupt_flags_ &= ~TERMINATE_EXECUTION;
+ if (!has_pending_interrupts(access)) reset_limits(access);
+ return true;
+ }
+ return false;
+}
+
int StackGuard::FetchAndClearInterrupts() {
ExecutionAccess access(isolate_);
@@ -275,11 +288,23 @@ Object StackGuard::HandleInterrupts() {
isolate_->heap()->HandleGCRequest();
}
+#if V8_ENABLE_WEBASSEMBLY
if (TestAndClear(&interrupt_flags, GROW_SHARED_MEMORY)) {
TRACE_EVENT0("v8.wasm", "V8.WasmGrowSharedMemory");
BackingStore::UpdateSharedWasmMemoryObjects(isolate_);
}
+ if (TestAndClear(&interrupt_flags, LOG_WASM_CODE)) {
+ TRACE_EVENT0("v8.wasm", "V8.LogCode");
+ isolate_->wasm_engine()->LogOutstandingCodesForIsolate(isolate_);
+ }
+
+ if (TestAndClear(&interrupt_flags, WASM_CODE_GC)) {
+ TRACE_EVENT0("v8.wasm", "V8.WasmCodeGC");
+ isolate_->wasm_engine()->ReportLiveCodeFromStackForGC(isolate_);
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+
if (TestAndClear(&interrupt_flags, DEOPT_MARKED_ALLOCATION_SITES)) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"V8.GCDeoptMarkedAllocationSites");
@@ -299,16 +324,6 @@ Object StackGuard::HandleInterrupts() {
isolate_->InvokeApiInterruptCallbacks();
}
- if (TestAndClear(&interrupt_flags, LOG_WASM_CODE)) {
- TRACE_EVENT0("v8.wasm", "V8.LogCode");
- isolate_->wasm_engine()->LogOutstandingCodesForIsolate(isolate_);
- }
-
- if (TestAndClear(&interrupt_flags, WASM_CODE_GC)) {
- TRACE_EVENT0("v8.wasm", "V8.WasmCodeGC");
- isolate_->wasm_engine()->ReportLiveCodeFromStackForGC(isolate_);
- }
-
isolate_->counters()->stack_interrupts()->Increment();
return ReadOnlyRoots(isolate_).undefined_value();
diff --git a/deps/v8/src/execution/stack-guard.h b/deps/v8/src/execution/stack-guard.h
index e8bb8aaae3e..d11dec2b890 100644
--- a/deps/v8/src/execution/stack-guard.h
+++ b/deps/v8/src/execution/stack-guard.h
@@ -90,6 +90,11 @@ class V8_EXPORT_PRIVATE V8_NODISCARD StackGuard final {
// stack overflow, then handle the interruption accordingly.
Object HandleInterrupts();
+ // Special case of {HandleInterrupts}: checks for termination requests only.
+ // This is guaranteed to never cause GC, so it can be used to interrupt
+ // long-running computations that are not GC-safe.
+ bool HasTerminationRequest();
+
static constexpr int kSizeInBytes = 7 * kSystemPointerSize;
static char* Iterate(RootVisitor* v, char* thread_storage) {
diff --git a/deps/v8/src/execution/x64/frame-constants-x64.h b/deps/v8/src/execution/x64/frame-constants-x64.h
index 6ce0a02e146..6e1522da258 100644
--- a/deps/v8/src/execution/x64/frame-constants-x64.h
+++ b/deps/v8/src/execution/x64/frame-constants-x64.h
@@ -7,6 +7,7 @@
#include "src/base/bits.h"
#include "src/base/macros.h"
+#include "src/codegen/x64/register-x64.h"
#include "src/execution/frame-constants.h"
namespace v8 {
@@ -60,10 +61,11 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
// registers (see liftoff-assembler-defs.h).
class WasmDebugBreakFrameConstants : public TypedFrameConstants {
public:
- // {rax, rcx, rdx, rbx, rsi, rdi, r9}
- static constexpr uint32_t kPushedGpRegs = 0b1011001111;
- // {xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7}
- static constexpr uint32_t kPushedFpRegs = 0b11111111;
+ static constexpr RegList kPushedGpRegs =
+ Register::ListOf(rax, rcx, rdx, rbx, rsi, rdi, r9);
+
+ static constexpr RegList kPushedFpRegs =
+ DoubleRegister::ListOf(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);
static constexpr int kNumPushedGpRegisters =
base::bits::CountPopulation(kPushedGpRegs);
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index 9d63d760ec3..1911dfc39e8 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -4,9 +4,11 @@
#include "src/extensions/statistics-extension.h"
+#include "src/common/assert-scope.h"
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
#include "src/logging/counters.h"
+#include "src/roots/roots.h"
namespace v8 {
namespace internal {
@@ -124,23 +126,28 @@ void StatisticsExtension::GetCounters(
"amount_of_external_allocated_memory");
args.GetReturnValue().Set(result);
+ DisallowGarbageCollection no_gc;
HeapObjectIterator iterator(
reinterpret_cast<Isolate*>(args.GetIsolate())->heap());
int reloc_info_total = 0;
int source_position_table_total = 0;
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next()) {
+ Object maybe_source_positions;
if (obj.IsCode()) {
Code code = Code::cast(obj);
reloc_info_total += code.relocation_info().Size();
- ByteArray source_position_table = code.SourcePositionTable();
- if (source_position_table.length() > 0) {
- source_position_table_total += code.SourcePositionTable().Size();
- }
+ maybe_source_positions = code.source_position_table();
} else if (obj.IsBytecodeArray()) {
- source_position_table_total +=
- BytecodeArray::cast(obj).SourcePositionTable().Size();
+ maybe_source_positions =
+ BytecodeArray::cast(obj).source_position_table(kAcquireLoad);
+ } else {
+ continue;
}
+ if (!maybe_source_positions.IsByteArray()) continue;
+ ByteArray source_positions = ByteArray::cast(maybe_source_positions);
+ if (source_positions.length() == 0) continue;
+ source_position_table_total += source_positions.Size();
}
AddNumber(args.GetIsolate(), result, reloc_info_total,
diff --git a/deps/v8/src/flags/flag-definitions.h b/deps/v8/src/flags/flag-definitions.h
index 09f63e1344d..fa59c204613 100644
--- a/deps/v8/src/flags/flag-definitions.h
+++ b/deps/v8/src/flags/flag-definitions.h
@@ -151,6 +151,18 @@ struct MaybeBoolFlag {
#define COMPRESS_POINTERS_BOOL false
#endif
+#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
+#define COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL true
+#else
+#define COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL false
+#endif
+
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#define COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL true
+#else
+#define COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL false
+#endif
+
#ifdef V8_HEAP_SANDBOX
#define V8_HEAP_SANDBOX_BOOL true
#else
@@ -163,7 +175,8 @@ struct MaybeBoolFlag {
#define ENABLE_CONTROL_FLOW_INTEGRITY_BOOL false
#endif
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
+ V8_TARGET_ARCH_ARM
#define ENABLE_SPARKPLUG true
#else
// TODO(v8:11421): Enable Sparkplug for other architectures
@@ -246,6 +259,8 @@ DEFINE_BOOL(use_strict, false, "enforce strict mode")
DEFINE_BOOL(harmony, false, "enable all completed harmony features")
DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
+// Enabling FinalizationRegistry#cleanupSome also enables weak refs
+DEFINE_IMPLICATION(harmony_weak_refs_with_cleanup_some, harmony_weak_refs)
// Update bootstrapper.cc whenever adding a new feature flag.
@@ -257,8 +272,9 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
V(harmony_import_assertions, "harmony import assertions")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_INPROGRESS(V) \
- HARMONY_INPROGRESS_BASE(V) \
+#define HARMONY_INPROGRESS(V) \
+ HARMONY_INPROGRESS_BASE(V) \
+ V(harmony_intl_best_fit_matcher, "Intl BestFitMatcher") \
V(harmony_intl_displaynames_date_types, "Intl.DisplayNames date types")
#else
#define HARMONY_INPROGRESS(V) HARMONY_INPROGRESS_BASE(V)
@@ -266,9 +282,7 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
// Features that are complete (but still behind --harmony/es-staging flag).
#define HARMONY_STAGED_BASE(V) \
- V(harmony_top_level_await, "harmony top level await") \
V(harmony_relative_indexing_methods, "harmony relative indexing methods") \
- V(harmony_private_brand_checks, "harmony private brand checks") \
V(harmony_class_static_blocks, "harmony static initializer blocks")
#ifdef V8_INTL_SUPPORT
@@ -281,13 +295,13 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
#endif
// Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING_BASE(V) \
- V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
- V(harmony_atomics, "harmony atomics") \
- V(harmony_string_replaceall, "harmony String.prototype.replaceAll") \
- V(harmony_logical_assignment, "harmony logical assignment") \
- V(harmony_atomics_waitasync, "harmony Atomics.waitAsync") \
- V(harmony_regexp_match_indices, "harmony regexp match indices")
+#define HARMONY_SHIPPING_BASE(V) \
+ V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
+ V(harmony_atomics, "harmony atomics") \
+ V(harmony_weak_refs, "harmony weak references") \
+ V(harmony_regexp_match_indices, "harmony regexp match indices") \
+ V(harmony_private_brand_checks, "harmony private brand checks") \
+ V(harmony_top_level_await, "harmony top level await")
#ifdef V8_INTL_SUPPORT
#define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V)
@@ -424,13 +438,15 @@ DEFINE_BOOL(future, FUTURE_BOOL,
"Implies all staged features that we want to ship in the "
"not-too-far future")
-DEFINE_WEAK_IMPLICATION(future, write_protect_code_memory)
DEFINE_WEAK_IMPLICATION(future, finalize_streaming_on_background)
DEFINE_WEAK_IMPLICATION(future, super_ic)
DEFINE_WEAK_IMPLICATION(future, turbo_inline_js_wasm_calls)
#if ENABLE_SPARKPLUG
DEFINE_WEAK_IMPLICATION(future, sparkplug)
#endif
+#if V8_SHORT_BUILTIN_CALLS
+DEFINE_WEAK_IMPLICATION(future, short_builtin_calls)
+#endif
// Flags for jitless
DEFINE_BOOL(jitless, V8_LITE_BOOL,
@@ -448,9 +464,6 @@ DEFINE_IMPLICATION(jitless, regexp_interpret_all)
DEFINE_NEG_IMPLICATION(jitless, sparkplug)
DEFINE_NEG_IMPLICATION(jitless, always_sparkplug)
#endif
-// asm.js validation is disabled since it triggers wasm code generation.
-DEFINE_NEG_IMPLICATION(jitless, validate_asm)
-// --jitless also implies --no-expose-wasm, see InitializeOncePerProcessImpl.
#ifndef V8_TARGET_ARCH_ARM
// Unsupported on arm. See https://crbug.com/v8/8713.
@@ -490,6 +503,8 @@ DEFINE_BOOL(trace_block_coverage, false,
"trace collected block coverage information")
DEFINE_BOOL(trace_protector_invalidation, false,
"trace protector cell invalidations")
+DEFINE_BOOL(trace_web_snapshot, false, "trace web snapshot deserialization")
+
DEFINE_BOOL(feedback_normalization, false,
"feed back normalization to constructors")
// TODO(jkummerow): This currently adds too much load on the stub cache.
@@ -517,9 +532,9 @@ DEFINE_BOOL(use_ic, true, "use inline caching")
DEFINE_INT(budget_for_feedback_vector_allocation, 940,
"The budget in amount of bytecode executed by a function before we "
"decide to allocate feedback vectors")
-DEFINE_INT(scale_factor_for_feedback_allocation, 4,
+DEFINE_INT(scale_factor_for_feedback_allocation, 8,
"scale bytecode size for feedback vector allocation.")
-DEFINE_BOOL(feedback_allocation_on_bytecode_size, false,
+DEFINE_BOOL(feedback_allocation_on_bytecode_size, true,
"Instead of a fixed budget for lazy feedback vector allocation, "
"scale it based in the bytecode size.")
DEFINE_IMPLICATION(sparkplug, feedback_allocation_on_bytecode_size)
@@ -581,7 +596,7 @@ DEFINE_BOOL(
turboprop_as_toptier, false,
"enable experimental turboprop compiler without further tierup to turbofan")
DEFINE_IMPLICATION(turboprop_as_toptier, turboprop)
-DEFINE_VALUE_IMPLICATION(turboprop, interrupt_budget, 14 * KB)
+DEFINE_VALUE_IMPLICATION(turboprop, interrupt_budget, 20 * KB)
DEFINE_VALUE_IMPLICATION(turboprop, reuse_opt_code_count, 2)
DEFINE_UINT_READONLY(max_minimorphic_map_checks, 4,
"max number of map checks to perform in minimorphic state")
@@ -607,12 +622,21 @@ DEFINE_STRING(sparkplug_filter, "*", "filter for Sparkplug baseline compiler")
DEFINE_BOOL(trace_baseline, false, "trace baseline compilation")
#if !defined(V8_OS_MACOSX) || !defined(V8_HOST_ARCH_ARM64)
// Don't disable --write-protect-code-memory on Apple Silicon.
-DEFINE_NEG_IMPLICATION(sparkplug, write_protect_code_memory)
+DEFINE_WEAK_VALUE_IMPLICATION(sparkplug, write_protect_code_memory, false)
#endif
#undef FLAG
#define FLAG FLAG_FULL
+#if !defined(V8_OS_MACOSX) || !defined(V8_HOST_ARCH_ARM64)
+DEFINE_BOOL(write_code_using_rwx, true,
+ "flip permissions to rwx to write page instead of rw")
+DEFINE_NEG_IMPLICATION(jitless, write_code_using_rwx)
+#else
+DEFINE_BOOL_READONLY(write_code_using_rwx, false,
+ "flip permissions to rwx to write page instead of rw")
+#endif
+
// Flags for concurrent recompilation.
DEFINE_BOOL(concurrent_recompilation, true,
"optimizing hot functions asynchronously on a separate thread")
@@ -807,6 +831,9 @@ DEFINE_BOOL(turbo_collect_feedback_in_generic_lowering, true,
DEFINE_BOOL(isolate_script_cache_ageing, true,
"enable ageing of the isolate script cache.")
+DEFINE_INT(script_run_delay, 0, "sleep [ms] on every Script::Run")
+DEFINE_INT(script_run_delay_once, 0, "sleep [ms] on the first Script::Run")
+
// Favor memory over execution speed.
DEFINE_BOOL(optimize_for_size, false,
"Enables optimizations which favor memory size over execution "
@@ -822,17 +849,16 @@ DEFINE_BOOL(untrusted_code_mitigations, V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS,
"Enable mitigations for executing untrusted code")
#undef V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS
-// Flags for native WebAssembly.
+// Flags for WebAssembly.
+#if V8_ENABLE_WEBASSEMBLY
+
DEFINE_BOOL(wasm_generic_wrapper, true,
"allow use of the generic js-to-wasm wrapper instead of "
"per-signature wrappers")
-#ifdef V8_ENABLE_WEBASSEMBLY
DEFINE_BOOL(expose_wasm, true, "expose wasm interface to JavaScript")
-#else
-DEFINE_BOOL_READONLY(expose_wasm, false, "expose wasm interface to JavaScript")
-#endif
DEFINE_INT(wasm_num_compilation_tasks, 128,
"maximum number of parallel compilation tasks for wasm")
+DEFINE_VALUE_IMPLICATION(single_threaded, wasm_num_compilation_tasks, 0)
DEFINE_DEBUG_BOOL(trace_wasm_native_heap, false,
"trace wasm native heap events")
DEFINE_BOOL(wasm_write_protect_code_memory, false,
@@ -841,6 +867,7 @@ DEFINE_DEBUG_BOOL(trace_wasm_serialization, false,
"trace serialization/deserialization")
DEFINE_BOOL(wasm_async_compilation, true,
"enable actual asynchronous compilation for WebAssembly.compile")
+DEFINE_NEG_IMPLICATION(single_threaded, wasm_async_compilation)
DEFINE_BOOL(wasm_test_streaming, false,
"use streaming compilation instead of async compilation for tests")
DEFINE_UINT(wasm_max_mem_pages, v8::internal::wasm::kSpecMaxMemoryPages,
@@ -882,12 +909,19 @@ DEFINE_DEBUG_BOOL(trace_liftoff, false,
"trace Liftoff, the baseline compiler for WebAssembly")
DEFINE_BOOL(trace_wasm_memory, false,
"print all memory updates performed in wasm code")
-// Fuzzers use {wasm_tier_mask_for_testing} together with {liftoff} and
-// {no_wasm_tier_up} to force some functions to be compiled with Turbofan.
+// Fuzzers use {wasm_tier_mask_for_testing} and {wasm_debug_mask_for_testing}
+// together with {liftoff} and {no_wasm_tier_up} to force some functions to be
+// compiled with Turbofan or for debug.
DEFINE_INT(wasm_tier_mask_for_testing, 0,
"bitmask of functions to compile with TurboFan instead of Liftoff")
+DEFINE_INT(wasm_debug_mask_for_testing, 0,
+           "bitmask of functions to compile for debugging; only applies if the "
+ "tier is Liftoff")
DEFINE_BOOL(validate_asm, true, "validate asm.js modules before compiling")
+// asm.js validation is disabled since it triggers wasm code generation.
+// --jitless also implies --no-expose-wasm, see InitializeOncePerProcessImpl.
+DEFINE_NEG_IMPLICATION(jitless, validate_asm)
DEFINE_BOOL(suppress_asm_messages, false,
"don't emit asm.js related messages (for golden file testing)")
DEFINE_BOOL(trace_asm_time, false, "print asm.js timing info to the console")
@@ -931,11 +965,12 @@ DEFINE_BOOL(wasm_math_intrinsics, true,
"intrinsify some Math imports into wasm")
DEFINE_BOOL(wasm_loop_unrolling, false,
- "generate and then remove loop exits in wasm turbofan code "
- "(placeholder for future loop unrolling feature)")
+ "enable loop unrolling for wasm functions (experimental)")
DEFINE_BOOL(wasm_trap_handler, true,
"use signal handlers to catch out of bounds memory access in wasm"
" (currently Linux x86_64 only)")
+// "no bounds checks" implies "no trap handlers".
+DEFINE_NEG_NEG_IMPLICATION(wasm_bounds_checks, wasm_trap_handler)
DEFINE_BOOL(wasm_fuzzer_gen_test, false,
"generate a test case when running a wasm fuzzer")
DEFINE_IMPLICATION(wasm_fuzzer_gen_test, single_threaded)
@@ -952,11 +987,7 @@ DEFINE_DEBUG_BOOL(trace_wasm_lazy_compilation, false,
"trace lazy compilation of wasm functions")
DEFINE_BOOL(wasm_lazy_validation, false,
"enable lazy validation for lazily compiled wasm functions")
-DEFINE_BOOL(wasm_simd_post_mvp, false,
- "allow experimental SIMD operations for prototyping that are not "
- "included in the current proposal")
DEFINE_BOOL(wasm_simd_ssse3_codegen, false, "allow wasm SIMD SSSE3 codegen")
-DEFINE_IMPLICATION(wasm_simd_post_mvp, experimental_wasm_simd)
DEFINE_BOOL(wasm_code_gc, true, "enable garbage collection of wasm code")
DEFINE_BOOL(trace_wasm_code_gc, false, "trace garbage collection of wasm code")
@@ -968,6 +999,28 @@ DEFINE_INT(wasm_max_initial_code_space_reservation, 0,
DEFINE_BOOL(experimental_wasm_allow_huge_modules, false,
"allow wasm modules bigger than 1GB, but below ~2GB")
+DEFINE_BOOL(trace_wasm, false, "trace wasm function calls")
+
+// Flags for Wasm GDB remote debugging.
+#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
+#define DEFAULT_WASM_GDB_REMOTE_PORT 8765
+DEFINE_BOOL(wasm_gdb_remote, false,
+ "enable GDB-remote for WebAssembly debugging")
+DEFINE_NEG_IMPLICATION(wasm_gdb_remote, wasm_tier_up)
+DEFINE_INT(wasm_gdb_remote_port, DEFAULT_WASM_GDB_REMOTE_PORT,
+ "default port for WebAssembly debugging with LLDB.")
+DEFINE_BOOL(wasm_pause_waiting_for_debugger, false,
+            "pause at the first WebAssembly instruction waiting for a debugger "
+            "to attach")
+DEFINE_BOOL(trace_wasm_gdb_remote, false, "trace WebAssembly GDB-remote server")
+#endif // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
+
+// wasm instance management
+DEFINE_DEBUG_BOOL(trace_wasm_instances, false,
+ "trace creation and collection of wasm instances")
+
+#endif // V8_ENABLE_WEBASSEMBLY
+
DEFINE_INT(stress_sampling_allocation_profiler, 0,
"Enables sampling allocation profiler with X as a sample interval")
@@ -1199,6 +1252,9 @@ DEFINE_BOOL(fast_promotion_new_space, false,
DEFINE_BOOL(clear_free_memory, false, "initialize free memory with 0")
+DEFINE_BOOL(crash_on_aborted_evacuation, false,
+ "crash when evacuation of page fails")
+
DEFINE_BOOL_READONLY(
young_generation_large_objects, true,
"allocates large objects by default in the young generation large "
@@ -1263,11 +1319,11 @@ DEFINE_BOOL(script_streaming, true, "enable parsing on background")
DEFINE_BOOL(stress_background_compile, false,
"stress test parsing on background")
DEFINE_BOOL(
- finalize_streaming_on_background, false,
+ finalize_streaming_on_background, true,
"perform the script streaming finalization on the background thread")
// TODO(leszeks): Parallel compile tasks currently don't support off-thread
// finalization.
-DEFINE_NEG_IMPLICATION(finalize_streaming_on_background, parallel_compile_tasks)
+DEFINE_NEG_IMPLICATION(parallel_compile_tasks, finalize_streaming_on_background)
DEFINE_BOOL(disable_old_api_accessors, false,
"Disable old-style API accessors whose setters trigger through the "
"prototype chain")
@@ -1307,7 +1363,6 @@ DEFINE_NEG_NEG_IMPLICATION(inline_new, turbo_allocation_folding)
// codegen-ia32.cc / codegen-arm.cc
DEFINE_BOOL(trace, false, "trace javascript function calls")
-DEFINE_BOOL(trace_wasm, false, "trace wasm function calls")
// codegen.cc
DEFINE_BOOL(lazy, true, "use lazy compilation")
@@ -1502,6 +1557,23 @@ DEFINE_BOOL(adjust_os_scheduling_parameters, true,
DEFINE_BOOL(experimental_flush_embedded_blob_icache, false,
"Used in an experiment to evaluate icache flushing on certain CPUs")
+// Flags for short builtin calls feature
+#undef FLAG
+#if V8_SHORT_BUILTIN_CALLS
+#define FLAG FLAG_FULL
+#define V8_SHORT_BUILTIN_CALLS_BOOL true
+#else
+#define FLAG FLAG_READONLY
+#define V8_SHORT_BUILTIN_CALLS_BOOL false
+#endif
+
+DEFINE_BOOL(short_builtin_calls, V8_SHORT_BUILTIN_CALLS_BOOL,
+ "Put embedded builtins code into the code range for shorter "
+ "builtin calls/jumps if system has >=4GB memory")
+
+#undef FLAG
+#define FLAG FLAG_FULL
+
// runtime.cc
DEFINE_BOOL(runtime_call_stats, false, "report runtime call counts and times")
DEFINE_GENERIC_IMPLICATION(
@@ -1544,6 +1616,7 @@ DEFINE_BOOL(trace_regexp_assembler, false,
"trace regexp macro assembler calls.")
DEFINE_BOOL(trace_regexp_parser, false, "trace regexp parsing")
DEFINE_BOOL(trace_regexp_tier_up, false, "trace regexp tiering up execution")
+DEFINE_BOOL(trace_regexp_graph, false, "trace the regexp graph")
DEFINE_BOOL(enable_experimental_regexp_engine, false,
"recognize regexps with 'l' flag, run them on experimental engine")
@@ -1639,19 +1712,6 @@ DEFINE_BOOL(multi_mapped_mock_allocator, false,
"Use a multi-mapped mock ArrayBuffer allocator for testing.")
#endif
-// Flags for Wasm GDB remote debugging.
-#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
-#define DEFAULT_WASM_GDB_REMOTE_PORT 8765
-DEFINE_BOOL(wasm_gdb_remote, false,
- "enable GDB-remote for WebAssembly debugging")
-DEFINE_NEG_IMPLICATION(wasm_gdb_remote, wasm_tier_up)
-DEFINE_INT(wasm_gdb_remote_port, DEFAULT_WASM_GDB_REMOTE_PORT,
- "default port for WebAssembly debugging with LLDB.")
-DEFINE_BOOL(wasm_pause_waiting_for_debugger, false,
- "pause at the first Webassembly instruction waiting for a debugger "
- "to attach")
-#endif // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
-
//
// GDB JIT integration flags.
//
@@ -1730,14 +1790,6 @@ DEFINE_BOOL(regexp_possessive_quantifier, false,
// Debugger
DEFINE_BOOL(print_break_location, false, "print source location on debug break")
-// wasm instance management
-DEFINE_DEBUG_BOOL(trace_wasm_instances, false,
- "trace creation and collection of wasm instances")
-
-#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
-DEFINE_BOOL(trace_wasm_gdb_remote, false, "trace Webassembly GDB-remote server")
-#endif // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
-
//
// Logging and profiling flags
//
@@ -1823,7 +1875,9 @@ DEFINE_PERF_PROF_BOOL(
DEFINE_NEG_IMPLICATION(perf_prof, compact_code_space)
// TODO(v8:8462) Remove implication once perf supports remapping.
DEFINE_NEG_IMPLICATION(perf_prof, write_protect_code_memory)
+#if V8_ENABLE_WEBASSEMBLY
DEFINE_NEG_IMPLICATION(perf_prof, wasm_write_protect_code_memory)
+#endif // V8_ENABLE_WEBASSEMBLY
// --perf-prof-unwinding-info is available only on selected architectures.
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
@@ -1874,6 +1928,14 @@ DEFINE_BOOL(interpreted_frames_native_stack, false,
"profilers).")
#endif
+DEFINE_BOOL(enable_system_instrumentation, false,
+ "Enable platform-specific profiling.")
+
+#ifndef V8_TARGET_ARCH_ARM
+DEFINE_IMPLICATION(enable_system_instrumentation,
+ interpreted_frames_native_stack)
+#endif
+
//
// Disassembler only flags
//
@@ -1920,8 +1982,6 @@ DEFINE_IMPLICATION(print_all_code, print_regexp_code)
DEFINE_BOOL(predictable, false, "enable predictable mode")
DEFINE_IMPLICATION(predictable, single_threaded)
DEFINE_NEG_IMPLICATION(predictable, memory_reducer)
-DEFINE_VALUE_IMPLICATION(single_threaded, wasm_num_compilation_tasks, 0)
-DEFINE_NEG_IMPLICATION(single_threaded, wasm_async_compilation)
DEFINE_BOOL(predictable_gc_schedule, false,
"Predictable garbage collection schedule. Fixes heap growing, "
diff --git a/deps/v8/src/flags/flags.cc b/deps/v8/src/flags/flags.cc
index 900499c1940..9825b5b1eaf 100644
--- a/deps/v8/src/flags/flags.cc
+++ b/deps/v8/src/flags/flags.cc
@@ -20,7 +20,10 @@
#include "src/utils/memcopy.h"
#include "src/utils/ostreams.h"
#include "src/utils/utils.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-limits.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/flags/flags.h b/deps/v8/src/flags/flags.h
index 5f904be5c8f..f45bbbb0731 100644
--- a/deps/v8/src/flags/flags.h
+++ b/deps/v8/src/flags/flags.h
@@ -8,7 +8,6 @@
#include <vector>
#include "src/common/globals.h"
-#include "src/wasm/wasm-limits.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/handles/global-handles.cc b/deps/v8/src/handles/global-handles.cc
index 8b24de2a05a..6bc290eac71 100644
--- a/deps/v8/src/handles/global-handles.cc
+++ b/deps/v8/src/handles/global-handles.cc
@@ -382,11 +382,11 @@ namespace {
void ExtractInternalFields(JSObject jsobject, void** embedder_fields, int len) {
int field_count = jsobject.GetEmbedderFieldCount();
- IsolateRoot isolate = GetIsolateForPtrCompr(jsobject);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(jsobject);
for (int i = 0; i < len; ++i) {
if (field_count == i) break;
void* pointer;
- if (EmbedderDataSlot(jsobject, i).ToAlignedPointer(isolate, &pointer)) {
+ if (EmbedderDataSlot(jsobject, i).ToAlignedPointer(cage_base, &pointer)) {
embedder_fields[i] = pointer;
}
}
diff --git a/deps/v8/src/heap/base/asm/arm64/push_registers_asm.cc b/deps/v8/src/heap/base/asm/arm64/push_registers_asm.cc
index 1ff1a27c058..1efcc3430b3 100644
--- a/deps/v8/src/heap/base/asm/arm64/push_registers_asm.cc
+++ b/deps/v8/src/heap/base/asm/arm64/push_registers_asm.cc
@@ -37,6 +37,10 @@ asm(
" stp x23, x24, [sp, #-16]! \n"
" stp x25, x26, [sp, #-16]! \n"
" stp x27, x28, [sp, #-16]! \n"
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ // Sign return address.
+ " paciasp \n"
+#endif
" stp fp, lr, [sp, #-16]! \n"
// Maintain frame pointer.
" mov fp, sp \n"
@@ -47,8 +51,12 @@ asm(
// Pass 3rd parameter as sp (stack pointer).
" mov x2, sp \n"
" blr x7 \n"
- // Load return address.
- " ldr lr, [sp, #8] \n"
- // Restore frame pointer and pop all callee-saved registers.
- " ldr fp, [sp], #96 \n"
+ // Load return address and frame pointer.
+ " ldp fp, lr, [sp], #16 \n"
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ // Authenticate return address.
+ " autiasp \n"
+#endif
+ // Drop all callee-saved registers.
+ " add sp, sp, #80 \n"
" ret \n");
diff --git a/deps/v8/src/heap/basic-memory-chunk.cc b/deps/v8/src/heap/basic-memory-chunk.cc
index 8b89c26ddb9..6fb0467c39f 100644
--- a/deps/v8/src/heap/basic-memory-chunk.cc
+++ b/deps/v8/src/heap/basic-memory-chunk.cc
@@ -62,10 +62,11 @@ bool BasicMemoryChunk::InLargeObjectSpace() const {
}
#ifdef THREAD_SANITIZER
-void BasicMemoryChunk::SynchronizedHeapLoad() {
- CHECK(reinterpret_cast<Heap*>(base::Acquire_Load(
- reinterpret_cast<base::AtomicWord*>(&heap_))) != nullptr ||
- InReadOnlySpace());
+void BasicMemoryChunk::SynchronizedHeapLoad() const {
+ CHECK(reinterpret_cast<Heap*>(
+ base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(
+ &(const_cast<BasicMemoryChunk*>(this)->heap_)))) != nullptr ||
+ InReadOnlySpaceRaw());
}
#endif
diff --git a/deps/v8/src/heap/basic-memory-chunk.h b/deps/v8/src/heap/basic-memory-chunk.h
index e102349fa93..1b2d7cb5dad 100644
--- a/deps/v8/src/heap/basic-memory-chunk.h
+++ b/deps/v8/src/heap/basic-memory-chunk.h
@@ -203,7 +203,18 @@ class BasicMemoryChunk {
static const Flags kSkipEvacuationSlotsRecordingMask =
kEvacuationCandidateMask | kIsInYoungGenerationMask;
- bool InReadOnlySpace() const { return IsFlagSet(READ_ONLY_HEAP); }
+ private:
+ bool InReadOnlySpaceRaw() const { return IsFlagSet(READ_ONLY_HEAP); }
+
+ public:
+ bool InReadOnlySpace() const {
+#ifdef THREAD_SANITIZER
+ // This is needed because TSAN does not process the memory fence
+ // emitted after page initialization.
+ SynchronizedHeapLoad();
+#endif
+ return IsFlagSet(READ_ONLY_HEAP);
+ }
bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
@@ -335,7 +346,7 @@ class BasicMemoryChunk {
// Perform a dummy acquire load to tell TSAN that there is no data race in
// mark-bit initialization. See MemoryChunk::Initialize for the corresponding
// release store.
- void SynchronizedHeapLoad();
+ void SynchronizedHeapLoad() const;
#endif
protected:
diff --git a/deps/v8/src/heap/collection-barrier.cc b/deps/v8/src/heap/collection-barrier.cc
index a111e17e05d..92007690aa7 100644
--- a/deps/v8/src/heap/collection-barrier.cc
+++ b/deps/v8/src/heap/collection-barrier.cc
@@ -6,24 +6,37 @@
#include "src/base/platform/time.h"
#include "src/common/globals.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
+#include "src/heap/local-heap.h"
+#include "src/heap/parked-scope.h"
namespace v8 {
namespace internal {
-void CollectionBarrier::ResumeThreadsAwaitingCollection() {
- base::MutexGuard guard(&mutex_);
- ClearCollectionRequested();
- cond_.NotifyAll();
+bool CollectionBarrier::CollectionRequested() {
+ return main_thread_state_relaxed() == LocalHeap::kCollectionRequested;
+}
+
+LocalHeap::ThreadState CollectionBarrier::main_thread_state_relaxed() {
+ LocalHeap* main_thread_local_heap =
+ heap_->isolate()->main_thread_local_heap();
+ return main_thread_local_heap->state_relaxed();
}
-void CollectionBarrier::ShutdownRequested() {
+void CollectionBarrier::NotifyShutdownRequested() {
base::MutexGuard guard(&mutex_);
if (timer_.IsStarted()) timer_.Stop();
- state_.store(RequestState::kShutdown);
- cond_.NotifyAll();
+ shutdown_requested_ = true;
+ cv_wakeup_.NotifyAll();
+}
+
+void CollectionBarrier::ResumeThreadsAwaitingCollection() {
+ base::MutexGuard guard(&mutex_);
+ cv_wakeup_.NotifyAll();
}
class BackgroundCollectionInterruptTask : public CancelableTask {
@@ -44,30 +57,29 @@ class BackgroundCollectionInterruptTask : public CancelableTask {
Heap* heap_;
};
-void CollectionBarrier::AwaitCollectionBackground() {
- bool first;
-
- {
- base::MutexGuard guard(&mutex_);
- first = FirstCollectionRequest();
- if (first) timer_.Start();
- }
+bool CollectionBarrier::AwaitCollectionBackground(LocalHeap* local_heap) {
+ ParkedScope scope(local_heap);
+ base::MutexGuard guard(&mutex_);
- if (first) {
- // This is the first background thread requesting collection, ask the main
- // thread for GC.
- ActivateStackGuardAndPostTask();
+ while (CollectionRequested()) {
+ if (shutdown_requested_) return false;
+ cv_wakeup_.Wait(&mutex_);
}
- BlockUntilCollected();
+ return true;
}
void CollectionBarrier::StopTimeToCollectionTimer() {
- base::MutexGuard guard(&mutex_);
- RequestState old_state = state_.exchange(RequestState::kCollectionStarted,
- std::memory_order_relaxed);
- if (old_state == RequestState::kCollectionRequested) {
- DCHECK(timer_.IsStarted());
+ LocalHeap::ThreadState main_thread_state = main_thread_state_relaxed();
+ CHECK(main_thread_state == LocalHeap::kRunning ||
+ main_thread_state == LocalHeap::kCollectionRequested);
+
+ if (main_thread_state == LocalHeap::kCollectionRequested) {
+ base::MutexGuard guard(&mutex_);
+ // The first background thread that requests the GC, starts the timer first
+    // The first background thread that requests the GC starts the timer first
+ // is therefore always initialized here already.
+ CHECK(timer_.IsStarted());
base::TimeDelta delta = timer_.Elapsed();
TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"V8.GC.TimeToCollectionOnBackground",
@@ -78,9 +90,6 @@ void CollectionBarrier::StopTimeToCollectionTimer() {
->gc_time_to_collection_on_background()
->AddTimedSample(delta);
timer_.Stop();
- } else {
- DCHECK_EQ(old_state, RequestState::kDefault);
- DCHECK(!timer_.IsStarted());
}
}
@@ -88,20 +97,15 @@ void CollectionBarrier::ActivateStackGuardAndPostTask() {
Isolate* isolate = heap_->isolate();
ExecutionAccess access(isolate);
isolate->stack_guard()->RequestGC();
+
auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
reinterpret_cast<v8::Isolate*>(isolate));
taskrunner->PostTask(
std::make_unique<BackgroundCollectionInterruptTask>(heap_));
-}
-void CollectionBarrier::BlockUntilCollected() {
- TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_COLLECTION,
- ThreadKind::kBackground);
base::MutexGuard guard(&mutex_);
-
- while (CollectionRequested()) {
- cond_.Wait(&mutex_);
- }
+ CHECK(!timer_.IsStarted());
+ timer_.Start();
}
} // namespace internal
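Taken together, the rework splits requesting a GC (ActivateStackGuardAndPostTask, which now also starts the time-to-collection timer) from waiting for it (AwaitCollectionBackground, which parks the LocalHeap and returns false if the isolate is shutting down). A minimal sketch of a background-thread call site; the wrapper function below is hypothetical, not an actual V8 call site:

  // Hypothetical sketch of how a background thread uses the reworked barrier.
  bool RequestAndWaitForGC(CollectionBarrier& barrier, LocalHeap* local_heap) {
    barrier.ActivateStackGuardAndPostTask();               // ask the main thread for a GC
    return barrier.AwaitCollectionBackground(local_heap);  // park and wait; false on shutdown
  }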
diff --git a/deps/v8/src/heap/collection-barrier.h b/deps/v8/src/heap/collection-barrier.h
index 418f93ce046..8cbad143ebc 100644
--- a/deps/v8/src/heap/collection-barrier.h
+++ b/deps/v8/src/heap/collection-barrier.h
@@ -8,8 +8,10 @@
#include <atomic>
#include "src/base/optional.h"
+#include "src/base/platform/condition-variable.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/base/platform/mutex.h"
+#include "src/heap/local-heap.h"
#include "src/logging/counters.h"
namespace v8 {
@@ -21,70 +23,34 @@ class Heap;
class CollectionBarrier {
Heap* heap_;
base::Mutex mutex_;
- base::ConditionVariable cond_;
+ base::ConditionVariable cv_wakeup_;
base::ElapsedTimer timer_;
+ bool shutdown_requested_;
- enum class RequestState {
- // Default state, no collection requested and tear down wasn't initated
- // yet.
- kDefault,
-
- // Collection was already requested
- kCollectionRequested,
-
- // Collection was already started
- kCollectionStarted,
-
- // This state is reached after isolate starts to shut down. The main
- // thread can't perform any GCs anymore, so all allocations need to be
- // allowed from here on until background thread finishes.
- kShutdown,
- };
-
- // The current state.
- std::atomic<RequestState> state_;
-
- // Request GC by activating stack guards and posting a task to perform the
- // GC.
- void ActivateStackGuardAndPostTask();
-
- // Returns true when state was successfully updated from kDefault to
- // kCollection.
- bool FirstCollectionRequest() {
- RequestState expected = RequestState::kDefault;
- return state_.compare_exchange_strong(expected,
- RequestState::kCollectionRequested);
- }
-
- // Sets state back to kDefault - invoked at end of GC.
- void ClearCollectionRequested() {
- RequestState old_state =
- state_.exchange(RequestState::kDefault, std::memory_order_relaxed);
- USE(old_state);
- DCHECK_EQ(old_state, RequestState::kCollectionStarted);
- }
+ LocalHeap::ThreadState main_thread_state_relaxed();
public:
explicit CollectionBarrier(Heap* heap)
- : heap_(heap), state_(RequestState::kDefault) {}
+ : heap_(heap), shutdown_requested_(false) {}
+
+ // Returns true when collection was requested.
+ bool CollectionRequested();
- // Checks whether any background thread requested GC.
- bool CollectionRequested() {
- return state_.load(std::memory_order_relaxed) ==
- RequestState::kCollectionRequested;
- }
+ // Resumes all threads waiting for GC when tear down starts.
+ void NotifyShutdownRequested();
+ // Stops the TimeToCollection timer when starting the GC.
void StopTimeToCollectionTimer();
- void BlockUntilCollected();
// Resumes threads waiting for collection.
void ResumeThreadsAwaitingCollection();
- // Sets current state to kShutdown.
- void ShutdownRequested();
-
// This is the method use by background threads to request and wait for GC.
- void AwaitCollectionBackground();
+ bool AwaitCollectionBackground(LocalHeap* local_heap);
+
+ // Request GC by activating stack guards and posting a task to perform the
+ // GC.
+ void ActivateStackGuardAndPostTask();
};
} // namespace internal
diff --git a/deps/v8/src/heap/concurrent-allocator.cc b/deps/v8/src/heap/concurrent-allocator.cc
index 47cff165a44..6f4bd625c62 100644
--- a/deps/v8/src/heap/concurrent-allocator.cc
+++ b/deps/v8/src/heap/concurrent-allocator.cc
@@ -8,6 +8,7 @@
#include "src/execution/isolate.h"
#include "src/handles/persistent-handles.h"
#include "src/heap/concurrent-allocator-inl.h"
+#include "src/heap/heap.h"
#include "src/heap/local-heap-inl.h"
#include "src/heap/local-heap.h"
#include "src/heap/marking.h"
@@ -33,26 +34,38 @@ void StressConcurrentAllocatorTask::RunInternal() {
// Isolate tear down started, stop allocation...
if (heap->gc_state() == Heap::TEAR_DOWN) return;
- Address address = local_heap.AllocateRawOrFail(
+ AllocationResult result = local_heap.AllocateRaw(
kSmallObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
AllocationAlignment::kWordAligned);
- heap->CreateFillerObjectAtBackground(
- address, kSmallObjectSize, ClearFreedMemoryMode::kDontClearFreedMemory);
- local_heap.Safepoint();
-
- address = local_heap.AllocateRawOrFail(
- kMediumObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
- AllocationAlignment::kWordAligned);
- heap->CreateFillerObjectAtBackground(
- address, kMediumObjectSize,
- ClearFreedMemoryMode::kDontClearFreedMemory);
- local_heap.Safepoint();
-
- address = local_heap.AllocateRawOrFail(
- kLargeObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
- AllocationAlignment::kWordAligned);
- heap->CreateFillerObjectAtBackground(
- address, kLargeObjectSize, ClearFreedMemoryMode::kDontClearFreedMemory);
+ if (!result.IsRetry()) {
+ heap->CreateFillerObjectAtBackground(
+ result.ToAddress(), kSmallObjectSize,
+ ClearFreedMemoryMode::kDontClearFreedMemory);
+ } else {
+ local_heap.TryPerformCollection();
+ }
+
+ result = local_heap.AllocateRaw(kMediumObjectSize, AllocationType::kOld,
+ AllocationOrigin::kRuntime,
+ AllocationAlignment::kWordAligned);
+ if (!result.IsRetry()) {
+ heap->CreateFillerObjectAtBackground(
+ result.ToAddress(), kMediumObjectSize,
+ ClearFreedMemoryMode::kDontClearFreedMemory);
+ } else {
+ local_heap.TryPerformCollection();
+ }
+
+ result = local_heap.AllocateRaw(kLargeObjectSize, AllocationType::kOld,
+ AllocationOrigin::kRuntime,
+ AllocationAlignment::kWordAligned);
+ if (!result.IsRetry()) {
+ heap->CreateFillerObjectAtBackground(
+ result.ToAddress(), kLargeObjectSize,
+ ClearFreedMemoryMode::kDontClearFreedMemory);
+ } else {
+ local_heap.TryPerformCollection();
+ }
local_heap.Safepoint();
}
@@ -109,7 +122,6 @@ AllocationResult ConcurrentAllocator::AllocateInLabSlow(
bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
auto result = space_->RawRefillLabBackground(
local_heap_, kLabSize, kMaxLabSize, kWordAligned, origin);
-
if (!result) return false;
if (local_heap_->heap()->incremental_marking()->black_allocation()) {
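The stress task above now handles allocation failure explicitly instead of calling AllocateRawOrFail. The pattern it repeats for the three object sizes condenses to the sketch below (a simplified illustration using the same calls as the hunk, not the actual task body):

  // Simplified sketch of the allocate-or-collect pattern used above.
  void AllocateOrCollect(Heap* heap, LocalHeap& local_heap, int size) {
    AllocationResult result = local_heap.AllocateRaw(
        size, AllocationType::kOld, AllocationOrigin::kRuntime,
        AllocationAlignment::kWordAligned);
    if (!result.IsRetry()) {
      // Success: fill the allocation so the heap stays iterable.
      heap->CreateFillerObjectAtBackground(
          result.ToAddress(), size,
          ClearFreedMemoryMode::kDontClearFreedMemory);
    } else {
      // Failure: request a collection and let the caller retry later.
      local_heap.TryPerformCollection();
    }
  }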
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index f05024039b4..eb1511f71d9 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -27,6 +27,7 @@
#include "src/objects/data-handler-inl.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/slots-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/utils/utils-inl.h"
@@ -110,9 +111,11 @@ class ConcurrentMarkingVisitor final
return VisitJSObjectSubclassFast(map, object);
}
+#if V8_ENABLE_WEBASSEMBLY
int VisitWasmInstanceObject(Map map, WasmInstanceObject object) {
return VisitJSObjectSubclass(map, object);
}
+#endif // V8_ENABLE_WEBASSEMBLY
int VisitJSWeakCollection(Map map, JSWeakCollection object) {
return VisitJSObjectSubclass(map, object);
@@ -215,12 +218,16 @@ class ConcurrentMarkingVisitor final
template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
int VisitJSObjectSubclass(Map map, T object) {
+ if (!ShouldVisit(object)) return 0;
int size = TBodyDescriptor::SizeOf(map, object);
int used_size = map.UsedInstanceSize();
DCHECK_LE(used_size, size);
DCHECK_GE(used_size, JSObject::GetHeaderSize(map));
- return VisitPartiallyWithSnapshot<T, TBodyDescriptor>(map, object,
- used_size, size);
+ this->VisitMapPointer(object);
+ // It is important to visit only the used field and ignore the slack fields
+    // It is important to visit only the used fields and ignore the slack fields
+ TBodyDescriptor::IterateBody(map, object, used_size, this);
+ return size;
}
template <typename T>
@@ -252,17 +259,11 @@ class ConcurrentMarkingVisitor final
template <typename T>
int VisitFullyWithSnapshot(Map map, T object) {
+ if (!ShouldVisit(object)) return 0;
using TBodyDescriptor = typename T::BodyDescriptor;
int size = TBodyDescriptor::SizeOf(map, object);
- return VisitPartiallyWithSnapshot<T, TBodyDescriptor>(map, object, size,
- size);
- }
-
- template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
- int VisitPartiallyWithSnapshot(Map map, T object, int used_size, int size) {
const SlotSnapshot& snapshot =
- MakeSlotSnapshot<T, TBodyDescriptor>(map, object, used_size);
- if (!ShouldVisit(object)) return 0;
+ MakeSlotSnapshot<T, TBodyDescriptor>(map, object, size);
VisitPointersInSnapshot(object, snapshot);
return size;
}
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.cc b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
index c0683ef7dda..636f666521b 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.cc
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
@@ -10,6 +10,7 @@
#include "include/cppgc/platform.h"
#include "include/v8-platform.h"
#include "include/v8.h"
+#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/platform/time.h"
#include "src/execution/isolate.h"
@@ -64,6 +65,15 @@ cppgc::HeapStatistics CppHeap::CollectStatistics(
detail_level);
}
+void CppHeap::EnableDetachedGarbageCollectionsForTesting() {
+ return internal::CppHeap::From(this)
+ ->EnableDetachedGarbageCollectionsForTesting();
+}
+
+void CppHeap::CollectGarbageForTesting(cppgc::EmbedderStackState stack_state) {
+ return internal::CppHeap::From(this)->CollectGarbageForTesting(stack_state);
+}
+
void JSHeapConsistency::DijkstraMarkingBarrierSlow(
cppgc::HeapHandle& heap_handle, const TracedReferenceBase& ref) {
auto& heap_base = cppgc::internal::HeapBase::From(heap_handle);
@@ -96,6 +106,11 @@ class CppgcPlatformAdapter final : public cppgc::Platform {
}
std::shared_ptr<TaskRunner> GetForegroundTaskRunner() final {
+ // If no Isolate has been set, there's no task runner to leverage for
+ // foreground tasks. In detached mode the original platform handles the
+ // task runner retrieval.
+ if (!isolate_ && !is_in_detached_mode_) return nullptr;
+
return platform_->GetForegroundTaskRunner(isolate_);
}
@@ -109,10 +124,12 @@ class CppgcPlatformAdapter final : public cppgc::Platform {
}
void SetIsolate(v8::Isolate* isolate) { isolate_ = isolate; }
+ void EnableDetachedModeForTesting() { is_in_detached_mode_ = true; }
private:
v8::Platform* platform_;
v8::Isolate* isolate_ = nullptr;
+ bool is_in_detached_mode_ = false;
};
class UnifiedHeapConcurrentMarker
@@ -144,7 +161,7 @@ UnifiedHeapConcurrentMarker::CreateConcurrentMarkingVisitor(
class UnifiedHeapMarker final : public cppgc::internal::MarkerBase {
public:
- UnifiedHeapMarker(Key, Heap& v8_heap, cppgc::internal::HeapBase& cpp_heap,
+ UnifiedHeapMarker(Key, Heap* v8_heap, cppgc::internal::HeapBase& cpp_heap,
cppgc::Platform* platform, MarkingConfig config);
~UnifiedHeapMarker() final = default;
@@ -166,7 +183,7 @@ class UnifiedHeapMarker final : public cppgc::internal::MarkerBase {
cppgc::internal::ConservativeMarkingVisitor conservative_marking_visitor_;
};
-UnifiedHeapMarker::UnifiedHeapMarker(Key key, Heap& v8_heap,
+UnifiedHeapMarker::UnifiedHeapMarker(Key key, Heap* v8_heap,
cppgc::internal::HeapBase& heap,
cppgc::Platform* platform,
MarkingConfig config)
@@ -221,6 +238,7 @@ void CppHeap::Terminate() {
}
void CppHeap::AttachIsolate(Isolate* isolate) {
+ CHECK(!in_detached_testing_mode_);
CHECK_NULL(isolate_);
isolate_ = isolate;
static_cast<CppgcPlatformAdapter*>(platform())
@@ -275,10 +293,17 @@ void CppHeap::TracePrologue(TraceFlags flags) {
const UnifiedHeapMarker::MarkingConfig marking_config{
UnifiedHeapMarker::MarkingConfig::CollectionType::kMajor,
cppgc::Heap::StackState::kNoHeapPointers,
- cppgc::Heap::MarkingType::kIncrementalAndConcurrent,
+ ((current_flags_ & TraceFlags::kForced) &&
+ !force_incremental_marking_for_testing_)
+ ? UnifiedHeapMarker::MarkingConfig::MarkingType::kAtomic
+ : UnifiedHeapMarker::MarkingConfig::MarkingType::
+ kIncrementalAndConcurrent,
flags & TraceFlags::kForced
? UnifiedHeapMarker::MarkingConfig::IsForcedGC::kForced
: UnifiedHeapMarker::MarkingConfig::IsForcedGC::kNotForced};
+ DCHECK_IMPLIES(!isolate_, (cppgc::Heap::MarkingType::kAtomic ==
+ marking_config.marking_type) ||
+ force_incremental_marking_for_testing_);
if ((flags == TraceFlags::kReduceMemory) || (flags == TraceFlags::kForced)) {
// Only enable compaction when in a memory reduction garbage collection as
// it may significantly increase the final garbage collection pause.
@@ -287,7 +312,8 @@ void CppHeap::TracePrologue(TraceFlags flags) {
}
marker_ =
cppgc::internal::MarkerFactory::CreateAndStartMarking<UnifiedHeapMarker>(
- *isolate_->heap(), AsBase(), platform_.get(), marking_config);
+ isolate_ ? isolate_->heap() : nullptr, AsBase(), platform_.get(),
+ marking_config);
marking_done_ = false;
}
@@ -339,11 +365,18 @@ void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
marker_->LeaveAtomicPause();
}
marker_.reset();
+ if (isolate_) {
+ auto* tracer = isolate_->heap()->local_embedder_heap_tracer();
+ DCHECK_NOT_NULL(tracer);
+ tracer->UpdateRemoteStats(
+ stats_collector_->marked_bytes(),
+ stats_collector_->marking_time().InMillisecondsF());
+ }
ExecutePreFinalizers();
// TODO(chromium:1056170): replace build flag with dedicated flag.
#if DEBUG
UnifiedHeapMarkingVerifier verifier(*this);
- verifier.Run(cppgc::Heap::StackState::kNoHeapPointers);
+ verifier.Run(stack_state_of_prev_gc_);
#endif
{
@@ -357,8 +390,15 @@ void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
: cppgc::internal::Sweeper::SweepingConfig::SweepingType::
kIncrementalAndConcurrent,
compactable_space_handling};
+ DCHECK_IMPLIES(
+ !isolate_,
+ cppgc::internal::Sweeper::SweepingConfig::SweepingType::kAtomic ==
+ sweeping_config.sweeping_type);
sweeper().Start(sweeping_config);
}
+ DCHECK_NOT_NULL(trace_summary);
+ trace_summary->allocated_size = SIZE_MAX;
+ trace_summary->time = 0;
in_atomic_pause_ = false;
sweeper().NotifyDoneIfNeeded();
}
@@ -390,5 +430,56 @@ void CppHeap::ReportBufferedAllocationSizeIfPossible() {
buffered_allocated_bytes_ = 0;
}
+void CppHeap::CollectGarbageForTesting(
+ cppgc::internal::GarbageCollector::Config::StackState stack_state) {
+ if (in_no_gc_scope()) return;
+
+ // Finish sweeping in case it is still running.
+ sweeper().FinishIfRunning();
+
+ if (isolate_) {
+ // Go through EmbedderHeapTracer API and perform a unified heap collection.
+ GarbageCollectionForTesting(stack_state);
+ } else {
+ // Perform an atomic GC, with starting incremental/concurrent marking and
+ // immediately finalizing the garbage collection.
+ if (!IsMarking()) TracePrologue(TraceFlags::kForced);
+ EnterFinalPause(stack_state);
+ AdvanceTracing(std::numeric_limits<double>::infinity());
+ TraceSummary trace_summary;
+ TraceEpilogue(&trace_summary);
+ DCHECK_EQ(SIZE_MAX, trace_summary.allocated_size);
+ }
+}
+
+void CppHeap::EnableDetachedGarbageCollectionsForTesting() {
+ CHECK(!in_detached_testing_mode_);
+ CHECK_NULL(isolate_);
+ no_gc_scope_--;
+ in_detached_testing_mode_ = true;
+ static_cast<CppgcPlatformAdapter*>(platform())
+ ->EnableDetachedModeForTesting();
+}
+
+void CppHeap::StartIncrementalGarbageCollectionForTesting() {
+ DCHECK(!in_no_gc_scope());
+ DCHECK_NULL(isolate_);
+ if (IsMarking()) return;
+ force_incremental_marking_for_testing_ = true;
+ TracePrologue(TraceFlags::kForced);
+ force_incremental_marking_for_testing_ = false;
+}
+
+void CppHeap::FinalizeIncrementalGarbageCollectionForTesting(
+ EmbedderStackState stack_state) {
+ DCHECK(!in_no_gc_scope());
+ DCHECK_NULL(isolate_);
+ DCHECK(IsMarking());
+ if (IsMarking()) {
+ CollectGarbageForTesting(stack_state);
+ }
+ sweeper_.FinishIfRunning();
+}
+
} // namespace internal
} // namespace v8
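The new testing entry points let a CppHeap run garbage collections without ever being attached to an Isolate. A hedged usage sketch, assuming a v8::CppHeap* that was created but never attached (the wrapper function is hypothetical):

  // Hypothetical test-only usage of the detached-GC API defined above.
  void RunDetachedGCForTesting(v8::CppHeap* cpp_heap) {
    cpp_heap->EnableDetachedGarbageCollectionsForTesting();
    cpp_heap->CollectGarbageForTesting(
        cppgc::EmbedderStackState::kNoHeapPointers);
  }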
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.h b/deps/v8/src/heap/cppgc-js/cpp-heap.h
index 47b63a5c5d3..b13fd25a323 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.h
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.h
@@ -50,6 +50,11 @@ class V8_EXPORT_PRIVATE CppHeap final
void Terminate();
+ void EnableDetachedGarbageCollectionsForTesting();
+
+ void CollectGarbageForTesting(
+ cppgc::internal::GarbageCollector::Config::StackState);
+
// v8::EmbedderHeapTracer interface.
void RegisterV8References(
const std::vector<std::pair<void*, void*> >& embedder_fields) final;
@@ -73,6 +78,9 @@ class V8_EXPORT_PRIVATE CppHeap final
void ReportBufferedAllocationSizeIfPossible();
+ void StartIncrementalGarbageCollectionForTesting() final;
+ void FinalizeIncrementalGarbageCollectionForTesting(EmbedderStackState) final;
+
Isolate* isolate_ = nullptr;
bool marking_done_ = false;
TraceFlags current_flags_ = TraceFlags::kNoFlags;
@@ -83,6 +91,9 @@ class V8_EXPORT_PRIVATE CppHeap final
int64_t buffered_allocated_bytes_ = 0;
v8::WrapperDescriptor wrapper_descriptor_;
+
+ bool in_detached_testing_mode_ = false;
+ bool force_incremental_marking_for_testing_ = false;
};
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc b/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
index b89ff4f9a97..79a863e3026 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
+++ b/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
@@ -35,11 +35,19 @@ using cppgc::internal::HeapObjectHeader;
class EmbedderNode : public v8::EmbedderGraph::Node {
public:
explicit EmbedderNode(const char* name, size_t size)
- : name_(name), size_(size) {}
+ : name_(name), size_(size) {
+ USE(size_);
+ }
~EmbedderNode() override = default;
const char* Name() final { return name_; }
- size_t SizeInBytes() final { return size_; }
+ size_t SizeInBytes() final {
+#if CPPGC_SUPPORTS_OBJECT_NAMES
+ return size_;
+#else // !CPPGC_SUPPORTS_OBJECT_NAMES
+ return 0;
+#endif // !CPPGC_SUPPORTS_OBJECT_NAMES
+ }
void SetWrapperNode(v8::EmbedderGraph::Node* wrapper_node) {
wrapper_node_ = wrapper_node;
@@ -696,6 +704,7 @@ void CppGraphBuilderImpl::Run() {
ParentScope parent_scope(
states_.CreateRootState(AddRootNode("C++ cross-thread roots")));
GraphBuildingVisitor object_visitor(*this, parent_scope);
+ cppgc::internal::PersistentRegionLock guard;
cpp_heap_.GetStrongCrossThreadPersistentRegion().Trace(&object_visitor);
}
}
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h b/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h
index 1a1da3f278a..d98e2b54bfa 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h
@@ -7,6 +7,7 @@
#include "include/v8-cppgc.h"
#include "include/v8.h"
+#include "src/base/logging.h"
#include "src/heap/heap.h"
namespace v8 {
@@ -22,7 +23,7 @@ class BasicTracedReferenceExtractor {
class UnifiedHeapMarkingState {
public:
- explicit UnifiedHeapMarkingState(Heap& heap) : heap_(heap) {}
+ explicit UnifiedHeapMarkingState(Heap* heap) : heap_(heap) {}
UnifiedHeapMarkingState(const UnifiedHeapMarkingState&) = delete;
UnifiedHeapMarkingState& operator=(const UnifiedHeapMarkingState&) = delete;
@@ -30,11 +31,16 @@ class UnifiedHeapMarkingState {
inline void MarkAndPush(const TracedReferenceBase&);
private:
- Heap& heap_;
+ Heap* heap_;
};
void UnifiedHeapMarkingState::MarkAndPush(const TracedReferenceBase& ref) {
- heap_.RegisterExternallyReferencedObject(
+ // The same visitor is used in testing scenarios without attaching the heap to
+ // an Isolate under the assumption that no non-empty v8 references are found.
+ // Having the following DCHECK crash means that the heap is in detached mode
+ // but we find traceable pointers into an Isolate.
+ DCHECK_NOT_NULL(heap_);
+ heap_->RegisterExternallyReferencedObject(
BasicTracedReferenceExtractor::ObjectReference(ref));
}
diff --git a/deps/v8/src/heap/cppgc/compactor.cc b/deps/v8/src/heap/cppgc/compactor.cc
index 4a05746b726..f4498e7fbc3 100644
--- a/deps/v8/src/heap/cppgc/compactor.cc
+++ b/deps/v8/src/heap/cppgc/compactor.cc
@@ -15,6 +15,7 @@
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-space.h"
+#include "src/heap/cppgc/object-poisoner.h"
#include "src/heap/cppgc/raw-heap.h"
#include "src/heap/cppgc/stats-collector.h"
@@ -371,6 +372,10 @@ void CompactSpace(NormalPageSpace* space,
MovableReferences& movable_references) {
using Pages = NormalPageSpace::Pages;
+#ifdef V8_USE_ADDRESS_SANITIZER
+ UnmarkedObjectsPoisoner().Traverse(space);
+#endif // V8_USE_ADDRESS_SANITIZER
+
DCHECK(space->is_compactable());
space->free_list().Clear();
@@ -465,7 +470,6 @@ void Compactor::InitializeIfShouldCompact(
compaction_worklists_ = std::make_unique<CompactionWorklists>();
is_enabled_ = true;
- enable_for_next_gc_for_testing_ = false;
}
bool Compactor::CancelIfShouldNotCompact(
@@ -501,9 +505,15 @@ Compactor::CompactableSpaceHandling Compactor::CompactSpacesIfEnabled() {
CompactSpace(space, movable_references);
}
+ enable_for_next_gc_for_testing_ = false;
is_enabled_ = false;
return CompactableSpaceHandling::kIgnore;
}
+void Compactor::EnableForNextGCForTesting() {
+ DCHECK_NULL(heap_.heap()->marker());
+ enable_for_next_gc_for_testing_ = true;
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/compactor.h b/deps/v8/src/heap/cppgc/compactor.h
index ec7fc950a9a..46a8e1ef53e 100644
--- a/deps/v8/src/heap/cppgc/compactor.h
+++ b/deps/v8/src/heap/cppgc/compactor.h
@@ -34,7 +34,7 @@ class V8_EXPORT_PRIVATE Compactor final {
return compaction_worklists_.get();
}
- void EnableForNextGCForTesting() { enable_for_next_gc_for_testing_ = true; }
+ void EnableForNextGCForTesting();
bool IsEnabledForTesting() const { return is_enabled_; }
private:
diff --git a/deps/v8/src/heap/cppgc/explicit-management.cc b/deps/v8/src/heap/cppgc/explicit-management.cc
new file mode 100644
index 00000000000..b3ab5f5b515
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/explicit-management.cc
@@ -0,0 +1,152 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/explicit-management.h"
+
+#include <tuple>
+
+#include "src/heap/cppgc/heap-base.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/sanitizers.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+std::pair<bool, BasePage*> CanModifyObject(void* object) {
+ // object is guaranteed to be of type GarbageCollected, so getting the
+ // BasePage is okay for regular and large objects.
+ auto* base_page = BasePage::FromPayload(object);
+ auto* heap = base_page->heap();
+ // Whenever the GC is active, avoid modifying the object as it may mess with
+ // state that the GC needs.
+ const bool in_gc = heap->in_atomic_pause() || heap->marker() ||
+ heap->sweeper().IsSweepingInProgress();
+ return {!in_gc, base_page};
+}
+
+} // namespace
+
+void FreeUnreferencedObject(void* object) {
+ bool can_free;
+ BasePage* base_page;
+ std::tie(can_free, base_page) = CanModifyObject(object);
+ if (!can_free) {
+ return;
+ }
+
+ auto& header = HeapObjectHeader::FromPayload(object);
+ header.Finalize();
+
+ if (base_page->is_large()) { // Large object.
+ base_page->space()->RemovePage(base_page);
+ base_page->heap()->stats_collector()->NotifyExplicitFree(
+ LargePage::From(base_page)->PayloadSize());
+ LargePage::Destroy(LargePage::From(base_page));
+ } else { // Regular object.
+ const size_t header_size = header.GetSize();
+ auto* normal_page = NormalPage::From(base_page);
+ auto& normal_space = *static_cast<NormalPageSpace*>(base_page->space());
+ auto& lab = normal_space.linear_allocation_buffer();
+ ConstAddress payload_end = header.PayloadEnd();
+ SET_MEMORY_INACCESSIBLE(&header, header_size);
+ if (payload_end == lab.start()) { // Returning to LAB.
+ lab.Set(reinterpret_cast<Address>(&header), lab.size() + header_size);
+ normal_page->object_start_bitmap().ClearBit(lab.start());
+ } else { // Returning to free list.
+ base_page->heap()->stats_collector()->NotifyExplicitFree(header_size);
+ normal_space.free_list().Add({&header, header_size});
+ // No need to update the bitmap as the same bit is reused for the free
+ // list entry.
+ }
+ }
+}
+
+namespace {
+
+bool Grow(HeapObjectHeader& header, BasePage& base_page, size_t new_size,
+ size_t size_delta) {
+ DCHECK_GE(new_size, header.GetSize() + kAllocationGranularity);
+ DCHECK_GE(size_delta, kAllocationGranularity);
+ DCHECK(!base_page.is_large());
+
+ auto& normal_space = *static_cast<NormalPageSpace*>(base_page.space());
+ auto& lab = normal_space.linear_allocation_buffer();
+ if (lab.start() == header.PayloadEnd() && lab.size() >= size_delta) {
+ // LABs are considered used memory which means that no allocated size
+ // adjustments are needed.
+ Address delta_start = lab.Allocate(size_delta);
+ SET_MEMORY_ACCESSIBLE(delta_start, size_delta);
+ header.SetSize(new_size);
+ return true;
+ }
+ return false;
+}
+
+bool Shrink(HeapObjectHeader& header, BasePage& base_page, size_t new_size,
+ size_t size_delta) {
+ DCHECK_GE(header.GetSize(), new_size + kAllocationGranularity);
+ DCHECK_GE(size_delta, kAllocationGranularity);
+ DCHECK(!base_page.is_large());
+
+ auto& normal_space = *static_cast<NormalPageSpace*>(base_page.space());
+ auto& lab = normal_space.linear_allocation_buffer();
+ Address free_start = header.PayloadEnd() - size_delta;
+ if (lab.start() == header.PayloadEnd()) {
+ DCHECK_EQ(free_start, lab.start() - size_delta);
+ // LABs are considered used memory which means that no allocated size
+ // adjustments are needed.
+ lab.Set(free_start, lab.size() + size_delta);
+ SET_MEMORY_INACCESSIBLE(lab.start(), size_delta);
+ header.SetSize(new_size);
+ return true;
+ }
+ // Heuristic: Only return memory to the free list if the block is larger than
+ // the smallest size class.
+ if (size_delta >= ObjectAllocator::kSmallestSpaceSize) {
+ SET_MEMORY_INACCESSIBLE(free_start, size_delta);
+ base_page.heap()->stats_collector()->NotifyExplicitFree(size_delta);
+ normal_space.free_list().Add({free_start, size_delta});
+ NormalPage::From(&base_page)->object_start_bitmap().SetBit(free_start);
+ header.SetSize(new_size);
+ }
+  // Return success in any case, as we want to avoid embedders copying memory
+  // because of small deltas.
+ return true;
+}
+
+} // namespace
+
+bool Resize(void* object, size_t new_object_size) {
+ bool can_resize;
+ BasePage* base_page;
+ std::tie(can_resize, base_page) = CanModifyObject(object);
+ if (!can_resize) {
+ return false;
+ }
+
+ // TODO(chromium:1056170): Consider supporting large objects within certain
+ // restrictions.
+ if (base_page->is_large()) {
+ return false;
+ }
+
+ const size_t new_size = RoundUp<kAllocationGranularity>(
+ sizeof(HeapObjectHeader) + new_object_size);
+ auto& header = HeapObjectHeader::FromPayload(object);
+ const size_t old_size = header.GetSize();
+
+ if (new_size > old_size) {
+ return Grow(header, *base_page, new_size, new_size - old_size);
+ } else if (old_size > new_size) {
+ return Shrink(header, *base_page, new_size, old_size - new_size);
+ }
+ // Same size considering internal restrictions, e.g. alignment.
+ return true;
+}
+
+} // namespace internal
+} // namespace cppgc
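The Grow and Shrink helpers in the new file only adjust objects in place when the linear allocation buffer (LAB) starts exactly at the object's payload end. A simplified sketch of that adjacency fast path, with stand-in types rather than the real cppgc classes:

  #include <cstddef>

  // Hypothetical simplification of the Grow() fast path above.
  struct Lab {
    char* start;
    std::size_t size;
  };
  // Grows an object whose payload ends at `payload_end` by `delta` bytes by
  // carving them off an adjacent LAB. Returns false if the LAB is not adjacent
  // or too small; the caller then falls back to a regular allocation.
  bool GrowInPlace(Lab& lab, char* payload_end, std::size_t delta) {
    if (lab.start != payload_end || lab.size < delta) return false;
    lab.start += delta;  // consume delta bytes from the front of the LAB
    lab.size -= delta;
    return true;         // caller bumps the object header's size afterwards
  }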
diff --git a/deps/v8/src/heap/cppgc/free-list.cc b/deps/v8/src/heap/cppgc/free-list.cc
index 934aeaf3f86..705d31725ad 100644
--- a/deps/v8/src/heap/cppgc/free-list.cc
+++ b/deps/v8/src/heap/cppgc/free-list.cc
@@ -170,7 +170,7 @@ bool FreeList::IsEmpty() const {
[](const auto* entry) { return !entry; });
}
-bool FreeList::Contains(Block block) const {
+bool FreeList::ContainsForTesting(Block block) const {
for (Entry* list : free_list_heads_) {
for (Entry* entry = list; entry; entry = entry->Next()) {
if (entry <= block.address &&
diff --git a/deps/v8/src/heap/cppgc/free-list.h b/deps/v8/src/heap/cppgc/free-list.h
index 6906952102f..184030a9e87 100644
--- a/deps/v8/src/heap/cppgc/free-list.h
+++ b/deps/v8/src/heap/cppgc/free-list.h
@@ -44,10 +44,10 @@ class V8_EXPORT_PRIVATE FreeList {
size_t Size() const;
bool IsEmpty() const;
- bool Contains(Block) const;
-
void CollectStatistics(HeapStatistics::FreeListStatistics&);
+ bool ContainsForTesting(Block) const;
+
private:
class Entry;
diff --git a/deps/v8/src/heap/cppgc/gc-info-table.cc b/deps/v8/src/heap/cppgc/gc-info-table.cc
index 384f8713efc..6b177848cbb 100644
--- a/deps/v8/src/heap/cppgc/gc-info-table.cc
+++ b/deps/v8/src/heap/cppgc/gc-info-table.cc
@@ -125,11 +125,19 @@ void GCInfoTable::CheckMemoryIsZeroed(uintptr_t* base, size_t len) {
#endif // DEBUG
}
-GCInfoIndex GCInfoTable::RegisterNewGCInfo(const GCInfo& info) {
+GCInfoIndex GCInfoTable::RegisterNewGCInfo(
+ std::atomic<GCInfoIndex>& registered_index, const GCInfo& info) {
// Ensuring a new index involves current index adjustment as well as
// potentially resizing the table. For simplicity we use a lock.
v8::base::MutexGuard guard(&table_mutex_);
+ // Check the registered index again after taking the lock as some other
+ // thread may have registered the info at the same time.
+ GCInfoIndex index = registered_index.load(std::memory_order_relaxed);
+ if (index) {
+ return index;
+ }
+
if (current_index_ == limit_) {
Resize();
}
@@ -137,6 +145,7 @@ GCInfoIndex GCInfoTable::RegisterNewGCInfo(const GCInfo& info) {
GCInfoIndex new_index = current_index_++;
CHECK_LT(new_index, GCInfoTable::kMaxIndex);
table_[new_index] = info;
+ registered_index.store(new_index, std::memory_order_release);
return new_index;
}
diff --git a/deps/v8/src/heap/cppgc/gc-info-table.h b/deps/v8/src/heap/cppgc/gc-info-table.h
index 61de294426b..3ab614fa850 100644
--- a/deps/v8/src/heap/cppgc/gc-info-table.h
+++ b/deps/v8/src/heap/cppgc/gc-info-table.h
@@ -54,7 +54,7 @@ class V8_EXPORT GCInfoTable final {
GCInfoTable(const GCInfoTable&) = delete;
GCInfoTable& operator=(const GCInfoTable&) = delete;
- GCInfoIndex RegisterNewGCInfo(const GCInfo& info);
+ GCInfoIndex RegisterNewGCInfo(std::atomic<uint16_t>&, const GCInfo& info);
const GCInfo& GCInfoFromIndex(GCInfoIndex index) const {
DCHECK_GE(index, kMinIndex);
diff --git a/deps/v8/src/heap/cppgc/gc-info.cc b/deps/v8/src/heap/cppgc/gc-info.cc
index 57d49fb322f..de57805dcbf 100644
--- a/deps/v8/src/heap/cppgc/gc-info.cc
+++ b/deps/v8/src/heap/cppgc/gc-info.cc
@@ -9,12 +9,14 @@
namespace cppgc {
namespace internal {
-RegisteredGCInfoIndex::RegisteredGCInfoIndex(
- FinalizationCallback finalization_callback, TraceCallback trace_callback,
- NameCallback name_callback, bool has_v_table)
- : index_(GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
- {finalization_callback, trace_callback, name_callback,
- has_v_table})) {}
+GCInfoIndex EnsureGCInfoIndex(std::atomic<GCInfoIndex>& registered_index,
+ FinalizationCallback finalization_callback,
+ TraceCallback trace_callback,
+ NameCallback name_callback, bool has_v_table) {
+ return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ registered_index,
+ {finalization_callback, trace_callback, name_callback, has_v_table});
+}
} // namespace internal
} // namespace cppgc
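The two hunks above (gc-info-table.cc and gc-info.cc) turn GCInfo registration into a double-checked pattern: each type keeps an atomic index, the slow path re-checks it under the table mutex, and the winner publishes the new index with a release store. A generic, self-contained sketch of the same pattern with hypothetical names:

  #include <atomic>
  #include <cstdint>
  #include <mutex>

  // Hypothetical stand-ins for the GCInfo table; not real cppgc code.
  std::mutex table_mutex;
  uint16_t next_index = 1;
  uint16_t RegisterSlow() { return next_index++; }  // toy "append to table"

  uint16_t EnsureIndex(std::atomic<uint16_t>& slot) {
    uint16_t index = slot.load(std::memory_order_acquire);
    if (index) return index;                  // fast path: already registered
    std::lock_guard<std::mutex> guard(table_mutex);
    index = slot.load(std::memory_order_relaxed);
    if (index) return index;                  // another thread won the race
    index = RegisterSlow();
    slot.store(index, std::memory_order_release);
    return index;
  }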
diff --git a/deps/v8/src/heap/cppgc/globals.h b/deps/v8/src/heap/cppgc/globals.h
index 747b194fea6..bf1e215c229 100644
--- a/deps/v8/src/heap/cppgc/globals.h
+++ b/deps/v8/src/heap/cppgc/globals.h
@@ -9,6 +9,7 @@
#include <stdint.h>
#include "include/cppgc/internal/gc-info.h"
+#include "src/base/build_config.h"
namespace cppgc {
namespace internal {
@@ -30,9 +31,11 @@ enum class AccessMode : uint8_t { kNonAtomic, kAtomic };
// This means that any scalar type with stricter alignment requirements (in
// practice: long double) cannot be used unrestricted in garbage-collected
// objects.
-//
-// Note: We use the same allocation granularity on 32-bit and 64-bit systems.
+#if defined(V8_TARGET_ARCH_64_BIT)
constexpr size_t kAllocationGranularity = 8;
+#else // !V8_TARGET_ARCH_64_BIT
+constexpr size_t kAllocationGranularity = 4;
+#endif // !V8_TARGET_ARCH_64_BIT
constexpr size_t kAllocationMask = kAllocationGranularity - 1;
constexpr size_t kPageSizeLog2 = 17;
diff --git a/deps/v8/src/heap/cppgc/heap-base.cc b/deps/v8/src/heap/cppgc/heap-base.cc
index 05cfb7fb470..f89c4c9f112 100644
--- a/deps/v8/src/heap/cppgc/heap-base.cc
+++ b/deps/v8/src/heap/cppgc/heap-base.cc
@@ -77,7 +77,7 @@ HeapBase::HeapBase(
compactor_(raw_heap_),
object_allocator_(&raw_heap_, page_backend_.get(),
stats_collector_.get()),
- sweeper_(&raw_heap_, platform_.get(), stats_collector_.get()),
+ sweeper_(*this),
stack_support_(stack_support) {
stats_collector_->RegisterObserver(
&allocation_observer_for_PROCESS_HEAP_STATISTICS_);
@@ -111,11 +111,12 @@ void HeapBase::Terminate() {
// Clear root sets.
strong_persistent_region_.ClearAllUsedNodes();
- strong_cross_thread_persistent_region_.ClearAllUsedNodes();
- // Clear weak root sets, as the GC below does not execute weakness
- // callbacks.
weak_persistent_region_.ClearAllUsedNodes();
- weak_cross_thread_persistent_region_.ClearAllUsedNodes();
+ {
+ PersistentRegionLock guard;
+ strong_cross_thread_persistent_region_.ClearAllUsedNodes();
+ weak_cross_thread_persistent_region_.ClearAllUsedNodes();
+ }
stats_collector()->NotifyMarkingStarted(
GarbageCollector::Config::CollectionType::kMajor,
@@ -131,6 +132,11 @@ void HeapBase::Terminate() {
object_allocator().Terminate();
disallow_gc_scope_++;
+
+ CHECK_EQ(0u, strong_persistent_region_.NodesInUse());
+ CHECK_EQ(0u, weak_persistent_region_.NodesInUse());
+ CHECK_EQ(0u, strong_cross_thread_persistent_region_.NodesInUse());
+ CHECK_EQ(0u, weak_cross_thread_persistent_region_.NodesInUse());
}
HeapStatistics HeapBase::CollectStatistics(
diff --git a/deps/v8/src/heap/cppgc/heap-base.h b/deps/v8/src/heap/cppgc/heap-base.h
index 16441a59932..f9bdb95c04a 100644
--- a/deps/v8/src/heap/cppgc/heap-base.h
+++ b/deps/v8/src/heap/cppgc/heap-base.h
@@ -14,6 +14,7 @@
#include "include/cppgc/macros.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/compactor.h"
+#include "src/heap/cppgc/garbage-collector.h"
#include "src/heap/cppgc/marker.h"
#include "src/heap/cppgc/metric-recorder.h"
#include "src/heap/cppgc/object-allocator.h"
@@ -39,6 +40,7 @@ class NoGarbageCollectionScope;
} // namespace subtle
namespace testing {
+class Heap;
class OverrideEmbedderStackStateScope;
} // namespace testing
@@ -128,16 +130,18 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
const PersistentRegion& GetWeakPersistentRegion() const {
return weak_persistent_region_;
}
- PersistentRegion& GetStrongCrossThreadPersistentRegion() {
+ CrossThreadPersistentRegion& GetStrongCrossThreadPersistentRegion() {
return strong_cross_thread_persistent_region_;
}
- const PersistentRegion& GetStrongCrossThreadPersistentRegion() const {
+ const CrossThreadPersistentRegion& GetStrongCrossThreadPersistentRegion()
+ const {
return strong_cross_thread_persistent_region_;
}
- PersistentRegion& GetWeakCrossThreadPersistentRegion() {
+ CrossThreadPersistentRegion& GetWeakCrossThreadPersistentRegion() {
return weak_cross_thread_persistent_region_;
}
- const PersistentRegion& GetWeakCrossThreadPersistentRegion() const {
+ const CrossThreadPersistentRegion& GetWeakCrossThreadPersistentRegion()
+ const {
return weak_cross_thread_persistent_region_;
}
@@ -161,7 +165,21 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
HeapStatistics CollectStatistics(HeapStatistics::DetailLevel);
+ EmbedderStackState stack_state_of_prev_gc() const {
+ return stack_state_of_prev_gc_;
+ }
+ void SetStackStateOfPrevGC(EmbedderStackState stack_state) {
+ stack_state_of_prev_gc_ = stack_state;
+ }
+
+ void SetInAtomicPauseForTesting(bool value) { in_atomic_pause_ = value; }
+
+ virtual void StartIncrementalGarbageCollectionForTesting() = 0;
+ virtual void FinalizeIncrementalGarbageCollectionForTesting(
+ EmbedderStackState) = 0;
+
protected:
+ // Used by the incremental scheduler to finalize a GC if supported.
virtual void FinalizeIncrementalGarbageCollectionIfNeeded(
cppgc::Heap::StackState) = 0;
@@ -189,8 +207,8 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
PersistentRegion strong_persistent_region_;
PersistentRegion weak_persistent_region_;
- PersistentRegion strong_cross_thread_persistent_region_;
- PersistentRegion weak_cross_thread_persistent_region_;
+ CrossThreadPersistentRegion strong_cross_thread_persistent_region_;
+ CrossThreadPersistentRegion weak_cross_thread_persistent_region_;
ProcessHeapStatisticsUpdater::AllocationObserverImpl
allocation_observer_for_PROCESS_HEAP_STATISTICS_;
@@ -202,6 +220,8 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
size_t disallow_gc_scope_ = 0;
const StackSupport stack_support_;
+ EmbedderStackState stack_state_of_prev_gc_ =
+ EmbedderStackState::kNoHeapPointers;
std::unique_ptr<EmbedderStackState> override_stack_state_;
bool in_atomic_pause_ = false;
@@ -210,6 +230,7 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
friend class testing::TestWithHeap;
friend class cppgc::subtle::DisallowGarbageCollectionScope;
friend class cppgc::subtle::NoGarbageCollectionScope;
+ friend class cppgc::testing::Heap;
friend class cppgc::testing::OverrideEmbedderStackStateScope;
};
diff --git a/deps/v8/src/heap/cppgc/heap-object-header.cc b/deps/v8/src/heap/cppgc/heap-object-header.cc
index 4ed2cf73ba0..0f5530114cb 100644
--- a/deps/v8/src/heap/cppgc/heap-object-header.cc
+++ b/deps/v8/src/heap/cppgc/heap-object-header.cc
@@ -7,6 +7,8 @@
#include "include/cppgc/internal/api-constants.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/gc-info-table.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/sanitizers.h"
namespace cppgc {
namespace internal {
@@ -21,6 +23,13 @@ void HeapObjectHeader::CheckApiConstants() {
}
void HeapObjectHeader::Finalize() {
+#ifdef V8_USE_ADDRESS_SANITIZER
+ const size_t size =
+ IsLargeObject()
+ ? LargePage::From(BasePage::FromPayload(this))->ObjectSize()
+ : ObjectSize();
+ ASAN_UNPOISON_MEMORY_REGION(Payload(), size);
+#endif // V8_USE_ADDRESS_SANITIZER
const GCInfo& gc_info = GlobalGCInfoTable::GCInfoFromIndex(GetGCInfoIndex());
if (gc_info.finalize) {
gc_info.finalize(Payload());
@@ -32,10 +41,5 @@ HeapObjectName HeapObjectHeader::GetName() const {
return gc_info.name(Payload());
}
-void HeapObjectHeader::Trace(Visitor* visitor) const {
- const GCInfo& gc_info = GlobalGCInfoTable::GCInfoFromIndex(GetGCInfoIndex());
- return gc_info.trace(visitor, Payload());
-}
-
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/heap-object-header.h b/deps/v8/src/heap/cppgc/heap-object-header.h
index 45ff4aa00cd..7e940ca3477 100644
--- a/deps/v8/src/heap/cppgc/heap-object-header.h
+++ b/deps/v8/src/heap/cppgc/heap-object-header.h
@@ -37,16 +37,16 @@ namespace internal {
// | unused | 1 | |
// | in construction | 1 | In construction encoded as |false|. |
// +-----------------+------+------------------------------------------+
-// | size | 14 | 17 bits because allocations are aligned. |
-// | unused | 1 | |
+// | size | 15 | 17 bits because allocations are aligned. |
// | mark bit | 1 | |
// +-----------------+------+------------------------------------------+
//
// Notes:
// - See |GCInfoTable| for constraints on GCInfoIndex.
-// - |size| for regular objects is encoded with 14 bits but can actually
+// - |size| for regular objects is encoded with 15 bits but can actually
// represent sizes up to |kBlinkPageSize| (2^17) because allocations are
-// always 8 byte aligned (see kAllocationGranularity).
+// always 4 byte aligned (see kAllocationGranularity) on 32bit. 64bit uses
+// 8 byte aligned allocations which leaves 1 bit unused.
// - |size| for large objects is encoded as 0. The size of a large object is
// stored in |LargeObjectPage::PayloadSize()|.
// - |mark bit| and |in construction| bits are located in separate 16-bit halves
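A quick check of the arithmetic behind widening the size field to 15 bits (sketch; the constants mirror the granularity change in globals.h above):

#include <cstddef>

static_assert((size_t{1} << 15) * 4 == (size_t{1} << 17),
              "15 bits at 4-byte granularity cover the 2^17-byte page (32-bit)");
static_assert((size_t{1} << 14) * 8 == (size_t{1} << 17),
              "at 8-byte granularity only 14 of the 15 bits are needed (64-bit)");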
@@ -64,6 +64,8 @@ class HeapObjectHeader {
// The payload starts directly after the HeapObjectHeader.
inline Address Payload() const;
+ template <AccessMode mode = AccessMode::kNonAtomic>
+ inline Address PayloadEnd() const;
template <AccessMode mode = AccessMode::kNonAtomic>
inline GCInfoIndex GetGCInfoIndex() const;
@@ -73,7 +75,7 @@ class HeapObjectHeader {
inline void SetSize(size_t size);
template <AccessMode mode = AccessMode::kNonAtomic>
- inline size_t PayloadSize() const;
+ inline size_t ObjectSize() const;
template <AccessMode mode = AccessMode::kNonAtomic>
inline bool IsLargeObject() const;
@@ -100,7 +102,8 @@ class HeapObjectHeader {
V8_EXPORT_PRIVATE HeapObjectName GetName() const;
- V8_EXPORT_PRIVATE void Trace(Visitor*) const;
+ template <AccessMode = AccessMode::kNonAtomic>
+ void Trace(Visitor*) const;
private:
enum class EncodedHalf : uint8_t { kLow, kHigh };
@@ -111,18 +114,17 @@ class HeapObjectHeader {
using GCInfoIndexField = UnusedField1::Next<GCInfoIndex, 14>;
// Used in |encoded_low_|.
using MarkBitField = v8::base::BitField16<bool, 0, 1>;
- using UnusedField2 = MarkBitField::Next<bool, 1>;
using SizeField = void; // Use EncodeSize/DecodeSize instead.
static constexpr size_t DecodeSize(uint16_t encoded) {
// Essentially, gets optimized to << 1.
- using SizeField = UnusedField2::Next<size_t, 14>;
+ using SizeField = MarkBitField::Next<size_t, 15>;
return SizeField::decode(encoded) * kAllocationGranularity;
}
static constexpr uint16_t EncodeSize(size_t size) {
// Essentially, gets optimized to >> 1.
- using SizeField = UnusedField2::Next<size_t, 14>;
+ using SizeField = MarkBitField::Next<size_t, 15>;
return SizeField::encode(size / kAllocationGranularity);
}
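With the mark bit in bit 0 and the size field in bits 1-15, encode and decode reduce to a single shift, as the comments note. A standalone sketch of the equivalent arithmetic on the 32-bit configuration (not the v8::base::BitField implementation):

#include <cstddef>
#include <cstdint>

constexpr size_t kGranularity32 = 4;

// Size is stored in bits 1..15, scaled down by the allocation granularity.
constexpr uint16_t EncodeSizeSketch(size_t size) {
  return static_cast<uint16_t>((size / kGranularity32) << 1);  // == size >> 1
}
constexpr size_t DecodeSizeSketch(uint16_t encoded) {
  return static_cast<size_t>(encoded >> 1) * kGranularity32;  // == encoded << 1
}

static_assert(DecodeSizeSketch(EncodeSizeSketch(48)) == 48, "round-trips");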
@@ -142,6 +144,10 @@ class HeapObjectHeader {
uint16_t encoded_low_;
};
+static_assert(kAllocationGranularity == sizeof(HeapObjectHeader),
+ "sizeof(HeapObjectHeader) must match allocation granularity to "
+ "guarantee alignment");
+
// static
HeapObjectHeader& HeapObjectHeader::FromPayload(void* payload) {
return *reinterpret_cast<HeapObjectHeader*>(static_cast<Address>(payload) -
@@ -183,6 +189,13 @@ Address HeapObjectHeader::Payload() const {
}
template <AccessMode mode>
+Address HeapObjectHeader::PayloadEnd() const {
+ DCHECK(!IsLargeObject());
+ return reinterpret_cast<Address>(const_cast<HeapObjectHeader*>(this)) +
+ GetSize<mode>();
+}
+
+template <AccessMode mode>
GCInfoIndex HeapObjectHeader::GetGCInfoIndex() const {
const uint16_t encoded =
LoadEncoded<mode, EncodedHalf::kHigh, std::memory_order_acquire>();
@@ -201,11 +214,11 @@ size_t HeapObjectHeader::GetSize() const {
void HeapObjectHeader::SetSize(size_t size) {
DCHECK(!IsMarked());
- encoded_low_ |= EncodeSize(size);
+ encoded_low_ = EncodeSize(size);
}
template <AccessMode mode>
-size_t HeapObjectHeader::PayloadSize() const {
+size_t HeapObjectHeader::ObjectSize() const {
return GetSize<mode>() - sizeof(HeapObjectHeader);
}
@@ -265,6 +278,13 @@ bool HeapObjectHeader::IsFinalizable() const {
return gc_info.finalize;
}
+template <AccessMode mode>
+void HeapObjectHeader::Trace(Visitor* visitor) const {
+ const GCInfo& gc_info =
+ GlobalGCInfoTable::GCInfoFromIndex(GetGCInfoIndex<mode>());
+ return gc_info.trace(visitor, Payload());
+}
+
template <AccessMode mode, HeapObjectHeader::EncodedHalf part,
std::memory_order memory_order>
uint16_t HeapObjectHeader::LoadEncoded() const {
diff --git a/deps/v8/src/heap/cppgc/heap-page.h b/deps/v8/src/heap/cppgc/heap-page.h
index 5e238e5bb7e..1a66b8593e6 100644
--- a/deps/v8/src/heap/cppgc/heap-page.h
+++ b/deps/v8/src/heap/cppgc/heap-page.h
@@ -210,6 +210,7 @@ class V8_EXPORT_PRIVATE LargePage final : public BasePage {
ConstAddress PayloadEnd() const;
size_t PayloadSize() const { return payload_size_; }
+ size_t ObjectSize() const { return payload_size_ - sizeof(HeapObjectHeader); }
bool PayloadContains(ConstAddress address) const {
return (PayloadStart() <= address) && (address < PayloadEnd());
diff --git a/deps/v8/src/heap/cppgc/heap-state.cc b/deps/v8/src/heap/cppgc/heap-state.cc
index 32084697c1f..364f03c643f 100644
--- a/deps/v8/src/heap/cppgc/heap-state.cc
+++ b/deps/v8/src/heap/cppgc/heap-state.cc
@@ -11,21 +11,25 @@ namespace subtle {
// static
bool HeapState::IsMarking(const HeapHandle& heap_handle) {
- const auto& heap_base = internal::HeapBase::From(heap_handle);
- const internal::MarkerBase* marker = heap_base.marker();
+ const internal::MarkerBase* marker =
+ internal::HeapBase::From(heap_handle).marker();
return marker && marker->IsMarking();
}
// static
bool HeapState::IsSweeping(const HeapHandle& heap_handle) {
- const auto& heap_base = internal::HeapBase::From(heap_handle);
- return heap_base.sweeper().IsSweepingInProgress();
+ return internal::HeapBase::From(heap_handle).sweeper().IsSweepingInProgress();
}
// static
bool HeapState::IsInAtomicPause(const HeapHandle& heap_handle) {
- const auto& heap_base = internal::HeapBase::From(heap_handle);
- return heap_base.in_atomic_pause();
+ return internal::HeapBase::From(heap_handle).in_atomic_pause();
+}
+
+// static
+bool HeapState::PreviousGCWasConservative(const HeapHandle& heap_handle) {
+ return internal::HeapBase::From(heap_handle).stack_state_of_prev_gc() ==
+ EmbedderStackState::kMayContainHeapPointers;
}
} // namespace subtle
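PreviousGCWasConservative exposes the stack state recorded at the end of the last cycle (see MarkerBase::LeaveAtomicPause further down in this patch) so embedders can adapt their expectations. A hedged usage sketch; the heuristic itself is hypothetical:

#include "include/cppgc/heap-state.h"

// Hypothetical embedder check: only expect fully precise reclamation when the
// previous GC did not have to treat the stack as containing heap pointers.
bool CanExpectPreciseReclamationSketch(const cppgc::HeapHandle& handle) {
  return !cppgc::subtle::HeapState::IsMarking(handle) &&
         !cppgc::subtle::HeapState::PreviousGCWasConservative(handle);
}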
diff --git a/deps/v8/src/heap/cppgc/heap.cc b/deps/v8/src/heap/cppgc/heap.cc
index 875eb198897..c0c9cec9292 100644
--- a/deps/v8/src/heap/cppgc/heap.cc
+++ b/deps/v8/src/heap/cppgc/heap.cc
@@ -115,10 +115,10 @@ void Heap::CollectGarbage(Config config) {
config_ = config;
- if (!IsMarking()) StartGarbageCollection(config);
-
+ if (!IsMarking()) {
+ StartGarbageCollection(config);
+ }
DCHECK(IsMarking());
-
FinalizeGarbageCollection(config.stack_state);
}
@@ -209,5 +209,22 @@ void Heap::FinalizeIncrementalGarbageCollectionIfNeeded(
FinalizeGarbageCollection(stack_state);
}
+void Heap::StartIncrementalGarbageCollectionForTesting() {
+ DCHECK(!IsMarking());
+ DCHECK(!in_no_gc_scope());
+ StartGarbageCollection({Config::CollectionType::kMajor,
+ Config::StackState::kNoHeapPointers,
+ Config::MarkingType::kIncrementalAndConcurrent,
+ Config::SweepingType::kIncrementalAndConcurrent});
+}
+
+void Heap::FinalizeIncrementalGarbageCollectionForTesting(
+ EmbedderStackState stack_state) {
+ DCHECK(!in_no_gc_scope());
+ DCHECK(IsMarking());
+ FinalizeGarbageCollection(stack_state);
+ sweeper_.FinishIfRunning();
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/heap.h b/deps/v8/src/heap/cppgc/heap.h
index 41ef0cfd1c3..b57e40b13b1 100644
--- a/deps/v8/src/heap/cppgc/heap.h
+++ b/deps/v8/src/heap/cppgc/heap.h
@@ -46,6 +46,9 @@ class V8_EXPORT_PRIVATE Heap final : public HeapBase,
void FinalizeIncrementalGarbageCollectionIfNeeded(Config::StackState) final;
+ void StartIncrementalGarbageCollectionForTesting() final;
+ void FinalizeIncrementalGarbageCollectionForTesting(EmbedderStackState) final;
+
Config config_;
GCInvoker gc_invoker_;
HeapGrowing growing_;
diff --git a/deps/v8/src/heap/cppgc/marker.cc b/deps/v8/src/heap/cppgc/marker.cc
index b4f8cf2366c..d30bb0a8ec2 100644
--- a/deps/v8/src/heap/cppgc/marker.cc
+++ b/deps/v8/src/heap/cppgc/marker.cc
@@ -275,6 +275,7 @@ void MarkerBase::LeaveAtomicPause() {
ProcessWeakness();
}
g_process_mutex.Pointer()->Unlock();
+ heap().SetStackStateOfPrevGC(config_.stack_state);
}
void MarkerBase::FinishMarking(MarkingConfig::StackState stack_state) {
@@ -508,9 +509,9 @@ void MarkerBase::MarkNotFullyConstructedObjects() {
mutator_marking_state_.not_fully_constructed_worklist().Extract();
for (HeapObjectHeader* object : objects) {
DCHECK(object);
- if (!mutator_marking_state_.MarkNoPush(*object)) continue;
- // TraceConservativelyIfNeeded will either push to a worklist
- // or trace conservatively and call AccountMarkedBytes.
+ // TraceConservativelyIfNeeded delegates to either in-construction or
+ // fully constructed handling. Both handlers have their own marked bytes
+ // accounting and markbit handling (bailout).
conservative_visitor().TraceConservativelyIfNeeded(*object);
}
}
diff --git a/deps/v8/src/heap/cppgc/marking-state.h b/deps/v8/src/heap/cppgc/marking-state.h
index 777b396f008..6e08fc3e10e 100644
--- a/deps/v8/src/heap/cppgc/marking-state.h
+++ b/deps/v8/src/heap/cppgc/marking-state.h
@@ -451,9 +451,7 @@ void DynamicallyTraceMarkedObject(Visitor& visitor,
const HeapObjectHeader& header) {
DCHECK(!header.IsInConstruction<mode>());
DCHECK(header.IsMarked<mode>());
- const GCInfo& gcinfo =
- GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex<mode>());
- gcinfo.trace(&visitor, header.Payload());
+ header.Trace<mode>(&visitor);
}
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/marking-verifier.cc b/deps/v8/src/heap/cppgc/marking-verifier.cc
index 76f39230edb..42e3c4eb3e8 100644
--- a/deps/v8/src/heap/cppgc/marking-verifier.cc
+++ b/deps/v8/src/heap/cppgc/marking-verifier.cc
@@ -95,7 +95,7 @@ class VerificationVisitor final : public cppgc::Visitor {
void VisitWeakContainer(const void* object, TraceDescriptor,
TraceDescriptor weak_desc, WeakCallback,
- const void*) {
+ const void*) final {
if (!object) return;
// Contents of weak containers are found themselves through page iteration
diff --git a/deps/v8/src/heap/cppgc/object-allocator.cc b/deps/v8/src/heap/cppgc/object-allocator.cc
index 60ad19a9849..366900b0f92 100644
--- a/deps/v8/src/heap/cppgc/object-allocator.cc
+++ b/deps/v8/src/heap/cppgc/object-allocator.cc
@@ -100,6 +100,8 @@ void* AllocateLargeObject(PageBackend* page_backend, LargePageSpace* space,
} // namespace
+constexpr size_t ObjectAllocator::kSmallestSpaceSize;
+
ObjectAllocator::ObjectAllocator(RawHeap* heap, PageBackend* page_backend,
StatsCollector* stats_collector)
: raw_heap_(heap),
diff --git a/deps/v8/src/heap/cppgc/object-allocator.h b/deps/v8/src/heap/cppgc/object-allocator.h
index 1768a638ea8..56faef1c833 100644
--- a/deps/v8/src/heap/cppgc/object-allocator.h
+++ b/deps/v8/src/heap/cppgc/object-allocator.h
@@ -31,6 +31,8 @@ class PageBackend;
class V8_EXPORT_PRIVATE ObjectAllocator final : public cppgc::AllocationHandle {
public:
+ static constexpr size_t kSmallestSpaceSize = 32;
+
ObjectAllocator(RawHeap* heap, PageBackend* page_backend,
StatsCollector* stats_collector);
@@ -85,8 +87,10 @@ void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo,
// static
RawHeap::RegularSpaceType ObjectAllocator::GetInitialSpaceIndexForSize(
size_t size) {
+ static_assert(kSmallestSpaceSize == 32,
+ "should be half the next larger size");
if (size < 64) {
- if (size < 32) return RawHeap::RegularSpaceType::kNormal1;
+ if (size < kSmallestSpaceSize) return RawHeap::RegularSpaceType::kNormal1;
return RawHeap::RegularSpaceType::kNormal2;
}
if (size < 128) return RawHeap::RegularSpaceType::kNormal3;
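The bucketing keeps doubling thresholds, with kSmallestSpaceSize naming the first boundary. A compact sketch of the mapping implied by the branches above (the final bucket is outside this hunk and only assumed here):

#include <cstddef>

constexpr const char* SpaceForSizeSketch(size_t size) {
  if (size < 32) return "kNormal1";   // size < kSmallestSpaceSize
  if (size < 64) return "kNormal2";
  if (size < 128) return "kNormal3";
  return "next larger space (assumed, not shown in the hunk)";
}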
diff --git a/deps/v8/src/heap/cppgc/object-poisoner.h b/deps/v8/src/heap/cppgc/object-poisoner.h
new file mode 100644
index 00000000000..fd2462d6694
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/object-poisoner.h
@@ -0,0 +1,40 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_OBJECT_POISONER_H_
+#define V8_HEAP_CPPGC_OBJECT_POISONER_H_
+
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/heap-visitor.h"
+#include "src/heap/cppgc/sanitizers.h"
+
+namespace cppgc {
+namespace internal {
+
+#ifdef V8_USE_ADDRESS_SANITIZER
+
+// Poisons the payload of unmarked objects.
+class UnmarkedObjectsPoisoner : public HeapVisitor<UnmarkedObjectsPoisoner> {
+ friend class HeapVisitor<UnmarkedObjectsPoisoner>;
+
+ private:
+ bool VisitHeapObjectHeader(HeapObjectHeader* header) {
+ if (header->IsFree() || header->IsMarked()) return true;
+
+ const size_t size =
+ header->IsLargeObject()
+ ? LargePage::From(BasePage::FromPayload(header))->ObjectSize()
+ : header->ObjectSize();
+ ASAN_POISON_MEMORY_REGION(header->Payload(), size);
+ return true;
+ }
+};
+
+#endif // V8_USE_ADDRESS_SANITIZER
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_OBJECT_POISONER_H_
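Under ASAN the sweeper now poisons unmarked payloads when sweeping is prepared and unpoisons them again before finalization or zapping (see the Finalize and DeferredFinalizationBuilder changes in this patch). A minimal sketch of the underlying poison/unpoison pairing using ASAN's public interface, outside of cppgc; build with -fsanitize=address:

#include <cstddef>
#include <sanitizer/asan_interface.h>

void PoisonThenUnpoisonSketch(char* payload, size_t size) {
  __asan_poison_memory_region(payload, size);
  // Any access to `payload` here would be reported as use-after-poison.
  __asan_unpoison_memory_region(payload, size);
  payload[0] = 0;  // Legal again once the region is unpoisoned.
}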
diff --git a/deps/v8/src/heap/cppgc/object-size-trait.cc b/deps/v8/src/heap/cppgc/object-size-trait.cc
index bd0dd3d6407..11c50b3c4d1 100644
--- a/deps/v8/src/heap/cppgc/object-size-trait.cc
+++ b/deps/v8/src/heap/cppgc/object-size-trait.cc
@@ -16,8 +16,8 @@ size_t BaseObjectSizeTrait::GetObjectSizeForGarbageCollected(
const auto& header = HeapObjectHeader::FromPayload(object);
return header.IsLargeObject()
? static_cast<const LargePage*>(BasePage::FromPayload(&header))
- ->PayloadSize()
- : header.PayloadSize();
+ ->ObjectSize()
+ : header.ObjectSize();
}
// static
@@ -29,7 +29,7 @@ size_t BaseObjectSizeTrait::GetObjectSizeForGarbageCollectedMixin(
BasePage::FromPayload(address)
->ObjectHeaderFromInnerAddress<AccessMode::kAtomic>(address);
DCHECK(!header.IsLargeObject());
- return header.PayloadSize();
+ return header.ObjectSize();
}
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/persistent-node.cc b/deps/v8/src/heap/cppgc/persistent-node.cc
index db3a01cafd5..ff3c17d37fb 100644
--- a/deps/v8/src/heap/cppgc/persistent-node.cc
+++ b/deps/v8/src/heap/cppgc/persistent-node.cc
@@ -95,5 +95,26 @@ void PersistentRegionLock::AssertLocked() {
return g_process_mutex.Pointer()->AssertHeld();
}
+CrossThreadPersistentRegion::~CrossThreadPersistentRegion() {
+ PersistentRegionLock guard;
+ persistent_region_.ClearAllUsedNodes();
+ persistent_region_.nodes_.clear();
+}
+
+void CrossThreadPersistentRegion::Trace(Visitor* visitor) {
+ PersistentRegionLock::AssertLocked();
+ return persistent_region_.Trace(visitor);
+}
+
+size_t CrossThreadPersistentRegion::NodesInUse() const {
+ // This method does not require a lock.
+ return persistent_region_.NodesInUse();
+}
+
+void CrossThreadPersistentRegion::ClearAllUsedNodes() {
+ PersistentRegionLock::AssertLocked();
+ return persistent_region_.ClearAllUsedNodes();
+}
+
} // namespace internal
} // namespace cppgc
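All mutating CrossThreadPersistentRegion operations either take or assert the process-global PersistentRegionLock, the same discipline HeapBase::Terminate follows above. A sketch of the calling convention (internal cppgc types, shown only to illustrate the lock scope):

#include "include/cppgc/internal/persistent-node.h"

void ClearCrossThreadRegionsSketch(
    cppgc::internal::CrossThreadPersistentRegion& strong,
    cppgc::internal::CrossThreadPersistentRegion& weak) {
  cppgc::internal::PersistentRegionLock guard;  // scoped process-global lock
  strong.ClearAllUsedNodes();  // asserts the lock is held internally
  weak.ClearAllUsedNodes();
}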
diff --git a/deps/v8/src/heap/cppgc/pointer-policies.cc b/deps/v8/src/heap/cppgc/pointer-policies.cc
index 4544763bf35..fdc435af17c 100644
--- a/deps/v8/src/heap/cppgc/pointer-policies.cc
+++ b/deps/v8/src/heap/cppgc/pointer-policies.cc
@@ -33,14 +33,14 @@ PersistentRegion& WeakPersistentPolicy::GetPersistentRegion(
return heap->GetWeakPersistentRegion();
}
-PersistentRegion& StrongCrossThreadPersistentPolicy::GetPersistentRegion(
- const void* object) {
+CrossThreadPersistentRegion&
+StrongCrossThreadPersistentPolicy::GetPersistentRegion(const void* object) {
auto* heap = BasePage::FromPayload(object)->heap();
return heap->GetStrongCrossThreadPersistentRegion();
}
-PersistentRegion& WeakCrossThreadPersistentPolicy::GetPersistentRegion(
- const void* object) {
+CrossThreadPersistentRegion&
+WeakCrossThreadPersistentPolicy::GetPersistentRegion(const void* object) {
auto* heap = BasePage::FromPayload(object)->heap();
return heap->GetWeakCrossThreadPersistentRegion();
}
diff --git a/deps/v8/src/heap/cppgc/stats-collector.cc b/deps/v8/src/heap/cppgc/stats-collector.cc
index 677216f7723..135ccc12a75 100644
--- a/deps/v8/src/heap/cppgc/stats-collector.cc
+++ b/deps/v8/src/heap/cppgc/stats-collector.cc
@@ -7,7 +7,9 @@
#include <algorithm>
#include <cmath>
+#include "src/base/atomicops.h"
#include "src/base/logging.h"
+#include "src/base/platform/time.h"
#include "src/heap/cppgc/metric-recorder.h"
namespace cppgc {
@@ -55,6 +57,10 @@ void StatsCollector::NotifySafePointForConservativeCollection() {
}
}
+void StatsCollector::NotifySafePointForTesting() {
+ AllocatedObjectSizeSafepointImpl();
+}
+
void StatsCollector::AllocatedObjectSizeSafepointImpl() {
allocated_bytes_since_end_of_marking_ +=
static_cast<int64_t>(allocated_bytes_since_safepoint_) -
@@ -218,7 +224,7 @@ void StatsCollector::NotifySweepingCompleted() {
}
size_t StatsCollector::allocated_memory_size() const {
- return memory_allocated_bytes_;
+ return memory_allocated_bytes_ - memory_freed_bytes_since_end_of_marking_;
}
size_t StatsCollector::allocated_object_size() const {
@@ -234,6 +240,28 @@ size_t StatsCollector::allocated_object_size() const {
allocated_bytes_since_end_of_marking_);
}
+size_t StatsCollector::marked_bytes() const {
+ DCHECK_NE(GarbageCollectionState::kMarking, gc_state_);
+ // During sweeping we refer to the current Event as that already holds the
+ // correct marking information. In all other phases, the previous event holds
+ // the most up-to-date marking information.
+ const Event& event =
+ gc_state_ == GarbageCollectionState::kSweeping ? current_ : previous_;
+ return event.marked_bytes;
+}
+
+v8::base::TimeDelta StatsCollector::marking_time() const {
+ DCHECK_NE(GarbageCollectionState::kMarking, gc_state_);
+ // During sweeping we refer to the current Event as that already holds the
+ // correct marking information. In all other phases, the previous event holds
+ // the most up-to-date marking information.
+ const Event& event =
+ gc_state_ == GarbageCollectionState::kSweeping ? current_ : previous_;
+ return event.scope_data[kAtomicMark] + event.scope_data[kIncrementalMark] +
+ v8::base::TimeDelta::FromMicroseconds(v8::base::Relaxed_Load(
+ &event.concurrent_scope_data[kConcurrentMark]));
+}
+
void StatsCollector::NotifyAllocatedMemory(int64_t size) {
memory_allocated_bytes_ += size;
ForAllAllocationObservers([size](AllocationObserver* observer) {
diff --git a/deps/v8/src/heap/cppgc/stats-collector.h b/deps/v8/src/heap/cppgc/stats-collector.h
index c9945e28d57..2a8583c7304 100644
--- a/deps/v8/src/heap/cppgc/stats-collector.h
+++ b/deps/v8/src/heap/cppgc/stats-collector.h
@@ -261,6 +261,8 @@ class V8_EXPORT_PRIVATE StatsCollector final {
// their actual allocation/reclamation as possible.
void NotifySafePointForConservativeCollection();
+ void NotifySafePointForTesting();
+
// Indicates a new garbage collection cycle.
void NotifyMarkingStarted(CollectionType, IsForcedGC);
// Indicates that marking of the current garbage collection cycle is
@@ -275,6 +277,13 @@ class V8_EXPORT_PRIVATE StatsCollector final {
// bytes and the bytes allocated since last marking.
size_t allocated_object_size() const;
+ // Returns the most recent marked bytes count. Should not be called during
+ // marking.
+ size_t marked_bytes() const;
+ // Returns the overall duration of the most recent marking phase. Should not
+ // be called during marking.
+ v8::base::TimeDelta marking_time() const;
+
double GetRecentAllocationSpeedInBytesPerMs() const;
const Event& GetPreviousEventForTesting() const { return previous_; }
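Both accessors must not be called during marking and pick the event that holds the finished cycle's data (current_ while sweeping, previous_ otherwise). A hedged sketch of a consumer combining them; the summary struct is hypothetical:

#include <cstddef>
#include "src/heap/cppgc/stats-collector.h"

struct MarkingSummarySketch {
  size_t marked_bytes;
  double marking_time_ms;
};

MarkingSummarySketch SummarizeSketch(
    const cppgc::internal::StatsCollector& stats) {
  // Valid outside the marking phase; both accessors DCHECK this.
  return {stats.marked_bytes(), stats.marking_time().InMillisecondsF()};
}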
diff --git a/deps/v8/src/heap/cppgc/sweeper.cc b/deps/v8/src/heap/cppgc/sweeper.cc
index 573838b4c46..937a52afc59 100644
--- a/deps/v8/src/heap/cppgc/sweeper.cc
+++ b/deps/v8/src/heap/cppgc/sweeper.cc
@@ -18,6 +18,7 @@
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-space.h"
#include "src/heap/cppgc/heap-visitor.h"
+#include "src/heap/cppgc/object-poisoner.h"
#include "src/heap/cppgc/object-start-bitmap.h"
#include "src/heap/cppgc/raw-heap.h"
#include "src/heap/cppgc/sanitizers.h"
@@ -160,6 +161,9 @@ class DeferredFinalizationBuilder final {
result_.unfinalized_objects.push_back({header});
found_finalizer_ = true;
} else {
+ // Unmarked memory may have been poisoned. In the non-concurrent case this
+ // is taken care of by finalizing a header.
+ ASAN_UNPOISON_MEMORY_REGION(header, size);
SET_MEMORY_INACCESSIBLE(header, size);
}
}
@@ -478,7 +482,7 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
};
// This visitor:
-// - resets linear allocation buffers and clears free lists for all spaces;
+// - clears free lists for all spaces;
// - moves all Heap pages to local Sweeper's state (SpaceStates).
class PrepareForSweepVisitor final
: public HeapVisitor<PrepareForSweepVisitor> {
@@ -497,11 +501,17 @@ class PrepareForSweepVisitor final
return true;
DCHECK(!space->linear_allocation_buffer().size());
space->free_list().Clear();
+#ifdef V8_USE_ADDRESS_SANITIZER
+ UnmarkedObjectsPoisoner().Traverse(space);
+#endif // V8_USE_ADDRESS_SANITIZER
ExtractPages(space);
return true;
}
bool VisitLargePageSpace(LargePageSpace* space) {
+#ifdef V8_USE_ADDRESS_SANITIZER
+ UnmarkedObjectsPoisoner().Traverse(space);
+#endif // V8_USE_ADDRESS_SANITIZER
ExtractPages(space);
return true;
}
@@ -521,25 +531,24 @@ class PrepareForSweepVisitor final
class Sweeper::SweeperImpl final {
public:
- SweeperImpl(RawHeap* heap, cppgc::Platform* platform,
- StatsCollector* stats_collector)
+ SweeperImpl(RawHeap& heap, StatsCollector* stats_collector)
: heap_(heap),
stats_collector_(stats_collector),
- space_states_(heap->size()),
- platform_(platform) {}
+ space_states_(heap.size()) {}
~SweeperImpl() { CancelSweepers(); }
- void Start(SweepingConfig config) {
- StatsCollector::EnabledScope stats_scope(heap_->heap()->stats_collector(),
+ void Start(SweepingConfig config, cppgc::Platform* platform) {
+ StatsCollector::EnabledScope stats_scope(stats_collector_,
StatsCollector::kAtomicSweep);
is_in_progress_ = true;
+ platform_ = platform;
#if DEBUG
// Verify bitmap for all spaces regardless of |compactable_space_handling|.
- ObjectStartBitmapVerifier().Verify(heap_);
+ ObjectStartBitmapVerifier().Verify(&heap_);
#endif
PrepareForSweepVisitor(&space_states_, config.compactable_space_handling)
- .Traverse(heap_);
+ .Traverse(&heap_);
if (config.sweeping_type == SweepingConfig::SweepingType::kAtomic) {
Finish();
@@ -558,10 +567,10 @@ class Sweeper::SweeperImpl final {
// allocate new memory.
if (is_sweeping_on_mutator_thread_) return false;
- StatsCollector::EnabledScope stats_scope(heap_->heap()->stats_collector(),
+ StatsCollector::EnabledScope stats_scope(stats_collector_,
StatsCollector::kIncrementalSweep);
StatsCollector::EnabledScope inner_scope(
- heap_->heap()->stats_collector(), StatsCollector::kSweepOnAllocation);
+ stats_collector_, StatsCollector::kSweepOnAllocation);
MutatorThreadSweepingScope sweeping_in_progresss(*this);
SpaceState& space_state = space_states_[space->index()];
@@ -597,8 +606,8 @@ class Sweeper::SweeperImpl final {
{
StatsCollector::EnabledScope stats_scope(
- heap_->heap()->stats_collector(), StatsCollector::kIncrementalSweep);
- StatsCollector::EnabledScope inner_scope(heap_->heap()->stats_collector(),
+ stats_collector_, StatsCollector::kIncrementalSweep);
+ StatsCollector::EnabledScope inner_scope(stats_collector_,
StatsCollector::kSweepFinalize);
if (concurrent_sweeper_handle_ && concurrent_sweeper_handle_->IsValid() &&
concurrent_sweeper_handle_->UpdatePriorityEnabled()) {
@@ -629,6 +638,7 @@ class Sweeper::SweeperImpl final {
void FinalizeSweep() {
// Synchronize with the concurrent sweeper and call remaining finalizers.
SynchronizeAndFinalizeConcurrentSweeping();
+ platform_ = nullptr;
is_in_progress_ = false;
notify_done_pending_ = true;
}
@@ -698,15 +708,14 @@ class Sweeper::SweeperImpl final {
bool sweep_complete;
{
StatsCollector::EnabledScope stats_scope(
- sweeper_->heap_->heap()->stats_collector(),
- StatsCollector::kIncrementalSweep);
+ sweeper_->stats_collector_, StatsCollector::kIncrementalSweep);
MutatorThreadSweeper sweeper(&sweeper_->space_states_,
sweeper_->platform_);
{
StatsCollector::EnabledScope stats_scope(
- sweeper_->heap_->heap()->stats_collector(),
- StatsCollector::kSweepIdleStep, "idleDeltaInSeconds",
+ sweeper_->stats_collector_, StatsCollector::kSweepIdleStep,
+ "idleDeltaInSeconds",
(deadline_in_seconds -
sweeper_->platform_->MonotonicallyIncreasingTime()));
@@ -742,7 +751,7 @@ class Sweeper::SweeperImpl final {
concurrent_sweeper_handle_ = platform_->PostJob(
cppgc::TaskPriority::kUserVisible,
- std::make_unique<ConcurrentSweepTask>(*heap_->heap(), &space_states_));
+ std::make_unique<ConcurrentSweepTask>(*heap_.heap(), &space_states_));
}
void CancelSweepers() {
@@ -758,8 +767,8 @@ class Sweeper::SweeperImpl final {
finalizer.FinalizeHeap(&space_states_);
}
- RawHeap* heap_;
- StatsCollector* stats_collector_;
+ RawHeap& heap_;
+ StatsCollector* const stats_collector_;
SpaceStates space_states_;
cppgc::Platform* platform_;
IncrementalSweepTask::Handle incremental_sweeper_handle_;
@@ -772,13 +781,16 @@ class Sweeper::SweeperImpl final {
bool is_sweeping_on_mutator_thread_ = false;
};
-Sweeper::Sweeper(RawHeap* heap, cppgc::Platform* platform,
- StatsCollector* stats_collector)
- : impl_(std::make_unique<SweeperImpl>(heap, platform, stats_collector)) {}
+Sweeper::Sweeper(HeapBase& heap)
+ : heap_(heap),
+ impl_(std::make_unique<SweeperImpl>(heap.raw_heap(),
+ heap.stats_collector())) {}
Sweeper::~Sweeper() = default;
-void Sweeper::Start(SweepingConfig config) { impl_->Start(config); }
+void Sweeper::Start(SweepingConfig config) {
+ impl_->Start(config, heap_.platform());
+}
void Sweeper::FinishIfRunning() { impl_->FinishIfRunning(); }
void Sweeper::WaitForConcurrentSweepingForTesting() {
impl_->WaitForConcurrentSweepingForTesting();
diff --git a/deps/v8/src/heap/cppgc/sweeper.h b/deps/v8/src/heap/cppgc/sweeper.h
index 7d6ffc25871..4c77ec69173 100644
--- a/deps/v8/src/heap/cppgc/sweeper.h
+++ b/deps/v8/src/heap/cppgc/sweeper.h
@@ -16,8 +16,7 @@ class Platform;
namespace internal {
-class StatsCollector;
-class RawHeap;
+class HeapBase;
class ConcurrentSweeperTest;
class NormalPageSpace;
@@ -32,7 +31,7 @@ class V8_EXPORT_PRIVATE Sweeper final {
CompactableSpaceHandling::kSweep;
};
- Sweeper(RawHeap*, cppgc::Platform*, StatsCollector*);
+ explicit Sweeper(HeapBase&);
~Sweeper();
Sweeper(const Sweeper&) = delete;
@@ -54,6 +53,8 @@ class V8_EXPORT_PRIVATE Sweeper final {
void WaitForConcurrentSweepingForTesting();
class SweeperImpl;
+
+ HeapBase& heap_;
std::unique_ptr<SweeperImpl> impl_;
friend class ConcurrentSweeperTest;
diff --git a/deps/v8/src/heap/cppgc/testing.cc b/deps/v8/src/heap/cppgc/testing.cc
index bd72a3dfe19..0c81d7003b3 100644
--- a/deps/v8/src/heap/cppgc/testing.cc
+++ b/deps/v8/src/heap/cppgc/testing.cc
@@ -19,8 +19,39 @@ OverrideEmbedderStackStateScope::OverrideEmbedderStackStateScope(
}
OverrideEmbedderStackStateScope::~OverrideEmbedderStackStateScope() {
- auto& heap = internal::HeapBase::From(heap_handle_);
- heap.override_stack_state_.reset();
+ internal::HeapBase::From(heap_handle_).override_stack_state_.reset();
+}
+
+StandaloneTestingHeap::StandaloneTestingHeap(HeapHandle& heap_handle)
+ : heap_handle_(heap_handle) {}
+
+void StandaloneTestingHeap::StartGarbageCollection() {
+ internal::HeapBase::From(heap_handle_)
+ .StartIncrementalGarbageCollectionForTesting();
+}
+
+bool StandaloneTestingHeap::PerformMarkingStep(EmbedderStackState stack_state) {
+ return internal::HeapBase::From(heap_handle_)
+ .marker()
+ ->IncrementalMarkingStepForTesting(stack_state);
+}
+
+void StandaloneTestingHeap::FinalizeGarbageCollection(
+ EmbedderStackState stack_state) {
+ internal::HeapBase::From(heap_handle_)
+ .FinalizeIncrementalGarbageCollectionForTesting(stack_state);
+}
+
+void StandaloneTestingHeap::ToggleMainThreadMarking(bool should_mark) {
+ internal::HeapBase::From(heap_handle_)
+ .marker()
+ ->SetMainThreadMarkingDisabledForTesting(!should_mark);
+}
+
+void StandaloneTestingHeap::ForceCompactionForNextGarbageCollection() {
+ internal::HeapBase::From(heap_handle_)
+ .compactor()
+ .EnableForNextGCForTesting();
}
} // namespace testing
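StandaloneTestingHeap is the public wrapper around the *ForTesting hooks added to HeapBase, letting a test drive an incremental cycle step by step. A hedged usage sketch; it assumes PerformMarkingStep returns true once marking has finished:

#include "include/cppgc/testing.h"

void DriveIncrementalGCSketch(cppgc::HeapHandle& handle) {
  cppgc::testing::StandaloneTestingHeap heap(handle);
  heap.StartGarbageCollection();
  // Assumption: a true return value signals that marking is complete.
  while (!heap.PerformMarkingStep(
      cppgc::EmbedderStackState::kNoHeapPointers)) {
  }
  heap.FinalizeGarbageCollection(cppgc::EmbedderStackState::kNoHeapPointers);
}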
diff --git a/deps/v8/src/heap/cppgc/trace-trait.cc b/deps/v8/src/heap/cppgc/trace-trait.cc
index 9f410b9c128..bf3759881b4 100644
--- a/deps/v8/src/heap/cppgc/trace-trait.cc
+++ b/deps/v8/src/heap/cppgc/trace-trait.cc
@@ -14,9 +14,10 @@ TraceDescriptor TraceTraitFromInnerAddressImpl::GetTraceDescriptor(
const void* address) {
// address is guaranteed to be on a normal page because this is used only for
// mixins.
+ const BasePage* page = BasePage::FromPayload(address);
+ page->SynchronizedLoad();
const HeapObjectHeader& header =
- BasePage::FromPayload(address)
- ->ObjectHeaderFromInnerAddress<AccessMode::kAtomic>(address);
+ page->ObjectHeaderFromInnerAddress<AccessMode::kAtomic>(address);
return {header.Payload(), GlobalGCInfoTable::GCInfoFromIndex(
header.GetGCInfoIndex<AccessMode::kAtomic>())
.trace};
diff --git a/deps/v8/src/heap/embedder-tracing.cc b/deps/v8/src/heap/embedder-tracing.cc
index c83cbcb5a26..4fd747a964e 100644
--- a/deps/v8/src/heap/embedder-tracing.cc
+++ b/deps/v8/src/heap/embedder-tracing.cc
@@ -34,14 +34,19 @@ void LocalEmbedderHeapTracer::TraceEpilogue() {
EmbedderHeapTracer::TraceSummary summary;
remote_tracer_->TraceEpilogue(&summary);
- remote_stats_.used_size = summary.allocated_size;
+ if (summary.allocated_size == SIZE_MAX) return;
+ UpdateRemoteStats(summary.allocated_size, summary.time);
+}
+
+void LocalEmbedderHeapTracer::UpdateRemoteStats(size_t allocated_size,
+ double time) {
+ remote_stats_.used_size = allocated_size;
// Force a check next time increased memory is reported. This allows for
// setting limits close to actual heap sizes.
remote_stats_.allocated_size_limit_for_check = 0;
constexpr double kMinReportingTimeMs = 0.5;
- if (summary.time > kMinReportingTimeMs) {
- isolate_->heap()->tracer()->RecordEmbedderSpeed(summary.allocated_size,
- summary.time);
+ if (time > kMinReportingTimeMs) {
+ isolate_->heap()->tracer()->RecordEmbedderSpeed(allocated_size, time);
}
}
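UpdateRemoteStats now serves both TraceEpilogue (which skips the SIZE_MAX "no data" sentinel) and direct callers. A thin sketch of the intended call shape; the wrapper function is hypothetical:

#include <cstddef>
#include <cstdint>
#include "src/heap/embedder-tracing.h"

void ReportEmbedderSummarySketch(v8::internal::LocalEmbedderHeapTracer& tracer,
                                 size_t allocated_size, double time_ms) {
  if (allocated_size == SIZE_MAX) return;  // sentinel: nothing to report
  tracer.UpdateRemoteStats(allocated_size, time_ms);
}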
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index 80b98394b57..8a1b14a32b4 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -128,6 +128,8 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
wrapper_descriptor_ = wrapper_descriptor;
}
+ void UpdateRemoteStats(size_t, double);
+
private:
static constexpr size_t kEmbedderAllocatedThreshold = 128 * KB;
diff --git a/deps/v8/src/heap/factory-base.cc b/deps/v8/src/heap/factory-base.cc
index bc6d60edac3..45577f7bf91 100644
--- a/deps/v8/src/heap/factory-base.cc
+++ b/deps/v8/src/heap/factory-base.cc
@@ -6,6 +6,7 @@
#include "src/ast/ast-source-ranges.h"
#include "src/ast/ast.h"
+#include "src/common/assert-scope.h"
#include "src/execution/local-isolate.h"
#include "src/handles/handles-inl.h"
#include "src/heap/factory.h"
@@ -19,6 +20,7 @@
#include "src/objects/module-inl.h"
#include "src/objects/oddball.h"
#include "src/objects/shared-function-info-inl.h"
+#include "src/objects/shared-function-info.h"
#include "src/objects/source-text-module.h"
#include "src/objects/string-inl.h"
#include "src/objects/string.h"
@@ -51,11 +53,17 @@ FactoryBase<LocalFactory>::NewHeapNumber<AllocationType::kOld>();
template <typename Impl>
Handle<Struct> FactoryBase<Impl>::NewStruct(InstanceType type,
AllocationType allocation) {
+ return handle(NewStructInternal(type, allocation), isolate());
+}
+
+template <typename Impl>
+Struct FactoryBase<Impl>::NewStructInternal(InstanceType type,
+ AllocationType allocation) {
Map map = Map::GetInstanceTypeMap(read_only_roots(), type);
int size = map.instance_size();
HeapObject result = AllocateRawWithImmortalMap(size, allocation, map);
- Handle<Struct> str = handle(Struct::cast(result), isolate());
- str->InitializeBody(size);
+ Struct str = Struct::cast(result);
+ str.InitializeBody(size);
return str;
}
@@ -63,8 +71,10 @@ template <typename Impl>
Handle<AccessorPair> FactoryBase<Impl>::NewAccessorPair() {
Handle<AccessorPair> accessors = Handle<AccessorPair>::cast(
NewStruct(ACCESSOR_PAIR_TYPE, AllocationType::kOld));
- accessors->set_getter(read_only_roots().null_value(), SKIP_WRITE_BARRIER);
- accessors->set_setter(read_only_roots().null_value(), SKIP_WRITE_BARRIER);
+ AccessorPair raw = *accessors;
+ DisallowGarbageCollection no_gc;
+ raw.set_getter(read_only_roots().null_value(), SKIP_WRITE_BARRIER);
+ raw.set_setter(read_only_roots().null_value(), SKIP_WRITE_BARRIER);
return accessors;
}
@@ -103,13 +113,14 @@ Handle<FixedArray> FactoryBase<Impl>::NewFixedArrayWithFiller(
Handle<Map> map, int length, Handle<Oddball> filler,
AllocationType allocation) {
HeapObject result = AllocateRawFixedArray(length, allocation);
+ DisallowGarbageCollection no_gc;
DCHECK(ReadOnlyHeap::Contains(*map));
DCHECK(ReadOnlyHeap::Contains(*filler));
result.set_map_after_allocation(*map, SKIP_WRITE_BARRIER);
- Handle<FixedArray> array = handle(FixedArray::cast(result), isolate());
- array->set_length(length);
- MemsetTagged(array->data_start(), *filler, length);
- return array;
+ FixedArray array = FixedArray::cast(result);
+ array.set_length(length);
+ MemsetTagged(array.data_start(), *filler, length);
+ return handle(array, isolate());
}
template <typename Impl>
@@ -123,10 +134,10 @@ Handle<FixedArrayBase> FactoryBase<Impl>::NewFixedDoubleArray(
Map map = read_only_roots().fixed_double_array_map();
HeapObject result =
AllocateRawWithImmortalMap(size, allocation, map, kDoubleAligned);
- Handle<FixedDoubleArray> array =
- handle(FixedDoubleArray::cast(result), isolate());
- array->set_length(length);
- return array;
+ DisallowGarbageCollection no_gc;
+ FixedDoubleArray array = FixedDoubleArray::cast(result);
+ array.set_length(length);
+ return handle(array, isolate());
}
template <typename Impl>
@@ -139,14 +150,13 @@ Handle<WeakFixedArray> FactoryBase<Impl>::NewWeakFixedArrayWithMap(
HeapObject result =
AllocateRawArray(WeakFixedArray::SizeFor(length), allocation);
result.set_map_after_allocation(map, SKIP_WRITE_BARRIER);
-
- Handle<WeakFixedArray> array =
- handle(WeakFixedArray::cast(result), isolate());
- array->set_length(length);
- MemsetTagged(ObjectSlot(array->data_start()),
+ DisallowGarbageCollection no_gc;
+ WeakFixedArray array = WeakFixedArray::cast(result);
+ array.set_length(length);
+ MemsetTagged(ObjectSlot(array.data_start()),
read_only_roots().undefined_value(), length);
- return array;
+ return handle(array, isolate());
}
template <typename Impl>
@@ -167,10 +177,11 @@ Handle<ByteArray> FactoryBase<Impl>::NewByteArray(int length,
int size = ByteArray::SizeFor(length);
HeapObject result = AllocateRawWithImmortalMap(
size, allocation, read_only_roots().byte_array_map());
- Handle<ByteArray> array(ByteArray::cast(result), isolate());
- array->set_length(length);
- array->clear_padding();
- return array;
+ DisallowGarbageCollection no_gc;
+ ByteArray array = ByteArray::cast(result);
+ array.set_length(length);
+ array.clear_padding();
+ return handle(array, isolate());
}
template <typename Impl>
@@ -187,23 +198,24 @@ Handle<BytecodeArray> FactoryBase<Impl>::NewBytecodeArray(
int size = BytecodeArray::SizeFor(length);
HeapObject result = AllocateRawWithImmortalMap(
size, AllocationType::kOld, read_only_roots().bytecode_array_map());
- Handle<BytecodeArray> instance(BytecodeArray::cast(result), isolate());
- instance->set_length(length);
- instance->set_frame_size(frame_size);
- instance->set_parameter_count(parameter_count);
- instance->set_incoming_new_target_or_generator_register(
+ DisallowGarbageCollection no_gc;
+ BytecodeArray instance = BytecodeArray::cast(result);
+ instance.set_length(length);
+ instance.set_frame_size(frame_size);
+ instance.set_parameter_count(parameter_count);
+ instance.set_incoming_new_target_or_generator_register(
interpreter::Register::invalid_value());
- instance->set_osr_loop_nesting_level(0);
- instance->set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
- instance->set_constant_pool(*constant_pool);
- instance->set_handler_table(read_only_roots().empty_byte_array());
- instance->set_source_position_table(read_only_roots().undefined_value(),
- kReleaseStore);
- CopyBytes(reinterpret_cast<byte*>(instance->GetFirstBytecodeAddress()),
+ instance.set_osr_loop_nesting_level(0);
+ instance.set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
+ instance.set_constant_pool(*constant_pool);
+ instance.set_handler_table(read_only_roots().empty_byte_array(),
+ SKIP_WRITE_BARRIER);
+ instance.set_source_position_table(read_only_roots().undefined_value(),
+ kReleaseStore, SKIP_WRITE_BARRIER);
+ CopyBytes(reinterpret_cast<byte*>(instance.GetFirstBytecodeAddress()),
raw_bytecodes, length);
- instance->clear_padding();
-
- return instance;
+ instance.clear_padding();
+ return handle(instance, isolate());
}
template <typename Impl>
@@ -220,20 +232,25 @@ Handle<Script> FactoryBase<Impl>::NewScriptWithId(
ReadOnlyRoots roots = read_only_roots();
Handle<Script> script =
Handle<Script>::cast(NewStruct(SCRIPT_TYPE, AllocationType::kOld));
- script->set_source(*source);
- script->set_name(roots.undefined_value());
- script->set_id(script_id);
- script->set_line_offset(0);
- script->set_column_offset(0);
- script->set_context_data(roots.undefined_value());
- script->set_type(Script::TYPE_NORMAL);
- script->set_line_ends(roots.undefined_value());
- script->set_eval_from_shared_or_wrapped_arguments(roots.undefined_value());
- script->set_eval_from_position(0);
- script->set_shared_function_infos(roots.empty_weak_fixed_array(),
- SKIP_WRITE_BARRIER);
- script->set_flags(0);
- script->set_host_defined_options(roots.empty_fixed_array());
+ {
+ DisallowGarbageCollection no_gc;
+ Script raw = *script;
+ raw.set_source(*source);
+ raw.set_name(roots.undefined_value(), SKIP_WRITE_BARRIER);
+ raw.set_id(script_id);
+ raw.set_line_offset(0);
+ raw.set_column_offset(0);
+ raw.set_context_data(roots.undefined_value(), SKIP_WRITE_BARRIER);
+ raw.set_type(Script::TYPE_NORMAL);
+ raw.set_line_ends(roots.undefined_value(), SKIP_WRITE_BARRIER);
+ raw.set_eval_from_shared_or_wrapped_arguments(roots.undefined_value(),
+ SKIP_WRITE_BARRIER);
+ raw.set_eval_from_position(0);
+ raw.set_shared_function_infos(roots.empty_weak_fixed_array(),
+ SKIP_WRITE_BARRIER);
+ raw.set_flags(0);
+ raw.set_host_defined_options(roots.empty_fixed_array(), SKIP_WRITE_BARRIER);
+ }
if (script_id != Script::kTemporaryScriptId) {
impl()->AddToScriptList(script);
@@ -261,16 +278,15 @@ template <typename Impl>
Handle<PreparseData> FactoryBase<Impl>::NewPreparseData(int data_length,
int children_length) {
int size = PreparseData::SizeFor(data_length, children_length);
- Handle<PreparseData> result = handle(
- PreparseData::cast(AllocateRawWithImmortalMap(
- size, AllocationType::kOld, read_only_roots().preparse_data_map())),
- isolate());
- result->set_data_length(data_length);
- result->set_children_length(children_length);
- MemsetTagged(result->inner_data_start(), read_only_roots().null_value(),
+ PreparseData result = PreparseData::cast(AllocateRawWithImmortalMap(
+ size, AllocationType::kOld, read_only_roots().preparse_data_map()));
+ DisallowGarbageCollection no_gc;
+ result.set_data_length(data_length);
+ result.set_children_length(children_length);
+ MemsetTagged(result.inner_data_start(), read_only_roots().null_value(),
children_length);
- result->clear_padding();
- return result;
+ result.clear_padding();
+ return handle(result, isolate());
}
template <typename Impl>
@@ -297,15 +313,16 @@ Handle<SharedFunctionInfo> FactoryBase<Impl>::NewSharedFunctionInfo(
MaybeHandle<String> maybe_name, MaybeHandle<HeapObject> maybe_function_data,
int maybe_builtin_index, FunctionKind kind) {
Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo();
-
+ DisallowGarbageCollection no_gc;
+ SharedFunctionInfo raw = *shared;
// Function names are assumed to be flat elsewhere.
Handle<String> shared_name;
bool has_shared_name = maybe_name.ToHandle(&shared_name);
if (has_shared_name) {
DCHECK(shared_name->IsFlat());
- shared->set_name_or_scope_info(*shared_name, kReleaseStore);
+ raw.set_name_or_scope_info(*shared_name, kReleaseStore);
} else {
- DCHECK_EQ(shared->name_or_scope_info(kAcquireLoad),
+ DCHECK_EQ(raw.name_or_scope_info(kAcquireLoad),
SharedFunctionInfo::kNoSharedNameSentinel);
}
@@ -316,19 +333,19 @@ Handle<SharedFunctionInfo> FactoryBase<Impl>::NewSharedFunctionInfo(
DCHECK(!Builtins::IsBuiltinId(maybe_builtin_index));
DCHECK_IMPLIES(function_data->IsCode(),
!Code::cast(*function_data).is_builtin());
- shared->set_function_data(*function_data, kReleaseStore);
+ raw.set_function_data(*function_data, kReleaseStore);
} else if (Builtins::IsBuiltinId(maybe_builtin_index)) {
- shared->set_builtin_id(maybe_builtin_index);
+ raw.set_builtin_id(maybe_builtin_index);
} else {
- DCHECK(shared->HasBuiltinId());
- DCHECK_EQ(Builtins::kIllegal, shared->builtin_id());
+ DCHECK(raw.HasBuiltinId());
+ DCHECK_EQ(Builtins::kIllegal, raw.builtin_id());
}
- shared->CalculateConstructAsBuiltin();
- shared->set_kind(kind);
+ raw.CalculateConstructAsBuiltin();
+ raw.set_kind(kind);
#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) shared->SharedFunctionInfoVerify(isolate());
+ if (FLAG_verify_heap) raw.SharedFunctionInfoVerify(isolate());
#endif // VERIFY_HEAP
return shared;
}
@@ -380,8 +397,10 @@ FactoryBase<Impl>::NewArrayBoilerplateDescription(
Handle<ArrayBoilerplateDescription> result =
Handle<ArrayBoilerplateDescription>::cast(
NewStruct(ARRAY_BOILERPLATE_DESCRIPTION_TYPE, AllocationType::kOld));
- result->set_elements_kind(elements_kind);
- result->set_constant_elements(*constant_values);
+ DisallowGarbageCollection no_gc;
+ ArrayBoilerplateDescription raw = *result;
+ raw.set_elements_kind(elements_kind);
+ raw.set_constant_elements(*constant_values);
return result;
}
@@ -393,9 +412,11 @@ FactoryBase<Impl>::NewRegExpBoilerplateDescription(Handle<FixedArray> data,
Handle<RegExpBoilerplateDescription> result =
Handle<RegExpBoilerplateDescription>::cast(NewStruct(
REG_EXP_BOILERPLATE_DESCRIPTION_TYPE, AllocationType::kOld));
- result->set_data(*data);
- result->set_source(*source);
- result->set_flags(flags.value());
+ DisallowGarbageCollection no_gc;
+ RegExpBoilerplateDescription raw = *result;
+ raw.set_data(*data);
+ raw.set_source(*source);
+ raw.set_flags(flags.value());
return result;
}
@@ -408,8 +429,10 @@ FactoryBase<Impl>::NewTemplateObjectDescription(
Handle<TemplateObjectDescription> result =
Handle<TemplateObjectDescription>::cast(
NewStruct(TEMPLATE_OBJECT_DESCRIPTION_TYPE, AllocationType::kOld));
- result->set_raw_strings(*raw_strings);
- result->set_cooked_strings(*cooked_strings);
+ DisallowGarbageCollection no_gc;
+ TemplateObjectDescription raw = *result;
+ raw.set_raw_strings(*raw_strings);
+ raw.set_cooked_strings(*cooked_strings);
return result;
}
@@ -418,19 +441,18 @@ Handle<FeedbackMetadata> FactoryBase<Impl>::NewFeedbackMetadata(
int slot_count, int create_closure_slot_count, AllocationType allocation) {
DCHECK_LE(0, slot_count);
int size = FeedbackMetadata::SizeFor(slot_count);
- HeapObject result = AllocateRawWithImmortalMap(
- size, allocation, read_only_roots().feedback_metadata_map());
- Handle<FeedbackMetadata> data(FeedbackMetadata::cast(result), isolate());
- data->set_slot_count(slot_count);
- data->set_create_closure_slot_count(create_closure_slot_count);
+ FeedbackMetadata result = FeedbackMetadata::cast(AllocateRawWithImmortalMap(
+ size, allocation, read_only_roots().feedback_metadata_map()));
+ result.set_slot_count(slot_count);
+ result.set_create_closure_slot_count(create_closure_slot_count);
// Initialize the data section to 0.
int data_size = size - FeedbackMetadata::kHeaderSize;
- Address data_start = data->address() + FeedbackMetadata::kHeaderSize;
+ Address data_start = result.address() + FeedbackMetadata::kHeaderSize;
memset(reinterpret_cast<byte*>(data_start), 0, data_size);
// Fields have been zeroed out but not initialized, so this object will not
// pass object verification at this point.
- return data;
+ return handle(result, isolate());
}
template <typename Impl>
@@ -440,17 +462,14 @@ Handle<CoverageInfo> FactoryBase<Impl>::NewCoverageInfo(
int size = CoverageInfo::SizeFor(slot_count);
Map map = read_only_roots().coverage_info_map();
- HeapObject result =
- AllocateRawWithImmortalMap(size, AllocationType::kOld, map);
- Handle<CoverageInfo> info(CoverageInfo::cast(result), isolate());
-
- info->set_slot_count(slot_count);
+ CoverageInfo info = CoverageInfo::cast(
+ AllocateRawWithImmortalMap(size, AllocationType::kOld, map));
+ info.set_slot_count(slot_count);
for (int i = 0; i < slot_count; i++) {
SourceRange range = slots[i];
- info->InitializeSlot(i, range.start, range.end);
+ info.InitializeSlot(i, range.start, range.end);
}
-
- return info;
+ return handle(info, isolate());
}
template <typename Impl>
@@ -539,14 +558,13 @@ MaybeHandle<SeqOneByteString> FactoryBase<Impl>::NewRawOneByteString(
int size = SeqOneByteString::SizeFor(length);
DCHECK_GE(SeqOneByteString::kMaxSize, size);
- HeapObject result = AllocateRawWithImmortalMap(
- size, allocation, read_only_roots().one_byte_string_map());
- Handle<SeqOneByteString> string =
- handle(SeqOneByteString::cast(result), isolate());
- string->set_length(length);
- string->set_raw_hash_field(String::kEmptyHashField);
- DCHECK_EQ(size, string->Size());
- return string;
+ SeqOneByteString string = SeqOneByteString::cast(AllocateRawWithImmortalMap(
+ size, allocation, read_only_roots().one_byte_string_map()));
+ DisallowGarbageCollection no_gc;
+ string.set_length(length);
+ string.set_raw_hash_field(String::kEmptyHashField);
+ DCHECK_EQ(size, string.Size());
+ return handle(string, isolate());
}
template <typename Impl>
@@ -559,14 +577,13 @@ MaybeHandle<SeqTwoByteString> FactoryBase<Impl>::NewRawTwoByteString(
int size = SeqTwoByteString::SizeFor(length);
DCHECK_GE(SeqTwoByteString::kMaxSize, size);
- HeapObject result = AllocateRawWithImmortalMap(
- size, allocation, read_only_roots().string_map());
- Handle<SeqTwoByteString> string =
- handle(SeqTwoByteString::cast(result), isolate());
- string->set_length(length);
- string->set_raw_hash_field(String::kEmptyHashField);
- DCHECK_EQ(size, string->Size());
- return string;
+ SeqTwoByteString string = SeqTwoByteString::cast(AllocateRawWithImmortalMap(
+ size, allocation, read_only_roots().string_map()));
+ DisallowGarbageCollection no_gc;
+ string.set_length(length);
+ string.set_raw_hash_field(String::kEmptyHashField);
+ DCHECK_EQ(size, string.Size());
+ return handle(string, isolate());
}
template <typename Impl>
@@ -655,23 +672,19 @@ Handle<String> FactoryBase<Impl>::NewConsString(Handle<String> left,
DCHECK_GE(length, ConsString::kMinLength);
DCHECK_LE(length, String::kMaxLength);
- Handle<ConsString> result = handle(
- ConsString::cast(
- one_byte
- ? NewWithImmortalMap(read_only_roots().cons_one_byte_string_map(),
- allocation)
- : NewWithImmortalMap(read_only_roots().cons_string_map(),
- allocation)),
- isolate());
+ ConsString result = ConsString::cast(
+ one_byte ? NewWithImmortalMap(
+ read_only_roots().cons_one_byte_string_map(), allocation)
+ : NewWithImmortalMap(read_only_roots().cons_string_map(),
+ allocation));
DisallowGarbageCollection no_gc;
- WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
-
- result->set_raw_hash_field(String::kEmptyHashField);
- result->set_length(length);
- result->set_first(*left, mode);
- result->set_second(*right, mode);
- return result;
+ WriteBarrierMode mode = result.GetWriteBarrierMode(no_gc);
+ result.set_raw_hash_field(String::kEmptyHashField);
+ result.set_length(length);
+ result.set_first(*left, mode);
+ result.set_second(*right, mode);
+ return handle(result, isolate());
}
template <typename Impl>
@@ -682,6 +695,7 @@ Handle<FreshlyAllocatedBigInt> FactoryBase<Impl>::NewBigInt(
}
HeapObject result = AllocateRawWithImmortalMap(
BigInt::SizeFor(length), allocation, read_only_roots().bigint_map());
+ DisallowGarbageCollection no_gc;
FreshlyAllocatedBigInt bigint = FreshlyAllocatedBigInt::cast(result);
bigint.clear_padding();
return handle(bigint, isolate());
@@ -691,11 +705,13 @@ template <typename Impl>
Handle<ScopeInfo> FactoryBase<Impl>::NewScopeInfo(int length,
AllocationType type) {
DCHECK(type == AllocationType::kOld || type == AllocationType::kReadOnly);
- Handle<HeapObject> result =
- Handle<HeapObject>::cast(NewFixedArray(length, type));
- result->set_map_after_allocation(*read_only_roots().scope_info_map_handle(),
- SKIP_WRITE_BARRIER);
- return Handle<ScopeInfo>::cast(result);
+ int size = ScopeInfo::SizeFor(length);
+ HeapObject obj = AllocateRawWithImmortalMap(
+ size, type, read_only_roots().scope_info_map());
+ ScopeInfo scope_info = ScopeInfo::cast(obj);
+ MemsetTagged(scope_info.data_start(), read_only_roots().undefined_value(),
+ length);
+ return handle(scope_info, isolate());
}
template <typename Impl>
@@ -709,20 +725,20 @@ template <typename Impl>
Handle<SharedFunctionInfo> FactoryBase<Impl>::NewSharedFunctionInfo() {
Map map = read_only_roots().shared_function_info_map();
- Handle<SharedFunctionInfo> shared = handle(
- SharedFunctionInfo::cast(NewWithImmortalMap(map, AllocationType::kOld)),
- isolate());
+ SharedFunctionInfo shared =
+ SharedFunctionInfo::cast(NewWithImmortalMap(map, AllocationType::kOld));
+ DisallowGarbageCollection no_gc;
int unique_id = -1;
#if V8_SFI_HAS_UNIQUE_ID
unique_id = isolate()->GetNextUniqueSharedFunctionInfoId();
#endif // V8_SFI_HAS_UNIQUE_ID
- shared->Init(read_only_roots(), unique_id);
+ shared.Init(read_only_roots(), unique_id);
#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) shared->SharedFunctionInfoVerify(isolate());
+ if (FLAG_verify_heap) shared.SharedFunctionInfoVerify(isolate());
#endif // VERIFY_HEAP
- return shared;
+ return handle(shared, isolate());
}
template <typename Impl>
@@ -766,12 +782,12 @@ FactoryBase<Impl>::AllocateRawOneByteInternalizedString(
impl()->CanAllocateInReadOnlySpace() ? AllocationType::kReadOnly
: AllocationType::kOld,
map);
- Handle<SeqOneByteString> answer =
- handle(SeqOneByteString::cast(result), isolate());
- answer->set_length(length);
- answer->set_raw_hash_field(raw_hash_field);
- DCHECK_EQ(size, answer->Size());
- return answer;
+ SeqOneByteString answer = SeqOneByteString::cast(result);
+ DisallowGarbageCollection no_gc;
+ answer.set_length(length);
+ answer.set_raw_hash_field(raw_hash_field);
+ DCHECK_EQ(size, answer.Size());
+ return handle(answer, isolate());
}
template <typename Impl>
@@ -783,14 +799,13 @@ FactoryBase<Impl>::AllocateRawTwoByteInternalizedString(
Map map = read_only_roots().internalized_string_map();
int size = SeqTwoByteString::SizeFor(length);
- HeapObject result =
- AllocateRawWithImmortalMap(size, AllocationType::kOld, map);
- Handle<SeqTwoByteString> answer =
- handle(SeqTwoByteString::cast(result), isolate());
- answer->set_length(length);
- answer->set_raw_hash_field(raw_hash_field);
- DCHECK_EQ(size, result.Size());
- return answer;
+ SeqTwoByteString answer = SeqTwoByteString::cast(
+ AllocateRawWithImmortalMap(size, AllocationType::kOld, map));
+ DisallowGarbageCollection no_gc;
+ answer.set_length(length);
+ answer.set_raw_hash_field(raw_hash_field);
+ DCHECK_EQ(size, answer.Size());
+ return handle(answer, isolate());
}
template <typename Impl>
@@ -839,6 +854,7 @@ HeapObject FactoryBase<Impl>::AllocateRawWithImmortalMap(
// no one does so this check is sufficient.
DCHECK(ReadOnlyHeap::Contains(map));
HeapObject result = AllocateRaw(size, allocation, alignment);
+ DisallowGarbageCollection no_gc;
result.set_map_after_allocation(map, SKIP_WRITE_BARRIER);
return result;
}
@@ -872,11 +888,11 @@ FactoryBase<Impl>::NewSwissNameDictionaryWithCapacity(
Map map = read_only_roots().swiss_name_dictionary_map();
int size = SwissNameDictionary::SizeFor(capacity);
- HeapObject result = AllocateRawWithImmortalMap(size, allocation, map);
- Handle<SwissNameDictionary> table(SwissNameDictionary::cast(result),
- isolate());
- table->Initialize(isolate(), *meta_table, capacity);
- return table;
+ SwissNameDictionary table = SwissNameDictionary::cast(
+ AllocateRawWithImmortalMap(size, allocation, map));
+ DisallowGarbageCollection no_gc;
+ table.Initialize(isolate(), *meta_table, capacity);
+ return handle(table, isolate());
}
template <typename Impl>
diff --git a/deps/v8/src/heap/factory-base.h b/deps/v8/src/heap/factory-base.h
index e6cc12e9963..b964f6b2346 100644
--- a/deps/v8/src/heap/factory-base.h
+++ b/deps/v8/src/heap/factory-base.h
@@ -234,6 +234,9 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
HeapObject AllocateRawFixedArray(int length, AllocationType allocation);
HeapObject AllocateRawWeakArrayList(int length, AllocationType allocation);
+ Struct NewStructInternal(InstanceType type,
+ AllocationType allocation = AllocationType::kYoung);
+
HeapObject AllocateRawWithImmortalMap(
int size, AllocationType allocation, Map map,
AllocationAlignment alignment = kWordAligned);
@@ -252,6 +255,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
Handle<String> MakeOrFindTwoCharacterString(uint16_t c1, uint16_t c2);
private:
+ friend class WebSnapshotDeserializer;
Impl* impl() { return static_cast<Impl*>(this); }
auto isolate() { return impl()->isolate(); }
ReadOnlyRoots read_only_roots() { return impl()->read_only_roots(); }
diff --git a/deps/v8/src/heap/factory-inl.h b/deps/v8/src/heap/factory-inl.h
index e1ab5e381db..72d53014fd2 100644
--- a/deps/v8/src/heap/factory-inl.h
+++ b/deps/v8/src/heap/factory-inl.h
@@ -67,7 +67,9 @@ Handle<Object> Factory::NewURIError() {
MessageTemplate::kURIMalformed);
}
-ReadOnlyRoots Factory::read_only_roots() { return ReadOnlyRoots(isolate()); }
+ReadOnlyRoots Factory::read_only_roots() const {
+ return ReadOnlyRoots(isolate());
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index ca9d316e2bb..0c89a3fa9cc 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -15,6 +15,7 @@
#include "src/builtins/constants-table-builder.h"
#include "src/codegen/compilation-cache.h"
#include "src/codegen/compiler.h"
+#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/diagnostics/basic-block-profiler.h"
#include "src/execution/isolate-inl.h"
@@ -44,9 +45,11 @@
#include "src/objects/fixed-array-inl.h"
#include "src/objects/foreign-inl.h"
#include "src/objects/instance-type-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/js-generator-inl.h"
+#include "src/objects/js-objects.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/js-weak-refs-inl.h"
#include "src/objects/literal-objects-inl.h"
@@ -72,7 +75,7 @@ Factory::CodeBuilder::CodeBuilder(Isolate* isolate, const CodeDesc& desc,
: isolate_(isolate),
code_desc_(desc),
kind_(kind),
- source_position_table_(isolate_->factory()->empty_byte_array()) {}
+ position_table_(isolate_->factory()->empty_byte_array()) {}
MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
bool retry_allocation_or_fail) {
@@ -145,7 +148,8 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
DisallowGarbageCollection no_gc;
result.set_map_after_allocation(*factory->code_map(), SKIP_WRITE_BARRIER);
- code = handle(Code::cast(result), isolate_);
+ Code raw_code = Code::cast(result);
+ code = handle(raw_code, isolate_);
if (is_executable_) {
DCHECK(IsAligned(code->address(), kCodeAlignment));
DCHECK_IMPLIES(
@@ -156,20 +160,27 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
constexpr bool kIsNotOffHeapTrampoline = false;
- code->set_raw_instruction_size(code_desc_.instruction_size());
- code->set_raw_metadata_size(code_desc_.metadata_size());
- code->set_relocation_info(*reloc_info);
- code->initialize_flags(kind_, is_turbofanned_, stack_slots_,
- kIsNotOffHeapTrampoline);
- code->set_builtin_index(builtin_index_);
- code->set_inlined_bytecode_size(inlined_bytecode_size_);
- code->set_code_data_container(*data_container, kReleaseStore);
- code->set_deoptimization_data(*deoptimization_data_);
- code->set_source_position_table(*source_position_table_);
- code->set_handler_table_offset(code_desc_.handler_table_offset_relative());
- code->set_constant_pool_offset(code_desc_.constant_pool_offset_relative());
- code->set_code_comments_offset(code_desc_.code_comments_offset_relative());
- code->set_unwinding_info_offset(
+ raw_code.set_raw_instruction_size(code_desc_.instruction_size());
+ raw_code.set_raw_metadata_size(code_desc_.metadata_size());
+ raw_code.set_relocation_info(*reloc_info);
+ raw_code.initialize_flags(kind_, is_turbofanned_, stack_slots_,
+ kIsNotOffHeapTrampoline);
+ raw_code.set_builtin_index(builtin_index_);
+ raw_code.set_inlined_bytecode_size(inlined_bytecode_size_);
+ raw_code.set_code_data_container(*data_container, kReleaseStore);
+ raw_code.set_deoptimization_data(*deoptimization_data_);
+ if (kind_ == CodeKind::BASELINE) {
+ raw_code.set_bytecode_offset_table(*position_table_);
+ } else {
+ raw_code.set_source_position_table(*position_table_);
+ }
+ raw_code.set_handler_table_offset(
+ code_desc_.handler_table_offset_relative());
+ raw_code.set_constant_pool_offset(
+ code_desc_.constant_pool_offset_relative());
+ raw_code.set_code_comments_offset(
+ code_desc_.code_comments_offset_relative());
+ raw_code.set_unwinding_info_offset(
code_desc_.unwinding_info_offset_relative());
// Allow self references to created code object by patching the handle to
@@ -200,12 +211,12 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
// that are dereferenced during the copy to point directly to the actual
// heap objects. These pointers can include references to the code object
// itself, through the self_reference parameter.
- code->CopyFromNoFlush(heap, code_desc_);
+ raw_code.CopyFromNoFlush(heap, code_desc_);
- code->clear_padding();
+ raw_code.clear_padding();
#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) code->ObjectVerify(isolate_);
+ if (FLAG_verify_heap) raw_code.ObjectVerify(isolate_);
#endif
// Flush the instruction cache before changing the permissions.
@@ -213,7 +224,7 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
// some older ARM kernels there is a bug which causes an access error on
// cache flush instructions to trigger access error on non-writable memory.
// See https://bugs.chromium.org/p/v8/issues/detail?id=8157
- code->FlushICache();
+ raw_code.FlushICache();
}
if (profiler_data_ && FLAG_turbo_profiling_verbose) {
@@ -303,40 +314,43 @@ Handle<HeapObject> Factory::NewFillerObject(int size, bool double_align,
}
Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
- Handle<PrototypeInfo> result = Handle<PrototypeInfo>::cast(
- NewStruct(PROTOTYPE_INFO_TYPE, AllocationType::kOld));
- result->set_prototype_users(Smi::zero());
- result->set_registry_slot(PrototypeInfo::UNREGISTERED);
- result->set_bit_field(0);
- result->set_module_namespace(*undefined_value());
- return result;
+ PrototypeInfo result = PrototypeInfo::cast(
+ NewStructInternal(PROTOTYPE_INFO_TYPE, AllocationType::kOld));
+ DisallowGarbageCollection no_gc;
+ result.set_prototype_users(Smi::zero());
+ result.set_registry_slot(PrototypeInfo::UNREGISTERED);
+ result.set_bit_field(0);
+ result.set_module_namespace(*undefined_value(), SKIP_WRITE_BARRIER);
+ return handle(result, isolate());
}
Handle<EnumCache> Factory::NewEnumCache(Handle<FixedArray> keys,
Handle<FixedArray> indices) {
- Handle<EnumCache> result =
- Handle<EnumCache>::cast(NewStruct(ENUM_CACHE_TYPE, AllocationType::kOld));
- result->set_keys(*keys);
- result->set_indices(*indices);
- return result;
+ EnumCache result =
+ EnumCache::cast(NewStructInternal(ENUM_CACHE_TYPE, AllocationType::kOld));
+ DisallowGarbageCollection no_gc;
+ result.set_keys(*keys);
+ result.set_indices(*indices);
+ return handle(result, isolate());
}
Handle<Tuple2> Factory::NewTuple2(Handle<Object> value1, Handle<Object> value2,
AllocationType allocation) {
- Handle<Tuple2> result =
- Handle<Tuple2>::cast(NewStruct(TUPLE2_TYPE, allocation));
- result->set_value1(*value1);
- result->set_value2(*value2);
- return result;
+ Tuple2 result = Tuple2::cast(NewStructInternal(TUPLE2_TYPE, allocation));
+ DisallowGarbageCollection no_gc;
+ result.set_value1(*value1);
+ result.set_value2(*value2);
+ return handle(result, isolate());
}
Handle<BaselineData> Factory::NewBaselineData(
Handle<Code> code, Handle<HeapObject> function_data) {
- Handle<BaselineData> baseline_data = Handle<BaselineData>::cast(
- NewStruct(BASELINE_DATA_TYPE, AllocationType::kOld));
- baseline_data->set_baseline_code(*code);
- baseline_data->set_data(*function_data);
- return baseline_data;
+ BaselineData baseline_data = BaselineData::cast(
+ NewStructInternal(BASELINE_DATA_TYPE, AllocationType::kOld));
+ DisallowGarbageCollection no_gc;
+ baseline_data.set_baseline_code(*code);
+ baseline_data.set_data(*function_data);
+ return handle(baseline_data, isolate());
}
Handle<Oddball> Factory::NewOddball(Handle<Map> map, const char* to_string,
@@ -365,11 +379,12 @@ Handle<PropertyArray> Factory::NewPropertyArray(int length) {
DCHECK_LE(0, length);
if (length == 0) return empty_property_array();
HeapObject result = AllocateRawFixedArray(length, AllocationType::kYoung);
+ DisallowGarbageCollection no_gc;
result.set_map_after_allocation(*property_array_map(), SKIP_WRITE_BARRIER);
- Handle<PropertyArray> array(PropertyArray::cast(result), isolate());
- array->initialize_length(length);
- MemsetTagged(array->data_start(), *undefined_value(), length);
- return array;
+ PropertyArray array = PropertyArray::cast(result);
+ array.initialize_length(length);
+ MemsetTagged(array.data_start(), read_only_roots().undefined_value(), length);
+ return handle(array, isolate());
}
MaybeHandle<FixedArray> Factory::TryNewFixedArray(
@@ -387,12 +402,12 @@ MaybeHandle<FixedArray> Factory::TryNewFixedArray(
BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(result);
chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
}
+ DisallowGarbageCollection no_gc;
result.set_map_after_allocation(*fixed_array_map(), SKIP_WRITE_BARRIER);
- Handle<FixedArray> array(FixedArray::cast(result), isolate());
- array->set_length(length);
- MemsetTagged(array->data_start(), ReadOnlyRoots(heap).undefined_value(),
- length);
- return array;
+ FixedArray array = FixedArray::cast(result);
+ array.set_length(length);
+ MemsetTagged(array.data_start(), *undefined_value(), length);
+ return handle(array, isolate());
}
Handle<FixedArray> Factory::NewUninitializedFixedArray(int length) {
@@ -428,43 +443,42 @@ Handle<FeedbackVector> Factory::NewFeedbackVector(
DCHECK_LE(0, length);
int size = FeedbackVector::SizeFor(length);
- HeapObject result = AllocateRawWithImmortalMap(size, AllocationType::kOld,
- *feedback_vector_map());
- Handle<FeedbackVector> vector(FeedbackVector::cast(result), isolate());
- vector->set_shared_function_info(*shared);
- vector->set_maybe_optimized_code(HeapObjectReference::ClearedValue(isolate()),
- kReleaseStore);
- vector->set_length(length);
- vector->set_invocation_count(0);
- vector->set_profiler_ticks(0);
- vector->InitializeOptimizationState();
- vector->set_closure_feedback_cell_array(*closure_feedback_cell_array);
+ FeedbackVector vector = FeedbackVector::cast(AllocateRawWithImmortalMap(
+ size, AllocationType::kOld, *feedback_vector_map()));
+ DisallowGarbageCollection no_gc;
+ vector.set_shared_function_info(*shared);
+ vector.set_maybe_optimized_code(HeapObjectReference::ClearedValue(isolate()),
+ kReleaseStore);
+ vector.set_length(length);
+ vector.set_invocation_count(0);
+ vector.set_profiler_ticks(0);
+ vector.InitializeOptimizationState();
+ vector.set_closure_feedback_cell_array(*closure_feedback_cell_array);
// TODO(leszeks): Initialize based on the feedback metadata.
- MemsetTagged(ObjectSlot(vector->slots_start()), *undefined_value(), length);
- return vector;
+ MemsetTagged(ObjectSlot(vector.slots_start()), *undefined_value(), length);
+ return handle(vector, isolate());
}
Handle<EmbedderDataArray> Factory::NewEmbedderDataArray(int length) {
DCHECK_LE(0, length);
int size = EmbedderDataArray::SizeFor(length);
-
- HeapObject result = AllocateRawWithImmortalMap(size, AllocationType::kYoung,
- *embedder_data_array_map());
- Handle<EmbedderDataArray> array(EmbedderDataArray::cast(result), isolate());
- array->set_length(length);
+ EmbedderDataArray array = EmbedderDataArray::cast(AllocateRawWithImmortalMap(
+ size, AllocationType::kYoung, *embedder_data_array_map()));
+ DisallowGarbageCollection no_gc;
+ array.set_length(length);
if (length > 0) {
- ObjectSlot start(array->slots_start());
- ObjectSlot end(array->slots_end());
+ ObjectSlot start(array.slots_start());
+ ObjectSlot end(array.slots_end());
size_t slot_count = end - start;
MemsetTagged(start, *undefined_value(), slot_count);
for (int i = 0; i < length; i++) {
// TODO(v8:10391, saelo): Handle external pointers in EmbedderDataSlot
- EmbedderDataSlot(*array, i).AllocateExternalPointerEntry(isolate());
+ EmbedderDataSlot(array, i).AllocateExternalPointerEntry(isolate());
}
}
- return array;
+ return handle(array, isolate());
}
Handle<FixedArrayBase> Factory::NewFixedDoubleArrayWithHoles(int length) {
@@ -543,14 +557,16 @@ Handle<NameDictionary> Factory::NewNameDictionary(int at_least_space_for) {
}
Handle<PropertyDescriptorObject> Factory::NewPropertyDescriptorObject() {
- Handle<PropertyDescriptorObject> object =
- Handle<PropertyDescriptorObject>::cast(
- NewStruct(PROPERTY_DESCRIPTOR_OBJECT_TYPE, AllocationType::kYoung));
- object->set_flags(0);
- object->set_value(*the_hole_value(), SKIP_WRITE_BARRIER);
- object->set_get(*the_hole_value(), SKIP_WRITE_BARRIER);
- object->set_set(*the_hole_value(), SKIP_WRITE_BARRIER);
- return object;
+ PropertyDescriptorObject object =
+ PropertyDescriptorObject::cast(NewStructInternal(
+ PROPERTY_DESCRIPTOR_OBJECT_TYPE, AllocationType::kYoung));
+ DisallowGarbageCollection no_gc;
+ object.set_flags(0);
+ Oddball the_hole = read_only_roots().the_hole_value();
+ object.set_value(the_hole, SKIP_WRITE_BARRIER);
+ object.set_get(the_hole, SKIP_WRITE_BARRIER);
+ object.set_set(the_hole, SKIP_WRITE_BARRIER);
+ return handle(object, isolate());
}
Handle<SwissNameDictionary> Factory::CreateCanonicalEmptySwissNameDictionary() {
@@ -562,7 +578,7 @@ Handle<SwissNameDictionary> Factory::CreateCanonicalEmptySwissNameDictionary() {
ReadOnlyRoots roots(isolate());
Handle<ByteArray> empty_meta_table =
- NewByteArray(SwissNameDictionary::kMetaTableEnumerationTableStartOffset,
+ NewByteArray(SwissNameDictionary::kMetaTableEnumerationDataStartIndex,
AllocationType::kReadOnly);
Map map = roots.swiss_name_dictionary_map();
@@ -744,10 +760,6 @@ MaybeHandle<String> Factory::NewStringFromTwoByte(
namespace {
-bool inline IsOneByte(Handle<String> str) {
- return str->IsOneByteRepresentation();
-}
-
inline void WriteOneByteData(Handle<String> s, uint8_t* chars, int len) {
DCHECK(s->length() == len);
String::WriteToFlat(*s, chars, 0, len);
@@ -777,30 +789,29 @@ Handle<String> Factory::AllocateInternalizedStringImpl(T t, int chars,
size = SeqTwoByteString::SizeFor(chars);
}
- HeapObject result =
+ String result = String::cast(
AllocateRawWithImmortalMap(size,
isolate()->heap()->CanAllocateInReadOnlySpace()
? AllocationType::kReadOnly
: AllocationType::kOld,
- map);
- Handle<String> answer(String::cast(result), isolate());
- answer->set_length(chars);
- answer->set_raw_hash_field(hash_field);
- DCHECK_EQ(size, answer->Size());
+ map));
DisallowGarbageCollection no_gc;
+ result.set_length(chars);
+ result.set_raw_hash_field(hash_field);
+ DCHECK_EQ(size, result.Size());
if (is_one_byte) {
- WriteOneByteData(t, SeqOneByteString::cast(*answer).GetChars(no_gc), chars);
+ WriteOneByteData(t, SeqOneByteString::cast(result).GetChars(no_gc), chars);
} else {
- WriteTwoByteData(t, SeqTwoByteString::cast(*answer).GetChars(no_gc), chars);
+ WriteTwoByteData(t, SeqTwoByteString::cast(result).GetChars(no_gc), chars);
}
- return answer;
+ return handle(result, isolate());
}
Handle<String> Factory::NewInternalizedStringImpl(Handle<String> string,
int chars,
uint32_t hash_field) {
- if (IsOneByte(string)) {
+ if (string->IsOneByteRepresentation()) {
return AllocateInternalizedStringImpl<true>(string, chars, hash_field);
}
return AllocateInternalizedStringImpl<false>(string, chars, hash_field);
@@ -830,22 +841,22 @@ MaybeHandle<Map> Factory::InternalizedStringMapForString(
// If the string is in the young generation, it cannot be used as
// internalized.
if (Heap::InYoungGeneration(*string)) return MaybeHandle<Map>();
-
return GetInternalizedStringMap(this, string);
}
template <class StringClass>
Handle<StringClass> Factory::InternalizeExternalString(Handle<String> string) {
- Handle<StringClass> cast_string = Handle<StringClass>::cast(string);
Handle<Map> map = GetInternalizedStringMap(this, string).ToHandleChecked();
- Handle<StringClass> external_string(
- StringClass::cast(New(map, AllocationType::kOld)), isolate());
- external_string->AllocateExternalPointerEntries(isolate());
- external_string->set_length(cast_string->length());
- external_string->set_raw_hash_field(cast_string->raw_hash_field());
- external_string->SetResource(isolate(), nullptr);
- isolate()->heap()->RegisterExternalString(*external_string);
- return external_string;
+ StringClass external_string =
+ StringClass::cast(New(map, AllocationType::kOld));
+ DisallowGarbageCollection no_gc;
+ external_string.AllocateExternalPointerEntries(isolate());
+ StringClass cast_string = StringClass::cast(*string);
+ external_string.set_length(cast_string.length());
+ external_string.set_raw_hash_field(cast_string.raw_hash_field());
+ external_string.SetResource(isolate(), nullptr);
+ isolate()->heap()->RegisterExternalString(external_string);
+ return handle(external_string, isolate());
}
template Handle<ExternalOneByteString>
@@ -943,14 +954,13 @@ Handle<String> Factory::NewProperSubString(Handle<String> str, int begin,
Handle<Map> map = str->IsOneByteRepresentation()
? sliced_one_byte_string_map()
: sliced_string_map();
- Handle<SlicedString> slice(
- SlicedString::cast(New(map, AllocationType::kYoung)), isolate());
-
- slice->set_raw_hash_field(String::kEmptyHashField);
- slice->set_length(length);
- slice->set_parent(*str);
- slice->set_offset(offset);
- return slice;
+ SlicedString slice = SlicedString::cast(New(map, AllocationType::kYoung));
+ DisallowGarbageCollection no_gc;
+ slice.set_raw_hash_field(String::kEmptyHashField);
+ slice.set_length(length);
+ slice.set_parent(*str);
+ slice.set_offset(offset);
+ return handle(slice, isolate());
}
MaybeHandle<String> Factory::NewExternalStringFromOneByte(
@@ -964,15 +974,16 @@ MaybeHandle<String> Factory::NewExternalStringFromOneByte(
Handle<Map> map = resource->IsCacheable()
? external_one_byte_string_map()
: uncached_external_one_byte_string_map();
- Handle<ExternalOneByteString> external_string(
- ExternalOneByteString::cast(New(map, AllocationType::kOld)), isolate());
- external_string->AllocateExternalPointerEntries(isolate());
- external_string->set_length(static_cast<int>(length));
- external_string->set_raw_hash_field(String::kEmptyHashField);
- external_string->SetResource(isolate(), resource);
- isolate()->heap()->RegisterExternalString(*external_string);
+ ExternalOneByteString external_string =
+ ExternalOneByteString::cast(New(map, AllocationType::kOld));
+ DisallowGarbageCollection no_gc;
+ external_string.AllocateExternalPointerEntries(isolate());
+ external_string.set_length(static_cast<int>(length));
+ external_string.set_raw_hash_field(String::kEmptyHashField);
+ external_string.SetResource(isolate(), resource);
+ isolate()->heap()->RegisterExternalString(external_string);
- return external_string;
+ return Handle<String>(external_string, isolate());
}
MaybeHandle<String> Factory::NewExternalStringFromTwoByte(
@@ -985,15 +996,15 @@ MaybeHandle<String> Factory::NewExternalStringFromTwoByte(
Handle<Map> map = resource->IsCacheable() ? external_string_map()
: uncached_external_string_map();
- Handle<ExternalTwoByteString> external_string(
- ExternalTwoByteString::cast(New(map, AllocationType::kOld)), isolate());
- external_string->AllocateExternalPointerEntries(isolate());
- external_string->set_length(static_cast<int>(length));
- external_string->set_raw_hash_field(String::kEmptyHashField);
- external_string->SetResource(isolate(), resource);
- isolate()->heap()->RegisterExternalString(*external_string);
-
- return external_string;
+ ExternalTwoByteString string =
+ ExternalTwoByteString::cast(New(map, AllocationType::kOld));
+ DisallowGarbageCollection no_gc;
+ string.AllocateExternalPointerEntries(isolate());
+ string.set_length(static_cast<int>(length));
+ string.set_raw_hash_field(String::kEmptyHashField);
+ string.SetResource(isolate(), resource);
+ isolate()->heap()->RegisterExternalString(string);
+ return Handle<ExternalTwoByteString>(string, isolate());
}
Handle<JSStringIterator> Factory::NewJSStringIterator(Handle<String> string) {
@@ -1002,47 +1013,54 @@ Handle<JSStringIterator> Factory::NewJSStringIterator(Handle<String> string) {
Handle<String> flat_string = String::Flatten(isolate(), string);
Handle<JSStringIterator> iterator =
Handle<JSStringIterator>::cast(NewJSObjectFromMap(map));
- iterator->set_string(*flat_string);
- iterator->set_index(0);
+ DisallowGarbageCollection no_gc;
+ JSStringIterator raw = *iterator;
+ raw.set_string(*flat_string);
+ raw.set_index(0);
return iterator;
}
-Handle<Symbol> Factory::NewSymbol(AllocationType allocation) {
+Symbol Factory::NewSymbolInternal(AllocationType allocation) {
DCHECK(allocation != AllocationType::kYoung);
// Statically ensure that it is safe to allocate symbols in paged spaces.
STATIC_ASSERT(Symbol::kSize <= kMaxRegularHeapObjectSize);
- HeapObject result =
- AllocateRawWithImmortalMap(Symbol::kSize, allocation, *symbol_map());
-
+ Symbol symbol = Symbol::cast(AllocateRawWithImmortalMap(
+ Symbol::kSize, allocation, read_only_roots().symbol_map()));
+ DisallowGarbageCollection no_gc;
// Generate a random hash value.
int hash = isolate()->GenerateIdentityHash(Name::kHashBitMask);
-
- Handle<Symbol> symbol(Symbol::cast(result), isolate());
- symbol->set_raw_hash_field(Name::kIsNotIntegerIndexMask |
- (hash << Name::kHashShift));
- symbol->set_description(*undefined_value());
- symbol->set_flags(0);
- DCHECK(!symbol->is_private());
+ symbol.set_raw_hash_field(Name::kIsNotIntegerIndexMask |
+ (hash << Name::kHashShift));
+ symbol.set_description(read_only_roots().undefined_value(),
+ SKIP_WRITE_BARRIER);
+ symbol.set_flags(0);
+ DCHECK(!symbol.is_private());
return symbol;
}
+Handle<Symbol> Factory::NewSymbol(AllocationType allocation) {
+ return handle(NewSymbolInternal(allocation), isolate());
+}
+
Handle<Symbol> Factory::NewPrivateSymbol(AllocationType allocation) {
DCHECK(allocation != AllocationType::kYoung);
- Handle<Symbol> symbol = NewSymbol(allocation);
- symbol->set_is_private(true);
- return symbol;
+ Symbol symbol = NewSymbolInternal(allocation);
+ DisallowGarbageCollection no_gc;
+ symbol.set_is_private(true);
+ return handle(symbol, isolate());
}
Handle<Symbol> Factory::NewPrivateNameSymbol(Handle<String> name) {
- Handle<Symbol> symbol = NewSymbol();
- symbol->set_is_private_name();
- symbol->set_description(*name);
- return symbol;
+ Symbol symbol = NewSymbolInternal();
+ DisallowGarbageCollection no_gc;
+ symbol.set_is_private_name();
+ symbol.set_description(*name);
+ return handle(symbol, isolate());
}
-Handle<Context> Factory::NewContext(Handle<Map> map, int size,
+Context Factory::NewContextInternal(Handle<Map> map, int size,
int variadic_part_length,
AllocationType allocation) {
DCHECK_LE(Context::kTodoHeaderSize, size);
@@ -1053,12 +1071,13 @@ Handle<Context> Factory::NewContext(Handle<Map> map, int size,
HeapObject result =
isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(size, allocation);
result.set_map_after_allocation(*map);
- Handle<Context> context(Context::cast(result), isolate());
- context->set_length(variadic_part_length);
- DCHECK_EQ(context->SizeFromMap(*map), size);
+ DisallowGarbageCollection no_gc;
+ Context context = Context::cast(result);
+ context.set_length(variadic_part_length);
+ DCHECK_EQ(context.SizeFromMap(*map), size);
if (size > Context::kTodoHeaderSize) {
- ObjectSlot start = context->RawField(Context::kTodoHeaderSize);
- ObjectSlot end = context->RawField(size);
+ ObjectSlot start = context.RawField(Context::kTodoHeaderSize);
+ ObjectSlot end = context.RawField(size);
size_t slot_count = end - start;
MemsetTagged(start, *undefined_value(), slot_count);
}
@@ -1067,36 +1086,39 @@ Handle<Context> Factory::NewContext(Handle<Map> map, int size,
Handle<NativeContext> Factory::NewNativeContext() {
Handle<Map> map = NewMap(NATIVE_CONTEXT_TYPE, kVariableSizeSentinel);
- Handle<NativeContext> context = Handle<NativeContext>::cast(
- NewContext(map, NativeContext::kSize, NativeContext::NATIVE_CONTEXT_SLOTS,
- AllocationType::kOld));
- context->set_native_context_map(*map);
- map->set_native_context(*context);
- context->AllocateExternalPointerEntries(isolate());
- context->set_scope_info(ReadOnlyRoots(isolate()).native_scope_info());
- context->set_previous(Context::unchecked_cast(Smi::zero()));
- context->set_extension(*undefined_value());
- context->set_errors_thrown(Smi::zero());
- context->set_math_random_index(Smi::zero());
- context->set_serialized_objects(*empty_fixed_array());
- context->set_microtask_queue(isolate(), nullptr);
- context->set_osr_code_cache(*empty_weak_fixed_array());
- context->set_retained_maps(*empty_weak_array_list());
- return context;
+ NativeContext context = NativeContext::cast(NewContextInternal(
+ map, NativeContext::kSize, NativeContext::NATIVE_CONTEXT_SLOTS,
+ AllocationType::kOld));
+ DisallowGarbageCollection no_gc;
+ context.set_native_context_map(*map);
+ map->set_native_context(context);
+ // The ExternalPointerTable is a C++ object.
+ context.AllocateExternalPointerEntries(isolate());
+ context.set_scope_info(*native_scope_info());
+ context.set_previous(Context::unchecked_cast(Smi::zero()));
+ context.set_extension(*undefined_value());
+ context.set_errors_thrown(Smi::zero());
+ context.set_math_random_index(Smi::zero());
+ context.set_serialized_objects(*empty_fixed_array());
+ context.set_microtask_queue(isolate(), nullptr);
+ context.set_osr_code_cache(*empty_weak_fixed_array());
+ context.set_retained_maps(*empty_weak_array_list());
+ return handle(context, isolate());
}
Handle<Context> Factory::NewScriptContext(Handle<NativeContext> outer,
Handle<ScopeInfo> scope_info) {
DCHECK_EQ(scope_info->scope_type(), SCRIPT_SCOPE);
int variadic_part_length = scope_info->ContextLength();
- Handle<Context> context =
- NewContext(handle(outer->script_context_map(), isolate()),
- Context::SizeFor(variadic_part_length), variadic_part_length,
- AllocationType::kOld);
- context->set_scope_info(*scope_info);
- context->set_previous(*outer);
- DCHECK(context->IsScriptContext());
- return context;
+ Context context =
+ NewContextInternal(handle(outer->script_context_map(), isolate()),
+ Context::SizeFor(variadic_part_length),
+ variadic_part_length, AllocationType::kOld);
+ DisallowGarbageCollection no_gc;
+ context.set_scope_info(*scope_info);
+ context.set_previous(*outer);
+ DCHECK(context.IsScriptContext());
+ return handle(context, isolate());
}
Handle<ScriptContextTable> Factory::NewScriptContextTable() {
@@ -1112,14 +1134,15 @@ Handle<Context> Factory::NewModuleContext(Handle<SourceTextModule> module,
Handle<ScopeInfo> scope_info) {
DCHECK_EQ(scope_info->scope_type(), MODULE_SCOPE);
int variadic_part_length = scope_info->ContextLength();
- Handle<Context> context = NewContext(
+ Context context = NewContextInternal(
isolate()->module_context_map(), Context::SizeFor(variadic_part_length),
variadic_part_length, AllocationType::kOld);
- context->set_scope_info(*scope_info);
- context->set_previous(*outer);
- context->set_extension(*module);
- DCHECK(context->IsModuleContext());
- return context;
+ DisallowGarbageCollection no_gc;
+ context.set_scope_info(*scope_info);
+ context.set_previous(*outer);
+ context.set_extension(*module);
+ DCHECK(context.IsModuleContext());
+ return handle(context, isolate());
}
Handle<Context> Factory::NewFunctionContext(Handle<Context> outer,
@@ -1136,12 +1159,13 @@ Handle<Context> Factory::NewFunctionContext(Handle<Context> outer,
UNREACHABLE();
}
int variadic_part_length = scope_info->ContextLength();
- Handle<Context> context =
- NewContext(map, Context::SizeFor(variadic_part_length),
- variadic_part_length, AllocationType::kYoung);
- context->set_scope_info(*scope_info);
- context->set_previous(*outer);
- return context;
+ Context context =
+ NewContextInternal(map, Context::SizeFor(variadic_part_length),
+ variadic_part_length, AllocationType::kYoung);
+ DisallowGarbageCollection no_gc;
+ context.set_scope_info(*scope_info);
+ context.set_previous(*outer);
+ return handle(context, isolate());
}
Handle<Context> Factory::NewCatchContext(Handle<Context> previous,
@@ -1151,13 +1175,15 @@ Handle<Context> Factory::NewCatchContext(Handle<Context> previous,
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
// TODO(ishell): Take the details from CatchContext class.
int variadic_part_length = Context::MIN_CONTEXT_SLOTS + 1;
- Handle<Context> context = NewContext(
+ Context context = NewContextInternal(
isolate()->catch_context_map(), Context::SizeFor(variadic_part_length),
variadic_part_length, AllocationType::kYoung);
- context->set_scope_info(*scope_info);
- context->set_previous(*previous);
- context->set(Context::THROWN_OBJECT_INDEX, *thrown_object);
- return context;
+ DisallowGarbageCollection no_gc;
+ DCHECK(Heap::InYoungGeneration(context));
+ context.set_scope_info(*scope_info, SKIP_WRITE_BARRIER);
+ context.set_previous(*previous, SKIP_WRITE_BARRIER);
+ context.set(Context::THROWN_OBJECT_INDEX, *thrown_object, SKIP_WRITE_BARRIER);
+ return handle(context, isolate());
}
Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous,
@@ -1173,15 +1199,22 @@ Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous,
: Handle<HeapObject>::cast(extension);
// TODO(ishell): Take the details from DebugEvaluateContextContext class.
int variadic_part_length = Context::MIN_CONTEXT_EXTENDED_SLOTS + 2;
- Handle<Context> c = NewContext(isolate()->debug_evaluate_context_map(),
- Context::SizeFor(variadic_part_length),
- variadic_part_length, AllocationType::kYoung);
- c->set_scope_info(*scope_info);
- c->set_previous(*previous);
- c->set_extension(*ext);
- if (!wrapped.is_null()) c->set(Context::WRAPPED_CONTEXT_INDEX, *wrapped);
- if (!blocklist.is_null()) c->set(Context::BLOCK_LIST_INDEX, *blocklist);
- return c;
+ Context context =
+ NewContextInternal(isolate()->debug_evaluate_context_map(),
+ Context::SizeFor(variadic_part_length),
+ variadic_part_length, AllocationType::kYoung);
+ DisallowGarbageCollection no_gc;
+ DCHECK(Heap::InYoungGeneration(context));
+ context.set_scope_info(*scope_info, SKIP_WRITE_BARRIER);
+ context.set_previous(*previous, SKIP_WRITE_BARRIER);
+ context.set_extension(*ext, SKIP_WRITE_BARRIER);
+ if (!wrapped.is_null()) {
+ context.set(Context::WRAPPED_CONTEXT_INDEX, *wrapped, SKIP_WRITE_BARRIER);
+ }
+ if (!blocklist.is_null()) {
+ context.set(Context::BLOCK_LIST_INDEX, *blocklist, SKIP_WRITE_BARRIER);
+ }
+ return handle(context, isolate());
}
Handle<Context> Factory::NewWithContext(Handle<Context> previous,
@@ -1190,13 +1223,15 @@ Handle<Context> Factory::NewWithContext(Handle<Context> previous,
DCHECK_EQ(scope_info->scope_type(), WITH_SCOPE);
// TODO(ishell): Take the details from WithContext class.
int variadic_part_length = Context::MIN_CONTEXT_EXTENDED_SLOTS;
- Handle<Context> context = NewContext(
+ Context context = NewContextInternal(
isolate()->with_context_map(), Context::SizeFor(variadic_part_length),
variadic_part_length, AllocationType::kYoung);
- context->set_scope_info(*scope_info);
- context->set_previous(*previous);
- context->set_extension(*extension);
- return context;
+ DisallowGarbageCollection no_gc;
+ DCHECK(Heap::InYoungGeneration(context));
+ context.set_scope_info(*scope_info, SKIP_WRITE_BARRIER);
+ context.set_previous(*previous, SKIP_WRITE_BARRIER);
+ context.set_extension(*extension, SKIP_WRITE_BARRIER);
+ return handle(context, isolate());
}
Handle<Context> Factory::NewBlockContext(Handle<Context> previous,
@@ -1204,48 +1239,52 @@ Handle<Context> Factory::NewBlockContext(Handle<Context> previous,
DCHECK_IMPLIES(scope_info->scope_type() != BLOCK_SCOPE,
scope_info->scope_type() == CLASS_SCOPE);
int variadic_part_length = scope_info->ContextLength();
- Handle<Context> context = NewContext(
+ Context context = NewContextInternal(
isolate()->block_context_map(), Context::SizeFor(variadic_part_length),
variadic_part_length, AllocationType::kYoung);
- context->set_scope_info(*scope_info);
- context->set_previous(*previous);
- return context;
+ DisallowGarbageCollection no_gc;
+ DCHECK(Heap::InYoungGeneration(context));
+ context.set_scope_info(*scope_info, SKIP_WRITE_BARRIER);
+ context.set_previous(*previous, SKIP_WRITE_BARRIER);
+ return handle(context, isolate());
}
Handle<Context> Factory::NewBuiltinContext(Handle<NativeContext> native_context,
int variadic_part_length) {
DCHECK_LE(Context::MIN_CONTEXT_SLOTS, variadic_part_length);
- Handle<Context> context = NewContext(
+ Context context = NewContextInternal(
isolate()->function_context_map(), Context::SizeFor(variadic_part_length),
variadic_part_length, AllocationType::kYoung);
- context->set_scope_info(ReadOnlyRoots(isolate()).empty_scope_info());
- context->set_previous(*native_context);
- return context;
+ DisallowGarbageCollection no_gc;
+ DCHECK(Heap::InYoungGeneration(context));
+ context.set_scope_info(read_only_roots().empty_scope_info(),
+ SKIP_WRITE_BARRIER);
+ context.set_previous(*native_context, SKIP_WRITE_BARRIER);
+ return handle(context, isolate());
}
Handle<AliasedArgumentsEntry> Factory::NewAliasedArgumentsEntry(
int aliased_context_slot) {
- Handle<AliasedArgumentsEntry> entry = Handle<AliasedArgumentsEntry>::cast(
- NewStruct(ALIASED_ARGUMENTS_ENTRY_TYPE, AllocationType::kYoung));
- entry->set_aliased_context_slot(aliased_context_slot);
- return entry;
+ AliasedArgumentsEntry entry = AliasedArgumentsEntry::cast(
+ NewStructInternal(ALIASED_ARGUMENTS_ENTRY_TYPE, AllocationType::kYoung));
+ entry.set_aliased_context_slot(aliased_context_slot);
+ return handle(entry, isolate());
}
Handle<AccessorInfo> Factory::NewAccessorInfo() {
- Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(
- NewStruct(ACCESSOR_INFO_TYPE, AllocationType::kOld));
+ AccessorInfo info = AccessorInfo::cast(
+ NewStructInternal(ACCESSOR_INFO_TYPE, AllocationType::kOld));
DisallowGarbageCollection no_gc;
- info->set_name(*empty_string());
- info->set_flags(0); // Must clear the flags, it was initialized as undefined.
- info->set_is_sloppy(true);
- info->set_initial_property_attributes(NONE);
+ info.set_name(*empty_string(), SKIP_WRITE_BARRIER);
+ info.set_flags(0); // Must clear the flags, it was initialized as undefined.
+ info.set_is_sloppy(true);
+ info.set_initial_property_attributes(NONE);
// Clear some other fields that should not be undefined.
- info->set_getter(Smi::zero());
- info->set_setter(Smi::zero());
- info->set_js_getter(Smi::zero());
-
- return info;
+ info.set_getter(Smi::zero(), SKIP_WRITE_BARRIER);
+ info.set_setter(Smi::zero(), SKIP_WRITE_BARRIER);
+ info.set_js_getter(Smi::zero(), SKIP_WRITE_BARRIER);
+ return handle(info, isolate());
}
void Factory::AddToScriptList(Handle<Script> script) {
@@ -1258,76 +1297,85 @@ void Factory::AddToScriptList(Handle<Script> script) {
Handle<Script> Factory::CloneScript(Handle<Script> script) {
Heap* heap = isolate()->heap();
int script_id = isolate()->GetNextScriptId();
- Handle<Script> new_script =
+ Handle<Script> new_script_handle =
Handle<Script>::cast(NewStruct(SCRIPT_TYPE, AllocationType::kOld));
- new_script->set_source(script->source());
- new_script->set_name(script->name());
- new_script->set_id(script_id);
- new_script->set_line_offset(script->line_offset());
- new_script->set_column_offset(script->column_offset());
- new_script->set_context_data(script->context_data());
- new_script->set_type(script->type());
- new_script->set_line_ends(ReadOnlyRoots(heap).undefined_value());
- new_script->set_eval_from_shared_or_wrapped_arguments(
- script->eval_from_shared_or_wrapped_arguments());
- new_script->set_shared_function_infos(*empty_weak_fixed_array(),
- SKIP_WRITE_BARRIER);
- new_script->set_eval_from_position(script->eval_from_position());
- new_script->set_flags(script->flags());
- new_script->set_host_defined_options(script->host_defined_options());
+ {
+ DisallowGarbageCollection no_gc;
+ Script new_script = *new_script_handle;
+ const Script old_script = *script;
+ new_script.set_source(old_script.source());
+ new_script.set_name(old_script.name());
+ new_script.set_id(script_id);
+ new_script.set_line_offset(old_script.line_offset());
+ new_script.set_column_offset(old_script.column_offset());
+ new_script.set_context_data(old_script.context_data());
+ new_script.set_type(old_script.type());
+ new_script.set_line_ends(*undefined_value(), SKIP_WRITE_BARRIER);
+ new_script.set_eval_from_shared_or_wrapped_arguments(
+ script->eval_from_shared_or_wrapped_arguments());
+ new_script.set_shared_function_infos(*empty_weak_fixed_array(),
+ SKIP_WRITE_BARRIER);
+ new_script.set_eval_from_position(old_script.eval_from_position());
+ new_script.set_flags(old_script.flags());
+ new_script.set_host_defined_options(old_script.host_defined_options());
+ }
Handle<WeakArrayList> scripts = script_list();
scripts = WeakArrayList::AddToEnd(isolate(), scripts,
- MaybeObjectHandle::Weak(new_script));
+ MaybeObjectHandle::Weak(new_script_handle));
heap->set_script_list(*scripts);
LOG(isolate(), ScriptEvent(Logger::ScriptEventType::kCreate, script_id));
- return new_script;
+ return new_script_handle;
}
Handle<CallableTask> Factory::NewCallableTask(Handle<JSReceiver> callable,
Handle<Context> context) {
DCHECK(callable->IsCallable());
- Handle<CallableTask> microtask =
- Handle<CallableTask>::cast(NewStruct(CALLABLE_TASK_TYPE));
- microtask->set_callable(*callable);
- microtask->set_context(*context);
- return microtask;
+ CallableTask microtask = CallableTask::cast(
+ NewStructInternal(CALLABLE_TASK_TYPE, AllocationType::kYoung));
+ DisallowGarbageCollection no_gc;
+ microtask.set_callable(*callable, SKIP_WRITE_BARRIER);
+ microtask.set_context(*context, SKIP_WRITE_BARRIER);
+ return handle(microtask, isolate());
}
Handle<CallbackTask> Factory::NewCallbackTask(Handle<Foreign> callback,
Handle<Foreign> data) {
- Handle<CallbackTask> microtask =
- Handle<CallbackTask>::cast(NewStruct(CALLBACK_TASK_TYPE));
- microtask->set_callback(*callback);
- microtask->set_data(*data);
- return microtask;
+ CallbackTask microtask = CallbackTask::cast(
+ NewStructInternal(CALLBACK_TASK_TYPE, AllocationType::kYoung));
+ DisallowGarbageCollection no_gc;
+ microtask.set_callback(*callback, SKIP_WRITE_BARRIER);
+ microtask.set_data(*data, SKIP_WRITE_BARRIER);
+ return handle(microtask, isolate());
}
Handle<PromiseResolveThenableJobTask> Factory::NewPromiseResolveThenableJobTask(
Handle<JSPromise> promise_to_resolve, Handle<JSReceiver> thenable,
Handle<JSReceiver> then, Handle<Context> context) {
DCHECK(then->IsCallable());
- Handle<PromiseResolveThenableJobTask> microtask =
- Handle<PromiseResolveThenableJobTask>::cast(
- NewStruct(PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE));
- microtask->set_promise_to_resolve(*promise_to_resolve);
- microtask->set_thenable(*thenable);
- microtask->set_then(*then);
- microtask->set_context(*context);
- return microtask;
+ PromiseResolveThenableJobTask microtask =
+ PromiseResolveThenableJobTask::cast(NewStructInternal(
+ PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE, AllocationType::kYoung));
+ DisallowGarbageCollection no_gc;
+ microtask.set_promise_to_resolve(*promise_to_resolve, SKIP_WRITE_BARRIER);
+ microtask.set_thenable(*thenable, SKIP_WRITE_BARRIER);
+ microtask.set_then(*then, SKIP_WRITE_BARRIER);
+ microtask.set_context(*context, SKIP_WRITE_BARRIER);
+ return handle(microtask, isolate());
}
Handle<Foreign> Factory::NewForeign(Address addr) {
// Statically ensure that it is safe to allocate foreigns in paged spaces.
STATIC_ASSERT(Foreign::kSize <= kMaxRegularHeapObjectSize);
Map map = *foreign_map();
- HeapObject result = AllocateRawWithImmortalMap(map.instance_size(),
- AllocationType::kYoung, map);
- Handle<Foreign> foreign(Foreign::cast(result), isolate());
- foreign->AllocateExternalPointerEntries(isolate());
- foreign->set_foreign_address(isolate(), addr);
- return foreign;
+ Foreign foreign = Foreign::cast(AllocateRawWithImmortalMap(
+ map.instance_size(), AllocationType::kYoung, map));
+ DisallowGarbageCollection no_gc;
+ foreign.AllocateExternalPointerEntries(isolate());
+ foreign.set_foreign_address(isolate(), addr);
+ return handle(foreign, isolate());
}
+#if V8_ENABLE_WEBASSEMBLY
Handle<WasmTypeInfo> Factory::NewWasmTypeInfo(Address type_address,
Handle<Map> opt_parent) {
Handle<ArrayList> subtypes = ArrayList::New(isolate(), 0);
@@ -1340,56 +1388,74 @@ Handle<WasmTypeInfo> Factory::NewWasmTypeInfo(Address type_address,
supertypes->set(supertypes->length() - 1, *opt_parent);
}
Map map = *wasm_type_info_map();
- HeapObject result = AllocateRawWithImmortalMap(map.instance_size(),
- AllocationType::kYoung, map);
- Handle<WasmTypeInfo> info(WasmTypeInfo::cast(result), isolate());
- info->AllocateExternalPointerEntries(isolate());
- info->set_foreign_address(isolate(), type_address);
- info->set_supertypes(*supertypes);
- info->set_subtypes(*subtypes);
- return info;
+ WasmTypeInfo result = WasmTypeInfo::cast(AllocateRawWithImmortalMap(
+ map.instance_size(), AllocationType::kYoung, map));
+ DisallowGarbageCollection no_gc;
+ result.AllocateExternalPointerEntries(isolate());
+ result.set_foreign_address(isolate(), type_address);
+ result.set_supertypes(*supertypes, SKIP_WRITE_BARRIER);
+ result.set_subtypes(*subtypes, SKIP_WRITE_BARRIER);
+ return handle(result, isolate());
+}
+
+Handle<SharedFunctionInfo>
+Factory::NewSharedFunctionInfoForWasmExportedFunction(
+ Handle<String> name, Handle<WasmExportedFunctionData> data) {
+ return NewSharedFunctionInfo(name, data, Builtins::kNoBuiltinId);
+}
+
+Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForWasmJSFunction(
+ Handle<String> name, Handle<WasmJSFunctionData> data) {
+ return NewSharedFunctionInfo(name, data, Builtins::kNoBuiltinId);
+}
+
+Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForWasmCapiFunction(
+ Handle<WasmCapiFunctionData> data) {
+ return NewSharedFunctionInfo(MaybeHandle<String>(), data,
+ Builtins::kNoBuiltinId, kConciseMethod);
}
+#endif // V8_ENABLE_WEBASSEMBLY
Handle<Cell> Factory::NewCell(Handle<Object> value) {
STATIC_ASSERT(Cell::kSize <= kMaxRegularHeapObjectSize);
- HeapObject result = AllocateRawWithImmortalMap(
- Cell::kSize, AllocationType::kOld, *cell_map());
- Handle<Cell> cell(Cell::cast(result), isolate());
- cell->set_value(*value);
- return cell;
+ Cell result = Cell::cast(AllocateRawWithImmortalMap(
+ Cell::kSize, AllocationType::kOld, *cell_map()));
+ DisallowGarbageCollection no_gc;
+ result.set_value(*value);
+ return handle(result, isolate());
}
Handle<FeedbackCell> Factory::NewNoClosuresCell(Handle<HeapObject> value) {
- HeapObject result =
- AllocateRawWithImmortalMap(FeedbackCell::kAlignedSize,
- AllocationType::kOld, *no_closures_cell_map());
- Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
- cell->set_value(*value);
- cell->SetInitialInterruptBudget();
- cell->clear_padding();
- return cell;
+ FeedbackCell result = FeedbackCell::cast(AllocateRawWithImmortalMap(
+ FeedbackCell::kAlignedSize, AllocationType::kOld,
+ *no_closures_cell_map()));
+ DisallowGarbageCollection no_gc;
+ result.set_value(*value);
+ result.SetInitialInterruptBudget();
+ result.clear_padding();
+ return handle(result, isolate());
}
Handle<FeedbackCell> Factory::NewOneClosureCell(Handle<HeapObject> value) {
- HeapObject result =
- AllocateRawWithImmortalMap(FeedbackCell::kAlignedSize,
- AllocationType::kOld, *one_closure_cell_map());
- Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
- cell->set_value(*value);
- cell->SetInitialInterruptBudget();
- cell->clear_padding();
- return cell;
+ FeedbackCell result = FeedbackCell::cast(AllocateRawWithImmortalMap(
+ FeedbackCell::kAlignedSize, AllocationType::kOld,
+ *one_closure_cell_map()));
+ DisallowGarbageCollection no_gc;
+ result.set_value(*value);
+ result.SetInitialInterruptBudget();
+ result.clear_padding();
+ return handle(result, isolate());
}
Handle<FeedbackCell> Factory::NewManyClosuresCell(Handle<HeapObject> value) {
- HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kAlignedSize,
- AllocationType::kOld,
- *many_closures_cell_map());
- Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
- cell->set_value(*value);
- cell->SetInitialInterruptBudget();
- cell->clear_padding();
- return cell;
+ FeedbackCell result = FeedbackCell::cast(AllocateRawWithImmortalMap(
+ FeedbackCell::kAlignedSize, AllocationType::kOld,
+ *many_closures_cell_map()));
+ DisallowGarbageCollection no_gc;
+ result.set_value(*value);
+ result.SetInitialInterruptBudget();
+ result.clear_padding();
+ return handle(result, isolate());
}
Handle<PropertyCell> Factory::NewPropertyCell(Handle<Name> name,
@@ -1398,15 +1464,18 @@ Handle<PropertyCell> Factory::NewPropertyCell(Handle<Name> name,
AllocationType allocation) {
DCHECK(name->IsUniqueName());
STATIC_ASSERT(PropertyCell::kSize <= kMaxRegularHeapObjectSize);
- HeapObject result = AllocateRawWithImmortalMap(
- PropertyCell::kSize, allocation, *global_property_cell_map());
- Handle<PropertyCell> cell(PropertyCell::cast(result), isolate());
- cell->set_dependent_code(DependentCode::cast(*empty_weak_fixed_array()),
- SKIP_WRITE_BARRIER);
- cell->set_name(*name);
- cell->set_value(*value);
- cell->set_property_details_raw(details.AsSmi());
- return cell;
+ PropertyCell cell = PropertyCell::cast(AllocateRawWithImmortalMap(
+ PropertyCell::kSize, allocation, *global_property_cell_map()));
+ DisallowGarbageCollection no_gc;
+ cell.set_dependent_code(DependentCode::cast(*empty_weak_fixed_array()),
+ SKIP_WRITE_BARRIER);
+ WriteBarrierMode mode = allocation == AllocationType::kYoung
+ ? SKIP_WRITE_BARRIER
+ : UPDATE_WRITE_BARRIER;
+ cell.set_name(*name, mode);
+ cell.set_value(*value, mode);
+ cell.set_property_details_raw(details.AsSmi(), SKIP_WRITE_BARRIER);
+ return handle(cell, isolate());
}
Handle<PropertyCell> Factory::NewProtector() {
@@ -1461,6 +1530,7 @@ Handle<Map> Factory::NewMap(InstanceType type, int instance_size,
IsTerminalElementsKind(elements_kind));
HeapObject result = isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(
Map::kSize, AllocationType::kMap);
+ DisallowGarbageCollection no_gc;
result.set_map_after_allocation(*meta_map(), SKIP_WRITE_BARRIER);
return handle(InitializeMap(Map::cast(result), type, instance_size,
elements_kind, inobject_properties),
@@ -1470,9 +1540,11 @@ Handle<Map> Factory::NewMap(InstanceType type, int instance_size,
Map Factory::InitializeMap(Map map, InstanceType type, int instance_size,
ElementsKind elements_kind,
int inobject_properties) {
+ DisallowGarbageCollection no_gc;
map.set_instance_type(type);
- map.set_prototype(*null_value(), SKIP_WRITE_BARRIER);
- map.set_constructor_or_back_pointer(*null_value(), SKIP_WRITE_BARRIER);
+ HeapObject raw_null_value = *null_value();
+ map.set_prototype(raw_null_value, SKIP_WRITE_BARRIER);
+ map.set_constructor_or_back_pointer(raw_null_value, SKIP_WRITE_BARRIER);
map.set_instance_size(instance_size);
if (map.IsJSObjectMap()) {
DCHECK(!ReadOnlyHeap::Contains(map));
@@ -1483,16 +1555,20 @@ Map Factory::InitializeMap(Map map, InstanceType type, int instance_size,
} else {
DCHECK_EQ(inobject_properties, 0);
map.set_inobject_properties_start_or_constructor_function_index(0);
- map.set_prototype_validity_cell(Smi::FromInt(Map::kPrototypeChainValid));
+ map.set_prototype_validity_cell(Smi::FromInt(Map::kPrototypeChainValid),
+ SKIP_WRITE_BARRIER);
}
map.set_dependent_code(DependentCode::cast(*empty_weak_fixed_array()),
SKIP_WRITE_BARRIER);
- map.set_raw_transitions(MaybeObject::FromSmi(Smi::zero()));
+ map.set_raw_transitions(MaybeObject::FromSmi(Smi::zero()),
+ SKIP_WRITE_BARRIER);
map.SetInObjectUnusedPropertyFields(inobject_properties);
map.SetInstanceDescriptors(isolate(), *empty_descriptor_array(), 0);
// Must be called only after |instance_type| and |instance_size| are set.
map.set_visitor_id(Map::GetVisitorId(map));
- map.set_bit_field(0);
+ // TODO(solanes, v8:7790, v8:11353): set_relaxed_bit_field could be an atomic
+ // set if TSAN could see the transitions happening in StoreIC.
+ map.set_relaxed_bit_field(0);
map.set_bit_field2(Map::Bits2::NewTargetIsBaseBit::encode(true));
int bit_field3 =
Map::Bits3::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
@@ -1518,18 +1594,23 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
// We can only clone regexps, normal objects, api objects, errors or arrays.
// Copying anything else will break invariants.
- CHECK(map->instance_type() == JS_REG_EXP_TYPE ||
- map->instance_type() == JS_OBJECT_TYPE ||
- map->instance_type() == JS_ERROR_TYPE ||
- map->instance_type() == JS_ARRAY_TYPE ||
- map->instance_type() == JS_API_OBJECT_TYPE ||
- map->instance_type() == WASM_GLOBAL_OBJECT_TYPE ||
- map->instance_type() == WASM_INSTANCE_OBJECT_TYPE ||
- map->instance_type() == WASM_MEMORY_OBJECT_TYPE ||
- map->instance_type() == WASM_MODULE_OBJECT_TYPE ||
- map->instance_type() == WASM_TABLE_OBJECT_TYPE ||
- map->instance_type() == JS_SPECIAL_API_OBJECT_TYPE);
- DCHECK(site.is_null() || AllocationSite::CanTrack(map->instance_type()));
+ InstanceType instance_type = map->instance_type();
+ bool is_clonable_js_type =
+ instance_type == JS_REG_EXP_TYPE || instance_type == JS_OBJECT_TYPE ||
+ instance_type == JS_ERROR_TYPE || instance_type == JS_ARRAY_TYPE ||
+ instance_type == JS_API_OBJECT_TYPE ||
+ instance_type == JS_SPECIAL_API_OBJECT_TYPE;
+ bool is_clonable_wasm_type = false;
+#if V8_ENABLE_WEBASSEMBLY
+ is_clonable_wasm_type = instance_type == WASM_GLOBAL_OBJECT_TYPE ||
+ instance_type == WASM_INSTANCE_OBJECT_TYPE ||
+ instance_type == WASM_MEMORY_OBJECT_TYPE ||
+ instance_type == WASM_MODULE_OBJECT_TYPE ||
+ instance_type == WASM_TABLE_OBJECT_TYPE;
+#endif // V8_ENABLE_WEBASSEMBLY
+ CHECK(is_clonable_js_type || is_clonable_wasm_type);
+
+ DCHECK(site.is_null() || AllocationSite::CanTrack(instance_type));
int object_size = map->instance_size();
int adjusted_object_size =
@@ -1581,32 +1662,34 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
clone->set_raw_properties_or_hash(*prop);
}
} else {
- Handle<FixedArray> properties =
- handle(V8_DICT_MODE_PROTOTYPES_BOOL
- ? FixedArray::cast(source->property_dictionary_ordered())
- : FixedArray::cast(source->property_dictionary()),
- isolate());
- Handle<FixedArray> prop = CopyFixedArray(properties);
- clone->set_raw_properties_or_hash(*prop);
+ Handle<Object> copied_properties;
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ copied_properties = SwissNameDictionary::ShallowCopy(
+ isolate(), handle(source->property_dictionary_swiss(), isolate()));
+ } else {
+ copied_properties =
+ CopyFixedArray(handle(source->property_dictionary(), isolate()));
+ }
+ clone->set_raw_properties_or_hash(*copied_properties);
}
return clone;
}
namespace {
template <typename T>
-void initialize_length(Handle<T> array, int length) {
- array->set_length(length);
+void initialize_length(T array, int length) {
+ array.set_length(length);
}
template <>
-void initialize_length<PropertyArray>(Handle<PropertyArray> array, int length) {
- array->initialize_length(length);
+void initialize_length<PropertyArray>(PropertyArray array, int length) {
+ array.initialize_length(length);
}
-inline void ZeroEmbedderFields(i::Handle<i::JSObject> obj) {
- auto count = obj->GetEmbedderFieldCount();
+inline void ZeroEmbedderFields(i::JSObject obj) {
+ int count = obj.GetEmbedderFieldCount();
for (int i = 0; i < count; i++) {
- obj->SetEmbedderField(i, Smi::zero());
+ obj.SetEmbedderField(i, Smi::zero());
}
}
@@ -1615,16 +1698,15 @@ inline void ZeroEmbedderFields(i::Handle<i::JSObject> obj) {
template <typename T>
Handle<T> Factory::CopyArrayWithMap(Handle<T> src, Handle<Map> map) {
int len = src->length();
- HeapObject obj = AllocateRawFixedArray(len, AllocationType::kYoung);
- obj.set_map_after_allocation(*map, SKIP_WRITE_BARRIER);
-
- Handle<T> result(T::cast(obj), isolate());
- initialize_length(result, len);
-
+ HeapObject new_object = AllocateRawFixedArray(len, AllocationType::kYoung);
DisallowGarbageCollection no_gc;
- WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
- result->CopyElements(isolate(), 0, *src, 0, len, mode);
- return result;
+ new_object.set_map_after_allocation(*map, SKIP_WRITE_BARRIER);
+ T result = T::cast(new_object);
+ initialize_length(result, len);
+ // Copy the content.
+ WriteBarrierMode mode = result.GetWriteBarrierMode(no_gc);
+ result.CopyElements(isolate(), 0, *src, 0, len, mode);
+ return handle(result, isolate());
}
template <typename T>
@@ -1634,19 +1716,17 @@ Handle<T> Factory::CopyArrayAndGrow(Handle<T> src, int grow_by,
DCHECK_LE(grow_by, kMaxInt - src->length());
int old_len = src->length();
int new_len = old_len + grow_by;
- HeapObject obj = AllocateRawFixedArray(new_len, allocation);
- obj.set_map_after_allocation(src->map(), SKIP_WRITE_BARRIER);
-
- Handle<T> result(T::cast(obj), isolate());
+ HeapObject new_object = AllocateRawFixedArray(new_len, allocation);
+ DisallowGarbageCollection no_gc;
+ new_object.set_map_after_allocation(src->map(), SKIP_WRITE_BARRIER);
+ T result = T::cast(new_object);
initialize_length(result, new_len);
-
// Copy the content.
- DisallowGarbageCollection no_gc;
- WriteBarrierMode mode = obj.GetWriteBarrierMode(no_gc);
- result->CopyElements(isolate(), 0, *src, 0, old_len, mode);
- MemsetTagged(ObjectSlot(result->data_start() + old_len),
- ReadOnlyRoots(isolate()).undefined_value(), grow_by);
- return result;
+ WriteBarrierMode mode = result.GetWriteBarrierMode(no_gc);
+ result.CopyElements(isolate(), 0, *src, 0, old_len, mode);
+ MemsetTagged(ObjectSlot(result.data_start() + old_len),
+ read_only_roots().undefined_value(), grow_by);
+ return handle(result, isolate());
}
Handle<FixedArray> Factory::CopyFixedArrayWithMap(Handle<FixedArray> array,
@@ -1664,13 +1744,14 @@ Handle<WeakArrayList> Factory::NewUninitializedWeakArrayList(
DCHECK_LE(0, capacity);
if (capacity == 0) return empty_weak_array_list();
- HeapObject obj = AllocateRawWeakArrayList(capacity, allocation);
- obj.set_map_after_allocation(*weak_array_list_map(), SKIP_WRITE_BARRIER);
-
- Handle<WeakArrayList> result(WeakArrayList::cast(obj), isolate());
- result->set_length(0);
- result->set_capacity(capacity);
- return result;
+ HeapObject heap_object = AllocateRawWeakArrayList(capacity, allocation);
+ DisallowGarbageCollection no_gc;
+ heap_object.set_map_after_allocation(*weak_array_list_map(),
+ SKIP_WRITE_BARRIER);
+ WeakArrayList result = WeakArrayList::cast(heap_object);
+ result.set_length(0);
+ result.set_capacity(capacity);
+ return handle(result, isolate());
}
Handle<WeakArrayList> Factory::NewWeakArrayList(int capacity,
@@ -1678,7 +1759,7 @@ Handle<WeakArrayList> Factory::NewWeakArrayList(int capacity,
Handle<WeakArrayList> result =
NewUninitializedWeakArrayList(capacity, allocation);
MemsetTagged(ObjectSlot(result->data_start()),
- ReadOnlyRoots(isolate()).undefined_value(), capacity);
+ read_only_roots().undefined_value(), capacity);
return result;
}
@@ -1695,16 +1776,15 @@ Handle<WeakArrayList> Factory::CopyWeakArrayListAndGrow(
DCHECK_GE(new_capacity, old_capacity);
Handle<WeakArrayList> result =
NewUninitializedWeakArrayList(new_capacity, allocation);
+ DisallowGarbageCollection no_gc;
+ WeakArrayList raw = *result;
int old_len = src->length();
- result->set_length(old_len);
-
+ raw.set_length(old_len);
// Copy the content.
- DisallowGarbageCollection no_gc;
- WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
- result->CopyElements(isolate(), 0, *src, 0, old_len, mode);
- MemsetTagged(ObjectSlot(result->data_start() + old_len),
- ReadOnlyRoots(isolate()).undefined_value(),
- new_capacity - old_len);
+ WriteBarrierMode mode = raw.GetWriteBarrierMode(no_gc);
+ raw.CopyElements(isolate(), 0, *src, 0, old_len, mode);
+ MemsetTagged(ObjectSlot(raw.data_start() + old_len),
+ read_only_roots().undefined_value(), new_capacity - old_len);
return result;
}
@@ -1716,18 +1796,19 @@ Handle<WeakArrayList> Factory::CompactWeakArrayList(Handle<WeakArrayList> src,
// Copy the content.
DisallowGarbageCollection no_gc;
- WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
- int copy_to = 0, length = src->length();
+ WeakArrayList raw_src = *src;
+ WeakArrayList raw_result = *result;
+ WriteBarrierMode mode = raw_result.GetWriteBarrierMode(no_gc);
+ int copy_to = 0, length = raw_src.length();
for (int i = 0; i < length; i++) {
- MaybeObject element = src->Get(i);
+ MaybeObject element = raw_src.Get(i);
if (element->IsCleared()) continue;
- result->Set(copy_to++, element, mode);
+ raw_result.Set(copy_to++, element, mode);
}
- result->set_length(copy_to);
+ raw_result.set_length(copy_to);
- MemsetTagged(ObjectSlot(result->data_start() + copy_to),
- ReadOnlyRoots(isolate()).undefined_value(),
- new_capacity - copy_to);
+ MemsetTagged(ObjectSlot(raw_result.data_start() + copy_to),
+ read_only_roots().undefined_value(), new_capacity - copy_to);
return result;
}
@@ -1742,17 +1823,15 @@ Handle<FixedArray> Factory::CopyFixedArrayUpTo(Handle<FixedArray> array,
DCHECK_LE(0, new_len);
DCHECK_LE(new_len, array->length());
if (new_len == 0) return empty_fixed_array();
-
- HeapObject obj = AllocateRawFixedArray(new_len, allocation);
- obj.set_map_after_allocation(*fixed_array_map(), SKIP_WRITE_BARRIER);
- Handle<FixedArray> result(FixedArray::cast(obj), isolate());
- result->set_length(new_len);
-
- // Copy the content.
+ HeapObject heap_object = AllocateRawFixedArray(new_len, allocation);
DisallowGarbageCollection no_gc;
- WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
- result->CopyElements(isolate(), 0, *array, 0, new_len, mode);
- return result;
+ heap_object.set_map_after_allocation(*fixed_array_map(), SKIP_WRITE_BARRIER);
+ FixedArray result = FixedArray::cast(heap_object);
+ result.set_length(new_len);
+ // Copy the content.
+ WriteBarrierMode mode = result.GetWriteBarrierMode(no_gc);
+ result.CopyElements(isolate(), 0, *array, 0, new_len, mode);
+ return handle(result, isolate());
}
Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
@@ -1890,13 +1969,13 @@ Handle<JSObject> Factory::NewExternal(void* value) {
Handle<CodeDataContainer> Factory::NewCodeDataContainer(
int flags, AllocationType allocation) {
- Handle<CodeDataContainer> data_container(
- CodeDataContainer::cast(New(code_data_container_map(), allocation)),
- isolate());
- data_container->set_next_code_link(*undefined_value(), SKIP_WRITE_BARRIER);
- data_container->set_kind_specific_flags(flags);
- data_container->clear_padding();
- return data_container;
+ CodeDataContainer data_container =
+ CodeDataContainer::cast(New(code_data_container_map(), allocation));
+ DisallowGarbageCollection no_gc;
+ data_container.set_next_code_link(*undefined_value(), SKIP_WRITE_BARRIER);
+ data_container.set_kind_specific_flags(flags);
+ data_container.clear_padding();
+ return handle(data_container, isolate());
}
Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
@@ -1922,35 +2001,38 @@ Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
// The trampoline code object must inherit specific flags from the original
// builtin (e.g. the safepoint-table offset). We set them manually here.
{
+ DisallowGarbageCollection no_gc;
CodePageMemoryModificationScope code_allocation(*result);
+ Code raw_code = *code;
+ Code raw_result = *result;
const bool set_is_off_heap_trampoline = true;
const int stack_slots =
- code->has_safepoint_info() ? code->stack_slots() : 0;
- result->initialize_flags(code->kind(), code->is_turbofanned(), stack_slots,
- set_is_off_heap_trampoline);
- result->set_builtin_index(code->builtin_index());
- result->set_handler_table_offset(code->handler_table_offset());
- result->set_constant_pool_offset(code->constant_pool_offset());
- result->set_code_comments_offset(code->code_comments_offset());
- result->set_unwinding_info_offset(code->unwinding_info_offset());
+ raw_code.has_safepoint_info() ? raw_code.stack_slots() : 0;
+ raw_result.initialize_flags(raw_code.kind(), raw_code.is_turbofanned(),
+ stack_slots, set_is_off_heap_trampoline);
+ raw_result.set_builtin_index(raw_code.builtin_index());
+ raw_result.set_handler_table_offset(raw_code.handler_table_offset());
+ raw_result.set_constant_pool_offset(raw_code.constant_pool_offset());
+ raw_result.set_code_comments_offset(raw_code.code_comments_offset());
+ raw_result.set_unwinding_info_offset(raw_code.unwinding_info_offset());
// Replace the newly generated trampoline's RelocInfo ByteArray with the
// canonical one stored in the roots to avoid duplicating it for every
// single builtin.
ByteArray canonical_reloc_info =
generate_jump_to_instruction_stream
- ? ReadOnlyRoots(isolate()).off_heap_trampoline_relocation_info()
- : ReadOnlyRoots(isolate()).empty_byte_array();
+ ? read_only_roots().off_heap_trampoline_relocation_info()
+ : read_only_roots().empty_byte_array();
#ifdef DEBUG
// Verify that the contents are the same.
- ByteArray reloc_info = result->relocation_info();
+ ByteArray reloc_info = raw_result.relocation_info();
DCHECK_EQ(reloc_info.length(), canonical_reloc_info.length());
for (int i = 0; i < reloc_info.length(); ++i) {
DCHECK_EQ(reloc_info.get(i), canonical_reloc_info.get(i));
}
#endif
- result->set_relocation_info(canonical_reloc_info);
+ raw_result.set_relocation_info(canonical_reloc_info);
}
return result;
@@ -1999,26 +2081,25 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
return new_code;
}
-Handle<BytecodeArray> Factory::CopyBytecodeArray(
- Handle<BytecodeArray> bytecode_array) {
- int size = BytecodeArray::SizeFor(bytecode_array->length());
- HeapObject result = AllocateRawWithImmortalMap(size, AllocationType::kOld,
- *bytecode_array_map());
-
- Handle<BytecodeArray> copy(BytecodeArray::cast(result), isolate());
- copy->set_length(bytecode_array->length());
- copy->set_frame_size(bytecode_array->frame_size());
- copy->set_parameter_count(bytecode_array->parameter_count());
- copy->set_incoming_new_target_or_generator_register(
- bytecode_array->incoming_new_target_or_generator_register());
- copy->set_constant_pool(bytecode_array->constant_pool());
- copy->set_handler_table(bytecode_array->handler_table());
- copy->set_source_position_table(
- bytecode_array->source_position_table(kAcquireLoad), kReleaseStore);
- copy->set_osr_loop_nesting_level(bytecode_array->osr_loop_nesting_level());
- copy->set_bytecode_age(bytecode_array->bytecode_age());
- bytecode_array->CopyBytecodesTo(*copy);
- return copy;
+Handle<BytecodeArray> Factory::CopyBytecodeArray(Handle<BytecodeArray> source) {
+ int size = BytecodeArray::SizeFor(source->length());
+ BytecodeArray copy = BytecodeArray::cast(AllocateRawWithImmortalMap(
+ size, AllocationType::kOld, *bytecode_array_map()));
+ DisallowGarbageCollection no_gc;
+ BytecodeArray raw_source = *source;
+ copy.set_length(raw_source.length());
+ copy.set_frame_size(raw_source.frame_size());
+ copy.set_parameter_count(raw_source.parameter_count());
+ copy.set_incoming_new_target_or_generator_register(
+ raw_source.incoming_new_target_or_generator_register());
+ copy.set_constant_pool(raw_source.constant_pool());
+ copy.set_handler_table(raw_source.handler_table());
+ copy.set_source_position_table(raw_source.source_position_table(kAcquireLoad),
+ kReleaseStore);
+ copy.set_osr_loop_nesting_level(raw_source.osr_loop_nesting_level());
+ copy.set_bytecode_age(raw_source.bytecode_age());
+ raw_source.CopyBytecodesTo(copy);
+ return handle(copy, isolate());
}
Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
@@ -2065,7 +2146,7 @@ Handle<JSGlobalObject> Factory::NewJSGlobalObject(
// The global object might be created from an object template with accessors.
// Fill these accessors into the dictionary.
- Handle<DescriptorArray> descs(map->instance_descriptors(kRelaxedLoad),
+ Handle<DescriptorArray> descs(map->instance_descriptors(isolate()),
isolate());
for (InternalIndex i : map->IterateOwnDescriptors()) {
PropertyDetails details = descs->GetDetails(i);
@@ -2083,28 +2164,29 @@ Handle<JSGlobalObject> Factory::NewJSGlobalObject(
// Allocate the global object and initialize it with the backing store.
Handle<JSGlobalObject> global(
JSGlobalObject::cast(New(map, AllocationType::kOld)), isolate());
- InitializeJSObjectFromMap(global, dictionary, map);
+ InitializeJSObjectFromMap(*global, *dictionary, *map);
// Create a new map for the global object.
Handle<Map> new_map = Map::CopyDropDescriptors(isolate(), map);
- new_map->set_may_have_interesting_symbols(true);
- new_map->set_is_dictionary_map(true);
- LOG(isolate(), MapDetails(*new_map));
+ Map raw_map = *new_map;
+ raw_map.set_may_have_interesting_symbols(true);
+ raw_map.set_is_dictionary_map(true);
+ LOG(isolate(), MapDetails(raw_map));
// Set up the global object as a normalized object.
global->set_global_dictionary(*dictionary, kReleaseStore);
- global->synchronized_set_map(*new_map);
+ global->synchronized_set_map(raw_map);
// Make sure result is a global object with properties in dictionary.
DCHECK(global->IsJSGlobalObject() && !global->HasFastProperties());
return global;
}
-void Factory::InitializeJSObjectFromMap(Handle<JSObject> obj,
- Handle<Object> properties,
- Handle<Map> map) {
- obj->set_raw_properties_or_hash(*properties);
- obj->initialize_elements();
+void Factory::InitializeJSObjectFromMap(JSObject obj, Object properties,
+ Map map) {
+ DisallowGarbageCollection no_gc;
+ obj.set_raw_properties_or_hash(properties);
+ obj.initialize_elements();
// TODO(1240798): Initialize the object's body using valid initial values
// according to the object's initial map. For example, if the map's
// instance type is JS_ARRAY_TYPE, the length field should be initialized
@@ -2115,10 +2197,10 @@ void Factory::InitializeJSObjectFromMap(Handle<JSObject> obj,
InitializeJSObjectBody(obj, map, JSObject::kHeaderSize);
}
-void Factory::InitializeJSObjectBody(Handle<JSObject> obj, Handle<Map> map,
- int start_offset) {
- if (start_offset == map->instance_size()) return;
- DCHECK_LT(start_offset, map->instance_size());
+void Factory::InitializeJSObjectBody(JSObject obj, Map map, int start_offset) {
+ DisallowGarbageCollection no_gc;
+ if (start_offset == map.instance_size()) return;
+ DCHECK_LT(start_offset, map.instance_size());
// We cannot always fill with one_pointer_filler_map because objects
// created from API functions expect their embedder fields to be initialized
@@ -2129,16 +2211,16 @@ void Factory::InitializeJSObjectBody(Handle<JSObject> obj, Handle<Map> map,
// In case of Array subclassing the |map| could already be transitioned
// to different elements kind from the initial map on which we track slack.
- bool in_progress = map->IsInobjectSlackTrackingInProgress();
+ bool in_progress = map.IsInobjectSlackTrackingInProgress();
Object filler;
if (in_progress) {
filler = *one_pointer_filler_map();
} else {
filler = *undefined_value();
}
- obj->InitializeBody(*map, start_offset, *undefined_value(), filler);
+ obj.InitializeBody(map, start_offset, *undefined_value(), filler);
if (in_progress) {
- map->FindRootMap(isolate()).InobjectSlackTrackingStep(isolate());
+ map.FindRootMap(isolate()).InobjectSlackTrackingStep(isolate());
}
}
@@ -2153,16 +2235,15 @@ Handle<JSObject> Factory::NewJSObjectFromMap(
// AllocateGlobalObject to be properly initialized.
DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
- HeapObject obj =
- AllocateRawWithAllocationSite(map, allocation, allocation_site);
- Handle<JSObject> js_obj(JSObject::cast(obj), isolate());
+ JSObject js_obj = JSObject::cast(
+ AllocateRawWithAllocationSite(map, allocation, allocation_site));
- InitializeJSObjectFromMap(js_obj, empty_fixed_array(), map);
+ InitializeJSObjectFromMap(js_obj, *empty_fixed_array(), *map);
- DCHECK(js_obj->HasFastElements() || js_obj->HasTypedArrayElements() ||
- js_obj->HasFastStringWrapperElements() ||
- js_obj->HasFastArgumentsElements() || js_obj->HasDictionaryElements());
- return js_obj;
+ DCHECK(js_obj.HasFastElements() || js_obj.HasTypedArrayElements() ||
+ js_obj.HasFastStringWrapperElements() ||
+ js_obj.HasFastArgumentsElements() || js_obj.HasDictionaryElements());
+ return handle(js_obj, isolate());
}
Handle<JSObject> Factory::NewSlowJSObjectFromMap(
@@ -2170,9 +2251,8 @@ Handle<JSObject> Factory::NewSlowJSObjectFromMap(
Handle<AllocationSite> allocation_site) {
DCHECK(map->is_dictionary_map());
Handle<HeapObject> object_properties;
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- object_properties =
- OrderedNameDictionary::Allocate(isolate(), capacity).ToHandleChecked();
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ object_properties = NewSwissNameDictionary(capacity, allocation);
} else {
object_properties = NameDictionary::New(isolate(), capacity);
}
@@ -2185,9 +2265,10 @@ Handle<JSObject> Factory::NewSlowJSObjectFromMap(
Handle<JSObject> Factory::NewSlowJSObjectWithPropertiesAndElements(
Handle<HeapObject> prototype, Handle<HeapObject> properties,
Handle<FixedArrayBase> elements) {
- DCHECK_IMPLIES(V8_DICT_MODE_PROTOTYPES_BOOL,
- properties->IsOrderedNameDictionary());
- DCHECK_IMPLIES(!V8_DICT_MODE_PROTOTYPES_BOOL, properties->IsNameDictionary());
+ DCHECK_IMPLIES(V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL,
+ properties->IsSwissNameDictionary());
+ DCHECK_IMPLIES(!V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL,
+ properties->IsNameDictionary());
Handle<Map> object_map = isolate()->slow_object_with_object_prototype_map();
if (object_map->prototype() != *prototype) {
@@ -2197,7 +2278,7 @@ Handle<JSObject> Factory::NewSlowJSObjectWithPropertiesAndElements(
Handle<JSObject> object =
NewJSObjectFromMap(object_map, AllocationType::kYoung);
object->set_raw_properties_or_hash(*properties);
- if (*elements != ReadOnlyRoots(isolate()).empty_fixed_array()) {
+ if (*elements != read_only_roots().empty_fixed_array()) {
DCHECK(elements->IsNumberDictionary());
object_map =
JSObject::GetElementsTransitionMap(object, DICTIONARY_ELEMENTS);
@@ -2247,8 +2328,9 @@ Handle<JSArray> Factory::NewJSArrayWithUnverifiedElements(
Handle<JSArray> array = Handle<JSArray>::cast(
NewJSObjectFromMap(handle(map, isolate()), allocation));
DisallowGarbageCollection no_gc;
- array->set_elements(*elements);
- array->set_length(Smi::FromInt(length));
+ JSArray raw = *array;
+ raw.set_elements(*elements);
+ raw.set_length(Smi::FromInt(length));
return array;
}
@@ -2257,17 +2339,20 @@ void Factory::NewJSArrayStorage(Handle<JSArray> array, int length, int capacity,
DCHECK(capacity >= length);
if (capacity == 0) {
- array->set_length(Smi::zero());
- array->set_elements(*empty_fixed_array());
+ JSArray raw = *array;
+ DisallowGarbageCollection no_gc;
+ raw.set_length(Smi::zero());
+ raw.set_elements(*empty_fixed_array());
return;
}
HandleScope inner_scope(isolate());
Handle<FixedArrayBase> elms =
NewJSArrayStorage(array->GetElementsKind(), capacity, mode);
-
- array->set_elements(*elms);
- array->set_length(Smi::FromInt(length));
+ DisallowGarbageCollection no_gc;
+ JSArray raw = *array;
+ raw.set_elements(*elms);
+ raw.set_length(Smi::FromInt(length));
}
Handle<FixedArrayBase> Factory::NewJSArrayStorage(
@@ -2312,8 +2397,8 @@ Handle<JSModuleNamespace> Factory::NewJSModuleNamespace() {
Handle<JSModuleNamespace>::cast(NewJSObjectFromMap(map)));
FieldIndex index = FieldIndex::ForDescriptor(
*map, InternalIndex(JSModuleNamespace::kToStringTagFieldIndex));
- module_namespace->FastPropertyAtPut(index,
- ReadOnlyRoots(isolate()).Module_string());
+ module_namespace->FastPropertyAtPut(index, read_only_roots().Module_string(),
+ SKIP_WRITE_BARRIER);
return module_namespace;
}
@@ -2346,30 +2431,29 @@ Handle<SourceTextModule> Factory::NewSourceTextModule(
Handle<ArrayList> async_parent_modules = ArrayList::New(isolate(), 0);
ReadOnlyRoots roots(isolate());
- Handle<SourceTextModule> module(
- SourceTextModule::cast(
- New(source_text_module_map(), AllocationType::kOld)),
- isolate());
- module->set_code(*sfi);
- module->set_exports(*exports);
- module->set_regular_exports(*regular_exports);
- module->set_regular_imports(*regular_imports);
- module->set_hash(isolate()->GenerateIdentityHash(Smi::kMaxValue));
- module->set_module_namespace(roots.undefined_value());
- module->set_requested_modules(*requested_modules);
- module->set_status(Module::kUninstantiated);
- module->set_exception(roots.the_hole_value());
- module->set_top_level_capability(roots.undefined_value());
- module->set_import_meta(roots.the_hole_value());
- module->set_dfs_index(-1);
- module->set_dfs_ancestor_index(-1);
- module->set_flags(0);
- module->set_async(IsAsyncModule(sfi->kind()));
- module->set_async_evaluating_ordinal(SourceTextModule::kNotAsyncEvaluated);
- module->set_cycle_root(roots.the_hole_value());
- module->set_async_parent_modules(*async_parent_modules);
- module->set_pending_async_dependencies(0);
- return module;
+ SourceTextModule module = SourceTextModule::cast(
+ New(source_text_module_map(), AllocationType::kOld));
+ DisallowGarbageCollection no_gc;
+ module.set_code(*sfi);
+ module.set_exports(*exports);
+ module.set_regular_exports(*regular_exports);
+ module.set_regular_imports(*regular_imports);
+ module.set_hash(isolate()->GenerateIdentityHash(Smi::kMaxValue));
+ module.set_module_namespace(roots.undefined_value(), SKIP_WRITE_BARRIER);
+ module.set_requested_modules(*requested_modules);
+ module.set_status(Module::kUninstantiated);
+ module.set_exception(roots.the_hole_value(), SKIP_WRITE_BARRIER);
+ module.set_top_level_capability(roots.undefined_value(), SKIP_WRITE_BARRIER);
+ module.set_import_meta(roots.the_hole_value(), SKIP_WRITE_BARRIER);
+ module.set_dfs_index(-1);
+ module.set_dfs_ancestor_index(-1);
+ module.set_flags(0);
+ module.set_async(IsAsyncModule(sfi->kind()));
+ module.set_async_evaluating_ordinal(SourceTextModule::kNotAsyncEvaluated);
+ module.set_cycle_root(roots.the_hole_value(), SKIP_WRITE_BARRIER);
+ module.set_async_parent_modules(*async_parent_modules);
+ module.set_pending_async_dependencies(0);
+ return handle(module, isolate());
}
Handle<SyntheticModule> Factory::NewSyntheticModule(
@@ -2382,19 +2466,19 @@ Handle<SyntheticModule> Factory::NewSyntheticModule(
Handle<Foreign> evaluation_steps_foreign =
NewForeign(reinterpret_cast<i::Address>(evaluation_steps));
- Handle<SyntheticModule> module(
- SyntheticModule::cast(New(synthetic_module_map(), AllocationType::kOld)),
- isolate());
- module->set_hash(isolate()->GenerateIdentityHash(Smi::kMaxValue));
- module->set_module_namespace(roots.undefined_value());
- module->set_status(Module::kUninstantiated);
- module->set_exception(roots.the_hole_value());
- module->set_top_level_capability(roots.undefined_value());
- module->set_name(*module_name);
- module->set_export_names(*export_names);
- module->set_exports(*exports);
- module->set_evaluation_steps(*evaluation_steps_foreign);
- return module;
+ SyntheticModule module =
+ SyntheticModule::cast(New(synthetic_module_map(), AllocationType::kOld));
+ DisallowGarbageCollection no_gc;
+ module.set_hash(isolate()->GenerateIdentityHash(Smi::kMaxValue));
+ module.set_module_namespace(roots.undefined_value(), SKIP_WRITE_BARRIER);
+ module.set_status(Module::kUninstantiated);
+ module.set_exception(roots.the_hole_value(), SKIP_WRITE_BARRIER);
+ module.set_top_level_capability(roots.undefined_value(), SKIP_WRITE_BARRIER);
+ module.set_name(*module_name);
+ module.set_export_names(*export_names);
+ module.set_exports(*exports);
+ module.set_evaluation_steps(*evaluation_steps_foreign);
+ return handle(module, isolate());
}
Handle<JSArrayBuffer> Factory::NewJSArrayBuffer(
@@ -2440,10 +2524,12 @@ Handle<JSIteratorResult> Factory::NewJSIteratorResult(Handle<Object> value,
bool done) {
Handle<Map> map(isolate()->native_context()->iterator_result_map(),
isolate());
- Handle<JSIteratorResult> js_iter_result =
- Handle<JSIteratorResult>::cast(NewJSObjectFromMap(map));
- js_iter_result->set_value(*value);
- js_iter_result->set_done(*ToBoolean(done));
+ Handle<JSIteratorResult> js_iter_result = Handle<JSIteratorResult>::cast(
+ NewJSObjectFromMap(map, AllocationType::kYoung));
+ DisallowGarbageCollection no_gc;
+ JSIteratorResult raw = *js_iter_result;
+ raw.set_value(*value, SKIP_WRITE_BARRIER);
+ raw.set_done(*ToBoolean(done), SKIP_WRITE_BARRIER);
return js_iter_result;
}
@@ -2452,10 +2538,12 @@ Handle<JSAsyncFromSyncIterator> Factory::NewJSAsyncFromSyncIterator(
Handle<Map> map(isolate()->native_context()->async_from_sync_iterator_map(),
isolate());
Handle<JSAsyncFromSyncIterator> iterator =
- Handle<JSAsyncFromSyncIterator>::cast(NewJSObjectFromMap(map));
-
- iterator->set_sync_iterator(*sync_iterator);
- iterator->set_next(*next);
+ Handle<JSAsyncFromSyncIterator>::cast(
+ NewJSObjectFromMap(map, AllocationType::kYoung));
+ DisallowGarbageCollection no_gc;
+ JSAsyncFromSyncIterator raw = *iterator;
+ raw.set_sync_iterator(*sync_iterator, SKIP_WRITE_BARRIER);
+ raw.set_next(*next, SKIP_WRITE_BARRIER);
return iterator;
}
@@ -2517,12 +2605,14 @@ Handle<JSArrayBufferView> Factory::NewJSArrayBufferView(
CHECK_LE(byte_offset + byte_length, buffer->byte_length());
Handle<JSArrayBufferView> array_buffer_view = Handle<JSArrayBufferView>::cast(
NewJSObjectFromMap(map, AllocationType::kYoung));
- array_buffer_view->set_elements(*elements);
- array_buffer_view->set_buffer(*buffer);
- array_buffer_view->set_byte_offset(byte_offset);
- array_buffer_view->set_byte_length(byte_length);
- ZeroEmbedderFields(array_buffer_view);
- DCHECK_EQ(array_buffer_view->GetEmbedderFieldCount(),
+ DisallowGarbageCollection no_gc;
+ JSArrayBufferView raw = *array_buffer_view;
+ raw.set_elements(*elements, SKIP_WRITE_BARRIER);
+ raw.set_buffer(*buffer, SKIP_WRITE_BARRIER);
+ raw.set_byte_offset(byte_offset);
+ raw.set_byte_length(byte_length);
+ ZeroEmbedderFields(raw);
+ DCHECK_EQ(raw.GetEmbedderFieldCount(),
v8::ArrayBufferView::kEmbedderFieldCount);
return array_buffer_view;
}
@@ -2558,10 +2648,11 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
Handle<JSTypedArray> typed_array =
Handle<JSTypedArray>::cast(NewJSArrayBufferView(
map, empty_byte_array(), buffer, byte_offset, byte_length));
- typed_array->AllocateExternalPointerEntries(isolate());
- typed_array->set_length(length);
- typed_array->SetOffHeapDataPtr(isolate(), buffer->backing_store(),
- byte_offset);
+ JSTypedArray raw = *typed_array;
+ DisallowGarbageCollection no_gc;
+ raw.AllocateExternalPointerEntries(isolate());
+ raw.set_length(length);
+ raw.SetOffHeapDataPtr(isolate(), buffer->backing_store(), byte_offset);
return typed_array;
}
@@ -2619,11 +2710,13 @@ MaybeHandle<JSBoundFunction> Factory::NewJSBoundFunction(
DCHECK_EQ(target_function->IsConstructor(), map->is_constructor());
// Setup the JSBoundFunction instance.
- Handle<JSBoundFunction> result =
- Handle<JSBoundFunction>::cast(NewJSObjectFromMap(map));
- result->set_bound_target_function(*target_function);
- result->set_bound_this(*bound_this);
- result->set_bound_arguments(*bound_arguments);
+ Handle<JSBoundFunction> result = Handle<JSBoundFunction>::cast(
+ NewJSObjectFromMap(map, AllocationType::kYoung));
+ DisallowGarbageCollection no_gc;
+ JSBoundFunction raw = *result;
+ raw.set_bound_target_function(*target_function, SKIP_WRITE_BARRIER);
+ raw.set_bound_this(*bound_this, SKIP_WRITE_BARRIER);
+ raw.set_bound_arguments(*bound_arguments, SKIP_WRITE_BARRIER);
return result;
}
@@ -2642,12 +2735,12 @@ Handle<JSProxy> Factory::NewJSProxy(Handle<JSReceiver> target,
map = Handle<Map>(isolate()->proxy_map());
}
DCHECK(map->prototype().IsNull(isolate()));
- Handle<JSProxy> result(JSProxy::cast(New(map, AllocationType::kYoung)),
- isolate());
- result->initialize_properties(isolate());
- result->set_target(*target);
- result->set_handler(*handler);
- return result;
+ JSProxy result = JSProxy::cast(New(map, AllocationType::kYoung));
+ DisallowGarbageCollection no_gc;
+ result.initialize_properties(isolate());
+ result.set_target(*target, SKIP_WRITE_BARRIER);
+ result.set_handler(*handler, SKIP_WRITE_BARRIER);
+ return handle(result, isolate());
}
Handle<JSGlobalProxy> Factory::NewUninitializedJSGlobalProxy(int size) {
@@ -2655,9 +2748,13 @@ Handle<JSGlobalProxy> Factory::NewUninitializedJSGlobalProxy(int size) {
// via ReinitializeJSGlobalProxy later.
Handle<Map> map = NewMap(JS_GLOBAL_PROXY_TYPE, size);
// Maintain invariant expected from any JSGlobalProxy.
- map->set_is_access_check_needed(true);
- map->set_may_have_interesting_symbols(true);
- LOG(isolate(), MapDetails(*map));
+ {
+ DisallowGarbageCollection no_gc;
+ Map raw = *map;
+ raw.set_is_access_check_needed(true);
+ raw.set_may_have_interesting_symbols(true);
+ LOG(isolate(), MapDetails(raw));
+ }
Handle<JSGlobalProxy> proxy = Handle<JSGlobalProxy>::cast(
NewJSObjectFromMap(map, AllocationType::kOld));
// Create identity hash early in case there is any JS collection containing
@@ -2693,10 +2790,11 @@ void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
DisallowGarbageCollection no_gc;
// Reset the map for the object.
- object->synchronized_set_map(*map);
+ JSGlobalProxy raw = *object;
+ raw.synchronized_set_map(*map);
// Reinitialize the object from the constructor map.
- InitializeJSObjectFromMap(object, raw_properties_or_hash, map);
+ InitializeJSObjectFromMap(raw, *raw_properties_or_hash, *map);
}
Handle<JSMessageObject> Factory::NewJSMessageObject(
@@ -2704,37 +2802,38 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
int end_position, Handle<SharedFunctionInfo> shared_info,
int bytecode_offset, Handle<Script> script, Handle<Object> stack_frames) {
Handle<Map> map = message_object_map();
- Handle<JSMessageObject> message_obj(
- JSMessageObject::cast(New(map, AllocationType::kYoung)), isolate());
- message_obj->set_raw_properties_or_hash(*empty_fixed_array(),
- SKIP_WRITE_BARRIER);
- message_obj->initialize_elements();
- message_obj->set_elements(*empty_fixed_array(), SKIP_WRITE_BARRIER);
- message_obj->set_type(message);
- message_obj->set_argument(*argument);
- message_obj->set_start_position(start_position);
- message_obj->set_end_position(end_position);
- message_obj->set_script(*script);
+ JSMessageObject message_obj =
+ JSMessageObject::cast(New(map, AllocationType::kYoung));
+ DisallowGarbageCollection no_gc;
+ message_obj.set_raw_properties_or_hash(*empty_fixed_array(),
+ SKIP_WRITE_BARRIER);
+ message_obj.initialize_elements();
+ message_obj.set_elements(*empty_fixed_array(), SKIP_WRITE_BARRIER);
+ message_obj.set_type(message);
+ message_obj.set_argument(*argument, SKIP_WRITE_BARRIER);
+ message_obj.set_start_position(start_position);
+ message_obj.set_end_position(end_position);
+ message_obj.set_script(*script, SKIP_WRITE_BARRIER);
if (start_position >= 0) {
// If there's a start_position, then there's no need to store the
// SharedFunctionInfo as it will never be necessary to regenerate the
// position.
- message_obj->set_shared_info(*undefined_value());
- message_obj->set_bytecode_offset(Smi::FromInt(0));
+ message_obj.set_shared_info(*undefined_value(), SKIP_WRITE_BARRIER);
+ message_obj.set_bytecode_offset(Smi::FromInt(0));
} else {
- message_obj->set_bytecode_offset(Smi::FromInt(bytecode_offset));
+ message_obj.set_bytecode_offset(Smi::FromInt(bytecode_offset));
if (shared_info.is_null()) {
- message_obj->set_shared_info(*undefined_value());
+ message_obj.set_shared_info(*undefined_value(), SKIP_WRITE_BARRIER);
DCHECK_EQ(bytecode_offset, -1);
} else {
- message_obj->set_shared_info(*shared_info);
+ message_obj.set_shared_info(*shared_info, SKIP_WRITE_BARRIER);
DCHECK_GE(bytecode_offset, kFunctionEntryBytecodeOffset);
}
}
- message_obj->set_stack_frames(*stack_frames);
- message_obj->set_error_level(v8::Isolate::kMessageError);
- return message_obj;
+ message_obj.set_stack_frames(*stack_frames, SKIP_WRITE_BARRIER);
+ message_obj.set_error_level(v8::Isolate::kMessageError);
+ return handle(message_obj, isolate());
}
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForApiFunction(
@@ -2745,23 +2844,6 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForApiFunction(
return shared;
}
-Handle<SharedFunctionInfo>
-Factory::NewSharedFunctionInfoForWasmExportedFunction(
- Handle<String> name, Handle<WasmExportedFunctionData> data) {
- return NewSharedFunctionInfo(name, data, Builtins::kNoBuiltinId);
-}
-
-Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForWasmJSFunction(
- Handle<String> name, Handle<WasmJSFunctionData> data) {
- return NewSharedFunctionInfo(name, data, Builtins::kNoBuiltinId);
-}
-
-Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForWasmCapiFunction(
- Handle<WasmCapiFunctionData> data) {
- return NewSharedFunctionInfo(MaybeHandle<String>(), data,
- Builtins::kNoBuiltinId, kConciseMethod);
-}
-
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForBuiltin(
MaybeHandle<String> maybe_name, int builtin_index, FunctionKind kind) {
Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(
@@ -2805,17 +2887,19 @@ void Factory::NumberToStringCacheSet(Handle<Object> number, int hash,
return;
}
}
- number_string_cache()->set(hash * 2, *number);
- number_string_cache()->set(hash * 2 + 1, *js_string);
+ DisallowGarbageCollection no_gc;
+ FixedArray cache = *number_string_cache();
+ cache.set(hash * 2, *number);
+ cache.set(hash * 2 + 1, *js_string);
}
Handle<Object> Factory::NumberToStringCacheGet(Object number, int hash) {
DisallowGarbageCollection no_gc;
- Object key = number_string_cache()->get(hash * 2);
+ FixedArray cache = *number_string_cache();
+ Object key = cache.get(hash * 2);
if (key == number || (key.IsHeapNumber() && number.IsHeapNumber() &&
key.Number() == number.Number())) {
- return Handle<String>(
- String::cast(number_string_cache()->get(hash * 2 + 1)), isolate());
+ return Handle<String>(String::cast(cache.get(hash * 2 + 1)), isolate());
}
return undefined_value();
}
@@ -2876,11 +2960,15 @@ inline Handle<String> Factory::SmiToString(Smi number, NumberCacheMode mode) {
// Compute the hash here (rather than letting the caller take care of it) so
// that the "cache hit" case above doesn't have to bother with it.
STATIC_ASSERT(Smi::kMaxValue <= std::numeric_limits<uint32_t>::max());
- if (result->raw_hash_field() == String::kEmptyHashField &&
- number.value() >= 0) {
- uint32_t raw_hash_field = StringHasher::MakeArrayIndexHash(
- static_cast<uint32_t>(number.value()), result->length());
- result->set_raw_hash_field(raw_hash_field);
+ {
+ DisallowGarbageCollection no_gc;
+ String raw = *result;
+ if (raw.raw_hash_field() == String::kEmptyHashField &&
+ number.value() >= 0) {
+ uint32_t raw_hash_field = StringHasher::MakeArrayIndexHash(
+ static_cast<uint32_t>(number.value()), raw.length());
+ raw.set_raw_hash_field(raw_hash_field);
+ }
}
return result;
}
@@ -2912,67 +3000,76 @@ Handle<String> Factory::SizeToString(size_t value, bool check_cache) {
// No way to cache this; we'd need an {Object} to use as key.
result = NewStringFromAsciiChecked(string);
}
- if (value <= JSArray::kMaxArrayIndex &&
- result->raw_hash_field() == String::kEmptyHashField) {
- uint32_t raw_hash_field = StringHasher::MakeArrayIndexHash(
- static_cast<uint32_t>(value), result->length());
- result->set_raw_hash_field(raw_hash_field);
+ {
+ DisallowGarbageCollection no_gc;
+ String raw = *result;
+ if (value <= JSArray::kMaxArrayIndex &&
+ raw.raw_hash_field() == String::kEmptyHashField) {
+ uint32_t raw_hash_field = StringHasher::MakeArrayIndexHash(
+ static_cast<uint32_t>(value), raw.length());
+ raw.set_raw_hash_field(raw_hash_field);
+ }
}
return result;
}
Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
DCHECK(!shared->HasDebugInfo());
- Heap* heap = isolate()->heap();
- Handle<DebugInfo> debug_info =
- Handle<DebugInfo>::cast(NewStruct(DEBUG_INFO_TYPE, AllocationType::kOld));
- debug_info->set_flags(DebugInfo::kNone);
- debug_info->set_shared(*shared);
- debug_info->set_debugger_hints(0);
- DCHECK_EQ(DebugInfo::kNoDebuggingId, debug_info->debugging_id());
- debug_info->set_script(shared->script_or_debug_info(kAcquireLoad));
- debug_info->set_original_bytecode_array(ReadOnlyRoots(heap).undefined_value(),
- kReleaseStore);
- debug_info->set_debug_bytecode_array(ReadOnlyRoots(heap).undefined_value(),
- kReleaseStore);
- debug_info->set_break_points(ReadOnlyRoots(heap).empty_fixed_array());
+ DebugInfo debug_info =
+ DebugInfo::cast(NewStructInternal(DEBUG_INFO_TYPE, AllocationType::kOld));
+ DisallowGarbageCollection no_gc;
+ SharedFunctionInfo raw_shared = *shared;
+ debug_info.set_flags(DebugInfo::kNone);
+ debug_info.set_shared(raw_shared);
+ debug_info.set_debugger_hints(0);
+ DCHECK_EQ(DebugInfo::kNoDebuggingId, debug_info.debugging_id());
+ debug_info.set_script(raw_shared.script_or_debug_info(kAcquireLoad));
+ HeapObject undefined = *undefined_value();
+ debug_info.set_original_bytecode_array(undefined, kReleaseStore,
+ SKIP_WRITE_BARRIER);
+ debug_info.set_debug_bytecode_array(undefined, kReleaseStore,
+ SKIP_WRITE_BARRIER);
+ debug_info.set_break_points(*empty_fixed_array(), SKIP_WRITE_BARRIER);
// Link debug info to function.
- shared->SetDebugInfo(*debug_info);
+ raw_shared.SetDebugInfo(debug_info);
- return debug_info;
+ return handle(debug_info, isolate());
}
Handle<BreakPointInfo> Factory::NewBreakPointInfo(int source_position) {
- Handle<BreakPointInfo> new_break_point_info = Handle<BreakPointInfo>::cast(
- NewStruct(BREAK_POINT_INFO_TYPE, AllocationType::kOld));
- new_break_point_info->set_source_position(source_position);
- new_break_point_info->set_break_points(*undefined_value());
- return new_break_point_info;
+ BreakPointInfo new_break_point_info = BreakPointInfo::cast(
+ NewStructInternal(BREAK_POINT_INFO_TYPE, AllocationType::kOld));
+ DisallowGarbageCollection no_gc;
+ new_break_point_info.set_source_position(source_position);
+ new_break_point_info.set_break_points(*undefined_value(), SKIP_WRITE_BARRIER);
+ return handle(new_break_point_info, isolate());
}
Handle<BreakPoint> Factory::NewBreakPoint(int id, Handle<String> condition) {
- Handle<BreakPoint> new_break_point = Handle<BreakPoint>::cast(
- NewStruct(BREAK_POINT_TYPE, AllocationType::kOld));
- new_break_point->set_id(id);
- new_break_point->set_condition(*condition);
- return new_break_point;
+ BreakPoint new_break_point = BreakPoint::cast(
+ NewStructInternal(BREAK_POINT_TYPE, AllocationType::kOld));
+ DisallowGarbageCollection no_gc;
+ new_break_point.set_id(id);
+ new_break_point.set_condition(*condition);
+ return handle(new_break_point, isolate());
}
Handle<StackFrameInfo> Factory::NewStackFrameInfo(
Handle<Object> receiver_or_instance, Handle<Object> function,
Handle<HeapObject> code_object, int code_offset_or_source_position,
int flags, Handle<FixedArray> parameters) {
- Handle<StackFrameInfo> info =
- Handle<StackFrameInfo>::cast(NewStruct(STACK_FRAME_INFO_TYPE));
- info->set_receiver_or_instance(*receiver_or_instance);
- info->set_function(*function);
- info->set_code_object(*code_object);
- info->set_code_offset_or_source_position(code_offset_or_source_position);
- info->set_flags(flags);
- info->set_parameters(*parameters);
- return info;
+ StackFrameInfo info = StackFrameInfo::cast(
+ NewStructInternal(STACK_FRAME_INFO_TYPE, AllocationType::kYoung));
+ DisallowGarbageCollection no_gc;
+ info.set_receiver_or_instance(*receiver_or_instance, SKIP_WRITE_BARRIER);
+ info.set_function(*function, SKIP_WRITE_BARRIER);
+ info.set_code_object(*code_object, SKIP_WRITE_BARRIER);
+ info.set_code_offset_or_source_position(code_offset_or_source_position);
+ info.set_flags(flags);
+ info.set_parameters(*parameters, SKIP_WRITE_BARRIER);
+ return handle(info, isolate());
}
Handle<JSObject> Factory::NewArgumentsObject(Handle<JSFunction> callee,
@@ -3081,13 +3178,14 @@ Handle<StoreHandler> Factory::NewStoreHandler(int data_count) {
void Factory::SetRegExpAtomData(Handle<JSRegExp> regexp, Handle<String> source,
JSRegExp::Flags flags, Handle<Object> data) {
- Handle<FixedArray> store = NewFixedArray(JSRegExp::kAtomDataSize);
-
- store->set(JSRegExp::kTagIndex, Smi::FromInt(JSRegExp::ATOM));
- store->set(JSRegExp::kSourceIndex, *source);
- store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags));
- store->set(JSRegExp::kAtomPatternIndex, *data);
- regexp->set_data(*store);
+ FixedArray store =
+ *NewFixedArray(JSRegExp::kAtomDataSize, AllocationType::kYoung);
+ DisallowGarbageCollection no_gc;
+ store.set(JSRegExp::kTagIndex, Smi::FromInt(JSRegExp::ATOM));
+ store.set(JSRegExp::kSourceIndex, *source, SKIP_WRITE_BARRIER);
+ store.set(JSRegExp::kFlagsIndex, Smi::FromInt(flags));
+ store.set(JSRegExp::kAtomPatternIndex, *data, SKIP_WRITE_BARRIER);
+ regexp->set_data(store);
}
void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
@@ -3095,46 +3193,50 @@ void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
JSRegExp::Flags flags, int capture_count,
uint32_t backtrack_limit) {
DCHECK(Smi::IsValid(backtrack_limit));
- Handle<FixedArray> store = NewFixedArray(JSRegExp::kIrregexpDataSize);
+ FixedArray store =
+ *NewFixedArray(JSRegExp::kIrregexpDataSize, AllocationType::kYoung);
+ DisallowGarbageCollection no_gc;
Smi uninitialized = Smi::FromInt(JSRegExp::kUninitializedValue);
Smi ticks_until_tier_up = FLAG_regexp_tier_up
? Smi::FromInt(FLAG_regexp_tier_up_ticks)
: uninitialized;
- store->set(JSRegExp::kTagIndex, Smi::FromInt(JSRegExp::IRREGEXP));
- store->set(JSRegExp::kSourceIndex, *source);
- store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags));
- store->set(JSRegExp::kIrregexpLatin1CodeIndex, uninitialized);
- store->set(JSRegExp::kIrregexpUC16CodeIndex, uninitialized);
- store->set(JSRegExp::kIrregexpLatin1BytecodeIndex, uninitialized);
- store->set(JSRegExp::kIrregexpUC16BytecodeIndex, uninitialized);
- store->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::zero());
- store->set(JSRegExp::kIrregexpCaptureCountIndex, Smi::FromInt(capture_count));
- store->set(JSRegExp::kIrregexpCaptureNameMapIndex, uninitialized);
- store->set(JSRegExp::kIrregexpTicksUntilTierUpIndex, ticks_until_tier_up);
- store->set(JSRegExp::kIrregexpBacktrackLimit, Smi::FromInt(backtrack_limit));
- regexp->set_data(*store);
+ store.set(JSRegExp::kTagIndex, Smi::FromInt(JSRegExp::IRREGEXP));
+ store.set(JSRegExp::kSourceIndex, *source, SKIP_WRITE_BARRIER);
+ store.set(JSRegExp::kFlagsIndex, Smi::FromInt(flags));
+ store.set(JSRegExp::kIrregexpLatin1CodeIndex, uninitialized);
+ store.set(JSRegExp::kIrregexpUC16CodeIndex, uninitialized);
+ store.set(JSRegExp::kIrregexpLatin1BytecodeIndex, uninitialized);
+ store.set(JSRegExp::kIrregexpUC16BytecodeIndex, uninitialized);
+ store.set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::zero());
+ store.set(JSRegExp::kIrregexpCaptureCountIndex, Smi::FromInt(capture_count));
+ store.set(JSRegExp::kIrregexpCaptureNameMapIndex, uninitialized);
+ store.set(JSRegExp::kIrregexpTicksUntilTierUpIndex, ticks_until_tier_up);
+ store.set(JSRegExp::kIrregexpBacktrackLimit, Smi::FromInt(backtrack_limit));
+ regexp->set_data(store);
}
void Factory::SetRegExpExperimentalData(Handle<JSRegExp> regexp,
Handle<String> source,
JSRegExp::Flags flags,
int capture_count) {
- Handle<FixedArray> store = NewFixedArray(JSRegExp::kExperimentalDataSize);
+ FixedArray store =
+ *NewFixedArray(JSRegExp::kExperimentalDataSize, AllocationType::kYoung);
+ DisallowGarbageCollection no_gc;
Smi uninitialized = Smi::FromInt(JSRegExp::kUninitializedValue);
- store->set(JSRegExp::kTagIndex, Smi::FromInt(JSRegExp::EXPERIMENTAL));
- store->set(JSRegExp::kSourceIndex, *source);
- store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags));
- store->set(JSRegExp::kIrregexpLatin1CodeIndex, uninitialized);
- store->set(JSRegExp::kIrregexpUC16CodeIndex, uninitialized);
- store->set(JSRegExp::kIrregexpLatin1BytecodeIndex, uninitialized);
- store->set(JSRegExp::kIrregexpUC16BytecodeIndex, uninitialized);
- store->set(JSRegExp::kIrregexpMaxRegisterCountIndex, uninitialized);
- store->set(JSRegExp::kIrregexpCaptureCountIndex, Smi::FromInt(capture_count));
- store->set(JSRegExp::kIrregexpCaptureNameMapIndex, uninitialized);
- store->set(JSRegExp::kIrregexpTicksUntilTierUpIndex, uninitialized);
- store->set(JSRegExp::kIrregexpBacktrackLimit, uninitialized);
- regexp->set_data(*store);
+ store.set(JSRegExp::kTagIndex, Smi::FromInt(JSRegExp::EXPERIMENTAL));
+ store.set(JSRegExp::kSourceIndex, *source, SKIP_WRITE_BARRIER);
+ store.set(JSRegExp::kFlagsIndex, Smi::FromInt(flags));
+ store.set(JSRegExp::kIrregexpLatin1CodeIndex, uninitialized);
+ store.set(JSRegExp::kIrregexpUC16CodeIndex, uninitialized);
+ store.set(JSRegExp::kIrregexpLatin1BytecodeIndex, uninitialized);
+ store.set(JSRegExp::kIrregexpUC16BytecodeIndex, uninitialized);
+ store.set(JSRegExp::kIrregexpMaxRegisterCountIndex, uninitialized);
+ store.set(JSRegExp::kIrregexpCaptureCountIndex, Smi::FromInt(capture_count));
+ store.set(JSRegExp::kIrregexpCaptureNameMapIndex, uninitialized);
+ store.set(JSRegExp::kIrregexpTicksUntilTierUpIndex, uninitialized);
+ store.set(JSRegExp::kIrregexpBacktrackLimit, uninitialized);
+ regexp->set_data(store);
}
Handle<RegExpMatchInfo> Factory::NewRegExpMatchInfo() {
@@ -3143,15 +3245,18 @@ Handle<RegExpMatchInfo> Factory::NewRegExpMatchInfo() {
static const int kInitialSize = RegExpMatchInfo::kFirstCaptureIndex +
RegExpMatchInfo::kInitialCaptureIndices;
- Handle<FixedArray> elems = NewFixedArray(kInitialSize);
+ Handle<FixedArray> elems =
+ NewFixedArray(kInitialSize, AllocationType::kYoung);
Handle<RegExpMatchInfo> result = Handle<RegExpMatchInfo>::cast(elems);
-
- result->SetNumberOfCaptureRegisters(RegExpMatchInfo::kInitialCaptureIndices);
- result->SetLastSubject(*empty_string());
- result->SetLastInput(*undefined_value());
- result->SetCapture(0, 0);
- result->SetCapture(1, 0);
-
+ {
+ DisallowGarbageCollection no_gc;
+ RegExpMatchInfo raw = *result;
+ raw.SetNumberOfCaptureRegisters(RegExpMatchInfo::kInitialCaptureIndices);
+ raw.SetLastSubject(*empty_string(), SKIP_WRITE_BARRIER);
+ raw.SetLastInput(*undefined_value(), SKIP_WRITE_BARRIER);
+ raw.SetCapture(0, 0);
+ raw.SetCapture(1, 0);
+ }
return result;
}
@@ -3188,9 +3293,13 @@ Handle<Map> Factory::CreateSloppyFunctionMap(
Handle<Map> map = NewMap(
JS_FUNCTION_TYPE, header_size + inobject_properties_count * kTaggedSize,
TERMINAL_FAST_ELEMENTS_KIND, inobject_properties_count);
- map->set_has_prototype_slot(has_prototype);
- map->set_is_constructor(has_prototype);
- map->set_is_callable(true);
+ {
+ DisallowGarbageCollection no_gc;
+ Map raw_map = *map;
+ raw_map.set_has_prototype_slot(has_prototype);
+ raw_map.set_is_constructor(has_prototype);
+ raw_map.set_is_callable(true);
+ }
Handle<JSFunction> empty_function;
if (maybe_empty_function.ToHandle(&empty_function)) {
Map::SetPrototype(isolate(), map, empty_function);
@@ -3250,8 +3359,8 @@ Handle<Map> Factory::CreateSloppyFunctionMap(
map->AppendDescriptor(isolate(), &d);
}
DCHECK_EQ(inobject_properties_count, field_index);
- DCHECK_EQ(
- 0, map->instance_descriptors(kRelaxedLoad).number_of_slack_descriptors());
+ DCHECK_EQ(0,
+ map->instance_descriptors(isolate()).number_of_slack_descriptors());
LOG(isolate(), MapDetails(*map));
return map;
}
@@ -3274,9 +3383,13 @@ Handle<Map> Factory::CreateStrictFunctionMap(
Handle<Map> map = NewMap(
JS_FUNCTION_TYPE, header_size + inobject_properties_count * kTaggedSize,
TERMINAL_FAST_ELEMENTS_KIND, inobject_properties_count);
- map->set_has_prototype_slot(has_prototype);
- map->set_is_constructor(has_prototype);
- map->set_is_callable(true);
+ {
+ DisallowGarbageCollection no_gc;
+ Map raw_map = *map;
+ raw_map.set_has_prototype_slot(has_prototype);
+ raw_map.set_is_constructor(has_prototype);
+ raw_map.set_is_callable(true);
+ }
Map::SetPrototype(isolate(), map, empty_function);
//
@@ -3324,18 +3437,22 @@ Handle<Map> Factory::CreateStrictFunctionMap(
map->AppendDescriptor(isolate(), &d);
}
DCHECK_EQ(inobject_properties_count, field_index);
- DCHECK_EQ(
- 0, map->instance_descriptors(kRelaxedLoad).number_of_slack_descriptors());
+ DCHECK_EQ(0,
+ map->instance_descriptors(isolate()).number_of_slack_descriptors());
LOG(isolate(), MapDetails(*map));
return map;
}
Handle<Map> Factory::CreateClassFunctionMap(Handle<JSFunction> empty_function) {
Handle<Map> map = NewMap(JS_FUNCTION_TYPE, JSFunction::kSizeWithPrototype);
- map->set_has_prototype_slot(true);
- map->set_is_constructor(true);
- map->set_is_prototype_map(true);
- map->set_is_callable(true);
+ {
+ DisallowGarbageCollection no_gc;
+ Map raw_map = *map;
+ raw_map.set_has_prototype_slot(true);
+ raw_map.set_is_constructor(true);
+ raw_map.set_is_prototype_map(true);
+ raw_map.set_is_callable(true);
+ }
Map::SetPrototype(isolate(), map, empty_function);
//
@@ -3368,17 +3485,18 @@ Handle<Map> Factory::CreateClassFunctionMap(Handle<JSFunction> empty_function) {
Handle<JSPromise> Factory::NewJSPromiseWithoutHook() {
Handle<JSPromise> promise =
Handle<JSPromise>::cast(NewJSObject(isolate()->promise_function()));
- promise->set_reactions_or_result(Smi::zero());
- promise->set_flags(0);
- ZeroEmbedderFields(promise);
- DCHECK_EQ(promise->GetEmbedderFieldCount(), v8::Promise::kEmbedderFieldCount);
+ DisallowGarbageCollection no_gc;
+ JSPromise raw = *promise;
+ raw.set_reactions_or_result(Smi::zero(), SKIP_WRITE_BARRIER);
+ raw.set_flags(0);
+ ZeroEmbedderFields(*promise);
+ DCHECK_EQ(raw.GetEmbedderFieldCount(), v8::Promise::kEmbedderFieldCount);
return promise;
}
Handle<JSPromise> Factory::NewJSPromise() {
Handle<JSPromise> promise = NewJSPromiseWithoutHook();
- isolate()->RunAllPromiseHooks(PromiseHookType::kInit, promise,
- undefined_value());
+ isolate()->RunPromiseHook(PromiseHookType::kInit, promise, undefined_value());
return promise;
}
@@ -3386,13 +3504,13 @@ Handle<CallHandlerInfo> Factory::NewCallHandlerInfo(bool has_no_side_effect) {
Handle<Map> map = has_no_side_effect
? side_effect_free_call_handler_info_map()
: side_effect_call_handler_info_map();
- Handle<CallHandlerInfo> info(
- CallHandlerInfo::cast(New(map, AllocationType::kOld)), isolate());
- Object undefined_value = ReadOnlyRoots(isolate()).undefined_value();
- info->set_callback(undefined_value);
- info->set_js_callback(undefined_value);
- info->set_data(undefined_value);
- return info;
+ CallHandlerInfo info = CallHandlerInfo::cast(New(map, AllocationType::kOld));
+ DisallowGarbageCollection no_gc;
+ Object undefined_value = read_only_roots().undefined_value();
+ info.set_callback(undefined_value, SKIP_WRITE_BARRIER);
+ info.set_js_callback(undefined_value, SKIP_WRITE_BARRIER);
+ info.set_data(undefined_value, SKIP_WRITE_BARRIER);
+ return handle(info, isolate());
}
bool Factory::CanAllocateInReadOnlySpace() {
@@ -3450,26 +3568,29 @@ Handle<JSFunction> Factory::JSFunctionBuilder::BuildRaw(Handle<Code> code) {
DCHECK(InstanceTypeChecker::IsJSFunction(map->instance_type()));
// Allocation.
- Handle<JSFunction> function(
- JSFunction::cast(factory->New(map, allocation_type_)), isolate);
+ JSFunction function = JSFunction::cast(factory->New(map, allocation_type_));
+ DisallowGarbageCollection no_gc;
+ WriteBarrierMode mode = allocation_type_ == AllocationType::kYoung
+ ? SKIP_WRITE_BARRIER
+ : UPDATE_WRITE_BARRIER;
// Header initialization.
- function->initialize_properties(isolate);
- function->initialize_elements();
- function->set_shared(*sfi_);
- function->set_context(*context_);
- function->set_raw_feedback_cell(*feedback_cell);
- function->set_code(*code, kReleaseStore);
- if (map->has_prototype_slot()) {
- function->set_prototype_or_initial_map(
- ReadOnlyRoots(isolate).the_hole_value());
+ function.initialize_properties(isolate);
+ function.initialize_elements();
+ function.set_shared(*sfi_, mode);
+ function.set_context(*context_, mode);
+ function.set_raw_feedback_cell(*feedback_cell, mode);
+ function.set_code(*code, kReleaseStore, mode);
+ if (function.has_prototype_slot()) {
+ function.set_prototype_or_initial_map(
+ ReadOnlyRoots(isolate).the_hole_value(), SKIP_WRITE_BARRIER);
}
// Potentially body initialization.
factory->InitializeJSObjectBody(
- function, map, JSFunction::GetHeaderSize(map->has_prototype_slot()));
+ function, *map, JSFunction::GetHeaderSize(map->has_prototype_slot()));
- return function;
+ return handle(function, isolate_);
}
void Factory::JSFunctionBuilder::PrepareMap() {
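The factory.cc hunks above all apply one refactoring shape: allocate the object, open a DisallowGarbageCollection scope, initialize fields through an unhandlified raw object (`.` access instead of `->` on a Handle), and only wrap the result in a Handle at the very end, often with SKIP_WRITE_BARRIER for freshly allocated objects. The following is a minimal, self-contained sketch of that shape using hypothetical stand-in types (RawObject, NoGcScope, HandleSketch), not the real V8 classes.

#include <cassert>
#include <vector>

// Stand-in for an unhandlified heap object.
struct RawObject {
  int length = 0;
  std::vector<int> slots;
  void set_length(int n) { length = n; slots.assign(n, 0); }
};

// Stand-in for DisallowGarbageCollection: while alive, nothing below may
// trigger a GC, so raw references stay valid.
struct NoGcScope {};

// Stand-in for v8::internal::Handle<T>: an indirection to the object.
template <typename T>
struct HandleSketch {
  T* location;
  T* operator->() const { return location; }
};

HandleSketch<RawObject> NewInitializedObject(int len,
                                             std::vector<RawObject>& heap) {
  heap.emplace_back();           // "allocation" of the raw object
  RawObject& raw = heap.back();  // keep a raw reference, no handle yet
  NoGcScope no_gc;               // no allocation happens below this point
  (void)no_gc;
  raw.set_length(len);           // initialize through raw accessors
  return HandleSketch<RawObject>{&heap.back()};  // handlify only at the end
}

int main() {
  std::vector<RawObject> heap;
  HandleSketch<RawObject> h = NewInitializedObject(3, heap);
  assert(h->length == 3);
  return 0;
}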
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index b6fb9a29425..ebec483de47 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -99,7 +99,7 @@ enum class NumberCacheMode { kIgnore, kSetOnly, kBoth };
// Interface for handle based allocation.
class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
public:
- inline ReadOnlyRoots read_only_roots();
+ inline ReadOnlyRoots read_only_roots() const;
template <typename T>
Handle<T> MakeHandle(T obj) {
@@ -555,9 +555,18 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<JSModuleNamespace> NewJSModuleNamespace();
+#if V8_ENABLE_WEBASSEMBLY
Handle<WasmTypeInfo> NewWasmTypeInfo(Address type_address,
Handle<Map> opt_parent);
+ Handle<SharedFunctionInfo> NewSharedFunctionInfoForWasmExportedFunction(
+ Handle<String> name, Handle<WasmExportedFunctionData> data);
+ Handle<SharedFunctionInfo> NewSharedFunctionInfoForWasmJSFunction(
+ Handle<String> name, Handle<WasmJSFunctionData> data);
+ Handle<SharedFunctionInfo> NewSharedFunctionInfoForWasmCapiFunction(
+ Handle<WasmCapiFunctionData> data);
+#endif // V8_ENABLE_WEBASSEMBLY
+
Handle<SourceTextModule> NewSourceTextModule(Handle<SharedFunctionInfo> code);
Handle<SyntheticModule> NewSyntheticModule(
Handle<String> module_name, Handle<FixedArray> export_names,
@@ -683,13 +692,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
MaybeHandle<String> maybe_name,
Handle<FunctionTemplateInfo> function_template_info, FunctionKind kind);
- Handle<SharedFunctionInfo> NewSharedFunctionInfoForWasmExportedFunction(
- Handle<String> name, Handle<WasmExportedFunctionData> data);
- Handle<SharedFunctionInfo> NewSharedFunctionInfoForWasmJSFunction(
- Handle<String> name, Handle<WasmJSFunctionData> data);
- Handle<SharedFunctionInfo> NewSharedFunctionInfoForWasmCapiFunction(
- Handle<WasmCapiFunctionData> data);
-
Handle<SharedFunctionInfo> NewSharedFunctionInfoForBuiltin(
MaybeHandle<String> name, int builtin_index,
FunctionKind kind = kNormalFunction);
@@ -826,26 +828,29 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
}
CodeBuilder& set_builtin_index(int32_t builtin_index) {
+ DCHECK_IMPLIES(builtin_index != Builtins::kNoBuiltinId,
+ !CodeKindIsJSFunction(kind_));
builtin_index_ = builtin_index;
return *this;
}
CodeBuilder& set_inlined_bytecode_size(uint32_t size) {
+ DCHECK_IMPLIES(size != 0, CodeKindIsOptimizedJSFunction(kind_));
inlined_bytecode_size_ = size;
return *this;
}
CodeBuilder& set_source_position_table(Handle<ByteArray> table) {
+ DCHECK_NE(kind_, CodeKind::BASELINE);
DCHECK(!table.is_null());
- source_position_table_ = table;
+ position_table_ = table;
return *this;
}
CodeBuilder& set_bytecode_offset_table(Handle<ByteArray> table) {
+ DCHECK_EQ(kind_, CodeKind::BASELINE);
DCHECK(!table.is_null());
- // TODO(v8:11429): Rename this and clean up calls to SourcePositionTable
- // under Baseline.
- source_position_table_ = table;
+ position_table_ = table;
return *this;
}
@@ -857,11 +862,13 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
}
CodeBuilder& set_is_turbofanned() {
+ DCHECK(!CodeKindIsUnoptimizedJSFunction(kind_));
is_turbofanned_ = true;
return *this;
}
CodeBuilder& set_is_executable(bool executable) {
+ DCHECK_EQ(kind_, CodeKind::BUILTIN);
is_executable_ = executable;
return *this;
}
@@ -896,8 +903,9 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
int32_t builtin_index_ = Builtins::kNoBuiltinId;
uint32_t inlined_bytecode_size_ = 0;
int32_t kind_specific_flags_ = 0;
- // Contains bytecode offset table for baseline
- Handle<ByteArray> source_position_table_;
+ // Either source_position_table for non-baseline code
+ // or bytecode_offset_table for baseline code.
+ Handle<ByteArray> position_table_;
Handle<DeoptimizationData> deoptimization_data_ =
DeoptimizationData::Empty(isolate_);
BasicBlockProfilerData* profiler_data_ = nullptr;
@@ -915,7 +923,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
HeapObject AllocateRaw(int size, AllocationType allocation,
AllocationAlignment alignment = kWordAligned);
- Isolate* isolate() {
+ Isolate* isolate() const {
// Downcast to the privately inherited sub-class using c-style casts to
// avoid undefined behavior (as static_cast cannot cast across private
// bases).
@@ -936,10 +944,12 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<Map> map, Handle<FixedArrayBase> elements,
Handle<JSArrayBuffer> buffer, size_t byte_offset, size_t byte_length);
+ Symbol NewSymbolInternal(AllocationType allocation = AllocationType::kOld);
+
// Allocates new context with given map, sets length and initializes the
// after-header part with uninitialized values and leaves the context header
// uninitialized.
- Handle<Context> NewContext(Handle<Map> map, int size,
+ Context NewContextInternal(Handle<Map> map, int size,
int variadic_part_length,
AllocationType allocation);
@@ -993,11 +1003,9 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
AllocationSite allocation_site);
// Initializes a JSObject based on its map.
- void InitializeJSObjectFromMap(Handle<JSObject> obj,
- Handle<Object> properties, Handle<Map> map);
+ void InitializeJSObjectFromMap(JSObject obj, Object properties, Map map);
// Initializes JSObject body starting at given offset.
- void InitializeJSObjectBody(Handle<JSObject> obj, Handle<Map> map,
- int start_offset);
+ void InitializeJSObjectBody(JSObject obj, Map map, int start_offset);
Handle<WeakArrayList> NewUninitializedWeakArrayList(
int capacity, AllocationType allocation = AllocationType::kYoung);
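In the factory.h hunks above, CodeBuilder gains kind-specific DCHECK guards on its setters and folds source_position_table_ into a single position_table_ field that holds either the source-position table or, for baseline code, the bytecode-offset table. Below is a small standalone sketch of that setter shape; the names (CodeBuilderSketch, plain assert, an int in place of Handle<ByteArray>) are hypothetical stand-ins, not the real API.

#include <cassert>

enum class CodeKind { BUILTIN, BASELINE, TURBOFAN };

class CodeBuilderSketch {
 public:
  explicit CodeBuilderSketch(CodeKind kind) : kind_(kind) {}

  CodeBuilderSketch& set_source_position_table(int table_id) {
    assert(kind_ != CodeKind::BASELINE);  // baseline code uses the offset table
    position_table_ = table_id;
    return *this;
  }

  CodeBuilderSketch& set_bytecode_offset_table(int table_id) {
    assert(kind_ == CodeKind::BASELINE);  // only meaningful for baseline code
    position_table_ = table_id;           // same backing field either way
    return *this;
  }

  int position_table() const { return position_table_; }

 private:
  CodeKind kind_;
  int position_table_ = -1;  // stands in for Handle<ByteArray>
};

int main() {
  CodeBuilderSketch baseline(CodeKind::BASELINE);
  baseline.set_bytecode_offset_table(42);
  assert(baseline.position_table() == 42);
  return 0;
}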
diff --git a/deps/v8/src/heap/gc-idle-time-handler.cc b/deps/v8/src/heap/gc-idle-time-handler.cc
index ea428125855..e6ffbe17963 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.cc
+++ b/deps/v8/src/heap/gc-idle-time-handler.cc
@@ -12,11 +12,8 @@ namespace v8 {
namespace internal {
const double GCIdleTimeHandler::kConservativeTimeRatio = 0.9;
-const double GCIdleTimeHandler::kHighContextDisposalRate = 100;
void GCIdleTimeHeapState::Print() {
- PrintF("contexts_disposed=%d ", contexts_disposed);
- PrintF("contexts_disposal_rate=%f ", contexts_disposal_rate);
PrintF("size_of_objects=%zu ", size_of_objects);
PrintF("incremental_marking_stopped=%d ", incremental_marking_stopped);
}
@@ -36,14 +33,6 @@ size_t GCIdleTimeHandler::EstimateMarkingStepSize(
return static_cast<size_t>(marking_step_size * kConservativeTimeRatio);
}
-bool GCIdleTimeHandler::ShouldDoContextDisposalMarkCompact(
- int contexts_disposed, double contexts_disposal_rate,
- size_t size_of_objects) {
- return contexts_disposed > 0 && contexts_disposal_rate > 0 &&
- contexts_disposal_rate < kHighContextDisposalRate &&
- size_of_objects <= kMaxHeapSizeForContextDisposalMarkCompact;
-}
-
// The following logic is implemented by the controller:
// (1) If we don't have any idle time, do nothing, unless a context was
// disposed, incremental marking is stopped, and the heap is small. Then do
@@ -54,13 +43,6 @@ bool GCIdleTimeHandler::ShouldDoContextDisposalMarkCompact(
GCIdleTimeAction GCIdleTimeHandler::Compute(double idle_time_in_ms,
GCIdleTimeHeapState heap_state) {
if (static_cast<int>(idle_time_in_ms) <= 0) {
- if (heap_state.incremental_marking_stopped) {
- if (ShouldDoContextDisposalMarkCompact(heap_state.contexts_disposed,
- heap_state.contexts_disposal_rate,
- heap_state.size_of_objects)) {
- return GCIdleTimeAction::kFullGC;
- }
- }
return GCIdleTimeAction::kDone;
}
diff --git a/deps/v8/src/heap/gc-idle-time-handler.h b/deps/v8/src/heap/gc-idle-time-handler.h
index b78b386db2e..163318f28b2 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.h
+++ b/deps/v8/src/heap/gc-idle-time-handler.h
@@ -13,15 +13,12 @@ namespace internal {
enum class GCIdleTimeAction : uint8_t {
kDone,
kIncrementalStep,
- kFullGC,
};
class GCIdleTimeHeapState {
public:
void Print();
- int contexts_disposed;
- double contexts_disposal_rate;
size_t size_of_objects;
bool incremental_marking_stopped;
};
@@ -46,11 +43,6 @@ class V8_EXPORT_PRIVATE GCIdleTimeHandler {
// 16.66 ms when there is currently no rendering going on.
static const size_t kMaxScheduledIdleTime = 50;
- static const size_t kMaxHeapSizeForContextDisposalMarkCompact = 100 * MB;
-
- // If contexts are disposed at a higher rate a full gc is triggered.
- static const double kHighContextDisposalRate;
-
GCIdleTimeHandler() = default;
GCIdleTimeHandler(const GCIdleTimeHandler&) = delete;
GCIdleTimeHandler& operator=(const GCIdleTimeHandler&) = delete;
@@ -65,10 +57,6 @@ class V8_EXPORT_PRIVATE GCIdleTimeHandler {
static double EstimateFinalIncrementalMarkCompactTime(
size_t size_of_objects, double mark_compact_speed_in_bytes_per_ms);
-
- static bool ShouldDoContextDisposalMarkCompact(int context_disposed,
- double contexts_disposal_rate,
- size_t size_of_objects);
};
} // namespace internal
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index be7d0ea0fe9..b4f86cc2a15 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -208,7 +208,6 @@ void GCTracer::ResetForTesting() {
recorded_new_generation_allocations_.Reset();
recorded_old_generation_allocations_.Reset();
recorded_embedder_generation_allocations_.Reset();
- recorded_context_disposal_times_.Reset();
recorded_survival_ratios_.Reset();
start_counter_ = 0;
average_mutator_duration_ = 0;
@@ -472,11 +471,6 @@ void GCTracer::AddAllocation(double current_ms) {
embedder_allocation_in_bytes_since_gc_ = 0;
}
-
-void GCTracer::AddContextDisposalTime(double time) {
- recorded_context_disposal_times_.Push(time);
-}
-
void GCTracer::AddCompactionEvent(double duration,
size_t live_bytes_compacted) {
recorded_compactions_.Push(
@@ -612,8 +606,7 @@ void GCTracer::PrintNVP() const {
"promotion_rate=%.1f%% "
"semi_space_copy_rate=%.1f%% "
"new_space_allocation_throughput=%.1f "
- "unmapper_chunks=%d "
- "context_disposal_rate=%.1f\n",
+ "unmapper_chunks=%d\n",
duration, spent_in_mutator, current_.TypeName(true),
current_.reduce_memory, current_.scopes[Scope::TIME_TO_SAFEPOINT],
current_.scopes[Scope::HEAP_PROLOGUE],
@@ -651,8 +644,7 @@ void GCTracer::PrintNVP() const {
AverageSurvivalRatio(), heap_->promotion_rate_,
heap_->semi_space_copied_rate_,
NewSpaceAllocationThroughputInBytesPerMillisecond(),
- heap_->memory_allocator()->unmapper()->NumberOfChunks(),
- ContextDisposalRateInMilliseconds());
+ heap_->memory_allocator()->unmapper()->NumberOfChunks());
break;
case Event::MINOR_MARK_COMPACTOR:
heap_->isolate()->PrintWithTimestamp(
@@ -804,7 +796,6 @@ void GCTracer::PrintNVP() const {
"semi_space_copy_rate=%.1f%% "
"new_space_allocation_throughput=%.1f "
"unmapper_chunks=%d "
- "context_disposal_rate=%.1f "
"compaction_speed=%.f\n",
duration, spent_in_mutator, current_.TypeName(true),
current_.reduce_memory, current_.scopes[Scope::TIME_TO_SAFEPOINT],
@@ -896,7 +887,6 @@ void GCTracer::PrintNVP() const {
heap_->semi_space_copied_rate_,
NewSpaceAllocationThroughputInBytesPerMillisecond(),
heap_->memory_allocator()->unmapper()->NumberOfChunks(),
- ContextDisposalRateInMilliseconds(),
CompactionSpeedInBytesPerMillisecond());
break;
case Event::START:
@@ -1118,16 +1108,6 @@ double GCTracer::CurrentEmbedderAllocationThroughputInBytesPerMillisecond()
kThroughputTimeFrameMs);
}
-double GCTracer::ContextDisposalRateInMilliseconds() const {
- if (recorded_context_disposal_times_.Count() <
- recorded_context_disposal_times_.kSize)
- return 0.0;
- double begin = heap_->MonotonicallyIncreasingTimeInMs();
- double end = recorded_context_disposal_times_.Sum(
- [](double a, double b) { return b; }, 0.0);
- return (begin - end) / recorded_context_disposal_times_.Count();
-}
-
double GCTracer::AverageSurvivalRatio() const {
if (recorded_survival_ratios_.Count() == 0) return 0.0;
double sum = recorded_survival_ratios_.Sum(
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index 5cad6ef50f4..011889ba66e 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -221,8 +221,6 @@ class V8_EXPORT_PRIVATE GCTracer {
// Log the accumulated new space allocation bytes.
void AddAllocation(double current_ms);
- void AddContextDisposalTime(double time);
-
void AddCompactionEvent(double duration, size_t live_bytes_compacted);
void AddSurvivalRatio(double survival_ratio);
@@ -297,12 +295,6 @@ class V8_EXPORT_PRIVATE GCTracer {
// Returns 0 if no allocation events have been recorded.
double CurrentEmbedderAllocationThroughputInBytesPerMillisecond() const;
- // Computes the context disposal rate in milliseconds. It takes the time
- // frame of the first recorded context disposal to the current time and
- // divides it by the number of recorded events.
- // Returns 0 if no events have been recorded.
- double ContextDisposalRateInMilliseconds() const;
-
// Computes the average survival ratio based on the last recorded survival
// events.
// Returns 0 if no events have been recorded.
@@ -479,7 +471,6 @@ class V8_EXPORT_PRIVATE GCTracer {
base::RingBuffer<BytesAndDuration> recorded_new_generation_allocations_;
base::RingBuffer<BytesAndDuration> recorded_old_generation_allocations_;
base::RingBuffer<BytesAndDuration> recorded_embedder_generation_allocations_;
- base::RingBuffer<double> recorded_context_disposal_times_;
base::RingBuffer<double> recorded_survival_ratios_;
base::Mutex background_counter_mutex_;
diff --git a/deps/v8/src/heap/heap-write-barrier.cc b/deps/v8/src/heap/heap-write-barrier.cc
index e600c9cebbc..63949de2433 100644
--- a/deps/v8/src/heap/heap-write-barrier.cc
+++ b/deps/v8/src/heap/heap-write-barrier.cc
@@ -18,6 +18,11 @@ namespace {
thread_local MarkingBarrier* current_marking_barrier = nullptr;
} // namespace
+MarkingBarrier* WriteBarrier::CurrentMarkingBarrier(Heap* heap) {
+ return current_marking_barrier ? current_marking_barrier
+ : heap->marking_barrier();
+}
+
void WriteBarrier::SetForThread(MarkingBarrier* marking_barrier) {
DCHECK_NULL(current_marking_barrier);
current_marking_barrier = marking_barrier;
diff --git a/deps/v8/src/heap/heap-write-barrier.h b/deps/v8/src/heap/heap-write-barrier.h
index c510c069fe2..86e0335f4e8 100644
--- a/deps/v8/src/heap/heap-write-barrier.h
+++ b/deps/v8/src/heap/heap-write-barrier.h
@@ -58,6 +58,8 @@ class V8_EXPORT_PRIVATE WriteBarrier {
static void SetForThread(MarkingBarrier*);
static void ClearForThread(MarkingBarrier*);
+ static MarkingBarrier* CurrentMarkingBarrier(Heap* heap);
+
private:
static void MarkingSlow(Heap* heap, HeapObject host, HeapObjectSlot,
HeapObject value);
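The new WriteBarrier::CurrentMarkingBarrier helper selects which barrier a write should be recorded into: the thread-local barrier installed via SetForThread on a background thread, or the heap's main-thread barrier otherwise. A minimal self-contained sketch of that lookup pattern, with stand-in types, assuming nothing beyond what the patch shows:

    struct MarkingBarrier { /* stand-in for v8::internal::MarkingBarrier */ };

    struct Heap {
      MarkingBarrier* main_thread_barrier = nullptr;
      MarkingBarrier* marking_barrier() { return main_thread_barrier; }
    };

    // One barrier pointer per thread; background threads set it while they
    // are attached, the main thread leaves it null.
    thread_local MarkingBarrier* current_marking_barrier = nullptr;

    MarkingBarrier* CurrentMarkingBarrier(Heap* heap) {
      // Prefer the caller's own barrier; fall back to the heap-wide one so
      // the same call works from both main and background threads.
      return current_marking_barrier ? current_marking_barrier
                                     : heap->marking_barrier();
    }

Heap::WriteBarrierForRangeImpl in the heap.cc hunk below switches to this helper, so range write barriers taken on a background thread record into that thread's local barrier instead of the main-thread one.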
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 21173cb0b48..af55137e1f6 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -22,6 +22,7 @@
#include "src/codegen/compilation-cache.h"
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
+#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/isolate-utils-inl.h"
@@ -289,7 +290,7 @@ size_t Heap::MinOldGenerationSize() {
size_t Heap::AllocatorLimitOnMaxOldGenerationSize() {
#ifdef V8_COMPRESS_POINTERS
// Isolate and the young generation are also allocated on the heap.
- return kPtrComprHeapReservationSize -
+ return kPtrComprCageReservationSize -
YoungGenerationSizeFromSemiSpaceSize(kMaxSemiSpaceSize) -
RoundUp(sizeof(Isolate), size_t{1} << kPageSizeBits);
#endif
@@ -409,11 +410,13 @@ bool Heap::CanExpandOldGeneration(size_t size) {
return memory_allocator()->Size() + size <= MaxReserved();
}
-bool Heap::CanExpandOldGenerationBackground(size_t size) {
+bool Heap::CanExpandOldGenerationBackground(LocalHeap* local_heap,
+ size_t size) {
if (force_oom_) return false;
+
// When the heap is tearing down, then GC requests from background threads
// are not served and the threads are allowed to expand the heap to avoid OOM.
- return gc_state() == TEAR_DOWN ||
+ return gc_state() == TEAR_DOWN || IsMainThreadParked(local_heap) ||
memory_allocator()->Size() + size <= MaxReserved();
}
@@ -1177,6 +1180,15 @@ void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
ReduceNewSpaceSize();
}
+ // Set main thread state back to Running from CollectionRequested.
+ LocalHeap* main_thread_local_heap = isolate()->main_thread_local_heap();
+
+ LocalHeap::ThreadState old_state =
+ main_thread_local_heap->state_.exchange(LocalHeap::kRunning);
+
+ CHECK(old_state == LocalHeap::kRunning ||
+ old_state == LocalHeap::kCollectionRequested);
+
// Resume all threads waiting for the GC.
collection_barrier_->ResumeThreadsAwaitingCollection();
}
@@ -1524,6 +1536,14 @@ Heap::DevToolsTraceEventScope::~DevToolsTraceEventScope() {
bool Heap::CollectGarbage(AllocationSpace space,
GarbageCollectionReason gc_reason,
const v8::GCCallbackFlags gc_callback_flags) {
+ if (V8_UNLIKELY(!deserialization_complete_)) {
+ // During isolate initialization heap always grows. GC is only requested
+ // if a new page allocation fails. In such a case we should crash with
+ // an out-of-memory instead of performing GC because the prologue/epilogue
+ // callbacks may see objects that are not yet deserialized.
+ CHECK(always_allocate());
+ FatalProcessOutOfMemory("GC during deserialization");
+ }
const char* collector_reason = nullptr;
GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced ||
@@ -1731,8 +1751,6 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
isolate()->raw_native_context().set_retained_maps(
ReadOnlyRoots(this).empty_weak_array_list());
}
-
- tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
return ++contexts_disposed_;
}
@@ -1936,18 +1954,15 @@ bool Heap::CollectionRequested() {
return collection_barrier_->CollectionRequested();
}
-void Heap::RequestCollectionBackground(LocalHeap* local_heap) {
- if (local_heap->is_main_thread()) {
- CollectAllGarbage(current_gc_flags_,
- GarbageCollectionReason::kBackgroundAllocationFailure,
- current_gc_callback_flags_);
- } else {
- collection_barrier_->AwaitCollectionBackground();
- }
+void Heap::CollectGarbageForBackground(LocalHeap* local_heap) {
+ CHECK(local_heap->is_main_thread());
+ CollectAllGarbage(current_gc_flags_,
+ GarbageCollectionReason::kBackgroundAllocationFailure,
+ current_gc_callback_flags_);
}
void Heap::CheckCollectionRequested() {
- if (!collection_barrier_->CollectionRequested()) return;
+ if (!CollectionRequested()) return;
CollectAllGarbage(current_gc_flags_,
GarbageCollectionReason::kBackgroundAllocationFailure,
@@ -2005,14 +2020,12 @@ size_t Heap::PerformGarbageCollection(
// cycle.
UpdateCurrentEpoch(collector);
- // Stop time-to-collection timer before safepoint - we do not want to measure
- // time for safepointing.
- collection_barrier_->StopTimeToCollectionTimer();
-
TRACE_GC_EPOCH(tracer(), CollectorScopeId(collector), ThreadKind::kMain);
SafepointScope safepoint_scope(this);
+ collection_barrier_->StopTimeToCollectionTimer();
+
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
@@ -2238,6 +2251,7 @@ void Heap::MarkCompact() {
mark_compact_collector()->Prepare();
ms_count_++;
+ contexts_disposed_ = 0;
MarkCompactPrologue();
@@ -3006,6 +3020,12 @@ bool Heap::CanMoveObjectStart(HeapObject object) {
if (IsLargeObject(object)) return false;
+ // Compilation jobs may have references to the object.
+ if (isolate()->concurrent_recompilation_enabled() &&
+ isolate()->optimizing_compile_dispatcher()->HasJobs()) {
+ return false;
+ }
+
// We can move the object start if the page was already swept.
return Page::FromHeapObject(object)->SweepingDone();
}
@@ -3575,28 +3595,30 @@ void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
// use object->set_map_after_allocation() to initialize its map.
if (pending_layout_change_object_.is_null()) {
if (object.IsJSObject()) {
- DCHECK(!object.map().TransitionRequiresSynchronizationWithGC(new_map));
- } else if (object.IsString() &&
- (new_map == ReadOnlyRoots(this).thin_string_map() ||
- new_map == ReadOnlyRoots(this).thin_one_byte_string_map())) {
+ // Without double unboxing all in-object fields of a JSObject are tagged.
+ return;
+ }
+ if (object.IsString() &&
+ (new_map == ReadOnlyRoots(this).thin_string_map() ||
+ new_map == ReadOnlyRoots(this).thin_one_byte_string_map())) {
// When transitioning a string to ThinString,
// Heap::NotifyObjectLayoutChange doesn't need to be invoked because only
// tagged fields are introduced.
- } else {
- // Check that the set of slots before and after the transition match.
- SlotCollectingVisitor old_visitor;
- object.IterateFast(&old_visitor);
- MapWord old_map_word = object.map_word();
- // Temporarily set the new map to iterate new slots.
- object.set_map_word(MapWord::FromMap(new_map));
- SlotCollectingVisitor new_visitor;
- object.IterateFast(&new_visitor);
- // Restore the old map.
- object.set_map_word(old_map_word);
- DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
- for (int i = 0; i < new_visitor.number_of_slots(); i++) {
- DCHECK(new_visitor.slot(i) == old_visitor.slot(i));
- }
+ return;
+ }
+ // Check that the set of slots before and after the transition match.
+ SlotCollectingVisitor old_visitor;
+ object.IterateFast(&old_visitor);
+ MapWord old_map_word = object.map_word();
+ // Temporarily set the new map to iterate new slots.
+ object.set_map_word(MapWord::FromMap(new_map));
+ SlotCollectingVisitor new_visitor;
+ object.IterateFast(&new_visitor);
+ // Restore the old map.
+ object.set_map_word(old_map_word);
+ DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
+ for (int i = 0; i < new_visitor.number_of_slots(); i++) {
+ DCHECK_EQ(new_visitor.slot(i), old_visitor.slot(i));
}
} else {
DCHECK_EQ(pending_layout_change_object_, object);
@@ -3607,9 +3629,6 @@ void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
GCIdleTimeHeapState Heap::ComputeHeapState() {
GCIdleTimeHeapState heap_state;
- heap_state.contexts_disposed = contexts_disposed_;
- heap_state.contexts_disposal_rate =
- tracer()->ContextDisposalRateInMilliseconds();
heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
return heap_state;
@@ -3632,13 +3651,6 @@ bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
result = incremental_marking()->IsStopped();
break;
}
- case GCIdleTimeAction::kFullGC: {
- DCHECK_LT(0, contexts_disposed_);
- HistogramTimerScope scope(isolate_->counters()->gc_context());
- TRACE_EVENT0("v8", "V8.GCContext");
- CollectAllGarbage(kNoGCFlags, GarbageCollectionReason::kContextDisposal);
- break;
- }
}
return result;
@@ -3652,8 +3664,6 @@ void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
last_idle_notification_time_ = current_time;
double deadline_difference = deadline_in_ms - current_time;
- contexts_disposed_ = 0;
-
if (FLAG_trace_idle_notification) {
isolate_->PrintWithTimestamp(
"Idle notification: requested idle time %.2f ms, used idle time %.2f "
@@ -3667,9 +3677,6 @@ void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
case GCIdleTimeAction::kIncrementalStep:
PrintF("incremental step");
break;
- case GCIdleTimeAction::kFullGC:
- PrintF("full GC");
- break;
}
PrintF("]");
if (FLAG_trace_idle_notification_verbose) {
@@ -3711,12 +3718,9 @@ bool Heap::IdleNotification(double deadline_in_seconds) {
EmbedderAllocationCounter());
GCIdleTimeHeapState heap_state = ComputeHeapState();
-
GCIdleTimeAction action =
gc_idle_time_handler_->Compute(idle_time_in_ms, heap_state);
-
bool result = PerformIdleTimeAction(action, heap_state, deadline_in_ms);
-
IdleNotificationEpilogue(action, heap_state, start_ms, deadline_in_ms);
return result;
}
@@ -4488,7 +4492,7 @@ void Heap::IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
if (!options.contains(SkipRoot::kStack)) {
IterateStackRoots(v);
- v->Synchronize(VisitorSynchronization::kTop);
+ v->Synchronize(VisitorSynchronization::kStackRoots);
}
// Iterate over local handles in handle scopes.
@@ -4881,7 +4885,11 @@ bool Heap::ShouldExpandOldGenerationOnSlowAllocation(LocalHeap* local_heap) {
// was initiated.
if (gc_state() == TEAR_DOWN) return true;
- // Ensure that retry of allocation on background thread succeeds
+ // If main thread is parked, it can't perform the GC. Fix the deadlock by
+ // allowing the allocation.
+ if (IsMainThreadParked(local_heap)) return true;
+
+ // Make it more likely that retry of allocation on background thread succeeds
if (IsRetryOfFailedAllocation(local_heap)) return true;
// Background thread requested GC, allocation should fail
@@ -4908,6 +4916,11 @@ bool Heap::IsRetryOfFailedAllocation(LocalHeap* local_heap) {
return local_heap->allocation_failed_;
}
+bool Heap::IsMainThreadParked(LocalHeap* local_heap) {
+ if (!local_heap) return false;
+ return local_heap->main_thread_parked_;
+}
+
Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
if (ShouldReduceMemory() || FLAG_stress_compaction) {
return Heap::HeapGrowingMode::kMinimal;
@@ -5186,6 +5199,49 @@ void Heap::ReplaceReadOnlySpace(SharedReadOnlySpace* space) {
read_only_space_ = space;
}
+uint8_t* Heap::RemapEmbeddedBuiltinsIntoCodeRange(
+ const uint8_t* embedded_blob_code, size_t embedded_blob_code_size) {
+ const base::AddressRegion& code_range = memory_allocator()->code_range();
+
+ CHECK_NE(code_range.begin(), kNullAddress);
+ CHECK(!code_range.is_empty());
+
+ v8::PageAllocator* code_page_allocator =
+ memory_allocator()->code_page_allocator();
+
+ const size_t kAllocatePageSize = code_page_allocator->AllocatePageSize();
+ size_t allocate_code_size =
+ RoundUp(embedded_blob_code_size, kAllocatePageSize);
+
+ // Allocate the re-embedded code blob in the end.
+ void* hint = reinterpret_cast<void*>(code_range.end() - allocate_code_size);
+
+ void* embedded_blob_copy = code_page_allocator->AllocatePages(
+ hint, allocate_code_size, kAllocatePageSize, PageAllocator::kNoAccess);
+
+ if (!embedded_blob_copy) {
+ V8::FatalProcessOutOfMemory(
+ isolate(), "Can't allocate space for re-embedded builtins");
+ }
+
+ size_t code_size =
+ RoundUp(embedded_blob_code_size, code_page_allocator->CommitPageSize());
+
+ if (!code_page_allocator->SetPermissions(embedded_blob_copy, code_size,
+ PageAllocator::kReadWrite)) {
+ V8::FatalProcessOutOfMemory(isolate(),
+ "Re-embedded builtins: set permissions");
+ }
+ memcpy(embedded_blob_copy, embedded_blob_code, embedded_blob_code_size);
+
+ if (!code_page_allocator->SetPermissions(embedded_blob_copy, code_size,
+ PageAllocator::kReadExecute)) {
+ V8::FatalProcessOutOfMemory(isolate(),
+ "Re-embedded builtins: set permissions");
+ }
+ return reinterpret_cast<uint8_t*>(embedded_blob_copy);
+}
+
class StressConcurrentAllocationObserver : public AllocationObserver {
public:
explicit StressConcurrentAllocationObserver(Heap* heap)
@@ -5427,7 +5483,7 @@ void Heap::StartTearDown() {
// process the event queue anymore. Avoid this deadlock by allowing all
// allocations after tear down was requested to make sure all background
// threads finish.
- collection_barrier_->ShutdownRequested();
+ collection_barrier_->NotifyShutdownRequested();
#ifdef VERIFY_HEAP
// {StartTearDown} is called fairly early during Isolate teardown, so it's
@@ -6228,6 +6284,8 @@ MaybeHandle<JSFinalizationRegistry> Heap::DequeueDirtyJSFinalizationRegistry() {
}
void Heap::RemoveDirtyFinalizationRegistriesOnContext(NativeContext context) {
+ if (!FLAG_harmony_weak_refs) return;
+
DisallowGarbageCollection no_gc;
Isolate* isolate = this->isolate();
@@ -6257,6 +6315,7 @@ void Heap::RemoveDirtyFinalizationRegistriesOnContext(NativeContext context) {
}
void Heap::KeepDuringJob(Handle<JSReceiver> target) {
+ DCHECK(FLAG_harmony_weak_refs);
DCHECK(weak_refs_keep_during_job().IsUndefined() ||
weak_refs_keep_during_job().IsOrderedHashSet());
Handle<OrderedHashSet> table;
@@ -6490,15 +6549,23 @@ Code Heap::GcSafeCastToCode(HeapObject object, Address inner_pointer) {
bool Heap::GcSafeCodeContains(Code code, Address addr) {
Map map = GcSafeMapOfCodeSpaceObject(code);
DCHECK(map == ReadOnlyRoots(this).code_map());
- if (InstructionStream::TryLookupCode(isolate(), addr) == code) return true;
+ Builtins::Name maybe_builtin =
+ InstructionStream::TryLookupCode(isolate(), addr);
+ if (Builtins::IsBuiltinId(maybe_builtin) &&
+ code.builtin_index() == maybe_builtin) {
+ return true;
+ }
Address start = code.address();
Address end = code.address() + code.SizeFromMap(map);
return start <= addr && addr < end;
}
Code Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
- Code code = InstructionStream::TryLookupCode(isolate(), inner_pointer);
- if (!code.is_null()) return code;
+ Builtins::Name maybe_builtin =
+ InstructionStream::TryLookupCode(isolate(), inner_pointer);
+ if (Builtins::IsBuiltinId(maybe_builtin)) {
+ return builtin(maybe_builtin);
+ }
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
Address start = tp_heap_->GetObjectFromInnerPointer(inner_pointer);
@@ -6596,7 +6663,7 @@ void Heap::WriteBarrierForRangeImpl(MemoryChunk* source_page, HeapObject object,
STATIC_ASSERT(!(kModeMask & kDoEvacuationSlotRecording) ||
(kModeMask & kDoMarking));
- MarkingBarrier* marking_barrier = this->marking_barrier();
+ MarkingBarrier* marking_barrier = WriteBarrier::CurrentMarkingBarrier(this);
MarkCompactCollector* collector = this->mark_compact_collector();
for (TSlot slot = start_slot; slot < end_slot; ++slot) {
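The new Heap::RemapEmbeddedBuiltinsIntoCodeRange above follows a common remap pattern: reserve pages near the end of the code range, make them writable, copy the embedded builtins blob in, then flip the pages to read-execute. The sketch below shows the same sequence against a generic POSIX mmap/mprotect interface; it is an illustration of the technique under that assumption, not the V8 implementation, which goes through its PageAllocator, places the copy at the end of its code range, and aborts on failure instead of returning null.

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Copy a code blob into fresh pages and make them executable.
    // Returns nullptr on failure, unlike the V8 version, which calls
    // FatalProcessOutOfMemory.
    uint8_t* RemapCodeBlob(const uint8_t* blob, size_t blob_size) {
      const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      const size_t size = (blob_size + page - 1) / page * page;  // round up

      void* copy = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (copy == MAP_FAILED) return nullptr;

      std::memcpy(copy, blob, blob_size);  // fill while writable

      if (mprotect(copy, size, PROT_READ | PROT_EXEC) != 0) {  // seal as RX
        munmap(copy, size);
        return nullptr;
      }
      return static_cast<uint8_t*>(copy);
    }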
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index b1ccc4391eb..25b8f5964e0 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -667,8 +667,8 @@ class Heap {
template <FindMementoMode mode>
inline AllocationMemento FindAllocationMemento(Map map, HeapObject object);
- // Requests collection and blocks until GC is finished.
- void RequestCollectionBackground(LocalHeap* local_heap);
+ // Performs GC after background allocation failure.
+ void CollectGarbageForBackground(LocalHeap* local_heap);
//
// Support for the API.
@@ -812,6 +812,12 @@ class Heap {
// Create ObjectStats if live_object_stats_ or dead_object_stats_ are nullptr.
void CreateObjectStats();
+ // If the code range exists, allocates executable pages in the code range and
+ // copies the embedded builtins code blob there. Returns address of the copy.
+ // The builtins code region will be freed with the code range at tear down.
+ uint8_t* RemapEmbeddedBuiltinsIntoCodeRange(const uint8_t* embedded_blob_code,
+ size_t embedded_blob_code_size);
+
// Sets the TearDown state, so no new GC tasks get posted.
void StartTearDown();
@@ -1926,12 +1932,14 @@ class Heap {
bool always_allocate() { return always_allocate_scope_count_ != 0; }
V8_EXPORT_PRIVATE bool CanExpandOldGeneration(size_t size);
- V8_EXPORT_PRIVATE bool CanExpandOldGenerationBackground(size_t size);
+ V8_EXPORT_PRIVATE bool CanExpandOldGenerationBackground(LocalHeap* local_heap,
+ size_t size);
V8_EXPORT_PRIVATE bool CanPromoteYoungAndExpandOldGeneration(size_t size);
bool ShouldExpandOldGenerationOnSlowAllocation(
LocalHeap* local_heap = nullptr);
bool IsRetryOfFailedAllocation(LocalHeap* local_heap);
+ bool IsMainThreadParked(LocalHeap* local_heap);
HeapGrowingMode CurrentHeapGrowingMode();
@@ -2356,6 +2364,7 @@ class Heap {
friend class ScavengeTaskObserver;
friend class IncrementalMarking;
friend class IncrementalMarkingJob;
+ friend class LocalHeap;
friend class OldLargeObjectSpace;
template <typename ConcreteVisitor, typename MarkingState>
friend class MarkingVisitorBase;
diff --git a/deps/v8/src/heap/item-parallel-job.cc b/deps/v8/src/heap/item-parallel-job.cc
deleted file mode 100644
index 5a63e28d773..00000000000
--- a/deps/v8/src/heap/item-parallel-job.cc
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/heap/item-parallel-job.h"
-
-#include "src/base/platform/semaphore.h"
-#include "src/init/v8.h"
-#include "src/logging/counters.h"
-
-namespace v8 {
-namespace internal {
-
-ItemParallelJob::Task::Task(Isolate* isolate) : CancelableTask(isolate) {}
-
-void ItemParallelJob::Task::SetupInternal(base::Semaphore* on_finish,
- std::vector<Item*>* items,
- size_t start_index) {
- on_finish_ = on_finish;
- items_ = items;
-
- if (start_index < items->size()) {
- cur_index_ = start_index;
- } else {
- items_considered_ = items_->size();
- }
-}
-
-void ItemParallelJob::Task::WillRunOnForeground() {
- runner_ = Runner::kForeground;
-}
-
-void ItemParallelJob::Task::RunInternal() {
- RunInParallel(runner_);
- on_finish_->Signal();
-}
-
-ItemParallelJob::ItemParallelJob(CancelableTaskManager* cancelable_task_manager,
- base::Semaphore* pending_tasks)
- : cancelable_task_manager_(cancelable_task_manager),
- pending_tasks_(pending_tasks) {}
-
-ItemParallelJob::~ItemParallelJob() {
- for (size_t i = 0; i < items_.size(); i++) {
- Item* item = items_[i];
- CHECK(item->IsFinished());
- delete item;
- }
-}
-
-void ItemParallelJob::Run() {
- DCHECK_GT(tasks_.size(), 0);
- const size_t num_items = items_.size();
- const size_t num_tasks = tasks_.size();
-
- TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
- "ItemParallelJob::Run", TRACE_EVENT_SCOPE_THREAD,
- "num_tasks", static_cast<int>(num_tasks), "num_items",
- static_cast<int>(num_items));
-
- // Some jobs have more tasks than items (when the items are mere coarse
- // grain tasks that generate work dynamically for a second phase which all
- // tasks participate in). Some jobs even have 0 items to preprocess but
- // still have multiple tasks.
- // TODO(gab): Figure out a cleaner scheme for this.
- const size_t num_tasks_processing_items = std::min(num_items, tasks_.size());
-
- // In the event of an uneven workload, distribute an extra item to the first
- // |items_remainder| tasks.
- const size_t items_remainder = num_tasks_processing_items > 0
- ? num_items % num_tasks_processing_items
- : 0;
- // Base |items_per_task|, will be bumped by 1 for the first
- // |items_remainder| tasks.
- const size_t items_per_task = num_tasks_processing_items > 0
- ? num_items / num_tasks_processing_items
- : 0;
- CancelableTaskManager::Id* task_ids =
- new CancelableTaskManager::Id[num_tasks];
- std::unique_ptr<Task> main_task;
- for (size_t i = 0, start_index = 0; i < num_tasks;
- i++, start_index += items_per_task + (i < items_remainder ? 1 : 0)) {
- auto task = std::move(tasks_[i]);
- DCHECK(task);
-
- // By definition there are less |items_remainder| to distribute then
- // there are tasks processing items so this cannot overflow while we are
- // assigning work items.
- DCHECK_IMPLIES(start_index >= num_items, i >= num_tasks_processing_items);
-
- task->SetupInternal(pending_tasks_, &items_, start_index);
- task_ids[i] = task->id();
- if (i > 0) {
- V8::GetCurrentPlatform()->CallBlockingTaskOnWorkerThread(std::move(task));
- } else {
- main_task = std::move(task);
- }
- }
-
- // Contribute on main thread.
- DCHECK(main_task);
- main_task->WillRunOnForeground();
- main_task->Run();
-
- // Wait for background tasks.
- for (size_t i = 0; i < num_tasks; i++) {
- if (cancelable_task_manager_->TryAbort(task_ids[i]) !=
- TryAbortResult::kTaskAborted) {
- pending_tasks_->Wait();
- }
- }
- delete[] task_ids;
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/heap/item-parallel-job.h b/deps/v8/src/heap/item-parallel-job.h
deleted file mode 100644
index ba21e2efd92..00000000000
--- a/deps/v8/src/heap/item-parallel-job.h
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_ITEM_PARALLEL_JOB_H_
-#define V8_HEAP_ITEM_PARALLEL_JOB_H_
-
-#include <memory>
-#include <vector>
-
-#include "src/base/atomic-utils.h"
-#include "src/base/logging.h"
-#include "src/base/macros.h"
-#include "src/common/globals.h"
-#include "src/tasks/cancelable-task.h"
-
-namespace v8 {
-
-namespace base {
-class Semaphore;
-} // namespace base
-
-namespace internal {
-
-class Counters;
-class Isolate;
-
-// This class manages background tasks that process a set of items in parallel.
-// The first task added is executed on the same thread as |job.Run()| is called.
-// All other tasks are scheduled in the background.
-//
-// - Items need to inherit from ItemParallelJob::Item.
-// - Tasks need to inherit from ItemParallelJob::Task.
-//
-// Items need to be marked as finished after processing them. Task and Item
-// ownership is transferred to the job.
-class V8_EXPORT_PRIVATE ItemParallelJob {
- public:
- class Task;
-
- class V8_EXPORT_PRIVATE Item {
- public:
- Item() = default;
- virtual ~Item() = default;
- Item(const Item&) = delete;
- Item& operator=(const Item&) = delete;
-
- // Marks an item as being finished.
- void MarkFinished() { CHECK_EQ(kProcessing, state_.exchange(kFinished)); }
-
- private:
- enum ProcessingState : uintptr_t { kAvailable, kProcessing, kFinished };
-
- bool TryMarkingAsProcessing() {
- ProcessingState available = kAvailable;
- return state_.compare_exchange_strong(available, kProcessing);
- }
- bool IsFinished() { return state_ == kFinished; }
-
- std::atomic<ProcessingState> state_{kAvailable};
-
- friend class ItemParallelJob;
- friend class ItemParallelJob::Task;
- };
-
- class V8_EXPORT_PRIVATE Task : public CancelableTask {
- public:
- enum class Runner { kForeground, kBackground };
- explicit Task(Isolate* isolate);
- ~Task() override = default;
- Task(const Task&) = delete;
- Task& operator=(const Task&) = delete;
-
- virtual void RunInParallel(Runner runner) = 0;
-
- protected:
- // Retrieves a new item that needs to be processed. Returns |nullptr| if
- // all items are processed. Upon returning an item, the task is required
- // to process the item and mark the item as finished after doing so.
- template <class ItemType>
- ItemType* GetItem() {
- while (items_considered_++ != items_->size()) {
- // Wrap around.
- if (cur_index_ == items_->size()) {
- cur_index_ = 0;
- }
- Item* item = (*items_)[cur_index_++];
- if (item->TryMarkingAsProcessing()) {
- return static_cast<ItemType*>(item);
- }
- }
- return nullptr;
- }
-
- private:
- friend class ItemParallelJob;
- friend class Item;
-
- // Sets up state required before invoking Run(). If
- // |start_index is >= items_.size()|, this task will not process work items
- // (some jobs have more tasks than work items in order to parallelize post-
- // processing, e.g. scavenging).
- void SetupInternal(base::Semaphore* on_finish, std::vector<Item*>* items,
- size_t start_index);
- void WillRunOnForeground();
- // We don't allow overriding this method any further.
- void RunInternal() final;
-
- std::vector<Item*>* items_ = nullptr;
- size_t cur_index_ = 0;
- size_t items_considered_ = 0;
- Runner runner_ = Runner::kBackground;
- base::Semaphore* on_finish_ = nullptr;
- };
-
- ItemParallelJob(CancelableTaskManager* cancelable_task_manager,
- base::Semaphore* pending_tasks);
-
- ~ItemParallelJob();
-
- ItemParallelJob(const ItemParallelJob&) = delete;
- ItemParallelJob& operator=(const ItemParallelJob&) = delete;
-
- // Adds a task to the job. Transfers ownership to the job.
- void AddTask(Task* task) { tasks_.push_back(std::unique_ptr<Task>(task)); }
-
- // Adds an item to the job. Transfers ownership to the job.
- void AddItem(Item* item) { items_.push_back(item); }
-
- int NumberOfItems() const { return static_cast<int>(items_.size()); }
- int NumberOfTasks() const { return static_cast<int>(tasks_.size()); }
-
- // Runs this job.
- void Run();
-
- private:
- std::vector<Item*> items_;
- std::vector<std::unique_ptr<Task>> tasks_;
- CancelableTaskManager* cancelable_task_manager_;
- base::Semaphore* pending_tasks_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_HEAP_ITEM_PARALLEL_JOB_H_
diff --git a/deps/v8/src/heap/large-spaces.cc b/deps/v8/src/heap/large-spaces.cc
index 421c734142e..5cbcc8620fb 100644
--- a/deps/v8/src/heap/large-spaces.cc
+++ b/deps/v8/src/heap/large-spaces.cc
@@ -162,7 +162,7 @@ AllocationResult OldLargeObjectSpace::AllocateRawBackground(
LocalHeap* local_heap, int object_size) {
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
- if (!heap()->CanExpandOldGenerationBackground(object_size) ||
+ if (!heap()->CanExpandOldGenerationBackground(local_heap, object_size) ||
!heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap)) {
return AllocationResult::Retry(identity());
}
diff --git a/deps/v8/src/heap/local-heap-inl.h b/deps/v8/src/heap/local-heap-inl.h
index 0956a7b0d60..fd0ec5a4499 100644
--- a/deps/v8/src/heap/local-heap-inl.h
+++ b/deps/v8/src/heap/local-heap-inl.h
@@ -5,6 +5,8 @@
#ifndef V8_HEAP_LOCAL_HEAP_INL_H_
#define V8_HEAP_LOCAL_HEAP_INL_H_
+#include <atomic>
+
#include "src/common/assert-scope.h"
#include "src/handles/persistent-handles.h"
#include "src/heap/concurrent-allocator-inl.h"
@@ -24,6 +26,8 @@ AllocationResult LocalHeap::AllocateRaw(int size_in_bytes, AllocationType type,
alignment == AllocationAlignment::kWordAligned);
Heap::HeapState state = heap()->gc_state();
DCHECK(state == Heap::TEAR_DOWN || state == Heap::NOT_IN_GC);
+ ThreadState current = state_.load(std::memory_order_relaxed);
+ DCHECK(current == kRunning || current == kSafepointRequested);
#endif
// Each allocation is supposed to be a safepoint.
diff --git a/deps/v8/src/heap/local-heap.cc b/deps/v8/src/heap/local-heap.cc
index 85c36baaeea..70cdbcc0d74 100644
--- a/deps/v8/src/heap/local-heap.cc
+++ b/deps/v8/src/heap/local-heap.cc
@@ -4,6 +4,7 @@
#include "src/heap/local-heap.h"
+#include <atomic>
#include <memory>
#include "src/base/logging.h"
@@ -11,6 +12,8 @@
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/handles/local-handles.h"
+#include "src/heap/collection-barrier.h"
+#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier.h"
#include "src/heap/local-heap-inl.h"
@@ -42,9 +45,9 @@ LocalHeap::LocalHeap(Heap* heap, ThreadKind kind,
std::unique_ptr<PersistentHandles> persistent_handles)
: heap_(heap),
is_main_thread_(kind == ThreadKind::kMain),
- state_(ThreadState::Parked),
- safepoint_requested_(false),
+ state_(kParked),
allocation_failed_(false),
+ main_thread_parked_(false),
prev_(nullptr),
next_(nullptr),
handles_(new LocalHandles),
@@ -122,7 +125,9 @@ bool LocalHeap::IsHandleDereferenceAllowed() {
#ifdef DEBUG
VerifyCurrent();
#endif
- return state_ == ThreadState::Running;
+ ThreadState state = state_relaxed();
+ return state == kRunning || state == kSafepointRequested ||
+ state == kCollectionRequested;
}
#endif
@@ -130,40 +135,69 @@ bool LocalHeap::IsParked() {
#ifdef DEBUG
VerifyCurrent();
#endif
- return state_ == ThreadState::Parked;
+ ThreadState state = state_relaxed();
+ return state == kParked || state == kParkedSafepointRequested ||
+ state == kParkedCollectionRequested;
}
-void LocalHeap::Park() {
- base::MutexGuard guard(&state_mutex_);
- CHECK_EQ(ThreadState::Running, state_);
- state_ = ThreadState::Parked;
- state_change_.NotifyAll();
-}
-
-void LocalHeap::Unpark() {
- base::MutexGuard guard(&state_mutex_);
- CHECK(state_ == ThreadState::Parked);
- state_ = ThreadState::Running;
-}
+void LocalHeap::ParkSlowPath(ThreadState current_state) {
+ if (is_main_thread()) {
+ while (true) {
+ CHECK_EQ(current_state, kCollectionRequested);
+ heap_->CollectGarbageForBackground(this);
-void LocalHeap::EnsureParkedBeforeDestruction() {
- if (IsParked()) return;
- base::MutexGuard guard(&state_mutex_);
- state_ = ThreadState::Parked;
- state_change_.NotifyAll();
+ current_state = kRunning;
+ if (state_.compare_exchange_strong(current_state, kParked)) {
+ return;
+ }
+ }
+ } else {
+ CHECK_EQ(current_state, kSafepointRequested);
+ CHECK(state_.compare_exchange_strong(current_state,
+ kParkedSafepointRequested));
+ heap_->safepoint()->NotifyPark();
+ }
}
-void LocalHeap::RequestSafepoint() {
- safepoint_requested_.store(true, std::memory_order_relaxed);
+void LocalHeap::UnparkSlowPath() {
+ if (is_main_thread()) {
+ ThreadState expected = kParkedCollectionRequested;
+ CHECK(state_.compare_exchange_strong(expected, kCollectionRequested));
+ heap_->CollectGarbageForBackground(this);
+ } else {
+ while (true) {
+ ThreadState expected = kParked;
+ if (!state_.compare_exchange_strong(expected, kRunning)) {
+ CHECK_EQ(expected, kParkedSafepointRequested);
+ TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_UNPARK,
+ ThreadKind::kBackground);
+ heap_->safepoint()->WaitInUnpark();
+ } else {
+ return;
+ }
+ }
+ }
}
-void LocalHeap::ClearSafepointRequested() {
- safepoint_requested_.store(false, std::memory_order_relaxed);
+void LocalHeap::EnsureParkedBeforeDestruction() {
+ DCHECK_IMPLIES(!is_main_thread(), IsParked());
}
-void LocalHeap::EnterSafepoint() {
- DCHECK_EQ(LocalHeap::Current(), this);
- if (state_ == ThreadState::Running) heap_->safepoint()->EnterFromThread(this);
+void LocalHeap::SafepointSlowPath() {
+ if (is_main_thread()) {
+ CHECK_EQ(kCollectionRequested, state_relaxed());
+ heap_->CollectGarbageForBackground(this);
+ } else {
+ TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_SAFEPOINT,
+ ThreadKind::kBackground);
+ ThreadState expected = kSafepointRequested;
+ CHECK(state_.compare_exchange_strong(expected, kSafepoint));
+ heap_->safepoint()->WaitInSafepoint();
+ // This might be a bit surprising: GlobalSafepoint transitions the state
+ // from Safepoint (--> Running) --> Parked when returning from the
+ // safepoint.
+ Unpark();
+ }
}
void LocalHeap::FreeLinearAllocationArea() {
@@ -182,23 +216,63 @@ void LocalHeap::UnmarkLinearAllocationArea() {
old_space_allocator_.UnmarkLinearAllocationArea();
}
-void LocalHeap::PerformCollection() {
- ParkedScope scope(this);
- heap_->RequestCollectionBackground(this);
+bool LocalHeap::TryPerformCollection() {
+ if (is_main_thread()) {
+ heap_->CollectGarbageForBackground(this);
+ return true;
+ } else {
+ LocalHeap* main_thread = heap_->isolate()->main_thread_local_heap();
+ ThreadState current = main_thread->state_relaxed();
+
+ while (true) {
+ switch (current) {
+ case kRunning:
+ if (main_thread->state_.compare_exchange_strong(
+ current, kCollectionRequested)) {
+ heap_->collection_barrier_->ActivateStackGuardAndPostTask();
+ return heap_->collection_barrier_->AwaitCollectionBackground(this);
+ }
+ break;
+
+ case kCollectionRequested:
+ return heap_->collection_barrier_->AwaitCollectionBackground(this);
+
+ case kParked:
+ if (main_thread->state_.compare_exchange_strong(
+ current, kParkedCollectionRequested)) {
+ heap_->collection_barrier_->ActivateStackGuardAndPostTask();
+ return false;
+ }
+ break;
+
+ case kParkedCollectionRequested:
+ return false;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
}
Address LocalHeap::PerformCollectionAndAllocateAgain(
int object_size, AllocationType type, AllocationOrigin origin,
AllocationAlignment alignment) {
+ CHECK(!allocation_failed_);
+ CHECK(!main_thread_parked_);
allocation_failed_ = true;
static const int kMaxNumberOfRetries = 3;
for (int i = 0; i < kMaxNumberOfRetries; i++) {
- PerformCollection();
+ if (!TryPerformCollection()) {
+ main_thread_parked_ = true;
+ }
AllocationResult result = AllocateRaw(object_size, type, origin, alignment);
+
if (!result.IsRetry()) {
allocation_failed_ = false;
+ main_thread_parked_ = false;
return result.ToObjectChecked().address();
}
}
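TryPerformCollection above encodes the GC request handshake in the main thread's atomic state: a background thread CASes the main thread from kRunning to kCollectionRequested (or from kParked to kParkedCollectionRequested), arms the stack guard, and then either waits on the collection barrier or gives up with main_thread_parked_ set. A compressed sketch of that request side is shown below, with the barrier and stack-guard pieces stubbed out; it is a summary of the logic in this hunk, not the real class.

    #include <atomic>

    enum ThreadState {
      kRunning,
      kParked,
      kCollectionRequested,        // main thread, was Running
      kParkedCollectionRequested,  // main thread, was Parked
    };

    // Stubs for the collection barrier; in V8 these post a task to the main
    // thread and block the caller until the GC epilogue resumes it.
    bool AwaitCollection() { return true; }  // pretend the GC ran
    void ActivateStackGuard() {}             // would interrupt the main thread

    // Request a GC from a background thread by flagging the main thread's
    // state. Returns true if a collection ran before returning.
    bool RequestCollectionFromBackground(std::atomic<ThreadState>& main_state) {
      ThreadState current = main_state.load(std::memory_order_relaxed);
      while (true) {
        switch (current) {
          case kRunning:
            if (main_state.compare_exchange_strong(current,
                                                   kCollectionRequested)) {
              ActivateStackGuard();
              return AwaitCollection();
            }
            break;  // CAS failed, current was reloaded; retry
          case kCollectionRequested:
            return AwaitCollection();  // someone already asked; just wait
          case kParked:
            if (main_state.compare_exchange_strong(current,
                                                   kParkedCollectionRequested)) {
              ActivateStackGuard();
              return false;  // main thread cannot collect right now
            }
            break;
          case kParkedCollectionRequested:
            return false;
          default:
            return false;  // unreachable for a main-thread state
        }
      }
    }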
diff --git a/deps/v8/src/heap/local-heap.h b/deps/v8/src/heap/local-heap.h
index 8b5a6545de6..5ce375165a0 100644
--- a/deps/v8/src/heap/local-heap.h
+++ b/deps/v8/src/heap/local-heap.h
@@ -8,6 +8,7 @@
#include <atomic>
#include <memory>
+#include "src/base/macros.h"
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
#include "src/common/assert-scope.h"
@@ -40,18 +41,17 @@ class V8_EXPORT_PRIVATE LocalHeap {
std::unique_ptr<PersistentHandles> persistent_handles = nullptr);
~LocalHeap();
- // Invoked by main thread to signal this thread that it needs to halt in a
- // safepoint.
- void RequestSafepoint();
-
// Frequently invoked by local thread to check whether safepoint was requested
// from the main thread.
void Safepoint() {
DCHECK(AllowSafepoints::IsAllowed());
+ ThreadState current = state_relaxed();
+ STATIC_ASSERT(kSafepointRequested == kCollectionRequested);
- if (IsSafepointRequested()) {
- ClearSafepointRequested();
- EnterSafepoint();
+ // The following condition checks for both kSafepointRequested (background
+ // thread) and kCollectionRequested (main thread).
+ if (V8_UNLIKELY(current == kSafepointRequested)) {
+ SafepointSlowPath();
}
}
@@ -133,7 +133,7 @@ class V8_EXPORT_PRIVATE LocalHeap {
bool is_main_thread() const { return is_main_thread_; }
// Requests GC and blocks until the collection finishes.
- void PerformCollection();
+ bool TryPerformCollection();
// Adds a callback that is invoked with the given |data| after each GC.
// The callback is invoked on the main thread before any background thread
@@ -143,16 +143,42 @@ class V8_EXPORT_PRIVATE LocalHeap {
void RemoveGCEpilogueCallback(GCEpilogueCallback* callback, void* data);
private:
- enum class ThreadState {
- // Threads in this state need to be stopped in a safepoint.
- Running,
+ enum ThreadState {
+ // Threads in this state are allowed to access the heap.
+ kRunning,
// Thread was parked, which means that the thread is not allowed to access
- // or manipulate the heap in any way.
- Parked,
- // Thread was stopped in a safepoint.
- Safepoint
+ // or manipulate the heap in any way. This is considered to be a safepoint.
+ kParked,
+
+ // SafepointRequested is used for Running background threads to force
+ // Safepoint() and Park() into the slow path.
+ kSafepointRequested,
+ // A background thread transitions into this state from SafepointRequested
+ // when it enters a safepoint.
+ kSafepoint,
+ // This state is used for Parked background threads and forces Unpark() into
+ // the slow path. It prevents Unpark() from succeeding before the safepoint
+ // operation is finished.
+ kParkedSafepointRequested,
+
+ // This state is used on the main thread when at least one background thread
+ // requested a GC while the main thread was Running.
+ // We can use the same value for CollectionRequested and SafepointRequested
+ // since the first is only used on the main thread, while the other one only
+ // occurs on background threads. This property is used to have a faster
+ // check in Safepoint().
+ kCollectionRequested = kSafepointRequested,
+
+ // This state is used on the main thread when at least one background thread
+ // requested a GC while the main thread was Parked.
+ kParkedCollectionRequested,
};
+ ThreadState state_relaxed() { return state_.load(std::memory_order_relaxed); }
+
// Slow path of allocation that performs GC and then retries allocation in
// loop.
Address PerformCollectionAndAllocateAgain(int object_size,
@@ -160,31 +186,38 @@ class V8_EXPORT_PRIVATE LocalHeap {
AllocationOrigin origin,
AllocationAlignment alignment);
- void Park();
- void Unpark();
- void EnsureParkedBeforeDestruction();
-
- void EnsurePersistentHandles();
+ void Park() {
+ DCHECK(AllowGarbageCollection::IsAllowed());
+ ThreadState expected = kRunning;
+ if (!state_.compare_exchange_strong(expected, kParked)) {
+ ParkSlowPath(expected);
+ }
+ }
- V8_INLINE bool IsSafepointRequested() {
- return safepoint_requested_.load(std::memory_order_relaxed);
+ void Unpark() {
+ DCHECK(AllowGarbageCollection::IsAllowed());
+ ThreadState expected = kParked;
+ if (!state_.compare_exchange_strong(expected, kRunning)) {
+ UnparkSlowPath();
+ }
}
- void ClearSafepointRequested();
- void EnterSafepoint();
+ void ParkSlowPath(ThreadState state);
+ void UnparkSlowPath();
+ void EnsureParkedBeforeDestruction();
+ void SafepointSlowPath();
+
+ void EnsurePersistentHandles();
void InvokeGCEpilogueCallbacksInSafepoint();
Heap* heap_;
bool is_main_thread_;
- base::Mutex state_mutex_;
- base::ConditionVariable state_change_;
- ThreadState state_;
-
- std::atomic<bool> safepoint_requested_;
+ std::atomic<ThreadState> state_;
bool allocation_failed_;
+ bool main_thread_parked_;
LocalHeap* prev_;
LocalHeap* next_;
@@ -197,12 +230,13 @@ class V8_EXPORT_PRIVATE LocalHeap {
ConcurrentAllocator old_space_allocator_;
- friend class Heap;
+ friend class CollectionBarrier;
+ friend class ConcurrentAllocator;
friend class GlobalSafepoint;
+ friend class Heap;
+ friend class Isolate;
friend class ParkedScope;
friend class UnparkedScope;
- friend class ConcurrentAllocator;
- friend class Isolate;
};
} // namespace internal
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index d4d03a7652f..951b49507ca 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -23,7 +23,6 @@
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/index-generator.h"
#include "src/heap/invalidated-slots-inl.h"
-#include "src/heap/item-parallel-job.h"
#include "src/heap/large-spaces.h"
#include "src/heap/local-allocator-inl.h"
#include "src/heap/mark-compact-inl.h"
@@ -46,6 +45,7 @@
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/foreign.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-objects-inl.h"
#include "src/objects/maybe-object.h"
#include "src/objects/slots-inl.h"
@@ -1875,7 +1875,7 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
if (it.frame()->is_unoptimized()) return;
if (it.frame()->type() == StackFrame::OPTIMIZED) {
Code code = it.frame()->LookupCode();
- if (!code.CanDeoptAt(it.frame()->pc())) {
+ if (!code.CanDeoptAt(isolate(), it.frame()->pc())) {
Code::BodyDescriptor::IterateBody(code.map(), code, visitor);
}
return;
@@ -2019,6 +2019,9 @@ void MarkCompactCollector::MarkLiveObjects() {
DCHECK(local_marking_worklists()->IsEmpty());
}
+ // We depend on IterateWeakRootsForPhantomHandles being called before
+ // ClearOldBytecodeCandidates in order to identify flushed bytecode in the
+ // CPU profiler.
{
heap()->isolate()->global_handles()->IterateWeakRootsForPhantomHandles(
&IsUnmarkedHeapObject);
@@ -2082,6 +2085,8 @@ void MarkCompactCollector::ClearNonLiveReferences() {
ClearJSWeakRefs();
}
+ PROFILE(heap()->isolate(), WeakCodeClearEvent());
+
MarkDependentCodeForDeoptimization();
DCHECK(weak_objects_.transition_arrays.IsEmpty());
@@ -2132,8 +2137,8 @@ void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map map,
DCHECK_EQ(map.raw_transitions(), HeapObjectReference::Weak(dead_target));
// Take ownership of the descriptor array.
int number_of_own_descriptors = map.NumberOfOwnDescriptors();
- DescriptorArray descriptors = map.instance_descriptors(kRelaxedLoad);
- if (descriptors == dead_target.instance_descriptors(kRelaxedLoad) &&
+ DescriptorArray descriptors = map.instance_descriptors(isolate());
+ if (descriptors == dead_target.instance_descriptors(isolate()) &&
number_of_own_descriptors > 0) {
TrimDescriptorArray(map, descriptors);
DCHECK(descriptors.number_of_descriptors() == number_of_own_descriptors);
@@ -2205,8 +2210,6 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
// performing the unusual task of decompiling.
shared_info.set_function_data(uncompiled_data, kReleaseStore);
DCHECK(!shared_info.is_compiled());
-
- PROFILE(heap()->isolate(), BytecodeFlushEvent(compiled_data_start));
}
void MarkCompactCollector::ClearOldBytecodeCandidates() {
@@ -2264,7 +2267,7 @@ void MarkCompactCollector::ClearFullMapTransitions() {
bool parent_is_alive =
non_atomic_marking_state()->IsBlackOrGrey(parent);
DescriptorArray descriptors =
- parent_is_alive ? parent.instance_descriptors(kRelaxedLoad)
+ parent_is_alive ? parent.instance_descriptors(isolate())
: DescriptorArray();
bool descriptors_owner_died =
CompactTransitionArray(parent, array, descriptors);
@@ -2325,7 +2328,7 @@ bool MarkCompactCollector::CompactTransitionArray(Map map,
DCHECK_EQ(target.constructor_or_back_pointer(), map);
if (non_atomic_marking_state()->IsWhite(target)) {
if (!descriptors.is_null() &&
- target.instance_descriptors(kRelaxedLoad) == descriptors) {
+ target.instance_descriptors(isolate()) == descriptors) {
DCHECK(!target.is_prototype_map());
descriptors_owner_died = true;
}
@@ -2480,6 +2483,9 @@ void MarkCompactCollector::ClearWeakReferences() {
}
void MarkCompactCollector::ClearJSWeakRefs() {
+ if (!FLAG_harmony_weak_refs) {
+ return;
+ }
JSWeakRef weak_ref;
while (weak_objects_.js_weak_refs.Pop(kMainThreadTask, &weak_ref)) {
HeapObject target = HeapObject::cast(weak_ref.target());
@@ -2697,8 +2703,9 @@ static inline SlotCallbackResult UpdateSlot(TSlot slot,
}
template <AccessMode access_mode, typename TSlot>
-static inline SlotCallbackResult UpdateSlot(IsolateRoot isolate, TSlot slot) {
- typename TSlot::TObject obj = slot.Relaxed_Load(isolate);
+static inline SlotCallbackResult UpdateSlot(PtrComprCageBase cage_base,
+ TSlot slot) {
+ typename TSlot::TObject obj = slot.Relaxed_Load(cage_base);
HeapObject heap_obj;
if (TSlot::kCanBeWeak && obj->GetHeapObjectIfWeak(&heap_obj)) {
UpdateSlot<access_mode, HeapObjectReferenceType::WEAK>(slot, obj, heap_obj);
@@ -2710,9 +2717,9 @@ static inline SlotCallbackResult UpdateSlot(IsolateRoot isolate, TSlot slot) {
}
template <AccessMode access_mode, typename TSlot>
-static inline SlotCallbackResult UpdateStrongSlot(IsolateRoot isolate,
+static inline SlotCallbackResult UpdateStrongSlot(PtrComprCageBase cage_base,
TSlot slot) {
- typename TSlot::TObject obj = slot.Relaxed_Load(isolate);
+ typename TSlot::TObject obj = slot.Relaxed_Load(cage_base);
DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(obj.ptr()));
HeapObject heap_obj;
if (obj.GetHeapObject(&heap_obj)) {
@@ -2728,39 +2735,40 @@ static inline SlotCallbackResult UpdateStrongSlot(IsolateRoot isolate,
// It does not expect to encounter pointers to dead objects.
class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
public:
- explicit PointersUpdatingVisitor(IsolateRoot isolate) : isolate_(isolate) {}
+ explicit PointersUpdatingVisitor(PtrComprCageBase cage_base)
+ : cage_base_(cage_base) {}
void VisitPointer(HeapObject host, ObjectSlot p) override {
- UpdateStrongSlotInternal(isolate_, p);
+ UpdateStrongSlotInternal(cage_base_, p);
}
void VisitPointer(HeapObject host, MaybeObjectSlot p) override {
- UpdateSlotInternal(isolate_, p);
+ UpdateSlotInternal(cage_base_, p);
}
void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) override {
for (ObjectSlot p = start; p < end; ++p) {
- UpdateStrongSlotInternal(isolate_, p);
+ UpdateStrongSlotInternal(cage_base_, p);
}
}
void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) final {
for (MaybeObjectSlot p = start; p < end; ++p) {
- UpdateSlotInternal(isolate_, p);
+ UpdateSlotInternal(cage_base_, p);
}
}
void VisitRootPointer(Root root, const char* description,
FullObjectSlot p) override {
- UpdateRootSlotInternal(isolate_, p);
+ UpdateRootSlotInternal(cage_base_, p);
}
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
for (FullObjectSlot p = start; p < end; ++p) {
- UpdateRootSlotInternal(isolate_, p);
+ UpdateRootSlotInternal(cage_base_, p);
}
}
@@ -2768,7 +2776,7 @@ class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
OffHeapObjectSlot start,
OffHeapObjectSlot end) override {
for (OffHeapObjectSlot p = start; p < end; ++p) {
- UpdateRootSlotInternal(isolate_, p);
+ UpdateRootSlotInternal(cage_base_, p);
}
}
@@ -2783,32 +2791,32 @@ class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
}
private:
- static inline SlotCallbackResult UpdateRootSlotInternal(IsolateRoot isolate,
- FullObjectSlot slot) {
- return UpdateStrongSlot<AccessMode::NON_ATOMIC>(isolate, slot);
+ static inline SlotCallbackResult UpdateRootSlotInternal(
+ PtrComprCageBase cage_base, FullObjectSlot slot) {
+ return UpdateStrongSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
}
static inline SlotCallbackResult UpdateRootSlotInternal(
- IsolateRoot isolate, OffHeapObjectSlot slot) {
- return UpdateStrongSlot<AccessMode::NON_ATOMIC>(isolate, slot);
+ PtrComprCageBase cage_base, OffHeapObjectSlot slot) {
+ return UpdateStrongSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
}
static inline SlotCallbackResult UpdateStrongMaybeObjectSlotInternal(
- IsolateRoot isolate, MaybeObjectSlot slot) {
- return UpdateStrongSlot<AccessMode::NON_ATOMIC>(isolate, slot);
+ PtrComprCageBase cage_base, MaybeObjectSlot slot) {
+ return UpdateStrongSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
}
- static inline SlotCallbackResult UpdateStrongSlotInternal(IsolateRoot isolate,
- ObjectSlot slot) {
- return UpdateStrongSlot<AccessMode::NON_ATOMIC>(isolate, slot);
+ static inline SlotCallbackResult UpdateStrongSlotInternal(
+ PtrComprCageBase cage_base, ObjectSlot slot) {
+ return UpdateStrongSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
}
- static inline SlotCallbackResult UpdateSlotInternal(IsolateRoot isolate,
- MaybeObjectSlot slot) {
- return UpdateSlot<AccessMode::NON_ATOMIC>(isolate, slot);
+ static inline SlotCallbackResult UpdateSlotInternal(
+ PtrComprCageBase cage_base, MaybeObjectSlot slot) {
+ return UpdateSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
}
- IsolateRoot isolate_;
+ PtrComprCageBase cage_base_;
};
static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
@@ -3105,9 +3113,13 @@ void FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
chunk, marking_state, &old_space_visitor_,
LiveObjectVisitor::kClearMarkbits, &failed_object);
if (!success) {
- // Aborted compaction page. Actual processing happens on the main
- // thread for simplicity reasons.
- collector_->ReportAbortedEvacuationCandidate(failed_object, chunk);
+ if (FLAG_crash_on_aborted_evacuation) {
+ heap_->FatalProcessOutOfMemory("FullEvacuator::RawEvacuatePage");
+ } else {
+ // Aborted compaction page. Actual processing happens on the main
+ // thread for simplicity reasons.
+ collector_->ReportAbortedEvacuationCandidate(failed_object, chunk);
+ }
}
break;
}
@@ -3570,7 +3582,7 @@ class ToSpaceUpdatingItem : public UpdatingItem {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"ToSpaceUpdatingItem::ProcessVisitAll");
PointersUpdatingVisitor visitor(
- GetIsolateForPtrComprFromOnHeapAddress(start_));
+ GetPtrComprCageBaseFromOnHeapAddress(start_));
for (Address cur = start_; cur < end_;) {
HeapObject object = HeapObject::FromAddress(cur);
Map map = object.map();
@@ -3586,7 +3598,7 @@ class ToSpaceUpdatingItem : public UpdatingItem {
// For young generation evacuations we want to visit grey objects, for
// full MC, we need to visit black objects.
PointersUpdatingVisitor visitor(
- GetIsolateForPtrComprFromOnHeapAddress(start_));
+ GetPtrComprCageBaseFromOnHeapAddress(start_));
for (auto object_and_size : LiveObjectRange<kAllLiveObjects>(
chunk_, marking_state_->bitmap(chunk_))) {
object_and_size.first.IterateBodyFast(&visitor);
@@ -3732,12 +3744,12 @@ class RememberedSetUpdatingItem : public UpdatingItem {
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
(chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(chunk_);
- IsolateRoot isolate = heap_->isolate();
+ PtrComprCageBase cage_base = heap_->isolate();
RememberedSet<OLD_TO_OLD>::Iterate(
chunk_,
- [&filter, isolate](MaybeObjectSlot slot) {
+ [&filter, cage_base](MaybeObjectSlot slot) {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
- return UpdateSlot<AccessMode::NON_ATOMIC>(isolate, slot);
+ return UpdateSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
},
SlotSet::FREE_EMPTY_BUCKETS);
chunk_->ReleaseSlotSet<OLD_TO_OLD>();
@@ -3772,10 +3784,10 @@ class RememberedSetUpdatingItem : public UpdatingItem {
Address slot) {
// Using UpdateStrongSlot is OK here, because there are no weak
// typed slots.
- IsolateRoot isolate = heap_->isolate();
+ PtrComprCageBase cage_base = heap_->isolate();
return UpdateTypedSlotHelper::UpdateTypedSlot(
- heap_, slot_type, slot, [isolate](FullMaybeObjectSlot slot) {
- return UpdateStrongSlot<AccessMode::NON_ATOMIC>(isolate, slot);
+ heap_, slot_type, slot, [cage_base](FullMaybeObjectSlot slot) {
+ return UpdateStrongSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
});
});
}
@@ -3964,6 +3976,9 @@ void MarkCompactCollector::ReportAbortedEvacuationCandidate(
}
void MarkCompactCollector::PostProcessEvacuationCandidates() {
+ CHECK_IMPLIES(FLAG_crash_on_aborted_evacuation,
+ aborted_evacuation_candidates_.empty());
+
for (auto object_and_page : aborted_evacuation_candidates_) {
HeapObject failed_object = object_and_page.first;
Page* page = object_and_page.second;
diff --git a/deps/v8/src/heap/marking-barrier-inl.h b/deps/v8/src/heap/marking-barrier-inl.h
index 28ac8ba3aa2..56bd7efda23 100644
--- a/deps/v8/src/heap/marking-barrier-inl.h
+++ b/deps/v8/src/heap/marking-barrier-inl.h
@@ -13,6 +13,7 @@ namespace v8 {
namespace internal {
bool MarkingBarrier::MarkValue(HeapObject host, HeapObject value) {
+ DCHECK(IsCurrentMarkingBarrier());
DCHECK(is_activated_);
DCHECK(!marking_state_.IsImpossible(value));
// Host may have an impossible markbit pattern if manual allocation folding
diff --git a/deps/v8/src/heap/marking-barrier.cc b/deps/v8/src/heap/marking-barrier.cc
index 130c707f416..06f2e67810a 100644
--- a/deps/v8/src/heap/marking-barrier.cc
+++ b/deps/v8/src/heap/marking-barrier.cc
@@ -5,6 +5,7 @@
#include "src/heap/marking-barrier.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/heap-write-barrier.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/incremental-marking.h"
@@ -37,6 +38,7 @@ MarkingBarrier::~MarkingBarrier() { DCHECK(worklist_.IsLocalEmpty()); }
void MarkingBarrier::Write(HeapObject host, HeapObjectSlot slot,
HeapObject value) {
+ DCHECK(IsCurrentMarkingBarrier());
if (MarkValue(host, value)) {
if (is_compacting_ && slot.address()) {
collector_->RecordSlot(host, slot, value);
@@ -45,6 +47,7 @@ void MarkingBarrier::Write(HeapObject host, HeapObjectSlot slot,
}
void MarkingBarrier::Write(Code host, RelocInfo* reloc_info, HeapObject value) {
+ DCHECK(IsCurrentMarkingBarrier());
if (MarkValue(host, value)) {
if (is_compacting_) {
if (is_main_thread_barrier_) {
@@ -60,6 +63,7 @@ void MarkingBarrier::Write(Code host, RelocInfo* reloc_info, HeapObject value) {
void MarkingBarrier::Write(JSArrayBuffer host,
ArrayBufferExtension* extension) {
+ DCHECK(IsCurrentMarkingBarrier());
if (!V8_CONCURRENT_MARKING_BOOL && !marking_state_.IsBlack(host)) {
// The extension will be marked when the marker visits the host object.
return;
@@ -69,6 +73,7 @@ void MarkingBarrier::Write(JSArrayBuffer host,
void MarkingBarrier::Write(DescriptorArray descriptor_array,
int number_of_own_descriptors) {
+ DCHECK(IsCurrentMarkingBarrier());
DCHECK(is_main_thread_barrier_);
int16_t raw_marked = descriptor_array.raw_number_of_marked_descriptors();
if (NumberOfMarkedDescriptors::decode(collector_->epoch(), raw_marked) <
@@ -80,6 +85,7 @@ void MarkingBarrier::Write(DescriptorArray descriptor_array,
void MarkingBarrier::RecordRelocSlot(Code host, RelocInfo* rinfo,
HeapObject target) {
+ DCHECK(IsCurrentMarkingBarrier());
MarkCompactCollector::RecordRelocSlotInfo info =
MarkCompactCollector::PrepareRecordRelocSlot(host, rinfo, target);
if (info.should_record) {
@@ -204,5 +210,9 @@ void MarkingBarrier::Activate(bool is_compacting) {
}
}
+bool MarkingBarrier::IsCurrentMarkingBarrier() {
+ return WriteBarrier::CurrentMarkingBarrier(heap_) == this;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/marking-barrier.h b/deps/v8/src/heap/marking-barrier.h
index df34bbddfe9..9ed1ee63824 100644
--- a/deps/v8/src/heap/marking-barrier.h
+++ b/deps/v8/src/heap/marking-barrier.h
@@ -53,6 +53,8 @@ class MarkingBarrier {
void DeactivateSpace(PagedSpace*);
void DeactivateSpace(NewSpace*);
+ bool IsCurrentMarkingBarrier();
+
Heap* heap_;
MarkCompactCollector* collector_;
IncrementalMarking* incremental_marking_;
diff --git a/deps/v8/src/heap/memory-allocator.cc b/deps/v8/src/heap/memory-allocator.cc
index fe9975659f0..b5bccb879d0 100644
--- a/deps/v8/src/heap/memory-allocator.cc
+++ b/deps/v8/src/heap/memory-allocator.cc
@@ -23,11 +23,15 @@ namespace internal {
static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
LAZY_INSTANCE_INITIALIZER;
+namespace {
+void FunctionInStaticBinaryForAddressHint() {}
+} // namespace
+
Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
base::MutexGuard guard(&mutex_);
auto it = recently_freed_.find(code_range_size);
if (it == recently_freed_.end() || it->second.empty()) {
- return reinterpret_cast<Address>(GetRandomMmapAddr());
+ return FUNCTION_ADDR(&FunctionInStaticBinaryForAddressHint);
}
Address result = it->second.back();
it->second.pop_back();
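The hunk above replaces the random mmap address hint with the address of a function compiled into the static binary, presumably so the reserved code range lands near the executable's own code and PC-relative branches stay in range. A minimal sketch of that hinting idea follows; kPageSize and the rounding are assumptions for illustration, not V8's actual MemoryAllocator logic.

#include <cstdint>

namespace {
void FunctionInStaticBinaryForAddressHint() {}
}  // namespace

uintptr_t CodeRangeAddressHintSketch() {
  constexpr uintptr_t kPageSize = 4096;  // assumed allocation granularity
  uintptr_t addr =
      reinterpret_cast<uintptr_t>(&FunctionInStaticBinaryForAddressHint);
  return addr & ~(kPageSize - 1);  // round down to a page boundary
}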
diff --git a/deps/v8/src/heap/memory-chunk.cc b/deps/v8/src/heap/memory-chunk.cc
index 92f4f71a701..c2355c6b84b 100644
--- a/deps/v8/src/heap/memory-chunk.cc
+++ b/deps/v8/src/heap/memory-chunk.cc
@@ -90,8 +90,13 @@ void MemoryChunk::SetReadAndWritable() {
size_t page_size = MemoryAllocator::GetCommitPageSize();
DCHECK(IsAligned(unprotect_start, page_size));
size_t unprotect_size = RoundUp(area_size(), page_size);
+ // We may use RWX pages to write code. Some CPUs have optimisations that
+ // push code updates to the icache through a fast path, and they may filter
+ // those updates based on the written memory being executable.
CHECK(reservation_.SetPermissions(unprotect_start, unprotect_size,
- PageAllocator::kReadWrite));
+ FLAG_write_code_using_rwx
+ ? PageAllocator::kReadWriteExecute
+ : PageAllocator::kReadWrite));
}
}
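SetReadAndWritable() above now optionally keeps the page executable while it is being written, guarded by FLAG_write_code_using_rwx. A minimal sketch of that permission choice, assuming a POSIX mprotect() backend rather than V8's PageAllocator (start must be page-aligned for the call to succeed):

#include <sys/mman.h>
#include <cstddef>

bool SetCodeWritableSketch(void* start, size_t size, bool write_using_rwx) {
  // Keeping the region executable while writing lets CPUs that gate their
  // icache-update fast path on "writes to executable memory" see the update.
  int prot = write_using_rwx ? (PROT_READ | PROT_WRITE | PROT_EXEC)
                             : (PROT_READ | PROT_WRITE);
  return mprotect(start, size, prot) == 0;
}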
diff --git a/deps/v8/src/heap/memory-measurement.cc b/deps/v8/src/heap/memory-measurement.cc
index 5f79439b056..ab491e19a6e 100644
--- a/deps/v8/src/heap/memory-measurement.cc
+++ b/deps/v8/src/heap/memory-measurement.cc
@@ -6,16 +6,13 @@
#include "include/v8.h"
#include "src/api/api-inl.h"
-#include "src/api/api.h"
#include "src/execution/isolate-inl.h"
-#include "src/execution/isolate.h"
#include "src/heap/factory-inl.h"
-#include "src/heap/factory.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/marking-worklist.h"
#include "src/logging/counters.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-promise-inl.h"
-#include "src/objects/js-promise.h"
#include "src/tasks/task-utils.h"
namespace v8 {
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index 82fc1e73cd6..86b2e6a2c40 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -132,7 +132,7 @@ FieldStatsCollector::GetInobjectFieldStats(Map map) {
JSObjectFieldStats stats;
stats.embedded_fields_count_ = JSObject::GetEmbedderFieldCount(map);
if (!map.is_dictionary_map()) {
- DescriptorArray descriptors = map.instance_descriptors(kRelaxedLoad);
+ DescriptorArray descriptors = map.instance_descriptors();
for (InternalIndex descriptor : map.IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(descriptor);
if (details.location() == kField) {
@@ -856,7 +856,7 @@ void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map map) {
// This will be logged as MAP_TYPE in Phase2.
}
- DescriptorArray array = map.instance_descriptors(kRelaxedLoad);
+ DescriptorArray array = map.instance_descriptors(isolate());
if (map.owns_descriptors() &&
array != ReadOnlyRoots(heap_).empty_descriptor_array()) {
// Generally DescriptorArrays have their own instance type already
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index 395f76bf607..37ecd50c8df 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -18,7 +18,10 @@
#include "src/objects/ordered-hash-table.h"
#include "src/objects/synthetic-module-inl.h"
#include "src/objects/torque-defined-classes.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index d7127c5c477..9f133d6cfab 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -14,48 +14,48 @@
namespace v8 {
namespace internal {
-#define TYPED_VISITOR_ID_LIST(V) \
- V(AllocationSite) \
- V(BigInt) \
- V(ByteArray) \
- V(BytecodeArray) \
- V(Cell) \
- V(Code) \
- V(CodeDataContainer) \
- V(CoverageInfo) \
- V(DataHandler) \
- V(EmbedderDataArray) \
- V(EphemeronHashTable) \
- V(FeedbackCell) \
- V(FeedbackMetadata) \
- V(FixedDoubleArray) \
- V(JSArrayBuffer) \
- V(JSDataView) \
- V(JSFunction) \
- V(JSObject) \
- V(JSTypedArray) \
- V(WeakCell) \
- V(JSWeakCollection) \
- V(JSWeakRef) \
- V(Map) \
- V(NativeContext) \
- V(PreparseData) \
- V(PropertyArray) \
- V(PropertyCell) \
- V(PrototypeInfo) \
- V(SmallOrderedHashMap) \
- V(SmallOrderedHashSet) \
- V(SmallOrderedNameDictionary) \
- V(SourceTextModule) \
- V(SwissNameDictionary) \
- V(Symbol) \
- V(SyntheticModule) \
- V(TransitionArray) \
- V(WasmArray) \
- V(WasmIndirectFunctionTable) \
- V(WasmInstanceObject) \
- V(WasmStruct) \
- V(WasmTypeInfo)
+#define TYPED_VISITOR_ID_LIST(V) \
+ V(AllocationSite) \
+ V(BigInt) \
+ V(ByteArray) \
+ V(BytecodeArray) \
+ V(Cell) \
+ V(Code) \
+ V(CodeDataContainer) \
+ V(CoverageInfo) \
+ V(DataHandler) \
+ V(EmbedderDataArray) \
+ V(EphemeronHashTable) \
+ V(FeedbackCell) \
+ V(FeedbackMetadata) \
+ V(FixedDoubleArray) \
+ V(JSArrayBuffer) \
+ V(JSDataView) \
+ V(JSFunction) \
+ V(JSObject) \
+ V(JSTypedArray) \
+ V(WeakCell) \
+ V(JSWeakCollection) \
+ V(JSWeakRef) \
+ V(Map) \
+ V(NativeContext) \
+ V(PreparseData) \
+ V(PropertyArray) \
+ V(PropertyCell) \
+ V(PrototypeInfo) \
+ V(SmallOrderedHashMap) \
+ V(SmallOrderedHashSet) \
+ V(SmallOrderedNameDictionary) \
+ V(SourceTextModule) \
+ V(SwissNameDictionary) \
+ V(Symbol) \
+ V(SyntheticModule) \
+ V(TransitionArray) \
+ IF_WASM(V, WasmArray) \
+ IF_WASM(V, WasmIndirectFunctionTable) \
+ IF_WASM(V, WasmInstanceObject) \
+ IF_WASM(V, WasmStruct) \
+ IF_WASM(V, WasmTypeInfo)
#define FORWARD_DECLARE(TypeName) class TypeName;
TYPED_VISITOR_ID_LIST(FORWARD_DECLARE)
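The Wasm entries in TYPED_VISITOR_ID_LIST are now wrapped in IF_WASM, which is defined elsewhere in V8. A sketch of that conditional list-macro pattern (illustrative only; the real definition may differ):

#if V8_ENABLE_WEBASSEMBLY
#define IF_WASM(V, ...) V(__VA_ARGS__)
#else
#define IF_WASM(V, ...)
#endif

// With a definition like this, TYPED_VISITOR_ID_LIST(FORWARD_DECLARE) keeps
// forward-declaring every non-Wasm type, while the Wasm entries vanish from
// the expansion when V8_ENABLE_WEBASSEMBLY is not set.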
diff --git a/deps/v8/src/heap/paged-spaces.cc b/deps/v8/src/heap/paged-spaces.cc
index 1de1af4e2b5..f541974a506 100644
--- a/deps/v8/src/heap/paged-spaces.cc
+++ b/deps/v8/src/heap/paged-spaces.cc
@@ -320,14 +320,17 @@ Page* PagedSpace::Expand() {
return page;
}
-Page* PagedSpace::ExpandBackground(LocalHeap* local_heap) {
+base::Optional<std::pair<Address, size_t>> PagedSpace::ExpandBackground(
+ LocalHeap* local_heap, size_t size_in_bytes) {
Page* page = AllocatePage();
- if (page == nullptr) return nullptr;
+ if (page == nullptr) return {};
base::MutexGuard lock(&space_mutex_);
AddPage(page);
- Free(page->area_start(), page->area_size(),
+ Address object_start = page->area_start();
+ CHECK_LE(size_in_bytes, page->area_size());
+ Free(page->area_start() + size_in_bytes, page->area_size() - size_in_bytes,
SpaceAccountingMode::kSpaceAccounted);
- return page;
+ return std::make_pair(object_start, size_in_bytes);
}
int PagedSpace::CountTotalPages() {
@@ -589,13 +592,12 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
}
if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
- heap()->CanExpandOldGenerationBackground(AreaSize()) &&
- ExpandBackground(local_heap)) {
- DCHECK((CountTotalPages() > 1) ||
- (min_size_in_bytes <= free_list_->Available()));
- auto result = TryAllocationFromFreeListBackground(
- local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
- if (result) return result;
+ heap()->CanExpandOldGenerationBackground(local_heap, AreaSize())) {
+ auto result = ExpandBackground(local_heap, max_size_in_bytes);
+ if (result) {
+ DCHECK_EQ(Heap::GetFillToAlign(result->first, alignment), 0);
+ return result;
+ }
}
if (collector->sweeping_in_progress()) {
diff --git a/deps/v8/src/heap/paged-spaces.h b/deps/v8/src/heap/paged-spaces.h
index 5168f0f0533..621d92aa894 100644
--- a/deps/v8/src/heap/paged-spaces.h
+++ b/deps/v8/src/heap/paged-spaces.h
@@ -354,7 +354,13 @@ class V8_EXPORT_PRIVATE PagedSpace
// it cannot allocate requested number of pages from OS, or if the hard heap
// size limit has been hit.
virtual Page* Expand();
- Page* ExpandBackground(LocalHeap* local_heap);
+
+ // Expands the space by a single page from a background thread and allocates
+ // a memory area of the given size in it. If successful, the method returns
+ // the address and size of the area.
+ base::Optional<std::pair<Address, size_t>> ExpandBackground(
+ LocalHeap* local_heap, size_t size_in_bytes);
+
Page* AllocatePage();
// Sets up a linear allocation area that fits the given number of bytes.
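ExpandBackground() now hands back the start address and size of the reserved area instead of a Page*. A hedged caller-side sketch of the new shape, using std::optional in place of base::Optional and a stubbed-out expansion:

#include <cstdint>
#include <optional>
#include <utility>

using Address = uintptr_t;

// Stand-in for PagedSpace::ExpandBackground(); always fails in this sketch.
std::optional<std::pair<Address, size_t>> ExpandBackgroundStub(size_t) {
  return std::nullopt;
}

bool TryAllocateInNewPage(size_t size_in_bytes, Address* out_start) {
  if (auto area = ExpandBackgroundStub(size_in_bytes)) {
    *out_start = area->first;  // start of the freshly carved-out object area
    return area->second >= size_in_bytes;
  }
  return false;  // the space could not be expanded
}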
diff --git a/deps/v8/src/heap/read-only-heap-inl.h b/deps/v8/src/heap/read-only-heap-inl.h
index 316f4550134..0c128285845 100644
--- a/deps/v8/src/heap/read-only-heap-inl.h
+++ b/deps/v8/src/heap/read-only-heap-inl.h
@@ -14,9 +14,9 @@ namespace internal {
// static
ReadOnlyRoots ReadOnlyHeap::GetReadOnlyRoots(HeapObject object) {
-#ifdef V8_COMPRESS_POINTERS
- IsolateRoot isolate = GetIsolateForPtrCompr(object);
- return ReadOnlyRoots(Isolate::FromRootAddress(isolate.address()));
+#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
+ return ReadOnlyRoots(
+ Isolate::FromRootAddress(GetIsolateRootAddress(object.ptr())));
#else
#ifdef V8_SHARED_RO_HEAP
// This fails if we are creating heap objects and the roots haven't yet been
diff --git a/deps/v8/src/heap/read-only-heap.cc b/deps/v8/src/heap/read-only-heap.cc
index 342ad1d031c..d5f7e843efe 100644
--- a/deps/v8/src/heap/read-only-heap.cc
+++ b/deps/v8/src/heap/read-only-heap.cc
@@ -37,7 +37,7 @@ base::LazyInstance<std::weak_ptr<ReadOnlyArtifacts>>::type
std::shared_ptr<ReadOnlyArtifacts> InitializeSharedReadOnlyArtifacts() {
std::shared_ptr<ReadOnlyArtifacts> artifacts;
- if (COMPRESS_POINTERS_BOOL) {
+ if (COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL) {
artifacts = std::make_shared<PointerCompressedReadOnlyArtifacts>();
} else {
artifacts = std::make_shared<SingleCopyReadOnlyArtifacts>();
@@ -129,7 +129,7 @@ ReadOnlyHeap::ReadOnlyHeap(ReadOnlyHeap* ro_heap, ReadOnlySpace* ro_space)
: read_only_space_(ro_space),
read_only_object_cache_(ro_heap->read_only_object_cache_) {
DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());
- DCHECK(COMPRESS_POINTERS_BOOL);
+ DCHECK(COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL);
}
// static
@@ -139,7 +139,7 @@ ReadOnlyHeap* ReadOnlyHeap::CreateInitalHeapForBootstrapping(
std::unique_ptr<ReadOnlyHeap> ro_heap;
auto* ro_space = new ReadOnlySpace(isolate->heap());
- if (COMPRESS_POINTERS_BOOL) {
+ if (COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL) {
ro_heap.reset(new ReadOnlyHeap(ro_space));
} else {
std::unique_ptr<SoleReadOnlyHeap> sole_ro_heap(
diff --git a/deps/v8/src/heap/read-only-heap.h b/deps/v8/src/heap/read-only-heap.h
index c78ea77452d..f947832c5f9 100644
--- a/deps/v8/src/heap/read-only-heap.h
+++ b/deps/v8/src/heap/read-only-heap.h
@@ -87,8 +87,8 @@ class ReadOnlyHeap {
// Returns whether the ReadOnlySpace will actually be shared taking into
// account whether shared memory is available with pointer compression.
static bool IsReadOnlySpaceShared() {
- return V8_SHARED_RO_HEAP_BOOL &&
- (!COMPRESS_POINTERS_BOOL || IsSharedMemoryAvailable());
+ return V8_SHARED_RO_HEAP_BOOL && (!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL ||
+ IsSharedMemoryAvailable());
}
virtual void InitializeIsolateRoots(Isolate* isolate) {}
diff --git a/deps/v8/src/heap/read-only-spaces.cc b/deps/v8/src/heap/read-only-spaces.cc
index b54bfc03892..5adac66afe0 100644
--- a/deps/v8/src/heap/read-only-spaces.cc
+++ b/deps/v8/src/heap/read-only-spaces.cc
@@ -755,9 +755,10 @@ SharedReadOnlySpace::SharedReadOnlySpace(
Heap* heap, PointerCompressedReadOnlyArtifacts* artifacts)
: SharedReadOnlySpace(heap) {
// This constructor should only be used when RO_SPACE is shared with pointer
- // compression.
+ // compression in a per-Isolate cage.
DCHECK(V8_SHARED_RO_HEAP_BOOL);
DCHECK(COMPRESS_POINTERS_BOOL);
+ DCHECK(COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL);
DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());
DCHECK(!artifacts->pages().empty());
@@ -776,6 +777,7 @@ SharedReadOnlySpace::SharedReadOnlySpace(
: SharedReadOnlySpace(heap) {
DCHECK(V8_SHARED_RO_HEAP_BOOL);
DCHECK(COMPRESS_POINTERS_BOOL);
+ DCHECK(COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL);
DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());
accounting_stats_ = std::move(new_stats);
diff --git a/deps/v8/src/heap/read-only-spaces.h b/deps/v8/src/heap/read-only-spaces.h
index ffadcb55b36..ee4b2a82234 100644
--- a/deps/v8/src/heap/read-only-spaces.h
+++ b/deps/v8/src/heap/read-only-spaces.h
@@ -35,10 +35,11 @@ class ReadOnlyPage : public BasicMemoryChunk {
// Returns the address for a given offset in this page.
Address OffsetToAddress(size_t offset) const {
Address address_in_page = address() + offset;
- if (V8_SHARED_RO_HEAP_BOOL && COMPRESS_POINTERS_BOOL) {
- // Pointer compression with share ReadOnlyPages means that the area_start
- // and area_end cannot be defined since they are stored within the pages
- // which can be mapped at multiple memory addresses.
+ if (V8_SHARED_RO_HEAP_BOOL && COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL) {
+ // Pointer compression with a per-Isolate cage and shared ReadOnlyPages
+ // means that the area_start and area_end cannot be defined since they are
+ // stored within the pages which can be mapped at multiple memory
+ // addresses.
DCHECK_LT(offset, size());
} else {
DCHECK_GE(address_in_page, area_start());
diff --git a/deps/v8/src/heap/safepoint.cc b/deps/v8/src/heap/safepoint.cc
index 6ae7b9e6803..fd3156e6bb9 100644
--- a/deps/v8/src/heap/safepoint.cc
+++ b/deps/v8/src/heap/safepoint.cc
@@ -4,11 +4,15 @@
#include "src/heap/safepoint.h"
+#include <atomic>
+
#include "src/base/logging.h"
+#include "src/handles/handles.h"
#include "src/handles/local-handles.h"
#include "src/handles/persistent-handles.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
#include "src/heap/local-heap.h"
namespace v8 {
@@ -29,26 +33,35 @@ void GlobalSafepoint::EnterSafepointScope() {
barrier_.Arm();
DCHECK_NULL(LocalHeap::Current());
- for (LocalHeap* current = local_heaps_head_; current;
- current = current->next_) {
- if (current->is_main_thread()) {
- continue;
- }
- current->RequestSafepoint();
- }
+ int running = 0;
- for (LocalHeap* current = local_heaps_head_; current;
- current = current->next_) {
- if (current->is_main_thread()) {
+ for (LocalHeap* local_heap = local_heaps_head_; local_heap;
+ local_heap = local_heap->next_) {
+ if (local_heap->is_main_thread()) {
continue;
}
- DCHECK(!current->is_main_thread());
- current->state_mutex_.Lock();
-
- while (current->state_ == LocalHeap::ThreadState::Running) {
- current->state_change_.Wait(&current->state_mutex_);
+ DCHECK(!local_heap->is_main_thread());
+
+ LocalHeap::ThreadState expected = local_heap->state_relaxed();
+
+ while (true) {
+ CHECK(expected == LocalHeap::kParked || expected == LocalHeap::kRunning);
+ LocalHeap::ThreadState new_state =
+ expected == LocalHeap::kParked ? LocalHeap::kParkedSafepointRequested
+ : LocalHeap::kSafepointRequested;
+
+ if (local_heap->state_.compare_exchange_strong(expected, new_state)) {
+ if (expected == LocalHeap::kRunning) {
+ running++;
+ } else {
+ CHECK_EQ(expected, LocalHeap::kParked);
+ }
+ break;
+ }
}
}
+
+ barrier_.WaitUntilRunningThreadsInSafepoint(running);
}
void GlobalSafepoint::LeaveSafepointScope() {
@@ -57,12 +70,23 @@ void GlobalSafepoint::LeaveSafepointScope() {
DCHECK_NULL(LocalHeap::Current());
- for (LocalHeap* current = local_heaps_head_; current;
- current = current->next_) {
- if (current->is_main_thread()) {
+ for (LocalHeap* local_heap = local_heaps_head_; local_heap;
+ local_heap = local_heap->next_) {
+ if (local_heap->is_main_thread()) {
continue;
}
- current->state_mutex_.Unlock();
+
+ // We transition both ParkedSafepointRequested and Safepoint states to
+ // Parked. While this is probably intuitive for ParkedSafepointRequested,
+ // it might be surprising for Safepoint. SafepointSlowPath() will
+ // later unpark that thread again. Going through Parked means that a
+ // background thread doesn't need to be woken up before the main thread can
+ // start the next safepoint.
+
+ LocalHeap::ThreadState old_state =
+ local_heap->state_.exchange(LocalHeap::kParked);
+ CHECK(old_state == LocalHeap::kParkedSafepointRequested ||
+ old_state == LocalHeap::kSafepoint);
}
barrier_.Disarm();
@@ -70,39 +94,59 @@ void GlobalSafepoint::LeaveSafepointScope() {
local_heaps_mutex_.Unlock();
}
-void GlobalSafepoint::EnterFromThread(LocalHeap* local_heap) {
- {
- base::MutexGuard guard(&local_heap->state_mutex_);
- DCHECK_EQ(local_heap->state_, LocalHeap::ThreadState::Running);
- local_heap->state_ = LocalHeap::ThreadState::Safepoint;
- local_heap->state_change_.NotifyAll();
- }
+void GlobalSafepoint::WaitInSafepoint() { barrier_.WaitInSafepoint(); }
- barrier_.Wait();
+void GlobalSafepoint::WaitInUnpark() { barrier_.WaitInUnpark(); }
- {
- base::MutexGuard guard(&local_heap->state_mutex_);
- local_heap->state_ = LocalHeap::ThreadState::Running;
- }
-}
+void GlobalSafepoint::NotifyPark() { barrier_.NotifyPark(); }
void GlobalSafepoint::Barrier::Arm() {
base::MutexGuard guard(&mutex_);
- CHECK(!armed_);
+ DCHECK(!IsArmed());
armed_ = true;
+ stopped_ = 0;
}
void GlobalSafepoint::Barrier::Disarm() {
base::MutexGuard guard(&mutex_);
- CHECK(armed_);
+ DCHECK(IsArmed());
armed_ = false;
- cond_.NotifyAll();
+ stopped_ = 0;
+ cv_resume_.NotifyAll();
+}
+
+void GlobalSafepoint::Barrier::WaitUntilRunningThreadsInSafepoint(int running) {
+ base::MutexGuard guard(&mutex_);
+ DCHECK(IsArmed());
+ while (stopped_ < running) {
+ cv_stopped_.Wait(&mutex_);
+ }
+ DCHECK_EQ(stopped_, running);
+}
+
+void GlobalSafepoint::Barrier::NotifyPark() {
+ base::MutexGuard guard(&mutex_);
+ CHECK(IsArmed());
+ stopped_++;
+ cv_stopped_.NotifyOne();
}
-void GlobalSafepoint::Barrier::Wait() {
+void GlobalSafepoint::Barrier::WaitInSafepoint() {
base::MutexGuard guard(&mutex_);
- while (armed_) {
- cond_.Wait(&mutex_);
+ CHECK(IsArmed());
+ stopped_++;
+ cv_stopped_.NotifyOne();
+
+ while (IsArmed()) {
+ cv_resume_.Wait(&mutex_);
+ }
+}
+
+void GlobalSafepoint::Barrier::WaitInUnpark() {
+ base::MutexGuard guard(&mutex_);
+
+ while (IsArmed()) {
+ cv_resume_.Wait(&mutex_);
}
}
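The reworked barrier above counts stopped threads and uses separate condition variables for "stopped" and "resume". A self-contained sketch of the same protocol using the standard library (V8 itself uses base::Mutex and base::ConditionVariable):

#include <condition_variable>
#include <mutex>

class SafepointBarrierSketch {
 public:
  void Arm() {
    std::lock_guard<std::mutex> guard(mutex_);
    armed_ = true;
    stopped_ = 0;
  }
  void NotifyPark() {  // a running thread parked instead of reaching the safepoint
    std::lock_guard<std::mutex> guard(mutex_);
    ++stopped_;
    cv_stopped_.notify_one();
  }
  void WaitInSafepoint() {  // a running thread reached the safepoint
    std::unique_lock<std::mutex> lock(mutex_);
    ++stopped_;
    cv_stopped_.notify_one();
    cv_resume_.wait(lock, [this] { return !armed_; });
  }
  void WaitUntilRunningThreadsInSafepoint(int running) {  // main thread
    std::unique_lock<std::mutex> lock(mutex_);
    cv_stopped_.wait(lock, [this, running] { return stopped_ >= running; });
  }
  void Disarm() {
    std::lock_guard<std::mutex> guard(mutex_);
    armed_ = false;
    stopped_ = 0;
    cv_resume_.notify_all();
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_stopped_;
  std::condition_variable cv_resume_;
  bool armed_ = false;
  int stopped_ = 0;
};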
diff --git a/deps/v8/src/heap/safepoint.h b/deps/v8/src/heap/safepoint.h
index c45b03d249b..a33f7eef85a 100644
--- a/deps/v8/src/heap/safepoint.h
+++ b/deps/v8/src/heap/safepoint.h
@@ -24,8 +24,14 @@ class GlobalSafepoint {
public:
explicit GlobalSafepoint(Heap* heap);
- // Enter the safepoint from a thread
- void EnterFromThread(LocalHeap* local_heap);
+ // Wait until the unpark operation is safe again.
+ void WaitInUnpark();
+
+ // Enter the safepoint from a running thread
+ void WaitInSafepoint();
+
+ // Running thread reached a safepoint by parking itself.
+ void NotifyPark();
V8_EXPORT_PRIVATE bool ContainsLocalHeap(LocalHeap* local_heap);
V8_EXPORT_PRIVATE bool ContainsAnyLocalHeap();
@@ -48,15 +54,24 @@ class GlobalSafepoint {
private:
class Barrier {
base::Mutex mutex_;
- base::ConditionVariable cond_;
+ base::ConditionVariable cv_resume_;
+ base::ConditionVariable cv_stopped_;
bool armed_;
+ int stopped_ = 0;
+
+ bool IsArmed() { return armed_; }
+
public:
- Barrier() : armed_(false) {}
+ Barrier() : armed_(false), stopped_(0) {}
void Arm();
void Disarm();
- void Wait();
+ void WaitUntilRunningThreadsInSafepoint(int running);
+
+ void WaitInSafepoint();
+ void WaitInUnpark();
+ void NotifyPark();
};
void EnterSafepointScope();
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index a5c8a41ea5c..be9971e7c68 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -9,7 +9,6 @@
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/invalidated-slots-inl.h"
-#include "src/heap/item-parallel-job.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/objects-visiting-inl.h"
@@ -18,6 +17,7 @@
#include "src/heap/sweeper.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/embedder-data-array-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/objects-body-descriptors-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/utils/utils-inl.h"
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 878f47eb002..8a3e1fda121 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -43,7 +43,10 @@
#include "src/objects/template-objects-inl.h"
#include "src/objects/torque-defined-classes-inl.h"
#include "src/regexp/regexp.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -125,8 +128,11 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
int inobject_properties) {
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
bool is_js_object = InstanceTypeChecker::IsJSObject(instance_type);
- bool is_wasm_object =
- (instance_type == WASM_STRUCT_TYPE || instance_type == WASM_ARRAY_TYPE);
+ bool is_wasm_object = false;
+#if V8_ENABLE_WEBASSEMBLY
+ is_wasm_object =
+ instance_type == WASM_STRUCT_TYPE || instance_type == WASM_ARRAY_TYPE;
+#endif // V8_ENABLE_WEBASSEMBLY
DCHECK_IMPLIES(is_js_object &&
!Map::CanHaveFastTransitionableElementsKind(instance_type),
IsDictionaryElementsKind(elements_kind) ||
@@ -494,7 +500,8 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(CODE_DATA_CONTAINER_TYPE, CodeDataContainer::kSize,
code_data_container)
- ALLOCATE_MAP(WASM_TYPE_INFO_TYPE, WasmTypeInfo::kSize, wasm_type_info)
+ IF_WASM(ALLOCATE_MAP, WASM_TYPE_INFO_TYPE, WasmTypeInfo::kSize,
+ wasm_type_info)
ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
@@ -510,11 +517,10 @@ bool Heap::CreateInitialMaps() {
{
AllocationResult alloc =
- AllocateRaw(FixedArray::SizeFor(ScopeInfo::kVariablePartIndex),
+ AllocateRaw(ScopeInfo::SizeFor(ScopeInfo::kVariablePartIndex),
AllocationType::kReadOnly);
if (!alloc.To(&obj)) return false;
obj.set_map_after_allocation(roots.scope_info_map(), SKIP_WRITE_BARRIER);
- ScopeInfo::cast(obj).set_length(ScopeInfo::kVariablePartIndex);
int flags = ScopeInfo::IsEmptyBit::encode(true);
DCHECK_EQ(ScopeInfo::LanguageModeBit::decode(flags), LanguageMode::kSloppy);
DCHECK_EQ(ScopeInfo::ReceiverVariableBits::decode(flags),
diff --git a/deps/v8/src/heap/weak-object-worklists.cc b/deps/v8/src/heap/weak-object-worklists.cc
index 84df473076f..532739000fe 100644
--- a/deps/v8/src/heap/weak-object-worklists.cc
+++ b/deps/v8/src/heap/weak-object-worklists.cc
@@ -115,17 +115,19 @@ void WeakObjects::UpdateWeakObjectsInCode(
void WeakObjects::UpdateJSWeakRefs(
WeakObjectWorklist<JSWeakRef>& js_weak_refs) {
- js_weak_refs.Update(
- [](JSWeakRef js_weak_ref_in, JSWeakRef* js_weak_ref_out) -> bool {
- JSWeakRef forwarded = ForwardingAddress(js_weak_ref_in);
-
- if (!forwarded.is_null()) {
- *js_weak_ref_out = forwarded;
- return true;
- }
-
- return false;
- });
+ if (FLAG_harmony_weak_refs) {
+ js_weak_refs.Update(
+ [](JSWeakRef js_weak_ref_in, JSWeakRef* js_weak_ref_out) -> bool {
+ JSWeakRef forwarded = ForwardingAddress(js_weak_ref_in);
+
+ if (!forwarded.is_null()) {
+ *js_weak_ref_out = forwarded;
+ return true;
+ }
+
+ return false;
+ });
+ }
}
void WeakObjects::UpdateWeakCells(WeakObjectWorklist<WeakCell>& weak_cells) {
diff --git a/deps/v8/src/ic/OWNERS b/deps/v8/src/ic/OWNERS
index 5bf39a2df1a..3c99566e981 100644
--- a/deps/v8/src/ic/OWNERS
+++ b/deps/v8/src/ic/OWNERS
@@ -1,6 +1,5 @@
-bmeurer@chromium.org
ishell@chromium.org
jkummerow@chromium.org
mvstanton@chromium.org
-verwaest@chromium.org
mythria@chromium.org
+verwaest@chromium.org
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index 1494102df9e..35d1da5cd92 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -24,9 +24,6 @@
namespace v8 {
namespace internal {
-using compiler::CodeAssemblerState;
-using compiler::Node;
-
//////////////////// Private helpers.
// Loads dataX field from the DataHandler object.
@@ -172,8 +169,8 @@ void AccessorAssembler::HandleLoadICHandlerCase(
BIND(&call_handler);
{
exit_point->ReturnCallStub(LoadWithVectorDescriptor{}, CAST(handler),
- p->context(), p->receiver(), p->name(),
- p->slot(), p->vector());
+ p->context(), p->lookup_start_object(),
+ p->name(), p->slot(), p->vector());
}
}
@@ -392,7 +389,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
if (Is64()) {
GotoIfNot(
UintPtrLessThanOrEqual(var_intptr_index.value(),
- IntPtrConstant(JSArray::kMaxArrayIndex)),
+ IntPtrConstant(JSObject::kMaxElementIndex)),
miss);
} else {
GotoIf(IntPtrLessThan(var_intptr_index.value(), IntPtrConstant(0)),
@@ -532,17 +529,18 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
BIND(&normal);
{
Comment("load_normal");
- TNode<NameDictionary> properties = CAST(LoadSlowProperties(CAST(holder)));
+ TNode<PropertyDictionary> properties =
+ CAST(LoadSlowProperties(CAST(holder)));
TVARIABLE(IntPtrT, var_name_index);
Label found(this, &var_name_index);
- NameDictionaryLookup<NameDictionary>(properties, CAST(p->name()), &found,
- &var_name_index, miss);
+ NameDictionaryLookup<PropertyDictionary>(properties, CAST(p->name()),
+ &found, &var_name_index, miss);
BIND(&found);
{
TVARIABLE(Uint32T, var_details);
TVARIABLE(Object, var_value);
- LoadPropertyFromNameDictionary(properties, var_name_index.value(),
- &var_details, &var_value);
+ LoadPropertyFromDictionary<PropertyDictionary>(
+ properties, var_name_index.value(), &var_details, &var_value);
TNode<Object> value = CallGetterIfAccessor(
var_value.value(), CAST(holder), var_details.value(), p->context(),
p->receiver(), miss);
@@ -743,11 +741,12 @@ void AccessorAssembler::HandleLoadICSmiHandlerHasNamedCase(
BIND(&normal);
{
Comment("has_normal");
- TNode<NameDictionary> properties = CAST(LoadSlowProperties(CAST(holder)));
+ TNode<PropertyDictionary> properties =
+ CAST(LoadSlowProperties(CAST(holder)));
TVARIABLE(IntPtrT, var_name_index);
Label found(this);
- NameDictionaryLookup<NameDictionary>(properties, CAST(p->name()), &found,
- &var_name_index, miss);
+ NameDictionaryLookup<PropertyDictionary>(properties, CAST(p->name()),
+ &found, &var_name_index, miss);
BIND(&found);
exit_point->Return(TrueConstant());
@@ -862,11 +861,6 @@ TNode<Object> AccessorAssembler::HandleProtoHandler(
BIND(&if_lookup_on_lookup_start_object);
{
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- // TODO(v8:11167) remove once OrderedNameDictionary supported.
- GotoIf(Int32TrueConstant(), miss);
- }
-
// Dictionary lookup on lookup start object is not necessary for
// Load/StoreGlobalIC (which is the only case when the
// lookup_start_object can be a JSGlobalObject) because prototype
@@ -876,12 +870,12 @@ TNode<Object> AccessorAssembler::HandleProtoHandler(
Word32BinaryNot(HasInstanceType(
CAST(p->lookup_start_object()), JS_GLOBAL_OBJECT_TYPE)));
- TNode<NameDictionary> properties =
+ TNode<PropertyDictionary> properties =
CAST(LoadSlowProperties(CAST(p->lookup_start_object())));
TVARIABLE(IntPtrT, var_name_index);
Label found(this, &var_name_index);
- NameDictionaryLookup<NameDictionary>(properties, CAST(p->name()),
- &found, &var_name_index, &done);
+ NameDictionaryLookup<PropertyDictionary>(
+ properties, CAST(p->name()), &found, &var_name_index, &done);
BIND(&found);
{
if (on_found_on_lookup_start_object) {
@@ -908,14 +902,14 @@ void AccessorAssembler::HandleLoadICProtoHandler(
// Code sub-handlers are not expected in LoadICs, so no |on_code_handler|.
nullptr,
// on_found_on_lookup_start_object
- [=](TNode<NameDictionary> properties, TNode<IntPtrT> name_index) {
+ [=](TNode<PropertyDictionary> properties, TNode<IntPtrT> name_index) {
if (access_mode == LoadAccessMode::kHas) {
exit_point->Return(TrueConstant());
} else {
TVARIABLE(Uint32T, var_details);
TVARIABLE(Object, var_value);
- LoadPropertyFromNameDictionary(properties, name_index, &var_details,
- &var_value);
+ LoadPropertyFromDictionary<PropertyDictionary>(
+ properties, name_index, &var_details, &var_value);
TNode<Object> value = CallGetterIfAccessor(
var_value.value(), CAST(var_holder->value()), var_details.value(),
p->context(), p->receiver(), miss);
@@ -1054,11 +1048,12 @@ void AccessorAssembler::HandleStoreICHandlerCase(
&if_slow);
CSA_ASSERT(this,
Word32Equal(handler_kind, Int32Constant(StoreHandler::kNormal)));
- TNode<NameDictionary> properties = CAST(LoadSlowProperties(CAST(holder)));
+ TNode<PropertyDictionary> properties =
+ CAST(LoadSlowProperties(CAST(holder)));
TVARIABLE(IntPtrT, var_name_index);
Label dictionary_found(this, &var_name_index);
- NameDictionaryLookup<NameDictionary>(
+ NameDictionaryLookup<PropertyDictionary>(
properties, CAST(p->name()), &dictionary_found, &var_name_index, miss);
BIND(&dictionary_found);
{
@@ -1075,8 +1070,8 @@ void AccessorAssembler::HandleStoreICHandlerCase(
GotoIf(IsPropertyDetailsConst(details), &if_constant);
}
- StoreValueByKeyIndex<NameDictionary>(properties, var_name_index.value(),
- p->value());
+ StoreValueByKeyIndex<PropertyDictionary>(
+ properties, var_name_index.value(), p->value());
Return(p->value());
if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL) {
@@ -1555,7 +1550,7 @@ void AccessorAssembler::HandleStoreICProtoHandler(
TNode<Object> smi_handler = HandleProtoHandler<StoreHandler>(
p, handler, on_code_handler,
// on_found_on_lookup_start_object
- [=](TNode<NameDictionary> properties, TNode<IntPtrT> name_index) {
+ [=](TNode<PropertyDictionary> properties, TNode<IntPtrT> name_index) {
TNode<Uint32T> details = LoadDetailsByKeyIndex(properties, name_index);
// Check that the property is a writable data property (no accessor).
const int kTypeAndReadOnlyMask =
@@ -1564,8 +1559,8 @@ void AccessorAssembler::HandleStoreICProtoHandler(
STATIC_ASSERT(kData == 0);
GotoIf(IsSetWord32(details, kTypeAndReadOnlyMask), miss);
- StoreValueByKeyIndex<NameDictionary>(properties, name_index,
- p->value());
+ StoreValueByKeyIndex<PropertyDictionary>(properties, name_index,
+ p->value());
Return(p->value());
},
miss, ic_mode);
@@ -1636,9 +1631,9 @@ void AccessorAssembler::HandleStoreICProtoHandler(
TNode<Map> receiver_map = LoadMap(CAST(p->receiver()));
InvalidateValidityCellIfPrototype(receiver_map);
- TNode<NameDictionary> properties =
+ TNode<PropertyDictionary> properties =
CAST(LoadSlowProperties(CAST(p->receiver())));
- Add<NameDictionary>(properties, CAST(p->name()), p->value(), &slow);
+ Add<PropertyDictionary>(properties, CAST(p->name()), p->value(), &slow);
Return(p->value());
BIND(&slow);
@@ -2137,7 +2132,7 @@ void AccessorAssembler::EmitElementLoad(
{
Comment("dictionary elements");
if (Is64()) {
- GotoIf(UintPtrLessThan(IntPtrConstant(JSArray::kMaxArrayIndex),
+ GotoIf(UintPtrLessThan(IntPtrConstant(JSObject::kMaxElementIndex),
intptr_index),
out_of_bounds);
} else {
@@ -2328,11 +2323,11 @@ void AccessorAssembler::GenericElementLoad(
// without ever checking the prototype chain.
GotoIf(IsJSTypedArrayInstanceType(lookup_start_object_instance_type),
&return_undefined);
- // Positive OOB indices within JSArray index range are effectively the same
+ // Positive OOB indices within elements index range are effectively the same
// as hole loads. Larger keys and negative keys are named loads.
if (Is64()) {
Branch(UintPtrLessThanOrEqual(index,
- IntPtrConstant(JSArray::kMaxArrayIndex)),
+ IntPtrConstant(JSObject::kMaxElementIndex)),
&if_element_hole, slow);
} else {
Branch(IntPtrLessThan(index, IntPtrConstant(0)), slow, &if_element_hole);
@@ -2442,26 +2437,21 @@ void AccessorAssembler::GenericPropertyLoad(
BIND(&if_property_dictionary);
{
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- // TODO(v8:11167) remove once OrderedNameDictionary supported.
- GotoIf(Int32TrueConstant(), slow);
- }
-
Comment("dictionary property load");
// We checked for LAST_CUSTOM_ELEMENTS_RECEIVER before, which rules out
// seeing global objects here (which would need special handling).
TVARIABLE(IntPtrT, var_name_index);
Label dictionary_found(this, &var_name_index);
- TNode<NameDictionary> properties =
+ TNode<PropertyDictionary> properties =
CAST(LoadSlowProperties(CAST(lookup_start_object)));
- NameDictionaryLookup<NameDictionary>(properties, name, &dictionary_found,
- &var_name_index,
- &lookup_prototype_chain);
+ NameDictionaryLookup<PropertyDictionary>(properties, name,
+ &dictionary_found, &var_name_index,
+ &lookup_prototype_chain);
BIND(&dictionary_found);
{
- LoadPropertyFromNameDictionary(properties, var_name_index.value(),
- &var_details, &var_value);
+ LoadPropertyFromDictionary<PropertyDictionary>(
+ properties, var_name_index.value(), &var_details, &var_value);
Goto(&if_found_on_lookup_start_object);
}
}
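The CSA code above switches from NameDictionary to PropertyDictionary for slow-mode property storage. PropertyDictionary appears to be a build-dependent alias over the dictionary implementation; a sketch of that alias pattern (the actual definition and macro name live in V8's object headers and may differ):

class NameDictionary;
class SwissNameDictionary;

#ifdef V8_ENABLE_SWISS_NAME_DICTIONARY
using PropertyDictionary = SwissNameDictionary;  // assumed configuration
#else
using PropertyDictionary = NameDictionary;
#endif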
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index 64ad6808820..79f8181af49 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -20,8 +20,6 @@ class ExitPoint;
class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
public:
- using Node = compiler::Node;
-
explicit AccessorAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
@@ -432,7 +430,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
using OnCodeHandler = std::function<void(TNode<Code> code_handler)>;
using OnFoundOnLookupStartObject = std::function<void(
- TNode<NameDictionary> properties, TNode<IntPtrT> name_index)>;
+ TNode<PropertyDictionary> properties, TNode<IntPtrT> name_index)>;
template <typename ICHandler, typename ICParameters>
TNode<Object> HandleProtoHandler(
diff --git a/deps/v8/src/ic/call-optimization.cc b/deps/v8/src/ic/call-optimization.cc
index 8a6374a431b..6521e831939 100644
--- a/deps/v8/src/ic/call-optimization.cc
+++ b/deps/v8/src/ic/call-optimization.cc
@@ -88,16 +88,14 @@ bool CallOptimization::IsCompatibleReceiverMap(
void CallOptimization::Initialize(
Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info) {
- if (function_template_info->call_code(kAcquireLoad).IsUndefined(isolate))
- return;
- api_call_info_ = handle(
- CallHandlerInfo::cast(function_template_info->call_code(kAcquireLoad)),
- isolate);
+ HeapObject call_code = function_template_info->call_code(kAcquireLoad);
+ if (call_code.IsUndefined(isolate)) return;
+ api_call_info_ = handle(CallHandlerInfo::cast(call_code), isolate);
- if (!function_template_info->signature().IsUndefined(isolate)) {
+ HeapObject signature = function_template_info->signature();
+ if (!signature.IsUndefined(isolate)) {
expected_receiver_type_ =
- handle(FunctionTemplateInfo::cast(function_template_info->signature()),
- isolate);
+ handle(FunctionTemplateInfo::cast(signature), isolate);
}
is_simple_api_call_ = true;
}
diff --git a/deps/v8/src/ic/handler-configuration-inl.h b/deps/v8/src/ic/handler-configuration-inl.h
index ca267a72a97..75bda11cd15 100644
--- a/deps/v8/src/ic/handler-configuration-inl.h
+++ b/deps/v8/src/ic/handler-configuration-inl.h
@@ -29,8 +29,6 @@ LoadHandler::Kind LoadHandler::GetHandlerKind(Smi smi_handler) {
}
Handle<Smi> LoadHandler::LoadNormal(Isolate* isolate) {
- // TODO(v8:11167) remove DCHECK once OrderedNameDictionary supported.
- DCHECK(!V8_DICT_MODE_PROTOTYPES_BOOL);
int config = KindBits::encode(kNormal);
return handle(Smi::FromInt(config), isolate);
}
@@ -130,8 +128,6 @@ Handle<Smi> StoreHandler::StoreGlobalProxy(Isolate* isolate) {
}
Handle<Smi> StoreHandler::StoreNormal(Isolate* isolate) {
- // TODO(v8:11167) remove DCHECK once OrderedNameDictionary supported.
- DCHECK(!V8_DICT_MODE_PROTOTYPES_BOOL);
int config = KindBits::encode(kNormal);
return handle(Smi::FromInt(config), isolate);
}
diff --git a/deps/v8/src/ic/handler-configuration.cc b/deps/v8/src/ic/handler-configuration.cc
index 462f2e1ab08..78379b01a90 100644
--- a/deps/v8/src/ic/handler-configuration.cc
+++ b/deps/v8/src/ic/handler-configuration.cc
@@ -116,9 +116,6 @@ Handle<Object> LoadHandler::LoadFromPrototype(
Isolate* isolate, Handle<Map> lookup_start_object_map,
Handle<JSReceiver> holder, Handle<Smi> smi_handler,
MaybeObjectHandle maybe_data1, MaybeObjectHandle maybe_data2) {
- // TODO(v8:11167) remove DCHECK once OrderedNameDictionary supported.
- DCHECK_IMPLIES(V8_DICT_MODE_PROTOTYPES_BOOL,
- GetHandlerKind(*smi_handler) != Kind::kNormal);
MaybeObjectHandle data1;
if (maybe_data1.is_null()) {
data1 = MaybeObjectHandle::Weak(holder);
@@ -229,7 +226,7 @@ MaybeObjectHandle StoreHandler::StoreTransition(Isolate* isolate,
if (!is_dictionary_map) {
InternalIndex descriptor = transition_map->LastAdded();
Handle<DescriptorArray> descriptors(
- transition_map->instance_descriptors(kRelaxedLoad), isolate);
+ transition_map->instance_descriptors(isolate), isolate);
PropertyDetails details = descriptors->GetDetails(descriptor);
if (descriptors->GetKey(descriptor).IsPrivate()) {
DCHECK_EQ(DONT_ENUM, details.attributes());
@@ -274,10 +271,6 @@ Handle<Object> StoreHandler::StoreThroughPrototype(
Isolate* isolate, Handle<Map> receiver_map, Handle<JSReceiver> holder,
Handle<Smi> smi_handler, MaybeObjectHandle maybe_data1,
MaybeObjectHandle maybe_data2) {
- // TODO(v8:11167) remove DCHECK once OrderedNameDictionary supported.
- DCHECK_IMPLIES(V8_DICT_MODE_PROTOTYPES_BOOL,
- KindBits::decode(smi_handler->value()) != Kind::kNormal);
-
MaybeObjectHandle data1;
if (maybe_data1.is_null()) {
data1 = MaybeObjectHandle::Weak(holder);
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 2614e274405..81e31d1c2d8 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -31,6 +31,7 @@
#include "src/objects/field-type.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/heap-number-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/prototype.h"
@@ -779,25 +780,28 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
Handle<Object> receiver = lookup->GetReceiver();
ReadOnlyRoots roots(isolate());
+ Handle<Object> lookup_start_object = lookup->lookup_start_object();
// `in` cannot be called on strings, and will always return true for string
// wrapper length and function prototypes. The latter two cases are given
// LoadHandler::LoadNativeDataProperty below.
if (!IsAnyHas() && !lookup->IsElement()) {
- if (receiver->IsString() && *lookup->name() == roots.length_string()) {
+ if (lookup_start_object->IsString() &&
+ *lookup->name() == roots.length_string()) {
TRACE_HANDLER_STATS(isolate(), LoadIC_StringLength);
return BUILTIN_CODE(isolate(), LoadIC_StringLength);
}
- if (receiver->IsStringWrapper() &&
+ if (lookup_start_object->IsStringWrapper() &&
*lookup->name() == roots.length_string()) {
TRACE_HANDLER_STATS(isolate(), LoadIC_StringWrapperLength);
return BUILTIN_CODE(isolate(), LoadIC_StringWrapperLength);
}
// Use specialized code for getting prototype of functions.
- if (receiver->IsJSFunction() &&
+ if (lookup_start_object->IsJSFunction() &&
*lookup->name() == roots.prototype_string() &&
- !JSFunction::cast(*receiver).PrototypeRequiresRuntimeLookup()) {
+ !JSFunction::cast(*lookup_start_object)
+ .PrototypeRequiresRuntimeLookup()) {
TRACE_HANDLER_STATS(isolate(), LoadIC_FunctionPrototypeStub);
return BUILTIN_CODE(isolate(), LoadIC_FunctionPrototype);
}
@@ -808,8 +812,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
bool holder_is_lookup_start_object;
if (lookup->state() != LookupIterator::JSPROXY) {
holder = lookup->GetHolder<JSObject>();
- holder_is_lookup_start_object =
- lookup->lookup_start_object().is_identical_to(holder);
+ holder_is_lookup_start_object = lookup_start_object.is_identical_to(holder);
}
switch (lookup->state()) {
@@ -926,12 +929,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
isolate(), map, holder, smi_handler,
MaybeObjectHandle::Weak(lookup->GetPropertyCell()));
} else {
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- // TODO(v8:11167) remove once OrderedNameDictionary supported.
- smi_handler = LoadHandler::LoadSlow(isolate());
- } else {
- smi_handler = LoadHandler::LoadNormal(isolate());
- }
+ smi_handler = LoadHandler::LoadNormal(isolate());
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNormalDH);
if (holder_is_lookup_start_object) return smi_handler;
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNormalFromPrototypeDH);
@@ -943,6 +941,13 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(accessors);
+ if (info->replace_on_access()) {
+ set_slow_stub_reason(
+ "getter needs to be reconfigured to data property");
+ TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
+ return LoadHandler::LoadSlow(isolate());
+ }
+
if (v8::ToCData<Address>(info->getter()) == kNullAddress ||
!AccessorInfo::IsCompatibleReceiverMap(info, map) ||
!holder->HasFastProperties() ||
@@ -974,12 +979,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
isolate(), map, holder, smi_handler,
MaybeObjectHandle::Weak(lookup->GetPropertyCell()));
}
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- // TODO(v8:11167) remove once OrderedNameDictionary supported.
- smi_handler = LoadHandler::LoadSlow(isolate());
- } else {
- smi_handler = LoadHandler::LoadNormal(isolate());
- }
+ smi_handler = LoadHandler::LoadNormal(isolate());
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNormalDH);
if (holder_is_lookup_start_object) return smi_handler;
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNormalFromPrototypeDH);
@@ -1301,10 +1301,14 @@ bool IntPtrKeyToSize(intptr_t index, Handle<HeapObject> receiver, size_t* out) {
return false;
}
#if V8_HOST_ARCH_64_BIT
- // On 32-bit platforms, any intptr_t is less than kMaxArrayIndex.
- if (index > JSArray::kMaxArrayIndex && !receiver->IsJSTypedArray()) {
+ if (index > JSObject::kMaxElementIndex && !receiver->IsJSTypedArray()) {
return false;
}
+#else
+ // On 32-bit platforms, any intptr_t is less than kMaxElementIndex.
+ STATIC_ASSERT(
+ static_cast<double>(std::numeric_limits<decltype(index)>::max()) <=
+ static_cast<double>(JSObject::kMaxElementIndex));
#endif
*out = static_cast<size_t>(index);
return true;
@@ -1810,11 +1814,8 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
DCHECK(holder.is_identical_to(receiver));
DCHECK_IMPLIES(!V8_DICT_PROPERTY_CONST_TRACKING_BOOL,
lookup->constness() == PropertyConstness::kMutable);
- // TODO(v8:11167) don't create slow hanlder once OrderedNameDictionary
- // supported.
- Handle<Smi> handler = V8_DICT_MODE_PROTOTYPES_BOOL
- ? StoreHandler::StoreSlow(isolate())
- : StoreHandler::StoreNormal(isolate());
+
+ Handle<Smi> handler = StoreHandler::StoreNormal(isolate());
return MaybeObjectHandle(handler);
}
@@ -2718,7 +2719,7 @@ static bool CanFastCloneObject(Handle<Map> map) {
return false;
}
- DescriptorArray descriptors = map->instance_descriptors(kRelaxedLoad);
+ DescriptorArray descriptors = map->instance_descriptors();
for (InternalIndex i : map->IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(i);
Name key = descriptors.GetKey(i);
@@ -2768,7 +2769,7 @@ static Handle<Map> FastCloneObjectMap(Isolate* isolate, Handle<Map> source_map,
}
Handle<DescriptorArray> source_descriptors(
- source_map->instance_descriptors(kRelaxedLoad), isolate);
+ source_map->instance_descriptors(isolate), isolate);
int size = source_map->NumberOfOwnDescriptors();
int slack = 0;
Handle<DescriptorArray> descriptors = DescriptorArray::CopyForFastObjectClone(
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index c741298d2cf..b07ea644836 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -555,8 +555,8 @@ void KeyedStoreGenericAssembler::EmitGenericElementStore(
// Out-of-capacity accesses (index >= capacity) jump here. Additionally,
// an ElementsKind transition might be necessary.
- // The index can also be negative or larger than kMaxArrayIndex at this point!
- // Jump to the runtime in that case to convert it to a named property.
+ // The index can also be negative or larger than kMaxElementIndex at this
+ // point! Jump to the runtime in that case to convert it to a named property.
BIND(&if_grow);
{
Comment("Grow backing store");
@@ -638,7 +638,7 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
BIND(&found_dict);
{
- TNode<NameDictionary> dictionary = CAST(var_meta_storage.value());
+ TNode<PropertyDictionary> dictionary = CAST(var_meta_storage.value());
TNode<IntPtrT> entry = var_entry.value();
TNode<Uint32T> details = LoadDetailsByKeyIndex(dictionary, entry);
JumpIfDataProperty(details, &ok_to_write, readonly);
@@ -827,19 +827,15 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&dictionary_properties);
{
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- // TODO(v8:11167, v8:11177) Only here due to SetDataProperties workaround.
- GotoIf(Int32TrueConstant(), slow);
- }
Comment("dictionary property store");
// We checked for LAST_CUSTOM_ELEMENTS_RECEIVER before, which rules out
// seeing global objects here (which would need special handling).
TVARIABLE(IntPtrT, var_name_index);
Label dictionary_found(this, &var_name_index), not_found(this);
- TNode<NameDictionary> properties = CAST(LoadSlowProperties(receiver));
- NameDictionaryLookup<NameDictionary>(properties, name, &dictionary_found,
- &var_name_index, &not_found);
+ TNode<PropertyDictionary> properties = CAST(LoadSlowProperties(receiver));
+ NameDictionaryLookup<PropertyDictionary>(
+ properties, name, &dictionary_found, &var_name_index, &not_found);
BIND(&dictionary_found);
{
Label check_const(this), overwrite(this), done(this);
@@ -877,8 +873,8 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&overwrite);
{
CheckForAssociatedProtector(name, slow);
- StoreValueByKeyIndex<NameDictionary>(properties, var_name_index.value(),
- p->value());
+ StoreValueByKeyIndex<PropertyDictionary>(
+ properties, var_name_index.value(), p->value());
Goto(&done);
}
@@ -916,8 +912,8 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
}
Label add_dictionary_property_slow(this);
InvalidateValidityCellIfPrototype(receiver_map, bitfield3);
- Add<NameDictionary>(properties, name, p->value(),
- &add_dictionary_property_slow);
+ Add<PropertyDictionary>(properties, name, p->value(),
+ &add_dictionary_property_slow);
exit_point->Return(p->value());
BIND(&add_dictionary_property_slow);
diff --git a/deps/v8/src/init/OWNERS b/deps/v8/src/init/OWNERS
index 8e374f760cf..4f3a734ba61 100644
--- a/deps/v8/src/init/OWNERS
+++ b/deps/v8/src/init/OWNERS
@@ -1,11 +1,8 @@
-ahaas@chromium.org
-bmeurer@chromium.org
ftang@chromium.org
gsathya@chromium.org
ishell@chromium.org
jgruber@chromium.org
jkummerow@chromium.org
marja@chromium.org
-mathias@chromium.org
ulan@chromium.org
verwaest@chromium.org
diff --git a/deps/v8/src/init/bootstrapper.cc b/deps/v8/src/init/bootstrapper.cc
index 46c3a1a66ba..7b1e7a196bd 100644
--- a/deps/v8/src/init/bootstrapper.cc
+++ b/deps/v8/src/init/bootstrapper.cc
@@ -60,11 +60,15 @@
#include "src/objects/ordered-hash-table.h"
#include "src/objects/property-cell.h"
#include "src/objects/slots-inl.h"
+#include "src/objects/swiss-name-dictionary-inl.h"
#include "src/objects/templates.h"
#include "src/snapshot/snapshot.h"
-#include "src/wasm/wasm-js.h"
#include "src/zone/zone-hashmap.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-js.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace internal {
@@ -445,7 +449,7 @@ V8_NOINLINE Handle<JSFunction> CreateFunctionForBuiltinWithPrototype(
if (!IsResumableFunction(info->kind()) && prototype->IsTheHole(isolate)) {
prototype = factory->NewFunctionPrototype(result);
}
- JSFunction::SetInitialMap(result, initial_map, prototype);
+ JSFunction::SetInitialMap(isolate, result, initial_map, prototype);
return result;
}
@@ -1173,7 +1177,7 @@ namespace {
void ReplaceAccessors(Isolate* isolate, Handle<Map> map, Handle<String> name,
PropertyAttributes attributes,
Handle<AccessorPair> accessor_pair) {
- DescriptorArray descriptors = map->instance_descriptors(kRelaxedLoad);
+ DescriptorArray descriptors = map->instance_descriptors(isolate);
InternalIndex entry = descriptors.SearchWithCache(isolate, *name, *map);
Descriptor d = Descriptor::AccessorConstant(name, accessor_pair, attributes);
descriptors.Replace(entry, &d);
@@ -2069,6 +2073,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kStringPrototypeRepeat, 1, true);
SimpleInstallFunction(isolate_, prototype, "replace",
Builtins::kStringPrototypeReplace, 2, true);
+ SimpleInstallFunction(isolate(), prototype, "replaceAll",
+ Builtins::kStringPrototypeReplaceAll, 2, true);
SimpleInstallFunction(isolate_, prototype, "search",
Builtins::kStringPrototypeSearch, 1, true);
SimpleInstallFunction(isolate_, prototype, "slice",
@@ -3295,6 +3301,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kAtomicsIsLockFree, 1, true);
SimpleInstallFunction(isolate_, atomics_object, "wait",
Builtins::kAtomicsWait, 4, true);
+ SimpleInstallFunction(isolate(), atomics_object, "waitAsync",
+ Builtins::kAtomicsWaitAsync, 4, true);
SimpleInstallFunction(isolate_, atomics_object, "notify",
Builtins::kAtomicsNotify, 3, true);
}
@@ -3854,61 +3862,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->set_bound_function_with_constructor_map(*map);
}
- { // -- F i n a l i z a t i o n R e g i s t r y
- Handle<JSFunction> finalization_registry_fun = InstallFunction(
- isolate_, global, factory->FinalizationRegistry_string(),
- JS_FINALIZATION_REGISTRY_TYPE, JSFinalizationRegistry::kHeaderSize, 0,
- factory->the_hole_value(), Builtins::kFinalizationRegistryConstructor);
- InstallWithIntrinsicDefaultProto(
- isolate_, finalization_registry_fun,
- Context::JS_FINALIZATION_REGISTRY_FUNCTION_INDEX);
-
- finalization_registry_fun->shared().DontAdaptArguments();
- finalization_registry_fun->shared().set_length(1);
-
- Handle<JSObject> finalization_registry_prototype(
- JSObject::cast(finalization_registry_fun->instance_prototype()),
- isolate());
-
- InstallToStringTag(isolate_, finalization_registry_prototype,
- factory->FinalizationRegistry_string());
-
- SimpleInstallFunction(isolate_, finalization_registry_prototype, "register",
- Builtins::kFinalizationRegistryRegister, 2, false);
-
- SimpleInstallFunction(isolate_, finalization_registry_prototype,
- "unregister",
- Builtins::kFinalizationRegistryUnregister, 1, false);
-
- // The cleanupSome function is created but not exposed, as it is used
- // internally by InvokeFinalizationRegistryCleanupFromTask.
- //
- // It is exposed by FLAG_harmony_weak_refs_with_cleanup_some.
- Handle<JSFunction> cleanup_some_fun = SimpleCreateFunction(
- isolate_, factory->InternalizeUtf8String("cleanupSome"),
- Builtins::kFinalizationRegistryPrototypeCleanupSome, 0, false);
- native_context()->set_finalization_registry_cleanup_some(*cleanup_some_fun);
- }
-
- { // -- W e a k R e f
- Handle<JSFunction> weak_ref_fun = InstallFunction(
- isolate_, global, "WeakRef", JS_WEAK_REF_TYPE, JSWeakRef::kHeaderSize,
- 0, factory->the_hole_value(), Builtins::kWeakRefConstructor);
- InstallWithIntrinsicDefaultProto(isolate_, weak_ref_fun,
- Context::JS_WEAK_REF_FUNCTION_INDEX);
-
- weak_ref_fun->shared().DontAdaptArguments();
- weak_ref_fun->shared().set_length(1);
-
- Handle<JSObject> weak_ref_prototype(
- JSObject::cast(weak_ref_fun->instance_prototype()), isolate());
-
- InstallToStringTag(isolate_, weak_ref_prototype, factory->WeakRef_string());
-
- SimpleInstallFunction(isolate_, weak_ref_prototype, "deref",
- Builtins::kWeakRefDeref, 0, true);
- }
-
{ // --- sloppy arguments map
Handle<String> arguments_string = factory->Arguments_string();
Handle<JSFunction> function = CreateFunctionForBuiltinWithPrototype(
@@ -4369,24 +4322,18 @@ void Genesis::InitializeCallSiteBuiltins() {
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_sequence)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_top_level_await)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_logical_assignment)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_assertions)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_private_brand_checks)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_class_static_blocks)
#ifdef V8_INTL_SUPPORT
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_best_fit_matcher)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_displaynames_date_types)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_dateformat_day_period)
#endif // V8_INTL_SUPPORT
#undef EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE
-void Genesis::InitializeGlobal_harmony_atomics_waitasync() {
- if (!FLAG_harmony_atomics_waitasync) return;
- SimpleInstallFunction(isolate(), isolate()->atomics_object(), "waitAsync",
- Builtins::kAtomicsWaitAsync, 4, true);
-}
-
void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
if (!FLAG_harmony_sharedarraybuffer) return;
@@ -4406,8 +4353,75 @@ void Genesis::InitializeGlobal_harmony_atomics() {
InstallToStringTag(isolate_, isolate()->atomics_object(), "Atomics");
}
+void Genesis::InitializeGlobal_harmony_weak_refs() {
+ if (!FLAG_harmony_weak_refs) return;
+
+ Factory* factory = isolate()->factory();
+ Handle<JSGlobalObject> global(native_context()->global_object(), isolate());
+
+ {
+ // Create %FinalizationRegistry%
+ Handle<JSFunction> finalization_registry_fun = InstallFunction(
+ isolate(), global, factory->FinalizationRegistry_string(),
+ JS_FINALIZATION_REGISTRY_TYPE, JSFinalizationRegistry::kHeaderSize, 0,
+ factory->the_hole_value(), Builtins::kFinalizationRegistryConstructor);
+ InstallWithIntrinsicDefaultProto(
+ isolate(), finalization_registry_fun,
+ Context::JS_FINALIZATION_REGISTRY_FUNCTION_INDEX);
+
+ finalization_registry_fun->shared().DontAdaptArguments();
+ finalization_registry_fun->shared().set_length(1);
+
+ Handle<JSObject> finalization_registry_prototype(
+ JSObject::cast(finalization_registry_fun->instance_prototype()),
+ isolate());
+
+ InstallToStringTag(isolate(), finalization_registry_prototype,
+ factory->FinalizationRegistry_string());
+
+ SimpleInstallFunction(isolate(), finalization_registry_prototype,
+ "register", Builtins::kFinalizationRegistryRegister,
+ 2, false);
+
+ SimpleInstallFunction(isolate(), finalization_registry_prototype,
+ "unregister",
+ Builtins::kFinalizationRegistryUnregister, 1, false);
+
+ // The cleanupSome function is created but not exposed, as it is used
+ // internally by InvokeFinalizationRegistryCleanupFromTask.
+ //
+ // It is exposed by FLAG_harmony_weak_refs_with_cleanup_some.
+ Handle<JSFunction> cleanup_some_fun = SimpleCreateFunction(
+ isolate(), factory->InternalizeUtf8String("cleanupSome"),
+ Builtins::kFinalizationRegistryPrototypeCleanupSome, 0, false);
+ native_context()->set_finalization_registry_cleanup_some(*cleanup_some_fun);
+ }
+ {
+ // Create %WeakRef%
+ Handle<JSFunction> weak_ref_fun = InstallFunction(
+ isolate(), global, factory->WeakRef_string(), JS_WEAK_REF_TYPE,
+ JSWeakRef::kHeaderSize, 0, factory->the_hole_value(),
+ Builtins::kWeakRefConstructor);
+ InstallWithIntrinsicDefaultProto(isolate(), weak_ref_fun,
+ Context::JS_WEAK_REF_FUNCTION_INDEX);
+
+ weak_ref_fun->shared().DontAdaptArguments();
+ weak_ref_fun->shared().set_length(1);
+
+ Handle<JSObject> weak_ref_prototype(
+ JSObject::cast(weak_ref_fun->instance_prototype()), isolate());
+
+ InstallToStringTag(isolate(), weak_ref_prototype,
+ factory->WeakRef_string());
+
+ SimpleInstallFunction(isolate(), weak_ref_prototype, "deref",
+ Builtins::kWeakRefDeref, 0, true);
+ }
+}
+
void Genesis::InitializeGlobal_harmony_weak_refs_with_cleanup_some() {
if (!FLAG_harmony_weak_refs_with_cleanup_some) return;
+ DCHECK(FLAG_harmony_weak_refs);
Handle<JSFunction> finalization_registry_fun =
isolate()->js_finalization_registry_fun();
@@ -4451,17 +4465,6 @@ void Genesis::InitializeGlobal_harmony_regexp_match_indices() {
native_context()->set_regexp_prototype_map(prototype->map());
}
-void Genesis::InitializeGlobal_harmony_string_replaceall() {
- if (!FLAG_harmony_string_replaceall) return;
-
- Handle<JSFunction> string_fun(native_context()->string_function(), isolate());
- Handle<JSObject> string_prototype(
- JSObject::cast(string_fun->instance_prototype()), isolate());
-
- SimpleInstallFunction(isolate(), string_prototype, "replaceAll",
- Builtins::kStringPrototypeReplaceAll, 2, true);
-}
-
void Genesis::InitializeGlobal_regexp_linear_flag() {
if (!FLAG_enable_experimental_regexp_engine) return;
@@ -4932,6 +4935,7 @@ bool Genesis::InstallSpecialObjects(Isolate* isolate,
Handle<Smi> stack_trace_limit(Smi::FromInt(FLAG_stack_trace_limit), isolate);
JSObject::AddProperty(isolate, Error, name, stack_trace_limit, NONE);
+#if V8_ENABLE_WEBASSEMBLY
if (FLAG_expose_wasm) {
// Install the internal data structures into the isolate and expose on
// the global object.
@@ -4941,6 +4945,7 @@ bool Genesis::InstallSpecialObjects(Isolate* isolate,
// translated to Wasm to work correctly.
WasmJs::Install(isolate, false);
}
+#endif // V8_ENABLE_WEBASSEMBLY
return true;
}
@@ -5134,7 +5139,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
// in the snapshotted global object.
if (from->HasFastProperties()) {
Handle<DescriptorArray> descs = Handle<DescriptorArray>(
- from->map().instance_descriptors(kRelaxedLoad), isolate());
+ from->map().instance_descriptors(isolate()), isolate());
for (InternalIndex i : from->map().IterateOwnDescriptors()) {
PropertyDetails details = descs->GetDetails(i);
if (details.location() == kField) {
@@ -5188,12 +5193,12 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
JSObject::AddProperty(isolate(), to, key, value, details.attributes());
}
- } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ } else if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
// Copy all keys and values in enumeration order.
- Handle<OrderedNameDictionary> properties = Handle<OrderedNameDictionary>(
- from->property_dictionary_ordered(), isolate());
+ Handle<SwissNameDictionary> properties = Handle<SwissNameDictionary>(
+ from->property_dictionary_swiss(), isolate());
ReadOnlyRoots roots(isolate());
- for (InternalIndex entry : properties->IterateEntries()) {
+ for (InternalIndex entry : properties->IterateEntriesOrdered()) {
Object raw_key;
if (!properties->ToKey(roots, entry, &raw_key)) continue;
@@ -5286,7 +5291,7 @@ Handle<Map> Genesis::CreateInitialMapForArraySubclass(int size,
{
JSFunction array_function = native_context()->array_function();
Handle<DescriptorArray> array_descriptors(
- array_function.initial_map().instance_descriptors(kRelaxedLoad),
+ array_function.initial_map().instance_descriptors(isolate()),
isolate());
Handle<String> length = factory()->length_string();
InternalIndex old = array_descriptors->SearchWithCache(
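
Editor's note on the bootstrapper hunks above: %FinalizationRegistry% and %WeakRef% installation moves behind FLAG_harmony_weak_refs, and each constructor is also stored in a fixed native-context slot (JS_FINALIZATION_REGISTRY_FUNCTION_INDEX, JS_WEAK_REF_FUNCTION_INDEX) so later, flag-dependent initializers can fetch it back instead of looking it up on the global object again. Below is a minimal, self-contained sketch of that slot idea; it is not V8's Context layout, and every name in it is illustrative.

    // Minimal sketch of the native-context-slot idea: one installer writes a
    // constructor into a well-known slot, a later flag-gated installer reads
    // it back. Illustrative only; this is not V8's Context layout.
    #include <array>
    #include <cassert>
    #include <functional>

    enum ContextSlot { kFinalizationRegistrySlot, kWeakRefSlot, kSlotCount };

    struct NativeContext {
      std::array<std::function<void()>, kSlotCount> slots;
    };

    // ~ InitializeGlobal_harmony_weak_refs(): create constructors and
    // remember them in well-known slots.
    void InstallWeakRefs(NativeContext* ctx) {
      ctx->slots[kFinalizationRegistrySlot] = [] { /* build %FinalizationRegistry% */ };
      ctx->slots[kWeakRefSlot] = [] { /* build %WeakRef% */ };
    }

    // ~ InitializeGlobal_harmony_weak_refs_with_cleanup_some(): relies on the
    // base feature having filled the slot first (hence its DCHECK above).
    void InstallCleanupSome(NativeContext* ctx, bool flag_cleanup_some) {
      if (!flag_cleanup_some) return;
      assert(ctx->slots[kFinalizationRegistrySlot] != nullptr);
      // ... attach cleanupSome to the prototype reachable from that slot ...
    }
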
diff --git a/deps/v8/src/init/heap-symbols.h b/deps/v8/src/init/heap-symbols.h
index 56b51314ab4..71eb0545965 100644
--- a/deps/v8/src/init/heap-symbols.h
+++ b/deps/v8/src/init/heap-symbols.h
@@ -517,6 +517,8 @@
F(BACKGROUND_FULL_ARRAY_BUFFER_SWEEP) \
F(BACKGROUND_COLLECTION) \
F(BACKGROUND_UNMAPPER) \
+ F(BACKGROUND_UNPARK) \
+ F(BACKGROUND_SAFEPOINT) \
F(MC_BACKGROUND_EVACUATE_COPY) \
F(MC_BACKGROUND_EVACUATE_UPDATE_POINTERS) \
F(MC_BACKGROUND_MARKING) \
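
The F(...) entries added above extend an X-macro list: the same list is expanded once into enum values and once into printable scope names for the GC tracer, so a single new line updates both tables in lockstep. A generic sketch of that pattern follows, with an invented list; it does not reproduce V8's actual macros.

    // Generic X-macro sketch: the F(...) list is consumed twice, once to
    // build an enum and once to build the matching name table.
    #include <cstdio>

    #define TRACER_SCOPES(F)    \
      F(BACKGROUND_UNPARK)      \
      F(BACKGROUND_SAFEPOINT)   \
      F(MC_BACKGROUND_MARKING)

    enum Scope {
    #define DEFINE_ENUM(name) k##name,
      TRACER_SCOPES(DEFINE_ENUM)
    #undef DEFINE_ENUM
      kNumberOfScopes
    };

    static const char* kScopeNames[] = {
    #define DEFINE_NAME(name) #name,
        TRACER_SCOPES(DEFINE_NAME)
    #undef DEFINE_NAME
    };

    int main() {
      // Adding one F(...) line extends both tables at once.
      std::printf("%d scopes, first is %s\n", kNumberOfScopes,
                  kScopeNames[kBACKGROUND_UNPARK]);
    }
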
diff --git a/deps/v8/src/init/isolate-allocator.cc b/deps/v8/src/init/isolate-allocator.cc
index 01ae416181c..5db27d288b1 100644
--- a/deps/v8/src/init/isolate-allocator.cc
+++ b/deps/v8/src/init/isolate-allocator.cc
@@ -59,8 +59,8 @@ Address IsolateAllocator::InitReservation() {
// Reserve a |4Gb + kIsolateRootBiasPageSize| region such that the
// reservation address plus |kIsolateRootBiasPageSize| is 4Gb aligned.
const size_t reservation_size =
- kPtrComprHeapReservationSize + kIsolateRootBiasPageSize;
- const size_t base_alignment = kPtrComprIsolateRootAlignment;
+ kPtrComprCageReservationSize + kIsolateRootBiasPageSize;
+ const size_t base_alignment = kPtrComprCageBaseAlignment;
const int kMaxAttempts = 4;
for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
@@ -137,11 +137,11 @@ void IsolateAllocator::CommitPagesForIsolate(Address heap_reservation_address) {
GetIsolateRootBiasPageSize(platform_page_allocator);
Address isolate_root = heap_reservation_address + kIsolateRootBiasPageSize;
- CHECK(IsAligned(isolate_root, kPtrComprIsolateRootAlignment));
+ CHECK(IsAligned(isolate_root, kPtrComprCageBaseAlignment));
CHECK(reservation_.InVM(
heap_reservation_address,
- kPtrComprHeapReservationSize + kIsolateRootBiasPageSize));
+ kPtrComprCageReservationSize + kIsolateRootBiasPageSize));
// Simplify BoundedPageAllocator's life by configuring it to use the same page
// size as the Heap will use (MemoryChunk::kPageSize).
@@ -149,7 +149,7 @@ void IsolateAllocator::CommitPagesForIsolate(Address heap_reservation_address) {
platform_page_allocator->AllocatePageSize());
page_allocator_instance_ = std::make_unique<base::BoundedPageAllocator>(
- platform_page_allocator, isolate_root, kPtrComprHeapReservationSize,
+ platform_page_allocator, isolate_root, kPtrComprCageReservationSize,
page_size);
page_allocator_ = page_allocator_instance_.get();
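
The renamed constants above describe the pointer-compression cage: the reservation must start at an address aligned to kPtrComprCageBaseAlignment, and the CHECK(IsAligned(...)) relies on that alignment being a power of two. The sketch below shows just that arithmetic with illustrative values; it does not use V8's real constants.

    // Sketch of the alignment arithmetic behind the CHECK above. The values
    // are illustrative; only the power-of-two bit tricks are the point.
    #include <cassert>
    #include <cstdint>

    constexpr uint64_t kCageBaseAlignment = uint64_t{4} * 1024 * 1024 * 1024;  // 4 GB

    constexpr bool IsAligned(uint64_t value, uint64_t alignment) {
      // Valid only for power-of-two alignments.
      return (value & (alignment - 1)) == 0;
    }

    constexpr uint64_t RoundUp(uint64_t value, uint64_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      static_assert((kCageBaseAlignment & (kCageBaseAlignment - 1)) == 0,
                    "alignment must be a power of two");
      uint64_t hint = 0x1'2345'6789;                      // arbitrary reservation hint
      uint64_t base = RoundUp(hint, kCageBaseAlignment);  // first aligned address >= hint
      assert(IsAligned(base, kCageBaseAlignment));
      return 0;
    }
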
diff --git a/deps/v8/src/init/v8.cc b/deps/v8/src/init/v8.cc
index 921efe631b8..fbf120b1b61 100644
--- a/deps/v8/src/init/v8.cc
+++ b/deps/v8/src/init/v8.cc
@@ -26,7 +26,14 @@
#include "src/profiler/heap-profiler.h"
#include "src/snapshot/snapshot.h"
#include "src/tracing/tracing-category-observer.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-engine.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
+#if defined(V8_OS_WIN) && defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
+#include "src/diagnostics/system-jit-win.h"
+#endif
namespace v8 {
namespace internal {
@@ -46,7 +53,9 @@ bool V8::Initialize() {
}
void V8::TearDown() {
+#if V8_ENABLE_WEBASSEMBLY
wasm::WasmEngine::GlobalTearDown();
+#endif // V8_ENABLE_WEBASSEMBLY
#if defined(USE_SIMULATOR)
Simulator::GlobalTearDown();
#endif
@@ -118,13 +127,11 @@ void V8::InitializeOncePerProcessImpl() {
// continue exposing wasm on correctness fuzzers even in jitless mode.
// TODO(jgruber): Remove this once / if wasm can run without executable
// memory.
- if (FLAG_jitless && !FLAG_correctness_fuzzer_suppressions) {
#if V8_ENABLE_WEBASSEMBLY
+ if (FLAG_jitless && !FLAG_correctness_fuzzer_suppressions) {
FLAG_expose_wasm = false;
-#else
- STATIC_ASSERT(!FLAG_expose_wasm);
-#endif
}
+#endif
if (FLAG_regexp_interpret_all && FLAG_regexp_tier_up) {
// Turning off the tier-up strategy, because the --regexp-interpret-all and
@@ -153,7 +160,9 @@ void V8::InitializeOncePerProcessImpl() {
ElementsAccessor::InitializeOncePerProcess();
Bootstrapper::InitializeOncePerProcess();
CallDescriptors::InitializeOncePerProcess();
+#if V8_ENABLE_WEBASSEMBLY
wasm::WasmEngine::InitializeOncePerProcess();
+#endif // V8_ENABLE_WEBASSEMBLY
}
void V8::InitializeOncePerProcess() {
@@ -166,10 +175,21 @@ void V8::InitializePlatform(v8::Platform* platform) {
platform_ = platform;
v8::base::SetPrintStackTrace(platform_->GetStackTracePrinter());
v8::tracing::TracingCategoryObserver::SetUp();
+#if defined(V8_OS_WIN) && defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
+ if (FLAG_enable_system_instrumentation) {
+ // TODO(sartang@microsoft.com): Move to platform specific diagnostics object
+ v8::internal::ETWJITInterface::Register();
+ }
+#endif
}
void V8::ShutdownPlatform() {
CHECK(platform_);
+#if defined(V8_OS_WIN) && defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
+ if (FLAG_enable_system_instrumentation) {
+ v8::internal::ETWJITInterface::Unregister();
+ }
+#endif
v8::tracing::TracingCategoryObserver::TearDown();
v8::base::SetPrintStackTrace(nullptr);
platform_ = nullptr;
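
The v8.cc hunks above add a Register()/Unregister() pair for Windows system instrumentation, wrapped in the same #if guard and the same runtime flag on both sides, so ShutdownPlatform() undoes exactly what InitializePlatform() did. A compressed sketch of that pairing follows; ETWJITInterface and the flag name are stand-ins here, not the real declarations. An RAII wrapper would enforce the pairing automatically, but platform setup and teardown are driven by the embedder at arbitrary times, which is presumably why explicit calls are used.

    // Sketch of the paired setup/teardown pattern: identical compile-time
    // guard and runtime flag on both sides, or teardown would be skipped.
    #define SYSTEM_INSTRUMENTATION 1  // stands in for V8_OS_WIN && V8_ENABLE_SYSTEM_INSTRUMENTATION

    namespace sketch {

    bool flag_enable_system_instrumentation = true;

    struct ETWJITInterface {
      static void Register() { /* hook JIT events */ }
      static void Unregister() { /* unhook JIT events */ }
    };

    void InitializePlatform() {
    #if SYSTEM_INSTRUMENTATION
      if (flag_enable_system_instrumentation) ETWJITInterface::Register();
    #endif
    }

    void ShutdownPlatform() {
    #if SYSTEM_INSTRUMENTATION
      if (flag_enable_system_instrumentation) ETWJITInterface::Unregister();
    #endif
    }

    }  // namespace sketch
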
diff --git a/deps/v8/src/inspector/OWNERS b/deps/v8/src/inspector/OWNERS
index 1dea2a7f543..49ab6a96335 100644
--- a/deps/v8/src/inspector/OWNERS
+++ b/deps/v8/src/inspector/OWNERS
@@ -1,9 +1,8 @@
-alph@chromium.org
bmeurer@chromium.org
caseq@chromium.org
-dgozman@chromium.org
-kozyatinskiy@chromium.org
-pfeldman@chromium.org
+kimanh@chromium.org
+leese@chromium.org
+pfaffe@chromium.org
szuend@chromium.org
yangguo@chromium.org
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index 4e0b83952e2..f99d57d33e3 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -66,6 +66,7 @@ static const intptr_t kBreakpointHintMaxSearchOffset = 80 * 10;
// the maximum length of a message in mojo (see https://crbug.com/1105172).
static const size_t kMaxNumBreakpoints = 1000;
+#if V8_ENABLE_WEBASSEMBLY
// TODO(1099680): getScriptSource and getWasmBytecode return Wasm wire bytes
// as protocol::Binary, which is encoded as JSON string in the communication
// to the DevTools front-end and hence leads to either crashing the renderer
@@ -73,9 +74,11 @@ static const size_t kMaxNumBreakpoints = 1000;
// allow arbitrarily big Wasm byte sequences here. Ideally we would find a
// different way to transfer the wire bytes (middle- to long-term), but as a
// short-term solution, we should at least not crash.
-static const size_t kWasmBytecodeMaxLength = (v8::String::kMaxLength / 4) * 3;
-static const char kWasmBytecodeExceedsTransferLimit[] =
+static constexpr size_t kWasmBytecodeMaxLength =
+ (v8::String::kMaxLength / 4) * 3;
+static constexpr const char kWasmBytecodeExceedsTransferLimit[] =
"WebAssembly bytecode exceeds the transfer limit";
+#endif // V8_ENABLE_WEBASSEMBLY
namespace {
@@ -191,6 +194,14 @@ void adjustBreakpointLocation(const V8DebuggerScript& script,
int* columnNumber) {
if (*lineNumber < script.startLine() || *lineNumber > script.endLine())
return;
+ if (*lineNumber == script.startLine() &&
+ *columnNumber < script.startColumn()) {
+ return;
+ }
+ if (*lineNumber == script.endLine() && script.endColumn() < *columnNumber) {
+ return;
+ }
+
if (hint.isEmpty()) return;
intptr_t sourceOffset = script.offset(*lineNumber, *columnNumber);
if (sourceOffset == V8DebuggerScript::kNoOffset) return;
@@ -499,6 +510,8 @@ Response V8DebuggerAgentImpl::setBreakpointByUrl(
Maybe<int> optionalColumnNumber, Maybe<String16> optionalCondition,
String16* outBreakpointId,
std::unique_ptr<protocol::Array<protocol::Debugger::Location>>* locations) {
+ if (!enabled()) return Response::ServerError(kDebuggerNotEnabled);
+
*locations = std::make_unique<Array<protocol::Debugger::Location>>();
int specified = (optionalURL.isJust() ? 1 : 0) +
@@ -587,6 +600,8 @@ Response V8DebuggerAgentImpl::setBreakpoint(
String16 breakpointId = generateBreakpointId(
BreakpointType::kByScriptId, location->getScriptId(),
location->getLineNumber(), location->getColumnNumber(0));
+ if (!enabled()) return Response::ServerError(kDebuggerNotEnabled);
+
if (m_breakpointIdToDebuggerBreakpointIds.find(breakpointId) !=
m_breakpointIdToDebuggerBreakpointIds.end()) {
return Response::ServerError(
@@ -605,6 +620,8 @@ Response V8DebuggerAgentImpl::setBreakpoint(
Response V8DebuggerAgentImpl::setBreakpointOnFunctionCall(
const String16& functionObjectId, Maybe<String16> optionalCondition,
String16* outBreakpointId) {
+ if (!enabled()) return Response::ServerError(kDebuggerNotEnabled);
+
InjectedScript::ObjectScope scope(m_session, functionObjectId);
Response response = scope.initialize();
if (!response.IsSuccess()) return response;
@@ -706,9 +723,11 @@ void V8DebuggerAgentImpl::removeBreakpointImpl(
return;
}
for (const auto& id : debuggerBreakpointIdsIterator->second) {
+#if V8_ENABLE_WEBASSEMBLY
for (auto& script : scripts) {
script->removeWasmBreakpoint(id);
}
+#endif // V8_ENABLE_WEBASSEMBLY
v8::debug::RemoveBreakpoint(m_isolate, id);
m_debuggerBreakpointIdToBreakpointId.erase(id);
}
@@ -910,6 +929,13 @@ V8DebuggerAgentImpl::setBreakpointImpl(const String16& breakpointId,
if (lineNumber < script->startLine() || script->endLine() < lineNumber) {
return nullptr;
}
+ if (lineNumber == script->startLine() &&
+ columnNumber < script->startColumn()) {
+ return nullptr;
+ }
+ if (lineNumber == script->endLine() && script->endColumn() < columnNumber) {
+ return nullptr;
+ }
v8::debug::BreakpointId debuggerBreakpointId;
v8::debug::Location location(lineNumber, columnNumber);
@@ -1043,6 +1069,7 @@ Response V8DebuggerAgentImpl::getScriptSource(
if (it == m_scripts.end())
return Response::ServerError("No script for id: " + scriptId.utf8());
*scriptSource = it->second->source(0);
+#if V8_ENABLE_WEBASSEMBLY
v8::MemorySpan<const uint8_t> span;
if (it->second->wasmBytecode().To(&span)) {
if (span.size() > kWasmBytecodeMaxLength) {
@@ -1050,11 +1077,13 @@ Response V8DebuggerAgentImpl::getScriptSource(
}
*bytecode = protocol::Binary::fromSpan(span.data(), span.size());
}
+#endif // V8_ENABLE_WEBASSEMBLY
return Response::Success();
}
Response V8DebuggerAgentImpl::getWasmBytecode(const String16& scriptId,
protocol::Binary* bytecode) {
+#if V8_ENABLE_WEBASSEMBLY
if (!enabled()) return Response::ServerError(kDebuggerNotEnabled);
ScriptsMap::iterator it = m_scripts.find(scriptId);
if (it == m_scripts.end())
@@ -1068,6 +1097,9 @@ Response V8DebuggerAgentImpl::getWasmBytecode(const String16& scriptId,
}
*bytecode = protocol::Binary::fromSpan(span.data(), span.size());
return Response::Success();
+#else
+ return Response::ServerError("WebAssembly is disabled");
+#endif // V8_ENABLE_WEBASSEMBLY
}
void V8DebuggerAgentImpl::pushBreakDetails(
@@ -1505,6 +1537,7 @@ static String16 getScriptLanguage(const V8DebuggerScript& script) {
}
}
+#if V8_ENABLE_WEBASSEMBLY
static const char* getDebugSymbolTypeName(
v8::debug::WasmScript::DebugSymbolsType type) {
switch (type) {
@@ -1537,6 +1570,7 @@ static std::unique_ptr<protocol::Debugger::DebugSymbols> getDebugSymbols(
}
return debugSymbols;
}
+#endif // V8_ENABLE_WEBASSEMBLY
void V8DebuggerAgentImpl::didParseSource(
std::unique_ptr<V8DebuggerScript> script, bool success) {
@@ -1571,10 +1605,12 @@ void V8DebuggerAgentImpl::didParseSource(
String16 embedderName = script->embedderName();
String16 scriptLanguage = getScriptLanguage(*script);
Maybe<int> codeOffset;
+ std::unique_ptr<protocol::Debugger::DebugSymbols> debugSymbols;
+#if V8_ENABLE_WEBASSEMBLY
if (script->getLanguage() == V8DebuggerScript::Language::WebAssembly)
codeOffset = script->codeOffset();
- std::unique_ptr<protocol::Debugger::DebugSymbols> debugSymbols =
- getDebugSymbols(*script);
+ debugSymbols = getDebugSymbols(*script);
+#endif // V8_ENABLE_WEBASSEMBLY
m_scripts[scriptId] = std::move(script);
// Release the strong reference to get notified when debugger is the only
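
One detail worth spelling out from the hunk above: kWasmBytecodeMaxLength stays at (v8::String::kMaxLength / 4) * 3, which matches the 4/3 growth of base64, presumably how protocol::Binary ends up inside the JSON protocol message. A worked check of that bound follows; the kMaxLength value in it is illustrative, not v8::String::kMaxLength.

    // Worked check of the 3/4 factor: base64 turns every 3 input bytes into
    // 4 output characters, so capping the raw bytes at (kMaxLength / 4) * 3
    // keeps the encoded form within kMaxLength characters.
    #include <cstddef>

    constexpr size_t kMaxLength = 1 << 20;  // illustrative cap, not V8's limit
    constexpr size_t kWasmBytecodeMaxLength = (kMaxLength / 4) * 3;

    constexpr size_t Base64Length(size_t bytes) {
      return 4 * ((bytes + 2) / 3);  // padded base64 output size
    }

    static_assert(Base64Length(kWasmBytecodeMaxLength) <= kMaxLength,
                  "maximal allowed bytecode still fits after encoding");
    static_assert(Base64Length(kWasmBytecodeMaxLength + 3) > kMaxLength,
                  "anything larger would no longer fit");

    int main() { return 0; }
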
diff --git a/deps/v8/src/inspector/v8-debugger-script.cc b/deps/v8/src/inspector/v8-debugger-script.cc
index e5089e5a60c..ecadb6c137e 100644
--- a/deps/v8/src/inspector/v8-debugger-script.cc
+++ b/deps/v8/src/inspector/v8-debugger-script.cc
@@ -116,13 +116,16 @@ class ActualScript : public V8DebuggerScript {
static_cast<int>(pos), static_cast<int>(substringLength));
return String16(buffer.get(), substringLength);
}
+ Language getLanguage() const override { return m_language; }
+
+#if V8_ENABLE_WEBASSEMBLY
v8::Maybe<v8::MemorySpan<const uint8_t>> wasmBytecode() const override {
v8::HandleScope scope(m_isolate);
auto script = this->script();
if (!script->IsWasm()) return v8::Nothing<v8::MemorySpan<const uint8_t>>();
return v8::Just(v8::debug::WasmScript::Cast(*script)->Bytecode());
}
- Language getLanguage() const override { return m_language; }
+
v8::Maybe<v8::debug::WasmScript::DebugSymbolsType> getDebugSymbolsType()
const override {
auto script = this->script();
@@ -130,6 +133,7 @@ class ActualScript : public V8DebuggerScript {
return v8::Nothing<v8::debug::WasmScript::DebugSymbolsType>();
return v8::Just(v8::debug::WasmScript::Cast(*script)->GetDebugSymbolType());
}
+
v8::Maybe<String16> getExternalDebugSymbolsURL() const override {
auto script = this->script();
if (!script->IsWasm()) return v8::Nothing<String16>();
@@ -138,22 +142,29 @@ class ActualScript : public V8DebuggerScript {
if (external_url.size() == 0) return v8::Nothing<String16>();
return v8::Just(String16(external_url.data(), external_url.size()));
}
+#endif // V8_ENABLE_WEBASSEMBLY
+
int startLine() const override { return m_startLine; }
int startColumn() const override { return m_startColumn; }
int endLine() const override { return m_endLine; }
int endColumn() const override { return m_endColumn; }
int codeOffset() const override {
- auto script = this->script();
- if (!script->IsWasm()) return 0;
- return v8::debug::WasmScript::Cast(*script)->CodeOffset();
+#if V8_ENABLE_WEBASSEMBLY
+ if (script()->IsWasm()) {
+ return v8::debug::WasmScript::Cast(*script())->CodeOffset();
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ return 0;
}
bool isSourceLoadedLazily() const override { return false; }
int length() const override {
auto script = this->script();
+#if V8_ENABLE_WEBASSEMBLY
if (script->IsWasm()) {
return static_cast<int>(
v8::debug::WasmScript::Cast(*script)->Bytecode().size());
}
+#endif // V8_ENABLE_WEBASSEMBLY
v8::HandleScope scope(m_isolate);
v8::Local<v8::String> v8Source;
return script->Source().ToLocal(&v8Source) ? v8Source->Length() : 0;
@@ -305,23 +316,26 @@ class ActualScript : public V8DebuggerScript {
} else {
m_endColumn = source_length + m_startColumn;
}
+#if V8_ENABLE_WEBASSEMBLY
} else if (script->IsWasm()) {
DCHECK_EQ(0, m_startLine);
DCHECK_EQ(0, m_startColumn);
m_endLine = 0;
m_endColumn = static_cast<int>(
v8::debug::WasmScript::Cast(*script)->Bytecode().size());
+#endif // V8_ENABLE_WEBASSEMBLY
} else {
m_endLine = m_startLine;
m_endColumn = m_startColumn;
}
USE(script->ContextId().To(&m_executionContextId));
+ m_language = V8DebuggerScript::Language::JavaScript;
+#if V8_ENABLE_WEBASSEMBLY
if (script->IsWasm()) {
m_language = V8DebuggerScript::Language::WebAssembly;
- } else {
- m_language = V8DebuggerScript::Language::JavaScript;
}
+#endif // V8_ENABLE_WEBASSEMBLY
m_isModule = script->IsModule();
@@ -387,9 +401,11 @@ bool V8DebuggerScript::setBreakpoint(const String16& condition,
return script()->SetBreakpoint(toV8String(m_isolate, condition), loc, id);
}
+#if V8_ENABLE_WEBASSEMBLY
void V8DebuggerScript::removeWasmBreakpoint(int id) {
v8::HandleScope scope(m_isolate);
script()->RemoveWasmBreakpoint(id);
}
+#endif // V8_ENABLE_WEBASSEMBLY
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-debugger-script.h b/deps/v8/src/inspector/v8-debugger-script.h
index e46dda59a93..a8fd6775b05 100644
--- a/deps/v8/src/inspector/v8-debugger-script.h
+++ b/deps/v8/src/inspector/v8-debugger-script.h
@@ -62,11 +62,7 @@ class V8DebuggerScript {
virtual const String16& sourceMappingURL() const = 0;
virtual String16 source(size_t pos, size_t len = UINT_MAX) const = 0;
- virtual v8::Maybe<v8::MemorySpan<const uint8_t>> wasmBytecode() const = 0;
virtual Language getLanguage() const = 0;
- virtual v8::Maybe<String16> getExternalDebugSymbolsURL() const = 0;
- virtual v8::Maybe<v8::debug::WasmScript::DebugSymbolsType>
- getDebugSymbolsType() const = 0;
virtual const String16& hash() const = 0;
virtual int startLine() const = 0;
virtual int startColumn() const = 0;
@@ -96,10 +92,17 @@ class V8DebuggerScript {
virtual bool setBreakpoint(const String16& condition,
v8::debug::Location* location, int* id) const = 0;
- void removeWasmBreakpoint(int id);
virtual void MakeWeak() = 0;
virtual bool setBreakpointOnRun(int* id) const = 0;
+#if V8_ENABLE_WEBASSEMBLY
+ virtual v8::Maybe<v8::MemorySpan<const uint8_t>> wasmBytecode() const = 0;
+ virtual v8::Maybe<v8::debug::WasmScript::DebugSymbolsType>
+ getDebugSymbolsType() const = 0;
+ virtual v8::Maybe<String16> getExternalDebugSymbolsURL() const = 0;
+ void removeWasmBreakpoint(int id);
+#endif // V8_ENABLE_WEBASSEMBLY
+
protected:
V8DebuggerScript(v8::Isolate*, String16 id, String16 url,
String16 embedderName);
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index 0a17f9e2f8e..9f035b578ef 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -90,7 +90,9 @@ void V8Debugger::enable() {
m_isolate->AddNearHeapLimitCallback(&V8Debugger::nearHeapLimitCallback, this);
v8::debug::ChangeBreakOnException(m_isolate, v8::debug::NoBreakOnException);
m_pauseOnExceptionsState = v8::debug::NoBreakOnException;
+#if V8_ENABLE_WEBASSEMBLY
v8::debug::TierDownAllModulesPerIsolate(m_isolate);
+#endif // V8_ENABLE_WEBASSEMBLY
}
void V8Debugger::disable() {
@@ -113,7 +115,9 @@ void V8Debugger::disable() {
m_taskWithScheduledBreakPauseRequested = false;
m_pauseOnNextCallRequested = false;
m_pauseOnAsyncCall = false;
+#if V8_ENABLE_WEBASSEMBLY
v8::debug::TierUpAllModulesPerIsolate(m_isolate);
+#endif // V8_ENABLE_WEBASSEMBLY
v8::debug::SetDebugDelegate(m_isolate, nullptr);
m_isolate->RemoveNearHeapLimitCallback(&V8Debugger::nearHeapLimitCallback,
m_originalHeapLimit);
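
For context on the TierDown/TierUp calls now guarded above: while a DevTools session is attached, Wasm modules are held at the baseline tier so breakpoints and stepping work, and they may tier up again once the debugger detaches. The sketch below is a conceptual model of that per-isolate toggle, not V8's WasmEngine API.

    // Conceptual model of the per-isolate wasm tiering toggle implied above.
    enum class WasmTier { kBaseline, kOptimized };

    struct IsolateDebugState {
      int attached_debuggers = 0;
      WasmTier preferred_tier = WasmTier::kOptimized;

      void OnDebuggerEnabled() {   // ~ TierDownAllModulesPerIsolate
        ++attached_debuggers;
        preferred_tier = WasmTier::kBaseline;
      }
      void OnDebuggerDisabled() {  // ~ TierUpAllModulesPerIsolate
        if (--attached_debuggers == 0) preferred_tier = WasmTier::kOptimized;
      }
    };
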
diff --git a/deps/v8/src/inspector/value-mirror.cc b/deps/v8/src/inspector/value-mirror.cc
index 62744a8a9cf..18d870a94a3 100644
--- a/deps/v8/src/inspector/value-mirror.cc
+++ b/deps/v8/src/inspector/value-mirror.cc
@@ -305,6 +305,15 @@ String16 descriptionForCollection(v8::Isolate* isolate,
return String16::concat(className, '(', String16::fromInteger(length), ')');
}
+#if V8_ENABLE_WEBASSEMBLY
+String16 descriptionForWasmValueObject(
+ v8::Local<v8::Context> context,
+ v8::Local<v8::debug::WasmValueObject> object) {
+ v8::Isolate* isolate = context->GetIsolate();
+ return toProtocolString(isolate, object->type());
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
String16 descriptionForEntry(v8::Local<v8::Context> context,
v8::Local<v8::Object> object) {
v8::Isolate* isolate = context->GetIsolate();
@@ -793,8 +802,10 @@ bool getPropertiesForPreview(v8::Local<v8::Context> context,
if (object->IsArray() || isArrayLike(context, object, &length) ||
object->IsStringObject()) {
blocklist.push_back("length");
+#if V8_ENABLE_WEBASSEMBLY
} else if (v8::debug::WasmValueObject::IsWasmValueObject(object)) {
blocklist.push_back("type");
+#endif // V8_ENABLE_WEBASSEMBLY
} else {
auto clientSubtype = clientFor(context)->valueSubtype(object);
if (clientSubtype && toString16(clientSubtype->string()) == "array") {
@@ -1310,6 +1321,7 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
}
}
if (accessorPropertiesOnly && !isAccessorProperty) continue;
+ if (name == "__proto__") shouldSkipProto = true;
auto mirror = PropertyMirror{name,
writable,
configurable,
@@ -1693,13 +1705,15 @@ std::unique_ptr<ValueMirror> ValueMirror::create(v8::Local<v8::Context> context,
descriptionForCollection(
isolate, memory, memory->Buffer()->ByteLength() / kWasmPageSize));
}
+#if V8_ENABLE_WEBASSEMBLY
if (v8::debug::WasmValueObject::IsWasmValueObject(value)) {
v8::Local<v8::debug::WasmValueObject> object =
value.As<v8::debug::WasmValueObject>();
return std::make_unique<ObjectMirror>(
value, RemoteObject::SubtypeEnum::Wasmvalue,
- descriptionForObject(isolate, object));
+ descriptionForWasmValueObject(context, object));
}
+#endif // V8_ENABLE_WEBASSEMBLY
V8InternalValueType internalType =
v8InternalValueTypeFrom(context, value.As<v8::Object>());
if (value->IsArray() && internalType == V8InternalValueType::kScopeList) {
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.cc b/deps/v8/src/interpreter/bytecode-array-accessor.cc
deleted file mode 100644
index 7294255dbe2..00000000000
--- a/deps/v8/src/interpreter/bytecode-array-accessor.cc
+++ /dev/null
@@ -1,367 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/interpreter/bytecode-array-accessor.h"
-
-#include "src/interpreter/bytecode-decoder.h"
-#include "src/interpreter/interpreter-intrinsics.h"
-#include "src/objects/code-inl.h"
-#include "src/objects/feedback-vector.h"
-#include "src/objects/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-namespace interpreter {
-
-BytecodeArrayAccessor::BytecodeArrayAccessor(
- Handle<BytecodeArray> bytecode_array, int initial_offset)
- : bytecode_array_(bytecode_array),
- start_(reinterpret_cast<uint8_t*>(
- bytecode_array_->GetFirstBytecodeAddress())),
- end_(start_ + bytecode_array_->length()),
- cursor_(start_ + initial_offset),
- operand_scale_(OperandScale::kSingle),
- prefix_size_(0),
- local_heap_(LocalHeap::Current()
- ? LocalHeap::Current()
- : Isolate::Current()->main_thread_local_heap()) {
- local_heap_->AddGCEpilogueCallback(UpdatePointersCallback, this);
- UpdateOperandScale();
-}
-
-BytecodeArrayAccessor::~BytecodeArrayAccessor() {
- local_heap_->RemoveGCEpilogueCallback(UpdatePointersCallback, this);
-}
-
-void BytecodeArrayAccessor::SetOffset(int offset) {
- if (offset < 0) return;
- cursor_ = reinterpret_cast<uint8_t*>(
- bytecode_array()->GetFirstBytecodeAddress() + offset);
- UpdateOperandScale();
-}
-
-void BytecodeArrayAccessor::ApplyDebugBreak() {
- // Get the raw bytecode from the bytecode array. This may give us a
- // scaling prefix, which we can patch with the matching debug-break
- // variant.
- uint8_t* cursor = cursor_ - prefix_size_;
- interpreter::Bytecode bytecode = interpreter::Bytecodes::FromByte(*cursor);
- if (interpreter::Bytecodes::IsDebugBreak(bytecode)) return;
- interpreter::Bytecode debugbreak =
- interpreter::Bytecodes::GetDebugBreak(bytecode);
- *cursor = interpreter::Bytecodes::ToByte(debugbreak);
-}
-
-int BytecodeArrayAccessor::current_bytecode_size() const {
- return prefix_size_ +
- Bytecodes::Size(current_bytecode(), current_operand_scale());
-}
-
-uint32_t BytecodeArrayAccessor::GetUnsignedOperand(
- int operand_index, OperandType operand_type) const {
- DCHECK_GE(operand_index, 0);
- DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
- DCHECK_EQ(operand_type,
- Bytecodes::GetOperandType(current_bytecode(), operand_index));
- DCHECK(Bytecodes::IsUnsignedOperandType(operand_type));
- Address operand_start =
- reinterpret_cast<Address>(cursor_) +
- Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
- current_operand_scale());
- return BytecodeDecoder::DecodeUnsignedOperand(operand_start, operand_type,
- current_operand_scale());
-}
-
-int32_t BytecodeArrayAccessor::GetSignedOperand(
- int operand_index, OperandType operand_type) const {
- DCHECK_GE(operand_index, 0);
- DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
- DCHECK_EQ(operand_type,
- Bytecodes::GetOperandType(current_bytecode(), operand_index));
- DCHECK(!Bytecodes::IsUnsignedOperandType(operand_type));
- Address operand_start =
- reinterpret_cast<Address>(cursor_) +
- Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
- current_operand_scale());
- return BytecodeDecoder::DecodeSignedOperand(operand_start, operand_type,
- current_operand_scale());
-}
-
-uint32_t BytecodeArrayAccessor::GetFlagOperand(int operand_index) const {
- DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
- OperandType::kFlag8);
- return GetUnsignedOperand(operand_index, OperandType::kFlag8);
-}
-
-uint32_t BytecodeArrayAccessor::GetUnsignedImmediateOperand(
- int operand_index) const {
- DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
- OperandType::kUImm);
- return GetUnsignedOperand(operand_index, OperandType::kUImm);
-}
-
-int32_t BytecodeArrayAccessor::GetImmediateOperand(int operand_index) const {
- DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
- OperandType::kImm);
- return GetSignedOperand(operand_index, OperandType::kImm);
-}
-
-uint32_t BytecodeArrayAccessor::GetRegisterCountOperand(
- int operand_index) const {
- DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
- OperandType::kRegCount);
- return GetUnsignedOperand(operand_index, OperandType::kRegCount);
-}
-
-uint32_t BytecodeArrayAccessor::GetIndexOperand(int operand_index) const {
- OperandType operand_type =
- Bytecodes::GetOperandType(current_bytecode(), operand_index);
- DCHECK_EQ(operand_type, OperandType::kIdx);
- return GetUnsignedOperand(operand_index, operand_type);
-}
-
-FeedbackSlot BytecodeArrayAccessor::GetSlotOperand(int operand_index) const {
- int index = GetIndexOperand(operand_index);
- return FeedbackVector::ToSlot(index);
-}
-
-Register BytecodeArrayAccessor::GetReceiver() const {
- return Register::FromParameterIndex(0, bytecode_array()->parameter_count());
-}
-
-Register BytecodeArrayAccessor::GetParameter(int parameter_index) const {
- DCHECK_GE(parameter_index, 0);
- // The parameter indices are shifted by 1 (receiver is the
- // first entry).
- return Register::FromParameterIndex(parameter_index + 1,
- bytecode_array()->parameter_count());
-}
-
-Register BytecodeArrayAccessor::GetRegisterOperand(int operand_index) const {
- OperandType operand_type =
- Bytecodes::GetOperandType(current_bytecode(), operand_index);
- Address operand_start =
- reinterpret_cast<Address>(cursor_) +
- Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
- current_operand_scale());
- return BytecodeDecoder::DecodeRegisterOperand(operand_start, operand_type,
- current_operand_scale());
-}
-
-std::pair<Register, Register> BytecodeArrayAccessor::GetRegisterPairOperand(
- int operand_index) const {
- Register first = GetRegisterOperand(operand_index);
- Register second(first.index() + 1);
- return std::make_pair(first, second);
-}
-
-RegisterList BytecodeArrayAccessor::GetRegisterListOperand(
- int operand_index) const {
- Register first = GetRegisterOperand(operand_index);
- uint32_t count = GetRegisterCountOperand(operand_index + 1);
- return RegisterList(first.index(), count);
-}
-
-int BytecodeArrayAccessor::GetRegisterOperandRange(int operand_index) const {
- DCHECK_LE(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
- const OperandType* operand_types =
- Bytecodes::GetOperandTypes(current_bytecode());
- OperandType operand_type = operand_types[operand_index];
- DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
- if (operand_type == OperandType::kRegList ||
- operand_type == OperandType::kRegOutList) {
- return GetRegisterCountOperand(operand_index + 1);
- } else {
- return Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type);
- }
-}
-
-Runtime::FunctionId BytecodeArrayAccessor::GetRuntimeIdOperand(
- int operand_index) const {
- OperandType operand_type =
- Bytecodes::GetOperandType(current_bytecode(), operand_index);
- DCHECK_EQ(operand_type, OperandType::kRuntimeId);
- uint32_t raw_id = GetUnsignedOperand(operand_index, operand_type);
- return static_cast<Runtime::FunctionId>(raw_id);
-}
-
-uint32_t BytecodeArrayAccessor::GetNativeContextIndexOperand(
- int operand_index) const {
- OperandType operand_type =
- Bytecodes::GetOperandType(current_bytecode(), operand_index);
- DCHECK_EQ(operand_type, OperandType::kNativeContextIndex);
- return GetUnsignedOperand(operand_index, operand_type);
-}
-
-Runtime::FunctionId BytecodeArrayAccessor::GetIntrinsicIdOperand(
- int operand_index) const {
- OperandType operand_type =
- Bytecodes::GetOperandType(current_bytecode(), operand_index);
- DCHECK_EQ(operand_type, OperandType::kIntrinsicId);
- uint32_t raw_id = GetUnsignedOperand(operand_index, operand_type);
- return IntrinsicsHelper::ToRuntimeId(
- static_cast<IntrinsicsHelper::IntrinsicId>(raw_id));
-}
-
-template <typename LocalIsolate>
-Handle<Object> BytecodeArrayAccessor::GetConstantAtIndex(
- int index, LocalIsolate* isolate) const {
- return handle(bytecode_array()->constant_pool().get(index), isolate);
-}
-
-bool BytecodeArrayAccessor::IsConstantAtIndexSmi(int index) const {
- return bytecode_array()->constant_pool().get(index).IsSmi();
-}
-
-Smi BytecodeArrayAccessor::GetConstantAtIndexAsSmi(int index) const {
- return Smi::cast(bytecode_array()->constant_pool().get(index));
-}
-
-template <typename LocalIsolate>
-Handle<Object> BytecodeArrayAccessor::GetConstantForIndexOperand(
- int operand_index, LocalIsolate* isolate) const {
- return GetConstantAtIndex(GetIndexOperand(operand_index), isolate);
-}
-
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Handle<Object> BytecodeArrayAccessor::GetConstantForIndexOperand(
- int operand_index, Isolate* isolate) const;
-template Handle<Object> BytecodeArrayAccessor::GetConstantForIndexOperand(
- int operand_index, LocalIsolate* isolate) const;
-
-int BytecodeArrayAccessor::GetRelativeJumpTargetOffset() const {
- Bytecode bytecode = current_bytecode();
- if (interpreter::Bytecodes::IsJumpImmediate(bytecode)) {
- int relative_offset = GetUnsignedImmediateOperand(0);
- if (bytecode == Bytecode::kJumpLoop) {
- relative_offset = -relative_offset;
- }
- return relative_offset;
- } else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) {
- Smi smi = GetConstantAtIndexAsSmi(GetIndexOperand(0));
- return smi.value();
- } else {
- UNREACHABLE();
- }
-}
-
-int BytecodeArrayAccessor::GetJumpTargetOffset() const {
- return GetAbsoluteOffset(GetRelativeJumpTargetOffset());
-}
-
-JumpTableTargetOffsets BytecodeArrayAccessor::GetJumpTableTargetOffsets()
- const {
- uint32_t table_start, table_size;
- int32_t case_value_base;
- if (current_bytecode() == Bytecode::kSwitchOnGeneratorState) {
- table_start = GetIndexOperand(1);
- table_size = GetUnsignedImmediateOperand(2);
- case_value_base = 0;
- } else {
- DCHECK_EQ(current_bytecode(), Bytecode::kSwitchOnSmiNoFeedback);
- table_start = GetIndexOperand(0);
- table_size = GetUnsignedImmediateOperand(1);
- case_value_base = GetImmediateOperand(2);
- }
- return JumpTableTargetOffsets(this, table_start, table_size, case_value_base);
-}
-
-int BytecodeArrayAccessor::GetAbsoluteOffset(int relative_offset) const {
- return current_offset() + relative_offset + prefix_size_;
-}
-
-std::ostream& BytecodeArrayAccessor::PrintTo(std::ostream& os) const {
- return BytecodeDecoder::Decode(os, cursor_ - prefix_size_,
- bytecode_array()->parameter_count());
-}
-
-void BytecodeArrayAccessor::UpdatePointers() {
- DisallowGarbageCollection no_gc;
- uint8_t* start =
- reinterpret_cast<uint8_t*>(bytecode_array_->GetFirstBytecodeAddress());
- if (start != start_) {
- start_ = start;
- uint8_t* end = start + bytecode_array_->length();
- size_t distance_to_end = end_ - cursor_;
- cursor_ = end - distance_to_end;
- end_ = end;
- }
-}
-
-JumpTableTargetOffsets::JumpTableTargetOffsets(
- const BytecodeArrayAccessor* accessor, int table_start, int table_size,
- int case_value_base)
- : accessor_(accessor),
- table_start_(table_start),
- table_size_(table_size),
- case_value_base_(case_value_base) {}
-
-JumpTableTargetOffsets::iterator JumpTableTargetOffsets::begin() const {
- return iterator(case_value_base_, table_start_, table_start_ + table_size_,
- accessor_);
-}
-JumpTableTargetOffsets::iterator JumpTableTargetOffsets::end() const {
- return iterator(case_value_base_ + table_size_, table_start_ + table_size_,
- table_start_ + table_size_, accessor_);
-}
-int JumpTableTargetOffsets::size() const {
- int ret = 0;
- // TODO(leszeks): Is there a more efficient way of doing this than iterating?
- for (const auto& entry : *this) {
- USE(entry);
- ret++;
- }
- return ret;
-}
-
-JumpTableTargetOffsets::iterator::iterator(
- int case_value, int table_offset, int table_end,
- const BytecodeArrayAccessor* accessor)
- : accessor_(accessor),
- current_(Smi::zero()),
- index_(case_value),
- table_offset_(table_offset),
- table_end_(table_end) {
- UpdateAndAdvanceToValid();
-}
-
-JumpTableTargetOffset JumpTableTargetOffsets::iterator::operator*() {
- DCHECK_LT(table_offset_, table_end_);
- return {index_, accessor_->GetAbsoluteOffset(Smi::ToInt(current_))};
-}
-
-JumpTableTargetOffsets::iterator& JumpTableTargetOffsets::iterator::
-operator++() {
- DCHECK_LT(table_offset_, table_end_);
- ++table_offset_;
- ++index_;
- UpdateAndAdvanceToValid();
- return *this;
-}
-
-bool JumpTableTargetOffsets::iterator::operator!=(
- const JumpTableTargetOffsets::iterator& other) {
- DCHECK_EQ(accessor_, other.accessor_);
- DCHECK_EQ(table_end_, other.table_end_);
- DCHECK_EQ(index_ - other.index_, table_offset_ - other.table_offset_);
- return index_ != other.index_;
-}
-
-void JumpTableTargetOffsets::iterator::UpdateAndAdvanceToValid() {
- while (table_offset_ < table_end_ &&
- !accessor_->IsConstantAtIndexSmi(table_offset_)) {
- ++table_offset_;
- ++index_;
- }
-
- // Make sure we haven't reached the end of the table with a hole in current.
- if (table_offset_ < table_end_) {
- DCHECK(accessor_->IsConstantAtIndexSmi(table_offset_));
- current_ = accessor_->GetConstantAtIndexAsSmi(table_offset_);
- }
-}
-
-} // namespace interpreter
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.h b/deps/v8/src/interpreter/bytecode-array-accessor.h
deleted file mode 100644
index dc2a8c217aa..00000000000
--- a/deps/v8/src/interpreter/bytecode-array-accessor.h
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_
-#define V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_
-
-#include <memory>
-
-#include "src/base/optional.h"
-#include "src/common/globals.h"
-#include "src/handles/handles.h"
-#include "src/interpreter/bytecode-register.h"
-#include "src/interpreter/bytecodes.h"
-#include "src/objects/objects.h"
-#include "src/objects/smi.h"
-#include "src/runtime/runtime.h"
-
-namespace v8 {
-namespace internal {
-
-class BytecodeArray;
-
-namespace interpreter {
-
-class BytecodeArrayAccessor;
-
-struct V8_EXPORT_PRIVATE JumpTableTargetOffset {
- int case_value;
- int target_offset;
-};
-
-class V8_EXPORT_PRIVATE JumpTableTargetOffsets final {
- public:
- // Minimal iterator implementation for use in ranged-for.
- class V8_EXPORT_PRIVATE iterator final {
- public:
- iterator(int case_value, int table_offset, int table_end,
- const BytecodeArrayAccessor* accessor);
-
- JumpTableTargetOffset operator*();
- iterator& operator++();
- bool operator!=(const iterator& other);
-
- private:
- void UpdateAndAdvanceToValid();
-
- const BytecodeArrayAccessor* accessor_;
- Smi current_;
- int index_;
- int table_offset_;
- int table_end_;
- };
-
- JumpTableTargetOffsets(const BytecodeArrayAccessor* accessor, int table_start,
- int table_size, int case_value_base);
-
- iterator begin() const;
- iterator end() const;
-
- int size() const;
-
- private:
- const BytecodeArrayAccessor* accessor_;
- int table_start_;
- int table_size_;
- int case_value_base_;
-};
-
-class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
- public:
- BytecodeArrayAccessor(Handle<BytecodeArray> bytecode_array,
- int initial_offset);
- ~BytecodeArrayAccessor();
-
- BytecodeArrayAccessor(const BytecodeArrayAccessor&) = delete;
- BytecodeArrayAccessor& operator=(const BytecodeArrayAccessor&) = delete;
-
- inline void Advance() {
- cursor_ += Bytecodes::Size(current_bytecode(), current_operand_scale());
- UpdateOperandScale();
- }
- void SetOffset(int offset);
- void Reset() { SetOffset(0); }
-
- void ApplyDebugBreak();
-
- inline Bytecode current_bytecode() const {
- DCHECK(!done());
- uint8_t current_byte = *cursor_;
- Bytecode current_bytecode = Bytecodes::FromByte(current_byte);
- DCHECK(!Bytecodes::IsPrefixScalingBytecode(current_bytecode));
- return current_bytecode;
- }
- int current_bytecode_size() const;
- int current_offset() const {
- return static_cast<int>(cursor_ - start_ - prefix_size_);
- }
- OperandScale current_operand_scale() const { return operand_scale_; }
- Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
-
- uint32_t GetFlagOperand(int operand_index) const;
- uint32_t GetUnsignedImmediateOperand(int operand_index) const;
- int32_t GetImmediateOperand(int operand_index) const;
- uint32_t GetIndexOperand(int operand_index) const;
- FeedbackSlot GetSlotOperand(int operand_index) const;
- Register GetReceiver() const;
- Register GetParameter(int parameter_index) const;
- uint32_t GetRegisterCountOperand(int operand_index) const;
- Register GetRegisterOperand(int operand_index) const;
- std::pair<Register, Register> GetRegisterPairOperand(int operand_index) const;
- RegisterList GetRegisterListOperand(int operand_index) const;
- int GetRegisterOperandRange(int operand_index) const;
- Runtime::FunctionId GetRuntimeIdOperand(int operand_index) const;
- Runtime::FunctionId GetIntrinsicIdOperand(int operand_index) const;
- uint32_t GetNativeContextIndexOperand(int operand_index) const;
- template <typename LocalIsolate>
- Handle<Object> GetConstantAtIndex(int offset, LocalIsolate* isolate) const;
- bool IsConstantAtIndexSmi(int offset) const;
- Smi GetConstantAtIndexAsSmi(int offset) const;
- template <typename LocalIsolate>
- Handle<Object> GetConstantForIndexOperand(int operand_index,
- LocalIsolate* isolate) const;
-
- // Returns the relative offset of the branch target at the current bytecode.
- // It is an error to call this method if the bytecode is not for a jump or
- // conditional jump. Returns a negative offset for backward jumps.
- int GetRelativeJumpTargetOffset() const;
- // Returns the absolute offset of the branch target at the current bytecode.
- // It is an error to call this method if the bytecode is not for a jump or
- // conditional jump.
- int GetJumpTargetOffset() const;
- // Returns an iterator over the absolute offsets of the targets of the current
- // switch bytecode's jump table. It is an error to call this method if the
- // bytecode is not a switch.
- JumpTableTargetOffsets GetJumpTableTargetOffsets() const;
-
- // Returns the absolute offset of the bytecode at the given relative offset
- // from the current bytecode.
- int GetAbsoluteOffset(int relative_offset) const;
-
- std::ostream& PrintTo(std::ostream& os) const;
-
- static void UpdatePointersCallback(void* accessor) {
- reinterpret_cast<BytecodeArrayAccessor*>(accessor)->UpdatePointers();
- }
-
- void UpdatePointers();
-
- inline bool done() const { return cursor_ >= end_; }
-
- private:
- uint32_t GetUnsignedOperand(int operand_index,
- OperandType operand_type) const;
- int32_t GetSignedOperand(int operand_index, OperandType operand_type) const;
-
- inline void UpdateOperandScale() {
- if (done()) return;
- uint8_t current_byte = *cursor_;
- Bytecode current_bytecode = Bytecodes::FromByte(current_byte);
- if (Bytecodes::IsPrefixScalingBytecode(current_bytecode)) {
- operand_scale_ =
- Bytecodes::PrefixBytecodeToOperandScale(current_bytecode);
- ++cursor_;
- prefix_size_ = 1;
- } else {
- operand_scale_ = OperandScale::kSingle;
- prefix_size_ = 0;
- }
- }
-
- Handle<BytecodeArray> bytecode_array_;
- uint8_t* start_;
- uint8_t* end_;
- // The cursor always points to the active bytecode. If there's a prefix, the
- // prefix is at (cursor - 1).
- uint8_t* cursor_;
- OperandScale operand_scale_;
- int prefix_size_;
- LocalHeap* const local_heap_;
-};
-
-} // namespace interpreter
-} // namespace internal
-} // namespace v8
-
-#endif // V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index cb1c92e3f84..28716b401bb 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -508,8 +508,12 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
void InitializeReturnPosition(FunctionLiteral* literal);
void SetStatementPosition(Statement* stmt) {
- if (stmt->position() == kNoSourcePosition) return;
- latest_source_info_.MakeStatementPosition(stmt->position());
+ SetStatementPosition(stmt->position());
+ }
+
+ void SetStatementPosition(int position) {
+ if (position == kNoSourcePosition) return;
+ latest_source_info_.MakeStatementPosition(position);
}
void SetExpressionPosition(Expression* expr) {
@@ -526,16 +530,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
}
void SetExpressionAsStatementPosition(Expression* expr) {
- if (expr->position() == kNoSourcePosition) return;
- latest_source_info_.MakeStatementPosition(expr->position());
- }
-
- void SetReturnPosition(int source_position, FunctionLiteral* literal) {
- if (source_position != kNoSourcePosition) {
- latest_source_info_.MakeStatementPosition(source_position);
- } else if (literal->return_position() != kNoSourcePosition) {
- latest_source_info_.MakeStatementPosition(literal->return_position());
- }
+ SetStatementPosition(expr->position());
}
bool RemainderOfBlockIsDead() const {
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.cc b/deps/v8/src/interpreter/bytecode-array-iterator.cc
index c90ed56f177..2579f5d3782 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.cc
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.cc
@@ -3,7 +3,11 @@
// found in the LICENSE file.
#include "src/interpreter/bytecode-array-iterator.h"
+
+#include "src/interpreter/bytecode-decoder.h"
+#include "src/interpreter/interpreter-intrinsics.h"
#include "src/objects/code-inl.h"
+#include "src/objects/feedback-vector.h"
#include "src/objects/objects-inl.h"
namespace v8 {
@@ -11,8 +15,355 @@ namespace internal {
namespace interpreter {
BytecodeArrayIterator::BytecodeArrayIterator(
- Handle<BytecodeArray> bytecode_array)
- : BytecodeArrayAccessor(bytecode_array, 0) {}
+ Handle<BytecodeArray> bytecode_array, int initial_offset)
+ : bytecode_array_(bytecode_array),
+ start_(reinterpret_cast<uint8_t*>(
+ bytecode_array_->GetFirstBytecodeAddress())),
+ end_(start_ + bytecode_array_->length()),
+ cursor_(start_ + initial_offset),
+ operand_scale_(OperandScale::kSingle),
+ prefix_size_(0),
+ local_heap_(LocalHeap::Current()
+ ? LocalHeap::Current()
+ : Isolate::Current()->main_thread_local_heap()) {
+ local_heap_->AddGCEpilogueCallback(UpdatePointersCallback, this);
+ UpdateOperandScale();
+}
+
+BytecodeArrayIterator::~BytecodeArrayIterator() {
+ local_heap_->RemoveGCEpilogueCallback(UpdatePointersCallback, this);
+}
+
+void BytecodeArrayIterator::SetOffset(int offset) {
+ if (offset < 0) return;
+ cursor_ = reinterpret_cast<uint8_t*>(
+ bytecode_array()->GetFirstBytecodeAddress() + offset);
+ UpdateOperandScale();
+}
+
+void BytecodeArrayIterator::ApplyDebugBreak() {
+ // Get the raw bytecode from the bytecode array. This may give us a
+ // scaling prefix, which we can patch with the matching debug-break
+ // variant.
+ uint8_t* cursor = cursor_ - prefix_size_;
+ interpreter::Bytecode bytecode = interpreter::Bytecodes::FromByte(*cursor);
+ if (interpreter::Bytecodes::IsDebugBreak(bytecode)) return;
+ interpreter::Bytecode debugbreak =
+ interpreter::Bytecodes::GetDebugBreak(bytecode);
+ *cursor = interpreter::Bytecodes::ToByte(debugbreak);
+}
+
+int BytecodeArrayIterator::current_bytecode_size() const {
+ return prefix_size_ + current_bytecode_size_without_prefix();
+}
+
+int BytecodeArrayIterator::current_bytecode_size_without_prefix() const {
+ return Bytecodes::Size(current_bytecode(), current_operand_scale());
+}
+
+uint32_t BytecodeArrayIterator::GetUnsignedOperand(
+ int operand_index, OperandType operand_type) const {
+ DCHECK_GE(operand_index, 0);
+ DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
+ DCHECK_EQ(operand_type,
+ Bytecodes::GetOperandType(current_bytecode(), operand_index));
+ DCHECK(Bytecodes::IsUnsignedOperandType(operand_type));
+ Address operand_start =
+ reinterpret_cast<Address>(cursor_) +
+ Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
+ current_operand_scale());
+ return BytecodeDecoder::DecodeUnsignedOperand(operand_start, operand_type,
+ current_operand_scale());
+}
+
+int32_t BytecodeArrayIterator::GetSignedOperand(
+ int operand_index, OperandType operand_type) const {
+ DCHECK_GE(operand_index, 0);
+ DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
+ DCHECK_EQ(operand_type,
+ Bytecodes::GetOperandType(current_bytecode(), operand_index));
+ DCHECK(!Bytecodes::IsUnsignedOperandType(operand_type));
+ Address operand_start =
+ reinterpret_cast<Address>(cursor_) +
+ Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
+ current_operand_scale());
+ return BytecodeDecoder::DecodeSignedOperand(operand_start, operand_type,
+ current_operand_scale());
+}
+
+uint32_t BytecodeArrayIterator::GetFlagOperand(int operand_index) const {
+ DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+ OperandType::kFlag8);
+ return GetUnsignedOperand(operand_index, OperandType::kFlag8);
+}
+
+uint32_t BytecodeArrayIterator::GetUnsignedImmediateOperand(
+ int operand_index) const {
+ DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+ OperandType::kUImm);
+ return GetUnsignedOperand(operand_index, OperandType::kUImm);
+}
+
+int32_t BytecodeArrayIterator::GetImmediateOperand(int operand_index) const {
+ DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+ OperandType::kImm);
+ return GetSignedOperand(operand_index, OperandType::kImm);
+}
+
+uint32_t BytecodeArrayIterator::GetRegisterCountOperand(
+ int operand_index) const {
+ DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+ OperandType::kRegCount);
+ return GetUnsignedOperand(operand_index, OperandType::kRegCount);
+}
+
+uint32_t BytecodeArrayIterator::GetIndexOperand(int operand_index) const {
+ OperandType operand_type =
+ Bytecodes::GetOperandType(current_bytecode(), operand_index);
+ DCHECK_EQ(operand_type, OperandType::kIdx);
+ return GetUnsignedOperand(operand_index, operand_type);
+}
+
+FeedbackSlot BytecodeArrayIterator::GetSlotOperand(int operand_index) const {
+ int index = GetIndexOperand(operand_index);
+ return FeedbackVector::ToSlot(index);
+}
+
+Register BytecodeArrayIterator::GetReceiver() const {
+ return Register::FromParameterIndex(0, bytecode_array()->parameter_count());
+}
+
+Register BytecodeArrayIterator::GetParameter(int parameter_index) const {
+ DCHECK_GE(parameter_index, 0);
+ // The parameter indices are shifted by 1 (receiver is the
+ // first entry).
+ return Register::FromParameterIndex(parameter_index + 1,
+ bytecode_array()->parameter_count());
+}
+
+Register BytecodeArrayIterator::GetRegisterOperand(int operand_index) const {
+ OperandType operand_type =
+ Bytecodes::GetOperandType(current_bytecode(), operand_index);
+ Address operand_start =
+ reinterpret_cast<Address>(cursor_) +
+ Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
+ current_operand_scale());
+ return BytecodeDecoder::DecodeRegisterOperand(operand_start, operand_type,
+ current_operand_scale());
+}
+
+std::pair<Register, Register> BytecodeArrayIterator::GetRegisterPairOperand(
+ int operand_index) const {
+ Register first = GetRegisterOperand(operand_index);
+ Register second(first.index() + 1);
+ return std::make_pair(first, second);
+}
+
+RegisterList BytecodeArrayIterator::GetRegisterListOperand(
+ int operand_index) const {
+ Register first = GetRegisterOperand(operand_index);
+ uint32_t count = GetRegisterCountOperand(operand_index + 1);
+ return RegisterList(first.index(), count);
+}
+
+int BytecodeArrayIterator::GetRegisterOperandRange(int operand_index) const {
+ DCHECK_LE(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
+ const OperandType* operand_types =
+ Bytecodes::GetOperandTypes(current_bytecode());
+ OperandType operand_type = operand_types[operand_index];
+ DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
+ if (operand_type == OperandType::kRegList ||
+ operand_type == OperandType::kRegOutList) {
+ return GetRegisterCountOperand(operand_index + 1);
+ } else {
+ return Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type);
+ }
+}
+
+Runtime::FunctionId BytecodeArrayIterator::GetRuntimeIdOperand(
+ int operand_index) const {
+ OperandType operand_type =
+ Bytecodes::GetOperandType(current_bytecode(), operand_index);
+ DCHECK_EQ(operand_type, OperandType::kRuntimeId);
+ uint32_t raw_id = GetUnsignedOperand(operand_index, operand_type);
+ return static_cast<Runtime::FunctionId>(raw_id);
+}
+
+uint32_t BytecodeArrayIterator::GetNativeContextIndexOperand(
+ int operand_index) const {
+ OperandType operand_type =
+ Bytecodes::GetOperandType(current_bytecode(), operand_index);
+ DCHECK_EQ(operand_type, OperandType::kNativeContextIndex);
+ return GetUnsignedOperand(operand_index, operand_type);
+}
+
+Runtime::FunctionId BytecodeArrayIterator::GetIntrinsicIdOperand(
+ int operand_index) const {
+ OperandType operand_type =
+ Bytecodes::GetOperandType(current_bytecode(), operand_index);
+ DCHECK_EQ(operand_type, OperandType::kIntrinsicId);
+ uint32_t raw_id = GetUnsignedOperand(operand_index, operand_type);
+ return IntrinsicsHelper::ToRuntimeId(
+ static_cast<IntrinsicsHelper::IntrinsicId>(raw_id));
+}
+
+template <typename LocalIsolate>
+Handle<Object> BytecodeArrayIterator::GetConstantAtIndex(
+ int index, LocalIsolate* isolate) const {
+ return handle(bytecode_array()->constant_pool().get(index), isolate);
+}
+
+bool BytecodeArrayIterator::IsConstantAtIndexSmi(int index) const {
+ return bytecode_array()->constant_pool().get(index).IsSmi();
+}
+
+Smi BytecodeArrayIterator::GetConstantAtIndexAsSmi(int index) const {
+ return Smi::cast(bytecode_array()->constant_pool().get(index));
+}
+
+template <typename LocalIsolate>
+Handle<Object> BytecodeArrayIterator::GetConstantForIndexOperand(
+ int operand_index, LocalIsolate* isolate) const {
+ return GetConstantAtIndex(GetIndexOperand(operand_index), isolate);
+}
+
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<Object> BytecodeArrayIterator::GetConstantForIndexOperand(
+ int operand_index, Isolate* isolate) const;
+template Handle<Object> BytecodeArrayIterator::GetConstantForIndexOperand(
+ int operand_index, LocalIsolate* isolate) const;
+
+int BytecodeArrayIterator::GetRelativeJumpTargetOffset() const {
+ Bytecode bytecode = current_bytecode();
+ if (interpreter::Bytecodes::IsJumpImmediate(bytecode)) {
+ int relative_offset = GetUnsignedImmediateOperand(0);
+ if (bytecode == Bytecode::kJumpLoop) {
+ relative_offset = -relative_offset;
+ }
+ return relative_offset;
+ } else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) {
+ Smi smi = GetConstantAtIndexAsSmi(GetIndexOperand(0));
+ return smi.value();
+ } else {
+ UNREACHABLE();
+ }
+}
+
+int BytecodeArrayIterator::GetJumpTargetOffset() const {
+ return GetAbsoluteOffset(GetRelativeJumpTargetOffset());
+}
+
+JumpTableTargetOffsets BytecodeArrayIterator::GetJumpTableTargetOffsets()
+ const {
+ uint32_t table_start, table_size;
+ int32_t case_value_base;
+ if (current_bytecode() == Bytecode::kSwitchOnGeneratorState) {
+ table_start = GetIndexOperand(1);
+ table_size = GetUnsignedImmediateOperand(2);
+ case_value_base = 0;
+ } else {
+ DCHECK_EQ(current_bytecode(), Bytecode::kSwitchOnSmiNoFeedback);
+ table_start = GetIndexOperand(0);
+ table_size = GetUnsignedImmediateOperand(1);
+ case_value_base = GetImmediateOperand(2);
+ }
+ return JumpTableTargetOffsets(this, table_start, table_size, case_value_base);
+}
+
+int BytecodeArrayIterator::GetAbsoluteOffset(int relative_offset) const {
+ return current_offset() + relative_offset + prefix_size_;
+}
+
+std::ostream& BytecodeArrayIterator::PrintTo(std::ostream& os) const {
+ return BytecodeDecoder::Decode(os, cursor_ - prefix_size_,
+ bytecode_array()->parameter_count());
+}
+
+void BytecodeArrayIterator::UpdatePointers() {
+ DisallowGarbageCollection no_gc;
+ uint8_t* start =
+ reinterpret_cast<uint8_t*>(bytecode_array_->GetFirstBytecodeAddress());
+ if (start != start_) {
+ start_ = start;
+ uint8_t* end = start + bytecode_array_->length();
+ size_t distance_to_end = end_ - cursor_;
+ cursor_ = end - distance_to_end;
+ end_ = end;
+ }
+}
+
+JumpTableTargetOffsets::JumpTableTargetOffsets(
+ const BytecodeArrayIterator* iterator, int table_start, int table_size,
+ int case_value_base)
+ : iterator_(iterator),
+ table_start_(table_start),
+ table_size_(table_size),
+ case_value_base_(case_value_base) {}
+
+JumpTableTargetOffsets::iterator JumpTableTargetOffsets::begin() const {
+ return iterator(case_value_base_, table_start_, table_start_ + table_size_,
+ iterator_);
+}
+JumpTableTargetOffsets::iterator JumpTableTargetOffsets::end() const {
+ return iterator(case_value_base_ + table_size_, table_start_ + table_size_,
+ table_start_ + table_size_, iterator_);
+}
+int JumpTableTargetOffsets::size() const {
+ int ret = 0;
+ // TODO(leszeks): Is there a more efficient way of doing this than iterating?
+ for (const auto& entry : *this) {
+ USE(entry);
+ ret++;
+ }
+ return ret;
+}
+
+JumpTableTargetOffsets::iterator::iterator(
+ int case_value, int table_offset, int table_end,
+ const BytecodeArrayIterator* iterator)
+ : iterator_(iterator),
+ current_(Smi::zero()),
+ index_(case_value),
+ table_offset_(table_offset),
+ table_end_(table_end) {
+ UpdateAndAdvanceToValid();
+}
+
+JumpTableTargetOffset JumpTableTargetOffsets::iterator::operator*() {
+ DCHECK_LT(table_offset_, table_end_);
+ return {index_, iterator_->GetAbsoluteOffset(Smi::ToInt(current_))};
+}
+
+JumpTableTargetOffsets::iterator&
+JumpTableTargetOffsets::iterator::operator++() {
+ DCHECK_LT(table_offset_, table_end_);
+ ++table_offset_;
+ ++index_;
+ UpdateAndAdvanceToValid();
+ return *this;
+}
+
+bool JumpTableTargetOffsets::iterator::operator!=(
+ const JumpTableTargetOffsets::iterator& other) {
+ DCHECK_EQ(iterator_, other.iterator_);
+ DCHECK_EQ(table_end_, other.table_end_);
+ DCHECK_EQ(index_ - other.index_, table_offset_ - other.table_offset_);
+ return index_ != other.index_;
+}
+
+void JumpTableTargetOffsets::iterator::UpdateAndAdvanceToValid() {
+ while (table_offset_ < table_end_ &&
+ !iterator_->IsConstantAtIndexSmi(table_offset_)) {
+ ++table_offset_;
+ ++index_;
+ }
+
+ // Make sure we haven't reached the end of the table with a hole in current.
+ if (table_offset_ < table_end_) {
+ DCHECK(iterator_->IsConstantAtIndexSmi(table_offset_));
+ current_ = iterator_->GetConstantAtIndexAsSmi(table_offset_);
+ }
+}
} // namespace interpreter
} // namespace internal
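
Editorial aside, not part of the patch: a minimal sketch of how the JumpTableTargetOffsets range implemented above might be consumed. PrintSwitchTargets and its output format are made up for illustration; the iterator is assumed to already be positioned on one of the two switch bytecodes that GetJumpTableTargetOffsets() handles.

#include <iostream>
#include "src/interpreter/bytecode-array-iterator.h"

namespace vi = v8::internal::interpreter;

// Hypothetical helper: dumps every case of the current switch bytecode.
void PrintSwitchTargets(const vi::BytecodeArrayIterator& it) {
  for (vi::JumpTableTargetOffset entry : it.GetJumpTableTargetOffsets()) {
    // case_value is the Smi case label; target_offset is already absolute,
    // because operator*() runs the stored Smi through GetAbsoluteOffset().
    // Holes (non-Smi constant pool entries) were skipped by the iterator.
    std::cout << "case " << entry.case_value << " -> @" << entry.target_offset
              << "\n";
  }
}
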
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.h b/deps/v8/src/interpreter/bytecode-array-iterator.h
index 37fa228236c..d0c676d2a34 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.h
@@ -7,19 +7,178 @@
#include <memory>
-#include "src/interpreter/bytecode-array-accessor.h"
+#include "src/base/optional.h"
+#include "src/common/globals.h"
+#include "src/handles/handles.h"
+#include "src/interpreter/bytecode-register.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/objects/objects.h"
+#include "src/objects/smi.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
+
+class BytecodeArray;
+
namespace interpreter {
-class V8_EXPORT_PRIVATE BytecodeArrayIterator final
- : public BytecodeArrayAccessor {
+class BytecodeArrayIterator;
+
+struct V8_EXPORT_PRIVATE JumpTableTargetOffset {
+ int case_value;
+ int target_offset;
+};
+
+class V8_EXPORT_PRIVATE JumpTableTargetOffsets final {
+ public:
+ // Minimal iterator implementation for use in ranged-for.
+ class V8_EXPORT_PRIVATE iterator final {
+ public:
+ iterator(int case_value, int table_offset, int table_end,
+ const BytecodeArrayIterator* iterator);
+
+ JumpTableTargetOffset operator*();
+ iterator& operator++();
+ bool operator!=(const iterator& other);
+
+ private:
+ void UpdateAndAdvanceToValid();
+
+ const BytecodeArrayIterator* iterator_;
+ Smi current_;
+ int index_;
+ int table_offset_;
+ int table_end_;
+ };
+
+ JumpTableTargetOffsets(const BytecodeArrayIterator* iterator, int table_start,
+ int table_size, int case_value_base);
+
+ iterator begin() const;
+ iterator end() const;
+
+ int size() const;
+
+ private:
+ const BytecodeArrayIterator* iterator_;
+ int table_start_;
+ int table_size_;
+ int case_value_base_;
+};
+
+class V8_EXPORT_PRIVATE BytecodeArrayIterator {
public:
- explicit BytecodeArrayIterator(Handle<BytecodeArray> array);
+ BytecodeArrayIterator(Handle<BytecodeArray> bytecode_array,
+ int initial_offset = 0);
+ ~BytecodeArrayIterator();
BytecodeArrayIterator(const BytecodeArrayIterator&) = delete;
BytecodeArrayIterator& operator=(const BytecodeArrayIterator&) = delete;
+
+ inline void Advance() {
+ cursor_ += Bytecodes::Size(current_bytecode(), current_operand_scale());
+ UpdateOperandScale();
+ }
+ void SetOffset(int offset);
+ void Reset() { SetOffset(0); }
+
+ void ApplyDebugBreak();
+
+ inline Bytecode current_bytecode() const {
+ DCHECK(!done());
+ uint8_t current_byte = *cursor_;
+ Bytecode current_bytecode = Bytecodes::FromByte(current_byte);
+ DCHECK(!Bytecodes::IsPrefixScalingBytecode(current_bytecode));
+ return current_bytecode;
+ }
+ int current_bytecode_size() const;
+ int current_bytecode_size_without_prefix() const;
+ int current_offset() const {
+ return static_cast<int>(cursor_ - start_ - prefix_size_);
+ }
+ OperandScale current_operand_scale() const { return operand_scale_; }
+ Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
+
+ uint32_t GetFlagOperand(int operand_index) const;
+ uint32_t GetUnsignedImmediateOperand(int operand_index) const;
+ int32_t GetImmediateOperand(int operand_index) const;
+ uint32_t GetIndexOperand(int operand_index) const;
+ FeedbackSlot GetSlotOperand(int operand_index) const;
+ Register GetReceiver() const;
+ Register GetParameter(int parameter_index) const;
+ uint32_t GetRegisterCountOperand(int operand_index) const;
+ Register GetRegisterOperand(int operand_index) const;
+ std::pair<Register, Register> GetRegisterPairOperand(int operand_index) const;
+ RegisterList GetRegisterListOperand(int operand_index) const;
+ int GetRegisterOperandRange(int operand_index) const;
+ Runtime::FunctionId GetRuntimeIdOperand(int operand_index) const;
+ Runtime::FunctionId GetIntrinsicIdOperand(int operand_index) const;
+ uint32_t GetNativeContextIndexOperand(int operand_index) const;
+ template <typename LocalIsolate>
+ Handle<Object> GetConstantAtIndex(int offset, LocalIsolate* isolate) const;
+ bool IsConstantAtIndexSmi(int offset) const;
+ Smi GetConstantAtIndexAsSmi(int offset) const;
+ template <typename LocalIsolate>
+ Handle<Object> GetConstantForIndexOperand(int operand_index,
+ LocalIsolate* isolate) const;
+
+ // Returns the relative offset of the branch target at the current bytecode.
+ // It is an error to call this method if the bytecode is not for a jump or
+ // conditional jump. Returns a negative offset for backward jumps.
+ int GetRelativeJumpTargetOffset() const;
+ // Returns the absolute offset of the branch target at the current bytecode.
+ // It is an error to call this method if the bytecode is not for a jump or
+ // conditional jump.
+ int GetJumpTargetOffset() const;
+ // Returns an iterator over the absolute offsets of the targets of the current
+ // switch bytecode's jump table. It is an error to call this method if the
+ // bytecode is not a switch.
+ JumpTableTargetOffsets GetJumpTableTargetOffsets() const;
+
+ // Returns the absolute offset of the bytecode at the given relative offset
+ // from the current bytecode.
+ int GetAbsoluteOffset(int relative_offset) const;
+
+ std::ostream& PrintTo(std::ostream& os) const;
+
+ static void UpdatePointersCallback(void* iterator) {
+ reinterpret_cast<BytecodeArrayIterator*>(iterator)->UpdatePointers();
+ }
+
+ void UpdatePointers();
+
+ inline bool done() const { return cursor_ >= end_; }
+
+ private:
+ uint32_t GetUnsignedOperand(int operand_index,
+ OperandType operand_type) const;
+ int32_t GetSignedOperand(int operand_index, OperandType operand_type) const;
+
+ inline void UpdateOperandScale() {
+ if (done()) return;
+ uint8_t current_byte = *cursor_;
+ Bytecode current_bytecode = Bytecodes::FromByte(current_byte);
+ if (Bytecodes::IsPrefixScalingBytecode(current_bytecode)) {
+ operand_scale_ =
+ Bytecodes::PrefixBytecodeToOperandScale(current_bytecode);
+ ++cursor_;
+ prefix_size_ = 1;
+ } else {
+ operand_scale_ = OperandScale::kSingle;
+ prefix_size_ = 0;
+ }
+ }
+
+ Handle<BytecodeArray> bytecode_array_;
+ uint8_t* start_;
+ uint8_t* end_;
+ // The cursor always points to the active bytecode. If there's a prefix, the
+ // prefix is at (cursor - 1).
+ uint8_t* cursor_;
+ OperandScale operand_scale_;
+ int prefix_size_;
+ LocalHeap* const local_heap_;
};
} // namespace interpreter
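
Editorial aside, not part of the patch: a minimal sketch of driving the now self-contained BytecodeArrayIterator declared above. DumpBytecodes is a hypothetical helper; it only uses members declared in this header (the constructor, done(), Advance(), current_offset(), PrintTo()).

#include <iostream>
#include "src/interpreter/bytecode-array-iterator.h"

namespace vi = v8::internal::interpreter;

// Walks every bytecode in `array`. Wide/ExtraWide scaling prefixes are
// consumed transparently by UpdateOperandScale(), so current_bytecode()
// never reports a prefix, and current_offset() is the start of the whole
// encoded instruction (prefix included, see prefix_size_ above).
void DumpBytecodes(v8::internal::Handle<v8::internal::BytecodeArray> array) {
  for (vi::BytecodeArrayIterator it(array); !it.done(); it.Advance()) {
    std::cout << it.current_offset() << ": ";
    it.PrintTo(std::cout);
    std::cout << "\n";
  }
}
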
diff --git a/deps/v8/src/interpreter/bytecode-array-random-iterator.cc b/deps/v8/src/interpreter/bytecode-array-random-iterator.cc
index 4dca16effed..c73a0d2e9e6 100644
--- a/deps/v8/src/interpreter/bytecode-array-random-iterator.cc
+++ b/deps/v8/src/interpreter/bytecode-array-random-iterator.cc
@@ -12,7 +12,7 @@ namespace interpreter {
BytecodeArrayRandomIterator::BytecodeArrayRandomIterator(
Handle<BytecodeArray> bytecode_array, Zone* zone)
- : BytecodeArrayAccessor(bytecode_array, 0), offsets_(zone) {
+ : BytecodeArrayIterator(bytecode_array, 0), offsets_(zone) {
offsets_.reserve(bytecode_array->length() / 2);
Initialize();
}
diff --git a/deps/v8/src/interpreter/bytecode-array-random-iterator.h b/deps/v8/src/interpreter/bytecode-array-random-iterator.h
index 57eb3573ade..6f0ca2cfdd9 100644
--- a/deps/v8/src/interpreter/bytecode-array-random-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-random-iterator.h
@@ -7,7 +7,7 @@
#include <memory>
-#include "src/interpreter/bytecode-array-accessor.h"
+#include "src/interpreter/bytecode-array-iterator.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone.h"
@@ -16,7 +16,7 @@ namespace internal {
namespace interpreter {
class V8_EXPORT_PRIVATE BytecodeArrayRandomIterator final
- : public BytecodeArrayAccessor {
+ : public BytecodeArrayIterator {
public:
BytecodeArrayRandomIterator(Handle<BytecodeArray> bytecode_array, Zone* zone);
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 2d1c3704a39..76686a9d62e 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -6,10 +6,12 @@
#include "src/api/api-inl.h"
#include "src/ast/ast-source-ranges.h"
+#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/builtins/builtins-constructor.h"
#include "src/codegen/compiler.h"
#include "src/codegen/unoptimized-compilation-info.h"
+#include "src/common/globals.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecode-jump-table.h"
#include "src/interpreter/bytecode-label.h"
@@ -122,10 +124,10 @@ class V8_NODISCARD BytecodeGenerator::ControlScope {
void Continue(Statement* stmt) {
PerformCommand(CMD_CONTINUE, stmt, kNoSourcePosition);
}
- void ReturnAccumulator(int source_position = kNoSourcePosition) {
+ void ReturnAccumulator(int source_position) {
PerformCommand(CMD_RETURN, nullptr, source_position);
}
- void AsyncReturnAccumulator(int source_position = kNoSourcePosition) {
+ void AsyncReturnAccumulator(int source_position) {
PerformCommand(CMD_ASYNC_RETURN, nullptr, source_position);
}
@@ -1458,7 +1460,7 @@ void BytecodeGenerator::GenerateBytecodeBody() {
// end of the function without an explicit return being present on all paths.
if (!builder()->RemainderOfBlockIsDead()) {
builder()->LoadUndefined();
- BuildReturn();
+ BuildReturn(literal->return_position());
}
}
@@ -1772,10 +1774,14 @@ void BytecodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
AllocateBlockCoverageSlotIfEnabled(stmt, SourceRangeKind::kContinuation);
builder()->SetStatementPosition(stmt);
VisitForAccumulatorValue(stmt->expression());
+ int return_position = stmt->end_position();
+ if (return_position == ReturnStatement::kFunctionLiteralReturnPosition) {
+ return_position = info()->literal()->return_position();
+ }
if (stmt->is_async_return()) {
- execution_control()->AsyncReturnAccumulator(stmt->end_position());
+ execution_control()->AsyncReturnAccumulator(return_position);
} else {
- execution_control()->ReturnAccumulator(stmt->end_position());
+ execution_control()->ReturnAccumulator(return_position);
}
}
@@ -3309,7 +3315,7 @@ void BytecodeGenerator::BuildReturn(int source_position) {
if (info()->flags().collect_type_profile()) {
builder()->CollectTypeProfile(info()->literal()->return_position());
}
- builder()->SetReturnPosition(source_position, info()->literal());
+ builder()->SetStatementPosition(source_position);
builder()->Return();
}
@@ -4396,9 +4402,9 @@ void BytecodeGenerator::VisitYield(Yield* expr) {
builder()->Bind(jump_table, JSGeneratorObject::kReturn);
builder()->LoadAccumulatorWithRegister(input);
if (IsAsyncGeneratorFunction(function_kind())) {
- execution_control()->AsyncReturnAccumulator();
+ execution_control()->AsyncReturnAccumulator(kNoSourcePosition);
} else {
- execution_control()->ReturnAccumulator();
+ execution_control()->ReturnAccumulator(kNoSourcePosition);
}
}
@@ -4548,9 +4554,9 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
if (iterator_type == IteratorType::kAsync) {
// Await input.
BuildAwait(expr->position());
- execution_control()->AsyncReturnAccumulator();
+ execution_control()->AsyncReturnAccumulator(kNoSourcePosition);
} else {
- execution_control()->ReturnAccumulator();
+ execution_control()->ReturnAccumulator(kNoSourcePosition);
}
}
@@ -4640,9 +4646,9 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
.JumpIfFalse(ToBooleanMode::kAlreadyBoolean, &completion_is_output_value)
.LoadAccumulatorWithRegister(output_value);
if (iterator_type == IteratorType::kAsync) {
- execution_control()->AsyncReturnAccumulator();
+ execution_control()->AsyncReturnAccumulator(kNoSourcePosition);
} else {
- execution_control()->ReturnAccumulator();
+ execution_control()->ReturnAccumulator(kNoSourcePosition);
}
builder()->Bind(&completion_is_output_value);
@@ -5342,8 +5348,37 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
}
void BytecodeGenerator::VisitCallNew(CallNew* expr) {
- Register constructor = VisitForRegisterValue(expr->expression());
RegisterList args = register_allocator()->NewGrowableRegisterList();
+
+ // Load the constructor. It's in the first register in args for ease of
+ // calling %reflect_construct if we have a non-final spread. For all other
+ // cases it is popped before emitting the construct below.
+ VisitAndPushIntoRegisterList(expr->expression(), &args);
+
+ // We compile the new differently depending on the presence of spreads and
+ // their positions.
+ //
+ // If there is only one spread and it is the final argument, there is a
+ // special ConstructWithSpread bytecode.
+ //
+ // If there is a non-final spread, we rewrite calls like
+ // new ctor(1, ...x, 2)
+ // to
+ // %reflect_construct(ctor, [1, ...x, 2])
+ const CallNew::SpreadPosition spread_position = expr->spread_position();
+
+ if (spread_position == CallNew::kHasNonFinalSpread) {
+ BuildCreateArrayLiteral(expr->arguments(), nullptr);
+ builder()->SetExpressionPosition(expr);
+ builder()
+ ->StoreAccumulatorInRegister(
+ register_allocator()->GrowRegisterList(&args))
+ .CallJSRuntime(Context::REFLECT_CONSTRUCT_INDEX, args);
+ return;
+ }
+
+ Register constructor = args.first_register();
+ args = args.PopLeft();
VisitArguments(expr->arguments(), &args);
// The accumulator holds new target which is the same as the
@@ -5352,9 +5387,10 @@ void BytecodeGenerator::VisitCallNew(CallNew* expr) {
builder()->LoadAccumulatorWithRegister(constructor);
int feedback_slot_index = feedback_index(feedback_spec()->AddCallICSlot());
- if (expr->only_last_arg_is_spread()) {
+ if (spread_position == CallNew::kHasFinalSpread) {
builder()->ConstructWithSpread(constructor, args, feedback_slot_index);
} else {
+ DCHECK_EQ(spread_position, CallNew::kNoSpread);
builder()->Construct(constructor, args, feedback_slot_index);
}
}
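
Editorial aside, not part of the patch: a standalone sketch of the three-way lowering the comment above describes. The enum mirrors CallNew::SpreadPosition; EmitNew and the printed strings are illustrative placeholders, not real BytecodeArrayBuilder calls.

#include <cstdio>

enum class SpreadPosition { kNoSpread, kHasFinalSpread, kHasNonFinalSpread };

void EmitNew(SpreadPosition pos) {
  switch (pos) {
    case SpreadPosition::kHasNonFinalSpread:
      // new ctor(1, ...x, 2)  ~>  %reflect_construct(ctor, [1, ...x, 2])
      std::puts("BuildCreateArrayLiteral + CallJSRuntime(REFLECT_CONSTRUCT_INDEX)");
      break;
    case SpreadPosition::kHasFinalSpread:
      // new ctor(1, ...x)  ~>  dedicated ConstructWithSpread bytecode
      std::puts("ConstructWithSpread");
      break;
    case SpreadPosition::kNoSpread:
      std::puts("Construct");
      break;
  }
}
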
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 3abda9e387e..69d5bf89576 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -252,8 +252,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
LookupHoistingMode lookup_hoisting_mode = LookupHoistingMode::kNormal);
void BuildLiteralCompareNil(Token::Value compare_op,
BytecodeArrayBuilder::NilValue nil);
- void BuildReturn(int source_position = kNoSourcePosition);
- void BuildAsyncReturn(int source_position = kNoSourcePosition);
+ void BuildReturn(int source_position);
+ void BuildAsyncReturn(int source_position);
void BuildAsyncGeneratorReturn();
void BuildReThrow();
void BuildHoleCheckForVariableAssignment(Variable* variable, Token::Value op);
diff --git a/deps/v8/src/interpreter/bytecode-register.h b/deps/v8/src/interpreter/bytecode-register.h
index 604ebe56f5a..270b3a4a3db 100644
--- a/deps/v8/src/interpreter/bytecode-register.h
+++ b/deps/v8/src/interpreter/bytecode-register.h
@@ -157,7 +157,7 @@ class RegisterList {
friend class BytecodeDecoder;
friend class InterpreterTester;
friend class BytecodeUtils;
- friend class BytecodeArrayAccessor;
+ friend class BytecodeArrayIterator;
RegisterList(int first_reg_index, int register_count)
: first_reg_index_(first_reg_index), register_count_(register_count) {}
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index 4ff55795970..df5b525877d 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -1330,6 +1330,34 @@ void InterpreterAssembler::MaybeDropFrames(TNode<Context> context) {
BIND(&ok);
}
+void InterpreterAssembler::OnStackReplacement(TNode<Context> context,
+ TNode<IntPtrT> relative_jump) {
+ TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
+ TNode<HeapObject> shared_info = LoadJSFunctionSharedFunctionInfo(function);
+ TNode<Object> sfi_data =
+ LoadObjectField(shared_info, SharedFunctionInfo::kFunctionDataOffset);
+ TNode<Uint16T> data_type = LoadInstanceType(CAST(sfi_data));
+
+ Label baseline(this);
+ GotoIf(InstanceTypeEqual(data_type, BASELINE_DATA_TYPE), &baseline);
+ {
+ Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate());
+ CallStub(callable, context);
+ JumpBackward(relative_jump);
+ }
+
+ BIND(&baseline);
+ {
+ Callable callable =
+ CodeFactory::InterpreterOnStackReplacement_ToBaseline(isolate());
+ // We already compiled the baseline code, so we don't need to handle failed
+ // compilation as in the Ignition -> Turbofan case. Therefore we can just
+ // tailcall to the OSR builtin.
+ SaveBytecodeOffset();
+ TailCallStub(callable, context);
+ }
+}
+
void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index e2fc572f184..019fd40f3bf 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -244,6 +244,9 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Dispatch to frame dropper trampoline if necessary.
void MaybeDropFrames(TNode<Context> context);
+ // Perform OnStackReplacement.
+ void OnStackReplacement(TNode<Context> context, TNode<IntPtrT> relative_jump);
+
// Returns the offset from the BytecodeArrayPointer of the current bytecode.
TNode<IntPtrT> BytecodeOffset();
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index c7993316ab8..cb01348a351 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -2203,11 +2203,7 @@ IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
JumpBackward(relative_jump);
BIND(&osr_armed);
- {
- Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate());
- CallStub(callable, context);
- JumpBackward(relative_jump);
- }
+ OnStackReplacement(context, relative_jump);
}
// SwitchOnSmiNoFeedback <table_start> <table_length> <case_value_base>
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index a24bbca7064..ddce0f0e4eb 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -186,7 +186,10 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
// Print AST if flag is enabled. Note, if compiling on a background thread
  // then ASTs from different functions may be interspersed when printed.
- MaybePrintAst(parse_info(), compilation_info());
+ {
+ DisallowGarbageCollection no_heap_access;
+ MaybePrintAst(parse_info(), compilation_info());
+ }
base::Optional<ParkedScope> parked_scope;
if (local_isolate_) parked_scope.emplace(local_isolate_);
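
Editorial aside, not part of the patch: the hunk above narrows DisallowGarbageCollection to a block around MaybePrintAst, so the no-GC assertion covers only the printing. A generic sketch of that scoped-assertion pattern (this is not V8's implementation):

#include <cassert>

class NoAllocScope {
 public:
  NoAllocScope() { ++depth_; }
  ~NoAllocScope() { --depth_; }
  static bool allowed() { return depth_ == 0; }

 private:
  static thread_local int depth_;
};

thread_local int NoAllocScope::depth_ = 0;

void Allocate() { assert(NoAllocScope::allowed() && "allocation under no-GC scope"); }

void Example() {
  {
    NoAllocScope no_gc;  // Only this block is restricted...
    // ... e.g. printing an already-built AST.
  }
  Allocate();  // ...so later allocation is fine.
}
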
diff --git a/deps/v8/src/json/json-parser.cc b/deps/v8/src/json/json-parser.cc
index 668cd79824d..a85d2af94bf 100644
--- a/deps/v8/src/json/json-parser.cc
+++ b/deps/v8/src/json/json-parser.cc
@@ -401,7 +401,7 @@ Handle<Map> ParentOfDescriptorOwner(Isolate* isolate, Handle<Map> maybe_root,
template <typename Char>
Handle<Object> JsonParser<Char>::BuildJsonObject(
const JsonContinuation& cont,
- const std::vector<JsonProperty>& property_stack, Handle<Map> feedback) {
+ const SmallVector<JsonProperty>& property_stack, Handle<Map> feedback) {
size_t start = cont.index;
int length = static_cast<int>(property_stack.size() - start);
int named_length = length - cont.elements;
@@ -464,8 +464,8 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
InternalIndex descriptor_index(descriptor);
if (descriptor < feedback_descriptors) {
expected =
- handle(String::cast(feedback->instance_descriptors(kRelaxedLoad)
- .GetKey(descriptor_index)),
+ handle(String::cast(feedback->instance_descriptors(isolate_).GetKey(
+ descriptor_index)),
isolate_);
} else {
DisallowGarbageCollection no_gc;
@@ -497,7 +497,7 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
Handle<Object> value = property.value;
PropertyDetails details =
- target->instance_descriptors(kRelaxedLoad).GetDetails(descriptor_index);
+ target->instance_descriptors(isolate_).GetDetails(descriptor_index);
Representation expected_representation = details.representation();
if (!value->FitsRepresentation(expected_representation)) {
@@ -512,7 +512,7 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
Map::GeneralizeField(isolate(), target, descriptor_index,
details.constness(), representation, value_type);
} else if (expected_representation.IsHeapObject() &&
- !target->instance_descriptors(kRelaxedLoad)
+ !target->instance_descriptors(isolate())
.GetFieldType(descriptor_index)
.NowContains(value)) {
Handle<FieldType> value_type =
@@ -524,7 +524,7 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
new_mutable_double++;
}
- DCHECK(target->instance_descriptors(kRelaxedLoad)
+ DCHECK(target->instance_descriptors(isolate())
.GetFieldType(descriptor_index)
.NowContains(value));
map = target;
@@ -574,7 +574,7 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
if (property.string.is_index()) continue;
InternalIndex descriptor_index(descriptor);
PropertyDetails details =
- map->instance_descriptors(kRelaxedLoad).GetDetails(descriptor_index);
+ map->instance_descriptors(isolate()).GetDetails(descriptor_index);
Object value = *property.value;
FieldIndex index = FieldIndex::ForDescriptor(*map, descriptor_index);
descriptor++;
@@ -645,7 +645,7 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
template <typename Char>
Handle<Object> JsonParser<Char>::BuildJsonArray(
const JsonContinuation& cont,
- const std::vector<Handle<Object>>& element_stack) {
+ const SmallVector<Handle<Object>>& element_stack) {
size_t start = cont.index;
int length = static_cast<int>(element_stack.size() - start);
@@ -686,12 +686,10 @@ Handle<Object> JsonParser<Char>::BuildJsonArray(
template <typename Char>
MaybeHandle<Object> JsonParser<Char>::ParseJsonValue() {
std::vector<JsonContinuation> cont_stack;
- std::vector<JsonProperty> property_stack;
- std::vector<Handle<Object>> element_stack;
+ SmallVector<JsonProperty> property_stack;
+ SmallVector<Handle<Object>> element_stack;
cont_stack.reserve(16);
- property_stack.reserve(16);
- element_stack.reserve(16);
JsonContinuation cont(isolate_, JsonContinuation::kReturn, 0);
@@ -833,7 +831,7 @@ MaybeHandle<Object> JsonParser<Char>::ParseJsonValue() {
}
}
value = BuildJsonObject(cont, property_stack, feedback);
- property_stack.resize(cont.index);
+ property_stack.resize_no_init(cont.index);
Expect(JsonToken::RBRACE);
// Return the object.
@@ -852,7 +850,7 @@ MaybeHandle<Object> JsonParser<Char>::ParseJsonValue() {
if (V8_LIKELY(Check(JsonToken::COMMA))) break;
value = BuildJsonArray(cont, element_stack);
- element_stack.resize(cont.index);
+ element_stack.resize_no_init(cont.index);
Expect(JsonToken::RBRACK);
// Return the array.
diff --git a/deps/v8/src/json/json-parser.h b/deps/v8/src/json/json-parser.h
index 22aa530a95c..f6b9fb656dd 100644
--- a/deps/v8/src/json/json-parser.h
+++ b/deps/v8/src/json/json-parser.h
@@ -5,6 +5,7 @@
#ifndef V8_JSON_JSON_PARSER_H_
#define V8_JSON_JSON_PARSER_H_
+#include "src/base/small-vector.h"
#include "src/execution/isolate.h"
#include "src/heap/factory.h"
#include "src/objects/objects.h"
@@ -155,6 +156,8 @@ class JsonParser final {
static constexpr uc32 kInvalidUnicodeCharacter = static_cast<uc32>(-1);
private:
+ template <typename T>
+ using SmallVector = base::SmallVector<T, 16>;
struct JsonContinuation {
enum Type : uint8_t { kReturn, kObjectProperty, kArrayElement };
JsonContinuation(Isolate* isolate, Type type, size_t index)
@@ -287,10 +290,10 @@ class JsonParser final {
Handle<Object> BuildJsonObject(
const JsonContinuation& cont,
- const std::vector<JsonProperty>& property_stack, Handle<Map> feedback);
+ const SmallVector<JsonProperty>& property_stack, Handle<Map> feedback);
Handle<Object> BuildJsonArray(
const JsonContinuation& cont,
- const std::vector<Handle<Object>>& element_stack);
+ const SmallVector<Handle<Object>>& element_stack);
// Mark that a parsing error has happened at the current character.
void ReportUnexpectedCharacter(uc32 c);
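
Editorial aside, not part of the patch: the parser's property and element stacks now use base::SmallVector<T, 16>, so shallow JSON keeps its entries in inline storage and resize_no_init() shrinks the stacks without re-initializing elements. A generic sketch of the inline-storage idea (not V8's SmallVector):

#include <array>
#include <cstddef>
#include <vector>

// The first N elements live on the stack; only deeper nesting spills to heap.
template <typename T, size_t N>
class InlineVector {
 public:
  void push_back(const T& value) {
    if (size_ < N) inline_[size_] = value;
    else overflow_.push_back(value);
    ++size_;
  }
  // Shrink-only resize, analogous in spirit to resize_no_init() above:
  // elements are not re-initialized, the logical size just drops.
  void shrink_to(size_t new_size) {
    if (new_size >= size_) return;
    overflow_.resize(new_size > N ? new_size - N : 0);
    size_ = new_size;
  }
  size_t size() const { return size_; }
  T& operator[](size_t i) { return i < N ? inline_[i] : overflow_[i - N]; }

 private:
  std::array<T, N> inline_{};
  std::vector<T> overflow_;
  size_t size_ = 0;
};
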
diff --git a/deps/v8/src/json/json-stringifier.cc b/deps/v8/src/json/json-stringifier.cc
index a7ff0809d00..f5391fc882b 100644
--- a/deps/v8/src/json/json-stringifier.cc
+++ b/deps/v8/src/json/json-stringifier.cc
@@ -772,13 +772,13 @@ JsonStringifier::Result JsonStringifier::SerializeJSObject(
Indent();
bool comma = false;
for (InternalIndex i : map->IterateOwnDescriptors()) {
- Handle<Name> name(map->instance_descriptors(kRelaxedLoad).GetKey(i),
+ Handle<Name> name(map->instance_descriptors(isolate_).GetKey(i),
isolate_);
// TODO(rossberg): Should this throw?
if (!name->IsString()) continue;
Handle<String> key = Handle<String>::cast(name);
PropertyDetails details =
- map->instance_descriptors(kRelaxedLoad).GetDetails(i);
+ map->instance_descriptors(isolate_).GetDetails(i);
if (details.IsDontEnum()) continue;
Handle<Object> property;
if (details.location() == kField && *map == object->map()) {
diff --git a/deps/v8/src/libplatform/tracing/OWNERS b/deps/v8/src/libplatform/tracing/OWNERS
index 507f904088e..7c8128c2f2e 100644
--- a/deps/v8/src/libplatform/tracing/OWNERS
+++ b/deps/v8/src/libplatform/tracing/OWNERS
@@ -1 +1 @@
-petermarshall@chromium.org
+cbruni@chromium.org
diff --git a/deps/v8/src/libplatform/tracing/recorder-win.cc b/deps/v8/src/libplatform/tracing/recorder-win.cc
index b15704c0505..195bddd4295 100644
--- a/deps/v8/src/libplatform/tracing/recorder-win.cc
+++ b/deps/v8/src/libplatform/tracing/recorder-win.cc
@@ -4,7 +4,7 @@
#ifndef V8_LIBPLATFORM_TRACING_RECORDER_WIN_H_
#define V8_LIBPLATFORM_TRACING_RECORDER_WIN_H_
-#include <windows.h>
+#include <Windows.h>
#include <TraceLoggingProvider.h>
#include "src/libplatform/tracing/recorder.h"
@@ -13,33 +13,28 @@
#pragma clang diagnostic ignored "-Wc++98-compat-extra-semi"
#endif
-#ifndef V8_ETW_GUID
-#define V8_ETW_GUID \
- 0x57277741, 0x3638, 0x4A4B, 0xBD, 0xBA, 0x0A, 0xC6, 0xE4, 0x5D, 0xA5, 0x6C
-#endif
-
namespace v8 {
namespace platform {
namespace tracing {
-TRACELOGGING_DECLARE_PROVIDER(g_v8Provider);
+TRACELOGGING_DECLARE_PROVIDER(g_v8LibProvider);
-TRACELOGGING_DEFINE_PROVIDER(g_v8Provider, "V8.js", (V8_ETW_GUID));
+TRACELOGGING_DEFINE_PROVIDER(g_v8LibProvider, "V8.js", (V8_ETW_GUID));
-Recorder::Recorder() { TraceLoggingRegister(g_v8Provider); }
+Recorder::Recorder() { TraceLoggingRegister(g_v8LibProvider); }
Recorder::~Recorder() {
- if (g_v8Provider) {
- TraceLoggingUnregister(g_v8Provider);
+ if (g_v8LibProvider) {
+ TraceLoggingUnregister(g_v8LibProvider);
}
}
bool Recorder::IsEnabled() {
- return TraceLoggingProviderEnabled(g_v8Provider, 0, 0);
+ return TraceLoggingProviderEnabled(g_v8LibProvider, 0, 0);
}
bool Recorder::IsEnabled(const uint8_t level) {
- return TraceLoggingProviderEnabled(g_v8Provider, level, 0);
+ return TraceLoggingProviderEnabled(g_v8LibProvider, level, 0);
}
void Recorder::AddEvent(TraceObject* trace_event) {
@@ -54,7 +49,7 @@ void Recorder::AddEvent(TraceObject* trace_event) {
trace_event->category_enabled_flag()),
-1, wCategoryGroupName, 4096);
- TraceLoggingWrite(g_v8Provider, "", TraceLoggingValue(wName, "Event Name"),
+ TraceLoggingWrite(g_v8LibProvider, "", TraceLoggingValue(wName, "Event Name"),
TraceLoggingValue(trace_event->pid(), "pid"),
TraceLoggingValue(trace_event->tid(), "tid"),
TraceLoggingValue(trace_event->ts(), "ts"),
diff --git a/deps/v8/src/libplatform/tracing/recorder.h b/deps/v8/src/libplatform/tracing/recorder.h
index 4e14a038e86..31cc75f9bd8 100644
--- a/deps/v8/src/libplatform/tracing/recorder.h
+++ b/deps/v8/src/libplatform/tracing/recorder.h
@@ -9,6 +9,13 @@
#include "include/libplatform/v8-tracing.h"
+#if V8_OS_WIN
+#ifndef V8_ETW_GUID
+#define V8_ETW_GUID \
+ 0x57277741, 0x3638, 0x4A4B, 0xBD, 0xBA, 0x0A, 0xC6, 0xE4, 0x5D, 0xA5, 0x6C
+#endif
+#endif
+
namespace v8 {
namespace platform {
namespace tracing {
@@ -19,13 +26,13 @@ namespace tracing {
// the --enable-system-instrumentation command line flag. When enabled, it is
// called from within SystemInstrumentationTraceWriter and replaces the
// JSONTraceWriter for event-tracing.
-class Recorder {
+class V8_PLATFORM_EXPORT Recorder {
public:
Recorder();
~Recorder();
- bool IsEnabled();
- bool IsEnabled(const uint8_t level);
+ static bool IsEnabled();
+ static bool IsEnabled(const uint8_t level);
void AddEvent(TraceObject* trace_event);
};
diff --git a/deps/v8/src/libsampler/OWNERS b/deps/v8/src/libsampler/OWNERS
index 6afd4d0fee4..7c8128c2f2e 100644
--- a/deps/v8/src/libsampler/OWNERS
+++ b/deps/v8/src/libsampler/OWNERS
@@ -1,2 +1 @@
-alph@chromium.org
-petermarshall@chromium.org
+cbruni@chromium.org
diff --git a/deps/v8/src/logging/code-events.h b/deps/v8/src/logging/code-events.h
index c6ea66edb8b..c009ba0b15d 100644
--- a/deps/v8/src/logging/code-events.h
+++ b/deps/v8/src/logging/code-events.h
@@ -37,24 +37,22 @@ using WasmName = Vector<const char>;
V(CODE_MOVING_GC, code-moving-gc) \
V(SHARED_FUNC_MOVE_EVENT, sfi-move) \
V(SNAPSHOT_CODE_NAME_EVENT, snapshot-code-name) \
- V(TICK_EVENT, tick) \
- V(BYTECODE_FLUSH_EVENT, bytecode-flush)
+ V(TICK_EVENT, tick)
// clang-format on
-#define TAGS_LIST(V) \
- V(BUILTIN_TAG, Builtin) \
- V(CALLBACK_TAG, Callback) \
- V(EVAL_TAG, Eval) \
- V(FUNCTION_TAG, Function) \
- V(INTERPRETED_FUNCTION_TAG, InterpretedFunction) \
- V(HANDLER_TAG, Handler) \
- V(BYTECODE_HANDLER_TAG, BytecodeHandler) \
- V(LAZY_COMPILE_TAG, LazyCompile) \
- V(REG_EXP_TAG, RegExp) \
- V(SCRIPT_TAG, Script) \
- V(STUB_TAG, Stub) \
- V(NATIVE_FUNCTION_TAG, Function) \
- V(NATIVE_LAZY_COMPILE_TAG, LazyCompile) \
+#define TAGS_LIST(V) \
+ V(BUILTIN_TAG, Builtin) \
+ V(CALLBACK_TAG, Callback) \
+ V(EVAL_TAG, Eval) \
+ V(FUNCTION_TAG, Function) \
+ V(HANDLER_TAG, Handler) \
+ V(BYTECODE_HANDLER_TAG, BytecodeHandler) \
+ V(LAZY_COMPILE_TAG, LazyCompile) \
+ V(REG_EXP_TAG, RegExp) \
+ V(SCRIPT_TAG, Script) \
+ V(STUB_TAG, Stub) \
+ V(NATIVE_FUNCTION_TAG, Function) \
+ V(NATIVE_LAZY_COMPILE_TAG, LazyCompile) \
V(NATIVE_SCRIPT_TAG, Script)
// Note that 'NATIVE_' cases for functions and scripts are mapped onto
// original tags when writing to the log.
@@ -86,9 +84,11 @@ class CodeEventListener {
Handle<SharedFunctionInfo> shared,
Handle<Name> script_name, int line,
int column) = 0;
+#if V8_ENABLE_WEBASSEMBLY
virtual void CodeCreateEvent(LogEventsAndTags tag, const wasm::WasmCode* code,
wasm::WasmName name, const char* source_url,
int code_offset, int script_id) = 0;
+#endif // V8_ENABLE_WEBASSEMBLY
virtual void CallbackEvent(Handle<Name> name, Address entry_point) = 0;
virtual void GetterCallbackEvent(Handle<Name> name, Address entry_point) = 0;
@@ -109,8 +109,9 @@ class CodeEventListener {
virtual void CodeDependencyChangeEvent(Handle<Code> code,
Handle<SharedFunctionInfo> shared,
const char* reason) = 0;
- // Invoked during GC. No allocation allowed.
- virtual void BytecodeFlushEvent(Address compiled_data_start) = 0;
+ // Called during GC shortly after any weak references to code objects are
+ // cleared.
+ virtual void WeakCodeClearEvent() = 0;
virtual bool is_listening_to_code_events() { return false; }
};
@@ -175,6 +176,7 @@ class CodeEventDispatcher : public CodeEventListener {
listener->CodeCreateEvent(tag, code, shared, source, line, column);
});
}
+#if V8_ENABLE_WEBASSEMBLY
void CodeCreateEvent(LogEventsAndTags tag, const wasm::WasmCode* code,
wasm::WasmName name, const char* source_url,
int code_offset, int script_id) override {
@@ -183,6 +185,7 @@ class CodeEventDispatcher : public CodeEventListener {
script_id);
});
}
+#endif // V8_ENABLE_WEBASSEMBLY
void CallbackEvent(Handle<Name> name, Address entry_point) override {
DispatchEventToListeners([=](CodeEventListener* listener) {
listener->CallbackEvent(name, entry_point);
@@ -237,10 +240,9 @@ class CodeEventDispatcher : public CodeEventListener {
listener->CodeDependencyChangeEvent(code, sfi, reason);
});
}
- void BytecodeFlushEvent(Address compiled_data_start) override {
- DispatchEventToListeners([=](CodeEventListener* listener) {
- listener->BytecodeFlushEvent(compiled_data_start);
- });
+ void WeakCodeClearEvent() override {
+ DispatchEventToListeners(
+ [=](CodeEventListener* listener) { listener->WeakCodeClearEvent(); });
}
private:
diff --git a/deps/v8/src/logging/counters-definitions.h b/deps/v8/src/logging/counters-definitions.h
index 5a3298a772d..ffa9647719d 100644
--- a/deps/v8/src/logging/counters-definitions.h
+++ b/deps/v8/src/logging/counters-definitions.h
@@ -100,8 +100,6 @@ namespace internal {
#define HISTOGRAM_TIMER_LIST(HT) \
/* Timer histograms, not thread safe: HT(name, caption, max, unit) */ \
/* Garbage collection timers. */ \
- HT(gc_context, V8.GCContext, 10000, \
- MILLISECOND) /* GC context cleanup time */ \
HT(gc_idle_notification, V8.GCIdleNotification, 10000, MILLISECOND) \
HT(gc_incremental_marking, V8.GCIncrementalMarking, 10000, MILLISECOND) \
HT(gc_incremental_marking_start, V8.GCIncrementalMarkingStart, 10000, \
diff --git a/deps/v8/src/logging/counters.h b/deps/v8/src/logging/counters.h
index 6f0076b1806..89cda727542 100644
--- a/deps/v8/src/logging/counters.h
+++ b/deps/v8/src/logging/counters.h
@@ -903,6 +903,7 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AssignSpillSlots) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BuildLiveRangeBundles) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BuildLiveRanges) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BytecodeGraphBuilder) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, CommitAssignment) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ConnectRanges) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ControlFlowOptimization) \
@@ -914,17 +915,15 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EarlyTrimming) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EffectLinearization) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EscapeAnalysis) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierRegisterOutputDefinition) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierPopulateReferenceMaps) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierRegisterAllocator) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierSpillSlotAllocator) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, FinalizeCode) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, FrameElision) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, GenericLowering) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BytecodeGraphBuilder) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Inlining) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmInlining) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, JumpThreading) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierPopulateReferenceMaps) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierRegisterAllocator) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierRegisterOutputDefinition) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierSpillSlotAllocator) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LateGraphTrimming) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LateOptimization) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LoadElimination) \
@@ -952,7 +951,8 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Untyper) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, VerifyGraph) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmBaseOptimization) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmFullOptimization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmInlining) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmLoopUnrolling) \
\
ADD_THREAD_SPECIFIC_COUNTER(V, Parse, ArrowFunctionLiteral) \
ADD_THREAD_SPECIFIC_COUNTER(V, Parse, FunctionLiteral) \
@@ -971,7 +971,6 @@ class RuntimeCallTimer final {
V(CompileBackgroundCompileTask) \
V(CompileBaseline) \
V(CompileBaselineVisit) \
- V(CompileBaselinePrepareHandlerOffsets) \
V(CompileBaselinePreVisit) \
V(CompileCollectSourcePositions) \
V(CompileDeserialize) \
diff --git a/deps/v8/src/logging/log-utils.h b/deps/v8/src/logging/log-utils.h
index 159ce9150ee..bba1186e4f1 100644
--- a/deps/v8/src/logging/log-utils.h
+++ b/deps/v8/src/logging/log-utils.h
@@ -14,6 +14,7 @@
#include "src/base/compiler-specific.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
+#include "src/common/assert-scope.h"
#include "src/flags/flags.h"
#include "src/utils/allocation.h"
#include "src/utils/ostreams.h"
@@ -93,7 +94,7 @@ class Log {
void AppendRawCharacter(const char character);
Log* log_;
- base::MutexGuard lock_guard_;
+ NoGarbageCollectionMutexGuard lock_guard_;
friend class Log;
};
diff --git a/deps/v8/src/logging/log.cc b/deps/v8/src/logging/log.cc
index 7738cab831c..110152c56bf 100644
--- a/deps/v8/src/logging/log.cc
+++ b/deps/v8/src/logging/log.cc
@@ -43,8 +43,12 @@
#include "src/tracing/tracing-category-observer.h"
#include "src/utils/memcopy.h"
#include "src/utils/version.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects-inl.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -81,13 +85,23 @@ static v8::CodeEventType GetCodeEventTypeForTag(
}
static const char* ComputeMarker(SharedFunctionInfo shared, AbstractCode code) {
+ CodeKind kind = code.kind();
+  // We record interpreter trampoline builtin copies as having the
+ // "interpreted" marker.
+ if (FLAG_interpreted_frames_native_stack && kind == CodeKind::BUILTIN &&
+ code.GetCode().is_interpreter_trampoline_builtin() &&
+ code.GetCode() !=
+ *BUILTIN_CODE(shared.GetIsolate(), InterpreterEntryTrampoline)) {
+ kind = CodeKind::INTERPRETED_FUNCTION;
+ }
if (shared.optimization_disabled() &&
- code.kind() == CodeKind::INTERPRETED_FUNCTION) {
+ kind == CodeKind::INTERPRETED_FUNCTION) {
return "";
}
- return CodeKindToMarker(code.kind());
+ return CodeKindToMarker(kind);
}
+#if V8_ENABLE_WEBASSEMBLY
static const char* ComputeMarker(const wasm::WasmCode* code) {
switch (code->kind()) {
case wasm::WasmCode::kFunction:
@@ -96,6 +110,7 @@ static const char* ComputeMarker(const wasm::WasmCode* code) {
return "";
}
}
+#endif // V8_ENABLE_WEBASSEMBLY
class CodeEventLogger::NameBuffer {
public:
@@ -237,6 +252,7 @@ void CodeEventLogger::CodeCreateEvent(LogEventsAndTags tag,
LogRecordedBuffer(code, shared, name_buffer_->get(), name_buffer_->size());
}
+#if V8_ENABLE_WEBASSEMBLY
void CodeEventLogger::CodeCreateEvent(LogEventsAndTags tag,
const wasm::WasmCode* code,
wasm::WasmName name,
@@ -255,6 +271,7 @@ void CodeEventLogger::CodeCreateEvent(LogEventsAndTags tag,
name_buffer_->AppendBytes(ExecutionTierToString(code->tier()));
LogRecordedBuffer(code, name_buffer_->get(), name_buffer_->size());
}
+#endif // V8_ENABLE_WEBASSEMBLY
void CodeEventLogger::RegExpCodeCreateEvent(Handle<AbstractCode> code,
Handle<String> source) {
@@ -279,8 +296,10 @@ class PerfBasicLogger : public CodeEventLogger {
void LogRecordedBuffer(Handle<AbstractCode> code,
MaybeHandle<SharedFunctionInfo> maybe_shared,
const char* name, int length) override;
+#if V8_ENABLE_WEBASSEMBLY
void LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) override;
+#endif // V8_ENABLE_WEBASSEMBLY
void WriteLogRecordedBuffer(uintptr_t address, int size, const char* name,
int name_length);
@@ -339,11 +358,13 @@ void PerfBasicLogger::LogRecordedBuffer(Handle<AbstractCode> code,
code->InstructionSize(), name, length);
}
+#if V8_ENABLE_WEBASSEMBLY
void PerfBasicLogger::LogRecordedBuffer(const wasm::WasmCode* code,
const char* name, int length) {
WriteLogRecordedBuffer(static_cast<uintptr_t>(code->instruction_start()),
code->instructions().length(), name, length);
}
+#endif // V8_ENABLE_WEBASSEMBLY
#endif // V8_OS_LINUX
// External CodeEventListener
@@ -465,11 +486,13 @@ void ExternalCodeEventListener::CodeCreateEvent(
code_event_handler_->Handle(reinterpret_cast<v8::CodeEvent*>(&code_event));
}
+#if V8_ENABLE_WEBASSEMBLY
void ExternalCodeEventListener::CodeCreateEvent(
LogEventsAndTags tag, const wasm::WasmCode* code, wasm::WasmName name,
const char* source_url, int code_offset, int script_id) {
// TODO(mmarchini): handle later
}
+#endif // V8_ENABLE_WEBASSEMBLY
void ExternalCodeEventListener::RegExpCodeCreateEvent(Handle<AbstractCode> code,
Handle<String> source) {
@@ -520,8 +543,10 @@ class LowLevelLogger : public CodeEventLogger {
void LogRecordedBuffer(Handle<AbstractCode> code,
MaybeHandle<SharedFunctionInfo> maybe_shared,
const char* name, int length) override;
+#if V8_ENABLE_WEBASSEMBLY
void LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) override;
+#endif // V8_ENABLE_WEBASSEMBLY
// Low-level profiling event structures.
struct CodeCreateStruct {
@@ -616,6 +641,7 @@ void LowLevelLogger::LogRecordedBuffer(Handle<AbstractCode> code,
code->InstructionSize());
}
+#if V8_ENABLE_WEBASSEMBLY
void LowLevelLogger::LogRecordedBuffer(const wasm::WasmCode* code,
const char* name, int length) {
CodeCreateStruct event;
@@ -627,6 +653,7 @@ void LowLevelLogger::LogRecordedBuffer(const wasm::WasmCode* code,
LogWriteBytes(reinterpret_cast<const char*>(code->instruction_start()),
code->instructions().length());
}
+#endif // V8_ENABLE_WEBASSEMBLY
void LowLevelLogger::CodeMoveEvent(AbstractCode from, AbstractCode to) {
CodeMoveStruct event;
@@ -665,8 +692,10 @@ class JitLogger : public CodeEventLogger {
void LogRecordedBuffer(Handle<AbstractCode> code,
MaybeHandle<SharedFunctionInfo> maybe_shared,
const char* name, int length) override;
+#if V8_ENABLE_WEBASSEMBLY
void LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) override;
+#endif // V8_ENABLE_WEBASSEMBLY
JitCodeEventHandler code_event_handler_;
base::Mutex logger_mutex_;
@@ -697,6 +726,7 @@ void JitLogger::LogRecordedBuffer(Handle<AbstractCode> code,
code_event_handler_(&event);
}
+#if V8_ENABLE_WEBASSEMBLY
void JitLogger::LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) {
JitCodeEvent event;
@@ -746,6 +776,7 @@ void JitLogger::LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
}
code_event_handler_(&event);
}
+#endif // V8_ENABLE_WEBASSEMBLY
void JitLogger::CodeMoveEvent(AbstractCode from, AbstractCode to) {
base::MutexGuard guard(&logger_mutex_);
@@ -1225,7 +1256,7 @@ void Logger::LogSourceCodeInformation(Handle<AbstractCode> code,
// iteration.
bool hasInlined = false;
if (code->kind() != CodeKind::BASELINE) {
- SourcePositionTableIterator iterator(code->source_position_table());
+ SourcePositionTableIterator iterator(code->SourcePositionTable(*shared));
for (; !iterator.done(); iterator.Advance()) {
SourcePosition pos = iterator.source_position();
msg << "C" << iterator.code_offset() << "O" << pos.ScriptOffset();
@@ -1364,6 +1395,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Handle<AbstractCode> code,
LogCodeDisassemble(code);
}
+#if V8_ENABLE_WEBASSEMBLY
void Logger::CodeCreateEvent(LogEventsAndTags tag, const wasm::WasmCode* code,
wasm::WasmName name, const char* /*source_url*/,
int /*code_offset*/, int /*script_id*/) {
@@ -1386,6 +1418,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, const wasm::WasmCode* code,
msg << kNext << tag_ptr << kNext << ComputeMarker(code);
msg.WriteToLogFile();
}
+#endif // V8_ENABLE_WEBASSEMBLY
void Logger::CallbackEventInternal(const char* prefix, Handle<Name> name,
Address entry_point) {
@@ -1730,11 +1763,13 @@ void Logger::ICEvent(const char* type, bool keyed, Handle<Map> map,
Handle<Object> key, char old_state, char new_state,
const char* modifier, const char* slow_stub_reason) {
if (!FLAG_log_ic) return;
- MSG_BUILDER();
- if (keyed) msg << "Keyed";
int line;
int column;
+ // GetAbstractPC must come before MSG_BUILDER(), as it can GC, which might
+ // attempt to get the log lock again and result in a deadlock.
Address pc = isolate_->GetAbstractPC(&line, &column);
+ MSG_BUILDER();
+ if (keyed) msg << "Keyed";
msg << type << kNext << reinterpret_cast<void*>(pc) << kNext << Time()
<< kNext << line << kNext << column << kNext << old_state << kNext
<< new_state << kNext
@@ -1860,22 +1895,6 @@ EnumerateCompiledFunctions(Heap* heap) {
return compiled_funcs;
}
-static std::vector<Handle<WasmModuleObject>> EnumerateWasmModuleObjects(
- Heap* heap) {
- HeapObjectIterator iterator(heap);
- DisallowGarbageCollection no_gc;
- std::vector<Handle<WasmModuleObject>> module_objects;
-
- for (HeapObject obj = iterator.Next(); !obj.is_null();
- obj = iterator.Next()) {
- if (obj.IsWasmModuleObject()) {
- WasmModuleObject module = WasmModuleObject::cast(obj);
- module_objects.emplace_back(module, Isolate::FromHeap(heap));
- }
- }
- return module_objects;
-}
-
void Logger::LogCodeObjects() { existing_code_logger_.LogCodeObjects(); }
void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
@@ -2031,9 +2050,11 @@ void Logger::SetCodeEventHandler(uint32_t options,
}
if (event_handler) {
+#if V8_ENABLE_WEBASSEMBLY
if (isolate_->wasm_engine() != nullptr) {
isolate_->wasm_engine()->EnableCodeLogging(isolate_);
}
+#endif // V8_ENABLE_WEBASSEMBLY
jit_logger_ = std::make_unique<JitLogger>(isolate_, event_handler);
AddCodeEventListener(jit_logger_.get());
if (options & kJitCodeEventEnumExisting) {
@@ -2183,29 +2204,32 @@ void ExistingCodeLogger::LogCompiledFunctions() {
LogExistingFunction(
shared,
Handle<AbstractCode>(
- AbstractCode::cast(shared->InterpreterTrampoline()), isolate_),
- CodeEventListener::INTERPRETED_FUNCTION_TAG);
+ AbstractCode::cast(shared->InterpreterTrampoline()), isolate_));
}
if (shared->HasBaselineData()) {
- // TODO(v8:11429): Add a tag for baseline code. Or use CodeKind?
LogExistingFunction(
shared,
Handle<AbstractCode>(
AbstractCode::cast(shared->baseline_data().baseline_code()),
- isolate_),
- CodeEventListener::INTERPRETED_FUNCTION_TAG);
+ isolate_));
}
if (pair.second.is_identical_to(BUILTIN_CODE(isolate_, CompileLazy)))
continue;
LogExistingFunction(pair.first, pair.second);
}
- const std::vector<Handle<WasmModuleObject>> wasm_module_objects =
- EnumerateWasmModuleObjects(heap);
- for (auto& module_object : wasm_module_objects) {
- module_object->native_module()->LogWasmCodes(isolate_,
- module_object->script());
+#if V8_ENABLE_WEBASSEMBLY
+ HeapObjectIterator iterator(heap);
+ DisallowGarbageCollection no_gc;
+
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
+ if (!obj.IsWasmModuleObject()) continue;
+ auto module_object = WasmModuleObject::cast(obj);
+ module_object.native_module()->LogWasmCodes(isolate_,
+ module_object.script());
}
+#endif // V8_ENABLE_WEBASSEMBLY
}
void ExistingCodeLogger::LogExistingFunction(
@@ -2218,7 +2242,7 @@ void ExistingCodeLogger::LogExistingFunction(
Script::GetColumnNumber(script, shared->StartPosition()) + 1;
if (script->name().IsString()) {
Handle<String> script_name(String::cast(script->name()), isolate_);
- if (line_num > 0) {
+ if (!shared->is_toplevel()) {
CALL_CODE_EVENT_HANDLER(
CodeCreateEvent(Logger::ToNativeByScript(tag, *script), code,
shared, script_name, line_num, column_num))
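
Editorial aside, not part of the patch: the log-utils.h change (NoGarbageCollectionMutexGuard) and the ICEvent reordering above both guard against re-entering the log lock from a GC triggered while the lock is held. A standalone sketch of that hazard and the fix, with illustrative names only:

#include <mutex>

std::mutex log_mutex;

void EmitLogLine() {
  std::lock_guard<std::mutex> guard(log_mutex);
  // ... format and write the message; must not allocate/GC here ...
}

void MayTriggerGc() {
  // Stand-in for GetAbstractPC(): it can allocate and thus GC, and a GC
  // callback may in turn try to log, i.e. take log_mutex again.
}

void LogIcEventSafely() {
  MayTriggerGc();  // do the GC-capable work *before* taking the log lock
  EmitLogLine();   // then log under the lock, with no re-entry possible
}
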
diff --git a/deps/v8/src/logging/log.h b/deps/v8/src/logging/log.h
index 6951a290549..e52f9f28338 100644
--- a/deps/v8/src/logging/log.h
+++ b/deps/v8/src/logging/log.h
@@ -91,7 +91,7 @@ class ExistingCodeLogger {
void LogExistingFunction(Handle<SharedFunctionInfo> shared,
Handle<AbstractCode> code,
CodeEventListener::LogEventsAndTags tag =
- CodeEventListener::LAZY_COMPILE_TAG);
+ CodeEventListener::FUNCTION_TAG);
void LogCodeObject(Object object);
private:
@@ -195,9 +195,11 @@ class Logger : public CodeEventListener {
void CodeCreateEvent(LogEventsAndTags tag, Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared,
Handle<Name> script_name, int line, int column) override;
+#if V8_ENABLE_WEBASSEMBLY
void CodeCreateEvent(LogEventsAndTags tag, const wasm::WasmCode* code,
wasm::WasmName name, const char* source_url,
int code_offset, int script_id) override;
+#endif // V8_ENABLE_WEBASSEMBLY
void CallbackEvent(Handle<Name> name, Address entry_point) override;
void GetterCallbackEvent(Handle<Name> name, Address entry_point) override;
@@ -214,7 +216,7 @@ class Logger : public CodeEventListener {
void CodeDependencyChangeEvent(Handle<Code> code,
Handle<SharedFunctionInfo> sfi,
const char* reason) override;
- void BytecodeFlushEvent(Address compiled_data_start) override {}
+ void WeakCodeClearEvent() override {}
void ProcessDeoptEvent(Handle<Code> code, SourcePosition position,
const char* kind, const char* reason);
@@ -396,9 +398,11 @@ class V8_EXPORT_PRIVATE CodeEventLogger : public CodeEventListener {
void CodeCreateEvent(LogEventsAndTags tag, Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared,
Handle<Name> script_name, int line, int column) override;
+#if V8_ENABLE_WEBASSEMBLY
void CodeCreateEvent(LogEventsAndTags tag, const wasm::WasmCode* code,
wasm::WasmName name, const char* source_url,
int code_offset, int script_id) override;
+#endif // V8_ENABLE_WEBASSEMBLY
void RegExpCodeCreateEvent(Handle<AbstractCode> code,
Handle<String> source) override;
@@ -412,7 +416,7 @@ class V8_EXPORT_PRIVATE CodeEventLogger : public CodeEventListener {
void CodeDependencyChangeEvent(Handle<Code> code,
Handle<SharedFunctionInfo> sfi,
const char* reason) override {}
- void BytecodeFlushEvent(Address compiled_data_start) override {}
+ void WeakCodeClearEvent() override {}
protected:
Isolate* isolate_;
@@ -423,8 +427,10 @@ class V8_EXPORT_PRIVATE CodeEventLogger : public CodeEventListener {
virtual void LogRecordedBuffer(Handle<AbstractCode> code,
MaybeHandle<SharedFunctionInfo> maybe_shared,
const char* name, int length) = 0;
+#if V8_ENABLE_WEBASSEMBLY
virtual void LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) = 0;
+#endif // V8_ENABLE_WEBASSEMBLY
std::unique_ptr<NameBuffer> name_buffer_;
};
@@ -457,9 +463,11 @@ class ExternalCodeEventListener : public CodeEventListener {
void CodeCreateEvent(LogEventsAndTags tag, Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared, Handle<Name> source,
int line, int column) override;
+#if V8_ENABLE_WEBASSEMBLY
void CodeCreateEvent(LogEventsAndTags tag, const wasm::WasmCode* code,
wasm::WasmName name, const char* source_url,
int code_offset, int script_id) override;
+#endif // V8_ENABLE_WEBASSEMBLY
void RegExpCodeCreateEvent(Handle<AbstractCode> code,
Handle<String> source) override;
@@ -476,7 +484,7 @@ class ExternalCodeEventListener : public CodeEventListener {
void CodeDependencyChangeEvent(Handle<Code> code,
Handle<SharedFunctionInfo> sfi,
const char* reason) override {}
- void BytecodeFlushEvent(Address compiled_data_start) override {}
+ void WeakCodeClearEvent() override {}
void StartListening(v8::CodeEventHandler* code_event_handler);
void StopListening();
diff --git a/deps/v8/src/numbers/OWNERS b/deps/v8/src/numbers/OWNERS
index c4022e3ada8..d2e7aea5e22 100644
--- a/deps/v8/src/numbers/OWNERS
+++ b/deps/v8/src/numbers/OWNERS
@@ -1,5 +1,4 @@
clemensb@chromium.org
jgruber@chromium.org
jkummerow@chromium.org
-sigurds@chromium.org
verwaest@chromium.org
diff --git a/deps/v8/src/objects/backing-store.cc b/deps/v8/src/objects/backing-store.cc
index 689dedd103d..08288ef62c0 100644
--- a/deps/v8/src/objects/backing-store.cc
+++ b/deps/v8/src/objects/backing-store.cc
@@ -11,10 +11,13 @@
#include "src/handles/global-handles.h"
#include "src/logging/counters.h"
#include "src/trap-handler/trap-handler.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-objects-inl.h"
+#endif // V8_ENABLE_WEBASSEMBLY
#define TRACE_BS(...) \
do { \
@@ -25,6 +28,15 @@ namespace v8 {
namespace internal {
namespace {
+
+#if V8_ENABLE_WEBASSEMBLY
+// Trying to allocate 4 GiB on a 32-bit platform is guaranteed to fail.
+// We don't lower the official max_mem_pages() limit because that would be
+// observable upon instantiation; this way the effective limit on 32-bit
+// platforms is defined by the allocator.
+constexpr size_t kPlatformMaxPages =
+ std::numeric_limits<size_t>::max() / wasm::kWasmPageSize;
+
#if V8_TARGET_ARCH_MIPS64
// MIPS64 has a user space of 2^40 bytes on most processors,
// address space limits needs to be smaller.
@@ -98,6 +110,7 @@ void RecordStatus(Isolate* isolate, AllocationStatus status) {
isolate->counters()->wasm_memory_allocation_result()->AddSample(
static_cast<int>(status));
}
+#endif // V8_ENABLE_WEBASSEMBLY
inline void DebugCheckZero(void* start, size_t byte_length) {
#if DEBUG
@@ -119,25 +132,6 @@ inline void DebugCheckZero(void* start, size_t byte_length) {
}
} // namespace
-bool BackingStore::ReserveAddressSpace(uint64_t num_bytes) {
- uint64_t reservation_limit = kAddressSpaceLimit;
- uint64_t old_count = reserved_address_space_.load(std::memory_order_relaxed);
- while (true) {
- if (old_count > reservation_limit) return false;
- if (reservation_limit - old_count < num_bytes) return false;
- if (reserved_address_space_.compare_exchange_weak(
- old_count, old_count + num_bytes, std::memory_order_acq_rel)) {
- return true;
- }
- }
-}
-
-void BackingStore::ReleaseReservation(uint64_t num_bytes) {
- uint64_t old_reserved = reserved_address_space_.fetch_sub(num_bytes);
- USE(old_reserved);
- DCHECK_LE(num_bytes, old_reserved);
-}
-
// The backing store for a Wasm shared memory remembers all the isolates
// with which it has been shared.
struct SharedWasmMemoryData {
@@ -164,6 +158,7 @@ BackingStore::~BackingStore() {
return;
}
+#if V8_ENABLE_WEBASSEMBLY
if (is_wasm_memory_) {
DCHECK(free_on_destruct_);
DCHECK(!custom_deleter_);
@@ -192,6 +187,8 @@ BackingStore::~BackingStore() {
Clear();
return;
}
+#endif // V8_ENABLE_WEBASSEMBLY
+
if (custom_deleter_) {
DCHECK(free_on_destruct_);
TRACE_BS("BS:custom deleter bs=%p mem=%p (length=%zu, capacity=%zu)\n",
@@ -271,13 +268,6 @@ std::unique_ptr<BackingStore> BackingStore::Allocate(
return std::unique_ptr<BackingStore>(result);
}
-// Trying to allocate 4 GiB on a 32-bit platform is guaranteed to fail.
-// We don't lower the official max_mem_pages() limit because that would be
-// observable upon instantiation; this way the effective limit on 32-bit
-// platforms is defined by the allocator.
-constexpr size_t kPlatformMaxPages =
- std::numeric_limits<size_t>::max() / wasm::kWasmPageSize;
-
void BackingStore::SetAllocatorFromIsolate(Isolate* isolate) {
if (auto allocator_shared = isolate->array_buffer_allocator_shared()) {
holds_shared_ptr_to_allocator_ = true;
@@ -290,6 +280,26 @@ void BackingStore::SetAllocatorFromIsolate(Isolate* isolate) {
}
}
+#if V8_ENABLE_WEBASSEMBLY
+bool BackingStore::ReserveAddressSpace(uint64_t num_bytes) {
+ uint64_t reservation_limit = kAddressSpaceLimit;
+ uint64_t old_count = reserved_address_space_.load(std::memory_order_relaxed);
+ while (true) {
+ if (old_count > reservation_limit) return false;
+ if (reservation_limit - old_count < num_bytes) return false;
+ if (reserved_address_space_.compare_exchange_weak(
+ old_count, old_count + num_bytes, std::memory_order_acq_rel)) {
+ return true;
+ }
+ }
+}
+
+void BackingStore::ReleaseReservation(uint64_t num_bytes) {
+ uint64_t old_reserved = reserved_address_space_.fetch_sub(num_bytes);
+ USE(old_reserved);
+ DCHECK_LE(num_bytes, old_reserved);
+}
+
// Allocate a backing store for a Wasm memory. Always use the page allocator
// and add guard regions.
std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
@@ -571,6 +581,7 @@ void BackingStore::RemoveSharedWasmMemoryObjects(Isolate* isolate) {
void BackingStore::UpdateSharedWasmMemoryObjects(Isolate* isolate) {
GlobalBackingStoreRegistry::UpdateSharedWasmMemoryObjects(isolate);
}
+#endif // V8_ENABLE_WEBASSEMBLY
std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
Isolate* isolate, void* allocation_base, size_t allocation_length,
@@ -674,17 +685,8 @@ inline GlobalBackingStoreRegistryImpl* impl() {
void GlobalBackingStoreRegistry::Register(
std::shared_ptr<BackingStore> backing_store) {
if (!backing_store || !backing_store->buffer_start()) return;
-
- if (!backing_store->free_on_destruct()) {
- // If the backing store buffer is managed by the embedder,
- // then we don't have to guarantee that there is single unique
- // BackingStore per buffer_start() because the destructor of
- // of the BackingStore will be a no-op in that case.
-
- // All Wasm memory has to be registered.
- CHECK(!backing_store->is_wasm_memory());
- return;
- }
+ // Only wasm memory backing stores need to be registered globally.
+ CHECK(backing_store->is_wasm_memory());
base::MutexGuard scope_lock(&impl()->mutex_);
if (backing_store->globally_registered_) return;
@@ -700,6 +702,8 @@ void GlobalBackingStoreRegistry::Register(
void GlobalBackingStoreRegistry::Unregister(BackingStore* backing_store) {
if (!backing_store->globally_registered_) return;
+ CHECK(backing_store->is_wasm_memory());
+
DCHECK_NOT_NULL(backing_store->buffer_start());
base::MutexGuard scope_lock(&impl()->mutex_);
@@ -711,26 +715,6 @@ void GlobalBackingStoreRegistry::Unregister(BackingStore* backing_store) {
backing_store->globally_registered_ = false;
}
-std::shared_ptr<BackingStore> GlobalBackingStoreRegistry::Lookup(
- void* buffer_start, size_t length) {
- base::MutexGuard scope_lock(&impl()->mutex_);
- TRACE_BS("BS:lookup mem=%p (%zu bytes)\n", buffer_start, length);
- const auto& result = impl()->map_.find(buffer_start);
- if (result == impl()->map_.end()) {
- return std::shared_ptr<BackingStore>();
- }
- auto backing_store = result->second.lock();
- CHECK_EQ(buffer_start, backing_store->buffer_start());
- if (backing_store->is_wasm_memory()) {
- // Grow calls to shared WebAssembly threads can be triggered from different
- // workers, length equality cannot be guaranteed here.
- CHECK_LE(length, backing_store->byte_length());
- } else {
- CHECK_EQ(length, backing_store->byte_length());
- }
- return backing_store;
-}
-
void GlobalBackingStoreRegistry::Purge(Isolate* isolate) {
// We need to keep a reference to all backing stores that are inspected
// in the purging loop below. Otherwise, we might get a deadlock
@@ -744,7 +728,7 @@ void GlobalBackingStoreRegistry::Purge(Isolate* isolate) {
auto backing_store = entry.second.lock();
prevent_destruction_under_lock.emplace_back(backing_store);
if (!backing_store) continue; // skip entries where weak ptr is null
- if (!backing_store->is_wasm_memory()) continue; // skip non-wasm memory
+ CHECK(backing_store->is_wasm_memory());
if (!backing_store->is_shared()) continue; // skip non-shared memory
SharedWasmMemoryData* shared_data =
backing_store->get_shared_wasm_memory_data();
@@ -756,6 +740,7 @@ void GlobalBackingStoreRegistry::Purge(Isolate* isolate) {
}
}
+#if V8_ENABLE_WEBASSEMBLY
void GlobalBackingStoreRegistry::AddSharedWasmMemoryObject(
Isolate* isolate, BackingStore* backing_store,
Handle<WasmMemoryObject> memory_object) {
@@ -815,6 +800,7 @@ void GlobalBackingStoreRegistry::UpdateSharedWasmMemoryObjects(
memory_object->update_instances(isolate, new_buffer);
}
}
+#endif // V8_ENABLE_WEBASSEMBLY
} // namespace internal
} // namespace v8
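
The backing-store.cc hunks above relocate ReserveAddressSpace and ReleaseReservation into the V8_ENABLE_WEBASSEMBLY block without changing their logic: a process-wide atomic counter is advanced with a compare-and-swap loop and any reservation that would exceed the limit is refused. A minimal standalone sketch of that pattern (the namespace, counter name, and limit value are illustrative placeholders, not V8's):

#include <atomic>
#include <cstdint>

namespace sketch {

// Process-wide running total of reserved guard-region address space.
std::atomic<uint64_t> reserved_address_space{0};
constexpr uint64_t kAddressSpaceLimit = uint64_t{1} << 40;  // placeholder limit

// Returns true if num_bytes could be reserved without exceeding the limit.
bool ReserveAddressSpace(uint64_t num_bytes) {
  uint64_t old_count = reserved_address_space.load(std::memory_order_relaxed);
  while (true) {
    // The two checks together also rule out overflow of old_count + num_bytes.
    if (old_count > kAddressSpaceLimit) return false;
    if (kAddressSpaceLimit - old_count < num_bytes) return false;
    if (reserved_address_space.compare_exchange_weak(
            old_count, old_count + num_bytes, std::memory_order_acq_rel)) {
      return true;
    }
    // On failure compare_exchange_weak reloaded old_count; retry.
  }
}

void ReleaseReservation(uint64_t num_bytes) {
  reserved_address_space.fetch_sub(num_bytes);
}

}  // namespace sketch
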
diff --git a/deps/v8/src/objects/backing-store.h b/deps/v8/src/objects/backing-store.h
index 18505baf670..eb879d5e8ad 100644
--- a/deps/v8/src/objects/backing-store.h
+++ b/deps/v8/src/objects/backing-store.h
@@ -48,11 +48,13 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
SharedFlag shared,
InitializedFlag initialized);
+#if V8_ENABLE_WEBASSEMBLY
// Allocate the backing store for a Wasm memory.
static std::unique_ptr<BackingStore> AllocateWasmMemory(Isolate* isolate,
size_t initial_pages,
size_t maximum_pages,
SharedFlag shared);
+#endif // V8_ENABLE_WEBASSEMBLY
// Create a backing store that wraps existing allocated memory.
// If {free_on_destruct} is {true}, the memory will be freed using the
@@ -84,14 +86,15 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
bool has_guard_regions() const { return has_guard_regions_; }
bool free_on_destruct() const { return free_on_destruct_; }
+ // Wrapper around ArrayBuffer::Allocator::Reallocate.
+ bool Reallocate(Isolate* isolate, size_t new_byte_length);
+
+#if V8_ENABLE_WEBASSEMBLY
// Attempt to grow this backing store in place.
base::Optional<size_t> GrowWasmMemoryInPlace(Isolate* isolate,
size_t delta_pages,
size_t max_pages);
- // Wrapper around ArrayBuffer::Allocator::Reallocate.
- bool Reallocate(Isolate* isolate, size_t new_byte_length);
-
// Allocate a new, larger, backing store for this Wasm memory and copy the
// contents of this backing store into it.
std::unique_ptr<BackingStore> CopyWasmMemory(Isolate* isolate,
@@ -120,6 +123,7 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
// Update all shared memory objects in this isolate (after a grow operation).
static void UpdateSharedWasmMemoryObjects(Isolate* isolate);
+#endif // V8_ENABLE_WEBASSEMBLY
// Returns the size of the external memory owned by this backing store.
// It is used for triggering GCs based on the external memory pressure.
@@ -208,26 +212,23 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
SharedWasmMemoryData* get_shared_wasm_memory_data();
void Clear(); // Internally clears fields after deallocation.
+#if V8_ENABLE_WEBASSEMBLY
static std::unique_ptr<BackingStore> TryAllocateWasmMemory(
Isolate* isolate, size_t initial_pages, size_t maximum_pages,
SharedFlag shared);
+#endif // V8_ENABLE_WEBASSEMBLY
};
-// A global, per-process mapping from buffer addresses to backing stores.
-// This is generally only used for dealing with an embedder that has not
-// migrated to the new API which should use proper pointers to manage
-// backing stores.
+// A global, per-process mapping from buffer addresses to backing stores
+// of wasm memory objects.
class GlobalBackingStoreRegistry {
public:
// Register a backing store in the global registry. A mapping from the
// {buffer_start} to the backing store object will be added. The backing
// store will automatically unregister itself upon destruction.
+ // Only wasm memory backing stores are supported.
static void Register(std::shared_ptr<BackingStore> backing_store);
- // Look up a backing store based on the {buffer_start} pointer.
- static std::shared_ptr<BackingStore> Lookup(void* buffer_start,
- size_t length);
-
private:
friend class BackingStore;
// Unregister a backing store in the global registry.
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index 7c562465cb0..cffac5af8e1 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -19,6 +19,7 @@
#include "src/objects/bigint.h"
+#include "src/bigint/bigint.h"
#include "src/execution/isolate-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap-write-barrier-inl.h"
@@ -135,10 +136,6 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
MutableBigInt result_storage = MutableBigInt());
- static int AbsoluteCompare(Handle<BigIntBase> x, Handle<BigIntBase> y);
-
- static int AbsoluteCompare(BigIntBase x, BigIntBase y);
-
static void MultiplyAccumulate(Handle<BigIntBase> multiplicand,
digit_t multiplier,
Handle<MutableBigInt> accumulator,
@@ -232,6 +229,9 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
bool IsMutableBigInt() const { return IsBigInt(); }
+ static_assert(std::is_same<bigint::digit_t, BigIntBase::digit_t>::value,
+ "We must be able to call BigInt library functions");
+
NEVER_READ_ONLY_SPACE
OBJECT_CONSTRUCTORS(MutableBigInt, FreshlyAllocatedBigInt);
@@ -243,6 +243,15 @@ NEVER_READ_ONLY_SPACE_IMPL(MutableBigInt)
#include "src/base/platform/wrappers.h"
#include "src/objects/object-macros-undef.h"
+struct GetDigits : bigint::Digits {
+ explicit GetDigits(Handle<BigIntBase> bigint) : GetDigits(*bigint) {}
+ explicit GetDigits(BigIntBase bigint)
+ : bigint::Digits(
+ reinterpret_cast<bigint::digit_t*>(
+ bigint.ptr() + BigIntBase::kDigitsOffset - kHeapObjectTag),
+ bigint.length()) {}
+};
+
template <typename T, typename Isolate>
MaybeHandle<T> ThrowBigIntTooBig(Isolate* isolate) {
// If the result of a BigInt computation is truncated to 64 bit, Turbofan
@@ -560,7 +569,7 @@ MaybeHandle<BigInt> BigInt::Divide(Isolate* isolate, Handle<BigInt> x,
// 2. Let quotient be the mathematical value of x divided by y.
// 3. Return a BigInt representing quotient rounded towards 0 to the next
// integral value.
- if (MutableBigInt::AbsoluteCompare(x, y) < 0) {
+ if (bigint::Compare(GetDigits(x), GetDigits(y)) < 0) {
return Zero(isolate);
}
Handle<MutableBigInt> quotient;
@@ -590,7 +599,7 @@ MaybeHandle<BigInt> BigInt::Remainder(Isolate* isolate, Handle<BigInt> x,
}
// 2. Return the BigInt representing x modulo y.
// See https://github.com/tc39/proposal-bigint/issues/84 though.
- if (MutableBigInt::AbsoluteCompare(x, y) < 0) return x;
+ if (bigint::Compare(GetDigits(x), GetDigits(y)) < 0) return x;
Handle<MutableBigInt> remainder;
if (y->length() == 1) {
digit_t divisor = y->digit(0);
@@ -622,7 +631,7 @@ MaybeHandle<BigInt> BigInt::Add(Isolate* isolate, Handle<BigInt> x,
}
// x + -y == x - y == -(y - x)
// -x + y == y - x == -(x - y)
- if (MutableBigInt::AbsoluteCompare(x, y) >= 0) {
+ if (bigint::Compare(GetDigits(x), GetDigits(y)) >= 0) {
return MutableBigInt::AbsoluteSub(isolate, x, y, xsign);
}
return MutableBigInt::AbsoluteSub(isolate, y, x, !xsign);
@@ -638,7 +647,7 @@ MaybeHandle<BigInt> BigInt::Subtract(Isolate* isolate, Handle<BigInt> x,
}
// x - y == -(y - x)
// (-x) - (-y) == y - x == -(x - y)
- if (MutableBigInt::AbsoluteCompare(x, y) >= 0) {
+ if (bigint::Compare(GetDigits(x), GetDigits(y)) >= 0) {
return MutableBigInt::AbsoluteSub(isolate, x, y, xsign);
}
return MutableBigInt::AbsoluteSub(isolate, y, x, !xsign);
@@ -691,7 +700,7 @@ ComparisonResult BigInt::CompareToBigInt(Handle<BigInt> x, Handle<BigInt> y) {
bool x_sign = x->sign();
if (x_sign != y->sign()) return UnequalSign(x_sign);
- int result = MutableBigInt::AbsoluteCompare(x, y);
+ int result = bigint::Compare(GetDigits(x), GetDigits(y));
if (result > 0) return AbsoluteGreater(x_sign);
if (result < 0) return AbsoluteLess(x_sign);
return ComparisonResult::kEqual;
@@ -1230,7 +1239,7 @@ MaybeHandle<BigInt> MutableBigInt::AbsoluteAdd(Isolate* isolate,
Handle<BigInt> MutableBigInt::AbsoluteSub(Isolate* isolate, Handle<BigInt> x,
Handle<BigInt> y, bool result_sign) {
DCHECK(x->length() >= y->length());
- SLOW_DCHECK(AbsoluteCompare(x, y) >= 0);
+ SLOW_DCHECK(bigint::Compare(GetDigits(x), GetDigits(y)) >= 0);
if (x->is_zero()) {
DCHECK(y->is_zero());
return x;
@@ -1440,22 +1449,6 @@ Handle<MutableBigInt> MutableBigInt::AbsoluteXor(Isolate* isolate,
[](digit_t a, digit_t b) { return a ^ b; });
}
-// Returns a positive value if abs(x) > abs(y), a negative value if
-// abs(x) < abs(y), or zero if abs(x) == abs(y).
-int MutableBigInt::AbsoluteCompare(Handle<BigIntBase> x, Handle<BigIntBase> y) {
- return MutableBigInt::AbsoluteCompare(*x, *y);
-}
-
-int MutableBigInt::AbsoluteCompare(BigIntBase x, BigIntBase y) {
- DisallowGarbageCollection no_gc;
- int diff = x.length() - y.length();
- if (diff != 0) return diff;
- int i = x.length() - 1;
- while (i >= 0 && x.digit(i) == y.digit(i)) i--;
- if (i < 0) return 0;
- return x.digit(i) > y.digit(i) ? 1 : -1;
-}
-
// Multiplies {multiplicand} with {multiplier} and adds the result to
// {accumulator}, starting at {accumulator_index} for the least-significant
// digit.
@@ -2766,7 +2759,7 @@ int32_t MutableBigInt_AbsoluteCompare(Address x_addr, Address y_addr) {
BigInt x = BigInt::cast(Object(x_addr));
BigInt y = BigInt::cast(Object(y_addr));
- return MutableBigInt::AbsoluteCompare(x, y);
+ return bigint::Compare(GetDigits(x), GetDigits(y));
}
void MutableBigInt_AbsoluteSubAndCanonicalize(Address result_addr,
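
The bigint.cc changes above delete MutableBigInt::AbsoluteCompare and call bigint::Compare on GetDigits views instead; sign handling stays in callers such as BigInt::CompareToBigInt. A minimal sketch of the magnitude comparison the removed helper performed, with std::vector standing in for the on-heap digit storage:

#include <cstdint>
#include <vector>

using digit_t = uintptr_t;

// Returns >0 if |x| > |y|, <0 if |x| < |y|, and 0 if the magnitudes are equal.
// Digits are least-significant first with no leading zero digits.
int CompareMagnitudes(const std::vector<digit_t>& x,
                      const std::vector<digit_t>& y) {
  if (x.size() != y.size()) return x.size() > y.size() ? 1 : -1;
  for (size_t i = x.size(); i-- > 0;) {
    if (x[i] != y[i]) return x[i] > y[i] ? 1 : -1;
  }
  return 0;
}
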
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index 191abdb4b6a..6fd3355a42f 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_CODE_INL_H_
#include "src/base/memory.h"
+#include "src/baseline/bytecode-offset-iterator.h"
#include "src/codegen/code-desc.h"
#include "src/common/assert-scope.h"
#include "src/execution/isolate.h"
@@ -16,7 +17,9 @@
#include "src/objects/map-inl.h"
#include "src/objects/maybe-object-inl.h"
#include "src/objects/oddball.h"
+#include "src/objects/shared-function-info.h"
#include "src/objects/smi-inl.h"
+#include "src/utils/utils.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -55,9 +58,18 @@ int AbstractCode::InstructionSize() {
}
}
-ByteArray AbstractCode::source_position_table() {
+ByteArray AbstractCode::SourcePositionTableInternal() {
if (IsCode()) {
- return GetCode().SourcePositionTable();
+ DCHECK_NE(GetCode().kind(), CodeKind::BASELINE);
+ return GetCode().source_position_table();
+ } else {
+ return GetBytecodeArray().SourcePositionTable();
+ }
+}
+
+ByteArray AbstractCode::SourcePositionTable(SharedFunctionInfo sfi) {
+ if (IsCode()) {
+ return GetCode().SourcePositionTable(sfi);
} else {
return GetBytecodeArray().SourcePositionTable();
}
@@ -177,7 +189,14 @@ INT32_ACCESSORS(Code, unwinding_info_offset, kUnwindingInfoOffsetOffset)
CODE_ACCESSORS(relocation_info, ByteArray, kRelocationInfoOffset)
CODE_ACCESSORS(deoptimization_data, FixedArray, kDeoptimizationDataOffset)
-CODE_ACCESSORS(source_position_table, Object, kSourcePositionTableOffset)
+#define IS_BASELINE() (kind() == CodeKind::BASELINE)
+ACCESSORS_CHECKED2(Code, source_position_table, ByteArray, kPositionTableOffset,
+ !IS_BASELINE(),
+ !IS_BASELINE() && !ObjectInYoungGeneration(value))
+ACCESSORS_CHECKED2(Code, bytecode_offset_table, ByteArray, kPositionTableOffset,
+ IS_BASELINE(),
+ IS_BASELINE() && !ObjectInYoungGeneration(value))
+#undef IS_BASELINE
// Concurrent marker needs to access kind specific flags in code data container.
RELEASE_ACQUIRE_CODE_ACCESSORS(code_data_container, CodeDataContainer,
kCodeDataContainerOffset)
@@ -187,7 +206,7 @@ RELEASE_ACQUIRE_CODE_ACCESSORS(code_data_container, CodeDataContainer,
void Code::WipeOutHeader() {
WRITE_FIELD(*this, kRelocationInfoOffset, Smi::FromInt(0));
WRITE_FIELD(*this, kDeoptimizationDataOffset, Smi::FromInt(0));
- WRITE_FIELD(*this, kSourcePositionTableOffset, Smi::FromInt(0));
+ WRITE_FIELD(*this, kPositionTableOffset, Smi::FromInt(0));
WRITE_FIELD(*this, kCodeDataContainerOffset, Smi::FromInt(0));
}
@@ -204,12 +223,12 @@ void Code::clear_padding() {
memset(reinterpret_cast<void*>(raw_body_end()), 0, trailing_padding_size);
}
-ByteArray Code::SourcePositionTable() const {
- Object maybe_table = source_position_table();
- if (maybe_table.IsByteArray()) return ByteArray::cast(maybe_table);
- ReadOnlyRoots roots = GetReadOnlyRoots();
- DCHECK(maybe_table.IsUndefined(roots) || maybe_table.IsException(roots));
- return roots.empty_byte_array();
+ByteArray Code::SourcePositionTable(SharedFunctionInfo sfi) const {
+ DisallowGarbageCollection no_gc;
+ if (kind() == CodeKind::BASELINE) {
+ return sfi.GetBytecodeArray(sfi.GetIsolate()).SourcePositionTable();
+ }
+ return source_position_table();
}
Object Code::next_code_link() const {
@@ -257,6 +276,25 @@ Address Code::raw_metadata_start() const {
return raw_instruction_start() + raw_instruction_size();
}
+Address Code::InstructionStart(Isolate* isolate, Address pc) const {
+ return V8_UNLIKELY(is_off_heap_trampoline())
+ ? OffHeapInstructionStart(isolate, pc)
+ : raw_instruction_start();
+}
+
+Address Code::InstructionEnd(Isolate* isolate, Address pc) const {
+ return V8_UNLIKELY(is_off_heap_trampoline())
+ ? OffHeapInstructionEnd(isolate, pc)
+ : raw_instruction_end();
+}
+
+int Code::GetOffsetFromInstructionStart(Isolate* isolate, Address pc) const {
+ Address instruction_start = InstructionStart(isolate, pc);
+ Address offset = pc - instruction_start;
+ DCHECK_LE(offset, InstructionSize());
+ return static_cast<int>(offset);
+}
+
Address Code::MetadataStart() const {
STATIC_ASSERT(kOnHeapBodyIsContiguous);
return V8_UNLIKELY(is_off_heap_trampoline()) ? OffHeapMetadataStart()
@@ -285,9 +323,9 @@ int Code::SizeIncludingMetadata() const {
}
ByteArray Code::unchecked_relocation_info() const {
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
return ByteArray::unchecked_cast(
- TaggedField<HeapObject, kRelocationInfoOffset>::load(isolate, *this));
+ TaggedField<HeapObject, kRelocationInfoOffset>::load(cage_base, *this));
}
byte* Code::relocation_start() const {
@@ -304,10 +342,10 @@ int Code::relocation_size() const {
Address Code::entry() const { return raw_instruction_start(); }
-bool Code::contains(Address inner_pointer) {
+bool Code::contains(Isolate* isolate, Address inner_pointer) {
if (is_off_heap_trampoline()) {
- if (OffHeapInstructionStart() <= inner_pointer &&
- inner_pointer < OffHeapInstructionEnd()) {
+ if (OffHeapInstructionStart(isolate, inner_pointer) <= inner_pointer &&
+ inner_pointer < OffHeapInstructionEnd(isolate, inner_pointer)) {
return true;
}
}
@@ -326,70 +364,73 @@ int Code::CodeSize() const { return SizeFor(raw_body_size()); }
CodeKind Code::kind() const {
STATIC_ASSERT(FIELD_SIZE(kFlagsOffset) == kInt32Size);
- return KindField::decode(ReadField<uint32_t>(kFlagsOffset));
-}
-
-namespace detail {
-
-// TODO(v8:11429): Extract out of header, to generic helper, and merge with
-// TranslationArray de/encoding.
-inline int ReadUint(ByteArray array, int* index) {
- int byte = 0;
- int value = 0;
- int shift = 0;
- do {
- byte = array.get((*index)++);
- value += (byte & ((1 << 7) - 1)) << shift;
- shift += 7;
- } while (byte & (1 << 7));
- return value;
+ const uint32_t flags = RELAXED_READ_UINT32_FIELD(*this, kFlagsOffset);
+ return KindField::decode(flags);
}
-} // namespace detail
-
-int Code::GetBytecodeOffsetForBaselinePC(Address baseline_pc) {
+int Code::GetBytecodeOffsetForBaselinePC(Address baseline_pc,
+ BytecodeArray bytecodes) {
DisallowGarbageCollection no_gc;
- CHECK(!is_baseline_prologue_builtin());
+ CHECK(!is_baseline_trampoline_builtin());
if (is_baseline_leave_frame_builtin()) return kFunctionExitBytecodeOffset;
CHECK_EQ(kind(), CodeKind::BASELINE);
- ByteArray data = ByteArray::cast(source_position_table());
- Address lookup_pc = 0;
+ baseline::BytecodeOffsetIterator offset_iterator(
+ ByteArray::cast(bytecode_offset_table()), bytecodes);
Address pc = baseline_pc - InstructionStart();
- int index = 0;
- int offset = 0;
- while (pc > lookup_pc) {
- lookup_pc += detail::ReadUint(data, &index);
- offset += detail::ReadUint(data, &index);
- }
- CHECK_EQ(pc, lookup_pc);
- return offset;
+ offset_iterator.AdvanceToPCOffset(pc);
+ return offset_iterator.current_bytecode_offset();
}
uintptr_t Code::GetBaselinePCForBytecodeOffset(int bytecode_offset,
- bool precise) {
+ BytecodeToPCPosition position,
+ BytecodeArray bytecodes) {
DisallowGarbageCollection no_gc;
CHECK_EQ(kind(), CodeKind::BASELINE);
- ByteArray data = ByteArray::cast(source_position_table());
- intptr_t pc = 0;
- int index = 0;
- int offset = 0;
- // TODO(v8:11429,cbruni): clean up
- // Return the offset for the last bytecode that matches
- while (offset < bytecode_offset && index < data.length()) {
- int delta_pc = detail::ReadUint(data, &index);
- int delta_offset = detail::ReadUint(data, &index);
- if (!precise && (bytecode_offset < offset + delta_offset)) break;
- pc += delta_pc;
- offset += delta_offset;
- }
- if (precise) {
- CHECK_EQ(offset, bytecode_offset);
+ baseline::BytecodeOffsetIterator offset_iterator(
+ ByteArray::cast(bytecode_offset_table()), bytecodes);
+ offset_iterator.AdvanceToBytecodeOffset(bytecode_offset);
+ uintptr_t pc = 0;
+ if (position == kPcAtStartOfBytecode) {
+ pc = offset_iterator.current_pc_start_offset();
} else {
- CHECK_LE(offset, bytecode_offset);
+ DCHECK_EQ(position, kPcAtEndOfBytecode);
+ pc = offset_iterator.current_pc_end_offset();
}
return pc;
}
+uintptr_t Code::GetBaselineStartPCForBytecodeOffset(int bytecode_offset,
+ BytecodeArray bytecodes) {
+ return GetBaselinePCForBytecodeOffset(bytecode_offset, kPcAtStartOfBytecode,
+ bytecodes);
+}
+
+uintptr_t Code::GetBaselineEndPCForBytecodeOffset(int bytecode_offset,
+ BytecodeArray bytecodes) {
+ return GetBaselinePCForBytecodeOffset(bytecode_offset, kPcAtEndOfBytecode,
+ bytecodes);
+}
+
+uintptr_t Code::GetBaselinePCForNextExecutedBytecode(int bytecode_offset,
+ BytecodeArray bytecodes) {
+ DisallowGarbageCollection no_gc;
+ CHECK_EQ(kind(), CodeKind::BASELINE);
+ baseline::BytecodeOffsetIterator offset_iterator(
+ ByteArray::cast(bytecode_offset_table()), bytecodes);
+ Handle<BytecodeArray> bytecodes_handle(
+ reinterpret_cast<Address*>(&bytecodes));
+ interpreter::BytecodeArrayIterator bytecode_iterator(bytecodes_handle,
+ bytecode_offset);
+ interpreter::Bytecode bytecode = bytecode_iterator.current_bytecode();
+ if (bytecode == interpreter::Bytecode::kJumpLoop) {
+ return GetBaselineStartPCForBytecodeOffset(
+ bytecode_iterator.GetJumpTargetOffset(), bytecodes);
+ } else {
+ DCHECK(!interpreter::Bytecodes::IsJump(bytecode));
+ return GetBaselineEndPCForBytecodeOffset(bytecode_offset, bytecodes);
+ }
+}
+
void Code::initialize_flags(CodeKind kind, bool is_turbofanned, int stack_slots,
bool is_off_heap_trampoline) {
CHECK(0 <= stack_slots && stack_slots < StackSlotsField::kMax);
@@ -399,7 +440,7 @@ void Code::initialize_flags(CodeKind kind, bool is_turbofanned, int stack_slots,
StackSlotsField::encode(stack_slots) |
IsOffHeapTrampoline::encode(is_off_heap_trampoline);
STATIC_ASSERT(FIELD_SIZE(kFlagsOffset) == kInt32Size);
- WriteField<uint32_t>(kFlagsOffset, flags);
+ RELAXED_WRITE_UINT32_FIELD(*this, kFlagsOffset, flags);
DCHECK_IMPLIES(stack_slots != 0, has_safepoint_info());
}
@@ -413,12 +454,16 @@ inline bool Code::is_interpreter_trampoline_builtin() const {
index == Builtins::kInterpreterEnterBytecodeDispatch);
}
-inline bool Code::is_baseline_leave_frame_builtin() const {
- return builtin_index() == Builtins::kBaselineLeaveFrame;
+inline bool Code::is_baseline_trampoline_builtin() const {
+ const int index = builtin_index();
+ return index != Builtins::kNoBuiltinId &&
+ (index == Builtins::kBaselineOutOfLinePrologue ||
+ index == Builtins::kBaselineEnterAtBytecode ||
+ index == Builtins::kBaselineEnterAtNextBytecode);
}
-inline bool Code::is_baseline_prologue_builtin() const {
- return builtin_index() == Builtins::kBaselineOutOfLinePrologue;
+inline bool Code::is_baseline_leave_frame_builtin() const {
+ return builtin_index() == Builtins::kBaselineLeaveFrame;
}
inline bool Code::checks_optimization_marker() const {
@@ -436,7 +481,8 @@ inline bool Code::has_tagged_outgoing_params() const {
}
inline bool Code::is_turbofanned() const {
- return IsTurbofannedField::decode(ReadField<uint32_t>(kFlagsOffset));
+ const uint32_t flags = RELAXED_READ_UINT32_FIELD(*this, kFlagsOffset);
+ return IsTurbofannedField::decode(flags);
}
inline bool Code::can_have_weak_objects() const {
@@ -482,7 +528,8 @@ inline void Code::set_is_exception_caught(bool value) {
}
inline bool Code::is_off_heap_trampoline() const {
- return IsOffHeapTrampoline::decode(ReadField<uint32_t>(kFlagsOffset));
+ const uint32_t flags = RELAXED_READ_UINT32_FIELD(*this, kFlagsOffset);
+ return IsOffHeapTrampoline::decode(flags);
}
inline HandlerTable::CatchPrediction Code::GetBuiltinCatchPrediction() {
@@ -492,14 +539,14 @@ inline HandlerTable::CatchPrediction Code::GetBuiltinCatchPrediction() {
}
int Code::builtin_index() const {
- int index = ReadField<int>(kBuiltinIndexOffset);
+ int index = RELAXED_READ_INT_FIELD(*this, kBuiltinIndexOffset);
DCHECK(index == Builtins::kNoBuiltinId || Builtins::IsBuiltinId(index));
return index;
}
void Code::set_builtin_index(int index) {
DCHECK(index == Builtins::kNoBuiltinId || Builtins::IsBuiltinId(index));
- WriteField<int>(kBuiltinIndexOffset, index);
+ RELAXED_WRITE_INT_FIELD(*this, kBuiltinIndexOffset, index);
}
bool Code::is_builtin() const {
@@ -507,14 +554,14 @@ bool Code::is_builtin() const {
}
unsigned Code::inlined_bytecode_size() const {
- DCHECK(CodeKindIsOptimizedJSFunction(kind()) ||
- ReadField<unsigned>(kInlinedBytecodeSizeOffset) == 0);
- return ReadField<unsigned>(kInlinedBytecodeSizeOffset);
+ unsigned size = RELAXED_READ_UINT_FIELD(*this, kInlinedBytecodeSizeOffset);
+ DCHECK(CodeKindIsOptimizedJSFunction(kind()) || size == 0);
+ return size;
}
void Code::set_inlined_bytecode_size(unsigned size) {
DCHECK(CodeKindIsOptimizedJSFunction(kind()) || size == 0);
- WriteField<unsigned>(kInlinedBytecodeSizeOffset, size);
+ RELAXED_WRITE_UINT_FIELD(*this, kInlinedBytecodeSizeOffset, size);
}
bool Code::has_safepoint_info() const {
@@ -523,7 +570,8 @@ bool Code::has_safepoint_info() const {
int Code::stack_slots() const {
DCHECK(has_safepoint_info());
- return StackSlotsField::decode(ReadField<uint32_t>(kFlagsOffset));
+ const uint32_t flags = RELAXED_READ_UINT32_FIELD(*this, kFlagsOffset);
+ return StackSlotsField::decode(flags);
}
bool Code::marked_for_deoptimization() const {
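
The code-inl.h hunks above drop detail::ReadUint and the hand-rolled pc/bytecode-offset walks in favour of baseline::BytecodeOffsetIterator over a dedicated bytecode_offset_table. For reference, a sketch of the variable-length unsigned decoding the removed helper implemented (seven payload bits per byte, high bit set while more bytes follow); the container type here is illustrative:

#include <cstdint>
#include <vector>

// Decodes one variable-length unsigned integer starting at *index and
// advances *index past it.
uint32_t ReadUint(const std::vector<uint8_t>& bytes, size_t* index) {
  uint32_t value = 0;
  int shift = 0;
  uint8_t byte = 0;
  do {
    byte = bytes[(*index)++];
    value += static_cast<uint32_t>(byte & 0x7f) << shift;
    shift += 7;
  } while (byte & 0x80);
  return value;
}
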
diff --git a/deps/v8/src/objects/code.cc b/deps/v8/src/objects/code.cc
index 73068856c9c..7268f001ce9 100644
--- a/deps/v8/src/objects/code.cc
+++ b/deps/v8/src/objects/code.cc
@@ -2,20 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <iomanip>
-
-#include "src/execution/isolate-utils.h"
#include "src/objects/code.h"
+#include <iomanip>
+
#include "src/codegen/assembler-inl.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/reloc-info.h"
#include "src/codegen/safepoint-table.h"
+#include "src/codegen/source-position.h"
#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/isolate-utils.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-decoder.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/allocation-site-inl.h"
+#include "src/objects/code-kind.h"
#include "src/roots/roots-inl.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/utils/ostreams.h"
@@ -129,8 +131,8 @@ void Code::CopyFromNoFlush(Heap* heap, const CodeDesc& desc) {
}
}
-SafepointEntry Code::GetSafepointEntry(Address pc) {
- SafepointTable table(*this);
+SafepointEntry Code::GetSafepointEntry(Isolate* isolate, Address pc) {
+ SafepointTable table(isolate, pc, *this);
return table.FindEntry(pc);
}
@@ -148,7 +150,17 @@ Address Code::OffHeapInstructionStart() const {
if (Isolate::CurrentEmbeddedBlobCode() == nullptr) {
return raw_instruction_size();
}
- EmbeddedData d = EmbeddedData::FromBlob();
+ // TODO(11527): pass Isolate as an argument.
+ // GetIsolateFromWritableObject(*this) works for both read-only and writable
+ // objects here because the short builtin calls feature requires pointer
+ // compression.
+ // We don't have to check the Isolate::is_short_builtin_calls_enabled() value
+ // because even if short builtin calls were not actually enabled due to
+ // insufficient memory, FromBlob(isolate) would still be the correct blob to use.
+ EmbeddedData d =
+ FLAG_short_builtin_calls
+ ? EmbeddedData::FromBlob(GetIsolateFromWritableObject(*this))
+ : EmbeddedData::FromBlob();
return d.InstructionStartOfBuiltin(builtin_index());
}
@@ -157,7 +169,30 @@ Address Code::OffHeapInstructionEnd() const {
if (Isolate::CurrentEmbeddedBlobCode() == nullptr) {
return raw_instruction_size();
}
- EmbeddedData d = EmbeddedData::FromBlob();
+ // TODO(11527): pass Isolate as an argument.
+ // GetIsolateFromWritableObject(*this) works for both read-only and writable
+ // objects here because the short builtin calls feature requires pointer
+ // compression.
+ // We don't have to check the Isolate::is_short_builtin_calls_enabled() value
+ // because even if short builtin calls were not actually enabled due to
+ // insufficient memory, FromBlob(isolate) would still be the correct blob to use.
+ EmbeddedData d =
+ FLAG_short_builtin_calls
+ ? EmbeddedData::FromBlob(GetIsolateFromWritableObject(*this))
+ : EmbeddedData::FromBlob();
+ return d.InstructionStartOfBuiltin(builtin_index()) +
+ d.InstructionSizeOfBuiltin(builtin_index());
+}
+
+Address Code::OffHeapInstructionStart(Isolate* isolate, Address pc) const {
+ DCHECK(is_off_heap_trampoline());
+ EmbeddedData d = EmbeddedData::GetEmbeddedDataForPC(isolate, pc);
+ return d.InstructionStartOfBuiltin(builtin_index());
+}
+
+Address Code::OffHeapInstructionEnd(Isolate* isolate, Address pc) const {
+ DCHECK(is_off_heap_trampoline());
+ EmbeddedData d = EmbeddedData::GetEmbeddedDataForPC(isolate, pc);
return d.InstructionStartOfBuiltin(builtin_index()) +
d.InstructionSizeOfBuiltin(builtin_index());
}
@@ -190,8 +225,10 @@ Address Code::OffHeapMetadataEnd() const {
d.MetadataSizeOfBuiltin(builtin_index());
}
+// TODO(cbruni): Move to BytecodeArray
int AbstractCode::SourcePosition(int offset) {
- Object maybe_table = source_position_table();
+ CHECK_NE(kind(), CodeKind::BASELINE);
+ Object maybe_table = SourcePositionTableInternal();
if (maybe_table.IsException()) return kNoSourcePosition;
ByteArray source_position_table = ByteArray::cast(maybe_table);
@@ -208,13 +245,15 @@ int AbstractCode::SourcePosition(int offset) {
return position;
}
+// TODO(cbruni): Move to BytecodeArray
int AbstractCode::SourceStatementPosition(int offset) {
+ CHECK_NE(kind(), CodeKind::BASELINE);
// First find the closest position.
int position = SourcePosition(offset);
// Now find the closest statement position before the position.
int statement_position = 0;
- for (SourcePositionTableIterator it(source_position_table()); !it.done();
- it.Advance()) {
+ for (SourcePositionTableIterator it(SourcePositionTableInternal());
+ !it.done(); it.Advance()) {
if (it.is_statement()) {
int p = it.source_position().ScriptOffset();
if (statement_position < p && p <= position) {
@@ -225,10 +264,10 @@ int AbstractCode::SourceStatementPosition(int offset) {
return statement_position;
}
-bool Code::CanDeoptAt(Address pc) {
+bool Code::CanDeoptAt(Isolate* isolate, Address pc) {
DeoptimizationData deopt_data =
DeoptimizationData::cast(deoptimization_data());
- Address code_start_address = InstructionStart();
+ Address code_start_address = InstructionStart(isolate, pc);
for (int i = 0; i < deopt_data.DeoptCount(); i++) {
if (deopt_data.Pc(i).value() == -1) continue;
Address address = code_start_address + deopt_data.Pc(i).value();
@@ -535,10 +574,12 @@ void Code::Disassemble(const char* name, std::ostream& os, Isolate* isolate,
}
os << "\n";
+ // TODO(cbruni): add support for baseline code.
if (kind() != CodeKind::BASELINE) {
{
SourcePositionTableIterator it(
- SourcePositionTable(), SourcePositionTableIterator::kJavaScriptOnly);
+ source_position_table(),
+ SourcePositionTableIterator::kJavaScriptOnly);
if (!it.done()) {
os << "Source positions:\n pc offset position\n";
for (; !it.done(); it.Advance()) {
@@ -552,7 +593,7 @@ void Code::Disassemble(const char* name, std::ostream& os, Isolate* isolate,
{
SourcePositionTableIterator it(
- SourcePositionTable(), SourcePositionTableIterator::kExternalOnly);
+ source_position_table(), SourcePositionTableIterator::kExternalOnly);
if (!it.done()) {
os << "External Source positions:\n pc offset fileid line\n";
for (; !it.done(); it.Advance()) {
@@ -574,7 +615,7 @@ void Code::Disassemble(const char* name, std::ostream& os, Isolate* isolate,
os << "\n";
if (has_safepoint_info()) {
- SafepointTable table(*this);
+ SafepointTable table(isolate, current_pc, *this);
os << "Safepoints (size = " << table.size() << ")\n";
for (unsigned i = 0; i < table.length(); i++) {
unsigned pc_offset = table.GetPcOffset(i);
@@ -881,6 +922,7 @@ void DependentCode::DeoptimizeDependentCodeGroup(
bool marked = MarkCodeForDeoptimization(group);
if (marked) {
DCHECK(AllowCodeDependencyChange::IsAllowed());
+ // TODO(11527): pass Isolate as an argument.
Deoptimizer::DeoptimizeMarkedCode(GetIsolateFromWritableObject(*this));
}
}
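
OffHeapInstructionStart/End now come in pc-taking overloads because, with FLAG_short_builtin_calls, a frame's pc can point into either the process-wide embedded blob or an isolate-local copy of it; the overloads pick the blob that actually contains the pc via EmbeddedData::GetEmbeddedDataForPC. A sketch of the final address computation, assuming a blob records per-builtin offsets and sizes (the struct and field names are placeholders, not V8's EmbeddedData layout):

#include <cstdint>

struct BlobView {
  uintptr_t code_start;             // first instruction byte of the blob
  const uint32_t* builtin_offsets;  // offset of each builtin inside the blob
  const uint32_t* builtin_sizes;    // instruction size of each builtin
};

uintptr_t BuiltinInstructionStart(const BlobView& blob, int builtin_index) {
  return blob.code_start + blob.builtin_offsets[builtin_index];
}

uintptr_t BuiltinInstructionEnd(const BlobView& blob, int builtin_index) {
  return BuiltinInstructionStart(blob, builtin_index) +
         blob.builtin_sizes[builtin_index];
}
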
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index da7d6c92432..d431701936f 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -13,6 +13,7 @@
#include "src/objects/fixed-array.h"
#include "src/objects/heap-object.h"
#include "src/objects/objects.h"
+#include "src/objects/shared-function-info.h"
#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
@@ -149,6 +150,28 @@ class Code : public HeapObject {
inline Address InstructionEnd() const;
V8_EXPORT_PRIVATE Address OffHeapInstructionEnd() const;
+ // When builtins un-embedding is enabled for the Isolate
+ // (see Isolate::is_short_builtin_calls_enabled()) then both embedded and
+ // un-embedded builtins might be executed and thus two kinds of |pc|s might
+ // appear on the stack.
+ // Unlike the parameterless versions of the functions above, the variants below
+ // ensure that the instruction start corresponds to the given |pc| value.
+ // Thus for off-heap trampoline Code objects the result might be the
+ // instruction start/end of either the embedded code stream or the un-embedded one.
+ // For normal Code objects these functions just return the
+ // raw_instruction_start/end() values.
+ // TODO(11527): remove these versions once the full solution is ready.
+ inline Address InstructionStart(Isolate* isolate, Address pc) const;
+ V8_EXPORT_PRIVATE Address OffHeapInstructionStart(Isolate* isolate,
+ Address pc) const;
+ inline Address InstructionEnd(Isolate* isolate, Address pc) const;
+ V8_EXPORT_PRIVATE Address OffHeapInstructionEnd(Isolate* isolate,
+ Address pc) const;
+
+ // Computes offset of the |pc| from the instruction start. The |pc| must
+ // belong to this code.
+ inline int GetOffsetFromInstructionStart(Isolate* isolate, Address pc) const;
+
inline int raw_instruction_size() const;
inline void set_raw_instruction_size(int value);
inline int InstructionSize() const;
@@ -220,12 +243,16 @@ class Code : public HeapObject {
// [deoptimization_data]: Array containing data for deopt.
DECL_ACCESSORS(deoptimization_data, FixedArray)
- // [source_position_table]: ByteArray for the source positions table.
- DECL_ACCESSORS(source_position_table, Object)
+ // [source_position_table]: ByteArray for the source positions table for
+ // non-baseline code.
+ DECL_ACCESSORS(source_position_table, ByteArray)
+ // [bytecode_offset_table]: ByteArray for the bytecode offset for baseline
+ // code.
+ DECL_ACCESSORS(bytecode_offset_table, ByteArray)
// If source positions have not been collected or an exception has been thrown
// this will return empty_byte_array.
- inline ByteArray SourcePositionTable() const;
+ inline ByteArray SourcePositionTable(SharedFunctionInfo sfi) const;
// [code_data_container]: A container indirection for all mutable fields.
DECL_RELEASE_ACQUIRE_ACCESSORS(code_data_container, CodeDataContainer)
@@ -250,7 +277,7 @@ class Code : public HeapObject {
inline bool is_interpreter_trampoline_builtin() const;
// Testers for baseline builtins.
- inline bool is_baseline_prologue_builtin() const;
+ inline bool is_baseline_trampoline_builtin() const;
inline bool is_baseline_leave_frame_builtin() const;
// Tells whether the code checks the optimization marker in the function's
@@ -326,7 +353,7 @@ class Code : public HeapObject {
inline bool is_off_heap_trampoline() const;
// Get the safepoint entry for the given pc.
- SafepointEntry GetSafepointEntry(Address pc);
+ SafepointEntry GetSafepointEntry(Isolate* isolate, Address pc);
// The entire code object including its header is copied verbatim to the
// snapshot so that it can be written in one, fast, memcpy during
@@ -365,7 +392,7 @@ class Code : public HeapObject {
inline Address entry() const;
// Returns true if pc is inside this object's instructions.
- inline bool contains(Address pc);
+ inline bool contains(Isolate* isolate, Address pc);
// Relocate the code by delta bytes. Called to signal that this code
// object has been moved by delta bytes.
@@ -379,9 +406,22 @@ class Code : public HeapObject {
static inline void CopyRelocInfoToByteArray(ByteArray dest,
const CodeDesc& desc);
- inline uintptr_t GetBaselinePCForBytecodeOffset(int bytecode_offset,
- bool precise = true);
- inline int GetBytecodeOffsetForBaselinePC(Address baseline_pc);
+ inline uintptr_t GetBaselineStartPCForBytecodeOffset(int bytecode_offset,
+ BytecodeArray bytecodes);
+
+ inline uintptr_t GetBaselineEndPCForBytecodeOffset(int bytecode_offset,
+ BytecodeArray bytecodes);
+
+ // Returns the PC of the next bytecode in execution order.
+ // If the bytecode at the given offset is JumpLoop, the PC of the jump target
+ // is returned. Other jumps are not allowed.
+ // For other bytecodes this is equivalent to
+ // GetBaselineEndPCForBytecodeOffset.
+ inline uintptr_t GetBaselinePCForNextExecutedBytecode(
+ int bytecode_offset, BytecodeArray bytecodes);
+
+ inline int GetBytecodeOffsetForBaselinePC(Address baseline_pc,
+ BytecodeArray bytecodes);
// Flushes the instruction cache for the executable instructions of this code
// object. Make sure to call this while the code is still writable.
@@ -400,7 +440,7 @@ class Code : public HeapObject {
DECL_PRINTER(Code)
DECL_VERIFIER(Code)
- bool CanDeoptAt(Address pc);
+ bool CanDeoptAt(Isolate* isolate, Address pc);
void SetMarkedForDeoptimization(const char* reason);
@@ -428,7 +468,7 @@ class Code : public HeapObject {
#define CODE_FIELDS(V) \
V(kRelocationInfoOffset, kTaggedSize) \
V(kDeoptimizationDataOffset, kTaggedSize) \
- V(kSourcePositionTableOffset, kTaggedSize) \
+ V(kPositionTableOffset, kTaggedSize) \
V(kCodeDataContainerOffset, kTaggedSize) \
/* Data or code not directly visited by GC directly starts here. */ \
/* The serializer needs to copy bytes starting from here verbatim. */ \
@@ -526,6 +566,17 @@ class Code : public HeapObject {
bool is_promise_rejection() const;
bool is_exception_caught() const;
+ enum BytecodeToPCPosition {
+ kPcAtStartOfBytecode,
+ // End of bytecode equals the start of the next bytecode.
+ // We need it when we deoptimize to the next bytecode (lazy deopt or deopt
+ // of non-topmost frame).
+ kPcAtEndOfBytecode
+ };
+ inline uintptr_t GetBaselinePCForBytecodeOffset(int bytecode_offset,
+ BytecodeToPCPosition position,
+ BytecodeArray bytecodes);
+
OBJECT_CONSTRUCTORS(Code, HeapObject);
};
@@ -577,8 +628,8 @@ class AbstractCode : public HeapObject {
// at instruction_start.
inline int InstructionSize();
- // Return the source position table.
- inline ByteArray source_position_table();
+ // Return the source position table for interpreter code.
+ inline ByteArray SourcePositionTable(SharedFunctionInfo sfi);
void DropStackFrameCache();
@@ -600,6 +651,9 @@ class AbstractCode : public HeapObject {
static const int kMaxLoopNestingMarker = 6;
OBJECT_CONSTRUCTORS(AbstractCode, HeapObject);
+
+ private:
+ inline ByteArray SourcePositionTableInternal();
};
// Dependent code is a singly linked list of weak fixed arrays. Each array
@@ -630,9 +684,14 @@ class DependentCode : public WeakFixedArray {
// deoptimized when the transition is replaced by a new version.
kTransitionGroup,
// Group of code that omit run-time prototype checks for prototypes
- // described by this map. The group is deoptimized whenever an object
- // described by this map changes shape (and transitions to a new map),
- // possibly invalidating the assumptions embedded in the code.
+ // described by this map. The group is deoptimized whenever the following
+ // conditions hold, possibly invalidating the assumptions embedded in the
+ // code:
+ // a) A fast-mode object described by this map changes shape (and
+ // transitions to a new map), or
+ // b) A dictionary-mode prototype described by this map changes shape, the
+ // const-ness of one of its properties changes, or its [[Prototype]]
+ // changes (only the latter causes a transition).
kPrototypeCheckGroup,
// Group of code that depends on global property values in property cells
// not being changed.
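
The new declarations above replace the old boolean `precise` parameter with an explicit BytecodeToPCPosition, and add GetBaselinePCForNextExecutedBytecode, which returns the jump target's start pc for JumpLoop and otherwise the end pc of the current bytecode (which equals the start of the next one in sequence). A toy sketch of that rule, with hypothetical lookups standing in for the bytecode offset table:

#include <cstdint>

enum class Bytecode { kJumpLoop, kAdd, kReturn };

struct OffsetTable {
  // Hypothetical mapping from bytecode offsets to baseline pc ranges.
  uintptr_t StartPC(int bytecode_offset) const {
    return 0x100 + static_cast<uintptr_t>(bytecode_offset) * 4;
  }
  uintptr_t EndPC(int bytecode_offset) const { return StartPC(bytecode_offset) + 4; }
};

uintptr_t NextExecutedPC(const OffsetTable& table, Bytecode bytecode,
                         int offset, int jump_target_offset) {
  if (bytecode == Bytecode::kJumpLoop) return table.StartPC(jump_target_offset);
  // Other jumps are not allowed here; for straight-line bytecodes the next
  // executed pc is the end of the current bytecode's baseline code.
  return table.EndPC(offset);
}
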
diff --git a/deps/v8/src/objects/compilation-cache-table-inl.h b/deps/v8/src/objects/compilation-cache-table-inl.h
index 496332490e1..2e3a88d77ce 100644
--- a/deps/v8/src/objects/compilation-cache-table-inl.h
+++ b/deps/v8/src/objects/compilation-cache-table-inl.h
@@ -43,10 +43,18 @@ uint32_t CompilationCacheShape::StringSharedHash(String source,
// collection.
Script script(Script::cast(shared.script()));
hash ^= String::cast(script.source()).EnsureHash();
- STATIC_ASSERT(LanguageModeSize == 2);
- if (is_strict(language_mode)) hash ^= 0x8000;
- hash += position;
}
+ STATIC_ASSERT(LanguageModeSize == 2);
+ if (is_strict(language_mode)) hash ^= 0x8000;
+ hash += position;
+ return hash;
+}
+
+uint32_t CompilationCacheShape::StringSharedHash(String source,
+ LanguageMode language_mode) {
+ uint32_t hash = source.EnsureHash();
+ STATIC_ASSERT(LanguageModeSize == 2);
+ if (is_strict(language_mode)) hash ^= 0x8000;
return hash;
}
@@ -64,13 +72,19 @@ uint32_t CompilationCacheShape::HashForObject(ReadOnlyRoots roots,
FixedArray val = FixedArray::cast(object);
if (val.map() == roots.fixed_cow_array_map()) {
DCHECK_EQ(4, val.length());
- SharedFunctionInfo shared = SharedFunctionInfo::cast(val.get(0));
String source = String::cast(val.get(1));
int language_unchecked = Smi::ToInt(val.get(2));
DCHECK(is_valid_language_mode(language_unchecked));
LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
int position = Smi::ToInt(val.get(3));
- return StringSharedHash(source, shared, language_mode, position);
+ Object shared_or_smi = val.get(0);
+ if (shared_or_smi.IsSmi()) {
+ DCHECK_EQ(position, kNoSourcePosition);
+ return StringSharedHash(source, language_mode);
+ } else {
+ return StringSharedHash(source, SharedFunctionInfo::cast(shared_or_smi),
+ language_mode, position);
+ }
}
// RegExp: The key field (and the value field) contains the
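
The hunk above adds a StringSharedHash overload that identifies a script purely by its source and language mode: the source hash is taken as-is and the strict-mode case is folded in by xoring 0x8000, with no SharedFunctionInfo or position mixed in. A self-contained sketch of that combination, with std::hash standing in for String::EnsureHash:

#include <cstdint>
#include <functional>
#include <string>

enum class LanguageMode { kSloppy, kStrict };

uint32_t StringSharedHash(const std::string& source, LanguageMode mode) {
  uint32_t hash = static_cast<uint32_t>(std::hash<std::string>{}(source));
  if (mode == LanguageMode::kStrict) hash ^= 0x8000;
  return hash;
}
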
diff --git a/deps/v8/src/objects/compilation-cache-table.cc b/deps/v8/src/objects/compilation-cache-table.cc
index a995da38830..9ef14689753 100644
--- a/deps/v8/src/objects/compilation-cache-table.cc
+++ b/deps/v8/src/objects/compilation-cache-table.cc
@@ -151,6 +151,14 @@ class StringSharedKey : public HashTableKey {
language_mode_(language_mode),
position_(position) {}
+ // This tuple unambiguously identifies script compilation.
+ StringSharedKey(Handle<String> source, LanguageMode language_mode)
+ : HashTableKey(
+ CompilationCacheShape::StringSharedHash(*source, language_mode)),
+ source_(source),
+ language_mode_(language_mode),
+ position_(kNoSourcePosition) {}
+
bool IsMatch(Object other) override {
DisallowGarbageCollection no_gc;
if (!other.IsFixedArray()) {
@@ -159,8 +167,14 @@ class StringSharedKey : public HashTableKey {
return Hash() == other_hash;
}
FixedArray other_array = FixedArray::cast(other);
- SharedFunctionInfo shared = SharedFunctionInfo::cast(other_array.get(0));
- if (shared != *shared_) return false;
+ DCHECK(other_array.get(0).IsSharedFunctionInfo() ||
+ other_array.get(0) == Smi::zero());
+ Handle<SharedFunctionInfo> shared;
+ if (shared_.ToHandle(&shared)) {
+ if (*shared != other_array.get(0)) return false;
+ } else {
+ if (Smi::zero() != other_array.get(0)) return false;
+ }
int language_unchecked = Smi::ToInt(other_array.get(2));
DCHECK(is_valid_language_mode(language_unchecked));
LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
@@ -173,7 +187,12 @@ class StringSharedKey : public HashTableKey {
Handle<Object> AsHandle(Isolate* isolate) {
Handle<FixedArray> array = isolate->factory()->NewFixedArray(4);
- array->set(0, *shared_);
+ Handle<SharedFunctionInfo> shared;
+ if (shared_.ToHandle(&shared)) {
+ array->set(0, *shared);
+ } else {
+ array->set(0, Smi::zero());
+ }
array->set(1, *source_);
array->set(2, Smi::FromEnum(language_mode_));
array->set(3, Smi::FromInt(position_));
@@ -183,7 +202,7 @@ class StringSharedKey : public HashTableKey {
private:
Handle<String> source_;
- Handle<SharedFunctionInfo> shared_;
+ MaybeHandle<SharedFunctionInfo> shared_;
LanguageMode language_mode_;
int position_;
};
@@ -227,16 +246,9 @@ class CodeKey : public HashTableKey {
MaybeHandle<SharedFunctionInfo> CompilationCacheTable::LookupScript(
Handle<CompilationCacheTable> table, Handle<String> src,
- Handle<Context> native_context, LanguageMode language_mode) {
- // We use the empty function SFI as part of the key. Although the
- // empty_function is native context dependent, the SFI is de-duped on
- // snapshot builds by the StartupObjectCache, and so this does not prevent
- // reuse of scripts in the compilation cache across native contexts.
- Handle<SharedFunctionInfo> shared(native_context->empty_function().shared(),
- native_context->GetIsolate());
- Isolate* isolate = native_context->GetIsolate();
+ LanguageMode language_mode, Isolate* isolate) {
src = String::Flatten(isolate, src);
- StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
+ StringSharedKey key(src, language_mode);
InternalIndex entry = table->FindEntry(isolate, &key);
if (entry.is_not_found()) return MaybeHandle<SharedFunctionInfo>();
int index = EntryToIndex(entry);
@@ -245,7 +257,7 @@ MaybeHandle<SharedFunctionInfo> CompilationCacheTable::LookupScript(
}
Object obj = table->get(index + 1);
if (obj.IsSharedFunctionInfo()) {
- return handle(SharedFunctionInfo::cast(obj), native_context->GetIsolate());
+ return handle(SharedFunctionInfo::cast(obj), isolate);
}
return MaybeHandle<SharedFunctionInfo>();
}
@@ -295,17 +307,10 @@ MaybeHandle<Code> CompilationCacheTable::LookupCode(
Handle<CompilationCacheTable> CompilationCacheTable::PutScript(
Handle<CompilationCacheTable> cache, Handle<String> src,
- Handle<Context> native_context, LanguageMode language_mode,
- Handle<SharedFunctionInfo> value) {
- Isolate* isolate = native_context->GetIsolate();
- // We use the empty function SFI as part of the key. Although the
- // empty_function is native context dependent, the SFI is de-duped on
- // snapshot builds by the StartupObjectCache, and so this does not prevent
- // reuse of scripts in the compilation cache across native contexts.
- Handle<SharedFunctionInfo> shared(native_context->empty_function().shared(),
- isolate);
+ LanguageMode language_mode, Handle<SharedFunctionInfo> value,
+ Isolate* isolate) {
src = String::Flatten(isolate, src);
- StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
+ StringSharedKey key(src, language_mode);
Handle<Object> k = key.AsHandle(isolate);
cache = EnsureCapacity(isolate, cache);
InternalIndex entry = cache->FindInsertionEntry(isolate, key.Hash());
diff --git a/deps/v8/src/objects/compilation-cache-table.h b/deps/v8/src/objects/compilation-cache-table.h
index e8101943ad5..2fd548bfe57 100644
--- a/deps/v8/src/objects/compilation-cache-table.h
+++ b/deps/v8/src/objects/compilation-cache-table.h
@@ -34,6 +34,9 @@ class CompilationCacheShape : public BaseShape<HashTableKey*> {
LanguageMode language_mode,
int position);
+ static inline uint32_t StringSharedHash(String source,
+ LanguageMode language_mode);
+
static inline uint32_t HashForObject(ReadOnlyRoots roots, Object object);
static const int kPrefixSize = 0;
@@ -86,11 +89,11 @@ class CompilationCacheTable
// The 'script' cache contains SharedFunctionInfos.
static MaybeHandle<SharedFunctionInfo> LookupScript(
Handle<CompilationCacheTable> table, Handle<String> src,
- Handle<Context> native_context, LanguageMode language_mode);
+ LanguageMode language_mode, Isolate* isolate);
static Handle<CompilationCacheTable> PutScript(
Handle<CompilationCacheTable> cache, Handle<String> src,
- Handle<Context> native_context, LanguageMode language_mode,
- Handle<SharedFunctionInfo> value);
+ LanguageMode language_mode, Handle<SharedFunctionInfo> value,
+ Isolate* isolate);
// Eval code only gets cached after a second probe for the
// code object. To do so, on first "put" only a hash identifying the
diff --git a/deps/v8/src/objects/compressed-slots-inl.h b/deps/v8/src/objects/compressed-slots-inl.h
index ecb276ce366..54c828d919a 100644
--- a/deps/v8/src/objects/compressed-slots-inl.h
+++ b/deps/v8/src/objects/compressed-slots-inl.h
@@ -33,9 +33,9 @@ Object CompressedObjectSlot::operator*() const {
return Object(DecompressTaggedAny(address(), value));
}
-Object CompressedObjectSlot::load(IsolateRoot isolate) const {
+Object CompressedObjectSlot::load(PtrComprCageBase cage_base) const {
Tagged_t value = *location();
- return Object(DecompressTaggedAny(isolate, value));
+ return Object(DecompressTaggedAny(cage_base, value));
}
void CompressedObjectSlot::store(Object value) const {
@@ -52,9 +52,9 @@ Object CompressedObjectSlot::Relaxed_Load() const {
return Object(DecompressTaggedAny(address(), value));
}
-Object CompressedObjectSlot::Relaxed_Load(IsolateRoot isolate) const {
+Object CompressedObjectSlot::Relaxed_Load(PtrComprCageBase cage_base) const {
AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
- return Object(DecompressTaggedAny(isolate, value));
+ return Object(DecompressTaggedAny(cage_base, value));
}
void CompressedObjectSlot::Relaxed_Store(Object value) const {
@@ -85,9 +85,9 @@ MaybeObject CompressedMaybeObjectSlot::operator*() const {
return MaybeObject(DecompressTaggedAny(address(), value));
}
-MaybeObject CompressedMaybeObjectSlot::load(IsolateRoot isolate) const {
+MaybeObject CompressedMaybeObjectSlot::load(PtrComprCageBase cage_base) const {
Tagged_t value = *location();
- return MaybeObject(DecompressTaggedAny(isolate, value));
+ return MaybeObject(DecompressTaggedAny(cage_base, value));
}
void CompressedMaybeObjectSlot::store(MaybeObject value) const {
@@ -99,9 +99,10 @@ MaybeObject CompressedMaybeObjectSlot::Relaxed_Load() const {
return MaybeObject(DecompressTaggedAny(address(), value));
}
-MaybeObject CompressedMaybeObjectSlot::Relaxed_Load(IsolateRoot isolate) const {
+MaybeObject CompressedMaybeObjectSlot::Relaxed_Load(
+ PtrComprCageBase cage_base) const {
AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
- return MaybeObject(DecompressTaggedAny(isolate, value));
+ return MaybeObject(DecompressTaggedAny(cage_base, value));
}
void CompressedMaybeObjectSlot::Relaxed_Store(MaybeObject value) const {
@@ -125,9 +126,10 @@ HeapObjectReference CompressedHeapObjectSlot::operator*() const {
return HeapObjectReference(DecompressTaggedPointer(address(), value));
}
-HeapObjectReference CompressedHeapObjectSlot::load(IsolateRoot isolate) const {
+HeapObjectReference CompressedHeapObjectSlot::load(
+ PtrComprCageBase cage_base) const {
Tagged_t value = *location();
- return HeapObjectReference(DecompressTaggedPointer(isolate, value));
+ return HeapObjectReference(DecompressTaggedPointer(cage_base, value));
}
void CompressedHeapObjectSlot::store(HeapObjectReference value) const {
@@ -148,23 +150,25 @@ void CompressedHeapObjectSlot::StoreHeapObject(HeapObject value) const {
// OffHeapCompressedObjectSlot implementation.
//
-Object OffHeapCompressedObjectSlot::load(IsolateRoot isolate) const {
+Object OffHeapCompressedObjectSlot::load(PtrComprCageBase cage_base) const {
Tagged_t value = *location();
- return Object(DecompressTaggedAny(isolate, value));
+ return Object(DecompressTaggedAny(cage_base, value));
}
void OffHeapCompressedObjectSlot::store(Object value) const {
*location() = CompressTagged(value.ptr());
}
-Object OffHeapCompressedObjectSlot::Relaxed_Load(IsolateRoot isolate) const {
+Object OffHeapCompressedObjectSlot::Relaxed_Load(
+ PtrComprCageBase cage_base) const {
AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
- return Object(DecompressTaggedAny(isolate, value));
+ return Object(DecompressTaggedAny(cage_base, value));
}
-Object OffHeapCompressedObjectSlot::Acquire_Load(IsolateRoot isolate) const {
+Object OffHeapCompressedObjectSlot::Acquire_Load(
+ PtrComprCageBase cage_base) const {
AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location());
- return Object(DecompressTaggedAny(isolate, value));
+ return Object(DecompressTaggedAny(cage_base, value));
}
void OffHeapCompressedObjectSlot::Relaxed_Store(Object value) const {
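
The compressed-slot changes above are a mechanical rename of IsolateRoot to PtrComprCageBase, reflecting that decompression is relative to the pointer-compression cage rather than to an isolate. A simplified sketch of the idea, assuming the common case where a compressed tagged value is a 32-bit offset added to the cage base (the real DecompressTaggedAny also distinguishes Smis and pointer tags):

#include <cstdint>

using Address = uintptr_t;
using Tagged_t = uint32_t;  // compressed on-heap representation

struct PtrComprCageBase {
  Address base;  // aligned start of the 4 GiB pointer-compression cage
};

Address DecompressTagged(PtrComprCageBase cage_base, Tagged_t value) {
  // Zero-extend the 32-bit compressed value and rebase it on the cage.
  return cage_base.base + static_cast<Address>(value);
}
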
diff --git a/deps/v8/src/objects/compressed-slots.h b/deps/v8/src/objects/compressed-slots.h
index 6f74b723c8e..7737e685fe0 100644
--- a/deps/v8/src/objects/compressed-slots.h
+++ b/deps/v8/src/objects/compressed-slots.h
@@ -41,12 +41,12 @@ class CompressedObjectSlot : public SlotBase<CompressedObjectSlot, Tagged_t> {
// TODO(leszeks): Consider deprecating the operator* load, and always pass the
// Isolate.
inline Object operator*() const;
- inline Object load(IsolateRoot isolate) const;
+ inline Object load(PtrComprCageBase cage_base) const;
inline void store(Object value) const;
inline Object Acquire_Load() const;
inline Object Relaxed_Load() const;
- inline Object Relaxed_Load(IsolateRoot isolate) const;
+ inline Object Relaxed_Load(PtrComprCageBase cage_base) const;
inline void Relaxed_Store(Object value) const;
inline void Release_Store(Object value) const;
inline Object Release_CompareAndSwap(Object old, Object target) const;
@@ -77,11 +77,11 @@ class CompressedMaybeObjectSlot
: SlotBase(slot.address()) {}
inline MaybeObject operator*() const;
- inline MaybeObject load(IsolateRoot isolate) const;
+ inline MaybeObject load(PtrComprCageBase cage_base) const;
inline void store(MaybeObject value) const;
inline MaybeObject Relaxed_Load() const;
- inline MaybeObject Relaxed_Load(IsolateRoot isolate) const;
+ inline MaybeObject Relaxed_Load(PtrComprCageBase cage_base) const;
inline void Relaxed_Store(MaybeObject value) const;
inline void Release_CompareAndSwap(MaybeObject old, MaybeObject target) const;
};
@@ -105,7 +105,7 @@ class CompressedHeapObjectSlot
: SlotBase(slot.address()) {}
inline HeapObjectReference operator*() const;
- inline HeapObjectReference load(IsolateRoot isolate) const;
+ inline HeapObjectReference load(PtrComprCageBase cage_base) const;
inline void store(HeapObjectReference value) const;
inline HeapObject ToHeapObject() const;
@@ -131,11 +131,11 @@ class OffHeapCompressedObjectSlot
explicit OffHeapCompressedObjectSlot(const uint32_t* ptr)
: SlotBase(reinterpret_cast<Address>(ptr)) {}
- inline Object load(IsolateRoot isolate) const;
+ inline Object load(PtrComprCageBase cage_base) const;
inline void store(Object value) const;
- inline Object Relaxed_Load(IsolateRoot isolate) const;
- inline Object Acquire_Load(IsolateRoot isolate) const;
+ inline Object Relaxed_Load(PtrComprCageBase cage_base) const;
+ inline Object Acquire_Load(PtrComprCageBase cage_base) const;
inline void Relaxed_Store(Object value) const;
inline void Release_Store(Object value) const;
inline void Release_CompareAndSwap(Object old, Object target) const;
diff --git a/deps/v8/src/objects/contexts-inl.h b/deps/v8/src/objects/contexts-inl.h
index f2164d7d3b7..356df687ded 100644
--- a/deps/v8/src/objects/contexts-inl.h
+++ b/deps/v8/src/objects/contexts-inl.h
@@ -56,8 +56,8 @@ NEVER_READ_ONLY_SPACE_IMPL(Context)
CAST_ACCESSOR(NativeContext)
V8_INLINE Object Context::get(int index) const { return elements(index); }
-V8_INLINE Object Context::get(IsolateRoot isolate, int index) const {
- return elements(isolate, index);
+V8_INLINE Object Context::get(PtrComprCageBase cage_base, int index) const {
+ return elements(cage_base, index);
}
V8_INLINE void Context::set(int index, Object value) {
set_elements(index, value);
@@ -66,16 +66,16 @@ V8_INLINE void Context::set(int index, Object value, WriteBarrierMode mode) {
set_elements(index, value, mode);
}
-void Context::set_scope_info(ScopeInfo scope_info) {
- set(SCOPE_INFO_INDEX, scope_info);
+void Context::set_scope_info(ScopeInfo scope_info, WriteBarrierMode mode) {
+ set(SCOPE_INFO_INDEX, scope_info, mode);
}
Object Context::synchronized_get(int index) const {
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
- return synchronized_get(isolate, index);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return synchronized_get(cage_base, index);
}
-Object Context::synchronized_get(IsolateRoot isolate, int index) const {
+Object Context::synchronized_get(PtrComprCageBase cage_base, int index) const {
DCHECK_LT(static_cast<unsigned int>(index),
static_cast<unsigned int>(this->length()));
return ACQUIRE_READ_FIELD(*this, OffsetOfElementAt(index));
@@ -96,7 +96,9 @@ Context Context::previous() {
DCHECK(IsBootstrappingOrValidParentContext(result, *this));
return Context::unchecked_cast(result);
}
-void Context::set_previous(Context context) { set(PREVIOUS_INDEX, context); }
+void Context::set_previous(Context context, WriteBarrierMode mode) {
+ set(PREVIOUS_INDEX, context, mode);
+}
Object Context::next_context_link() { return get(Context::NEXT_CONTEXT_LINK); }
@@ -109,9 +111,9 @@ HeapObject Context::extension() {
return HeapObject::cast(get(EXTENSION_INDEX));
}
-void Context::set_extension(HeapObject object) {
+void Context::set_extension(HeapObject object, WriteBarrierMode mode) {
DCHECK(scope_info().HasContextExtensionSlot());
- set(EXTENSION_INDEX, object);
+ set(EXTENSION_INDEX, object, mode);
}
NativeContext Context::native_context() const {
@@ -241,7 +243,7 @@ Map Context::GetInitialJSArrayMap(ElementsKind kind) const {
DEF_GETTER(NativeContext, microtask_queue, MicrotaskQueue*) {
return reinterpret_cast<MicrotaskQueue*>(ReadExternalPointerField(
- kMicrotaskQueueOffset, isolate, kNativeContextMicrotaskQueueTag));
+ kMicrotaskQueueOffset, cage_base, kNativeContextMicrotaskQueueTag));
}
void NativeContext::AllocateExternalPointerEntries(Isolate* isolate) {
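
The context hunks also give set_scope_info, set_previous and set_extension an optional WriteBarrierMode argument defaulting to UPDATE_WRITE_BARRIER, so existing call sites compile unchanged while bulk initializers can opt out of the barrier. A toy version of that defaulted-argument pattern (ToyContext and the printed message are invented for illustration):

    #include <iostream>

    enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };

    struct ToyContext {
      int previous = -1;

      // Default keeps the old behavior; callers that know no barrier is
      // needed can pass SKIP_WRITE_BARRIER explicitly.
      void set_previous(int value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER) {
        previous = value;
        if (mode == UPDATE_WRITE_BARRIER) {
          std::cout << "write barrier recorded for slot 'previous'\n";
        }
      }
    };

    int main() {
      ToyContext ctx;
      ctx.set_previous(1);                      // existing call sites unchanged
      ctx.set_previous(2, SKIP_WRITE_BARRIER);  // new fast path
    }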
diff --git a/deps/v8/src/objects/contexts.cc b/deps/v8/src/objects/contexts.cc
index 771fbea40bb..af73bf02568 100644
--- a/deps/v8/src/objects/contexts.cc
+++ b/deps/v8/src/objects/contexts.cc
@@ -511,53 +511,5 @@ STATIC_ASSERT(NativeContext::kSize ==
(Context::SizeFor(NativeContext::NATIVE_CONTEXT_SLOTS) +
kSystemPointerSize));
-void NativeContext::RunPromiseHook(PromiseHookType type,
- Handle<JSPromise> promise,
- Handle<Object> parent) {
- Isolate* isolate = promise->GetIsolate();
- DCHECK(isolate->HasContextPromiseHooks());
- int contextSlot;
-
- switch (type) {
- case PromiseHookType::kInit:
- contextSlot = PROMISE_HOOK_INIT_FUNCTION_INDEX;
- break;
- case PromiseHookType::kResolve:
- contextSlot = PROMISE_HOOK_RESOLVE_FUNCTION_INDEX;
- break;
- case PromiseHookType::kBefore:
- contextSlot = PROMISE_HOOK_BEFORE_FUNCTION_INDEX;
- break;
- case PromiseHookType::kAfter:
- contextSlot = PROMISE_HOOK_AFTER_FUNCTION_INDEX;
- break;
- default:
- UNREACHABLE();
- }
-
- Handle<Object> hook(isolate->native_context()->get(contextSlot), isolate);
- if (hook->IsUndefined()) return;
-
- int argc = type == PromiseHookType::kInit ? 2 : 1;
- Handle<Object> argv[2] = {
- Handle<Object>::cast(promise),
- parent
- };
-
- Handle<Object> receiver = isolate->global_proxy();
-
- if (Execution::Call(isolate, hook, receiver, argc, argv).is_null()) {
- DCHECK(isolate->has_pending_exception());
- Handle<Object> exception(isolate->pending_exception(), isolate);
-
- MessageLocation* no_location = nullptr;
- Handle<JSMessageObject> message =
- isolate->CreateMessageOrAbort(exception, no_location);
- MessageHandler::ReportMessage(isolate, no_location, message);
-
- isolate->clear_pending_exception();
- }
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/contexts.h b/deps/v8/src/objects/contexts.h
index 0a67d52d5ed..79aed5d40ff 100644
--- a/deps/v8/src/objects/contexts.h
+++ b/deps/v8/src/objects/contexts.h
@@ -198,11 +198,6 @@ enum ContextLookupFlags {
V(NUMBER_FUNCTION_INDEX, JSFunction, number_function) \
V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
V(OBJECT_FUNCTION_PROTOTYPE_MAP_INDEX, Map, object_function_prototype_map) \
- V(PROMISE_HOOK_INIT_FUNCTION_INDEX, Object, promise_hook_init_function) \
- V(PROMISE_HOOK_BEFORE_FUNCTION_INDEX, Object, promise_hook_before_function) \
- V(PROMISE_HOOK_AFTER_FUNCTION_INDEX, Object, promise_hook_after_function) \
- V(PROMISE_HOOK_RESOLVE_FUNCTION_INDEX, Object, \
- promise_hook_resolve_function) \
V(PROXY_CALLABLE_MAP_INDEX, Map, proxy_callable_map) \
V(PROXY_CONSTRUCTOR_MAP_INDEX, Map, proxy_constructor_map) \
V(PROXY_FUNCTION_INDEX, JSFunction, proxy_function) \
@@ -427,13 +422,14 @@ class Context : public TorqueGeneratedContext<Context, HeapObject> {
// Setter and getter for elements.
V8_INLINE Object get(int index) const;
- V8_INLINE Object get(IsolateRoot isolate, int index) const;
+ V8_INLINE Object get(PtrComprCageBase cage_base, int index) const;
V8_INLINE void set(int index, Object value);
// Setter with explicit barrier mode.
V8_INLINE void set(int index, Object value, WriteBarrierMode mode);
// Setter and getter with synchronization semantics.
V8_INLINE Object synchronized_get(int index) const;
- V8_INLINE Object synchronized_get(IsolateRoot isolate, int index) const;
+ V8_INLINE Object synchronized_get(PtrComprCageBase cage_base,
+ int index) const;
V8_INLINE void synchronized_set(int index, Object value);
static const int kScopeInfoOffset = kElementsOffset;
@@ -522,17 +518,20 @@ class Context : public TorqueGeneratedContext<Context, HeapObject> {
static const int kInvalidContext = 1;
// Direct slot access.
- inline void set_scope_info(ScopeInfo scope_info);
+ inline void set_scope_info(ScopeInfo scope_info,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline Object unchecked_previous();
inline Context previous();
- inline void set_previous(Context context);
+ inline void set_previous(Context context,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline Object next_context_link();
inline bool has_extension();
inline HeapObject extension();
- inline void set_extension(HeapObject object);
+ inline void set_extension(HeapObject object,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
JSObject extension_object();
JSReceiver extension_receiver();
V8_EXPORT_PRIVATE ScopeInfo scope_info();
@@ -697,9 +696,6 @@ class NativeContext : public Context {
void IncrementErrorsThrown();
int GetErrorsThrown();
- void RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
- Handle<Object> parent);
-
private:
STATIC_ASSERT(OffsetOfElementAt(EMBEDDER_DATA_INDEX) ==
Internals::kNativeContextEmbedderDataOffset);
diff --git a/deps/v8/src/objects/contexts.tq b/deps/v8/src/objects/contexts.tq
index 28ea1300ee0..604852c24ea 100644
--- a/deps/v8/src/objects/contexts.tq
+++ b/deps/v8/src/objects/contexts.tq
@@ -124,11 +124,6 @@ extern enum ContextSlot extends intptr constexpr 'Context::Field' {
PROMISE_PROTOTYPE_INDEX: Slot<NativeContext, JSObject>,
STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX: Slot<NativeContext, Map>,
- PROMISE_HOOK_INIT_FUNCTION_INDEX: Slot<NativeContext, Undefined|Callable>,
- PROMISE_HOOK_BEFORE_FUNCTION_INDEX: Slot<NativeContext, Undefined|Callable>,
- PROMISE_HOOK_AFTER_FUNCTION_INDEX: Slot<NativeContext, Undefined|Callable>,
- PROMISE_HOOK_RESOLVE_FUNCTION_INDEX: Slot<NativeContext, Undefined|Callable>,
-
CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX: Slot<NativeContext, HeapObject>,
BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX: Slot<NativeContext, Map>,
diff --git a/deps/v8/src/objects/descriptor-array-inl.h b/deps/v8/src/objects/descriptor-array-inl.h
index 9a402984205..9bb01ffc4db 100644
--- a/deps/v8/src/objects/descriptor-array-inl.h
+++ b/deps/v8/src/objects/descriptor-array-inl.h
@@ -106,15 +106,16 @@ ObjectSlot DescriptorArray::GetDescriptorSlot(int descriptor) {
}
Name DescriptorArray::GetKey(InternalIndex descriptor_number) const {
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
- return GetKey(isolate, descriptor_number);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return GetKey(cage_base, descriptor_number);
}
-Name DescriptorArray::GetKey(IsolateRoot isolate,
+Name DescriptorArray::GetKey(PtrComprCageBase cage_base,
InternalIndex descriptor_number) const {
DCHECK_LT(descriptor_number.as_int(), number_of_descriptors());
int entry_offset = OffsetOfDescriptorAt(descriptor_number.as_int());
- return Name::cast(EntryKeyField::Relaxed_Load(isolate, *this, entry_offset));
+ return Name::cast(
+ EntryKeyField::Relaxed_Load(cage_base, *this, entry_offset));
}
void DescriptorArray::SetKey(InternalIndex descriptor_number, Name key) {
@@ -129,12 +130,13 @@ int DescriptorArray::GetSortedKeyIndex(int descriptor_number) {
}
Name DescriptorArray::GetSortedKey(int descriptor_number) {
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
- return GetSortedKey(isolate, descriptor_number);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return GetSortedKey(cage_base, descriptor_number);
}
-Name DescriptorArray::GetSortedKey(IsolateRoot isolate, int descriptor_number) {
- return GetKey(isolate, InternalIndex(GetSortedKeyIndex(descriptor_number)));
+Name DescriptorArray::GetSortedKey(PtrComprCageBase cage_base,
+ int descriptor_number) {
+ return GetKey(cage_base, InternalIndex(GetSortedKeyIndex(descriptor_number)));
}
void DescriptorArray::SetSortedKey(int descriptor_number, int pointer) {
@@ -143,13 +145,13 @@ void DescriptorArray::SetSortedKey(int descriptor_number, int pointer) {
}
Object DescriptorArray::GetStrongValue(InternalIndex descriptor_number) {
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
- return GetStrongValue(isolate, descriptor_number);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return GetStrongValue(cage_base, descriptor_number);
}
-Object DescriptorArray::GetStrongValue(IsolateRoot isolate,
+Object DescriptorArray::GetStrongValue(PtrComprCageBase cage_base,
InternalIndex descriptor_number) {
- return GetValue(isolate, descriptor_number).cast<Object>();
+ return GetValue(cage_base, descriptor_number).cast<Object>();
}
void DescriptorArray::SetValue(InternalIndex descriptor_number,
@@ -161,15 +163,15 @@ void DescriptorArray::SetValue(InternalIndex descriptor_number,
}
MaybeObject DescriptorArray::GetValue(InternalIndex descriptor_number) {
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
- return GetValue(isolate, descriptor_number);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return GetValue(cage_base, descriptor_number);
}
-MaybeObject DescriptorArray::GetValue(IsolateRoot isolate,
+MaybeObject DescriptorArray::GetValue(PtrComprCageBase cage_base,
InternalIndex descriptor_number) {
DCHECK_LT(descriptor_number.as_int(), number_of_descriptors());
int entry_offset = OffsetOfDescriptorAt(descriptor_number.as_int());
- return EntryValueField::Relaxed_Load(isolate, *this, entry_offset);
+ return EntryValueField::Relaxed_Load(cage_base, *this, entry_offset);
}
PropertyDetails DescriptorArray::GetDetails(InternalIndex descriptor_number) {
@@ -192,14 +194,14 @@ int DescriptorArray::GetFieldIndex(InternalIndex descriptor_number) {
}
FieldType DescriptorArray::GetFieldType(InternalIndex descriptor_number) {
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
- return GetFieldType(isolate, descriptor_number);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return GetFieldType(cage_base, descriptor_number);
}
-FieldType DescriptorArray::GetFieldType(IsolateRoot isolate,
+FieldType DescriptorArray::GetFieldType(PtrComprCageBase cage_base,
InternalIndex descriptor_number) {
DCHECK_EQ(GetDetails(descriptor_number).location(), kField);
- MaybeObject wrapped_type = GetValue(isolate, descriptor_number);
+ MaybeObject wrapped_type = GetValue(cage_base, descriptor_number);
return Map::UnwrapFieldType(wrapped_type);
}
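
The descriptor-array accessors above, and the dictionary, hash-table and fixed-array accessors further down, all keep the same two-step pattern: a parameterless getter derives the cage base from the object itself and forwards to an overload that takes it explicitly, so loops can compute the base once and reuse it. A toy sketch of that delegation pattern, unrelated to V8's actual classes:

    #include <cstdint>
    #include <iostream>

    struct PtrComprCageBase { uint64_t base; };

    class ToyArray {
     public:
      // Convenience overload: derive the cage base from the object, then delegate.
      int Get(int index) const { return Get(GetCageBase(), index); }

      // Explicit overload: callers in tight loops compute the cage base once
      // and pass it to every access.
      int Get(PtrComprCageBase cage_base, int index) const {
        (void)cage_base;  // a real slot load would decompress relative to it
        return elements_[index];
      }

     private:
      PtrComprCageBase GetCageBase() const { return PtrComprCageBase{0x1000}; }
      int elements_[3] = {10, 20, 30};
    };

    int main() {
      ToyArray a;
      PtrComprCageBase cage_base = PtrComprCageBase{0x1000};
      for (int i = 0; i < 3; ++i) std::cout << a.Get(cage_base, i) << "\n";
      std::cout << a.Get(1) << "\n";  // convenience form
    }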
diff --git a/deps/v8/src/objects/descriptor-array.h b/deps/v8/src/objects/descriptor-array.h
index 57f9162c656..327931a421e 100644
--- a/deps/v8/src/objects/descriptor-array.h
+++ b/deps/v8/src/objects/descriptor-array.h
@@ -69,22 +69,22 @@ class DescriptorArray
// Accessors for fetching instance descriptor at descriptor number.
inline Name GetKey(InternalIndex descriptor_number) const;
- inline Name GetKey(IsolateRoot isolate,
+ inline Name GetKey(PtrComprCageBase cage_base,
InternalIndex descriptor_number) const;
inline Object GetStrongValue(InternalIndex descriptor_number);
- inline Object GetStrongValue(IsolateRoot isolate,
+ inline Object GetStrongValue(PtrComprCageBase cage_base,
InternalIndex descriptor_number);
inline MaybeObject GetValue(InternalIndex descriptor_number);
- inline MaybeObject GetValue(IsolateRoot isolate,
+ inline MaybeObject GetValue(PtrComprCageBase cage_base,
InternalIndex descriptor_number);
inline PropertyDetails GetDetails(InternalIndex descriptor_number);
inline int GetFieldIndex(InternalIndex descriptor_number);
inline FieldType GetFieldType(InternalIndex descriptor_number);
- inline FieldType GetFieldType(IsolateRoot isolate,
+ inline FieldType GetFieldType(PtrComprCageBase cage_base,
InternalIndex descriptor_number);
inline Name GetSortedKey(int descriptor_number);
- inline Name GetSortedKey(IsolateRoot isolate, int descriptor_number);
+ inline Name GetSortedKey(PtrComprCageBase cage_base, int descriptor_number);
inline int GetSortedKeyIndex(int descriptor_number);
// Accessor for complete descriptor.
@@ -217,6 +217,7 @@ class DescriptorArray
using EntryValueField = TaggedField<MaybeObject, kEntryValueOffset>;
private:
+ friend class WebSnapshotDeserializer;
DECL_INT16_ACCESSORS(filler16bits)
inline void SetKey(InternalIndex descriptor_number, Name key);
diff --git a/deps/v8/src/objects/dictionary-inl.h b/deps/v8/src/objects/dictionary-inl.h
index 981f5aac932..bb3d8d58799 100644
--- a/deps/v8/src/objects/dictionary-inl.h
+++ b/deps/v8/src/objects/dictionary-inl.h
@@ -30,15 +30,15 @@ Dictionary<Derived, Shape>::Dictionary(Address ptr)
template <typename Derived, typename Shape>
Object Dictionary<Derived, Shape>::ValueAt(InternalIndex entry) {
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
- return ValueAt(isolate, entry);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return ValueAt(cage_base, entry);
}
template <typename Derived, typename Shape>
-Object Dictionary<Derived, Shape>::ValueAt(IsolateRoot isolate,
+Object Dictionary<Derived, Shape>::ValueAt(PtrComprCageBase cage_base,
InternalIndex entry) {
- return this->get(isolate, DerivedHashTable::EntryToIndex(entry) +
- Derived::kEntryValueIndex);
+ return this->get(cage_base, DerivedHashTable::EntryToIndex(entry) +
+ Derived::kEntryValueIndex);
}
template <typename Derived, typename Shape>
@@ -181,12 +181,12 @@ Handle<Map> GlobalDictionary::GetMap(ReadOnlyRoots roots) {
}
Name NameDictionary::NameAt(InternalIndex entry) {
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
- return NameAt(isolate, entry);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return NameAt(cage_base, entry);
}
-Name NameDictionary::NameAt(IsolateRoot isolate, InternalIndex entry) {
- return Name::cast(KeyAt(isolate, entry));
+Name NameDictionary::NameAt(PtrComprCageBase cage_base, InternalIndex entry) {
+ return Name::cast(KeyAt(cage_base, entry));
}
Handle<Map> NameDictionary::GetMap(ReadOnlyRoots roots) {
@@ -194,32 +194,33 @@ Handle<Map> NameDictionary::GetMap(ReadOnlyRoots roots) {
}
PropertyCell GlobalDictionary::CellAt(InternalIndex entry) {
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
- return CellAt(isolate, entry);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return CellAt(cage_base, entry);
}
-PropertyCell GlobalDictionary::CellAt(IsolateRoot isolate,
+PropertyCell GlobalDictionary::CellAt(PtrComprCageBase cage_base,
InternalIndex entry) {
- DCHECK(KeyAt(isolate, entry).IsPropertyCell(isolate));
- return PropertyCell::cast(KeyAt(isolate, entry));
+ DCHECK(KeyAt(cage_base, entry).IsPropertyCell(cage_base));
+ return PropertyCell::cast(KeyAt(cage_base, entry));
}
Name GlobalDictionary::NameAt(InternalIndex entry) {
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
- return NameAt(isolate, entry);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return NameAt(cage_base, entry);
}
-Name GlobalDictionary::NameAt(IsolateRoot isolate, InternalIndex entry) {
- return CellAt(isolate, entry).name(isolate);
+Name GlobalDictionary::NameAt(PtrComprCageBase cage_base, InternalIndex entry) {
+ return CellAt(cage_base, entry).name(cage_base);
}
Object GlobalDictionary::ValueAt(InternalIndex entry) {
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
- return ValueAt(isolate, entry);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return ValueAt(cage_base, entry);
}
-Object GlobalDictionary::ValueAt(IsolateRoot isolate, InternalIndex entry) {
- return CellAt(isolate, entry).value(isolate);
+Object GlobalDictionary::ValueAt(PtrComprCageBase cage_base,
+ InternalIndex entry) {
+ return CellAt(cage_base, entry).value(cage_base);
}
void GlobalDictionary::SetEntry(InternalIndex entry, Object key, Object value,
diff --git a/deps/v8/src/objects/dictionary.h b/deps/v8/src/objects/dictionary.h
index 4aeeca6fb0a..be255f8162d 100644
--- a/deps/v8/src/objects/dictionary.h
+++ b/deps/v8/src/objects/dictionary.h
@@ -18,6 +18,13 @@
namespace v8 {
namespace internal {
+#ifdef V8_ENABLE_SWISS_NAME_DICTIONARY
+class SwissNameDictionary;
+using PropertyDictionary = SwissNameDictionary;
+#else
+using PropertyDictionary = NameDictionary;
+#endif
+
template <typename T>
class Handle;
@@ -32,7 +39,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) Dictionary
using Key = typename Shape::Key;
// Returns the value at entry.
inline Object ValueAt(InternalIndex entry);
- inline Object ValueAt(IsolateRoot isolate, InternalIndex entry);
+ inline Object ValueAt(PtrComprCageBase cage_base, InternalIndex entry);
// Set the value for entry.
inline void ValueAtPut(InternalIndex entry, Object value);
@@ -75,6 +82,9 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) Dictionary
Handle<Object> value, PropertyDetails details,
InternalIndex* entry_out = nullptr);
+ static Handle<Derived> ShallowCopy(Isolate* isolate,
+ Handle<Derived> dictionary);
+
protected:
// Generic at put operation.
V8_WARN_UNUSED_RESULT static Handle<Derived> AtPut(Isolate* isolate,
@@ -183,7 +193,7 @@ class V8_EXPORT_PRIVATE NameDictionary
static const int kInitialCapacity = 2;
inline Name NameAt(InternalIndex entry);
- inline Name NameAt(IsolateRoot isolate, InternalIndex entry);
+ inline Name NameAt(PtrComprCageBase cage_base, InternalIndex entry);
inline void set_hash(int hash);
inline int hash() const;
@@ -221,14 +231,14 @@ class V8_EXPORT_PRIVATE GlobalDictionary
DECL_PRINTER(GlobalDictionary)
inline Object ValueAt(InternalIndex entry);
- inline Object ValueAt(IsolateRoot isolate, InternalIndex entry);
+ inline Object ValueAt(PtrComprCageBase cage_base, InternalIndex entry);
inline PropertyCell CellAt(InternalIndex entry);
- inline PropertyCell CellAt(IsolateRoot isolate, InternalIndex entry);
+ inline PropertyCell CellAt(PtrComprCageBase cage_base, InternalIndex entry);
inline void SetEntry(InternalIndex entry, Object key, Object value,
PropertyDetails details);
inline void ClearEntry(InternalIndex entry);
inline Name NameAt(InternalIndex entry);
- inline Name NameAt(IsolateRoot isolate, InternalIndex entry);
+ inline Name NameAt(PtrComprCageBase cage_base, InternalIndex entry);
inline void ValueAtPut(InternalIndex entry, Object value);
OBJECT_CONSTRUCTORS(
diff --git a/deps/v8/src/objects/elements.cc b/deps/v8/src/objects/elements.cc
index f9ba145cf40..9b1c7936bb6 100644
--- a/deps/v8/src/objects/elements.cc
+++ b/deps/v8/src/objects/elements.cc
@@ -154,10 +154,11 @@ MaybeHandle<Object> ThrowArrayLengthRangeError(Isolate* isolate) {
Object);
}
-WriteBarrierMode GetWriteBarrierMode(ElementsKind kind) {
+WriteBarrierMode GetWriteBarrierMode(FixedArrayBase elements, ElementsKind kind,
+ const DisallowGarbageCollection& promise) {
if (IsSmiElementsKind(kind)) return SKIP_WRITE_BARRIER;
if (IsDoubleElementsKind(kind)) return SKIP_WRITE_BARRIER;
- return UPDATE_WRITE_BARRIER;
+ return elements.GetWriteBarrierMode(promise);
}
// If kCopyToEndAndInitializeToHole is specified as the copy_size to
@@ -226,7 +227,7 @@ void CopyDictionaryToObjectElements(Isolate* isolate, FixedArrayBase from_base,
if (to_start + copy_size > to_length) {
copy_size = to_length - to_start;
}
- WriteBarrierMode write_barrier_mode = GetWriteBarrierMode(to_kind);
+ WriteBarrierMode write_barrier_mode = GetWriteBarrierMode(to, to_kind, no_gc);
for (int i = 0; i < copy_size; i++) {
InternalIndex entry = from.FindEntry(isolate, i + from_start);
if (entry.is_found()) {
@@ -1420,10 +1421,10 @@ class DictionaryElementsAccessor
DisallowGarbageCollection no_gc;
NumberDictionary dict = NumberDictionary::cast(backing_store);
if (!dict.requires_slow_elements()) return false;
- IsolateRoot isolate = GetIsolateForPtrCompr(holder);
- ReadOnlyRoots roots = holder.GetReadOnlyRoots(isolate);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(holder);
+ ReadOnlyRoots roots = holder.GetReadOnlyRoots(cage_base);
for (InternalIndex i : dict.IterateEntries()) {
- Object key = dict.KeyAt(isolate, i);
+ Object key = dict.KeyAt(cage_base, i);
if (!dict.IsKey(roots, key)) continue;
PropertyDetails details = dict.DetailsAt(i);
if (details.kind() == kAccessor) return true;
@@ -2113,25 +2114,26 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
Handle<FixedArrayBase> backing_store, int dst_index,
int src_index, int len, int hole_start,
int hole_end) {
- Handle<BackingStore> dst_elms = Handle<BackingStore>::cast(backing_store);
+ DisallowGarbageCollection no_gc;
+ BackingStore dst_elms = BackingStore::cast(*backing_store);
if (len > JSArray::kMaxCopyElements && dst_index == 0 &&
- isolate->heap()->CanMoveObjectStart(*dst_elms)) {
+ isolate->heap()->CanMoveObjectStart(dst_elms)) {
+ dst_elms = BackingStore::cast(
+ isolate->heap()->LeftTrimFixedArray(dst_elms, src_index));
// Update all the copies of this backing_store handle.
- *dst_elms.location() =
- BackingStore::cast(
- isolate->heap()->LeftTrimFixedArray(*dst_elms, src_index))
- .ptr();
- receiver->set_elements(*dst_elms);
+ *backing_store.location() = dst_elms.ptr();
+ receiver->set_elements(dst_elms);
// Adjust the hole offset as the array has been shrunk.
hole_end -= src_index;
DCHECK_LE(hole_start, backing_store->length());
DCHECK_LE(hole_end, backing_store->length());
} else if (len != 0) {
- WriteBarrierMode mode = GetWriteBarrierMode(KindTraits::Kind);
- dst_elms->MoveElements(isolate, dst_index, src_index, len, mode);
+ WriteBarrierMode mode =
+ GetWriteBarrierMode(dst_elms, KindTraits::Kind, no_gc);
+ dst_elms.MoveElements(isolate, dst_index, src_index, len, mode);
}
if (hole_start != hole_end) {
- dst_elms->FillWithHoles(hole_start, hole_end);
+ dst_elms.FillWithHoles(hole_start, hole_end);
}
}
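
In the elements.cc hunk, GetWriteBarrierMode now also consults the destination array itself (under a DisallowGarbageCollection scope) instead of unconditionally returning UPDATE_WRITE_BARRIER for tagged element kinds. A simplified sketch of the resulting decision logic, with invented predicates and a boolean standing in for whatever lets the real array skip the barrier:

    #include <iostream>

    enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };

    // Hypothetical stand-ins for V8's elements-kind predicates.
    bool IsSmiKind(int kind) { return kind == 0; }
    bool IsDoubleKind(int kind) { return kind == 1; }

    struct ToyFixedArray {
      bool barrier_not_required;  // e.g. the array lives in new space
      WriteBarrierMode GetWriteBarrierMode() const {
        return barrier_not_required ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
      }
    };

    // Mirrors the shape of the patched helper: Smi and double kinds never
    // need a barrier; otherwise the destination array decides.
    WriteBarrierMode GetWriteBarrierMode(const ToyFixedArray& elements, int kind) {
      if (IsSmiKind(kind)) return SKIP_WRITE_BARRIER;
      if (IsDoubleKind(kind)) return SKIP_WRITE_BARRIER;
      return elements.GetWriteBarrierMode();
    }

    int main() {
      ToyFixedArray young{true}, old{false};
      std::cout << (GetWriteBarrierMode(young, 2) == SKIP_WRITE_BARRIER) << "\n";   // 1
      std::cout << (GetWriteBarrierMode(old, 2) == UPDATE_WRITE_BARRIER) << "\n";   // 1
    }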
diff --git a/deps/v8/src/objects/embedder-data-slot-inl.h b/deps/v8/src/objects/embedder-data-slot-inl.h
index f9ef6e1e566..3f8deb39f09 100644
--- a/deps/v8/src/objects/embedder-data-slot-inl.h
+++ b/deps/v8/src/objects/embedder-data-slot-inl.h
@@ -81,7 +81,7 @@ void EmbedderDataSlot::store_tagged(JSObject object, int embedder_field_index,
#endif
}
-bool EmbedderDataSlot::ToAlignedPointer(IsolateRoot isolate_root,
+bool EmbedderDataSlot::ToAlignedPointer(PtrComprCageBase isolate_root,
void** out_pointer) const {
// We don't care about atomicity of access here because embedder slots
// are accessed this way only from the main thread via API during "mutator"
@@ -89,6 +89,12 @@ bool EmbedderDataSlot::ToAlignedPointer(IsolateRoot isolate_root,
// at the tagged part of the embedder slot but read-only access is ok).
Address raw_value;
#ifdef V8_HEAP_SANDBOX
+
+ // TODO(syg): V8_HEAP_SANDBOX doesn't work with pointer cage
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#error "V8_HEAP_SANDBOX requires per-Isolate pointer compression cage"
+#endif
+
uint32_t index = base::Memory<uint32_t>(address() + kRawPayloadOffset);
const Isolate* isolate = Isolate::FromRootAddress(isolate_root.address());
raw_value = isolate->external_pointer_table().get(index) ^
@@ -108,9 +114,15 @@ bool EmbedderDataSlot::ToAlignedPointer(IsolateRoot isolate_root,
return HAS_SMI_TAG(raw_value);
}
-bool EmbedderDataSlot::ToAlignedPointerSafe(IsolateRoot isolate_root,
+bool EmbedderDataSlot::ToAlignedPointerSafe(PtrComprCageBase isolate_root,
void** out_pointer) const {
#ifdef V8_HEAP_SANDBOX
+
+ // TODO(syg): V8_HEAP_SANDBOX doesn't work with pointer cage
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#error "V8_HEAP_SANDBOX requires per-Isolate pointer compression cage"
+#endif
+
uint32_t index = base::Memory<uint32_t>(address() + kRawPayloadOffset);
Address raw_value;
const Isolate* isolate = Isolate::FromRootAddress(isolate_root.address());
diff --git a/deps/v8/src/objects/embedder-data-slot.h b/deps/v8/src/objects/embedder-data-slot.h
index 8f4fcc8af2c..65fe78403a2 100644
--- a/deps/v8/src/objects/embedder-data-slot.h
+++ b/deps/v8/src/objects/embedder-data-slot.h
@@ -75,7 +75,8 @@ class EmbedderDataSlot
// When V8 heap sandbox is enabled, calling this method when the raw part of
// the slot does not contain valid external pointer table index is undefined
// behaviour and most likely result in crashes.
- V8_INLINE bool ToAlignedPointer(IsolateRoot isolate, void** out_result) const;
+ V8_INLINE bool ToAlignedPointer(PtrComprCageBase isolate_root,
+ void** out_result) const;
// Same as ToAlignedPointer() but with a workaround for V8 heap sandbox.
// When V8 heap sandbox is enabled, this method doesn't crash when the raw
@@ -86,7 +87,7 @@ class EmbedderDataSlot
//
// Call this function if you are not sure whether the slot contains valid
// external pointer or not.
- V8_INLINE bool ToAlignedPointerSafe(IsolateRoot isolate,
+ V8_INLINE bool ToAlignedPointerSafe(PtrComprCageBase isolate_root,
void** out_result) const;
// Returns true if the pointer was successfully stored or false it the pointer
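
The embedder-data-slot hunks guard the V8_HEAP_SANDBOX paths with an #error when the shared pointer-compression cage is configured, since those paths recover a per-Isolate root from the cage base. The same compile-time mutual-exclusion pattern in isolation (FEATURE_A/FEATURE_B are placeholders, not V8's macros):

    // Toy translation unit showing mutually exclusive configuration macros.
    #define FEATURE_A 1
    // #define FEATURE_B 1  // defining both would trip the #error below

    #if defined(FEATURE_A) && defined(FEATURE_B)
    #error "FEATURE_A requires FEATURE_B to be disabled"
    #endif

    #include <iostream>
    int main() { std::cout << "configuration is consistent\n"; }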
diff --git a/deps/v8/src/objects/feedback-vector-inl.h b/deps/v8/src/objects/feedback-vector-inl.h
index a66ec312f6f..8853dabdbdb 100644
--- a/deps/v8/src/objects/feedback-vector-inl.h
+++ b/deps/v8/src/objects/feedback-vector-inl.h
@@ -187,8 +187,9 @@ MaybeObject FeedbackVector::Get(FeedbackSlot slot) const {
return value;
}
-MaybeObject FeedbackVector::Get(IsolateRoot isolate, FeedbackSlot slot) const {
- MaybeObject value = raw_feedback_slots(isolate, GetIndex(slot));
+MaybeObject FeedbackVector::Get(PtrComprCageBase cage_base,
+ FeedbackSlot slot) const {
+ MaybeObject value = raw_feedback_slots(cage_base, GetIndex(slot));
DCHECK(!IsOfLegacyType(value));
return value;
}
diff --git a/deps/v8/src/objects/feedback-vector.h b/deps/v8/src/objects/feedback-vector.h
index e6a850fe526..cc5e867f720 100644
--- a/deps/v8/src/objects/feedback-vector.h
+++ b/deps/v8/src/objects/feedback-vector.h
@@ -259,7 +259,7 @@ class FeedbackVector
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline MaybeObject Get(FeedbackSlot slot) const;
- inline MaybeObject Get(IsolateRoot isolate, FeedbackSlot slot) const;
+ inline MaybeObject Get(PtrComprCageBase cage_base, FeedbackSlot slot) const;
// Returns the feedback cell at |index| that is used to create the
// closure.
diff --git a/deps/v8/src/objects/field-index-inl.h b/deps/v8/src/objects/field-index-inl.h
index 09056cfd996..64bb4213013 100644
--- a/deps/v8/src/objects/field-index-inl.h
+++ b/deps/v8/src/objects/field-index-inl.h
@@ -61,13 +61,13 @@ int FieldIndex::GetLoadByFieldIndex() const {
}
FieldIndex FieldIndex::ForDescriptor(Map map, InternalIndex descriptor_index) {
- IsolateRoot isolate = GetIsolateForPtrCompr(map);
- return ForDescriptor(isolate, map, descriptor_index);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(map);
+ return ForDescriptor(cage_base, map, descriptor_index);
}
-FieldIndex FieldIndex::ForDescriptor(IsolateRoot isolate, Map map,
+FieldIndex FieldIndex::ForDescriptor(PtrComprCageBase cage_base, Map map,
InternalIndex descriptor_index) {
- PropertyDetails details = map.instance_descriptors(isolate, kRelaxedLoad)
+ PropertyDetails details = map.instance_descriptors(cage_base, kRelaxedLoad)
.GetDetails(descriptor_index);
int field_index = details.field_index();
return ForPropertyIndex(map, field_index, details.representation());
diff --git a/deps/v8/src/objects/field-index.h b/deps/v8/src/objects/field-index.h
index 7819c8c06b1..7ccf0492692 100644
--- a/deps/v8/src/objects/field-index.h
+++ b/deps/v8/src/objects/field-index.h
@@ -31,7 +31,7 @@ class FieldIndex final {
static inline FieldIndex ForInObjectOffset(int offset, Encoding encoding);
static inline FieldIndex ForDescriptor(Map map,
InternalIndex descriptor_index);
- static inline FieldIndex ForDescriptor(IsolateRoot isolate, Map map,
+ static inline FieldIndex ForDescriptor(PtrComprCageBase cage_base, Map map,
InternalIndex descriptor_index);
inline int GetLoadByFieldIndex() const;
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
index bfd7d9563bc..cca6d400705 100644
--- a/deps/v8/src/objects/fixed-array-inl.h
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -70,13 +70,13 @@ bool FixedArray::ContainsOnlySmisOrHoles() {
}
Object FixedArray::get(int index) const {
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
- return get(isolate, index);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return get(cage_base, index);
}
-Object FixedArray::get(IsolateRoot isolate, int index) const {
+Object FixedArray::get(PtrComprCageBase cage_base, int index) const {
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
- return TaggedField<Object>::Relaxed_Load(isolate, *this,
+ return TaggedField<Object>::Relaxed_Load(cage_base, *this,
OffsetOfElementAt(index));
}
@@ -88,7 +88,6 @@ bool FixedArray::is_the_hole(Isolate* isolate, int index) {
return get(isolate, index).IsTheHole(isolate);
}
-#if !defined(_WIN32) || defined(_WIN64)
void FixedArray::set(int index, Smi value) {
DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
@@ -96,7 +95,6 @@ void FixedArray::set(int index, Smi value) {
int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(*this, offset, value);
}
-#endif
void FixedArray::set(int index, Object value) {
DCHECK_NE(GetReadOnlyRoots().fixed_cow_array_map(), map());
@@ -126,11 +124,12 @@ void FixedArray::NoWriteBarrierSet(FixedArray array, int index, Object value) {
}
Object FixedArray::get(int index, RelaxedLoadTag) const {
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
- return get(isolate, index);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return get(cage_base, index);
}
-Object FixedArray::get(IsolateRoot isolate, int index, RelaxedLoadTag) const {
+Object FixedArray::get(PtrComprCageBase cage_base, int index,
+ RelaxedLoadTag) const {
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
return RELAXED_READ_FIELD(*this, OffsetOfElementAt(index));
}
@@ -149,11 +148,12 @@ void FixedArray::set(int index, Smi value, RelaxedStoreTag tag) {
}
Object FixedArray::get(int index, AcquireLoadTag) const {
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
- return get(isolate, index);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return get(cage_base, index);
}
-Object FixedArray::get(IsolateRoot isolate, int index, AcquireLoadTag) const {
+Object FixedArray::get(PtrComprCageBase cage_base, int index,
+ AcquireLoadTag) const {
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
return ACQUIRE_READ_FIELD(*this, OffsetOfElementAt(index));
}
@@ -439,13 +439,13 @@ void FixedDoubleArray::FillWithHoles(int from, int to) {
}
MaybeObject WeakFixedArray::Get(int index) const {
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
- return Get(isolate, index);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return Get(cage_base, index);
}
-MaybeObject WeakFixedArray::Get(IsolateRoot isolate, int index) const {
+MaybeObject WeakFixedArray::Get(PtrComprCageBase cage_base, int index) const {
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
- return objects(isolate, index);
+ return objects(cage_base, index);
}
void WeakFixedArray::Set(int index, MaybeObject value, WriteBarrierMode mode) {
@@ -474,13 +474,13 @@ void WeakFixedArray::CopyElements(Isolate* isolate, int dst_index,
}
MaybeObject WeakArrayList::Get(int index) const {
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
- return Get(isolate, index);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return Get(cage_base, index);
}
-MaybeObject WeakArrayList::Get(IsolateRoot isolate, int index) const {
+MaybeObject WeakArrayList::Get(PtrComprCageBase cage_base, int index) const {
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(capacity()));
- return objects(isolate, index);
+ return objects(cage_base, index);
}
void WeakArrayList::Set(int index, MaybeObject value, WriteBarrierMode mode) {
@@ -529,8 +529,8 @@ Object ArrayList::Get(int index) const {
return FixedArray::cast(*this).get(kFirstIndex + index);
}
-Object ArrayList::Get(IsolateRoot isolate, int index) const {
- return FixedArray::cast(*this).get(isolate, kFirstIndex + index);
+Object ArrayList::Get(PtrComprCageBase cage_base, int index) const {
+ return FixedArray::cast(*this).get(cage_base, kFirstIndex + index);
}
ObjectSlot ArrayList::Slot(int index) {
@@ -654,8 +654,8 @@ Object TemplateList::get(int index) const {
return FixedArray::cast(*this).get(kFirstElementIndex + index);
}
-Object TemplateList::get(IsolateRoot isolate, int index) const {
- return FixedArray::cast(*this).get(isolate, kFirstElementIndex + index);
+Object TemplateList::get(PtrComprCageBase cage_base, int index) const {
+ return FixedArray::cast(*this).get(cage_base, kFirstElementIndex + index);
}
void TemplateList::set(int index, Object value) {
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index cc9fcc61863..98c5d8d5b5c 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -101,7 +101,7 @@ class FixedArray
public:
// Setter and getter for elements.
inline Object get(int index) const;
- inline Object get(IsolateRoot isolate, int index) const;
+ inline Object get(PtrComprCageBase cage_base, int index) const;
static inline Handle<Object> get(FixedArray array, int index,
Isolate* isolate);
@@ -113,14 +113,16 @@ class FixedArray
// Relaxed accessors.
inline Object get(int index, RelaxedLoadTag) const;
- inline Object get(IsolateRoot isolate, int index, RelaxedLoadTag) const;
+ inline Object get(PtrComprCageBase cage_base, int index,
+ RelaxedLoadTag) const;
inline void set(int index, Object value, RelaxedStoreTag,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline void set(int index, Smi value, RelaxedStoreTag);
// Acquire/release accessors.
inline Object get(int index, AcquireLoadTag) const;
- inline Object get(IsolateRoot isolate, int index, AcquireLoadTag) const;
+ inline Object get(PtrComprCageBase cage_base, int index,
+ AcquireLoadTag) const;
inline void set(int index, Object value, ReleaseStoreTag,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline void set(int index, Smi value, ReleaseStoreTag);
@@ -130,18 +132,7 @@ class FixedArray
inline bool is_the_hole(Isolate* isolate, int index);
// Setter that doesn't need write barrier.
-#if defined(_WIN32) && !defined(_WIN64)
- inline void set(int index, Smi value) {
- DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
- DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
- DCHECK(Object(value).IsSmi());
- int offset = OffsetOfElementAt(index);
- RELAXED_WRITE_FIELD(*this, offset, value);
- }
-#else
inline void set(int index, Smi value);
-#endif
-
// Setter with explicit barrier mode.
inline void set(int index, Object value, WriteBarrierMode mode);
@@ -286,7 +277,7 @@ class WeakFixedArray
: public TorqueGeneratedWeakFixedArray<WeakFixedArray, HeapObject> {
public:
inline MaybeObject Get(int index) const;
- inline MaybeObject Get(IsolateRoot isolate, int index) const;
+ inline MaybeObject Get(PtrComprCageBase cage_base, int index) const;
inline void Set(
int index, MaybeObject value,
@@ -361,7 +352,7 @@ class WeakArrayList
V8_EXPORT_PRIVATE void Compact(Isolate* isolate);
inline MaybeObject Get(int index) const;
- inline MaybeObject Get(IsolateRoot isolate, int index) const;
+ inline MaybeObject Get(PtrComprCageBase cage_base, int index) const;
// Set the element at index to obj. The underlying array must be large enough.
// If you need to grow the WeakArrayList, use the static AddToEnd() method
@@ -461,7 +452,7 @@ class ArrayList : public TorqueGeneratedArrayList<ArrayList, FixedArray> {
// storage capacity, i.e., length().
inline void SetLength(int length);
inline Object Get(int index) const;
- inline Object Get(IsolateRoot isolate, int index) const;
+ inline Object Get(PtrComprCageBase cage_base, int index) const;
inline ObjectSlot Slot(int index);
// Set the element at index to obj. The underlying array must be large enough.
@@ -607,7 +598,7 @@ class TemplateList
static Handle<TemplateList> New(Isolate* isolate, int size);
inline int length() const;
inline Object get(int index) const;
- inline Object get(IsolateRoot isolate, int index) const;
+ inline Object get(PtrComprCageBase cage_base, int index) const;
inline void set(int index, Object value);
static Handle<TemplateList> Add(Isolate* isolate, Handle<TemplateList> list,
Handle<Object> value);
diff --git a/deps/v8/src/objects/foreign-inl.h b/deps/v8/src/objects/foreign-inl.h
index cb3dac91eb8..150857f49a0 100644
--- a/deps/v8/src/objects/foreign-inl.h
+++ b/deps/v8/src/objects/foreign-inl.h
@@ -29,7 +29,7 @@ bool Foreign::IsNormalized(Object value) {
}
DEF_GETTER(Foreign, foreign_address, Address) {
- return ReadExternalPointerField(kForeignAddressOffset, isolate,
+ return ReadExternalPointerField(kForeignAddressOffset, cage_base,
kForeignForeignAddressTag);
}
diff --git a/deps/v8/src/objects/hash-table-inl.h b/deps/v8/src/objects/hash-table-inl.h
index 08f30ad004e..27645058b30 100644
--- a/deps/v8/src/objects/hash-table-inl.h
+++ b/deps/v8/src/objects/hash-table-inl.h
@@ -139,7 +139,7 @@ InternalIndex HashTable<Derived, Shape>::FindEntry(LocalIsolate* isolate,
// Find entry for key otherwise return kNotFound.
template <typename Derived, typename Shape>
-InternalIndex HashTable<Derived, Shape>::FindEntry(IsolateRoot isolate,
+InternalIndex HashTable<Derived, Shape>::FindEntry(PtrComprCageBase cage_base,
ReadOnlyRoots roots, Key key,
int32_t hash) {
DisallowGarbageCollection no_gc;
@@ -151,7 +151,7 @@ InternalIndex HashTable<Derived, Shape>::FindEntry(IsolateRoot isolate,
// EnsureCapacity will guarantee the hash table is never full.
for (InternalIndex entry = FirstProbe(hash, capacity);;
entry = NextProbe(entry, count++, capacity)) {
- Object element = KeyAt(isolate, entry);
+ Object element = KeyAt(cage_base, entry);
// Empty entry. Uses raw unchecked accessors because it is called by the
// string table during bootstrapping.
if (element == undefined) return InternalIndex::NotFound();
@@ -177,24 +177,24 @@ bool HashTable<Derived, Shape>::ToKey(ReadOnlyRoots roots, InternalIndex entry,
}
template <typename Derived, typename Shape>
-bool HashTable<Derived, Shape>::ToKey(IsolateRoot isolate, InternalIndex entry,
- Object* out_k) {
- Object k = KeyAt(isolate, entry);
- if (!IsKey(GetReadOnlyRoots(isolate), k)) return false;
+bool HashTable<Derived, Shape>::ToKey(PtrComprCageBase cage_base,
+ InternalIndex entry, Object* out_k) {
+ Object k = KeyAt(cage_base, entry);
+ if (!IsKey(GetReadOnlyRoots(cage_base), k)) return false;
*out_k = Shape::Unwrap(k);
return true;
}
template <typename Derived, typename Shape>
Object HashTable<Derived, Shape>::KeyAt(InternalIndex entry) {
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
- return KeyAt(isolate, entry);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return KeyAt(cage_base, entry);
}
template <typename Derived, typename Shape>
-Object HashTable<Derived, Shape>::KeyAt(IsolateRoot isolate,
+Object HashTable<Derived, Shape>::KeyAt(PtrComprCageBase cage_base,
InternalIndex entry) {
- return get(isolate, EntryToIndex(entry) + kEntryKeyIndex);
+ return get(cage_base, EntryToIndex(entry) + kEntryKeyIndex);
}
template <typename Derived, typename Shape>
diff --git a/deps/v8/src/objects/hash-table.h b/deps/v8/src/objects/hash-table.h
index 39d8e326f64..12ac020fb76 100644
--- a/deps/v8/src/objects/hash-table.h
+++ b/deps/v8/src/objects/hash-table.h
@@ -138,24 +138,25 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) HashTable
void IterateElements(ObjectVisitor* visitor);
// Find entry for key otherwise return kNotFound.
- inline InternalIndex FindEntry(IsolateRoot isolate, ReadOnlyRoots roots,
- Key key, int32_t hash);
+ inline InternalIndex FindEntry(PtrComprCageBase cage_base,
+ ReadOnlyRoots roots, Key key, int32_t hash);
template <typename LocalIsolate>
inline InternalIndex FindEntry(LocalIsolate* isolate, Key key);
// Rehashes the table in-place.
- void Rehash(IsolateRoot isolate);
+ void Rehash(PtrComprCageBase cage_base);
// Returns whether k is a real key. The hole and undefined are not allowed as
// keys and can be used to indicate missing or deleted elements.
static inline bool IsKey(ReadOnlyRoots roots, Object k);
inline bool ToKey(ReadOnlyRoots roots, InternalIndex entry, Object* out_k);
- inline bool ToKey(IsolateRoot isolate, InternalIndex entry, Object* out_k);
+ inline bool ToKey(PtrComprCageBase cage_base, InternalIndex entry,
+ Object* out_k);
// Returns the key at entry.
inline Object KeyAt(InternalIndex entry);
- inline Object KeyAt(IsolateRoot isolate, InternalIndex entry);
+ inline Object KeyAt(PtrComprCageBase cage_base, InternalIndex entry);
static const int kElementsStartIndex = kPrefixStartIndex + Shape::kPrefixSize;
static const int kEntrySize = Shape::kEntrySize;
@@ -217,8 +218,8 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) HashTable
// Find the entry at which to insert element with the given key that
// has the given hash value.
- InternalIndex FindInsertionEntry(IsolateRoot isolate, ReadOnlyRoots roots,
- uint32_t hash);
+ InternalIndex FindInsertionEntry(PtrComprCageBase cage_base,
+ ReadOnlyRoots roots, uint32_t hash);
InternalIndex FindInsertionEntry(Isolate* isolate, uint32_t hash);
// Computes the capacity a table with the given capacity would need to have
@@ -231,7 +232,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) HashTable
Isolate* isolate, Handle<Derived> table, int additionalCapacity = 0);
// Rehashes this hash-table into the new table.
- void Rehash(IsolateRoot isolate, Derived new_table);
+ void Rehash(PtrComprCageBase cage_base, Derived new_table);
inline void set_key(int index, Object value);
inline void set_key(int index, Object value, WriteBarrierMode mode);
@@ -322,7 +323,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) ObjectHashTableBase
// returned in case the key is not present.
Object Lookup(Handle<Object> key);
Object Lookup(Handle<Object> key, int32_t hash);
- Object Lookup(IsolateRoot isolate, Handle<Object> key, int32_t hash);
+ Object Lookup(PtrComprCageBase cage_base, Handle<Object> key, int32_t hash);
// Returns the value at entry.
Object ValueAt(InternalIndex entry);
diff --git a/deps/v8/src/objects/heap-object.h b/deps/v8/src/objects/heap-object.h
index e62356218d4..e0aea975371 100644
--- a/deps/v8/src/objects/heap-object.h
+++ b/deps/v8/src/objects/heap-object.h
@@ -70,12 +70,12 @@ class HeapObject : public Object {
// places where it might not be safe to access it.
inline ReadOnlyRoots GetReadOnlyRoots() const;
// This version is intended to be used for the isolate values produced by
- // i::GetIsolateForPtrCompr(HeapObject) function which may return nullptr.
- inline ReadOnlyRoots GetReadOnlyRoots(IsolateRoot isolate) const;
+ // i::GetPtrComprCageBase(HeapObject) function which may return nullptr.
+ inline ReadOnlyRoots GetReadOnlyRoots(PtrComprCageBase cage_base) const;
#define IS_TYPE_FUNCTION_DECL(Type) \
V8_INLINE bool Is##Type() const; \
- V8_INLINE bool Is##Type(IsolateRoot isolate) const;
+ V8_INLINE bool Is##Type(PtrComprCageBase cage_base) const;
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
IS_TYPE_FUNCTION_DECL(HashTableBase)
IS_TYPE_FUNCTION_DECL(SmallOrderedHashTable)
@@ -96,7 +96,7 @@ class HeapObject : public Object {
#define DECL_STRUCT_PREDICATE(NAME, Name, name) \
V8_INLINE bool Is##Name() const; \
- V8_INLINE bool Is##Name(IsolateRoot isolate) const;
+ V8_INLINE bool Is##Name(PtrComprCageBase cage_base) const;
STRUCT_LIST(DECL_STRUCT_PREDICATE)
#undef DECL_STRUCT_PREDICATE
diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc
index fc7e1829397..d9fca11e735 100644
--- a/deps/v8/src/objects/intl-objects.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -1472,13 +1472,19 @@ icu::LocaleMatcher BuildLocaleMatcher(
UErrorCode* status) {
icu::Locale default_locale =
icu::Locale::forLanguageTag(DefaultLocale(isolate), *status);
- DCHECK(U_SUCCESS(*status));
icu::LocaleMatcher::Builder builder;
+ if (U_FAILURE(*status)) {
+ return builder.build(*status);
+ }
builder.setDefaultLocale(&default_locale);
for (auto it = available_locales.begin(); it != available_locales.end();
++it) {
- builder.addSupportedLocale(
- icu::Locale::forLanguageTag(it->c_str(), *status));
+ *status = U_ZERO_ERROR;
+ icu::Locale l = icu::Locale::forLanguageTag(it->c_str(), *status);
+ // skip invalid locale such as no-NO-NY
+ if (U_SUCCESS(*status)) {
+ builder.addSupportedLocale(l);
+ }
}
return builder.build(*status);
@@ -1599,10 +1605,6 @@ Handle<JSArray> CreateArrayFromList(Isolate* isolate,
return array;
}
-// To mitigate the risk of bestfit locale matcher, we first check in without
-// turnning it on.
-static bool implement_bestfit = false;
-
// ECMA 402 9.2.9 SupportedLocales(availableLocales, requestedLocales, options)
// https://tc39.github.io/ecma402/#sec-supportedlocales
MaybeHandle<JSObject> SupportedLocales(
@@ -1611,28 +1613,24 @@ MaybeHandle<JSObject> SupportedLocales(
const std::vector<std::string>& requested_locales, Handle<Object> options) {
std::vector<std::string> supported_locales;
- // 2. Else, let matcher be "best fit".
- Intl::MatcherOption matcher = Intl::MatcherOption::kBestFit;
-
- // 1. If options is not undefined, then
- if (!options->IsUndefined(isolate)) {
- // 1. a. Let options be ? ToObject(options).
- Handle<JSReceiver> options_obj;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, options_obj,
- Object::ToObject(isolate, options), JSObject);
+ // 1. Set options to ? CoerceOptionsToObject(options).
+ Handle<JSReceiver> options_obj;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, options_obj,
+ Intl::CoerceOptionsToObject(isolate, options, method), JSObject);
- // 1. b. Let matcher be ? GetOption(options, "localeMatcher", "string",
- // « "lookup", "best fit" », "best fit").
- Maybe<Intl::MatcherOption> maybe_locale_matcher =
- Intl::GetLocaleMatcher(isolate, options_obj, method);
- MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSObject>());
- matcher = maybe_locale_matcher.FromJust();
- }
+ // 2. Let matcher be ? GetOption(options, "localeMatcher", "string",
+ // « "lookup", "best fit" », "best fit").
+ Maybe<Intl::MatcherOption> maybe_locale_matcher =
+ Intl::GetLocaleMatcher(isolate, options_obj, method);
+ MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSObject>());
+ Intl::MatcherOption matcher = maybe_locale_matcher.FromJust();
// 3. If matcher is "best fit", then
// a. Let supportedLocales be BestFitSupportedLocales(availableLocales,
// requestedLocales).
- if (matcher == Intl::MatcherOption::kBestFit && implement_bestfit) {
+ if (matcher == Intl::MatcherOption::kBestFit &&
+ FLAG_harmony_intl_best_fit_matcher) {
supported_locales =
BestFitSupportedLocales(isolate, available_locales, requested_locales);
} else {
@@ -1889,7 +1887,8 @@ Maybe<Intl::ResolvedLocale> Intl::ResolveLocale(
const std::vector<std::string>& requested_locales, MatcherOption matcher,
const std::set<std::string>& relevant_extension_keys) {
std::string locale;
- if (matcher == Intl::MatcherOption::kBestFit && implement_bestfit) {
+ if (matcher == Intl::MatcherOption::kBestFit &&
+ FLAG_harmony_intl_best_fit_matcher) {
locale = BestFitMatcher(isolate, available_locales, requested_locales);
} else {
locale = LookupMatcher(isolate, available_locales, requested_locales);
@@ -2218,6 +2217,40 @@ MaybeHandle<String> Intl::FormattedToString(
return Intl::ToString(isolate, result);
}
+// ecma402/#sec-getoptionsobject
+MaybeHandle<JSReceiver> Intl::GetOptionsObject(Isolate* isolate,
+ Handle<Object> options,
+ const char* service) {
+ // 1. If options is undefined, then
+ if (options->IsUndefined(isolate)) {
+ // a. Return ! ObjectCreate(null).
+ return isolate->factory()->NewJSObjectWithNullProto();
+ }
+ // 2. If Type(options) is Object, then
+ if (options->IsJSReceiver()) {
+ // a. Return options.
+ return Handle<JSReceiver>::cast(options);
+ }
+ // 3. Throw a TypeError exception.
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kInvalidArgument),
+ JSReceiver);
+}
+
+// ecma402/#sec-coerceoptionstoobject
+MaybeHandle<JSReceiver> Intl::CoerceOptionsToObject(Isolate* isolate,
+ Handle<Object> options,
+ const char* service) {
+ // 1. If options is undefined, then
+ if (options->IsUndefined(isolate)) {
+ // a. Return ! ObjectCreate(null).
+ return isolate->factory()->NewJSObjectWithNullProto();
+ }
+ // 2. Return ? ToObject(options).
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, options,
+ Object::ToObject(isolate, options, service),
+ JSReceiver);
+ return Handle<JSReceiver>::cast(options);
+}
} // namespace internal
} // namespace v8
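
The two new Intl helpers differ only in how a non-undefined, non-object options value is treated: GetOptionsObject throws a TypeError, while CoerceOptionsToObject applies ToObject to it; undefined yields a fresh null-prototype object in both. A standalone sketch of that branching, with a toy value type standing in for V8 handles:

    #include <iostream>
    #include <stdexcept>
    #include <string>

    // Toy JS value: undefined, an object, or a primitive.
    enum class Kind { kUndefined, kObject, kPrimitive };
    struct Value { Kind kind; std::string payload; };

    Value NewNullProtoObject() { return {Kind::kObject, "{}"}; }
    Value ToObject(const Value& v) { return {Kind::kObject, "wrap(" + v.payload + ")"}; }

    // ecma402/#sec-getoptionsobject: undefined -> fresh object,
    // object -> itself, anything else -> TypeError.
    Value GetOptionsObject(const Value& options) {
      if (options.kind == Kind::kUndefined) return NewNullProtoObject();
      if (options.kind == Kind::kObject) return options;
      throw std::runtime_error("TypeError: options must be an object or undefined");
    }

    // ecma402/#sec-coerceoptionstoobject: undefined -> fresh object,
    // anything else -> ToObject(options).
    Value CoerceOptionsToObject(const Value& options) {
      if (options.kind == Kind::kUndefined) return NewNullProtoObject();
      return ToObject(options);
    }

    int main() {
      Value prim{Kind::kPrimitive, "\"de\""};
      std::cout << CoerceOptionsToObject(prim).payload << "\n";  // wrap("de")
      try {
        GetOptionsObject(prim);
      } catch (const std::exception& e) {
        std::cout << e.what() << "\n";
      }
    }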
diff --git a/deps/v8/src/objects/intl-objects.h b/deps/v8/src/objects/intl-objects.h
index 8530a278e63..d4b4feed686 100644
--- a/deps/v8/src/objects/intl-objects.h
+++ b/deps/v8/src/objects/intl-objects.h
@@ -330,6 +330,14 @@ class Intl {
static const std::set<std::string>& GetAvailableLocales();
static const std::set<std::string>& GetAvailableLocalesForDateFormat();
+
+ // ecma402/#sec-getoptionsobject
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSReceiver> GetOptionsObject(
+ Isolate* isolate, Handle<Object> options, const char* service);
+
+ // ecma402/#sec-coerceoptionstoobject
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSReceiver> CoerceOptionsToObject(
+ Isolate* isolate, Handle<Object> options, const char* service);
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-array-buffer-inl.h b/deps/v8/src/objects/js-array-buffer-inl.h
index 12f8ef7796b..b4aa5e33b98 100644
--- a/deps/v8/src/objects/js-array-buffer-inl.h
+++ b/deps/v8/src/objects/js-array-buffer-inl.h
@@ -12,7 +12,6 @@
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/js-objects-inl.h"
#include "src/objects/objects-inl.h"
-#include "src/wasm/wasm-engine.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -44,7 +43,7 @@ void JSArrayBuffer::set_byte_length(size_t value) {
}
DEF_GETTER(JSArrayBuffer, backing_store, void*) {
- Address value = ReadExternalPointerField(kBackingStoreOffset, isolate,
+ Address value = ReadExternalPointerField(kBackingStoreOffset, cage_base,
kArrayBufferBackingStoreTag);
return reinterpret_cast<void*>(value);
}
@@ -200,7 +199,7 @@ void JSTypedArray::set_length(size_t value) {
}
DEF_GETTER(JSTypedArray, external_pointer, Address) {
- return ReadExternalPointerField(kExternalPointerOffset, isolate,
+ return ReadExternalPointerField(kExternalPointerOffset, cage_base,
kTypedArrayExternalPointerTag);
}
@@ -214,9 +213,9 @@ void JSTypedArray::set_external_pointer(Isolate* isolate, Address value) {
}
Address JSTypedArray::ExternalPointerCompensationForOnHeapArray(
- IsolateRoot isolate) {
+ PtrComprCageBase cage_base) {
#ifdef V8_COMPRESS_POINTERS
- return isolate.address();
+ return cage_base.address();
#else
return 0;
#endif
@@ -322,7 +321,7 @@ MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
DEF_GETTER(JSDataView, data_pointer, void*) {
return reinterpret_cast<void*>(ReadExternalPointerField(
- kDataPointerOffset, isolate, kDataViewDataPointerTag));
+ kDataPointerOffset, cage_base, kDataViewDataPointerTag));
}
void JSDataView::AllocateExternalPointerEntries(Isolate* isolate) {
diff --git a/deps/v8/src/objects/js-array-buffer.h b/deps/v8/src/objects/js-array-buffer.h
index 0c259ddece7..3ec5e0d5177 100644
--- a/deps/v8/src/objects/js-array-buffer.h
+++ b/deps/v8/src/objects/js-array-buffer.h
@@ -300,7 +300,7 @@ class JSTypedArray
// as Tagged_t value and an |external_pointer| value.
// For full-pointer mode the compensation value is zero.
static inline Address ExternalPointerCompensationForOnHeapArray(
- IsolateRoot isolate);
+ PtrComprCageBase cage_base);
//
// Serializer/deserializer support.
diff --git a/deps/v8/src/objects/js-array-inl.h b/deps/v8/src/objects/js-array-inl.h
index b53a8919a5a..ed7ab4e003f 100644
--- a/deps/v8/src/objects/js-array-inl.h
+++ b/deps/v8/src/objects/js-array-inl.h
@@ -22,7 +22,7 @@ CAST_ACCESSOR(JSArray)
CAST_ACCESSOR(JSArrayIterator)
DEF_GETTER(JSArray, length, Object) {
- return TaggedField<Object, kLengthOffset>::load(isolate, *this);
+ return TaggedField<Object, kLengthOffset>::load(cage_base, *this);
}
void JSArray::set_length(Object value, WriteBarrierMode mode) {
@@ -31,8 +31,8 @@ void JSArray::set_length(Object value, WriteBarrierMode mode) {
CONDITIONAL_WRITE_BARRIER(*this, kLengthOffset, value, mode);
}
-Object JSArray::length(IsolateRoot isolate, RelaxedLoadTag tag) const {
- return TaggedField<Object, kLengthOffset>::Relaxed_Load(isolate, *this);
+Object JSArray::length(PtrComprCageBase cage_base, RelaxedLoadTag tag) const {
+ return TaggedField<Object, kLengthOffset>::Relaxed_Load(cage_base, *this);
}
void JSArray::set_length(Smi length) {
diff --git a/deps/v8/src/objects/js-array.h b/deps/v8/src/objects/js-array.h
index 5a7da797cc6..a8b336d2be9 100644
--- a/deps/v8/src/objects/js-array.h
+++ b/deps/v8/src/objects/js-array.h
@@ -32,7 +32,7 @@ class JSArray : public JSObject {
// acquire/release semantics ever become necessary, the default setter should
// be reverted to non-atomic behavior, and setters with explicit tags
// introduced and used when required.
- Object length(IsolateRoot isolate, AcquireLoadTag tag) const = delete;
+ Object length(PtrComprCageBase cage_base, AcquireLoadTag tag) const = delete;
void set_length(Object value, ReleaseStoreTag tag,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER) = delete;
@@ -126,8 +126,15 @@ class JSArray : public JSObject {
// Max. number of elements being copied in Array builtins.
static const int kMaxCopyElements = 100;
+ // Valid array indices range from +0 <= i < 2^32 - 1 (kMaxUInt32).
+ static constexpr uint32_t kMaxArrayLength = JSObject::kMaxElementCount;
+ static constexpr uint32_t kMaxArrayIndex = JSObject::kMaxElementIndex;
+ STATIC_ASSERT(kMaxArrayLength == kMaxUInt32);
+ STATIC_ASSERT(kMaxArrayIndex == kMaxUInt32 - 1);
+
// This constant is somewhat arbitrary. Any large enough value would work.
- static const uint32_t kMaxFastArrayLength = 32 * 1024 * 1024;
+ static constexpr uint32_t kMaxFastArrayLength = 32 * 1024 * 1024;
+ STATIC_ASSERT(kMaxFastArrayLength <= kMaxArrayLength);
// Min. stack size for detecting an Array.prototype.join() call cycle.
static const uint32_t kMinJoinStackSize = 2;
@@ -137,9 +144,6 @@ class JSArray : public JSObject {
AllocationMemento::kSize) >>
kDoubleSizeLog2;
- // Valid array indices range from +0 <= i < 2^32 - 1 (kMaxUInt32).
- static const uint32_t kMaxArrayIndex = kMaxUInt32 - 1;
-
OBJECT_CONSTRUCTORS(JSArray, JSObject);
};
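
The js-array.h hunk promotes the array limits to constexpr constants cross-checked against JSObject's element limits. Restating the arithmetic those STATIC_ASSERTs encode in a self-contained form (the JSObject-side values are assumed from the asserts in the hunk):

    #include <cstdint>

    constexpr uint32_t kMaxUInt32 = 0xFFFFFFFFu;

    // Assumed values, implied by the STATIC_ASSERTs in the hunk:
    // JSObject::kMaxElementCount == 2^32 - 1, kMaxElementIndex == 2^32 - 2.
    constexpr uint32_t kMaxArrayLength = kMaxUInt32;      // valid lengths: 0 .. 2^32 - 1
    constexpr uint32_t kMaxArrayIndex = kMaxUInt32 - 1;   // valid indices: 0 .. 2^32 - 2
    constexpr uint32_t kMaxFastArrayLength = 32 * 1024 * 1024;

    static_assert(kMaxArrayLength == kMaxUInt32, "length limit is 2^32 - 1");
    static_assert(kMaxArrayIndex == kMaxUInt32 - 1, "largest index is length limit - 1");
    static_assert(kMaxFastArrayLength <= kMaxArrayLength, "fast path stays within the limit");

    int main() { return 0; }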
diff --git a/deps/v8/src/objects/js-collator.cc b/deps/v8/src/objects/js-collator.cc
index e51317cb5a2..be3541f29d1 100644
--- a/deps/v8/src/objects/js-collator.cc
+++ b/deps/v8/src/objects/js-collator.cc
@@ -283,20 +283,11 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map,
std::vector<std::string> requested_locales =
maybe_requested_locales.FromJust();
- // 2. If options is undefined, then
- if (options_obj->IsUndefined(isolate)) {
- // 2. a. Let options be ObjectCreate(null).
- options_obj = isolate->factory()->NewJSObjectWithNullProto();
- } else {
- // 3. Else
- // 3. a. Let options be ? ToObject(options).
- ASSIGN_RETURN_ON_EXCEPTION(isolate, options_obj,
- Object::ToObject(isolate, options_obj, service),
- JSCollator);
- }
-
- // At this point, options_obj can either be a JSObject or a JSProxy only.
- Handle<JSReceiver> options = Handle<JSReceiver>::cast(options_obj);
+ // 2. Set options to ? CoerceOptionsToObject(options).
+ Handle<JSReceiver> options;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, options,
+ Intl::CoerceOptionsToObject(isolate, options_obj, service), JSCollator);
// 4. Let usage be ? GetOption(options, "usage", "string", « "sort",
// "search" », "sort").
diff --git a/deps/v8/src/objects/js-date-time-format.cc b/deps/v8/src/objects/js-date-time-format.cc
index 42c9c6f31ca..89ac294a0c8 100644
--- a/deps/v8/src/objects/js-date-time-format.cc
+++ b/deps/v8/src/objects/js-date-time-format.cc
@@ -1955,7 +1955,7 @@ Handle<String> IcuDateFieldIdToDateType(int32_t field_id, Isolate* isolate) {
MaybeHandle<JSArray> JSDateTimeFormat::FormatToParts(
Isolate* isolate, Handle<JSDateTimeFormat> date_time_format,
- double date_value) {
+ double date_value, bool output_source) {
Factory* factory = isolate->factory();
icu::SimpleDateFormat* format =
date_time_format->icu_simple_date_format().raw();
@@ -1986,16 +1986,30 @@ MaybeHandle<JSArray> JSDateTimeFormat::FormatToParts(
isolate, substring,
Intl::ToString(isolate, formatted, previous_end_pos, begin_pos),
JSArray);
- Intl::AddElement(isolate, result, index,
- IcuDateFieldIdToDateType(-1, isolate), substring);
+ if (output_source) {
+ Intl::AddElement(isolate, result, index,
+ IcuDateFieldIdToDateType(-1, isolate), substring,
+ isolate->factory()->source_string(),
+ isolate->factory()->shared_string());
+ } else {
+ Intl::AddElement(isolate, result, index,
+ IcuDateFieldIdToDateType(-1, isolate), substring);
+ }
++index;
}
ASSIGN_RETURN_ON_EXCEPTION(
isolate, substring,
Intl::ToString(isolate, formatted, begin_pos, end_pos), JSArray);
- Intl::AddElement(isolate, result, index,
- IcuDateFieldIdToDateType(fp.getField(), isolate),
- substring);
+ if (output_source) {
+ Intl::AddElement(isolate, result, index,
+ IcuDateFieldIdToDateType(fp.getField(), isolate),
+ substring, isolate->factory()->source_string(),
+ isolate->factory()->shared_string());
+ } else {
+ Intl::AddElement(isolate, result, index,
+ IcuDateFieldIdToDateType(fp.getField(), isolate),
+ substring);
+ }
previous_end_pos = end_pos;
++index;
}
@@ -2003,8 +2017,15 @@ MaybeHandle<JSArray> JSDateTimeFormat::FormatToParts(
ASSIGN_RETURN_ON_EXCEPTION(
isolate, substring,
Intl::ToString(isolate, formatted, previous_end_pos, length), JSArray);
- Intl::AddElement(isolate, result, index,
- IcuDateFieldIdToDateType(-1, isolate), substring);
+ if (output_source) {
+ Intl::AddElement(isolate, result, index,
+ IcuDateFieldIdToDateType(-1, isolate), substring,
+ isolate->factory()->source_string(),
+ isolate->factory()->shared_string());
+ } else {
+ Intl::AddElement(isolate, result, index,
+ IcuDateFieldIdToDateType(-1, isolate), substring);
+ }
}
JSObject::ValidateElements(*result);
return result;
@@ -2092,10 +2113,29 @@ Maybe<bool> AddPartForFormatRange(Isolate* isolate, Handle<JSArray> array,
return Just(true);
}
+MaybeHandle<String> FormattedToString(Isolate* isolate,
+ const icu::FormattedValue& formatted,
+ bool* outputRange) {
+ UErrorCode status = U_ZERO_ERROR;
+ icu::UnicodeString result = formatted.toString(status);
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), String);
+ }
+ *outputRange = false;
+ icu::ConstrainedFieldPosition cfpos;
+ while (formatted.nextPosition(cfpos, status)) {
+ if (cfpos.getCategory() == UFIELD_CATEGORY_DATE_INTERVAL_SPAN) {
+ *outputRange = true;
+ break;
+ }
+ }
+ return Intl::ToString(isolate, result);
+}
+
// A helper function to convert the FormattedDateInterval to a
// MaybeHandle<JSArray> for the implementation of formatRangeToParts.
MaybeHandle<JSArray> FormattedDateIntervalToJSArray(
- Isolate* isolate, const icu::FormattedValue& formatted) {
+ Isolate* isolate, const icu::FormattedValue& formatted, bool* outputRange) {
UErrorCode status = U_ZERO_ERROR;
icu::UnicodeString result = formatted.toString(status);
@@ -2105,6 +2145,7 @@ MaybeHandle<JSArray> FormattedDateIntervalToJSArray(
int index = 0;
int32_t previous_end_pos = 0;
SourceTracker tracker;
+ *outputRange = false;
while (formatted.nextPosition(cfpos, status)) {
int32_t category = cfpos.getCategory();
int32_t field = cfpos.getField();
@@ -2113,6 +2154,7 @@ MaybeHandle<JSArray> FormattedDateIntervalToJSArray(
if (category == UFIELD_CATEGORY_DATE_INTERVAL_SPAN) {
DCHECK_LE(field, 2);
+ *outputRange = true;
tracker.Add(field, start, limit);
} else {
DCHECK(category == UFIELD_CATEGORY_DATE);
@@ -2154,7 +2196,9 @@ template <typename T>
MaybeHandle<T> FormatRangeCommon(
Isolate* isolate, Handle<JSDateTimeFormat> date_time_format, double x,
double y,
- MaybeHandle<T> (*formatToResult)(Isolate*, const icu::FormattedValue&)) {
+ MaybeHandle<T> (*formatToResult)(Isolate*, const icu::FormattedValue&,
+ bool*),
+ bool* outputRange) {
// Track newer feature formateRange and formatRangeToParts
isolate->CountUsage(v8::Isolate::UseCounterFeature::kDateTimeFormatRange);
@@ -2197,7 +2241,7 @@ MaybeHandle<T> FormatRangeCommon(
if (U_FAILURE(status)) {
THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), T);
}
- return formatToResult(isolate, formatted);
+ return formatToResult(isolate, formatted, outputRange);
}
} // namespace
@@ -2205,15 +2249,27 @@ MaybeHandle<T> FormatRangeCommon(
MaybeHandle<String> JSDateTimeFormat::FormatRange(
Isolate* isolate, Handle<JSDateTimeFormat> date_time_format, double x,
double y) {
- return FormatRangeCommon<String>(isolate, date_time_format, x, y,
- Intl::FormattedToString);
+ bool outputRange = true;
+ MaybeHandle<String> ret = FormatRangeCommon<String>(
+ isolate, date_time_format, x, y, FormattedToString, &outputRange);
+ if (outputRange) {
+ return ret;
+ }
+ return FormatDateTime(isolate,
+ *(date_time_format->icu_simple_date_format().raw()), x);
}
MaybeHandle<JSArray> JSDateTimeFormat::FormatRangeToParts(
Isolate* isolate, Handle<JSDateTimeFormat> date_time_format, double x,
double y) {
- return FormatRangeCommon<JSArray>(isolate, date_time_format, x, y,
- FormattedDateIntervalToJSArray);
+ bool outputRange = true;
+ MaybeHandle<JSArray> ret =
+ FormatRangeCommon<JSArray>(isolate, date_time_format, x, y,
+ FormattedDateIntervalToJSArray, &outputRange);
+ if (outputRange) {
+ return ret;
+ }
+ return JSDateTimeFormat::FormatToParts(isolate, date_time_format, x, true);
}
} // namespace internal
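
The formatRange and formatRangeToParts changes above make the result depend on whether ICU actually produced a date-interval span: when the two dates render identically, the code now falls back to single-date formatting (FormatDateTime, or FormatToParts with output_source set) instead of returning a degenerate range. The detection is the ConstrainedFieldPosition scan added to FormattedToString and FormattedDateIntervalToJSArray; a self-contained sketch of just that scan using public ICU APIs (the helper name is hypothetical):

    #include <unicode/formattedvalue.h>
    #include <unicode/uformattedvalue.h>

    // True if the formatted output contains at least one span field, i.e. ICU
    // really emitted a two-date range rather than a single collapsed date.
    bool ContainsDateIntervalSpan(const icu::FormattedValue& formatted,
                                  UErrorCode& status) {
      icu::ConstrainedFieldPosition cfpos;
      while (U_SUCCESS(status) && formatted.nextPosition(cfpos, status)) {
        if (cfpos.getCategory() == UFIELD_CATEGORY_DATE_INTERVAL_SPAN) {
          return true;
        }
      }
      return false;
    }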
diff --git a/deps/v8/src/objects/js-date-time-format.h b/deps/v8/src/objects/js-date-time-format.h
index de8ccfd069c..335d80a2dbc 100644
--- a/deps/v8/src/objects/js-date-time-format.h
+++ b/deps/v8/src/objects/js-date-time-format.h
@@ -60,7 +60,7 @@ class JSDateTimeFormat
// ecma402/#sec-Intl.DateTimeFormat.prototype.formatToParts
V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> FormatToParts(
Isolate* isolate, Handle<JSDateTimeFormat> date_time_format,
- double date_value);
+ double date_value, bool output_source);
// ecma402/#sec-intl.datetimeformat.prototype.formatRange
V8_WARN_UNUSED_RESULT static MaybeHandle<String> FormatRange(
diff --git a/deps/v8/src/objects/js-display-names.cc b/deps/v8/src/objects/js-display-names.cc
index 305aadb08ec..f95d90fcda6 100644
--- a/deps/v8/src/objects/js-display-names.cc
+++ b/deps/v8/src/objects/js-display-names.cc
@@ -506,10 +506,10 @@ MaybeHandle<JSDisplayNames> JSDisplayNames::New(Isolate* isolate,
std::vector<std::string> requested_locales =
maybe_requested_locales.FromJust();
- // 4. Let options be ? ToObject(options).
- ASSIGN_RETURN_ON_EXCEPTION(isolate, options,
- Object::ToObject(isolate, input_options),
- JSDisplayNames);
+ // 4. Let options be ? GetOptionsObject(options).
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, options, Intl::GetOptionsObject(isolate, input_options, service),
+ JSDisplayNames);
// Note: No need to create a record. It's not observable.
// 5. Let opt be a new Record.
@@ -519,7 +519,7 @@ MaybeHandle<JSDisplayNames> JSDisplayNames::New(Isolate* isolate,
// 7. Let matcher be ? GetOption(options, "localeMatcher", "string", «
// "lookup", "best fit" », "best fit").
Maybe<Intl::MatcherOption> maybe_locale_matcher =
- Intl::GetLocaleMatcher(isolate, options, "Intl.DisplayNames");
+ Intl::GetLocaleMatcher(isolate, options, service);
MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSDisplayNames>());
// 8. Set opt.[[localeMatcher]] to matcher.
@@ -579,8 +579,7 @@ MaybeHandle<JSDisplayNames> JSDisplayNames::New(Isolate* isolate,
// 10. Let s be ? GetOption(options, "style", "string",
// «"long", "short", "narrow"», "long").
Maybe<Style> maybe_style = Intl::GetStringOption<Style>(
- isolate, options, "style", "Intl.DisplayNames",
- {"long", "short", "narrow"},
+ isolate, options, "style", service, {"long", "short", "narrow"},
{Style::kLong, Style::kShort, Style::kNarrow}, Style::kLong);
MAYBE_RETURN(maybe_style, MaybeHandle<JSDisplayNames>());
Style style_enum = maybe_style.FromJust();
@@ -593,7 +592,7 @@ MaybeHandle<JSDisplayNames> JSDisplayNames::New(Isolate* isolate,
Maybe<Type> maybe_type =
FLAG_harmony_intl_displaynames_date_types
? Intl::GetStringOption<Type>(
- isolate, options, "type", "Intl.DisplayNames",
+ isolate, options, "type", service,
{"language", "region", "script", "currency", "weekday", "month",
"quarter", "dayPeriod", "dateTimeField"},
{
@@ -609,7 +608,7 @@ MaybeHandle<JSDisplayNames> JSDisplayNames::New(Isolate* isolate,
},
Type::kUndefined)
: Intl::GetStringOption<Type>(
- isolate, options, "type", "Intl.DisplayNames",
+ isolate, options, "type", service,
{"language", "region", "script", "currency"},
{
Type::kLanguage,
@@ -632,7 +631,7 @@ MaybeHandle<JSDisplayNames> JSDisplayNames::New(Isolate* isolate,
// 15. Let fallback be ? GetOption(options, "fallback", "string",
// « "code", "none" », "code").
Maybe<Fallback> maybe_fallback = Intl::GetStringOption<Fallback>(
- isolate, options, "fallback", "Intl.DisplayNames", {"code", "none"},
+ isolate, options, "fallback", service, {"code", "none"},
{Fallback::kCode, Fallback::kNone}, Fallback::kCode);
MAYBE_RETURN(maybe_fallback, MaybeHandle<JSDisplayNames>());
Fallback fallback_enum = maybe_fallback.FromJust();
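
Intl.DisplayNames, like Intl.ListFormat further below, now goes through Intl::GetOptionsObject instead of a bare ToObject, and reuses one service string for every GetOption call. GetOptionsObject is stricter than the CoerceOptionsToObject used for Collator and NumberFormat: options that are neither undefined nor an object throw rather than being coerced. A hedged sketch of those ECMA-402 steps (names and the exact error template are illustrative assumptions, not copied from the V8 helper):

    MaybeHandle<JSReceiver> GetOptionsObjectSketch(Isolate* isolate,
                                                   Handle<Object> options,
                                                   const char* service) {
      USE(service);  // the real helper folds the service name into its error
      // 1. If options is undefined, return OrdinaryObjectCreate(null).
      if (options->IsUndefined(isolate)) {
        return isolate->factory()->NewJSObjectWithNullProto();
      }
      // 2. If Type(options) is Object, return options.
      if (options->IsJSReceiver()) {
        return Handle<JSReceiver>::cast(options);
      }
      // 3. Otherwise, throw a TypeError (message template assumed here).
      THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kInvalidArgument),
                      JSReceiver);
    }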
diff --git a/deps/v8/src/objects/js-function-inl.h b/deps/v8/src/objects/js-function-inl.h
index 969e756156d..5c8cb5b644a 100644
--- a/deps/v8/src/objects/js-function-inl.h
+++ b/deps/v8/src/objects/js-function-inl.h
@@ -5,13 +5,17 @@
#ifndef V8_OBJECTS_JS_FUNCTION_INL_H_
#define V8_OBJECTS_JS_FUNCTION_INL_H_
+#include "src/objects/js-function.h"
+
+// Include other inline headers *after* including js-function.h, such that e.g.
+// the definition of JSFunction is available (and this comment prevents
+// clang-format from merging that include into the following ones).
#include "src/codegen/compiler.h"
#include "src/diagnostics/code-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/ic/ic.h"
#include "src/init/bootstrapper.h"
#include "src/objects/feedback-cell-inl.h"
-#include "src/objects/js-function.h"
#include "src/strings/string-builder-inl.h"
// Has to be the last include (doesn't have include guards):
@@ -196,73 +200,72 @@ NativeContext JSFunction::native_context() {
return context().native_context();
}
-void JSFunction::set_context(HeapObject value) {
+void JSFunction::set_context(HeapObject value, WriteBarrierMode mode) {
DCHECK(value.IsUndefined() || value.IsContext());
WRITE_FIELD(*this, kContextOffset, value);
- WRITE_BARRIER(*this, kContextOffset, value);
+ CONDITIONAL_WRITE_BARRIER(*this, kContextOffset, value, mode);
}
ACCESSORS_CHECKED(JSFunction, prototype_or_initial_map, HeapObject,
kPrototypeOrInitialMapOffset, map().has_prototype_slot())
DEF_GETTER(JSFunction, has_prototype_slot, bool) {
- return map(isolate).has_prototype_slot();
+ return map(cage_base).has_prototype_slot();
}
DEF_GETTER(JSFunction, initial_map, Map) {
- return Map::cast(prototype_or_initial_map(isolate));
+ return Map::cast(prototype_or_initial_map(cage_base));
}
DEF_GETTER(JSFunction, has_initial_map, bool) {
- DCHECK(has_prototype_slot(isolate));
- return prototype_or_initial_map(isolate).IsMap(isolate);
+ DCHECK(has_prototype_slot(cage_base));
+ return prototype_or_initial_map(cage_base).IsMap(cage_base);
}
DEF_GETTER(JSFunction, has_instance_prototype, bool) {
- DCHECK(has_prototype_slot(isolate));
- // Can't use ReadOnlyRoots(isolate) as this isolate could be produced by
- // i::GetIsolateForPtrCompr(HeapObject).
- return has_initial_map(isolate) ||
- !prototype_or_initial_map(isolate).IsTheHole(
- GetReadOnlyRoots(isolate));
+ DCHECK(has_prototype_slot(cage_base));
+ return has_initial_map(cage_base) ||
+ !prototype_or_initial_map(cage_base).IsTheHole(
+ GetReadOnlyRoots(cage_base));
}
DEF_GETTER(JSFunction, has_prototype, bool) {
- DCHECK(has_prototype_slot(isolate));
- return map(isolate).has_non_instance_prototype() ||
- has_instance_prototype(isolate);
+ DCHECK(has_prototype_slot(cage_base));
+ return map(cage_base).has_non_instance_prototype() ||
+ has_instance_prototype(cage_base);
}
DEF_GETTER(JSFunction, has_prototype_property, bool) {
- return (has_prototype_slot(isolate) && IsConstructor(isolate)) ||
- IsGeneratorFunction(shared(isolate).kind());
+ return (has_prototype_slot(cage_base) && IsConstructor(cage_base)) ||
+ IsGeneratorFunction(shared(cage_base).kind());
}
DEF_GETTER(JSFunction, PrototypeRequiresRuntimeLookup, bool) {
- return !has_prototype_property(isolate) ||
- map(isolate).has_non_instance_prototype();
+ return !has_prototype_property(cage_base) ||
+ map(cage_base).has_non_instance_prototype();
}
DEF_GETTER(JSFunction, instance_prototype, HeapObject) {
- DCHECK(has_instance_prototype(isolate));
- if (has_initial_map(isolate)) return initial_map(isolate).prototype(isolate);
+ DCHECK(has_instance_prototype(cage_base));
+ if (has_initial_map(cage_base))
+ return initial_map(cage_base).prototype(cage_base);
// When there is no initial map and the prototype is a JSReceiver, the
// initial map field is used for the prototype field.
- return HeapObject::cast(prototype_or_initial_map(isolate));
+ return HeapObject::cast(prototype_or_initial_map(cage_base));
}
DEF_GETTER(JSFunction, prototype, Object) {
- DCHECK(has_prototype(isolate));
+ DCHECK(has_prototype(cage_base));
// If the function's prototype property has been set to a non-JSReceiver
// value, that value is stored in the constructor field of the map.
- if (map(isolate).has_non_instance_prototype()) {
- Object prototype = map(isolate).GetConstructor(isolate);
+ if (map(cage_base).has_non_instance_prototype()) {
+ Object prototype = map(cage_base).GetConstructor(cage_base);
// The map must have a prototype in that field, not a back pointer.
- DCHECK(!prototype.IsMap(isolate));
- DCHECK(!prototype.IsFunctionTemplateInfo(isolate));
+ DCHECK(!prototype.IsMap(cage_base));
+ DCHECK(!prototype.IsFunctionTemplateInfo(cage_base));
return prototype;
}
- return instance_prototype(isolate);
+ return instance_prototype(cage_base);
}
bool JSFunction::is_compiled() const {
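
Besides the cage_base renames, the hunk above gives set_context an optional WriteBarrierMode, so callers that can prove a store needs no remembered-set entry may skip the barrier. A hedged illustration of how such a parameter is typically used (the two helper functions are hypothetical, not part of the patch):

    // Default: keep the write barrier, correct for any heap value.
    void InstallContext(JSFunction function, Context native_context) {
      function.set_context(native_context);  // UPDATE_WRITE_BARRIER
    }

    // Storing a read-only value such as undefined can never create an
    // old-to-new pointer, so the barrier may safely be skipped.
    void ClearContext(JSFunction function, Oddball undefined_value) {
      function.set_context(undefined_value, SKIP_WRITE_BARRIER);
    }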
diff --git a/deps/v8/src/objects/js-function.cc b/deps/v8/src/objects/js-function.cc
index 3fef9b665fc..35010be838e 100644
--- a/deps/v8/src/objects/js-function.cc
+++ b/deps/v8/src/objects/js-function.cc
@@ -283,7 +283,9 @@ void JSFunction::EnsureClosureFeedbackCellArray(
Isolate* const isolate = function->GetIsolate();
DCHECK(function->shared().is_compiled());
DCHECK(function->shared().HasFeedbackMetadata());
+#if V8_ENABLE_WEBASSEMBLY
if (function->shared().HasAsmWasmData()) return;
+#endif // V8_ENABLE_WEBASSEMBLY
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
DCHECK(function->shared().HasBytecodeArray());
@@ -333,7 +335,9 @@ void JSFunction::EnsureFeedbackVector(Handle<JSFunction> function,
DCHECK(is_compiled_scope->is_compiled());
DCHECK(function->shared().HasFeedbackMetadata());
if (function->has_feedback_vector()) return;
+#if V8_ENABLE_WEBASSEMBLY
if (function->shared().HasAsmWasmData()) return;
+#endif // V8_ENABLE_WEBASSEMBLY
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
DCHECK(function->shared().HasBytecodeArray());
@@ -410,7 +414,7 @@ void SetInstancePrototype(Isolate* isolate, Handle<JSFunction> function,
} else {
Handle<Map> new_map =
Map::Copy(isolate, initial_map, "SetInstancePrototype");
- JSFunction::SetInitialMap(function, new_map, value);
+ JSFunction::SetInitialMap(isolate, function, new_map, value);
// If the function is used as the global Array function, cache the
// updated initial maps (and transitioned versions) in the native context.
@@ -459,9 +463,9 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
Handle<Map> new_map =
Map::Copy(isolate, handle(function->map(), isolate), "SetPrototype");
- JSObject::MigrateToMap(isolate, function, new_map);
new_map->SetConstructor(*value);
new_map->set_has_non_instance_prototype(true);
+ JSObject::MigrateToMap(isolate, function, new_map);
FunctionKind kind = function->shared().kind();
Handle<Context> native_context(function->context().native_context(),
@@ -482,14 +486,19 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
SetInstancePrototype(isolate, function, construct_prototype);
}
-void JSFunction::SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
- Handle<HeapObject> prototype) {
- Isolate* isolate = function->GetIsolate();
+void JSFunction::SetInitialMap(Isolate* isolate, Handle<JSFunction> function,
+ Handle<Map> map, Handle<HeapObject> prototype) {
+ SetInitialMap(isolate, function, map, prototype, function);
+}
+
+void JSFunction::SetInitialMap(Isolate* isolate, Handle<JSFunction> function,
+ Handle<Map> map, Handle<HeapObject> prototype,
+ Handle<JSFunction> constructor) {
if (map->prototype() != *prototype) {
Map::SetPrototype(isolate, map, prototype);
}
+ map->SetConstructor(*constructor);
function->set_prototype_or_initial_map(*map);
- map->SetConstructor(*function);
if (FLAG_log_maps) {
LOG(isolate, MapEvent("InitialMap", Handle<Map>(), map, "",
SharedFunctionInfo::DebugName(
@@ -543,7 +552,7 @@ void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
// Finally link initial map and constructor function.
DCHECK(prototype->IsJSReceiver());
- JSFunction::SetInitialMap(function, map, prototype);
+ JSFunction::SetInitialMap(isolate, function, map, prototype);
map->StartInobjectSlackTracking();
}
@@ -609,12 +618,14 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case JS_WEAK_MAP_TYPE:
case JS_WEAK_REF_TYPE:
case JS_WEAK_SET_TYPE:
+#if V8_ENABLE_WEBASSEMBLY
case WASM_GLOBAL_OBJECT_TYPE:
case WASM_INSTANCE_OBJECT_TYPE:
case WASM_MEMORY_OBJECT_TYPE:
case WASM_MODULE_OBJECT_TYPE:
case WASM_TABLE_OBJECT_TYPE:
case WASM_VALUE_OBJECT_TYPE:
+#endif // V8_ENABLE_WEBASSEMBLY
return true;
case BIGINT_TYPE:
@@ -713,9 +724,8 @@ bool FastInitializeDerivedMap(Isolate* isolate, Handle<JSFunction> new_target,
in_object_properties, unused_property_fields);
map->set_new_target_is_base(false);
Handle<HeapObject> prototype(new_target->instance_prototype(), isolate);
- JSFunction::SetInitialMap(new_target, map, prototype);
+ JSFunction::SetInitialMap(isolate, new_target, map, prototype, constructor);
DCHECK(new_target->instance_prototype().IsJSReceiver());
- map->SetConstructor(*constructor);
map->set_construction_counter(Map::kNoSlackTracking);
map->StartInobjectSlackTracking();
return true;
@@ -819,7 +829,7 @@ bool UseFastFunctionNameLookup(Isolate* isolate, Map map) {
DCHECK(!map.is_dictionary_map());
HeapObject value;
ReadOnlyRoots roots(isolate);
- auto descriptors = map.instance_descriptors(kRelaxedLoad);
+ auto descriptors = map.instance_descriptors(isolate);
InternalIndex kNameIndex{JSFunction::kNameDescriptorIndex};
if (descriptors.GetKey(kNameIndex) != roots.name_string() ||
!descriptors.GetValue(kNameIndex)
@@ -922,6 +932,7 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
// If this function was compiled from asm.js, use the recorded offset
// information.
+#if V8_ENABLE_WEBASSEMBLY
if (shared_info->HasWasmExportedFunctionData()) {
Handle<WasmExportedFunctionData> function_data(
shared_info->wasm_exported_function_data(), isolate);
@@ -936,6 +947,7 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
offsets.second);
}
}
+#endif // V8_ENABLE_WEBASSEMBLY
if (shared_info->function_token_position() == kNoSourcePosition) {
// If the function token position isn't valid, return [native code] to
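
The SetInitialMap changes above thread the Isolate through explicitly, add an overload that names the constructor to record on the map, and set that constructor before the map is published on the function. This lets FastInitializeDerivedMap record the base constructor in the same call instead of patching the map afterwards. In terms of call shapes, mirroring the overloads added in this hunk:

    // 4-argument form: the function itself becomes the map's constructor.
    JSFunction::SetInitialMap(isolate, function, map, prototype);
    // ...which is equivalent to the 5-argument form with constructor == function:
    JSFunction::SetInitialMap(isolate, function, map, prototype, function);
    // Derived-class maps can now name the base constructor directly:
    JSFunction::SetInitialMap(isolate, new_target, map, prototype, constructor);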
diff --git a/deps/v8/src/objects/js-function.h b/deps/v8/src/objects/js-function.h
index 4583c3e868e..76af98efe72 100644
--- a/deps/v8/src/objects/js-function.h
+++ b/deps/v8/src/objects/js-function.h
@@ -70,7 +70,8 @@ class JSFunction : public JSFunctionOrBoundFunction {
// [context]: The context for this function.
inline Context context();
inline bool has_context() const;
- inline void set_context(HeapObject context);
+ inline void set_context(HeapObject context,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline JSGlobalProxy global_proxy();
inline NativeContext native_context();
inline int length();
@@ -221,8 +222,11 @@ class JSFunction : public JSFunctionOrBoundFunction {
// The initial map for an object created by this constructor.
DECL_GETTER(initial_map, Map)
- static void SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
- Handle<HeapObject> prototype);
+ static void SetInitialMap(Isolate* isolate, Handle<JSFunction> function,
+ Handle<Map> map, Handle<HeapObject> prototype);
+ static void SetInitialMap(Isolate* isolate, Handle<JSFunction> function,
+ Handle<Map> map, Handle<HeapObject> prototype,
+ Handle<JSFunction> constructor);
DECL_GETTER(has_initial_map, bool)
V8_EXPORT_PRIVATE static void EnsureHasInitialMap(
Handle<JSFunction> function);
diff --git a/deps/v8/src/objects/js-list-format.cc b/deps/v8/src/objects/js-list-format.cc
index 54f88221f7b..b2ff6e928ca 100644
--- a/deps/v8/src/objects/js-list-format.cc
+++ b/deps/v8/src/objects/js-list-format.cc
@@ -59,7 +59,6 @@ UListFormatterType GetIcuType(JSListFormat::Type type) {
MaybeHandle<JSListFormat> JSListFormat::New(Isolate* isolate, Handle<Map> map,
Handle<Object> locales,
Handle<Object> input_options) {
- Handle<JSReceiver> options;
// 3. Let requestedLocales be ? CanonicalizeLocaleList(locales).
Maybe<std::vector<std::string>> maybe_requested_locales =
Intl::CanonicalizeLocaleList(isolate, locales);
@@ -67,17 +66,12 @@ MaybeHandle<JSListFormat> JSListFormat::New(Isolate* isolate, Handle<Map> map,
std::vector<std::string> requested_locales =
maybe_requested_locales.FromJust();
- // 4. If options is undefined, then
- if (input_options->IsUndefined(isolate)) {
- // 4. a. Let options be ObjectCreate(null).
- options = isolate->factory()->NewJSObjectWithNullProto();
- // 5. Else
- } else {
- // 5. a. Let options be ? ToObject(options).
- ASSIGN_RETURN_ON_EXCEPTION(isolate, options,
- Object::ToObject(isolate, input_options),
- JSListFormat);
- }
+ Handle<JSReceiver> options;
+ const char* service = "Intl.ListFormat";
+ // 4. Let options be GetOptionsObject(_options_).
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, options, Intl::GetOptionsObject(isolate, input_options, service),
+ JSListFormat);
// Note: No need to create a record. It's not observable.
// 6. Let opt be a new Record.
@@ -85,7 +79,7 @@ MaybeHandle<JSListFormat> JSListFormat::New(Isolate* isolate, Handle<Map> map,
// 7. Let matcher be ? GetOption(options, "localeMatcher", "string", «
// "lookup", "best fit" », "best fit").
Maybe<Intl::MatcherOption> maybe_locale_matcher =
- Intl::GetLocaleMatcher(isolate, options, "Intl.ListFormat");
+ Intl::GetLocaleMatcher(isolate, options, service);
MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSListFormat>());
// 8. Set opt.[[localeMatcher]] to matcher.
@@ -107,8 +101,7 @@ MaybeHandle<JSListFormat> JSListFormat::New(Isolate* isolate, Handle<Map> map,
// 12. Let t be GetOption(options, "type", "string", «"conjunction",
// "disjunction", "unit"», "conjunction").
Maybe<Type> maybe_type = Intl::GetStringOption<Type>(
- isolate, options, "type", "Intl.ListFormat",
- {"conjunction", "disjunction", "unit"},
+ isolate, options, "type", service, {"conjunction", "disjunction", "unit"},
{Type::CONJUNCTION, Type::DISJUNCTION, Type::UNIT}, Type::CONJUNCTION);
MAYBE_RETURN(maybe_type, MaybeHandle<JSListFormat>());
Type type_enum = maybe_type.FromJust();
@@ -116,7 +109,7 @@ MaybeHandle<JSListFormat> JSListFormat::New(Isolate* isolate, Handle<Map> map,
// 14. Let s be ? GetOption(options, "style", "string",
// «"long", "short", "narrow"», "long").
Maybe<Style> maybe_style = Intl::GetStringOption<Style>(
- isolate, options, "style", "Intl.ListFormat", {"long", "short", "narrow"},
+ isolate, options, "style", service, {"long", "short", "narrow"},
{Style::LONG, Style::SHORT, Style::NARROW}, Style::LONG);
MAYBE_RETURN(maybe_style, MaybeHandle<JSListFormat>());
Style style_enum = maybe_style.FromJust();
diff --git a/deps/v8/src/objects/js-number-format.cc b/deps/v8/src/objects/js-number-format.cc
index 30eb612da92..cc5b77a005f 100644
--- a/deps/v8/src/objects/js-number-format.cc
+++ b/deps/v8/src/objects/js-number-format.cc
@@ -829,20 +829,12 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
std::vector<std::string> requested_locales =
maybe_requested_locales.FromJust();
- // 2. If options is undefined, then
- if (options_obj->IsUndefined(isolate)) {
- // 2. a. Let options be ObjectCreate(null).
- options_obj = isolate->factory()->NewJSObjectWithNullProto();
- } else {
- // 3. Else
- // 3. a. Let options be ? ToObject(options).
- ASSIGN_RETURN_ON_EXCEPTION(isolate, options_obj,
- Object::ToObject(isolate, options_obj, service),
- JSNumberFormat);
- }
-
- // At this point, options_obj can either be a JSObject or a JSProxy only.
- Handle<JSReceiver> options = Handle<JSReceiver>::cast(options_obj);
+ // 2. Set options to ? CoerceOptionsToObject(options).
+ Handle<JSReceiver> options;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, options,
+ Intl::CoerceOptionsToObject(isolate, options_obj, service),
+ JSNumberFormat);
// 4. Let opt be a new Record.
// 5. Let matcher be ? GetOption(options, "localeMatcher", "string", «
diff --git a/deps/v8/src/objects/js-objects-inl.h b/deps/v8/src/objects/js-objects-inl.h
index e2f6becc5de..cbbbc9fc9ec 100644
--- a/deps/v8/src/objects/js-objects-inl.h
+++ b/deps/v8/src/objects/js-objects-inl.h
@@ -21,6 +21,7 @@
#include "src/objects/shared-function-info.h"
#include "src/objects/slots.h"
#include "src/objects/smi-inl.h"
+#include "src/objects/swiss-name-dictionary-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -51,11 +52,12 @@ CAST_ACCESSOR(JSMessageObject)
CAST_ACCESSOR(JSReceiver)
DEF_GETTER(JSObject, elements, FixedArrayBase) {
- return TaggedField<FixedArrayBase, kElementsOffset>::load(isolate, *this);
+ return TaggedField<FixedArrayBase, kElementsOffset>::load(cage_base, *this);
}
-FixedArrayBase JSObject::elements(IsolateRoot isolate, RelaxedLoadTag) const {
- return TaggedField<FixedArrayBase, kElementsOffset>::Relaxed_Load(isolate,
+FixedArrayBase JSObject::elements(PtrComprCageBase cage_base,
+ RelaxedLoadTag) const {
+ return TaggedField<FixedArrayBase, kElementsOffset>::Relaxed_Load(cage_base,
*this);
}
@@ -248,11 +250,11 @@ void JSObject::initialize_elements() {
}
DEF_GETTER(JSObject, GetIndexedInterceptor, InterceptorInfo) {
- return map(isolate).GetIndexedInterceptor(isolate);
+ return map(cage_base).GetIndexedInterceptor(cage_base);
}
DEF_GETTER(JSObject, GetNamedInterceptor, InterceptorInfo) {
- return map(isolate).GetNamedInterceptor(isolate);
+ return map(cage_base).GetNamedInterceptor(cage_base);
}
// static
@@ -321,16 +323,17 @@ void JSObject::SetEmbedderField(int index, Smi value) {
// is needed to correctly distinguish between properties stored in-object and
// properties stored in the properties array.
Object JSObject::RawFastPropertyAt(FieldIndex index) const {
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
- return RawFastPropertyAt(isolate, index);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return RawFastPropertyAt(cage_base, index);
}
-Object JSObject::RawFastPropertyAt(IsolateRoot isolate,
+Object JSObject::RawFastPropertyAt(PtrComprCageBase cage_base,
FieldIndex index) const {
if (index.is_inobject()) {
- return TaggedField<Object>::load(isolate, *this, index.offset());
+ return TaggedField<Object>::load(cage_base, *this, index.offset());
} else {
- return property_array(isolate).get(isolate, index.outobject_array_index());
+ return property_array(cage_base).get(cage_base,
+ index.outobject_array_index());
}
}
@@ -424,7 +427,7 @@ ACCESSORS(JSGlobalObject, native_context, NativeContext, kNativeContextOffset)
ACCESSORS(JSGlobalObject, global_proxy, JSGlobalProxy, kGlobalProxyOffset)
DEF_GETTER(JSGlobalObject, native_context_unchecked, Object) {
- return TaggedField<Object, kNativeContextOffset>::load(isolate, *this);
+ return TaggedField<Object, kNativeContextOffset>::load(cage_base, *this);
}
bool JSMessageObject::DidEnsureSourcePositionsAvailable() const {
@@ -460,119 +463,119 @@ SMI_ACCESSORS(JSMessageObject, error_level, kErrorLevelOffset)
SMI_ACCESSORS(JSMessageObject, raw_type, kMessageTypeOffset)
DEF_GETTER(JSObject, GetElementsKind, ElementsKind) {
- ElementsKind kind = map(isolate).elements_kind();
+ ElementsKind kind = map(cage_base).elements_kind();
#if VERIFY_HEAP && DEBUG
FixedArrayBase fixed_array = FixedArrayBase::unchecked_cast(
- TaggedField<HeapObject, kElementsOffset>::load(isolate, *this));
+ TaggedField<HeapObject, kElementsOffset>::load(cage_base, *this));
// If a GC was caused while constructing this object, the elements
// pointer may point to a one pointer filler map.
- if (ElementsAreSafeToExamine(isolate)) {
- Map map = fixed_array.map(isolate);
+ if (ElementsAreSafeToExamine(cage_base)) {
+ Map map = fixed_array.map(cage_base);
if (IsSmiOrObjectElementsKind(kind)) {
- DCHECK(map == GetReadOnlyRoots(isolate).fixed_array_map() ||
- map == GetReadOnlyRoots(isolate).fixed_cow_array_map());
+ DCHECK(map == GetReadOnlyRoots(cage_base).fixed_array_map() ||
+ map == GetReadOnlyRoots(cage_base).fixed_cow_array_map());
} else if (IsDoubleElementsKind(kind)) {
- DCHECK(fixed_array.IsFixedDoubleArray(isolate) ||
- fixed_array == GetReadOnlyRoots(isolate).empty_fixed_array());
+ DCHECK(fixed_array.IsFixedDoubleArray(cage_base) ||
+ fixed_array == GetReadOnlyRoots(cage_base).empty_fixed_array());
} else if (kind == DICTIONARY_ELEMENTS) {
- DCHECK(fixed_array.IsFixedArray(isolate));
- DCHECK(fixed_array.IsNumberDictionary(isolate));
+ DCHECK(fixed_array.IsFixedArray(cage_base));
+ DCHECK(fixed_array.IsNumberDictionary(cage_base));
} else {
DCHECK(kind > DICTIONARY_ELEMENTS ||
IsAnyNonextensibleElementsKind(kind));
}
DCHECK(!IsSloppyArgumentsElementsKind(kind) ||
- elements(isolate).IsSloppyArgumentsElements());
+ elements(cage_base).IsSloppyArgumentsElements());
}
#endif
return kind;
}
DEF_GETTER(JSObject, GetElementsAccessor, ElementsAccessor*) {
- return ElementsAccessor::ForKind(GetElementsKind(isolate));
+ return ElementsAccessor::ForKind(GetElementsKind(cage_base));
}
DEF_GETTER(JSObject, HasObjectElements, bool) {
- return IsObjectElementsKind(GetElementsKind(isolate));
+ return IsObjectElementsKind(GetElementsKind(cage_base));
}
DEF_GETTER(JSObject, HasSmiElements, bool) {
- return IsSmiElementsKind(GetElementsKind(isolate));
+ return IsSmiElementsKind(GetElementsKind(cage_base));
}
DEF_GETTER(JSObject, HasSmiOrObjectElements, bool) {
- return IsSmiOrObjectElementsKind(GetElementsKind(isolate));
+ return IsSmiOrObjectElementsKind(GetElementsKind(cage_base));
}
DEF_GETTER(JSObject, HasDoubleElements, bool) {
- return IsDoubleElementsKind(GetElementsKind(isolate));
+ return IsDoubleElementsKind(GetElementsKind(cage_base));
}
DEF_GETTER(JSObject, HasHoleyElements, bool) {
- return IsHoleyElementsKind(GetElementsKind(isolate));
+ return IsHoleyElementsKind(GetElementsKind(cage_base));
}
DEF_GETTER(JSObject, HasFastElements, bool) {
- return IsFastElementsKind(GetElementsKind(isolate));
+ return IsFastElementsKind(GetElementsKind(cage_base));
}
DEF_GETTER(JSObject, HasFastPackedElements, bool) {
- return IsFastPackedElementsKind(GetElementsKind(isolate));
+ return IsFastPackedElementsKind(GetElementsKind(cage_base));
}
DEF_GETTER(JSObject, HasDictionaryElements, bool) {
- return IsDictionaryElementsKind(GetElementsKind(isolate));
+ return IsDictionaryElementsKind(GetElementsKind(cage_base));
}
DEF_GETTER(JSObject, HasPackedElements, bool) {
- return GetElementsKind(isolate) == PACKED_ELEMENTS;
+ return GetElementsKind(cage_base) == PACKED_ELEMENTS;
}
DEF_GETTER(JSObject, HasAnyNonextensibleElements, bool) {
- return IsAnyNonextensibleElementsKind(GetElementsKind(isolate));
+ return IsAnyNonextensibleElementsKind(GetElementsKind(cage_base));
}
DEF_GETTER(JSObject, HasSealedElements, bool) {
- return IsSealedElementsKind(GetElementsKind(isolate));
+ return IsSealedElementsKind(GetElementsKind(cage_base));
}
DEF_GETTER(JSObject, HasNonextensibleElements, bool) {
- return IsNonextensibleElementsKind(GetElementsKind(isolate));
+ return IsNonextensibleElementsKind(GetElementsKind(cage_base));
}
DEF_GETTER(JSObject, HasFastArgumentsElements, bool) {
- return IsFastArgumentsElementsKind(GetElementsKind(isolate));
+ return IsFastArgumentsElementsKind(GetElementsKind(cage_base));
}
DEF_GETTER(JSObject, HasSlowArgumentsElements, bool) {
- return IsSlowArgumentsElementsKind(GetElementsKind(isolate));
+ return IsSlowArgumentsElementsKind(GetElementsKind(cage_base));
}
DEF_GETTER(JSObject, HasSloppyArgumentsElements, bool) {
- return IsSloppyArgumentsElementsKind(GetElementsKind(isolate));
+ return IsSloppyArgumentsElementsKind(GetElementsKind(cage_base));
}
DEF_GETTER(JSObject, HasStringWrapperElements, bool) {
- return IsStringWrapperElementsKind(GetElementsKind(isolate));
+ return IsStringWrapperElementsKind(GetElementsKind(cage_base));
}
DEF_GETTER(JSObject, HasFastStringWrapperElements, bool) {
- return GetElementsKind(isolate) == FAST_STRING_WRAPPER_ELEMENTS;
+ return GetElementsKind(cage_base) == FAST_STRING_WRAPPER_ELEMENTS;
}
DEF_GETTER(JSObject, HasSlowStringWrapperElements, bool) {
- return GetElementsKind(isolate) == SLOW_STRING_WRAPPER_ELEMENTS;
+ return GetElementsKind(cage_base) == SLOW_STRING_WRAPPER_ELEMENTS;
}
DEF_GETTER(JSObject, HasTypedArrayElements, bool) {
- DCHECK(!elements(isolate).is_null());
- return map(isolate).has_typed_array_elements();
+ DCHECK(!elements(cage_base).is_null());
+ return map(cage_base).has_typed_array_elements();
}
-#define FIXED_TYPED_ELEMENTS_CHECK(Type, type, TYPE, ctype) \
- DEF_GETTER(JSObject, HasFixed##Type##Elements, bool) { \
- return map(isolate).elements_kind() == TYPE##_ELEMENTS; \
+#define FIXED_TYPED_ELEMENTS_CHECK(Type, type, TYPE, ctype) \
+ DEF_GETTER(JSObject, HasFixed##Type##Elements, bool) { \
+ return map(cage_base).elements_kind() == TYPE##_ELEMENTS; \
}
TYPED_ARRAYS(FIXED_TYPED_ELEMENTS_CHECK)
@@ -580,21 +583,21 @@ TYPED_ARRAYS(FIXED_TYPED_ELEMENTS_CHECK)
#undef FIXED_TYPED_ELEMENTS_CHECK
DEF_GETTER(JSObject, HasNamedInterceptor, bool) {
- return map(isolate).has_named_interceptor();
+ return map(cage_base).has_named_interceptor();
}
DEF_GETTER(JSObject, HasIndexedInterceptor, bool) {
- return map(isolate).has_indexed_interceptor();
+ return map(cage_base).has_indexed_interceptor();
}
RELEASE_ACQUIRE_ACCESSORS_CHECKED2(JSGlobalObject, global_dictionary,
GlobalDictionary, kPropertiesOrHashOffset,
- !HasFastProperties(isolate), true)
+ !HasFastProperties(cage_base), true)
DEF_GETTER(JSObject, element_dictionary, NumberDictionary) {
- DCHECK(HasDictionaryElements(isolate) ||
- HasSlowStringWrapperElements(isolate));
- return NumberDictionary::cast(elements(isolate));
+ DCHECK(HasDictionaryElements(cage_base) ||
+ HasSlowStringWrapperElements(cage_base));
+ return NumberDictionary::cast(elements(cage_base));
}
void JSReceiver::initialize_properties(Isolate* isolate) {
@@ -603,9 +606,9 @@ void JSReceiver::initialize_properties(Isolate* isolate) {
DCHECK(!ObjectInYoungGeneration(roots.empty_property_dictionary()));
DCHECK(!ObjectInYoungGeneration(roots.empty_ordered_property_dictionary()));
if (map(isolate).is_dictionary_map()) {
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
WRITE_FIELD(*this, kPropertiesOrHashOffset,
- roots.empty_ordered_property_dictionary());
+ roots.empty_swiss_property_dictionary());
} else {
WRITE_FIELD(*this, kPropertiesOrHashOffset,
roots.empty_property_dictionary());
@@ -616,51 +619,45 @@ void JSReceiver::initialize_properties(Isolate* isolate) {
}
DEF_GETTER(JSReceiver, HasFastProperties, bool) {
- DCHECK(raw_properties_or_hash(isolate).IsSmi() ||
- ((raw_properties_or_hash(isolate).IsGlobalDictionary(isolate) ||
- raw_properties_or_hash(isolate).IsNameDictionary(isolate) ||
- raw_properties_or_hash(isolate).IsOrderedNameDictionary(isolate)) ==
- map(isolate).is_dictionary_map()));
- return !map(isolate).is_dictionary_map();
+ DCHECK(raw_properties_or_hash(cage_base).IsSmi() ||
+ ((raw_properties_or_hash(cage_base).IsGlobalDictionary(cage_base) ||
+ raw_properties_or_hash(cage_base).IsNameDictionary(cage_base) ||
+ raw_properties_or_hash(cage_base).IsSwissNameDictionary(
+ cage_base)) == map(cage_base).is_dictionary_map()));
+ return !map(cage_base).is_dictionary_map();
}
DEF_GETTER(JSReceiver, property_dictionary, NameDictionary) {
- DCHECK(!IsJSGlobalObject(isolate));
- DCHECK(!HasFastProperties(isolate));
- DCHECK(!V8_DICT_MODE_PROTOTYPES_BOOL);
+ DCHECK(!IsJSGlobalObject(cage_base));
+ DCHECK(!HasFastProperties(cage_base));
+ DCHECK(!V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL);
- // Can't use ReadOnlyRoots(isolate) as this isolate could be produced by
- // i::GetIsolateForPtrCompr(HeapObject).
- Object prop = raw_properties_or_hash(isolate);
+ Object prop = raw_properties_or_hash(cage_base);
if (prop.IsSmi()) {
- return GetReadOnlyRoots(isolate).empty_property_dictionary();
+ return GetReadOnlyRoots(cage_base).empty_property_dictionary();
}
return NameDictionary::cast(prop);
}
-DEF_GETTER(JSReceiver, property_dictionary_ordered, OrderedNameDictionary) {
- DCHECK(!IsJSGlobalObject(isolate));
- DCHECK(!HasFastProperties(isolate));
- DCHECK(V8_DICT_MODE_PROTOTYPES_BOOL);
+DEF_GETTER(JSReceiver, property_dictionary_swiss, SwissNameDictionary) {
+ DCHECK(!IsJSGlobalObject(cage_base));
+ DCHECK(!HasFastProperties(cage_base));
+ DCHECK(V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL);
- // Can't use ReadOnlyRoots(isolate) as this isolate could be produced by
- // i::GetIsolateForPtrCompr(HeapObject).
- Object prop = raw_properties_or_hash(isolate);
+ Object prop = raw_properties_or_hash(cage_base);
if (prop.IsSmi()) {
- return GetReadOnlyRoots(isolate).empty_ordered_property_dictionary();
+ return GetReadOnlyRoots(cage_base).empty_swiss_property_dictionary();
}
- return OrderedNameDictionary::cast(prop);
+ return SwissNameDictionary::cast(prop);
}
// TODO(gsathya): Pass isolate directly to this function and access
// the heap from this.
DEF_GETTER(JSReceiver, property_array, PropertyArray) {
- DCHECK(HasFastProperties(isolate));
- // Can't use ReadOnlyRoots(isolate) as this isolate could be produced by
- // i::GetIsolateForPtrCompr(HeapObject).
- Object prop = raw_properties_or_hash(isolate);
- if (prop.IsSmi() || prop == GetReadOnlyRoots(isolate).empty_fixed_array()) {
- return GetReadOnlyRoots(isolate).empty_property_array();
+ DCHECK(HasFastProperties(cage_base));
+ Object prop = raw_properties_or_hash(cage_base);
+ if (prop.IsSmi() || prop == GetReadOnlyRoots(cage_base).empty_fixed_array()) {
+ return GetReadOnlyRoots(cage_base).empty_property_array();
}
return PropertyArray::cast(prop);
}
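
The V8_DICT_MODE_PROTOTYPES_BOOL to V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL rename, together with OrderedNameDictionary becoming SwissNameDictionary and property_dictionary_ordered becoming property_dictionary_swiss, repeats through the rest of this file and through js-objects.cc below. The caller-side branch keeps the same shape; a minimal sketch of that shape, using only accessors that appear in this patch (the wrapper function is hypothetical):

    int NumberOfSlowProperties(JSReceiver object) {
      if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
        return object.property_dictionary_swiss().NumberOfElements();
      }
      return object.property_dictionary().NumberOfElements();
    }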
diff --git a/deps/v8/src/objects/js-objects.cc b/deps/v8/src/objects/js-objects.cc
index 91006bcf2d6..01df8e1524f 100644
--- a/deps/v8/src/objects/js-objects.cc
+++ b/deps/v8/src/objects/js-objects.cc
@@ -7,7 +7,6 @@
#include "src/api/api-arguments-inl.h"
#include "src/common/globals.h"
#include "src/date/date.h"
-#include "src/debug/debug-wasm-objects.h"
#include "src/execution/arguments.h"
#include "src/execution/frames.h"
#include "src/execution/isolate.h"
@@ -27,9 +26,10 @@
#include "src/objects/field-type.h"
#include "src/objects/fixed-array.h"
#include "src/objects/heap-number.h"
-#include "src/objects/js-array-buffer.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/lookup.h"
+#include "src/objects/map-updater.h"
#include "src/objects/objects-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-break-iterator.h"
@@ -66,11 +66,16 @@
#include "src/objects/prototype-info.h"
#include "src/objects/prototype.h"
#include "src/objects/shared-function-info.h"
+#include "src/objects/swiss-name-dictionary-inl.h"
#include "src/objects/transitions.h"
#include "src/strings/string-builder-inl.h"
#include "src/strings/string-stream.h"
#include "src/utils/ostreams.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-objects.h"
+#include "src/debug/debug-wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -221,7 +226,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
return Just(false);
}
- Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(isolate),
isolate);
bool stable = true;
@@ -247,7 +252,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
// shape.
if (stable) {
DCHECK_EQ(from->map(), *map);
- DCHECK_EQ(*descriptors, map->instance_descriptors(kRelaxedLoad));
+ DCHECK_EQ(*descriptors, map->instance_descriptors(isolate));
PropertyDetails details = descriptors->GetDetails(i);
if (!details.IsEnumerable()) continue;
@@ -266,7 +271,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, prop_value, Object::GetProperty(&it), Nothing<bool>());
stable = from->map() == *map;
- descriptors.PatchValue(map->instance_descriptors(kRelaxedLoad));
+ descriptors.PatchValue(map->instance_descriptors(isolate));
}
} else {
// If the map did change, do a slower lookup. We are still guaranteed
@@ -292,7 +297,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
if (result.IsNothing()) return result;
if (stable) {
stable = from->map() == *map;
- descriptors.PatchValue(map->instance_descriptors(kRelaxedLoad));
+ descriptors.PatchValue(map->instance_descriptors(isolate));
}
} else {
if (excluded_properties != nullptr &&
@@ -355,9 +360,9 @@ Maybe<bool> JSReceiver::SetOrCopyDataProperties(
source_length = JSGlobalObject::cast(*from)
.global_dictionary(kAcquireLoad)
.NumberOfEnumerableProperties();
- } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ } else if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
source_length =
- from->property_dictionary_ordered().NumberOfEnumerableProperties();
+ from->property_dictionary_swiss().NumberOfEnumerableProperties();
} else {
source_length =
from->property_dictionary().NumberOfEnumerableProperties();
@@ -643,7 +648,7 @@ Object SetHashAndUpdateProperties(HeapObject properties, int hash) {
if (properties == roots.empty_fixed_array() ||
properties == roots.empty_property_array() ||
properties == roots.empty_property_dictionary() ||
- properties == roots.empty_ordered_property_dictionary()) {
+ properties == roots.empty_swiss_property_dictionary()) {
return Smi::FromInt(hash);
}
@@ -658,9 +663,9 @@ Object SetHashAndUpdateProperties(HeapObject properties, int hash) {
return properties;
}
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- DCHECK(properties.IsOrderedNameDictionary());
- OrderedNameDictionary::cast(properties).SetHash(hash);
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ DCHECK(properties.IsSwissNameDictionary());
+ SwissNameDictionary::cast(properties).SetHash(hash);
} else {
DCHECK(properties.IsNameDictionary());
NameDictionary::cast(properties).SetHash(hash);
@@ -678,12 +683,13 @@ int GetIdentityHashHelper(JSReceiver object) {
if (properties.IsPropertyArray()) {
return PropertyArray::cast(properties).Hash();
}
- if (V8_DICT_MODE_PROTOTYPES_BOOL && properties.IsOrderedNameDictionary()) {
- return OrderedNameDictionary::cast(properties).Hash();
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL &&
+ properties.IsSwissNameDictionary()) {
+ return SwissNameDictionary::cast(properties).Hash();
}
if (properties.IsNameDictionary()) {
- DCHECK(!V8_DICT_MODE_PROTOTYPES_BOOL);
+ DCHECK(!V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL);
return NameDictionary::cast(properties).Hash();
}
@@ -695,7 +701,7 @@ int GetIdentityHashHelper(JSReceiver object) {
ReadOnlyRoots roots = object.GetReadOnlyRoots();
DCHECK(properties == roots.empty_fixed_array() ||
properties == roots.empty_property_dictionary() ||
- properties == roots.empty_ordered_property_dictionary());
+ properties == roots.empty_swiss_property_dictionary());
#endif
return PropertyArray::kNoHashSentinel;
@@ -782,12 +788,11 @@ void JSReceiver::DeleteNormalizedProperty(Handle<JSReceiver> object,
cell->ClearAndInvalidate(ReadOnlyRoots(isolate));
} else {
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- Handle<OrderedNameDictionary> dictionary(
- object->property_dictionary_ordered(), isolate);
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ Handle<SwissNameDictionary> dictionary(
+ object->property_dictionary_swiss(), isolate);
- dictionary =
- OrderedNameDictionary::DeleteEntry(isolate, dictionary, entry);
+ dictionary = SwissNameDictionary::DeleteEntry(isolate, dictionary, entry);
object->SetProperties(*dictionary);
} else {
Handle<NameDictionary> dictionary(object->property_dictionary(), isolate);
@@ -1914,7 +1919,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
if (!map->OnlyHasSimpleProperties()) return Just(false);
Handle<JSObject> object(JSObject::cast(*receiver), isolate);
- Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(isolate),
isolate);
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
@@ -1943,7 +1948,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
// side-effects.
bool stable = *map == object->map();
if (stable) {
- descriptors.PatchValue(map->instance_descriptors(kRelaxedLoad));
+ descriptors.PatchValue(map->instance_descriptors(isolate));
}
for (InternalIndex index : InternalIndex::Range(number_of_own_descriptors)) {
@@ -1956,7 +1961,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
// Directly decode from the descriptor array if |from| did not change shape.
if (stable) {
DCHECK_EQ(object->map(), *map);
- DCHECK_EQ(*descriptors, map->instance_descriptors(kRelaxedLoad));
+ DCHECK_EQ(*descriptors, map->instance_descriptors(isolate));
PropertyDetails details = descriptors->GetDetails(index);
if (!details.IsEnumerable()) continue;
@@ -1977,7 +1982,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, prop_value, Object::GetProperty(&it), Nothing<bool>());
stable = object->map() == *map;
- descriptors.PatchValue(map->instance_descriptors(kRelaxedLoad));
+ descriptors.PatchValue(map->instance_descriptors(isolate));
}
} else {
// If the map did change, do a slower lookup. We are still guaranteed that
@@ -2133,8 +2138,8 @@ MaybeHandle<JSObject> JSObject::New(Handle<JSFunction> constructor,
ASSIGN_RETURN_ON_EXCEPTION(
isolate, initial_map,
JSFunction::GetDerivedMap(isolate, constructor, new_target), JSObject);
- int initial_capacity = V8_DICT_MODE_PROTOTYPES_BOOL
- ? OrderedNameDictionary::kInitialCapacity
+ int initial_capacity = V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL
+ ? SwissNameDictionary::kInitialCapacity
: NameDictionary::kInitialCapacity;
Handle<JSObject> result = isolate->factory()->NewFastOrSlowJSObjectFromMap(
initial_map, initial_capacity, AllocationType::kYoung, site);
@@ -2289,6 +2294,7 @@ int JSObject::GetHeaderSize(InstanceType type,
case JS_SEGMENTS_TYPE:
return JSSegments::kHeaderSize;
#endif // V8_INTL_SUPPORT
+#if V8_ENABLE_WEBASSEMBLY
case WASM_GLOBAL_OBJECT_TYPE:
return WasmGlobalObject::kHeaderSize;
case WASM_INSTANCE_OBJECT_TYPE:
@@ -2303,6 +2309,7 @@ int JSObject::GetHeaderSize(InstanceType type,
return WasmValueObject::kHeaderSize;
case WASM_EXCEPTION_OBJECT_TYPE:
return WasmExceptionObject::kHeaderSize;
+#endif // V8_ENABLE_WEBASSEMBLY
default:
UNREACHABLE();
}
@@ -2463,19 +2470,19 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object, Handle<Name> name,
DCHECK_EQ(dictionary->CellAt(entry).value(), *value);
}
} else {
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- Handle<OrderedNameDictionary> dictionary(
- object->property_dictionary_ordered(), isolate);
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ Handle<SwissNameDictionary> dictionary(
+ object->property_dictionary_swiss(), isolate);
InternalIndex entry = dictionary->FindEntry(isolate, *name);
if (entry.is_not_found()) {
DCHECK_IMPLIES(object->map().is_prototype_map(),
Map::IsPrototypeChainInvalidated(object->map()));
- dictionary = OrderedNameDictionary::Add(isolate, dictionary, name,
- value, details)
- .ToHandleChecked();
+ dictionary =
+ SwissNameDictionary::Add(isolate, dictionary, name, value, details);
object->SetProperties(*dictionary);
} else {
- dictionary->SetEntry(entry, *name, *value, details);
+ dictionary->ValueAtPut(entry, *value);
+ dictionary->DetailsAtPut(entry, details);
}
} else {
Handle<NameDictionary> dictionary(object->property_dictionary(), isolate);
@@ -2650,8 +2657,9 @@ void JSObject::PrintInstanceMigration(FILE* file, Map original_map,
return;
}
PrintF(file, "[migrating]");
- DescriptorArray o = original_map.instance_descriptors(kRelaxedLoad);
- DescriptorArray n = new_map.instance_descriptors(kRelaxedLoad);
+ Isolate* isolate = GetIsolate();
+ DescriptorArray o = original_map.instance_descriptors(isolate);
+ DescriptorArray n = new_map.instance_descriptors(isolate);
for (InternalIndex i : original_map.IterateOwnDescriptors()) {
Representation o_r = o.GetDetails(i).representation();
Representation n_r = n.GetDetails(i).representation();
@@ -2836,9 +2844,9 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
isolate->factory()->NewFixedArray(inobject);
Handle<DescriptorArray> old_descriptors(
- old_map->instance_descriptors(isolate, kRelaxedLoad), isolate);
+ old_map->instance_descriptors(isolate), isolate);
Handle<DescriptorArray> new_descriptors(
- new_map->instance_descriptors(isolate, kRelaxedLoad), isolate);
+ new_map->instance_descriptors(isolate), isolate);
int old_nof = old_map->NumberOfOwnDescriptors();
int new_nof = new_map->NumberOfOwnDescriptors();
@@ -2913,10 +2921,6 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
Heap* heap = isolate->heap();
- // Invalidate slots manually later in case of tagged to untagged translation.
- // In all other cases the recorded slot remains dereferenceable.
- heap->NotifyObjectLayoutChange(*object, no_gc, InvalidateRecordedSlots::kNo);
-
// Copy (real) inobject properties. If necessary, stop at number_of_fields to
// avoid overwriting |one_pointer_filler_map|.
int limit = std::min(inobject, number_of_fields);
@@ -2966,23 +2970,21 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
property_count += expected_additional_properties;
} else {
// Make space for two more properties.
- int initial_capacity = V8_DICT_MODE_PROTOTYPES_BOOL
- ? OrderedNameDictionary::kInitialCapacity
+ int initial_capacity = V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL
+ ? SwissNameDictionary::kInitialCapacity
: NameDictionary::kInitialCapacity;
property_count += initial_capacity;
}
Handle<NameDictionary> dictionary;
- Handle<OrderedNameDictionary> ord_dictionary;
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- ord_dictionary =
- isolate->factory()->NewOrderedNameDictionary(property_count);
+ Handle<SwissNameDictionary> ord_dictionary;
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ ord_dictionary = isolate->factory()->NewSwissNameDictionary(property_count);
} else {
dictionary = isolate->factory()->NewNameDictionary(property_count);
}
- Handle<DescriptorArray> descs(
- map->instance_descriptors(isolate, kRelaxedLoad), isolate);
+ Handle<DescriptorArray> descs(map->instance_descriptors(isolate), isolate);
for (InternalIndex i : InternalIndex::Range(real_size)) {
PropertyDetails details = descs->GetDetails(i);
Handle<Name> key(descs->GetKey(isolate, i), isolate);
@@ -3011,16 +3013,15 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
: PropertyConstness::kMutable;
PropertyDetails d(details.kind(), details.attributes(), constness);
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
ord_dictionary =
- OrderedNameDictionary::Add(isolate, ord_dictionary, key, value, d)
- .ToHandleChecked();
+ SwissNameDictionary::Add(isolate, ord_dictionary, key, value, d);
} else {
dictionary = NameDictionary::Add(isolate, dictionary, key, value, d);
}
}
- if (!V8_DICT_MODE_PROTOTYPES_BOOL) {
+ if (!V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
// Copy the next enumeration index from instance descriptor.
dictionary->set_next_enumeration_index(real_size + 1);
}
@@ -3030,11 +3031,6 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
Heap* heap = isolate->heap();
- // Invalidate slots manually later in case the new map has in-object
- // properties. If not, it is not possible to store an untagged value
- // in a recorded slot.
- heap->NotifyObjectLayoutChange(*object, no_gc, InvalidateRecordedSlots::kNo);
-
// Resize the object in the heap if necessary.
int old_instance_size = map->instance_size();
int new_instance_size = new_map->instance_size();
@@ -3050,7 +3046,7 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
// the left-over space to avoid races with the sweeper thread.
object->synchronized_set_map(*new_map);
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
object->SetProperties(*ord_dictionary);
} else {
object->SetProperties(*dictionary);
@@ -3060,9 +3056,6 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
// garbage.
int inobject_properties = new_map->GetInObjectProperties();
if (inobject_properties) {
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(*object);
- chunk->InvalidateRecordedSlots(*object);
-
for (int i = 0; i < inobject_properties; i++) {
FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
object->FastPropertyAtPut(index, Smi::zero());
@@ -3159,6 +3152,7 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
DCHECK(object->map().GetInObjectProperties() == map->GetInObjectProperties());
ElementsKind obj_kind = object->map().elements_kind();
ElementsKind map_kind = map->elements_kind();
+ Isolate* isolate = object->GetIsolate();
if (map_kind != obj_kind) {
ElementsKind to_kind = GetMoreGeneralElementsKind(map_kind, obj_kind);
if (IsDictionaryElementsKind(obj_kind)) {
@@ -3169,7 +3163,7 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
} else {
TransitionElementsKind(object, to_kind);
}
- map = Map::ReconfigureElementsKind(object->GetIsolate(), map, to_kind);
+ map = MapUpdater{isolate, map}.ReconfigureElementsKind(to_kind);
}
int number_of_fields = map->NumberOfFields();
int inobject = map->GetInObjectProperties();
@@ -3179,9 +3173,8 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
// Allocate mutable double boxes if necessary. It is always necessary if we
// have external properties, but is also necessary if we only have inobject
// properties but don't unbox double fields.
- Isolate* isolate = object->GetIsolate();
- Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(isolate),
isolate);
Handle<FixedArray> storage = isolate->factory()->NewFixedArray(inobject);
@@ -3428,11 +3421,11 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
Factory* factory = isolate->factory();
Handle<NameDictionary> dictionary;
- Handle<OrderedNameDictionary> ord_dictionary;
+ Handle<SwissNameDictionary> swiss_dictionary;
int number_of_elements;
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- ord_dictionary = handle(object->property_dictionary_ordered(), isolate);
- number_of_elements = ord_dictionary->NumberOfElements();
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ swiss_dictionary = handle(object->property_dictionary_swiss(), isolate);
+ number_of_elements = swiss_dictionary->NumberOfElements();
} else {
dictionary = handle(object->property_dictionary(), isolate);
number_of_elements = dictionary->NumberOfElements();
@@ -3444,9 +3437,9 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
Handle<FixedArray> iteration_order;
int iteration_length;
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
// |iteration_order| remains empty handle, we don't need it.
- iteration_length = ord_dictionary->UsedCapacity();
+ iteration_length = swiss_dictionary->UsedCapacity();
} else {
iteration_order = NameDictionary::IterationIndices(isolate, dictionary);
iteration_length = dictionary->NumberOfElements();
@@ -3458,14 +3451,14 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
ReadOnlyRoots roots(isolate);
for (int i = 0; i < iteration_length; i++) {
PropertyKind kind;
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- InternalIndex index(i);
- Object key = ord_dictionary->KeyAt(index);
- if (!OrderedNameDictionary::IsKey(roots, key)) {
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ InternalIndex index(swiss_dictionary->EntryForEnumerationIndex(i));
+ Object key = swiss_dictionary->KeyAt(index);
+ if (!SwissNameDictionary::IsKey(roots, key)) {
// Ignore deleted entries.
continue;
}
- kind = ord_dictionary->DetailsAt(index).kind();
+ kind = swiss_dictionary->DetailsAt(index).kind();
} else {
InternalIndex index(Smi::ToInt(iteration_order->get(i)));
DCHECK(dictionary->IsKey(roots, dictionary->KeyAt(isolate, index)));
@@ -3533,16 +3526,16 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
Object value;
PropertyDetails details = PropertyDetails::Empty();
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- InternalIndex index(i);
- Object key_obj = ord_dictionary->KeyAt(index);
- if (!OrderedNameDictionary::IsKey(roots, key_obj)) {
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ InternalIndex index(swiss_dictionary->EntryForEnumerationIndex(i));
+ Object key_obj = swiss_dictionary->KeyAt(index);
+ if (!SwissNameDictionary::IsKey(roots, key_obj)) {
continue;
}
k = Name::cast(key_obj);
- value = ord_dictionary->ValueAt(index);
- details = ord_dictionary->DetailsAt(index);
+ value = swiss_dictionary->ValueAt(index);
+ details = swiss_dictionary->DetailsAt(index);
} else {
InternalIndex index(Smi::ToInt(iteration_order->get(i)));
k = dictionary->NameAt(index);
@@ -3787,7 +3780,7 @@ bool TestFastPropertiesIntegrityLevel(Map map, PropertyAttributes level) {
DCHECK(!map.IsCustomElementsReceiverMap());
DCHECK(!map.is_dictionary_map());
- DescriptorArray descriptors = map.instance_descriptors(kRelaxedLoad);
+ DescriptorArray descriptors = map.instance_descriptors();
for (InternalIndex i : map.IterateOwnDescriptors()) {
if (descriptors.GetKey(i).IsPrivate()) continue;
PropertyDetails details = descriptors.GetDetails(i);
@@ -3806,9 +3799,9 @@ bool TestPropertiesIntegrityLevel(JSObject object, PropertyAttributes level) {
return TestFastPropertiesIntegrityLevel(object.map(), level);
}
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
return TestDictionaryPropertiesIntegrityLevel(
- object.property_dictionary_ordered(), object.GetReadOnlyRoots(), level);
+ object.property_dictionary_swiss(), object.GetReadOnlyRoots(), level);
} else {
return TestDictionaryPropertiesIntegrityLevel(
object.property_dictionary(), object.GetReadOnlyRoots(), level);
@@ -4111,9 +4104,9 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
isolate);
JSObject::ApplyAttributesToDictionary(isolate, roots, dictionary,
attrs);
- } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- Handle<OrderedNameDictionary> dictionary(
- object->property_dictionary_ordered(), isolate);
+ } else if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ Handle<SwissNameDictionary> dictionary(
+ object->property_dictionary_swiss(), isolate);
JSObject::ApplyAttributesToDictionary(isolate, roots, dictionary,
attrs);
} else {
@@ -4169,6 +4162,20 @@ Handle<Object> JSObject::FastPropertyAt(Handle<JSObject> object,
return Object::WrapForRead(isolate, raw_value, representation);
}
+// static
+Handle<Object> JSObject::DictionaryPropertyAt(Handle<JSObject> object,
+ InternalIndex dict_index) {
+ Isolate* isolate = object->GetIsolate();
+
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ SwissNameDictionary dict = object->property_dictionary_swiss();
+ return handle(dict.ValueAt(dict_index), isolate);
+ } else {
+ NameDictionary dict = object->property_dictionary();
+ return handle(dict.ValueAt(dict_index), isolate);
+ }
+}
+
// TODO(cbruni/jkummerow): Consider moving this into elements.cc.
bool JSObject::HasEnumerableElements() {
// TODO(cbruni): cleanup
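The new JSObject::DictionaryPropertyAt above reads a slow property out of whichever backing store the build selects: SwissNameDictionary when V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL is set, NameDictionary otherwise. A minimal standalone sketch of that dispatch pattern follows; the types and flag are illustrative stand-ins, not V8's classes.

#include <string>
#include <unordered_map>

// Stand-ins for the two slow-property representations (illustrative only).
struct SwissDict {
  std::unordered_map<int, std::string> slots;
  const std::string& ValueAt(int i) const { return slots.at(i); }
};
struct LegacyDict {
  std::unordered_map<int, std::string> slots;
  const std::string& ValueAt(int i) const { return slots.at(i); }
};

// Stand-in for the build-time flag; both branches below must still compile,
// exactly as with V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL.
constexpr bool kEnableSwissNameDictionary = true;

struct ObjectWithSlowProperties {
  SwissDict swiss;
  LegacyDict legacy;
  // Pick the backing store once, then do the identical read on either one.
  const std::string& DictionaryPropertyAt(int index) const {
    if (kEnableSwissNameDictionary) {
      return swiss.ValueAt(index);
    } else {
      return legacy.ValueAt(index);
    }
  }
};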
@@ -4328,7 +4335,7 @@ MaybeHandle<Object> JSObject::SetAccessor(Handle<JSObject> object,
Object JSObject::SlowReverseLookup(Object value) {
if (HasFastProperties()) {
- DescriptorArray descs = map().instance_descriptors(kRelaxedLoad);
+ DescriptorArray descs = map().instance_descriptors();
bool value_is_number = value.IsNumber();
for (InternalIndex i : map().IterateOwnDescriptors()) {
PropertyDetails details = descs.GetDetails(i);
@@ -4358,8 +4365,8 @@ Object JSObject::SlowReverseLookup(Object value) {
return JSGlobalObject::cast(*this)
.global_dictionary(kAcquireLoad)
.SlowReverseLookup(value);
- } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- return property_dictionary_ordered().SlowReverseLookup(GetIsolate(), value);
+ } else if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ return property_dictionary_swiss().SlowReverseLookup(GetIsolate(), value);
} else {
return property_dictionary().SlowReverseLookup(value);
}
@@ -4429,35 +4436,40 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
} else {
Handle<Map> new_map =
Map::Copy(isolate, handle(object->map(), isolate), "CopyAsPrototype");
-
- JSObject::MigrateToMap(isolate, object, new_map);
-
- if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL && !object->HasFastProperties()) {
- Handle<NameDictionary> dict =
- handle(object->property_dictionary(), isolate);
- ReadOnlyRoots roots(isolate);
- for (InternalIndex index : dict->IterateEntries()) {
- Object k;
- if (!dict->ToKey(roots, index, &k)) continue;
-
- PropertyDetails details = dict->DetailsAt(index);
- details = details.CopyWithConstness(PropertyConstness::kConst);
- dict->DetailsAtPut(index, details);
- }
- }
-
- object->map().set_is_prototype_map(true);
+ new_map->set_is_prototype_map(true);
// Replace the pointer to the exact constructor with the Object function
// from the same context if undetectable from JS. This is to avoid keeping
// memory alive unnecessarily.
- Object maybe_constructor = object->map().GetConstructor();
+ Object maybe_constructor = new_map->GetConstructor();
if (maybe_constructor.IsJSFunction()) {
JSFunction constructor = JSFunction::cast(maybe_constructor);
if (!constructor.shared().IsApiFunction()) {
Context context = constructor.context().native_context();
JSFunction object_function = context.object_function();
- object->map().SetConstructor(object_function);
+ new_map->SetConstructor(object_function);
+ }
+ }
+ JSObject::MigrateToMap(isolate, object, new_map);
+
+ if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL && !object->HasFastProperties()) {
+ ReadOnlyRoots roots(isolate);
+ DisallowHeapAllocation no_gc;
+
+ auto make_constant = [&](auto dict) {
+ for (InternalIndex index : dict.IterateEntries()) {
+ Object k;
+ if (!dict.ToKey(roots, index, &k)) continue;
+
+ PropertyDetails details = dict.DetailsAt(index);
+ details = details.CopyWithConstness(PropertyConstness::kConst);
+ dict.DetailsAtPut(index, details);
+ }
+ };
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ make_constant(object->property_dictionary_swiss());
+ } else {
+ make_constant(object->property_dictionary());
}
}
}
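The rewritten OptimizeAsPrototype block above uses a generic lambda (make_constant) so the same loop body can run over either property_dictionary_swiss() or property_dictionary(), two types with no common base class. A hedged sketch of that technique with toy types:

#include <map>
#include <string>

// Two unrelated dictionary types with the same member shape, standing in for
// SwissNameDictionary and NameDictionary (illustrative only).
struct DictA { std::map<std::string, int> details; };
struct DictB { std::map<std::string, int> details; };

constexpr bool kUseDictA = true;

void MarkAllConstant(DictA& a, DictB& b) {
  // A generic lambda is instantiated separately for each argument type, so
  // one body serves both dictionaries without virtual dispatch.
  auto make_constant = [](auto& dict) {
    for (auto& entry : dict.details) entry.second = 1;  // 1 ~ "kConst"
  };
  if (kUseDictA) {
    make_constant(a);
  } else {
    make_constant(b);
  }
}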
@@ -4581,6 +4593,22 @@ void InvalidateOnePrototypeValidityCellInternal(Map map) {
PrototypeInfo prototype_info = PrototypeInfo::cast(maybe_prototype_info);
prototype_info.set_prototype_chain_enum_cache(Object());
}
+
+  // We may inline accesses to constants stored in dictionary mode prototypes in
+  // optimized code. When doing so, we install dependencies of group
+ // |kPrototypeCheckGroup| on each prototype between the receiver's immediate
+ // prototype and the holder of the constant property. This dependency is used
+ // both to detect changes to the constant value itself, and other changes to
+ // the prototype chain that invalidate the access to the given property from
+ // the given receiver (like adding the property to another prototype between
+ // the receiver and the (previous) holder). This works by de-opting this group
+ // whenever the validity cell would be invalidated. However, the actual value
+ // of the validity cell is not used. Therefore, we always trigger the de-opt
+ // here, even if the cell was already invalid.
+ if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL && map.is_dictionary_map()) {
+ map.dependent_code().DeoptimizeDependentCodeGroup(
+ DependentCode::kPrototypeCheckGroup);
+ }
}
void InvalidatePrototypeChainsInternal(Map map) {
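The comment added to InvalidateOnePrototypeValidityCellInternal describes a dependency scheme: optimized code that inlined a constant from a dictionary-mode prototype registers itself against that prototype and must be deoptimized whenever the validity cell would be invalidated, regardless of the cell's current value. A conceptual sketch of such a scheme, using plain callbacks rather than V8's DependentCode machinery:

#include <functional>
#include <utility>
#include <vector>

struct PrototypeLike {
  bool validity_cell_valid = true;
  std::vector<std::function<void()>> dependent_code;

  // Optimized code registers a "deoptimize me" callback when it inlines a
  // constant found on this prototype.
  void RegisterDependency(std::function<void()> deopt) {
    dependent_code.push_back(std::move(deopt));
  }

  // Mirrors the behaviour described above: dependents always fire, even if
  // the cell was already invalid, because the cell's value itself is not
  // what the dependency protects.
  void InvalidateValidityCell() {
    validity_cell_valid = false;
    for (auto& deopt : dependent_code) deopt();
    dependent_code.clear();
  }
};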
@@ -5309,13 +5337,24 @@ int JSMessageObject::GetColumnNumber() const {
return info.column; // Note: No '+1' in contrast to GetLineNumber.
}
+String JSMessageObject::GetSource() const {
+ Script script_object = script();
+ if (script_object.HasValidSource()) {
+ Object source = script_object.source();
+ if (source.IsString()) return String::cast(source);
+ }
+ return ReadOnlyRoots(GetIsolate()).empty_string();
+}
+
Handle<String> JSMessageObject::GetSourceLine() const {
Isolate* isolate = GetIsolate();
Handle<Script> the_script(script(), isolate);
+#if V8_ENABLE_WEBASSEMBLY
if (the_script->type() == Script::TYPE_WASM) {
return isolate->factory()->empty_string();
}
+#endif // V8_ENABLE_WEBASSEMBLY
Script::PositionInfo info;
const Script::OffsetFlag offset_flag = Script::WITH_OFFSET;
diff --git a/deps/v8/src/objects/js-objects.h b/deps/v8/src/objects/js-objects.h
index d78df5c431d..b1f22ed8f6f 100644
--- a/deps/v8/src/objects/js-objects.h
+++ b/deps/v8/src/objects/js-objects.h
@@ -44,13 +44,13 @@ class JSReceiver : public HeapObject {
// map.
DECL_GETTER(property_array, PropertyArray)
- // Gets slow properties for non-global objects (if v8_dict_mode_prototypes is
- // not set).
+ // Gets slow properties for non-global objects (if
+ // v8_enable_swiss_name_dictionary is not set).
DECL_GETTER(property_dictionary, NameDictionary)
- // Gets slow properties for non-global objects (if v8_dict_mode_prototypes is
- // set).
- DECL_GETTER(property_dictionary_ordered, OrderedNameDictionary)
+ // Gets slow properties for non-global objects (if
+ // v8_enable_swiss_name_dictionary is set).
+ DECL_GETTER(property_dictionary_swiss, SwissNameDictionary)
// Sets the properties backing store and makes sure any existing hash is moved
// to the new properties store. To clear out the properties store, pass in the
@@ -319,7 +319,7 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
// acquire/release semantics ever become necessary, the default setter should
// be reverted to non-atomic behavior, and setters with explicit tags
// introduced and used when required.
- FixedArrayBase elements(IsolateRoot isolate,
+ FixedArrayBase elements(PtrComprCageBase cage_base,
AcquireLoadTag tag) const = delete;
void set_elements(FixedArrayBase value, ReleaseStoreTag tag,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER) = delete;
@@ -643,12 +643,17 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
int unused_property_fields,
const char* reason);
+ // Access property in dictionary mode object at the given dictionary index.
+ static Handle<Object> DictionaryPropertyAt(Handle<JSObject> object,
+ InternalIndex dict_index);
+
// Access fast-case object properties at index.
static Handle<Object> FastPropertyAt(Handle<JSObject> object,
Representation representation,
FieldIndex index);
inline Object RawFastPropertyAt(FieldIndex index) const;
- inline Object RawFastPropertyAt(IsolateRoot isolate, FieldIndex index) const;
+ inline Object RawFastPropertyAt(PtrComprCageBase cage_base,
+ FieldIndex index) const;
inline void FastPropertyAtPut(FieldIndex index, Object value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
@@ -738,14 +743,16 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
// If a GC was caused while constructing this object, the elements pointer
// may point to a one pointer filler map. The object won't be rooted, but
// our heap verification code could stumble across it.
- V8_EXPORT_PRIVATE bool ElementsAreSafeToExamine(IsolateRoot isolate) const;
+ V8_EXPORT_PRIVATE bool ElementsAreSafeToExamine(
+ PtrComprCageBase cage_base) const;
#endif
Object SlowReverseLookup(Object value);
// Maximal number of elements (numbered 0 .. kMaxElementCount - 1).
// Also maximal value of JSArray's length property.
- static const uint32_t kMaxElementCount = 0xffffffffu;
+ static constexpr uint32_t kMaxElementCount = kMaxUInt32;
+ static constexpr uint32_t kMaxElementIndex = kMaxElementCount - 1;
// Constants for heuristics controlling conversion of fast elements
// to slow elements.
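The header change above replaces the literal 0xffffffffu with kMaxUInt32 and introduces kMaxElementIndex, making the count/index off-by-one explicit: an object can hold at most 2^32 - 1 elements, numbered 0 through 2^32 - 2. A small standalone sketch of the relationship, with the same arithmetic but illustrative names:

#include <cstddef>
#include <cstdint>
#include <limits>

constexpr uint32_t kMaxUInt32 = std::numeric_limits<uint32_t>::max();
constexpr uint32_t kMaxElementCount = kMaxUInt32;            // up to 2^32 - 1 elements
constexpr uint32_t kMaxElementIndex = kMaxElementCount - 1;  // indices 0 .. 2^32 - 2

static_assert(kMaxElementIndex == 0xfffffffeu, "largest usable element index");
// The later STATIC_ASSERT in lookup-inl.h relies on the same property: every
// valid element index fits in size_t, even on 32-bit targets.
static_assert(kMaxElementIndex <= std::numeric_limits<std::size_t>::max(),
              "element indices are representable as size_t");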
@@ -1117,6 +1124,9 @@ class JSMessageObject : public JSObject {
// EnsureSourcePositionsAvailable must have been called before calling this.
V8_EXPORT_PRIVATE int GetColumnNumber() const;
+  // Returns the source code.
+ V8_EXPORT_PRIVATE String GetSource() const;
+
// Returns the source code line containing the given source
// position, or the empty string if the position is invalid.
// EnsureSourcePositionsAvailable must have been called before calling this.
diff --git a/deps/v8/src/objects/js-objects.tq b/deps/v8/src/objects/js-objects.tq
index d074ab1fb2e..9f5bf8554e9 100644
--- a/deps/v8/src/objects/js-objects.tq
+++ b/deps/v8/src/objects/js-objects.tq
@@ -6,7 +6,7 @@
@abstract
@highestInstanceTypeWithinParentClassRange
extern class JSReceiver extends HeapObject {
- properties_or_hash: FixedArrayBase|PropertyArray|Smi;
+ properties_or_hash: SwissNameDictionary|FixedArrayBase|PropertyArray|Smi;
}
type Constructor extends JSReceiver;
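In the Torque type above, properties_or_hash now also admits SwissNameDictionary, so a single field can hold a hash (Smi), a fast PropertyArray, or a slow dictionary. In plain C++ terms this is a tagged union; a rough std::variant sketch of the idea (illustrative stubs, not how V8 actually tags heap objects):

#include <cstdint>
#include <variant>
#include <vector>

// Loose stand-ins: in the real union, NameDictionary and the empty fixed
// array are both covered by FixedArrayBase.
struct PropertyArrayStub { std::vector<int> fields; };
struct FixedArrayBaseStub {};
struct SwissNameDictionaryStub {};
using Smi = int32_t;  // stand-in for a small-integer hash

using PropertiesOrHash = std::variant<Smi, FixedArrayBaseStub,
                                      PropertyArrayStub, SwissNameDictionaryStub>;

bool HoldsSwissDictionary(const PropertiesOrHash& p) {
  return std::holds_alternative<SwissNameDictionaryStub>(p);
}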
@@ -73,12 +73,12 @@ macro GetDerivedMap(implicit context: Context)(
macro AllocateFastOrSlowJSObjectFromMap(implicit context: Context)(map: Map):
JSObject {
- let properties: EmptyFixedArray|NameDictionary|OrderedNameDictionary =
+ let properties: EmptyFixedArray|NameDictionary|SwissNameDictionary =
kEmptyFixedArray;
if (IsDictionaryMap(map)) {
if (kDictModePrototypes) {
properties =
- AllocateOrderedNameDictionary(kOrderedNameDictionaryInitialCapacity);
+ AllocateSwissNameDictionary(kSwissNameDictionaryInitialCapacity);
} else {
properties = AllocateNameDictionary(kNameDictionaryInitialCapacity);
}
@@ -161,10 +161,9 @@ extern class JSStringIterator extends JSObject {
extern macro AllocateJSObjectFromMap(Map): JSObject;
extern macro AllocateJSObjectFromMap(
Map,
- NameDictionary | OrderedNameDictionary | EmptyFixedArray |
+ NameDictionary | SwissNameDictionary | EmptyFixedArray |
PropertyArray): JSObject;
extern macro AllocateJSObjectFromMap(
- Map,
- NameDictionary | OrderedNameDictionary | EmptyFixedArray | PropertyArray,
+ Map, NameDictionary | SwissNameDictionary | EmptyFixedArray | PropertyArray,
FixedArray, constexpr AllocationFlag,
constexpr SlackTrackingMode): JSObject;
diff --git a/deps/v8/src/objects/js-plural-rules.cc b/deps/v8/src/objects/js-plural-rules.cc
index 47600ba333c..9c2d77d6bc9 100644
--- a/deps/v8/src/objects/js-plural-rules.cc
+++ b/deps/v8/src/objects/js-plural-rules.cc
@@ -70,34 +70,26 @@ MaybeHandle<JSPluralRules> JSPluralRules::New(Isolate* isolate, Handle<Map> map,
std::vector<std::string> requested_locales =
maybe_requested_locales.FromJust();
- // 2. If options is undefined, then
- if (options_obj->IsUndefined(isolate)) {
- // 2. a. Let options be ObjectCreate(null).
- options_obj = isolate->factory()->NewJSObjectWithNullProto();
- } else {
- // 3. Else
- // 3. a. Let options be ? ToObject(options).
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, options_obj,
- Object::ToObject(isolate, options_obj, "Intl.PluralRules"),
- JSPluralRules);
- }
-
- // At this point, options_obj can either be a JSObject or a JSProxy only.
- Handle<JSReceiver> options = Handle<JSReceiver>::cast(options_obj);
+ // 2. Set options to ? CoerceOptionsToObject(options).
+ Handle<JSReceiver> options;
+ const char* service = "Intl.PluralRules";
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, options,
+ Intl::CoerceOptionsToObject(isolate, options_obj, service),
+ JSPluralRules);
// 5. Let matcher be ? GetOption(options, "localeMatcher", "string",
// « "lookup", "best fit" », "best fit").
// 6. Set opt.[[localeMatcher]] to matcher.
Maybe<Intl::MatcherOption> maybe_locale_matcher =
- Intl::GetLocaleMatcher(isolate, options, "Intl.PluralRules");
+ Intl::GetLocaleMatcher(isolate, options, service);
MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSPluralRules>());
Intl::MatcherOption matcher = maybe_locale_matcher.FromJust();
// 7. Let t be ? GetOption(options, "type", "string", « "cardinal",
// "ordinal" », "cardinal").
Maybe<Type> maybe_type = Intl::GetStringOption<Type>(
- isolate, options, "type", "Intl.PluralRules", {"cardinal", "ordinal"},
+ isolate, options, "type", service, {"cardinal", "ordinal"},
{Type::CARDINAL, Type::ORDINAL}, Type::CARDINAL);
MAYBE_RETURN(maybe_type, MaybeHandle<JSPluralRules>());
Type type = maybe_type.FromJust();
diff --git a/deps/v8/src/objects/js-regexp.cc b/deps/v8/src/objects/js-regexp.cc
index 36d1480dafd..b8a01418dae 100644
--- a/deps/v8/src/objects/js-regexp.cc
+++ b/deps/v8/src/objects/js-regexp.cc
@@ -65,8 +65,8 @@ Handle<JSRegExpResultIndices> JSRegExpResultIndices::BuildIndices(
Handle<FixedArray> names(Handle<FixedArray>::cast(maybe_names));
int num_names = names->length() >> 1;
Handle<HeapObject> group_names;
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- group_names = isolate->factory()->NewOrderedNameDictionary(num_names);
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ group_names = isolate->factory()->NewSwissNameDictionary(num_names);
} else {
group_names = isolate->factory()->NewNameDictionary(num_names);
}
@@ -81,12 +81,10 @@ Handle<JSRegExpResultIndices> JSRegExpResultIndices::BuildIndices(
if (!capture_indices->IsUndefined(isolate)) {
capture_indices = Handle<JSArray>::cast(capture_indices);
}
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- group_names =
- OrderedNameDictionary::Add(
- isolate, Handle<OrderedNameDictionary>::cast(group_names), name,
- capture_indices, PropertyDetails::Empty())
- .ToHandleChecked();
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ group_names = SwissNameDictionary::Add(
+ isolate, Handle<SwissNameDictionary>::cast(group_names), name,
+ capture_indices, PropertyDetails::Empty());
} else {
group_names = NameDictionary::Add(
isolate, Handle<NameDictionary>::cast(group_names), name,
diff --git a/deps/v8/src/objects/js-relative-time-format.cc b/deps/v8/src/objects/js-relative-time-format.cc
index 08d7358f181..caa4ce562d7 100644
--- a/deps/v8/src/objects/js-relative-time-format.cc
+++ b/deps/v8/src/objects/js-relative-time-format.cc
@@ -74,25 +74,20 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::New(
std::vector<std::string> requested_locales =
maybe_requested_locales.FromJust();
- // 2. If options is undefined, then
+ // 2. Set options to ? CoerceOptionsToObject(options).
Handle<JSReceiver> options;
- if (input_options->IsUndefined(isolate)) {
- // 2. a. Let options be ObjectCreate(null).
- options = isolate->factory()->NewJSObjectWithNullProto();
- // 3. Else
- } else {
- // 3. a. Let options be ? ToObject(options).
- ASSIGN_RETURN_ON_EXCEPTION(isolate, options,
- Object::ToObject(isolate, input_options),
- JSRelativeTimeFormat);
- }
+ const char* service = "Intl.RelativeTimeFormat";
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, options,
+ Intl::CoerceOptionsToObject(isolate, input_options, service),
+ JSRelativeTimeFormat);
// 4. Let opt be a new Record.
// 5. Let matcher be ? GetOption(options, "localeMatcher", "string", «
// "lookup", "best fit" », "best fit").
// 6. Set opt.[[localeMatcher]] to matcher.
Maybe<Intl::MatcherOption> maybe_locale_matcher =
- Intl::GetLocaleMatcher(isolate, options, "Intl.RelativeTimeFormat");
+ Intl::GetLocaleMatcher(isolate, options, service);
MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSRelativeTimeFormat>());
Intl::MatcherOption matcher = maybe_locale_matcher.FromJust();
@@ -100,7 +95,7 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::New(
// `"string"`, *undefined*, *undefined*).
std::unique_ptr<char[]> numbering_system_str = nullptr;
Maybe<bool> maybe_numberingSystem = Intl::GetNumberingSystem(
- isolate, options, "Intl.RelativeTimeFormat", &numbering_system_str);
+ isolate, options, service, &numbering_system_str);
// 8. If _numberingSystem_ is not *undefined*, then
// a. If _numberingSystem_ does not match the
// `(3*8alphanum) *("-" (3*8alphanum))` sequence, throw a *RangeError*
@@ -153,9 +148,8 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::New(
// 16. Let s be ? GetOption(options, "style", "string",
// «"long", "short", "narrow"», "long").
Maybe<Style> maybe_style = Intl::GetStringOption<Style>(
- isolate, options, "style", "Intl.RelativeTimeFormat",
- {"long", "short", "narrow"}, {Style::LONG, Style::SHORT, Style::NARROW},
- Style::LONG);
+ isolate, options, "style", service, {"long", "short", "narrow"},
+ {Style::LONG, Style::SHORT, Style::NARROW}, Style::LONG);
MAYBE_RETURN(maybe_style, MaybeHandle<JSRelativeTimeFormat>());
Style style_enum = maybe_style.FromJust();
@@ -164,8 +158,8 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::New(
// 18. Let numeric be ? GetOption(options, "numeric", "string",
// «"always", "auto"», "always").
Maybe<Numeric> maybe_numeric = Intl::GetStringOption<Numeric>(
- isolate, options, "numeric", "Intl.RelativeTimeFormat",
- {"always", "auto"}, {Numeric::ALWAYS, Numeric::AUTO}, Numeric::ALWAYS);
+ isolate, options, "numeric", service, {"always", "auto"},
+ {Numeric::ALWAYS, Numeric::AUTO}, Numeric::ALWAYS);
MAYBE_RETURN(maybe_numeric, MaybeHandle<JSRelativeTimeFormat>());
Numeric numeric_enum = maybe_numeric.FromJust();
diff --git a/deps/v8/src/objects/js-segmenter.cc b/deps/v8/src/objects/js-segmenter.cc
index a799ea56b1b..386150613ae 100644
--- a/deps/v8/src/objects/js-segmenter.cc
+++ b/deps/v8/src/objects/js-segmenter.cc
@@ -33,24 +33,19 @@ MaybeHandle<JSSegmenter> JSSegmenter::New(Isolate* isolate, Handle<Map> map,
std::vector<std::string> requested_locales =
maybe_requested_locales.FromJust();
- // 5. If options is undefined, then
Handle<JSReceiver> options;
- if (input_options->IsUndefined(isolate)) {
- // a. Let options be ObjectCreate(null).
- options = isolate->factory()->NewJSObjectWithNullProto();
- } else { // 6. Else
- // a. Let options be ? ToObject(options).
- ASSIGN_RETURN_ON_EXCEPTION(isolate, options,
- Object::ToObject(isolate, input_options),
- JSSegmenter);
- }
+ const char* service = "Intl.Segmenter";
+ // 5. Let options be GetOptionsObject(_options_).
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, options, Intl::GetOptionsObject(isolate, input_options, service),
+ JSSegmenter);
// 7. Let opt be a new Record.
// 8. Let matcher be ? GetOption(options, "localeMatcher", "string",
// « "lookup", "best fit" », "best fit").
// 9. Set opt.[[localeMatcher]] to matcher.
Maybe<Intl::MatcherOption> maybe_locale_matcher =
- Intl::GetLocaleMatcher(isolate, options, "Intl.Segmenter");
+ Intl::GetLocaleMatcher(isolate, options, service);
MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSSegmenter>());
Intl::MatcherOption matcher = maybe_locale_matcher.FromJust();
@@ -74,7 +69,7 @@ MaybeHandle<JSSegmenter> JSSegmenter::New(Isolate* isolate, Handle<Map> map,
// 13. Let granularity be ? GetOption(options, "granularity", "string", «
// "grapheme", "word", "sentence" », "grapheme").
Maybe<Granularity> maybe_granularity = Intl::GetStringOption<Granularity>(
- isolate, options, "granularity", "Intl.Segmenter",
+ isolate, options, "granularity", service,
{"grapheme", "word", "sentence"},
{Granularity::GRAPHEME, Granularity::WORD, Granularity::SENTENCE},
Granularity::GRAPHEME);
diff --git a/deps/v8/src/objects/keys.cc b/deps/v8/src/objects/keys.cc
index 6abb9d42aec..798402de4b9 100644
--- a/deps/v8/src/objects/keys.cc
+++ b/deps/v8/src/objects/keys.cc
@@ -69,8 +69,7 @@ static Handle<FixedArray> CombineKeys(Isolate* isolate,
int nof_descriptors = map.NumberOfOwnDescriptors();
if (nof_descriptors == 0 && !may_have_elements) return prototype_chain_keys;
- Handle<DescriptorArray> descs(map.instance_descriptors(kRelaxedLoad),
- isolate);
+ Handle<DescriptorArray> descs(map.instance_descriptors(isolate), isolate);
int own_keys_length = own_keys.is_null() ? 0 : own_keys->length();
Handle<FixedArray> combined_keys = isolate->factory()->NewFixedArray(
own_keys_length + prototype_chain_keys_length);
@@ -373,7 +372,7 @@ Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
Handle<JSObject> object) {
Handle<Map> map(object->map(), isolate);
Handle<FixedArray> keys(
- map->instance_descriptors(kRelaxedLoad).enum_cache().keys(), isolate);
+ map->instance_descriptors(isolate).enum_cache().keys(), isolate);
// Check if the {map} has a valid enum length, which implies that it
// must have a valid enum cache as well.
@@ -398,7 +397,7 @@ Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
}
Handle<DescriptorArray> descriptors =
- Handle<DescriptorArray>(map->instance_descriptors(kRelaxedLoad), isolate);
+ Handle<DescriptorArray>(map->instance_descriptors(isolate), isolate);
isolate->counters()->enum_cache_misses()->Increment();
// Create the keys array.
@@ -830,7 +829,7 @@ void CommonCopyEnumKeysTo(Isolate* isolate, Handle<Dictionary> dictionary,
continue;
} else {
if (Dictionary::kIsOrderedDictionaryType) {
- storage->set(properties, dictionary->NameAt(i));
+ storage->set(properties, Name::cast(key));
} else {
// If the dictionary does not store elements in enumeration order,
// we need to sort it afterwards in CopyEnumKeysTo. To enable this we
@@ -875,11 +874,11 @@ void CopyEnumKeysTo(Isolate* isolate, Handle<Dictionary> dictionary,
}
template <>
-void CopyEnumKeysTo(Isolate* isolate, Handle<OrderedNameDictionary> dictionary,
+void CopyEnumKeysTo(Isolate* isolate, Handle<SwissNameDictionary> dictionary,
Handle<FixedArray> storage, KeyCollectionMode mode,
KeyAccumulator* accumulator) {
- CommonCopyEnumKeysTo<OrderedNameDictionary>(isolate, dictionary, storage,
- mode, accumulator);
+ CommonCopyEnumKeysTo<SwissNameDictionary>(isolate, dictionary, storage, mode,
+ accumulator);
// No need to sort, as CommonCopyEnumKeysTo on OrderedNameDictionary
// adds entries to |storage| in the dict's insertion order
@@ -992,7 +991,7 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
if (map.prototype(isolate_) != ReadOnlyRoots(isolate_).null_value()) {
AllowGarbageCollection allow_gc;
Handle<DescriptorArray> descs = Handle<DescriptorArray>(
- map.instance_descriptors(kRelaxedLoad), isolate_);
+ map.instance_descriptors(isolate_), isolate_);
for (InternalIndex i : InternalIndex::Range(nof_descriptors)) {
PropertyDetails details = descs->GetDetails(i);
if (!details.IsDontEnum()) continue;
@@ -1004,9 +1003,9 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
enum_keys = GetOwnEnumPropertyDictionaryKeys(
isolate_, mode_, this, object,
JSGlobalObject::cast(*object).global_dictionary(kAcquireLoad));
- } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ } else if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
enum_keys = GetOwnEnumPropertyDictionaryKeys(
- isolate_, mode_, this, object, object->property_dictionary_ordered());
+ isolate_, mode_, this, object, object->property_dictionary_swiss());
} else {
enum_keys = GetOwnEnumPropertyDictionaryKeys(
isolate_, mode_, this, object, object->property_dictionary());
@@ -1028,7 +1027,7 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
if (object->HasFastProperties()) {
int limit = object->map().NumberOfOwnDescriptors();
Handle<DescriptorArray> descs(
- object->map().instance_descriptors(kRelaxedLoad), isolate_);
+ object->map().instance_descriptors(isolate_), isolate_);
// First collect the strings,
base::Optional<int> first_symbol =
CollectOwnPropertyNamesInternal<true>(object, this, descs, 0, limit);
@@ -1043,9 +1042,9 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
handle(JSGlobalObject::cast(*object).global_dictionary(kAcquireLoad),
isolate_),
this));
- } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ } else if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
RETURN_NOTHING_IF_NOT_SUCCESSFUL(CollectKeysFromDictionary(
- handle(object->property_dictionary_ordered(), isolate_), this));
+ handle(object->property_dictionary_swiss(), isolate_), this));
} else {
RETURN_NOTHING_IF_NOT_SUCCESSFUL(CollectKeysFromDictionary(
handle(object->property_dictionary(), isolate_), this));
@@ -1060,17 +1059,17 @@ ExceptionStatus KeyAccumulator::CollectPrivateNames(Handle<JSReceiver> receiver,
DCHECK_EQ(mode_, KeyCollectionMode::kOwnOnly);
if (object->HasFastProperties()) {
int limit = object->map().NumberOfOwnDescriptors();
- Handle<DescriptorArray> descs(
- object->map().instance_descriptors(kRelaxedLoad), isolate_);
+ Handle<DescriptorArray> descs(object->map().instance_descriptors(isolate_),
+ isolate_);
CollectOwnPropertyNamesInternal<false>(object, this, descs, 0, limit);
} else if (object->IsJSGlobalObject()) {
RETURN_FAILURE_IF_NOT_SUCCESSFUL(CollectKeysFromDictionary(
handle(JSGlobalObject::cast(*object).global_dictionary(kAcquireLoad),
isolate_),
this));
- } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ } else if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
RETURN_FAILURE_IF_NOT_SUCCESSFUL(CollectKeysFromDictionary(
- handle(object->property_dictionary_ordered(), isolate_), this));
+ handle(object->property_dictionary_swiss(), isolate_), this));
} else {
RETURN_FAILURE_IF_NOT_SUCCESSFUL(CollectKeysFromDictionary(
handle(object->property_dictionary(), isolate_), this));
@@ -1153,10 +1152,10 @@ Handle<FixedArray> KeyAccumulator::GetOwnEnumPropertyKeys(
return GetOwnEnumPropertyDictionaryKeys(
isolate, KeyCollectionMode::kOwnOnly, nullptr, object,
JSGlobalObject::cast(*object).global_dictionary(kAcquireLoad));
- } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ } else if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
return GetOwnEnumPropertyDictionaryKeys(
isolate, KeyCollectionMode::kOwnOnly, nullptr, object,
- object->property_dictionary_ordered());
+ object->property_dictionary_swiss());
} else {
return GetOwnEnumPropertyDictionaryKeys(
isolate, KeyCollectionMode::kOwnOnly, nullptr, object,
@@ -1187,9 +1186,9 @@ Maybe<bool> KeyAccumulator::CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
Handle<JSProxy> proxy) {
STACK_CHECK(isolate_, Nothing<bool>());
if (filter_ == PRIVATE_NAMES_ONLY) {
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
RETURN_NOTHING_IF_NOT_SUCCESSFUL(CollectKeysFromDictionary(
- handle(proxy->property_dictionary_ordered(), isolate_), this));
+ handle(proxy->property_dictionary_swiss(), isolate_), this));
} else {
RETURN_NOTHING_IF_NOT_SUCCESSFUL(CollectKeysFromDictionary(
handle(proxy->property_dictionary(), isolate_), this));
diff --git a/deps/v8/src/objects/literal-objects-inl.h b/deps/v8/src/objects/literal-objects-inl.h
index 4a2329ee556..26c0829f3e5 100644
--- a/deps/v8/src/objects/literal-objects-inl.h
+++ b/deps/v8/src/objects/literal-objects-inl.h
@@ -29,26 +29,26 @@ SMI_ACCESSORS(ObjectBoilerplateDescription, flags,
FixedArray::OffsetOfElementAt(kLiteralTypeOffset))
Object ObjectBoilerplateDescription::name(int index) const {
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
- return name(isolate, index);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return name(cage_base, index);
}
-Object ObjectBoilerplateDescription::name(IsolateRoot isolate,
+Object ObjectBoilerplateDescription::name(PtrComprCageBase cage_base,
int index) const {
// get() already checks for out of bounds access, but we do not want to allow
// access to the last element, if it is the number of properties.
DCHECK_NE(size(), index);
- return get(isolate, 2 * index + kDescriptionStartIndex);
+ return get(cage_base, 2 * index + kDescriptionStartIndex);
}
Object ObjectBoilerplateDescription::value(int index) const {
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
- return value(isolate, index);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return value(cage_base, index);
}
-Object ObjectBoilerplateDescription::value(IsolateRoot isolate,
+Object ObjectBoilerplateDescription::value(PtrComprCageBase cage_base,
int index) const {
- return get(isolate, 2 * index + 1 + kDescriptionStartIndex);
+ return get(cage_base, 2 * index + 1 + kDescriptionStartIndex);
}
void ObjectBoilerplateDescription::set_key_value(int index, Object key,
diff --git a/deps/v8/src/objects/literal-objects.cc b/deps/v8/src/objects/literal-objects.cc
index e6c7402f73f..8dfea9f77c1 100644
--- a/deps/v8/src/objects/literal-objects.cc
+++ b/deps/v8/src/objects/literal-objects.cc
@@ -25,7 +25,7 @@ namespace internal {
namespace {
// The enumeration order index in the property details is unused if they are
-// stored in a OrderedNameDictionary or NumberDictionary (because they handle
+// stored in a SwissNameDictionary or NumberDictionary (because they handle
// property ordering differently). We then use this dummy value instead.
constexpr int kDummyEnumerationIndex = 0;
@@ -100,14 +100,13 @@ Handle<NameDictionary> DictionaryAddNoUpdateNextEnumerationIndex(
}
template <typename LocalIsolate>
-Handle<OrderedNameDictionary> DictionaryAddNoUpdateNextEnumerationIndex(
- LocalIsolate* isolate, Handle<OrderedNameDictionary> dictionary,
+Handle<SwissNameDictionary> DictionaryAddNoUpdateNextEnumerationIndex(
+ LocalIsolate* isolate, Handle<SwissNameDictionary> dictionary,
Handle<Name> name, Handle<Object> value, PropertyDetails details,
InternalIndex* entry_out = nullptr) {
- // OrderedNameDictionary does not maintain the enumeration order in property
+ // SwissNameDictionary does not maintain the enumeration order in property
// details, so it's a normal Add().
- return OrderedNameDictionary::Add(isolate, dictionary, name, value, details)
- .ToHandleChecked();
+ return SwissNameDictionary::Add(isolate, dictionary, name, value, details);
}
template <typename LocalIsolate>
@@ -124,7 +123,7 @@ Handle<NumberDictionary> DictionaryAddNoUpdateNextEnumerationIndex(
template <typename Dictionary>
void DictionaryUpdateMaxNumberKey(Handle<Dictionary> dictionary,
Handle<Name> name) {
- STATIC_ASSERT((std::is_same<Dictionary, OrderedNameDictionary>::value ||
+ STATIC_ASSERT((std::is_same<Dictionary, SwissNameDictionary>::value ||
std::is_same<Dictionary, NameDictionary>::value));
// No-op for (ordered) name dictionaries.
}
@@ -162,7 +161,7 @@ void AddToDictionaryTemplate(LocalIsolate* isolate,
std::is_same<Dictionary, NumberDictionary>::value;
STATIC_ASSERT(is_elements_dictionary !=
(std::is_same<Dictionary, NameDictionary>::value ||
- std::is_same<Dictionary, OrderedNameDictionary>::value));
+ std::is_same<Dictionary, SwissNameDictionary>::value));
if (entry.is_not_found()) {
// Entry not found, add new one.
@@ -405,9 +404,9 @@ class ObjectDescriptor {
void CreateTemplates(LocalIsolate* isolate) {
auto* factory = isolate->factory();
descriptor_array_template_ = factory->empty_descriptor_array();
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
properties_dictionary_template_ =
- factory->empty_ordered_property_dictionary();
+ factory->empty_swiss_property_dictionary();
} else {
properties_dictionary_template_ = factory->empty_property_dictionary();
}
@@ -415,11 +414,11 @@ class ObjectDescriptor {
if (HasDictionaryProperties()) {
int need_space_for =
property_count_ + computed_count_ + property_slack_;
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
properties_dictionary_template_ =
- OrderedNameDictionary::Allocate(isolate, need_space_for,
- AllocationType::kOld)
- .ToHandleChecked();
+ isolate->factory()->NewSwissNameDictionary(need_space_for,
+ AllocationType::kOld);
+
} else {
properties_dictionary_template_ = NameDictionary::New(
isolate, need_space_for, AllocationType::kOld);
@@ -450,11 +449,12 @@ class ObjectDescriptor {
DCHECK(!value->IsAccessorPair());
if (HasDictionaryProperties()) {
PropertyKind kind = is_accessor ? i::kAccessor : i::kData;
- int enum_order = V8_DICT_MODE_PROTOTYPES_BOOL ? kDummyEnumerationIndex
- : next_enumeration_index_++;
+ int enum_order = V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL
+ ? kDummyEnumerationIndex
+ : next_enumeration_index_++;
PropertyDetails details(kind, attribs, PropertyCellType::kNoCell,
enum_order);
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
properties_dictionary_template_ =
DictionaryAddNoUpdateNextEnumerationIndex(
isolate, properties_ordered_dictionary_template(), name, value,
@@ -479,7 +479,7 @@ class ObjectDescriptor {
Smi value = Smi::FromInt(value_index);
if (HasDictionaryProperties()) {
UpdateNextEnumerationIndex(value_index);
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
AddToDictionaryTemplate(isolate,
properties_ordered_dictionary_template(), name,
value_index, value_kind, value);
@@ -519,7 +519,7 @@ class ObjectDescriptor {
void Finalize(LocalIsolate* isolate) {
if (HasDictionaryProperties()) {
DCHECK_EQ(current_computed_index_, computed_properties_->length());
- if (!V8_DICT_MODE_PROTOTYPES_BOOL) {
+ if (!V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
properties_dictionary_template()->set_next_enumeration_index(
next_enumeration_index_);
}
@@ -533,8 +533,8 @@ class ObjectDescriptor {
return Handle<NameDictionary>::cast(properties_dictionary_template_);
}
- Handle<OrderedNameDictionary> properties_ordered_dictionary_template() const {
- return Handle<OrderedNameDictionary>::cast(properties_dictionary_template_);
+ Handle<SwissNameDictionary> properties_ordered_dictionary_template() const {
+ return Handle<SwissNameDictionary>::cast(properties_dictionary_template_);
}
const int property_slack_;
@@ -546,7 +546,7 @@ class ObjectDescriptor {
Handle<DescriptorArray> descriptor_array_template_;
- // Is either a NameDictionary or OrderedNameDictionary.
+ // Is either a NameDictionary or SwissNameDictionary.
Handle<HeapObject> properties_dictionary_template_;
Handle<NumberDictionary> elements_dictionary_template_;
@@ -569,9 +569,8 @@ template void ClassBoilerplate::AddToPropertiesTemplate(
LocalIsolate* isolate, Handle<NameDictionary> dictionary, Handle<Name> name,
int key_index, ClassBoilerplate::ValueKind value_kind, Smi value);
template void ClassBoilerplate::AddToPropertiesTemplate(
- Isolate* isolate, Handle<OrderedNameDictionary> dictionary,
- Handle<Name> name, int key_index, ClassBoilerplate::ValueKind value_kind,
- Smi value);
+ Isolate* isolate, Handle<SwissNameDictionary> dictionary, Handle<Name> name,
+ int key_index, ClassBoilerplate::ValueKind value_kind, Smi value);
template <typename LocalIsolate>
void ClassBoilerplate::AddToElementsTemplate(
diff --git a/deps/v8/src/objects/literal-objects.h b/deps/v8/src/objects/literal-objects.h
index 78fa53011b4..3377bcd4c2b 100644
--- a/deps/v8/src/objects/literal-objects.h
+++ b/deps/v8/src/objects/literal-objects.h
@@ -28,10 +28,10 @@ class ClassLiteral;
class ObjectBoilerplateDescription : public FixedArray {
public:
inline Object name(int index) const;
- inline Object name(IsolateRoot isolate, int index) const;
+ inline Object name(PtrComprCageBase cage_base, int index) const;
inline Object value(int index) const;
- inline Object value(IsolateRoot isolate, int index) const;
+ inline Object value(PtrComprCageBase cage_base, int index) const;
inline void set_key_value(int index, Object key, Object value);
diff --git a/deps/v8/src/objects/lookup-inl.h b/deps/v8/src/objects/lookup-inl.h
index d38be27fca9..5f2fbd4cc20 100644
--- a/deps/v8/src/objects/lookup-inl.h
+++ b/deps/v8/src/objects/lookup-inl.h
@@ -73,7 +73,7 @@ LookupIterator::LookupIterator(Isolate* isolate, Handle<Object> receiver,
if (IsElement()) {
// If we're not looking at a TypedArray, we will need the key represented
// as an internalized string.
- if (index_ > JSArray::kMaxArrayIndex &&
+ if (index_ > JSObject::kMaxElementIndex &&
!lookup_start_object->IsJSTypedArray()) {
if (name_.is_null()) {
name_ = isolate->factory()->SizeToString(index_);
@@ -85,6 +85,7 @@ LookupIterator::LookupIterator(Isolate* isolate, Handle<Object> receiver,
}
Start<true>();
} else {
+ DCHECK(!name_.is_null());
name_ = isolate->factory()->InternalizeName(name_);
#ifdef DEBUG
// Assert that the name is not an index.
@@ -106,7 +107,9 @@ LookupIterator::LookupIterator(Isolate* isolate, Handle<Object> receiver,
LookupIterator::Key::Key(Isolate* isolate, double index) {
DCHECK_EQ(index, static_cast<uint64_t>(index));
#if V8_TARGET_ARCH_32_BIT
- if (index <= JSArray::kMaxArrayIndex) {
+ if (index <= JSObject::kMaxElementIndex) {
+ STATIC_ASSERT(JSObject::kMaxElementIndex <=
+ std::numeric_limits<size_t>::max());
index_ = static_cast<size_t>(index);
} else {
index_ = LookupIterator::kInvalidIndex;
@@ -165,7 +168,7 @@ Handle<Name> LookupIterator::GetName() {
}
bool LookupIterator::IsElement(JSReceiver object) const {
- return index_ <= JSArray::kMaxArrayIndex ||
+ return index_ <= JSObject::kMaxElementIndex ||
(index_ != kInvalidIndex && object.map().has_typed_array_elements());
}
@@ -270,7 +273,7 @@ Handle<T> LookupIterator::GetStoreTarget() const {
template <bool is_element>
InterceptorInfo LookupIterator::GetInterceptor(JSObject holder) const {
- if (is_element && index_ <= JSArray::kMaxArrayIndex) {
+ if (is_element && index_ <= JSObject::kMaxElementIndex) {
return holder.GetIndexedInterceptor(isolate_);
} else {
return holder.GetNamedInterceptor(isolate_);
diff --git a/deps/v8/src/objects/lookup.cc b/deps/v8/src/objects/lookup.cc
index a8f2da66f82..d81d0059336 100644
--- a/deps/v8/src/objects/lookup.cc
+++ b/deps/v8/src/objects/lookup.cc
@@ -381,8 +381,13 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
// that's only for the case that the existing map is a fast mode map.
// Therefore, we need to perform the necessary updates to the property
// details and the prototype validity cell directly.
- NameDictionary dict = holder->property_dictionary();
- dict.DetailsAtPut(dictionary_entry(), property_details_);
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ SwissNameDictionary dict = holder->property_dictionary_swiss();
+ dict.DetailsAtPut(dictionary_entry(), property_details_);
+ } else {
+ NameDictionary dict = holder->property_dictionary();
+ dict.DetailsAtPut(dictionary_entry(), property_details_);
+ }
Map old_map = holder->map(isolate_);
if (old_map.is_prototype_map()) {
@@ -406,9 +411,8 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
if (old_map.is_identical_to(new_map)) {
// Update the property details if the representation was None.
if (constness() != new_constness || representation().IsNone()) {
- property_details_ =
- new_map->instance_descriptors(isolate_, kRelaxedLoad)
- .GetDetails(descriptor_number());
+ property_details_ = new_map->instance_descriptors(isolate_).GetDetails(
+ descriptor_number());
}
return;
}
@@ -429,8 +433,13 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
property_details_ =
property_details_.CopyWithConstness(PropertyConstness::kMutable);
- NameDictionary dict = holder_obj->property_dictionary();
- dict.DetailsAtPut(dictionary_entry(), property_details_);
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ SwissNameDictionary dict = holder_obj->property_dictionary_swiss();
+ dict.DetailsAtPut(dictionary_entry(), property_details_);
+ } else {
+ NameDictionary dict = holder_obj->property_dictionary();
+ dict.DetailsAtPut(dictionary_entry(), property_details_);
+ }
DCHECK_IMPLIES(new_map->is_prototype_map(),
!new_map->IsPrototypeValidityCellValid());
@@ -498,10 +507,11 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
DCHECK_EQ(cell->value(), *value);
} else {
PropertyDetails details(kData, attributes, PropertyConstness::kMutable);
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- Handle<OrderedNameDictionary> dictionary(
- holder_obj->property_dictionary_ordered(isolate_), isolate());
- dictionary->SetEntry(dictionary_entry(), *name(), *value, details);
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ Handle<SwissNameDictionary> dictionary(
+ holder_obj->property_dictionary_swiss(isolate_), isolate());
+ dictionary->ValueAtPut(dictionary_entry(), *value);
+ dictionary->DetailsAtPut(dictionary_entry(), details);
DCHECK_EQ(details.AsSmi(),
dictionary->DetailsAt(dictionary_entry()).AsSmi());
property_details_ = details;
@@ -640,18 +650,14 @@ void LookupIterator::ApplyTransitionToDataProperty(
receiver->IsJSObject(isolate_)) {
JSObject::InvalidatePrototypeChains(receiver->map(isolate_));
}
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- Handle<OrderedNameDictionary> dictionary(
- receiver->property_dictionary_ordered(isolate_), isolate_);
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ Handle<SwissNameDictionary> dictionary(
+ receiver->property_dictionary_swiss(isolate_), isolate_);
dictionary =
- OrderedNameDictionary::Add(isolate(), dictionary, name(),
- isolate_->factory()->uninitialized_value(),
- property_details_)
- .ToHandleChecked();
-
- // set to last used entry
- number_ = InternalIndex(dictionary->UsedCapacity() - 1);
+ SwissNameDictionary::Add(isolate(), dictionary, name(),
+ isolate_->factory()->uninitialized_value(),
+ property_details_, &number_);
receiver->SetProperties(*dictionary);
} else {
Handle<NameDictionary> dictionary(receiver->property_dictionary(isolate_),
@@ -849,8 +855,8 @@ Handle<Object> LookupIterator::FetchValue(
result = holder->global_dictionary(isolate_, kAcquireLoad)
.ValueAt(isolate_, dictionary_entry());
} else if (!holder_->HasFastProperties(isolate_)) {
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- result = holder_->property_dictionary_ordered(isolate_).ValueAt(
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ result = holder_->property_dictionary_swiss(isolate_).ValueAt(
dictionary_entry());
} else {
result = holder_->property_dictionary(isolate_).ValueAt(
@@ -868,9 +874,9 @@ Handle<Object> LookupIterator::FetchValue(
return JSObject::FastPropertyAt(holder, property_details_.representation(),
field_index);
} else {
- result = holder_->map(isolate_)
- .instance_descriptors(isolate_, kRelaxedLoad)
- .GetStrongValue(isolate_, descriptor_number());
+ result =
+ holder_->map(isolate_).instance_descriptors(isolate_).GetStrongValue(
+ isolate_, descriptor_number());
}
return handle(result, isolate_);
}
@@ -931,9 +937,14 @@ bool LookupIterator::IsConstDictValueEqualTo(Object value) const {
return true;
}
Handle<JSReceiver> holder = GetHolder<JSReceiver>();
- NameDictionary dict = holder->property_dictionary();
-
- Object current_value = dict.ValueAt(dictionary_entry());
+ Object current_value;
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ SwissNameDictionary dict = holder->property_dictionary_swiss();
+ current_value = dict.ValueAt(dictionary_entry());
+ } else {
+ NameDictionary dict = holder->property_dictionary();
+ current_value = dict.ValueAt(dictionary_entry());
+ }
if (current_value.IsUninitialized(isolate()) || current_value == value) {
return true;
@@ -959,16 +970,6 @@ int LookupIterator::GetAccessorIndex() const {
return descriptor_number().as_int();
}
-Handle<Map> LookupIterator::GetFieldOwnerMap() const {
- DCHECK(has_property_);
- DCHECK(holder_->HasFastProperties(isolate_));
- DCHECK_EQ(kField, property_details_.location());
- DCHECK(!IsElement(*holder_));
- Map holder_map = holder_->map(isolate_);
- return handle(holder_map.FindFieldOwner(isolate(), descriptor_number()),
- isolate_);
-}
-
FieldIndex LookupIterator::GetFieldIndex() const {
DCHECK(has_property_);
DCHECK(holder_->HasFastProperties(isolate_));
@@ -977,16 +978,6 @@ FieldIndex LookupIterator::GetFieldIndex() const {
return FieldIndex::ForDescriptor(holder_->map(isolate_), descriptor_number());
}
-Handle<FieldType> LookupIterator::GetFieldType() const {
- DCHECK(has_property_);
- DCHECK(holder_->HasFastProperties(isolate_));
- DCHECK_EQ(kField, property_details_.location());
- return handle(holder_->map(isolate_)
- .instance_descriptors(isolate_, kRelaxedLoad)
- .GetFieldType(isolate_, descriptor_number()),
- isolate_);
-}
-
Handle<PropertyCell> LookupIterator::GetPropertyCell() const {
DCHECK(!IsElement(*holder_));
Handle<JSGlobalObject> holder = GetHolder<JSGlobalObject>();
@@ -1045,9 +1036,9 @@ void LookupIterator::WriteDataValue(Handle<Object> value,
property_details_.constness() == PropertyConstness::kConst,
holder->IsJSProxy(isolate_) || IsConstDictValueEqualTo(*value));
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- OrderedNameDictionary dictionary =
- holder->property_dictionary_ordered(isolate_);
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ SwissNameDictionary dictionary =
+ holder->property_dictionary_swiss(isolate_);
dictionary.ValueAtPut(dictionary_entry(), *value);
} else {
NameDictionary dictionary = holder->property_dictionary(isolate_);
@@ -1101,7 +1092,7 @@ namespace {
template <bool is_element>
bool HasInterceptor(Map map, size_t index) {
if (is_element) {
- if (index > JSArray::kMaxArrayIndex) {
+ if (index > JSObject::kMaxElementIndex) {
// There is currently no way to install interceptors on an object with
// typed array elements.
DCHECK(!map.has_typed_array_elements());
@@ -1190,15 +1181,14 @@ LookupIterator::State LookupIterator::LookupInRegularHolder(
property_details_ = property_details_.CopyAddAttributes(SEALED);
}
} else if (!map.is_dictionary_map()) {
- DescriptorArray descriptors =
- map.instance_descriptors(isolate_, kRelaxedLoad);
+ DescriptorArray descriptors = map.instance_descriptors(isolate_);
number_ = descriptors.SearchWithCache(isolate_, *name_, map);
if (number_.is_not_found()) return NotFound(holder);
property_details_ = descriptors.GetDetails(number_);
} else {
DCHECK_IMPLIES(holder.IsJSProxy(isolate_), name()->IsPrivate(isolate_));
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- OrderedNameDictionary dict = holder.property_dictionary_ordered(isolate_);
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ SwissNameDictionary dict = holder.property_dictionary_swiss(isolate_);
number_ = dict.FindEntry(isolate(), *name_);
if (number_.is_not_found()) return NotFound(holder);
property_details_ = dict.DetailsAt(number_);
@@ -1313,5 +1303,92 @@ base::Optional<Object> ConcurrentLookupIterator::TryGetOwnCowElement(
return result;
}
+// static
+ConcurrentLookupIterator::Result
+ConcurrentLookupIterator::TryGetOwnConstantElement(
+ Object* result_out, Isolate* isolate, LocalIsolate* local_isolate,
+ JSObject holder, FixedArrayBase elements, ElementsKind elements_kind,
+ size_t index) {
+ DisallowGarbageCollection no_gc;
+
+ DCHECK_LE(index, JSObject::kMaxElementIndex);
+
+ // Own 'constant' elements (PropertyAttributes READ_ONLY|DONT_DELETE) occur in
+ // three main cases:
+ //
+ // 1. Frozen elements: guaranteed constant.
+ // 2. Dictionary elements: may be constant.
+ // 3. String wrapper elements: guaranteed constant.
+
+ // Interesting field reads below:
+ //
+ // - elements.length (immutable on FixedArrays).
+ // - elements[i] (immutable if constant; be careful around dictionaries).
+ // - holder.AsJSPrimitiveWrapper.value.AsString.length (immutable).
+ // - holder.AsJSPrimitiveWrapper.value.AsString[i] (immutable).
+ // - single_character_string_cache()->get().
+
+ if (IsFrozenElementsKind(elements_kind)) {
+ FixedArray elements_fixed_array = FixedArray::cast(elements);
+ if (index >= static_cast<uint32_t>(elements_fixed_array.length())) {
+ return kGaveUp;
+ }
+ Object result = elements_fixed_array.get(isolate, static_cast<int>(index));
+ if (IsHoleyElementsKindForRead(elements_kind) &&
+ result == ReadOnlyRoots(isolate).the_hole_value()) {
+ return kNotPresent;
+ }
+ *result_out = result;
+ return kPresent;
+ } else if (IsDictionaryElementsKind(elements_kind)) {
+ DCHECK(elements.IsNumberDictionary());
+ // TODO(jgruber, v8:7790): Add support. Dictionary elements require racy
+ // NumberDictionary lookups. This should be okay in general (slot iteration
+ // depends only on the dict's capacity), but 1. we'd need to update
+ // NumberDictionary methods to do atomic reads, and 2. the dictionary
+ // elements case isn't very important for callers of this function.
+ return kGaveUp;
+ } else if (IsStringWrapperElementsKind(elements_kind)) {
+ // In this case we don't care about the actual `elements`. All in-bounds
+ // reads are redirected to the wrapped String.
+
+ JSPrimitiveWrapper js_value = JSPrimitiveWrapper::cast(holder);
+ String wrapped_string = String::cast(js_value.value());
+
+ // The access guard below protects only internalized string accesses.
+ // TODO(jgruber): Support other string kinds.
+ Map wrapped_string_map = wrapped_string.synchronized_map(isolate);
+ if (!InstanceTypeChecker::IsInternalizedString(
+ wrapped_string_map.instance_type())) {
+ return kGaveUp;
+ }
+
+ const uint32_t length = static_cast<uint32_t>(wrapped_string.length());
+ if (index >= length) return kGaveUp;
+
+ uint16_t charcode;
+ {
+ SharedStringAccessGuardIfNeeded access_guard(local_isolate);
+ charcode = wrapped_string.Get(static_cast<int>(index));
+ }
+
+ if (charcode > unibrow::Latin1::kMaxChar) return kGaveUp;
+
+ Object value = isolate->factory()->single_character_string_cache()->get(
+ charcode, kRelaxedLoad);
+ if (value == ReadOnlyRoots(isolate).undefined_value()) return kGaveUp;
+
+ *result_out = value;
+ return kPresent;
+ } else {
+ DCHECK(!IsFrozenElementsKind(elements_kind));
+ DCHECK(!IsDictionaryElementsKind(elements_kind));
+ DCHECK(!IsStringWrapperElementsKind(elements_kind));
+ return kGaveUp;
+ }
+
+ UNREACHABLE();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/lookup.h b/deps/v8/src/objects/lookup.h
index 06c317beb7a..6abed027d1e 100644
--- a/deps/v8/src/objects/lookup.h
+++ b/deps/v8/src/objects/lookup.h
@@ -172,9 +172,7 @@ class V8_EXPORT_PRIVATE LookupIterator final {
}
PropertyLocation location() const { return property_details().location(); }
PropertyConstness constness() const { return property_details().constness(); }
- Handle<Map> GetFieldOwnerMap() const;
FieldIndex GetFieldIndex() const;
- Handle<FieldType> GetFieldType() const;
int GetFieldDescriptorIndex() const;
int GetAccessorIndex() const;
Handle<PropertyCell> GetPropertyCell() const;
@@ -305,6 +303,13 @@ class V8_EXPORT_PRIVATE LookupIterator final {
// functionality and constraints are better known.
class ConcurrentLookupIterator final : public AllStatic {
public:
+ // Tri-state to distinguish between 'not-present' and 'who-knows' failures.
+ enum Result {
+ kPresent, // The value was found.
+ kNotPresent, // No value exists.
+ kGaveUp, // The operation can't be completed.
+ };
+
// Implements the own data property lookup for the specialized case of
// fixed_cow_array backing stores (these are only in use for array literal
// boilerplates). The contract is that the elements, elements kind, and array
@@ -316,6 +321,13 @@ class ConcurrentLookupIterator final : public AllStatic {
V8_EXPORT_PRIVATE static base::Optional<Object> TryGetOwnCowElement(
Isolate* isolate, FixedArray array_elements, ElementsKind elements_kind,
int array_length, size_t index);
+
+ // Unlike above, the contract is that holder, elements, and elements_kind are
+ // a consistent view of the world; and index must be a valid element index.
+ V8_EXPORT_PRIVATE static Result TryGetOwnConstantElement(
+ Object* result_out, Isolate* isolate, LocalIsolate* local_isolate,
+ JSObject holder, FixedArrayBase elements, ElementsKind elements_kind,
+ size_t index);
};
} // namespace internal
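ConcurrentLookupIterator::TryGetOwnConstantElement, declared above and implemented in lookup.cc, deliberately returns a tri-state Result rather than a bool, so a concurrent reader can report "definitely absent" separately from "could not decide safely". A minimal sketch of how a caller consumes that contract, with illustrative names and a toy data source:

#include <cstddef>
#include <vector>

enum class Result { kPresent, kNotPresent, kGaveUp };

// Toy lookup: negative values stand in for entries we refuse to read
// concurrently, forcing the caller onto a slower, safe path.
Result TryGetOwnConstantElement(int* result_out, const std::vector<int>& elements,
                                std::size_t index) {
  if (index >= elements.size()) return Result::kNotPresent;
  int value = elements[index];
  if (value < 0) return Result::kGaveUp;
  *result_out = value;
  return Result::kPresent;
}

int LoadOrFallBack(const std::vector<int>& elements, std::size_t index) {
  int value = 0;
  switch (TryGetOwnConstantElement(&value, elements, index)) {
    case Result::kPresent:    return value;  // safe to constant-fold
    case Result::kNotPresent: return 0;      // absence is also a usable fact
    case Result::kGaveUp:     break;         // give up and take the slow path
  }
  return -1;  // stand-in for the generic, non-concurrent path
}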
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index ecb270e7cee..eb28f0b111f 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -20,7 +20,10 @@
#include "src/objects/templates-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/objects/transitions.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-objects-inl.h"
+#endif // V8_ENABLE_WEBASSEMBLY
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -33,6 +36,8 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(Map, HeapObject)
CAST_ACCESSOR(Map)
+ACCESSORS(Map, instance_descriptors, DescriptorArray,
+ kInstanceDescriptorsOffset)
RELAXED_ACCESSORS(Map, instance_descriptors, DescriptorArray,
kInstanceDescriptorsOffset)
RELEASE_ACQUIRE_ACCESSORS(Map, instance_descriptors, DescriptorArray,
@@ -57,22 +62,27 @@ ACCESSORS_CHECKED(Map, prototype_info, Object,
// is explicitly allowlisted here. The former is never modified after the map
// is setup but it's being read by concurrent marker when pointer compression
// is enabled. The latter bit can be modified on a live objects.
-BIT_FIELD_ACCESSORS(Map, bit_field, has_non_instance_prototype,
+BIT_FIELD_ACCESSORS(Map, relaxed_bit_field, has_non_instance_prototype,
Map::Bits1::HasNonInstancePrototypeBit)
-BIT_FIELD_ACCESSORS(Map, bit_field, is_callable, Map::Bits1::IsCallableBit)
-BIT_FIELD_ACCESSORS(Map, bit_field, has_named_interceptor,
- Map::Bits1::HasNamedInterceptorBit)
-BIT_FIELD_ACCESSORS(Map, bit_field, has_indexed_interceptor,
- Map::Bits1::HasIndexedInterceptorBit)
-BIT_FIELD_ACCESSORS(Map, bit_field, is_undetectable,
- Map::Bits1::IsUndetectableBit)
-BIT_FIELD_ACCESSORS(Map, bit_field, is_access_check_needed,
- Map::Bits1::IsAccessCheckNeededBit)
-BIT_FIELD_ACCESSORS(Map, bit_field, is_constructor,
- Map::Bits1::IsConstructorBit)
-BIT_FIELD_ACCESSORS(Map, bit_field, has_prototype_slot,
+BIT_FIELD_ACCESSORS(Map, relaxed_bit_field, has_prototype_slot,
Map::Bits1::HasPrototypeSlotBit)
+// These are fine to be written as non-atomic since we don't have data races.
+// However, they have to be read atomically from the background since the
+// |bit_field| as a whole can mutate when using the above setters.
+BIT_FIELD_ACCESSORS2(Map, relaxed_bit_field, bit_field, is_callable,
+ Map::Bits1::IsCallableBit)
+BIT_FIELD_ACCESSORS2(Map, relaxed_bit_field, bit_field, has_named_interceptor,
+ Map::Bits1::HasNamedInterceptorBit)
+BIT_FIELD_ACCESSORS2(Map, relaxed_bit_field, bit_field, has_indexed_interceptor,
+ Map::Bits1::HasIndexedInterceptorBit)
+BIT_FIELD_ACCESSORS2(Map, relaxed_bit_field, bit_field, is_undetectable,
+ Map::Bits1::IsUndetectableBit)
+BIT_FIELD_ACCESSORS2(Map, relaxed_bit_field, bit_field, is_access_check_needed,
+ Map::Bits1::IsAccessCheckNeededBit)
+BIT_FIELD_ACCESSORS2(Map, relaxed_bit_field, bit_field, is_constructor,
+ Map::Bits1::IsConstructorBit)
+
// |bit_field2| fields.
BIT_FIELD_ACCESSORS(Map, bit_field2, new_target_is_base,
Map::Bits2::NewTargetIsBaseBit)
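The split between the plain and relaxed bit_field accessors above boils down to "one writer thread, many relaxed readers of the whole byte". A self-contained sketch of that pattern in standard C++, with std::atomic standing in for V8's RELAXED_* field macros; the struct name and bit position are made up for illustration:

#include <atomic>
#include <cstdint>

struct ToyBits1 {
  static constexpr uint8_t kIsCallableBit = 1 << 2;  // Illustrative position.

  // Owner-thread setter: no data race by construction, so a relaxed
  // read-modify-write of the whole byte is enough.
  void set_is_callable(bool value) {
    uint8_t byte = bit_field_.load(std::memory_order_relaxed);
    byte = value ? static_cast<uint8_t>(byte | kIsCallableBit)
                 : static_cast<uint8_t>(byte & ~kIsCallableBit);
    bit_field_.store(byte, std::memory_order_relaxed);
  }

  // Background-thread getter: must load the byte atomically because the
  // owner may be flipping sibling bits concurrently.
  bool is_callable() const {
    return bit_field_.load(std::memory_order_relaxed) & kIsCallableBit;
  }

  std::atomic<uint8_t> bit_field_{0};
};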
@@ -97,14 +107,14 @@ BIT_FIELD_ACCESSORS(Map, bit_field3, construction_counter,
DEF_GETTER(Map, GetNamedInterceptor, InterceptorInfo) {
DCHECK(has_named_interceptor());
- FunctionTemplateInfo info = GetFunctionTemplateInfo(isolate);
- return InterceptorInfo::cast(info.GetNamedPropertyHandler(isolate));
+ FunctionTemplateInfo info = GetFunctionTemplateInfo(cage_base);
+ return InterceptorInfo::cast(info.GetNamedPropertyHandler(cage_base));
}
DEF_GETTER(Map, GetIndexedInterceptor, InterceptorInfo) {
DCHECK(has_indexed_interceptor());
- FunctionTemplateInfo info = GetFunctionTemplateInfo(isolate);
- return InterceptorInfo::cast(info.GetIndexedPropertyHandler(isolate));
+ FunctionTemplateInfo info = GetFunctionTemplateInfo(cage_base);
+ return InterceptorInfo::cast(info.GetIndexedPropertyHandler(cage_base));
}
bool Map::IsMostGeneralFieldType(Representation representation,
@@ -171,8 +181,12 @@ bool Map::TooManyFastProperties(StoreOrigin store_origin) const {
}
}
+Name Map::GetLastDescriptorName(Isolate* isolate) const {
+ return instance_descriptors(isolate).GetKey(LastAdded());
+}
+
PropertyDetails Map::GetLastDescriptorDetails(Isolate* isolate) const {
- return instance_descriptors(isolate, kRelaxedLoad).GetDetails(LastAdded());
+ return instance_descriptors(isolate).GetDetails(LastAdded());
}
InternalIndex Map::LastAdded() const {
@@ -186,7 +200,7 @@ int Map::NumberOfOwnDescriptors() const {
}
void Map::SetNumberOfOwnDescriptors(int number) {
- DCHECK_LE(number, instance_descriptors(kRelaxedLoad).number_of_descriptors());
+ DCHECK_LE(number, instance_descriptors().number_of_descriptors());
CHECK_LE(static_cast<unsigned>(number),
static_cast<unsigned>(kMaxNumberOfDescriptors));
set_bit_field3(
@@ -304,6 +318,8 @@ Handle<Map> Map::AddMissingTransitionsForTesting(
return AddMissingTransitions(isolate, split_map, descriptors);
}
+// TODO(solanes, v8:7790, v8:11353): Make the instance_type accessors non-atomic
+// when TSAN sees the map's store synchronization.
InstanceType Map::instance_type() const {
return static_cast<InstanceType>(
RELAXED_READ_UINT16_FIELD(*this, kInstanceTypeOffset));
@@ -432,20 +448,24 @@ void Map::AccountAddedOutOfObjectPropertyField(int unused_in_property_array) {
DCHECK_EQ(unused_in_property_array, UnusedPropertyFields());
}
-byte Map::bit_field() const {
- return ACQUIRE_READ_BYTE_FIELD(*this, kBitFieldOffset);
-}
+byte Map::bit_field() const { return ReadField<byte>(kBitFieldOffset); }
void Map::set_bit_field(byte value) {
- RELEASE_WRITE_BYTE_FIELD(*this, kBitFieldOffset, value);
+ WriteField<byte>(kBitFieldOffset, value);
+}
+
+byte Map::relaxed_bit_field() const {
+ return RELAXED_READ_BYTE_FIELD(*this, kBitFieldOffset);
}
-byte Map::bit_field2() const {
- return ACQUIRE_READ_BYTE_FIELD(*this, kBitField2Offset);
+void Map::set_relaxed_bit_field(byte value) {
+ RELAXED_WRITE_BYTE_FIELD(*this, kBitFieldOffset, value);
}
+byte Map::bit_field2() const { return ReadField<byte>(kBitField2Offset); }
+
void Map::set_bit_field2(byte value) {
- RELEASE_WRITE_BYTE_FIELD(*this, kBitField2Offset, value);
+ WriteField<byte>(kBitField2Offset, value);
}
bool Map::is_abandoned_prototype_map() const {
@@ -612,7 +632,7 @@ void Map::clear_padding() {
}
void Map::AppendDescriptor(Isolate* isolate, Descriptor* desc) {
- DescriptorArray descriptors = instance_descriptors(kRelaxedLoad);
+ DescriptorArray descriptors = instance_descriptors(isolate);
int number_of_own_descriptors = NumberOfOwnDescriptors();
DCHECK(descriptors.number_of_descriptors() == number_of_own_descriptors);
{
@@ -641,25 +661,25 @@ void Map::AppendDescriptor(Isolate* isolate, Descriptor* desc) {
#endif
}
+bool Map::ConcurrentIsMap(PtrComprCageBase cage_base,
+ const Object& object) const {
+ return object.IsHeapObject() && HeapObject::cast(object).map(cage_base) ==
+ GetReadOnlyRoots(cage_base).meta_map();
+}
+
DEF_GETTER(Map, GetBackPointer, HeapObject) {
- Object object = constructor_or_back_pointer(isolate);
- // This is the equivalent of IsMap() but avoids reading the instance type so
- // it can be used concurrently without acquire load.
- if (object.IsHeapObject() && HeapObject::cast(object).map(isolate) ==
- GetReadOnlyRoots(isolate).meta_map()) {
+ Object object = constructor_or_back_pointer(cage_base);
+ if (ConcurrentIsMap(cage_base, object)) {
return Map::cast(object);
}
- // Can't use ReadOnlyRoots(isolate) as this isolate could be produced by
- // i::GetIsolateForPtrCompr(HeapObject).
- return GetReadOnlyRoots(isolate).undefined_value();
+ return GetReadOnlyRoots(cage_base).undefined_value();
}
void Map::SetBackPointer(HeapObject value, WriteBarrierMode mode) {
CHECK_GE(instance_type(), FIRST_JS_RECEIVER_TYPE);
CHECK(value.IsMap());
CHECK(GetBackPointer().IsUndefined());
- CHECK_IMPLIES(value.IsMap(), Map::cast(value).GetConstructor() ==
- constructor_or_back_pointer());
+ CHECK_EQ(Map::cast(value).GetConstructor(), constructor_or_back_pointer());
set_constructor_or_back_pointer(value, mode);
}
@@ -678,9 +698,11 @@ ACCESSORS_CHECKED2(Map, constructor_or_back_pointer, Object,
ACCESSORS_CHECKED(Map, native_context, NativeContext,
kConstructorOrBackPointerOrNativeContextOffset,
IsContextMap())
+#if V8_ENABLE_WEBASSEMBLY
ACCESSORS_CHECKED(Map, wasm_type_info, WasmTypeInfo,
kConstructorOrBackPointerOrNativeContextOffset,
IsWasmStructMap() || IsWasmArrayMap())
+#endif // V8_ENABLE_WEBASSEMBLY
bool Map::IsPrototypeValidityCellValid() const {
Object validity_cell = prototype_validity_cell();
@@ -690,11 +712,11 @@ bool Map::IsPrototypeValidityCellValid() const {
}
DEF_GETTER(Map, GetConstructor, Object) {
- Object maybe_constructor = constructor_or_back_pointer(isolate);
+ Object maybe_constructor = constructor_or_back_pointer(cage_base);
// Follow any back pointers.
- while (maybe_constructor.IsMap(isolate)) {
+ while (ConcurrentIsMap(cage_base, maybe_constructor)) {
maybe_constructor =
- Map::cast(maybe_constructor).constructor_or_back_pointer(isolate);
+ Map::cast(maybe_constructor).constructor_or_back_pointer(cage_base);
}
return maybe_constructor;
}
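GetBackPointer and GetConstructor are declared via DEF_GETTER, whose reworked definition in object-macros.h appears later in this patch. As a rough illustration, DEF_GETTER(Map, GetConstructor, Object) now expands to two overloads along these lines (reconstructed by hand from the macro and the body above, not copied from preprocessed output):

Object Map::GetConstructor() const {
  // The parameterless overload computes the cage base itself...
  PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
  return Map::GetConstructor(cage_base);
}
Object Map::GetConstructor(PtrComprCageBase cage_base) const {
  // ...while the cage_base overload carries the body written at the use site.
  Object maybe_constructor = constructor_or_back_pointer(cage_base);
  // Follow any back pointers.
  while (ConcurrentIsMap(cage_base, maybe_constructor)) {
    maybe_constructor =
        Map::cast(maybe_constructor).constructor_or_back_pointer(cage_base);
  }
  return maybe_constructor;
}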
@@ -711,13 +733,13 @@ Object Map::TryGetConstructor(Isolate* isolate, int max_steps) {
}
DEF_GETTER(Map, GetFunctionTemplateInfo, FunctionTemplateInfo) {
- Object constructor = GetConstructor(isolate);
- if (constructor.IsJSFunction(isolate)) {
+ Object constructor = GetConstructor(cage_base);
+ if (constructor.IsJSFunction(cage_base)) {
// TODO(ishell): IsApiFunction(isolate) and get_api_func_data(isolate)
- DCHECK(JSFunction::cast(constructor).shared(isolate).IsApiFunction());
- return JSFunction::cast(constructor).shared(isolate).get_api_func_data();
+ DCHECK(JSFunction::cast(constructor).shared(cage_base).IsApiFunction());
+ return JSFunction::cast(constructor).shared(cage_base).get_api_func_data();
}
- DCHECK(constructor.IsFunctionTemplateInfo(isolate));
+ DCHECK(constructor.IsFunctionTemplateInfo(cage_base));
return FunctionTemplateInfo::cast(constructor);
}
@@ -738,6 +760,7 @@ bool Map::IsInobjectSlackTrackingInProgress() const {
}
void Map::InobjectSlackTrackingStep(Isolate* isolate) {
+ DisallowGarbageCollection no_gc;
// Slack tracking should only be performed on an initial map.
DCHECK(GetBackPointer().IsUndefined());
if (!IsInobjectSlackTrackingInProgress()) return;
@@ -771,7 +794,7 @@ int NormalizedMapCache::GetIndex(Handle<Map> map) {
}
DEF_GETTER(HeapObject, IsNormalizedMapCache, bool) {
- if (!IsWeakFixedArray(isolate)) return false;
+ if (!IsWeakFixedArray(cage_base)) return false;
if (WeakFixedArray::cast(*this).length() != NormalizedMapCache::kEntries) {
return false;
}
diff --git a/deps/v8/src/objects/map-updater.cc b/deps/v8/src/objects/map-updater.cc
index 31841992dec..8ab15451a77 100644
--- a/deps/v8/src/objects/map-updater.cc
+++ b/deps/v8/src/objects/map-updater.cc
@@ -28,7 +28,7 @@ inline bool EqualImmutableValues(Object obj1, Object obj2) {
MapUpdater::MapUpdater(Isolate* isolate, Handle<Map> old_map)
: isolate_(isolate),
old_map_(old_map),
- old_descriptors_(old_map->instance_descriptors(kRelaxedLoad), isolate_),
+ old_descriptors_(old_map->instance_descriptors(isolate), isolate_),
old_nof_(old_map_->NumberOfOwnDescriptors()),
new_elements_kind_(old_map_->elements_kind()),
is_transitionable_fast_elements_kind_(
@@ -117,6 +117,10 @@ Handle<Map> MapUpdater::ReconfigureToDataField(InternalIndex descriptor,
DCHECK_EQ(kInitialized, state_);
DCHECK(descriptor.is_found());
DCHECK(!old_map_->is_dictionary_map());
+
+ base::SharedMutexGuard<base::kExclusive> mutex_guard(
+ isolate_->map_updater_access());
+
modified_descriptor_ = descriptor;
new_kind_ = kData;
new_attributes_ = attributes;
@@ -125,50 +129,6 @@ Handle<Map> MapUpdater::ReconfigureToDataField(InternalIndex descriptor,
PropertyDetails old_details =
old_descriptors_->GetDetails(modified_descriptor_);
- // If the {descriptor} was "const" data field so far, we need to update the
- // {old_map_} here, otherwise we could get the constants wrong, i.e.
- //
- // o.x = 1;
- // change o.x's attributes to something else
- // delete o.x;
- // o.x = 2;
- //
- // could trick V8 into thinking that `o.x` is still 1 even after the second
- // assignment.
- // This situation is similar to what might happen with property deletion.
- if (old_details.constness() == PropertyConstness::kConst &&
- old_details.location() == kField &&
- old_details.attributes() != new_attributes_) {
- // Ensure we'll be updating constness of the up-to-date version of old_map_.
- Handle<Map> old_map = Map::Update(isolate_, old_map_);
- PropertyDetails details =
- old_map->instance_descriptors(kRelaxedLoad).GetDetails(descriptor);
- Handle<FieldType> field_type(
- old_map->instance_descriptors(kRelaxedLoad).GetFieldType(descriptor),
- isolate_);
- Map::GeneralizeField(isolate_, old_map, descriptor,
- PropertyConstness::kMutable, details.representation(),
- field_type);
- DCHECK_EQ(PropertyConstness::kMutable,
- old_map->instance_descriptors(kRelaxedLoad)
- .GetDetails(descriptor)
- .constness());
- // The old_map_'s property must become mutable.
- // Note, that the {old_map_} and {old_descriptors_} are not expected to be
- // updated by the generalization if the map is already deprecated.
- DCHECK_IMPLIES(
- !old_map_->is_deprecated(),
- PropertyConstness::kMutable ==
- old_descriptors_->GetDetails(modified_descriptor_).constness());
- // Although the property in the old map is marked as mutable we still
- // treat it as constant when merging with the new path in transition tree.
- // This is fine because up until this reconfiguration the field was
- // known to be constant, so it's fair to proceed treating it as such
- // during this reconfiguration session. The issue is that after the
- // reconfiguration the original field might become mutable (see the delete
- // example above).
- }
-
// If the property kind is not reconfigured, merge the result with the
// representation/field type from the old descriptor.
if (old_details.kind() == new_kind_) {
@@ -209,6 +169,10 @@ Handle<Map> MapUpdater::ReconfigureToDataField(InternalIndex descriptor,
Handle<Map> MapUpdater::ReconfigureElementsKind(ElementsKind elements_kind) {
DCHECK_EQ(kInitialized, state_);
+
+ base::SharedMutexGuard<base::kExclusive> mutex_guard(
+ isolate_->map_updater_access());
+
new_elements_kind_ = elements_kind;
is_transitionable_fast_elements_kind_ =
IsTransitionableFastElementsKind(new_elements_kind_);
@@ -222,7 +186,23 @@ Handle<Map> MapUpdater::ReconfigureElementsKind(ElementsKind elements_kind) {
return result_map_;
}
+// static
+Handle<Map> MapUpdater::UpdateMapNoLock(Isolate* isolate, Handle<Map> map) {
+ if (!map->is_deprecated()) return map;
+ // TODO(ishell): support fast map updating if we enable it.
+ CHECK(!FLAG_fast_map_update);
+ MapUpdater mu(isolate, map);
+ // Update map without locking the Isolate::map_updater_access mutex.
+ return mu.UpdateImpl();
+}
+
Handle<Map> MapUpdater::Update() {
+ base::SharedMutexGuard<base::kExclusive> mutex_guard(
+ isolate_->map_updater_access());
+ return UpdateImpl();
+}
+
+Handle<Map> MapUpdater::UpdateImpl() {
DCHECK_EQ(kInitialized, state_);
DCHECK(old_map_->is_deprecated());
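Update(), ReconfigureToDataField() and ReconfigureElementsKind() now all serialize behind the isolate's map_updater_access mutex, while UpdateMapNoLock() reuses the same body without taking it. A self-contained sketch of that shape, with std::shared_mutex standing in for the isolate's shared mutex; class and member names are illustrative:

#include <shared_mutex>

class ToyUpdater {
 public:
  int Update() {
    std::unique_lock<std::shared_mutex> guard(mutex_);  // kExclusive access.
    return UpdateImpl();
  }
  // For callers that must not (re)take the lock, e.g. because blocking is
  // forbidden in their context.
  int UpdateNoLock() { return UpdateImpl(); }

 private:
  int UpdateImpl() { return ++generation_; }  // The shared, lock-agnostic body.

  std::shared_mutex mutex_;
  int generation_ = 0;
};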
@@ -245,9 +225,9 @@ void MapUpdater::GeneralizeField(Handle<Map> map, InternalIndex modify_index,
Map::GeneralizeField(isolate_, map, modify_index, new_constness,
new_representation, new_field_type);
- DCHECK(*old_descriptors_ == old_map_->instance_descriptors(kRelaxedLoad) ||
+ DCHECK(*old_descriptors_ == old_map_->instance_descriptors(isolate_) ||
*old_descriptors_ ==
- integrity_source_map_->instance_descriptors(kRelaxedLoad));
+ integrity_source_map_->instance_descriptors(isolate_));
}
MapUpdater::State MapUpdater::Normalize(const char* reason) {
@@ -341,8 +321,8 @@ bool MapUpdater::TrySaveIntegrityLevelTransitions() {
integrity_source_map_->NumberOfOwnDescriptors());
has_integrity_level_transition_ = true;
- old_descriptors_ = handle(
- integrity_source_map_->instance_descriptors(kRelaxedLoad), isolate_);
+ old_descriptors_ =
+ handle(integrity_source_map_->instance_descriptors(isolate_), isolate_);
return true;
}
@@ -438,7 +418,7 @@ MapUpdater::State MapUpdater::FindTargetMap() {
Handle<Map> tmp_map(transition, isolate_);
Handle<DescriptorArray> tmp_descriptors(
- tmp_map->instance_descriptors(kRelaxedLoad), isolate_);
+ tmp_map->instance_descriptors(isolate_), isolate_);
// Check if target map is incompatible.
PropertyDetails tmp_details = tmp_descriptors->GetDetails(i);
@@ -486,7 +466,7 @@ MapUpdater::State MapUpdater::FindTargetMap() {
#ifdef DEBUG
if (modified_descriptor_.is_found()) {
DescriptorArray target_descriptors =
- target_map_->instance_descriptors(kRelaxedLoad);
+ target_map_->instance_descriptors(isolate_);
PropertyDetails details =
target_descriptors.GetDetails(modified_descriptor_);
DCHECK_EQ(new_kind_, details.kind());
@@ -535,7 +515,7 @@ MapUpdater::State MapUpdater::FindTargetMap() {
if (transition.is_null()) break;
Handle<Map> tmp_map(transition, isolate_);
Handle<DescriptorArray> tmp_descriptors(
- tmp_map->instance_descriptors(kRelaxedLoad), isolate_);
+ tmp_map->instance_descriptors(isolate_), isolate_);
#ifdef DEBUG
// Check that target map is compatible.
PropertyDetails tmp_details = tmp_descriptors->GetDetails(i);
@@ -559,7 +539,7 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
InstanceType instance_type = old_map_->instance_type();
int target_nof = target_map_->NumberOfOwnDescriptors();
Handle<DescriptorArray> target_descriptors(
- target_map_->instance_descriptors(kRelaxedLoad), isolate_);
+ target_map_->instance_descriptors(isolate_), isolate_);
// Allocate a new descriptor array large enough to hold the required
// descriptors, with minimally the exact same size as the old descriptor
@@ -734,7 +714,7 @@ Handle<Map> MapUpdater::FindSplitMap(Handle<DescriptorArray> descriptors) {
TransitionsAccessor(isolate_, current, &no_gc)
.SearchTransition(name, details.kind(), details.attributes());
if (next.is_null()) break;
- DescriptorArray next_descriptors = next.instance_descriptors(kRelaxedLoad);
+ DescriptorArray next_descriptors = next.instance_descriptors(isolate_);
PropertyDetails next_details = next_descriptors.GetDetails(i);
DCHECK_EQ(details.kind(), next_details.kind());
diff --git a/deps/v8/src/objects/map-updater.h b/deps/v8/src/objects/map-updater.h
index 11bdd0859ff..427ddc90e83 100644
--- a/deps/v8/src/objects/map-updater.h
+++ b/deps/v8/src/objects/map-updater.h
@@ -19,15 +19,15 @@ namespace internal {
// including changes of elements kind, property attributes, property kind,
// property location and field representations/type changes. It ensures that
// the reconfigured map and all the intermediate maps are properly integrated
-// into the exising transition tree.
+// into the existing transition tree.
//
// To avoid high degrees of polymorphism, and to stabilize quickly, on every
// rewrite the new type is deduced by merging the current type with any
// potential new (partial) version of the type in the transition tree.
// To do this, on each rewrite:
// - Search the root of the transition tree using FindRootMap, remember
-// the integrity level (preventExtensions/seal/freeze) transitions.
-// - Find/create a |root_map| with requested |new_elements_kind|.
+// the integrity level (preventExtensions/seal/freeze) of transitions.
+// - Find/create a |root_map| with the requested |new_elements_kind|.
// - Find |target_map|, the newest matching version of this map using the
// "updated" |old_map|'s descriptor array (i.e. whose entry at |modify_index|
// is considered to be of |new_kind| and having |new_attributes|) to walk
@@ -40,31 +40,31 @@ namespace internal {
// - Generalize the |modify_index| descriptor using |new_representation| and
// |new_field_type|.
// - Walk the tree again starting from the root towards |target_map|. Stop at
-// |split_map|, the first map who's descriptor array does not match the merged
+// |split_map|, the first map whose descriptor array does not match the merged
// descriptor array.
// - If |target_map| == |split_map|, and there are no integrity level
// transitions, |target_map| is in the expected state. Return it.
// - Otherwise, invalidate the outdated transition target from |target_map|, and
// replace its transition tree with a new branch for the updated descriptors.
// - If the |old_map| had integrity level transition, create the new map for it.
-class MapUpdater {
+class V8_EXPORT_PRIVATE MapUpdater {
public:
MapUpdater(Isolate* isolate, Handle<Map> old_map);
// Prepares for reconfiguring of a property at |descriptor| to data field
// with given |attributes| and |representation|/|field_type| and
- // performs the steps 1-5.
+ // performs the steps 1-6.
Handle<Map> ReconfigureToDataField(InternalIndex descriptor,
PropertyAttributes attributes,
PropertyConstness constness,
Representation representation,
Handle<FieldType> field_type);
- // Prepares for reconfiguring elements kind and performs the steps 1-5.
+ // Prepares for reconfiguring elements kind and performs the steps 1-6.
Handle<Map> ReconfigureElementsKind(ElementsKind elements_kind);
// Prepares for updating deprecated map to most up-to-date non-deprecated
- // version and performs the steps 1-5.
+ // version and performs the steps 1-6.
Handle<Map> Update();
private:
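The step list above is easier to follow with a toy model of the transition tree: maps form a chain through back pointers, and the split map is the last map whose own descriptors still agree with the merged array. A self-contained sketch under those simplifications (real DescriptorArrays carry far more than ints, and the helper names below are not the real FindRootMap/FindSplitMap):

#include <cstddef>
#include <vector>

struct ToyMap {
  ToyMap* back = nullptr;          // Back pointer toward the root map.
  std::vector<int> descriptors;    // Stand-in for a DescriptorArray prefix.
};

// Step 1: the root map is found by following back pointers to the end.
ToyMap* FindRoot(ToyMap* map) {
  while (map->back != nullptr) map = map->back;
  return map;
}

// Step 4: walk |root_to_target| (root first) and stop at the first map whose
// own descriptors no longer match the merged |expected| array. Assumes a
// non-empty path and that |expected| is at least as long as each map's own
// descriptors, which holds along a root-to-target chain.
ToyMap* FindSplit(const std::vector<ToyMap*>& root_to_target,
                  const std::vector<int>& expected) {
  ToyMap* split = root_to_target.front();
  for (ToyMap* map : root_to_target) {
    for (std::size_t i = 0; i < map->descriptors.size(); ++i) {
      if (map->descriptors[i] != expected[i]) return split;
    }
    split = map;
  }
  return split;
}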
@@ -76,6 +76,16 @@ class MapUpdater {
kEnd
};
+ // Updates map to the most up-to-date non-deprecated version.
+ static inline Handle<Map> UpdateMapNoLock(Isolate* isolate,
+ Handle<Map> old_map);
+
+ // Prepares for updating deprecated map to most up-to-date non-deprecated
+ // version and performs the steps 1-6.
+ // Unlike the Update() entry point it doesn't lock the map_updater_access
+ // mutex.
+ Handle<Map> UpdateImpl();
+
// Try to reconfigure property in-place without rebuilding transition tree
// and creating new maps. See implementation for details.
State TryReconfigureToDataFieldInplace();
@@ -105,7 +115,7 @@ class MapUpdater {
// Step 4.
// - Walk the tree again starting from the root towards |target_map|. Stop at
- // |split_map|, the first map who's descriptor array does not match the
+ // |split_map|, the first map whose descriptor array does not match the
// merged descriptor array.
Handle<Map> FindSplitMap(Handle<DescriptorArray> descriptors);
@@ -117,7 +127,7 @@ class MapUpdater {
// descriptors.
State ConstructNewMap();
- // Step 6 (if there was
+ // Step 6.
// - If the |old_map| had integrity level transition, create the new map
// for it.
State ConstructNewMapWithIntegrityLevelTransition();
@@ -129,20 +139,20 @@ class MapUpdater {
// Returns name of a |descriptor| property.
inline Name GetKey(InternalIndex descriptor) const;
- // Returns property details of a |descriptor| in "updated" |old_descrtiptors_|
+ // Returns property details of a |descriptor| in "updated" |old_descriptors_|
// array.
inline PropertyDetails GetDetails(InternalIndex descriptor) const;
// Returns value of a |descriptor| with kDescriptor location in "updated"
- // |old_descrtiptors_| array.
+ // |old_descriptors_| array.
inline Object GetValue(InternalIndex descriptor) const;
// Returns field type for a |descriptor| with kField location in "updated"
- // |old_descrtiptors_| array.
+ // |old_descriptors_| array.
inline FieldType GetFieldType(InternalIndex descriptor) const;
// If a |descriptor| property in "updated" |old_descriptors_| has kField
- // location then returns it's field type otherwise computes optimal field
+ // location then returns its field type, otherwise computes the optimal field
// type for the descriptor's value and |representation|. The |location|
// value must be a pre-fetched location for |descriptor|.
inline Handle<FieldType> GetOrComputeFieldType(
@@ -150,7 +160,7 @@ class MapUpdater {
Representation representation) const;
// If a |descriptor| property in given |descriptors| array has kField
- // location then returns it's field type otherwise computes optimal field
+ // location then returns its field type, otherwise computes the optimal field
// type for the descriptor's value and |representation|.
// The |location| value must be a pre-fetched location for |descriptor|.
inline Handle<FieldType> GetOrComputeFieldType(
diff --git a/deps/v8/src/objects/map.cc b/deps/v8/src/objects/map.cc
index 8bfbe5812ba..0f281519b79 100644
--- a/deps/v8/src/objects/map.cc
+++ b/deps/v8/src/objects/map.cc
@@ -64,7 +64,7 @@ void Map::PrintReconfiguration(Isolate* isolate, FILE* file,
PropertyAttributes attributes) {
OFStream os(file);
os << "[reconfiguring]";
- Name name = instance_descriptors(kRelaxedLoad).GetKey(modify_index);
+ Name name = instance_descriptors(isolate).GetKey(modify_index);
if (name.IsString()) {
String::cast(name).PrintOn(file);
} else {
@@ -247,9 +247,6 @@ VisitorId Map::GetVisitorId(Map map) {
case CODE_DATA_CONTAINER_TYPE:
return kVisitCodeDataContainer;
- case WASM_INSTANCE_OBJECT_TYPE:
- return kVisitWasmInstanceObject;
-
case PREPARSE_DATA_TYPE:
return kVisitPreparseData;
@@ -306,12 +303,14 @@ VisitorId Map::GetVisitorId(Map map) {
case JS_SEGMENTER_TYPE:
case JS_SEGMENTS_TYPE:
#endif // V8_INTL_SUPPORT
+#if V8_ENABLE_WEBASSEMBLY
case WASM_EXCEPTION_OBJECT_TYPE:
case WASM_GLOBAL_OBJECT_TYPE:
case WASM_MEMORY_OBJECT_TYPE:
case WASM_MODULE_OBJECT_TYPE:
case WASM_TABLE_OBJECT_TYPE:
case WASM_VALUE_OBJECT_TYPE:
+#endif // V8_ENABLE_WEBASSEMBLY
case JS_BOUND_FUNCTION_TYPE: {
const bool has_raw_data_fields =
COMPRESS_POINTERS_BOOL && JSObject::GetEmbedderFieldCount(map) > 0;
@@ -346,12 +345,14 @@ VisitorId Map::GetVisitorId(Map map) {
if (instance_type == PROTOTYPE_INFO_TYPE) {
return kVisitPrototypeInfo;
}
+#if V8_ENABLE_WEBASSEMBLY
if (instance_type == WASM_CAPI_FUNCTION_DATA_TYPE) {
return kVisitWasmCapiFunctionData;
}
if (instance_type == WASM_INDIRECT_FUNCTION_TABLE_TYPE) {
return kVisitWasmIndirectFunctionTable;
}
+#endif // V8_ENABLE_WEBASSEMBLY
return kVisitStruct;
case LOAD_HANDLER_TYPE:
@@ -363,12 +364,16 @@ VisitorId Map::GetVisitorId(Map map) {
case SYNTHETIC_MODULE_TYPE:
return kVisitSyntheticModule;
+#if V8_ENABLE_WEBASSEMBLY
+ case WASM_INSTANCE_OBJECT_TYPE:
+ return kVisitWasmInstanceObject;
case WASM_ARRAY_TYPE:
return kVisitWasmArray;
case WASM_STRUCT_TYPE:
return kVisitWasmStruct;
case WASM_TYPE_INFO_TYPE:
return kVisitWasmTypeInfo;
+#endif // V8_ENABLE_WEBASSEMBLY
#define MAKE_TQ_CASE(TYPE, Name) \
case TYPE: \
@@ -391,7 +396,7 @@ void Map::PrintGeneralization(
MaybeHandle<Object> new_value) {
OFStream os(file);
os << "[generalizing]";
- Name name = instance_descriptors(kRelaxedLoad).GetKey(modify_index);
+ Name name = instance_descriptors(isolate).GetKey(modify_index);
if (name.IsString()) {
String::cast(name).PrintOn(file);
} else {
@@ -452,7 +457,7 @@ MaybeHandle<Map> Map::CopyWithField(Isolate* isolate, Handle<Map> map,
PropertyConstness constness,
Representation representation,
TransitionFlag flag) {
- DCHECK(map->instance_descriptors(kRelaxedLoad)
+ DCHECK(map->instance_descriptors(isolate)
.Search(*name, map->NumberOfOwnDescriptors())
.is_not_found());
@@ -498,12 +503,6 @@ MaybeHandle<Map> Map::CopyWithConstant(Isolate* isolate, Handle<Map> map,
PropertyConstness::kConst, representation, flag);
}
-bool Map::TransitionRequiresSynchronizationWithGC(Map target) const {
- int inobject = NumberOfFields();
- int target_inobject = target.NumberOfFields();
- return target_inobject < inobject;
-}
-
bool Map::InstancesNeedRewriting(Map target) const {
int target_number_of_fields = target.NumberOfFields();
int target_inobject = target.GetInObjectProperties();
@@ -524,8 +523,8 @@ bool Map::InstancesNeedRewriting(Map target, int target_number_of_fields,
if (target_number_of_fields != *old_number_of_fields) return true;
// If smi descriptors were replaced by double descriptors, rewrite.
- DescriptorArray old_desc = instance_descriptors(kRelaxedLoad);
- DescriptorArray new_desc = target.instance_descriptors(kRelaxedLoad);
+ DescriptorArray old_desc = instance_descriptors();
+ DescriptorArray new_desc = target.instance_descriptors();
for (InternalIndex i : IterateOwnDescriptors()) {
if (new_desc.GetDetails(i).representation().IsDouble() !=
old_desc.GetDetails(i).representation().IsDouble()) {
@@ -549,7 +548,7 @@ bool Map::InstancesNeedRewriting(Map target, int target_number_of_fields,
}
int Map::NumberOfFields() const {
- DescriptorArray descriptors = instance_descriptors(kRelaxedLoad);
+ DescriptorArray descriptors = instance_descriptors();
int result = 0;
for (InternalIndex i : IterateOwnDescriptors()) {
if (descriptors.GetDetails(i).location() == kField) result++;
@@ -558,7 +557,7 @@ int Map::NumberOfFields() const {
}
Map::FieldCounts Map::GetFieldCounts() const {
- DescriptorArray descriptors = instance_descriptors(kRelaxedLoad);
+ DescriptorArray descriptors = instance_descriptors();
int mutable_count = 0;
int const_count = 0;
for (InternalIndex i : IterateOwnDescriptors()) {
@@ -610,7 +609,7 @@ void Map::ReplaceDescriptors(Isolate* isolate,
return;
}
- DescriptorArray to_replace = instance_descriptors(kRelaxedLoad);
+ DescriptorArray to_replace = instance_descriptors(isolate);
// Replace descriptors by new_descriptors in all maps that share it. The old
// descriptors will not be trimmed in the mark-compactor, so we need to mark
// all its elements.
@@ -618,7 +617,7 @@ void Map::ReplaceDescriptors(Isolate* isolate,
#ifndef V8_DISABLE_WRITE_BARRIERS
WriteBarrier::Marking(to_replace, to_replace.number_of_descriptors());
#endif
- while (current.instance_descriptors(isolate, kRelaxedLoad) == to_replace) {
+ while (current.instance_descriptors(isolate) == to_replace) {
Object next = current.GetBackPointer(isolate);
if (next.IsUndefined(isolate)) break; // Stop overwriting at initial map.
current.SetEnumLength(kInvalidEnumCacheSentinel);
@@ -630,15 +629,16 @@ void Map::ReplaceDescriptors(Isolate* isolate,
}
Map Map::FindRootMap(Isolate* isolate) const {
+ DisallowGarbageCollection no_gc;
Map result = *this;
while (true) {
Object back = result.GetBackPointer(isolate);
if (back.IsUndefined(isolate)) {
// Initial map must not contain descriptors in the descriptors array
// that do not belong to the map.
- DCHECK_LE(
- result.NumberOfOwnDescriptors(),
- result.instance_descriptors(kRelaxedLoad).number_of_descriptors());
+ DCHECK_LE(result.NumberOfOwnDescriptors(),
+ result.instance_descriptors(isolate, kRelaxedLoad)
+ .number_of_descriptors());
return result;
}
result = Map::cast(back);
@@ -669,7 +669,7 @@ void Map::UpdateFieldType(Isolate* isolate, InternalIndex descriptor,
// We store raw pointers in the queue, so no allocations are allowed.
DisallowGarbageCollection no_gc;
PropertyDetails details =
- instance_descriptors(kRelaxedLoad).GetDetails(descriptor);
+ instance_descriptors(isolate).GetDetails(descriptor);
if (details.location() != kField) return;
DCHECK_EQ(kData, details.kind());
@@ -691,7 +691,7 @@ void Map::UpdateFieldType(Isolate* isolate, InternalIndex descriptor,
Map target = transitions.GetTarget(i);
backlog.push(target);
}
- DescriptorArray descriptors = current.instance_descriptors(kRelaxedLoad);
+ DescriptorArray descriptors = current.instance_descriptors(isolate);
PropertyDetails details = descriptors.GetDetails(descriptor);
// It is allowed to change representation here only from None
@@ -739,8 +739,8 @@ void Map::GeneralizeField(Isolate* isolate, Handle<Map> map,
Representation new_representation,
Handle<FieldType> new_field_type) {
// Check if we actually need to generalize the field type at all.
- Handle<DescriptorArray> old_descriptors(
- map->instance_descriptors(kRelaxedLoad), isolate);
+ Handle<DescriptorArray> old_descriptors(map->instance_descriptors(isolate),
+ isolate);
PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
PropertyConstness old_constness = old_details.constness();
Representation old_representation = old_details.representation();
@@ -764,7 +764,7 @@ void Map::GeneralizeField(Isolate* isolate, Handle<Map> map,
// Determine the field owner.
Handle<Map> field_owner(map->FindFieldOwner(isolate, modify_index), isolate);
Handle<DescriptorArray> descriptors(
- field_owner->instance_descriptors(kRelaxedLoad), isolate);
+ field_owner->instance_descriptors(isolate), isolate);
DCHECK_EQ(*old_field_type, descriptors->GetFieldType(modify_index));
new_field_type =
@@ -806,29 +806,6 @@ void Map::GeneralizeField(Isolate* isolate, Handle<Map> map,
}
}
-// TODO(ishell): remove.
-// static
-Handle<Map> Map::ReconfigureProperty(Isolate* isolate, Handle<Map> map,
- InternalIndex modify_index,
- PropertyKind new_kind,
- PropertyAttributes new_attributes,
- Representation new_representation,
- Handle<FieldType> new_field_type) {
- DCHECK_EQ(kData, new_kind); // Only kData case is supported.
- MapUpdater mu(isolate, map);
- return mu.ReconfigureToDataField(modify_index, new_attributes,
- PropertyConstness::kConst,
- new_representation, new_field_type);
-}
-
-// TODO(ishell): remove.
-// static
-Handle<Map> Map::ReconfigureElementsKind(Isolate* isolate, Handle<Map> map,
- ElementsKind new_elements_kind) {
- MapUpdater mu(isolate, map);
- return mu.ReconfigureElementsKind(new_elements_kind);
-}
-
namespace {
Map SearchMigrationTarget(Isolate* isolate, Map old_map) {
@@ -849,7 +826,7 @@ Map SearchMigrationTarget(Isolate* isolate, Map old_map) {
// types instead of old_map's types.
// Go to slow map updating if the old_map has fast properties with cleared
// field types.
- DescriptorArray old_descriptors = old_map.instance_descriptors(kRelaxedLoad);
+ DescriptorArray old_descriptors = old_map.instance_descriptors(isolate);
for (InternalIndex i : old_map.IterateOwnDescriptors()) {
PropertyDetails old_details = old_descriptors.GetDetails(i);
if (old_details.location() == kField && old_details.kind() == kData) {
@@ -1011,7 +988,7 @@ Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map) {
int root_nof = NumberOfOwnDescriptors();
int old_nof = old_map.NumberOfOwnDescriptors();
- DescriptorArray old_descriptors = old_map.instance_descriptors(kRelaxedLoad);
+ DescriptorArray old_descriptors = old_map.instance_descriptors(isolate);
Map new_map = *this;
for (InternalIndex i : InternalIndex::Range(root_nof, old_nof)) {
@@ -1022,8 +999,7 @@ Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map) {
old_details.attributes());
if (transition.is_null()) return Map();
new_map = transition;
- DescriptorArray new_descriptors =
- new_map.instance_descriptors(kRelaxedLoad);
+ DescriptorArray new_descriptors = new_map.instance_descriptors(isolate);
PropertyDetails new_details = new_descriptors.GetDetails(i);
DCHECK_EQ(old_details.kind(), new_details.kind());
@@ -1088,7 +1064,7 @@ void Map::EnsureDescriptorSlack(Isolate* isolate, Handle<Map> map, int slack) {
// Only supports adding slack to owned descriptors.
DCHECK(map->owns_descriptors());
- Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(isolate),
isolate);
int old_size = map->NumberOfOwnDescriptors();
if (slack <= descriptors->number_of_slack_descriptors()) return;
@@ -1117,16 +1093,21 @@ void Map::EnsureDescriptorSlack(Isolate* isolate, Handle<Map> map, int slack) {
WriteBarrier::Marking(*descriptors, descriptors->number_of_descriptors());
#endif
- Map current = *map;
- while (current.instance_descriptors(kRelaxedLoad) == *descriptors) {
- Object next = current.GetBackPointer();
- if (next.IsUndefined(isolate)) break; // Stop overwriting at initial map.
+ // Update the descriptors from {map} (inclusive) until the initial map
+ // (exclusive). In the case that {map} is the initial map, update it.
+ map->UpdateDescriptors(isolate, *new_descriptors,
+ map->NumberOfOwnDescriptors());
+ Object next = map->GetBackPointer();
+ if (next.IsUndefined(isolate)) return;
+
+ Map current = Map::cast(next);
+ while (current.instance_descriptors(isolate) == *descriptors) {
+ next = current.GetBackPointer();
+ if (next.IsUndefined(isolate)) break;
current.UpdateDescriptors(isolate, *new_descriptors,
current.NumberOfOwnDescriptors());
current = Map::cast(next);
}
- map->UpdateDescriptors(isolate, *new_descriptors,
- map->NumberOfOwnDescriptors());
}
// static
@@ -1320,7 +1301,7 @@ Handle<Map> Map::TransitionElementsTo(Isolate* isolate, Handle<Map> map,
return Map::CopyAsElementsKind(isolate, map, to_kind, OMIT_TRANSITION);
}
- return Map::ReconfigureElementsKind(isolate, map, to_kind);
+ return MapUpdater{isolate, map}.ReconfigureElementsKind(to_kind);
}
static Handle<Map> AddMissingElementsTransitions(Isolate* isolate,
@@ -1381,7 +1362,7 @@ int Map::NumberOfEnumerableProperties() const {
int Map::NextFreePropertyIndex() const {
int number_of_own_descriptors = NumberOfOwnDescriptors();
- DescriptorArray descs = instance_descriptors(kRelaxedLoad);
+ DescriptorArray descs = instance_descriptors();
// Search properties backwards to find the last field.
for (int i = number_of_own_descriptors - 1; i >= 0; --i) {
PropertyDetails details = descs.GetDetails(InternalIndex(i));
@@ -1437,7 +1418,9 @@ Handle<Map> Map::RawCopy(Isolate* isolate, Handle<Map> map, int instance_size,
Handle<HeapObject> prototype(map->prototype(), isolate);
Map::SetPrototype(isolate, result, prototype);
result->set_constructor_or_back_pointer(map->GetConstructor());
- result->set_bit_field(map->bit_field());
+ // TODO(solanes, v8:7790, v8:11353): set_relaxed_bit_field could be an atomic
+ // set if TSAN could see the transitions happening in StoreIC.
+ result->set_relaxed_bit_field(map->bit_field());
result->set_bit_field2(map->bit_field2());
int new_bit_field3 = map->bit_field3();
new_bit_field3 = Bits3::OwnsDescriptorsBit::update(new_bit_field3, true);
@@ -1589,7 +1572,7 @@ void EnsureInitialMap(Isolate* isolate, Handle<Map> map) {
// Initial maps must not contain descriptors in the descriptors array
// that do not belong to the map.
DCHECK_EQ(map->NumberOfOwnDescriptors(),
- map->instance_descriptors(kRelaxedLoad).number_of_descriptors());
+ map->instance_descriptors(isolate).number_of_descriptors());
}
} // namespace
@@ -1615,7 +1598,7 @@ Handle<Map> Map::CopyInitialMap(Isolate* isolate, Handle<Map> map,
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
if (number_of_own_descriptors > 0) {
// The copy will use the same descriptors array without ownership.
- DescriptorArray descriptors = map->instance_descriptors(kRelaxedLoad);
+ DescriptorArray descriptors = map->instance_descriptors(isolate);
result->set_owns_descriptors(false);
result->UpdateDescriptors(isolate, descriptors, number_of_own_descriptors);
@@ -1646,7 +1629,7 @@ Handle<Map> Map::ShareDescriptor(Isolate* isolate, Handle<Map> map,
// array, implying that its NumberOfOwnDescriptors equals the number of
// descriptors in the descriptor array.
DCHECK_EQ(map->NumberOfOwnDescriptors(),
- map->instance_descriptors(kRelaxedLoad).number_of_descriptors());
+ map->instance_descriptors(isolate).number_of_descriptors());
Handle<Map> result = CopyDropDescriptors(isolate, map);
Handle<Name> name = descriptor->GetKey();
@@ -1664,7 +1647,7 @@ Handle<Map> Map::ShareDescriptor(Isolate* isolate, Handle<Map> map,
} else {
int slack = SlackForArraySize(old_size, kMaxNumberOfDescriptors);
EnsureDescriptorSlack(isolate, map, slack);
- descriptors = handle(map->instance_descriptors(kRelaxedLoad), isolate);
+ descriptors = handle(map->instance_descriptors(isolate), isolate);
}
}
@@ -1692,9 +1675,8 @@ void Map::ConnectTransition(Isolate* isolate, Handle<Map> parent,
} else if (!parent->IsDetached(isolate)) {
// |parent| is initial map and it must not contain descriptors in the
// descriptors array that do not belong to the map.
- DCHECK_EQ(
- parent->NumberOfOwnDescriptors(),
- parent->instance_descriptors(kRelaxedLoad).number_of_descriptors());
+ DCHECK_EQ(parent->NumberOfOwnDescriptors(),
+ parent->instance_descriptors(isolate).number_of_descriptors());
}
if (parent->IsDetached(isolate)) {
DCHECK(child->IsDetached(isolate));
@@ -1908,12 +1890,11 @@ Handle<Map> Map::CopyForElementsTransition(Isolate* isolate, Handle<Map> map) {
// transfer ownership to the new map.
// The properties did not change, so reuse descriptors.
map->set_owns_descriptors(false);
- new_map->InitializeDescriptors(isolate,
- map->instance_descriptors(kRelaxedLoad));
+ new_map->InitializeDescriptors(isolate, map->instance_descriptors(isolate));
} else {
// In case the map did not own its own descriptors, a split is forced by
// copying the map; creating a new descriptor array cell.
- Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(isolate),
isolate);
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
@@ -1924,7 +1905,7 @@ Handle<Map> Map::CopyForElementsTransition(Isolate* isolate, Handle<Map> map) {
}
Handle<Map> Map::Copy(Isolate* isolate, Handle<Map> map, const char* reason) {
- Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(isolate),
isolate);
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
@@ -1964,7 +1945,7 @@ Handle<Map> Map::CopyForPreventExtensions(
bool old_map_is_dictionary_elements_kind) {
int num_descriptors = map->NumberOfOwnDescriptors();
Handle<DescriptorArray> new_desc = DescriptorArray::CopyUpToAddAttributes(
- isolate, handle(map->instance_descriptors(kRelaxedLoad), isolate),
+ isolate, handle(map->instance_descriptors(isolate), isolate),
num_descriptors, attrs_to_add);
// Do not track transitions during bootstrapping.
TransitionFlag flag =
@@ -2059,14 +2040,13 @@ Handle<Map> UpdateDescriptorForValue(Isolate* isolate, Handle<Map> map,
InternalIndex descriptor,
PropertyConstness constness,
Handle<Object> value) {
- if (CanHoldValue(map->instance_descriptors(kRelaxedLoad), descriptor,
- constness, *value)) {
+ if (CanHoldValue(map->instance_descriptors(isolate), descriptor, constness,
+ *value)) {
return map;
}
- PropertyAttributes attributes = map->instance_descriptors(kRelaxedLoad)
- .GetDetails(descriptor)
- .attributes();
+ PropertyAttributes attributes =
+ map->instance_descriptors(isolate).GetDetails(descriptor).attributes();
Representation representation = value->OptimalRepresentation(isolate);
Handle<FieldType> type = value->OptimalType(isolate, representation);
@@ -2113,7 +2093,7 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
Handle<Map> transition(maybe_transition, isolate);
InternalIndex descriptor = transition->LastAdded();
- DCHECK_EQ(attributes, transition->instance_descriptors(kRelaxedLoad)
+ DCHECK_EQ(attributes, transition->instance_descriptors(isolate)
.GetDetails(descriptor)
.attributes());
@@ -2158,7 +2138,7 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
reason);
initial_map->DeprecateTransitionTree(isolate);
Handle<HeapObject> prototype(result->prototype(), isolate);
- JSFunction::SetInitialMap(constructor, result, prototype);
+ JSFunction::SetInitialMap(isolate, constructor, result, prototype);
// Deoptimize all code that embeds the previous initial map.
initial_map->dependent_code().DeoptimizeDependentCodeGroup(
@@ -2233,8 +2213,7 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
.SearchTransition(*name, kAccessor, attributes);
if (!maybe_transition.is_null()) {
Handle<Map> transition(maybe_transition, isolate);
- DescriptorArray descriptors =
- transition->instance_descriptors(kRelaxedLoad);
+ DescriptorArray descriptors = transition->instance_descriptors(isolate);
InternalIndex descriptor = transition->LastAdded();
DCHECK(descriptors.GetKey(descriptor).Equals(*name));
@@ -2257,7 +2236,7 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
}
Handle<AccessorPair> pair;
- DescriptorArray old_descriptors = map->instance_descriptors(kRelaxedLoad);
+ DescriptorArray old_descriptors = map->instance_descriptors(isolate);
if (descriptor.is_found()) {
if (descriptor != map->LastAdded()) {
return Map::Normalize(isolate, map, mode, "AccessorsOverwritingNonLast");
@@ -2318,7 +2297,7 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
Handle<Map> Map::CopyAddDescriptor(Isolate* isolate, Handle<Map> map,
Descriptor* descriptor,
TransitionFlag flag) {
- Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(isolate),
isolate);
// Share descriptors only if the map owns descriptors and it is not an initial map.
@@ -2341,8 +2320,8 @@ Handle<Map> Map::CopyAddDescriptor(Isolate* isolate, Handle<Map> map,
Handle<Map> Map::CopyInsertDescriptor(Isolate* isolate, Handle<Map> map,
Descriptor* descriptor,
TransitionFlag flag) {
- Handle<DescriptorArray> old_descriptors(
- map->instance_descriptors(kRelaxedLoad), isolate);
+ Handle<DescriptorArray> old_descriptors(map->instance_descriptors(isolate),
+ isolate);
// We replace the key if it is already present.
InternalIndex index =
@@ -2380,20 +2359,17 @@ Handle<Map> Map::CopyReplaceDescriptor(Isolate* isolate, Handle<Map> map,
}
int Map::Hash() {
- // For performance reasons we only hash the 3 most variable fields of a map:
- // constructor, prototype and bit_field2. For predictability reasons we
- // use objects' offsets in respective pages for hashing instead of raw
- // addresses.
+ // For performance reasons we only hash the 2 most variable fields of a map:
+ // prototype map and bit_field2. For predictability reasons we use objects'
+ // offsets in respective pages for hashing instead of raw addresses. We use
+ // the map of the prototype because the prototype itself could be compacted,
+ // whereas the map will not be moved.
+ // NOTE: If we want to compact maps, this hash function won't work as intended
+ // anymore.
// Shift away the tag.
- int hash = ObjectAddressForHashing(GetConstructor().ptr()) >> 2;
-
- // XOR-ing the prototype and constructor directly yields too many zero bits
- // when the two pointers are close (which is fairly common).
- // To avoid this we shift the prototype bits relatively to the constructor.
- hash ^= ObjectAddressForHashing(prototype().ptr()) << (32 - kPageSizeBits);
-
- return hash ^ (hash >> 16) ^ bit_field2();
+ int hash = ObjectAddressForHashing(prototype().map().ptr()) >> 2;
+ return hash ^ bit_field2();
}
namespace {
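The new Hash() is essentially "page offset of the prototype's map, XORed with bit_field2". A self-contained approximation of that computation; the page-size constant is assumed here, and ObjectAddressForHashing is approximated by masking to the page offset rather than quoted:

#include <cstdint>

// Assumed 256 KiB pages (kPageSizeBits == 18); the real constant lives in
// V8's heap configuration and may differ.
constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << 18) - 1;

uint32_t MapHashSketch(uintptr_t prototype_map_address, uint8_t bit_field2) {
  // Hash the object's offset within its page so the value does not depend on
  // where the heap is mapped, and shift away the tag bits.
  uint32_t hash =
      static_cast<uint32_t>(prototype_map_address & kPageAlignmentMask) >> 2;
  return hash ^ bit_field2;
}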
@@ -2421,8 +2397,8 @@ bool Map::EquivalentToForTransition(const Map other) const {
// not equivalent to strict function.
int nof =
std::min(NumberOfOwnDescriptors(), other.NumberOfOwnDescriptors());
- return instance_descriptors(kRelaxedLoad)
- .IsEqualUpTo(other.instance_descriptors(kRelaxedLoad), nof);
+ return instance_descriptors().IsEqualUpTo(other.instance_descriptors(),
+ nof);
}
return true;
}
@@ -2433,7 +2409,7 @@ bool Map::EquivalentToForElementsKindTransition(const Map other) const {
// Ensure that we don't try to generate elements kind transitions from maps
// with fields that may be generalized in-place. This must already be handled
// during addition of a new field.
- DescriptorArray descriptors = instance_descriptors(kRelaxedLoad);
+ DescriptorArray descriptors = instance_descriptors();
for (InternalIndex i : IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(i);
if (details.location() == kField) {
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index 01b1bf3a65e..e31bb9a51f4 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -32,47 +32,47 @@ enum InstanceType : uint16_t;
V(FeedbackMetadata) \
V(FixedDoubleArray)
-#define POINTER_VISITOR_ID_LIST(V) \
- V(AllocationSite) \
- V(BytecodeArray) \
- V(Cell) \
- V(Code) \
- V(CodeDataContainer) \
- V(DataHandler) \
- V(EmbedderDataArray) \
- V(EphemeronHashTable) \
- V(FeedbackCell) \
- V(FreeSpace) \
- V(JSApiObject) \
- V(JSArrayBuffer) \
- V(JSDataView) \
- V(JSFunction) \
- V(JSObject) \
- V(JSObjectFast) \
- V(JSTypedArray) \
- V(JSWeakRef) \
- V(JSWeakCollection) \
- V(Map) \
- V(NativeContext) \
- V(PreparseData) \
- V(PropertyArray) \
- V(PropertyCell) \
- V(PrototypeInfo) \
- V(ShortcutCandidate) \
- V(SmallOrderedHashMap) \
- V(SmallOrderedHashSet) \
- V(SmallOrderedNameDictionary) \
- V(SourceTextModule) \
- V(Struct) \
- V(SwissNameDictionary) \
- V(Symbol) \
- V(SyntheticModule) \
- V(TransitionArray) \
- V(WasmIndirectFunctionTable) \
- V(WasmInstanceObject) \
- V(WasmArray) \
- V(WasmStruct) \
- V(WasmTypeInfo) \
+#define POINTER_VISITOR_ID_LIST(V) \
+ V(AllocationSite) \
+ V(BytecodeArray) \
+ V(Cell) \
+ V(Code) \
+ V(CodeDataContainer) \
+ V(DataHandler) \
+ V(EmbedderDataArray) \
+ V(EphemeronHashTable) \
+ V(FeedbackCell) \
+ V(FreeSpace) \
+ V(JSApiObject) \
+ V(JSArrayBuffer) \
+ V(JSDataView) \
+ V(JSFunction) \
+ V(JSObject) \
+ V(JSObjectFast) \
+ V(JSTypedArray) \
+ V(JSWeakRef) \
+ V(JSWeakCollection) \
+ V(Map) \
+ V(NativeContext) \
+ V(PreparseData) \
+ V(PropertyArray) \
+ V(PropertyCell) \
+ V(PrototypeInfo) \
+ V(ShortcutCandidate) \
+ V(SmallOrderedHashMap) \
+ V(SmallOrderedHashSet) \
+ V(SmallOrderedNameDictionary) \
+ V(SourceTextModule) \
+ V(Struct) \
+ V(SwissNameDictionary) \
+ V(Symbol) \
+ V(SyntheticModule) \
+ V(TransitionArray) \
+ IF_WASM(V, WasmIndirectFunctionTable) \
+ IF_WASM(V, WasmInstanceObject) \
+ IF_WASM(V, WasmArray) \
+ IF_WASM(V, WasmStruct) \
+ IF_WASM(V, WasmTypeInfo) \
V(WeakCell)
#define TORQUE_VISITOR_ID_LIST(V) \
@@ -245,6 +245,8 @@ class Map : public HeapObject {
// Bit field.
//
DECL_PRIMITIVE_ACCESSORS(bit_field, byte)
+ // Atomic accessors, used for allowlisting legitimate concurrent accesses.
+ DECL_PRIMITIVE_ACCESSORS(relaxed_bit_field, byte)
// Bit positions for |bit_field|.
struct Bits1 {
@@ -481,10 +483,6 @@ class Map : public HeapObject {
bool HasOutOfObjectProperties() const;
- // Returns true if transition to the given map requires special
- // synchronization with the concurrent marker.
- bool TransitionRequiresSynchronizationWithGC(Map target) const;
-
// TODO(ishell): candidate with JSObject::MigrateToMap().
bool InstancesNeedRewriting(Map target) const;
bool InstancesNeedRewriting(Map target, int target_number_of_fields,
@@ -514,14 +512,6 @@ class Map : public HeapObject {
Isolate* isolate, InstanceType instance_type,
Representation* representation, Handle<FieldType>* field_type);
- V8_EXPORT_PRIVATE static Handle<Map> ReconfigureProperty(
- Isolate* isolate, Handle<Map> map, InternalIndex modify_index,
- PropertyKind new_kind, PropertyAttributes new_attributes,
- Representation new_representation, Handle<FieldType> new_field_type);
-
- V8_EXPORT_PRIVATE static Handle<Map> ReconfigureElementsKind(
- Isolate* isolate, Handle<Map> map, ElementsKind new_elements_kind);
-
V8_EXPORT_PRIVATE static Handle<Map> PrepareForDataProperty(
Isolate* isolate, Handle<Map> old_map, InternalIndex descriptor_number,
PropertyConstness constness, Handle<Object> value);
@@ -580,6 +570,7 @@ class Map : public HeapObject {
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// [instance descriptors]: describes the object.
+ DECL_ACCESSORS(instance_descriptors, DescriptorArray)
DECL_RELAXED_ACCESSORS(instance_descriptors, DescriptorArray)
DECL_ACQUIRE_GETTER(instance_descriptors, DescriptorArray)
V8_EXPORT_PRIVATE void SetInstanceDescriptors(Isolate* isolate,
@@ -612,6 +603,7 @@ class Map : public HeapObject {
// chain state.
inline bool IsPrototypeValidityCellValid() const;
+ inline Name GetLastDescriptorName(Isolate* isolate) const;
inline PropertyDetails GetLastDescriptorDetails(Isolate* isolate) const;
inline InternalIndex LastAdded() const;
@@ -942,6 +934,11 @@ class Map : public HeapObject {
MaybeHandle<Object> old_value, MaybeHandle<FieldType> new_field_type,
MaybeHandle<Object> new_value);
+ // This is the equivalent of IsMap() but avoids reading the instance type so
+ // it can be used concurrently without acquire load.
+ V8_INLINE bool ConcurrentIsMap(PtrComprCageBase cage_base,
+ const Object& object) const;
+
// Use the high-level instance_descriptors/SetInstanceDescriptors instead.
DECL_RELEASE_SETTER(instance_descriptors, DescriptorArray)
@@ -972,7 +969,8 @@ class NormalizedMapCache : public WeakFixedArray {
DECL_VERIFIER(NormalizedMapCache)
private:
- friend bool HeapObject::IsNormalizedMapCache(IsolateRoot isolate) const;
+ friend bool HeapObject::IsNormalizedMapCache(
+ PtrComprCageBase cage_base) const;
static const int kEntries = 64;
diff --git a/deps/v8/src/objects/maybe-object-inl.h b/deps/v8/src/objects/maybe-object-inl.h
index 6cabc52312d..4b06fec5cb7 100644
--- a/deps/v8/src/objects/maybe-object-inl.h
+++ b/deps/v8/src/objects/maybe-object-inl.h
@@ -78,13 +78,14 @@ HeapObjectReference HeapObjectReference::From(Object object,
}
// static
-HeapObjectReference HeapObjectReference::ClearedValue(IsolateRoot isolate) {
+HeapObjectReference HeapObjectReference::ClearedValue(
+ PtrComprCageBase cage_base) {
// Construct cleared weak ref value.
#ifdef V8_COMPRESS_POINTERS
// This is necessary to make pointer decompression computation also
// suitable for cleared weak references.
Address raw_value =
- DecompressTaggedPointer(isolate, kClearedWeakHeapObjectLower32);
+ DecompressTaggedPointer(cage_base, kClearedWeakHeapObjectLower32);
#else
Address raw_value = kClearedWeakHeapObjectLower32;
#endif
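ClearedValue() now takes the cage base explicitly because, with pointer compression, a cleared weak reference is just a fixed 32-bit pattern re-based onto the cage. A sketch of the base-plus-offset idea; the constant's value and the exact decompression arithmetic are assumptions, the real logic lives in V8's pointer-compression headers:

#include <cstdint>

using ToyAddress = uintptr_t;

// Assumed bit pattern of a cleared weak reference; not the real constant.
constexpr uint32_t kClearedWeakLower32 = 3;

// Upper bits come from the cage base, lower 32 bits from the compressed word.
// (V8's real cage base is 4 GiB aligned, so a plain add suffices there.)
ToyAddress Decompress(ToyAddress cage_base_address, uint32_t compressed) {
  return (cage_base_address & ~ToyAddress{0xFFFFFFFFu}) + compressed;
}

ToyAddress ClearedWeakValue(ToyAddress cage_base_address) {
  return Decompress(cage_base_address, kClearedWeakLower32);
}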
diff --git a/deps/v8/src/objects/maybe-object.h b/deps/v8/src/objects/maybe-object.h
index 3fe69ee5ec7..0393ef6497b 100644
--- a/deps/v8/src/objects/maybe-object.h
+++ b/deps/v8/src/objects/maybe-object.h
@@ -54,7 +54,7 @@ class HeapObjectReference : public MaybeObject {
V8_INLINE static HeapObjectReference From(Object object,
HeapObjectReferenceType type);
- V8_INLINE static HeapObjectReference ClearedValue(IsolateRoot isolate);
+ V8_INLINE static HeapObjectReference ClearedValue(PtrComprCageBase cage_base);
template <typename THeapObjectSlot>
V8_INLINE static void Update(THeapObjectSlot slot, HeapObject value);
diff --git a/deps/v8/src/objects/name-inl.h b/deps/v8/src/objects/name-inl.h
index f07e1bb9ced..93c0cd3fa93 100644
--- a/deps/v8/src/objects/name-inl.h
+++ b/deps/v8/src/objects/name-inl.h
@@ -56,7 +56,7 @@ void Symbol::set_is_private_name() {
}
DEF_GETTER(Name, IsUniqueName, bool) {
- uint32_t type = map(isolate).instance_type();
+ uint32_t type = map(cage_base).instance_type();
bool result = (type & (kIsNotStringMask | kIsNotInternalizedMask)) !=
(kStringTag | kNotInternalizedTag);
SLOW_DCHECK(result == HeapObject::IsUniqueName());
@@ -104,23 +104,23 @@ uint32_t Name::hash() const {
}
DEF_GETTER(Name, IsInterestingSymbol, bool) {
- return IsSymbol(isolate) && Symbol::cast(*this).is_interesting_symbol();
+ return IsSymbol(cage_base) && Symbol::cast(*this).is_interesting_symbol();
}
DEF_GETTER(Name, IsPrivate, bool) {
- return this->IsSymbol(isolate) && Symbol::cast(*this).is_private();
+ return this->IsSymbol(cage_base) && Symbol::cast(*this).is_private();
}
DEF_GETTER(Name, IsPrivateName, bool) {
bool is_private_name =
- this->IsSymbol(isolate) && Symbol::cast(*this).is_private_name();
+ this->IsSymbol(cage_base) && Symbol::cast(*this).is_private_name();
DCHECK_IMPLIES(is_private_name, IsPrivate());
return is_private_name;
}
DEF_GETTER(Name, IsPrivateBrand, bool) {
bool is_private_brand =
- this->IsSymbol(isolate) && Symbol::cast(*this).is_private_brand();
+ this->IsSymbol(cage_base) && Symbol::cast(*this).is_private_brand();
DCHECK_IMPLIES(is_private_brand, IsPrivateName());
return is_private_brand;
}
diff --git a/deps/v8/src/objects/object-list-macros.h b/deps/v8/src/objects/object-list-macros.h
index 40ef44c7854..fc3f956d2db 100644
--- a/deps/v8/src/objects/object-list-macros.h
+++ b/deps/v8/src/objects/object-list-macros.h
@@ -214,17 +214,17 @@ class ZoneForwardList;
V(TransitionArray) \
V(Undetectable) \
V(UniqueName) \
- V(WasmArray) \
- V(WasmExceptionObject) \
- V(WasmExceptionPackage) \
- V(WasmGlobalObject) \
- V(WasmInstanceObject) \
- V(WasmMemoryObject) \
- V(WasmModuleObject) \
- V(WasmStruct) \
- V(WasmTypeInfo) \
- V(WasmTableObject) \
- V(WasmValueObject) \
+ IF_WASM(V, WasmArray) \
+ IF_WASM(V, WasmExceptionObject) \
+ IF_WASM(V, WasmExceptionPackage) \
+ IF_WASM(V, WasmGlobalObject) \
+ IF_WASM(V, WasmInstanceObject) \
+ IF_WASM(V, WasmMemoryObject) \
+ IF_WASM(V, WasmModuleObject) \
+ IF_WASM(V, WasmStruct) \
+ IF_WASM(V, WasmTypeInfo) \
+ IF_WASM(V, WasmTableObject) \
+ IF_WASM(V, WasmValueObject) \
V(WeakFixedArray) \
V(WeakArrayList) \
V(WeakCell) \
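The IF_WASM entries make each list conditional on the WebAssembly build flag. The helper macro is defined elsewhere in V8's headers; in spirit it expands like the sketch below (not the verbatim definition):

#if V8_ENABLE_WEBASSEMBLY
#define IF_WASM(V, ...) V(__VA_ARGS__)
#else
#define IF_WASM(V, ...)
#endif

// With WebAssembly enabled, IF_WASM(V, WasmArray) expands to V(WasmArray);
// with it disabled, the entry vanishes from the list entirely.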
diff --git a/deps/v8/src/objects/object-macros-undef.h b/deps/v8/src/objects/object-macros-undef.h
index d919e665a08..e2c5961ab2a 100644
--- a/deps/v8/src/objects/object-macros-undef.h
+++ b/deps/v8/src/objects/object-macros-undef.h
@@ -11,6 +11,8 @@
#undef OBJECT_CONSTRUCTORS_IMPL_CHECK_SUPER
#undef NEVER_READ_ONLY_SPACE
#undef NEVER_READ_ONLY_SPACE_IMPL
+#undef DECL_PRIMITIVE_GETTER
+#undef DECL_PRIMITIVE_SETTER
#undef DECL_PRIMITIVE_ACCESSORS
#undef DECL_SYNCHRONIZED_PRIMITIVE_ACCESSORS
#undef DECL_BOOLEAN_ACCESSORS
@@ -32,6 +34,7 @@
#undef DECL_ACQUIRE_GETTER
#undef DECL_RELEASE_SETTER
#undef DECL_RELEASE_ACQUIRE_ACCESSORS
+#undef DECL_RELEASE_ACQUIRE_WEAK_ACCESSORS
#undef DECL_CAST
#undef CAST_ACCESSOR
#undef INT_ACCESSORS
@@ -53,15 +56,18 @@
#undef WEAK_ACCESSORS_CHECKED2
#undef WEAK_ACCESSORS_CHECKED
#undef WEAK_ACCESSORS
-#undef SYNCHRONIZED_WEAK_ACCESSORS_CHECKED2
-#undef SYNCHRONIZED_WEAK_ACCESSORS_CHECKED
-#undef SYNCHRONIZED_WEAK_ACCESSORS
+#undef RELEASE_ACQUIRE_WEAK_ACCESSORS_CHECKED2
+#undef RELEASE_ACQUIRE_WEAK_ACCESSORS_CHECKED
+#undef RELEASE_ACQUIRE_WEAK_ACCESSORS
#undef SMI_ACCESSORS_CHECKED
#undef SMI_ACCESSORS
#undef SYNCHRONIZED_SMI_ACCESSORS
#undef RELAXED_SMI_ACCESSORS
#undef BOOL_GETTER
#undef BOOL_ACCESSORS
+#undef DECL_RELAXED_BOOL_ACCESSORS
+#undef RELAXED_BOOL_ACCESSORS
+#undef BIT_FIELD_ACCESSORS2
#undef BIT_FIELD_ACCESSORS
#undef INSTANCE_TYPE_CHECKER
#undef TYPE_CHECKER
@@ -83,6 +89,8 @@
#undef ACQUIRE_READ_INT32_FIELD
#undef RELAXED_WRITE_INT8_FIELD
#undef RELAXED_READ_INT8_FIELD
+#undef RELAXED_READ_UINT16_FIELD
+#undef RELAXED_WRITE_UINT16_FIELD
#undef RELAXED_READ_INT16_FIELD
#undef RELAXED_WRITE_INT16_FIELD
#undef RELAXED_READ_UINT32_FIELD
@@ -92,6 +100,10 @@
#undef RELAXED_READ_INT32_FIELD
#undef RELEASE_WRITE_INT32_FIELD
#undef RELAXED_WRITE_INT32_FIELD
+#undef RELAXED_READ_INT_FIELD
+#undef RELAXED_WRITE_INT_FIELD
+#undef RELAXED_READ_UINT_FIELD
+#undef RELAXED_WRITE_UINT_FIELD
#undef RELAXED_READ_BYTE_FIELD
#undef ACQUIRE_READ_BYTE_FIELD
#undef RELAXED_WRITE_BYTE_FIELD
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index f36751e1f96..2a742d5d77d 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -86,14 +86,14 @@
// parameter.
#define DECL_GETTER(name, type) \
inline type name() const; \
- inline type name(IsolateRoot isolate) const;
+ inline type name(PtrComprCageBase cage_base) const;
-#define DEF_GETTER(holder, name, type) \
- type holder::name() const { \
- IsolateRoot isolate = GetIsolateForPtrCompr(*this); \
- return holder::name(isolate); \
- } \
- type holder::name(IsolateRoot isolate) const
+#define DEF_GETTER(holder, name, type) \
+ type holder::name() const { \
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this); \
+ return holder::name(cage_base); \
+ } \
+ type holder::name(PtrComprCageBase cage_base) const
#define DECL_SETTER(name, type) \
inline void set_##name(type value, \
@@ -105,7 +105,7 @@
#define DECL_ACCESSORS_LOAD_TAG(name, type, tag_type) \
inline type name(tag_type tag) const; \
- inline type name(IsolateRoot isolate, tag_type) const;
+ inline type name(PtrComprCageBase cage_base, tag_type) const;
#define DECL_ACCESSORS_STORE_TAG(name, type, tag_type) \
inline void set_##name(type value, tag_type, \
@@ -179,7 +179,7 @@
#define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
set_condition) \
DEF_GETTER(holder, name, type) { \
- type value = TaggedField<type, offset>::load(isolate, *this); \
+ type value = TaggedField<type, offset>::load(cage_base, *this); \
DCHECK(get_condition); \
return value; \
} \
@@ -215,18 +215,18 @@
#define RELAXED_ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
set_condition) \
type holder::name(RelaxedLoadTag tag) const { \
- IsolateRoot isolate = GetIsolateForPtrCompr(*this); \
- return holder::name(isolate, tag); \
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this); \
+ return holder::name(cage_base, tag); \
} \
- type holder::name(IsolateRoot isolate, RelaxedLoadTag) const { \
- type value = TaggedField<type, offset>::load(isolate, *this); \
+ type holder::name(PtrComprCageBase cage_base, RelaxedLoadTag) const { \
+ type value = TaggedField<type, offset>::Relaxed_Load(cage_base, *this); \
DCHECK(get_condition); \
return value; \
} \
void holder::set_##name(type value, RelaxedStoreTag, \
WriteBarrierMode mode) { \
DCHECK(set_condition); \
- TaggedField<type, offset>::store(*this, value); \
+ TaggedField<type, offset>::Relaxed_Store(*this, value); \
CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode); \
}
@@ -236,22 +236,22 @@
#define RELAXED_ACCESSORS(holder, name, type, offset) \
RELAXED_ACCESSORS_CHECKED(holder, name, type, offset, true)
-#define RELEASE_ACQUIRE_ACCESSORS_CHECKED2(holder, name, type, offset, \
- get_condition, set_condition) \
- type holder::name(AcquireLoadTag tag) const { \
- IsolateRoot isolate = GetIsolateForPtrCompr(*this); \
- return holder::name(isolate, tag); \
- } \
- type holder::name(IsolateRoot isolate, AcquireLoadTag) const { \
- type value = TaggedField<type, offset>::Acquire_Load(isolate, *this); \
- DCHECK(get_condition); \
- return value; \
- } \
- void holder::set_##name(type value, ReleaseStoreTag, \
- WriteBarrierMode mode) { \
- DCHECK(set_condition); \
- TaggedField<type, offset>::Release_Store(*this, value); \
- CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode); \
+#define RELEASE_ACQUIRE_ACCESSORS_CHECKED2(holder, name, type, offset, \
+ get_condition, set_condition) \
+ type holder::name(AcquireLoadTag tag) const { \
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this); \
+ return holder::name(cage_base, tag); \
+ } \
+ type holder::name(PtrComprCageBase cage_base, AcquireLoadTag) const { \
+ type value = TaggedField<type, offset>::Acquire_Load(cage_base, *this); \
+ DCHECK(get_condition); \
+ return value; \
+ } \
+ void holder::set_##name(type value, ReleaseStoreTag, \
+ WriteBarrierMode mode) { \
+ DCHECK(set_condition); \
+ TaggedField<type, offset>::Release_Store(*this, value); \
+ CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode); \
}
#define RELEASE_ACQUIRE_ACCESSORS_CHECKED(holder, name, type, offset, \
@@ -266,7 +266,7 @@
set_condition) \
DEF_GETTER(holder, name, MaybeObject) { \
MaybeObject value = \
- TaggedField<MaybeObject, offset>::load(isolate, *this); \
+ TaggedField<MaybeObject, offset>::load(cage_base, *this); \
DCHECK(get_condition); \
return value; \
} \
@@ -282,23 +282,23 @@
#define WEAK_ACCESSORS(holder, name, offset) \
WEAK_ACCESSORS_CHECKED(holder, name, offset, true)
-#define RELEASE_ACQUIRE_WEAK_ACCESSORS_CHECKED2(holder, name, offset, \
- get_condition, set_condition) \
- MaybeObject holder::name(AcquireLoadTag tag) const { \
- IsolateRoot isolate = GetIsolateForPtrCompr(*this); \
- return holder::name(isolate, tag); \
- } \
- MaybeObject holder::name(IsolateRoot isolate, AcquireLoadTag) const { \
- MaybeObject value = \
- TaggedField<MaybeObject, offset>::Acquire_Load(isolate, *this); \
- DCHECK(get_condition); \
- return value; \
- } \
- void holder::set_##name(MaybeObject value, ReleaseStoreTag, \
- WriteBarrierMode mode) { \
- DCHECK(set_condition); \
- TaggedField<MaybeObject, offset>::Release_Store(*this, value); \
- CONDITIONAL_WEAK_WRITE_BARRIER(*this, offset, value, mode); \
+#define RELEASE_ACQUIRE_WEAK_ACCESSORS_CHECKED2(holder, name, offset, \
+ get_condition, set_condition) \
+ MaybeObject holder::name(AcquireLoadTag tag) const { \
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this); \
+ return holder::name(cage_base, tag); \
+ } \
+ MaybeObject holder::name(PtrComprCageBase cage_base, AcquireLoadTag) const { \
+ MaybeObject value = \
+ TaggedField<MaybeObject, offset>::Acquire_Load(cage_base, *this); \
+ DCHECK(get_condition); \
+ return value; \
+ } \
+ void holder::set_##name(MaybeObject value, ReleaseStoreTag, \
+ WriteBarrierMode mode) { \
+ DCHECK(set_condition); \
+ TaggedField<MaybeObject, offset>::Release_Store(*this, value); \
+ CONDITIONAL_WEAK_WRITE_BARRIER(*this, offset, value, mode); \
}
#define RELEASE_ACQUIRE_WEAK_ACCESSORS_CHECKED(holder, name, offset, \
@@ -351,22 +351,38 @@
set_##field(BooleanBit::set(field(), offset, value)); \
}
-#define BIT_FIELD_ACCESSORS(holder, field, name, BitField) \
- typename BitField::FieldType holder::name() const { \
- return BitField::decode(field()); \
- } \
- void holder::set_##name(typename BitField::FieldType value) { \
- set_##field(BitField::update(field(), value)); \
+#define DECL_RELAXED_BOOL_ACCESSORS(name) \
+ inline bool name(RelaxedLoadTag) const; \
+ inline void set_##name(bool value, RelaxedStoreTag);
+
+#define RELAXED_BOOL_ACCESSORS(holder, field, name, offset) \
+ bool holder::name(RelaxedLoadTag) const { \
+ return BooleanBit::get(field(kRelaxedLoad), offset); \
+ } \
+ void holder::set_##name(bool value, RelaxedStoreTag) { \
+ set_##field(BooleanBit::set(field(kRelaxedLoad), offset, value), \
+ kRelaxedStore); \
+ }
+
+#define BIT_FIELD_ACCESSORS2(holder, get_field, set_field, name, BitField) \
+ typename BitField::FieldType holder::name() const { \
+ return BitField::decode(get_field()); \
+ } \
+ void holder::set_##name(typename BitField::FieldType value) { \
+ set_##set_field(BitField::update(set_field(), value)); \
}
+#define BIT_FIELD_ACCESSORS(holder, field, name, BitField) \
+ BIT_FIELD_ACCESSORS2(holder, field, field, name, BitField)
+
#define INSTANCE_TYPE_CHECKER(type, forinstancetype) \
V8_INLINE bool Is##type(InstanceType instance_type) { \
return instance_type == forinstancetype; \
}
-#define TYPE_CHECKER(type, ...) \
- DEF_GETTER(HeapObject, Is##type, bool) { \
- return InstanceTypeChecker::Is##type(map(isolate).instance_type()); \
+#define TYPE_CHECKER(type, ...) \
+ DEF_GETTER(HeapObject, Is##type, bool) { \
+ return InstanceTypeChecker::Is##type(map(cage_base).instance_type()); \
}
#define RELAXED_INT16_ACCESSORS(holder, name, offset) \
@@ -553,7 +569,23 @@
#define RELAXED_WRITE_INT32_FIELD(p, offset, value) \
base::Relaxed_Store( \
reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
- static_cast<base::Atomic32>(value));
+ static_cast<base::Atomic32>(value))
+
+static_assert(sizeof(int) == sizeof(int32_t),
+ "sizeof int must match sizeof int32_t");
+
+#define RELAXED_READ_INT_FIELD(p, offset) RELAXED_READ_INT32_FIELD(p, offset)
+
+#define RELAXED_WRITE_INT_FIELD(p, offset, value) \
+ RELAXED_WRITE_INT32_FIELD(p, offset, value)
+
+static_assert(sizeof(unsigned) == sizeof(uint32_t),
+ "sizeof unsigned must match sizeof uint32_t");
+
+#define RELAXED_READ_UINT_FIELD(p, offset) RELAXED_READ_UINT32_FIELD(p, offset)
+
+#define RELAXED_WRITE_UINT_FIELD(p, offset, value) \
+ RELAXED_WRITE_UINT32_FIELD(p, offset, value)
#define RELAXED_READ_BYTE_FIELD(p, offset) \
static_cast<byte>(base::Relaxed_Load( \
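
Two things happen in the object-macros.h hunks above: the relaxed accessors now route through Relaxed_Load/Relaxed_Store rather than plain loads and stores, and RELAXED_READ_INT_FIELD / RELAXED_READ_UINT_FIELD aliases are introduced behind static_asserts that int and unsigned are 32 bits wide. A minimal, self-contained sketch of the relaxed versus release/acquire contract these macros encode, written with std::atomic instead of V8's TaggedField and base atomics:

#include <atomic>
#include <cstdint>

struct Field {
  std::atomic<int32_t> value{0};

  // RELAXED_* accessors: atomic, but no ordering with surrounding accesses.
  int32_t relaxed_load() const { return value.load(std::memory_order_relaxed); }
  void relaxed_store(int32_t v) { value.store(v, std::memory_order_relaxed); }

  // RELEASE_ACQUIRE_* accessors: an acquire load that observes a release
  // store also observes every write made before that store.
  int32_t acquire_load() const { return value.load(std::memory_order_acquire); }
  void release_store(int32_t v) { value.store(v, std::memory_order_release); }
};
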
diff --git a/deps/v8/src/objects/objects-body-descriptors-inl.h b/deps/v8/src/objects/objects-body-descriptors-inl.h
index e4167229cae..2ffe2a44144 100644
--- a/deps/v8/src/objects/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects/objects-body-descriptors-inl.h
@@ -23,7 +23,10 @@
#include "src/objects/synthetic-module.h"
#include "src/objects/torque-defined-classes-inl.h"
#include "src/objects/transitions.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-objects-inl.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -560,6 +563,7 @@ class Foreign::BodyDescriptor final : public BodyDescriptorBase {
static inline int SizeOf(Map map, HeapObject object) { return kSize; }
};
+#if V8_ENABLE_WEBASSEMBLY
class WasmTypeInfo::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
@@ -578,6 +582,85 @@ class WasmTypeInfo::BodyDescriptor final : public BodyDescriptorBase {
static inline int SizeOf(Map map, HeapObject object) { return kSize; }
};
+class WasmInstanceObject::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ SLOW_DCHECK(std::is_sorted(std::begin(kTaggedFieldOffsets),
+ std::end(kTaggedFieldOffsets)));
+ STATIC_ASSERT(sizeof(*kTaggedFieldOffsets) == sizeof(uint16_t));
+ if (offset < int{8 * sizeof(*kTaggedFieldOffsets)} &&
+ std::binary_search(std::begin(kTaggedFieldOffsets),
+ std::end(kTaggedFieldOffsets),
+ static_cast<uint16_t>(offset))) {
+ return true;
+ }
+ return IsValidJSObjectSlotImpl(map, obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointers(obj, kPropertiesOrHashOffset, JSObject::kHeaderSize, v);
+ for (uint16_t offset : kTaggedFieldOffsets) {
+ IteratePointer(obj, offset, v);
+ }
+ IterateJSObjectBodyImpl(map, obj, kHeaderSize, object_size, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return map.instance_size();
+ }
+};
+
+class WasmArray::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ // Fields in WasmArrays never change their types in place, so
+ // there should never be a need to call this function.
+ UNREACHABLE();
+ return false;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ if (!WasmArray::GcSafeType(map)->element_type().is_reference()) return;
+ IteratePointers(obj, WasmArray::kHeaderSize, object_size, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return WasmArray::GcSafeSizeFor(map, WasmArray::cast(object).length());
+ }
+};
+
+class WasmStruct::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ // Fields in WasmStructs never change their types in place, so
+ // there should never be a need to call this function.
+ UNREACHABLE();
+ return false;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ WasmStruct wasm_struct = WasmStruct::cast(obj);
+ wasm::StructType* type = WasmStruct::GcSafeType(map);
+ for (uint32_t i = 0; i < type->field_count(); i++) {
+ if (!type->field(i).is_reference()) continue;
+ int offset = static_cast<int>(type->field_offset(i));
+ v->VisitPointer(wasm_struct, wasm_struct.RawField(offset));
+ }
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return map.instance_size();
+ }
+};
+
+#endif // V8_ENABLE_WEBASSEMBLY
+
class ExternalOneByteString::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
@@ -619,9 +702,8 @@ class Code::BodyDescriptor final : public BodyDescriptorBase {
STATIC_ASSERT(kRelocationInfoOffset + kTaggedSize ==
kDeoptimizationDataOffset);
STATIC_ASSERT(kDeoptimizationDataOffset + kTaggedSize ==
- kSourcePositionTableOffset);
- STATIC_ASSERT(kSourcePositionTableOffset + kTaggedSize ==
- kCodeDataContainerOffset);
+ kPositionTableOffset);
+ STATIC_ASSERT(kPositionTableOffset + kTaggedSize == kCodeDataContainerOffset);
STATIC_ASSERT(kCodeDataContainerOffset + kTaggedSize == kDataStart);
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
@@ -661,36 +743,6 @@ class Code::BodyDescriptor final : public BodyDescriptorBase {
}
};
-class WasmInstanceObject::BodyDescriptor final : public BodyDescriptorBase {
- public:
- static bool IsValidSlot(Map map, HeapObject obj, int offset) {
- SLOW_DCHECK(std::is_sorted(std::begin(kTaggedFieldOffsets),
- std::end(kTaggedFieldOffsets)));
- STATIC_ASSERT(sizeof(*kTaggedFieldOffsets) == sizeof(uint16_t));
- if (offset < int{8 * sizeof(*kTaggedFieldOffsets)} &&
- std::binary_search(std::begin(kTaggedFieldOffsets),
- std::end(kTaggedFieldOffsets),
- static_cast<uint16_t>(offset))) {
- return true;
- }
- return IsValidJSObjectSlotImpl(map, obj, offset);
- }
-
- template <typename ObjectVisitor>
- static inline void IterateBody(Map map, HeapObject obj, int object_size,
- ObjectVisitor* v) {
- IteratePointers(obj, kPropertiesOrHashOffset, JSObject::kHeaderSize, v);
- for (uint16_t offset : kTaggedFieldOffsets) {
- IteratePointer(obj, offset, v);
- }
- IterateJSObjectBodyImpl(map, obj, kHeaderSize, object_size, v);
- }
-
- static inline int SizeOf(Map map, HeapObject object) {
- return map.instance_size();
- }
-};
-
class Map::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
@@ -777,53 +829,6 @@ class CodeDataContainer::BodyDescriptor final : public BodyDescriptorBase {
}
};
-class WasmArray::BodyDescriptor final : public BodyDescriptorBase {
- public:
- static bool IsValidSlot(Map map, HeapObject obj, int offset) {
- // Fields in WasmArrays never change their types in place, so
- // there should never be a need to call this function.
- UNREACHABLE();
- return false;
- }
-
- template <typename ObjectVisitor>
- static inline void IterateBody(Map map, HeapObject obj, int object_size,
- ObjectVisitor* v) {
- if (!WasmArray::GcSafeType(map)->element_type().is_reference_type()) return;
- IteratePointers(obj, WasmArray::kHeaderSize, object_size, v);
- }
-
- static inline int SizeOf(Map map, HeapObject object) {
- return WasmArray::GcSafeSizeFor(map, WasmArray::cast(object).length());
- }
-};
-
-class WasmStruct::BodyDescriptor final : public BodyDescriptorBase {
- public:
- static bool IsValidSlot(Map map, HeapObject obj, int offset) {
- // Fields in WasmStructs never change their types in place, so
- // there should never be a need to call this function.
- UNREACHABLE();
- return false;
- }
-
- template <typename ObjectVisitor>
- static inline void IterateBody(Map map, HeapObject obj, int object_size,
- ObjectVisitor* v) {
- WasmStruct wasm_struct = WasmStruct::cast(obj);
- wasm::StructType* type = WasmStruct::GcSafeType(map);
- for (uint32_t i = 0; i < type->field_count(); i++) {
- if (!type->field(i).is_reference_type()) continue;
- int offset = static_cast<int>(type->field_offset(i));
- v->VisitPointer(wasm_struct, wasm_struct.RawField(offset));
- }
- }
-
- static inline int SizeOf(Map map, HeapObject object) {
- return map.instance_size();
- }
-};
-
class EmbedderDataArray::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
@@ -936,12 +941,14 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
return Op::template apply<FeedbackCell::BodyDescriptor>(p1, p2, p3, p4);
case COVERAGE_INFO_TYPE:
return Op::template apply<CoverageInfo::BodyDescriptor>(p1, p2, p3, p4);
+#if V8_ENABLE_WEBASSEMBLY
case WASM_ARRAY_TYPE:
return Op::template apply<WasmArray::BodyDescriptor>(p1, p2, p3, p4);
case WASM_STRUCT_TYPE:
return Op::template apply<WasmStruct::BodyDescriptor>(p1, p2, p3, p4);
case WASM_TYPE_INFO_TYPE:
return Op::template apply<WasmTypeInfo::BodyDescriptor>(p1, p2, p3, p4);
+#endif // V8_ENABLE_WEBASSEMBLY
case JS_API_OBJECT_TYPE:
case JS_ARGUMENTS_OBJECT_TYPE:
case JS_ARRAY_ITERATOR_PROTOTYPE_TYPE:
@@ -997,16 +1004,20 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case JS_SEGMENTER_TYPE:
case JS_SEGMENTS_TYPE:
#endif // V8_INTL_SUPPORT
+#if V8_ENABLE_WEBASSEMBLY
case WASM_EXCEPTION_OBJECT_TYPE:
case WASM_GLOBAL_OBJECT_TYPE:
case WASM_MEMORY_OBJECT_TYPE:
case WASM_MODULE_OBJECT_TYPE:
case WASM_TABLE_OBJECT_TYPE:
case WASM_VALUE_OBJECT_TYPE:
+#endif // V8_ENABLE_WEBASSEMBLY
return Op::template apply<JSObject::BodyDescriptor>(p1, p2, p3, p4);
+#if V8_ENABLE_WEBASSEMBLY
case WASM_INSTANCE_OBJECT_TYPE:
return Op::template apply<WasmInstanceObject::BodyDescriptor>(p1, p2, p3,
p4);
+#endif // V8_ENABLE_WEBASSEMBLY
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
return Op::template apply<JSWeakCollection::BodyDescriptor>(p1, p2, p3,
@@ -1083,15 +1094,18 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
if (type == PROTOTYPE_INFO_TYPE) {
return Op::template apply<PrototypeInfo::BodyDescriptor>(p1, p2, p3,
p4);
- } else if (type == WASM_CAPI_FUNCTION_DATA_TYPE) {
+ }
+#if V8_ENABLE_WEBASSEMBLY
+ if (type == WASM_CAPI_FUNCTION_DATA_TYPE) {
return Op::template apply<WasmCapiFunctionData::BodyDescriptor>(p1, p2,
p3, p4);
- } else if (type == WASM_INDIRECT_FUNCTION_TABLE_TYPE) {
+ }
+ if (type == WASM_INDIRECT_FUNCTION_TABLE_TYPE) {
return Op::template apply<WasmIndirectFunctionTable::BodyDescriptor>(
p1, p2, p3, p4);
- } else {
- return Op::template apply<StructBodyDescriptor>(p1, p2, p3, p4);
}
+#endif // V8_ENABLE_WEBASSEMBLY
+ return Op::template apply<StructBodyDescriptor>(p1, p2, p3, p4);
case CALL_HANDLER_INFO_TYPE:
return Op::template apply<StructBodyDescriptor>(p1, p2, p3, p4);
case LOAD_HANDLER_TYPE:
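
The BodyDescriptorApply changes above follow one pattern: every case for a Wasm instance type is wrapped in #if V8_ENABLE_WEBASSEMBLY, so builds without WebAssembly support never reference the Wasm body descriptors, and the if/else-if chain for struct types is flattened so the Wasm branches can be guarded without leaving a dangling else. A small, self-contained sketch of the guarded-switch pattern with stand-in names:

#include <cstdio>

enum class InstanceKind { kJSObject, kWasmArray };

void VisitBody(InstanceKind kind) {
  switch (kind) {
    case InstanceKind::kJSObject:
      std::puts("iterate tagged fields of a JSObject");
      break;
#if V8_ENABLE_WEBASSEMBLY
    // Compiled in only when Wasm support is enabled; otherwise the default
    // branch below handles the (then impossible) value.
    case InstanceKind::kWasmArray:
      std::puts("iterate reference elements of a WasmArray");
      break;
#endif
    default:
      break;
  }
}
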
diff --git a/deps/v8/src/objects/objects-definitions.h b/deps/v8/src/objects/objects-definitions.h
index 68b82b33d34..43560caab81 100644
--- a/deps/v8/src/objects/objects-definitions.h
+++ b/deps/v8/src/objects/objects-definitions.h
@@ -101,64 +101,63 @@ namespace internal {
// code for the class including allocation and garbage collection routines,
// casts and predicates. All you need to define is the class, methods and
// object verification routines. Easy, no?
-#define STRUCT_LIST_GENERATOR_BASE(V, _) \
- V(_, PROMISE_FULFILL_REACTION_JOB_TASK_TYPE, PromiseFulfillReactionJobTask, \
- promise_fulfill_reaction_job_task) \
- V(_, PROMISE_REJECT_REACTION_JOB_TASK_TYPE, PromiseRejectReactionJobTask, \
- promise_reject_reaction_job_task) \
- V(_, CALLABLE_TASK_TYPE, CallableTask, callable_task) \
- V(_, CALLBACK_TASK_TYPE, CallbackTask, callback_task) \
- V(_, PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE, PromiseResolveThenableJobTask, \
- promise_resolve_thenable_job_task) \
- V(_, FUNCTION_TEMPLATE_INFO_TYPE, FunctionTemplateInfo, \
- function_template_info) \
- V(_, OBJECT_TEMPLATE_INFO_TYPE, ObjectTemplateInfo, object_template_info) \
- V(_, ACCESS_CHECK_INFO_TYPE, AccessCheckInfo, access_check_info) \
- V(_, ACCESSOR_INFO_TYPE, AccessorInfo, accessor_info) \
- V(_, ACCESSOR_PAIR_TYPE, AccessorPair, accessor_pair) \
- V(_, ALIASED_ARGUMENTS_ENTRY_TYPE, AliasedArgumentsEntry, \
- aliased_arguments_entry) \
- V(_, ALLOCATION_MEMENTO_TYPE, AllocationMemento, allocation_memento) \
- V(_, ARRAY_BOILERPLATE_DESCRIPTION_TYPE, ArrayBoilerplateDescription, \
- array_boilerplate_description) \
- V(_, ASM_WASM_DATA_TYPE, AsmWasmData, asm_wasm_data) \
- V(_, ASYNC_GENERATOR_REQUEST_TYPE, AsyncGeneratorRequest, \
- async_generator_request) \
- V(_, BASELINE_DATA_TYPE, BaselineData, baseline_data) \
- V(_, BREAK_POINT_TYPE, BreakPoint, break_point) \
- V(_, BREAK_POINT_INFO_TYPE, BreakPointInfo, break_point_info) \
- V(_, CACHED_TEMPLATE_OBJECT_TYPE, CachedTemplateObject, \
- cached_template_object) \
- V(_, CLASS_POSITIONS_TYPE, ClassPositions, class_positions) \
- V(_, DEBUG_INFO_TYPE, DebugInfo, debug_info) \
- V(_, ENUM_CACHE_TYPE, EnumCache, enum_cache) \
- V(_, FUNCTION_TEMPLATE_RARE_DATA_TYPE, FunctionTemplateRareData, \
- function_template_rare_data) \
- V(_, INTERCEPTOR_INFO_TYPE, InterceptorInfo, interceptor_info) \
- V(_, INTERPRETER_DATA_TYPE, InterpreterData, interpreter_data) \
- V(_, MODULE_REQUEST_TYPE, ModuleRequest, module_request) \
- V(_, PROMISE_CAPABILITY_TYPE, PromiseCapability, promise_capability) \
- V(_, PROMISE_REACTION_TYPE, PromiseReaction, promise_reaction) \
- V(_, PROPERTY_DESCRIPTOR_OBJECT_TYPE, PropertyDescriptorObject, \
- property_descriptor_object) \
- V(_, PROTOTYPE_INFO_TYPE, PrototypeInfo, prototype_info) \
- V(_, REG_EXP_BOILERPLATE_DESCRIPTION_TYPE, RegExpBoilerplateDescription, \
- regexp_boilerplate_description) \
- V(_, SCRIPT_TYPE, Script, script) \
- V(_, SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE, SourceTextModuleInfoEntry, \
- module_info_entry) \
- V(_, STACK_FRAME_INFO_TYPE, StackFrameInfo, stack_frame_info) \
- V(_, TEMPLATE_OBJECT_DESCRIPTION_TYPE, TemplateObjectDescription, \
- template_object_description) \
- V(_, TUPLE2_TYPE, Tuple2, tuple2) \
- V(_, WASM_EXCEPTION_TAG_TYPE, WasmExceptionTag, wasm_exception_tag) \
- V(_, WASM_EXPORTED_FUNCTION_DATA_TYPE, WasmExportedFunctionData, \
- wasm_exported_function_data) \
- V(_, WASM_INDIRECT_FUNCTION_TABLE_TYPE, WasmIndirectFunctionTable, \
- wasm_indirect_function_table) \
- V(_, WASM_JS_FUNCTION_DATA_TYPE, WasmJSFunctionData, wasm_js_function_data)
-
-#define STRUCT_LIST_GENERATOR(V, _) STRUCT_LIST_GENERATOR_BASE(V, _)
+#define STRUCT_LIST_GENERATOR(V, _) \
+ V(_, PROMISE_FULFILL_REACTION_JOB_TASK_TYPE, PromiseFulfillReactionJobTask, \
+ promise_fulfill_reaction_job_task) \
+ V(_, PROMISE_REJECT_REACTION_JOB_TASK_TYPE, PromiseRejectReactionJobTask, \
+ promise_reject_reaction_job_task) \
+ V(_, CALLABLE_TASK_TYPE, CallableTask, callable_task) \
+ V(_, CALLBACK_TASK_TYPE, CallbackTask, callback_task) \
+ V(_, PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE, PromiseResolveThenableJobTask, \
+ promise_resolve_thenable_job_task) \
+ V(_, FUNCTION_TEMPLATE_INFO_TYPE, FunctionTemplateInfo, \
+ function_template_info) \
+ V(_, OBJECT_TEMPLATE_INFO_TYPE, ObjectTemplateInfo, object_template_info) \
+ V(_, ACCESS_CHECK_INFO_TYPE, AccessCheckInfo, access_check_info) \
+ V(_, ACCESSOR_INFO_TYPE, AccessorInfo, accessor_info) \
+ V(_, ACCESSOR_PAIR_TYPE, AccessorPair, accessor_pair) \
+ V(_, ALIASED_ARGUMENTS_ENTRY_TYPE, AliasedArgumentsEntry, \
+ aliased_arguments_entry) \
+ V(_, ALLOCATION_MEMENTO_TYPE, AllocationMemento, allocation_memento) \
+ V(_, ARRAY_BOILERPLATE_DESCRIPTION_TYPE, ArrayBoilerplateDescription, \
+ array_boilerplate_description) \
+ IF_WASM(V, _, ASM_WASM_DATA_TYPE, AsmWasmData, asm_wasm_data) \
+ V(_, ASYNC_GENERATOR_REQUEST_TYPE, AsyncGeneratorRequest, \
+ async_generator_request) \
+ V(_, BASELINE_DATA_TYPE, BaselineData, baseline_data) \
+ V(_, BREAK_POINT_TYPE, BreakPoint, break_point) \
+ V(_, BREAK_POINT_INFO_TYPE, BreakPointInfo, break_point_info) \
+ V(_, CACHED_TEMPLATE_OBJECT_TYPE, CachedTemplateObject, \
+ cached_template_object) \
+ V(_, CLASS_POSITIONS_TYPE, ClassPositions, class_positions) \
+ V(_, DEBUG_INFO_TYPE, DebugInfo, debug_info) \
+ V(_, ENUM_CACHE_TYPE, EnumCache, enum_cache) \
+ V(_, FUNCTION_TEMPLATE_RARE_DATA_TYPE, FunctionTemplateRareData, \
+ function_template_rare_data) \
+ V(_, INTERCEPTOR_INFO_TYPE, InterceptorInfo, interceptor_info) \
+ V(_, INTERPRETER_DATA_TYPE, InterpreterData, interpreter_data) \
+ V(_, MODULE_REQUEST_TYPE, ModuleRequest, module_request) \
+ V(_, PROMISE_CAPABILITY_TYPE, PromiseCapability, promise_capability) \
+ V(_, PROMISE_REACTION_TYPE, PromiseReaction, promise_reaction) \
+ V(_, PROPERTY_DESCRIPTOR_OBJECT_TYPE, PropertyDescriptorObject, \
+ property_descriptor_object) \
+ V(_, PROTOTYPE_INFO_TYPE, PrototypeInfo, prototype_info) \
+ V(_, REG_EXP_BOILERPLATE_DESCRIPTION_TYPE, RegExpBoilerplateDescription, \
+ regexp_boilerplate_description) \
+ V(_, SCRIPT_TYPE, Script, script) \
+ V(_, SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE, SourceTextModuleInfoEntry, \
+ module_info_entry) \
+ V(_, STACK_FRAME_INFO_TYPE, StackFrameInfo, stack_frame_info) \
+ V(_, TEMPLATE_OBJECT_DESCRIPTION_TYPE, TemplateObjectDescription, \
+ template_object_description) \
+ V(_, TUPLE2_TYPE, Tuple2, tuple2) \
+ IF_WASM(V, _, WASM_EXCEPTION_TAG_TYPE, WasmExceptionTag, wasm_exception_tag) \
+ IF_WASM(V, _, WASM_EXPORTED_FUNCTION_DATA_TYPE, WasmExportedFunctionData, \
+ wasm_exported_function_data) \
+ IF_WASM(V, _, WASM_INDIRECT_FUNCTION_TABLE_TYPE, WasmIndirectFunctionTable, \
+ wasm_indirect_function_table) \
+ IF_WASM(V, _, WASM_JS_FUNCTION_DATA_TYPE, WasmJSFunctionData, \
+ wasm_js_function_data)
// Adapts one STRUCT_LIST_GENERATOR entry to the STRUCT_LIST entry
#define STRUCT_LIST_ADAPTER(V, NAME, Name, name) V(NAME, Name, name)
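
The struct list above is the same kind of X-macro: a generator carries (TYPE, CamelName, snake_name) triples, Wasm-only entries now go through IF_WASM, and adapters such as STRUCT_LIST_ADAPTER pick out the columns a given expansion needs. A toy, self-contained sketch of the generator/adapter pattern; the names here are illustrative, not V8's.

// Generator: each entry is a (dummy, TYPE, Name, name) tuple fed to V.
#define TOY_STRUCT_LIST_GENERATOR(V, _) \
  V(_, TUPLE2_TYPE, Tuple2, tuple2)     \
  V(_, SCRIPT_TYPE, Script, script)

// Adapter that keeps only the CamelCase column, e.g. for forward declarations.
#define TOY_DECLARE_CLASS(_, TYPE, Name, name) class Name;
TOY_STRUCT_LIST_GENERATOR(TOY_DECLARE_CLASS, X)  // expands to: class Tuple2; class Script;
#undef TOY_DECLARE_CLASS
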
diff --git a/deps/v8/src/objects/objects-inl.h b/deps/v8/src/objects/objects-inl.h
index 39fa7b3381a..c94feca2501 100644
--- a/deps/v8/src/objects/objects-inl.h
+++ b/deps/v8/src/objects/objects-inl.h
@@ -30,7 +30,7 @@
#include "src/objects/literal-objects.h"
#include "src/objects/lookup-inl.h" // TODO(jkummerow): Drop.
#include "src/objects/objects.h"
-#include "src/objects/oddball.h"
+#include "src/objects/oddball-inl.h"
#include "src/objects/property-details.h"
#include "src/objects/property.h"
#include "src/objects/regexp-match-info.h"
@@ -65,19 +65,19 @@ int PropertyDetails::field_width_in_words() const {
}
DEF_GETTER(HeapObject, IsClassBoilerplate, bool) {
- return IsFixedArrayExact(isolate);
+ return IsFixedArrayExact(cage_base);
}
bool Object::IsTaggedIndex() const {
return IsSmi() && TaggedIndex::IsValid(TaggedIndex(ptr()).value());
}
-#define IS_TYPE_FUNCTION_DEF(type_) \
- bool Object::Is##type_() const { \
- return IsHeapObject() && HeapObject::cast(*this).Is##type_(); \
- } \
- bool Object::Is##type_(IsolateRoot isolate) const { \
- return IsHeapObject() && HeapObject::cast(*this).Is##type_(isolate); \
+#define IS_TYPE_FUNCTION_DEF(type_) \
+ bool Object::Is##type_() const { \
+ return IsHeapObject() && HeapObject::cast(*this).Is##type_(); \
+ } \
+ bool Object::Is##type_(PtrComprCageBase cage_base) const { \
+ return IsHeapObject() && HeapObject::cast(*this).Is##type_(cage_base); \
}
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DEF)
IS_TYPE_FUNCTION_DEF(HashTableBase)
@@ -148,127 +148,125 @@ bool HeapObject::IsNullOrUndefined() const {
}
DEF_GETTER(HeapObject, IsUniqueName, bool) {
- return IsInternalizedString(isolate) || IsSymbol(isolate);
+ return IsInternalizedString(cage_base) || IsSymbol(cage_base);
}
DEF_GETTER(HeapObject, IsFunction, bool) {
return IsJSFunctionOrBoundFunction();
}
-DEF_GETTER(HeapObject, IsCallable, bool) { return map(isolate).is_callable(); }
+DEF_GETTER(HeapObject, IsCallable, bool) {
+ return map(cage_base).is_callable();
+}
DEF_GETTER(HeapObject, IsCallableJSProxy, bool) {
- return IsCallable(isolate) && IsJSProxy(isolate);
+ return IsCallable(cage_base) && IsJSProxy(cage_base);
}
DEF_GETTER(HeapObject, IsCallableApiObject, bool) {
- InstanceType type = map(isolate).instance_type();
- return IsCallable(isolate) &&
+ InstanceType type = map(cage_base).instance_type();
+ return IsCallable(cage_base) &&
(type == JS_API_OBJECT_TYPE || type == JS_SPECIAL_API_OBJECT_TYPE);
}
DEF_GETTER(HeapObject, IsNonNullForeign, bool) {
- return IsForeign(isolate) &&
+ return IsForeign(cage_base) &&
Foreign::cast(*this).foreign_address() != kNullAddress;
}
DEF_GETTER(HeapObject, IsConstructor, bool) {
- return map(isolate).is_constructor();
+ return map(cage_base).is_constructor();
}
DEF_GETTER(HeapObject, IsSourceTextModuleInfo, bool) {
- // Can't use ReadOnlyRoots(isolate) as this isolate could be produced by
- // i::GetIsolateForPtrCompr(HeapObject).
- return map(isolate) == GetReadOnlyRoots(isolate).module_info_map();
+ return map(cage_base) == GetReadOnlyRoots(cage_base).module_info_map();
}
DEF_GETTER(HeapObject, IsConsString, bool) {
- if (!IsString(isolate)) return false;
- return StringShape(String::cast(*this).map(isolate)).IsCons();
+ if (!IsString(cage_base)) return false;
+ return StringShape(String::cast(*this).map(cage_base)).IsCons();
}
DEF_GETTER(HeapObject, IsThinString, bool) {
- if (!IsString(isolate)) return false;
- return StringShape(String::cast(*this).map(isolate)).IsThin();
+ if (!IsString(cage_base)) return false;
+ return StringShape(String::cast(*this).map(cage_base)).IsThin();
}
DEF_GETTER(HeapObject, IsSlicedString, bool) {
- if (!IsString(isolate)) return false;
- return StringShape(String::cast(*this).map(isolate)).IsSliced();
+ if (!IsString(cage_base)) return false;
+ return StringShape(String::cast(*this).map(cage_base)).IsSliced();
}
DEF_GETTER(HeapObject, IsSeqString, bool) {
- if (!IsString(isolate)) return false;
- return StringShape(String::cast(*this).map(isolate)).IsSequential();
+ if (!IsString(cage_base)) return false;
+ return StringShape(String::cast(*this).map(cage_base)).IsSequential();
}
DEF_GETTER(HeapObject, IsSeqOneByteString, bool) {
- if (!IsString(isolate)) return false;
- return StringShape(String::cast(*this).map(isolate)).IsSequential() &&
- String::cast(*this).IsOneByteRepresentation(isolate);
+ if (!IsString(cage_base)) return false;
+ return StringShape(String::cast(*this).map(cage_base)).IsSequential() &&
+ String::cast(*this).IsOneByteRepresentation(cage_base);
}
DEF_GETTER(HeapObject, IsSeqTwoByteString, bool) {
- if (!IsString(isolate)) return false;
- return StringShape(String::cast(*this).map(isolate)).IsSequential() &&
- String::cast(*this).IsTwoByteRepresentation(isolate);
+ if (!IsString(cage_base)) return false;
+ return StringShape(String::cast(*this).map(cage_base)).IsSequential() &&
+ String::cast(*this).IsTwoByteRepresentation(cage_base);
}
DEF_GETTER(HeapObject, IsExternalOneByteString, bool) {
- if (!IsString(isolate)) return false;
- return StringShape(String::cast(*this).map(isolate)).IsExternal() &&
- String::cast(*this).IsOneByteRepresentation(isolate);
+ if (!IsString(cage_base)) return false;
+ return StringShape(String::cast(*this).map(cage_base)).IsExternal() &&
+ String::cast(*this).IsOneByteRepresentation(cage_base);
}
DEF_GETTER(HeapObject, IsExternalTwoByteString, bool) {
- if (!IsString(isolate)) return false;
- return StringShape(String::cast(*this).map(isolate)).IsExternal() &&
- String::cast(*this).IsTwoByteRepresentation(isolate);
+ if (!IsString(cage_base)) return false;
+ return StringShape(String::cast(*this).map(cage_base)).IsExternal() &&
+ String::cast(*this).IsTwoByteRepresentation(cage_base);
}
bool Object::IsNumber() const {
if (IsSmi()) return true;
HeapObject this_heap_object = HeapObject::cast(*this);
- IsolateRoot isolate = GetIsolateForPtrCompr(this_heap_object);
- return this_heap_object.IsHeapNumber(isolate);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(this_heap_object);
+ return this_heap_object.IsHeapNumber(cage_base);
}
-bool Object::IsNumber(IsolateRoot isolate) const {
- return IsSmi() || IsHeapNumber(isolate);
+bool Object::IsNumber(PtrComprCageBase cage_base) const {
+ return IsSmi() || IsHeapNumber(cage_base);
}
bool Object::IsNumeric() const {
if (IsSmi()) return true;
HeapObject this_heap_object = HeapObject::cast(*this);
- IsolateRoot isolate = GetIsolateForPtrCompr(this_heap_object);
- return this_heap_object.IsHeapNumber(isolate) ||
- this_heap_object.IsBigInt(isolate);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(this_heap_object);
+ return this_heap_object.IsHeapNumber(cage_base) ||
+ this_heap_object.IsBigInt(cage_base);
}
-bool Object::IsNumeric(IsolateRoot isolate) const {
- return IsNumber(isolate) || IsBigInt(isolate);
+bool Object::IsNumeric(PtrComprCageBase cage_base) const {
+ return IsNumber(cage_base) || IsBigInt(cage_base);
}
DEF_GETTER(HeapObject, IsFreeSpaceOrFiller, bool) {
- InstanceType instance_type = map(isolate).instance_type();
+ InstanceType instance_type = map(cage_base).instance_type();
return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
}
DEF_GETTER(HeapObject, IsArrayList, bool) {
- // Can't use ReadOnlyRoots(isolate) as this isolate could be produced by
- // i::GetIsolateForPtrCompr(HeapObject).
- ReadOnlyRoots roots = GetReadOnlyRoots(isolate);
+ ReadOnlyRoots roots = GetReadOnlyRoots(cage_base);
return *this == roots.empty_fixed_array() ||
- map(isolate) == roots.array_list_map();
+ map(cage_base) == roots.array_list_map();
}
DEF_GETTER(HeapObject, IsRegExpMatchInfo, bool) {
- return IsFixedArrayExact(isolate);
+ return IsFixedArrayExact(cage_base);
}
DEF_GETTER(HeapObject, IsDeoptimizationData, bool) {
// Must be a fixed array.
- if (!IsFixedArrayExact(isolate)) return false;
+ if (!IsFixedArrayExact(cage_base)) return false;
// There's no sure way to detect the difference between a fixed array and
// a deoptimization data array. Since this is used for asserts we can
@@ -282,14 +280,14 @@ DEF_GETTER(HeapObject, IsDeoptimizationData, bool) {
}
DEF_GETTER(HeapObject, IsHandlerTable, bool) {
- if (!IsFixedArrayExact(isolate)) return false;
+ if (!IsFixedArrayExact(cage_base)) return false;
// There's actually no way to see the difference between a fixed array and
// a handler table array.
return true;
}
DEF_GETTER(HeapObject, IsTemplateList, bool) {
- if (!IsFixedArrayExact(isolate)) return false;
+ if (!IsFixedArrayExact(cage_base)) return false;
// There's actually no way to see the difference between a fixed array and
// a template list.
if (FixedArray::cast(*this).length() < 1) return false;
@@ -297,82 +295,86 @@ DEF_GETTER(HeapObject, IsTemplateList, bool) {
}
DEF_GETTER(HeapObject, IsDependentCode, bool) {
- if (!IsWeakFixedArray(isolate)) return false;
+ if (!IsWeakFixedArray(cage_base)) return false;
// There's actually no way to see the difference between a weak fixed array
// and a dependent codes array.
return true;
}
DEF_GETTER(HeapObject, IsOSROptimizedCodeCache, bool) {
- if (!IsWeakFixedArray(isolate)) return false;
+ if (!IsWeakFixedArray(cage_base)) return false;
// There's actually no way to see the difference between a weak fixed array
// and a osr optimized code cache.
return true;
}
DEF_GETTER(HeapObject, IsAbstractCode, bool) {
- return IsBytecodeArray(isolate) || IsCode(isolate);
+ return IsBytecodeArray(cage_base) || IsCode(cage_base);
}
DEF_GETTER(HeapObject, IsStringWrapper, bool) {
- return IsJSPrimitiveWrapper(isolate) &&
- JSPrimitiveWrapper::cast(*this).value().IsString(isolate);
+ return IsJSPrimitiveWrapper(cage_base) &&
+ JSPrimitiveWrapper::cast(*this).value().IsString(cage_base);
}
DEF_GETTER(HeapObject, IsBooleanWrapper, bool) {
- return IsJSPrimitiveWrapper(isolate) &&
- JSPrimitiveWrapper::cast(*this).value().IsBoolean(isolate);
+ return IsJSPrimitiveWrapper(cage_base) &&
+ JSPrimitiveWrapper::cast(*this).value().IsBoolean(cage_base);
}
DEF_GETTER(HeapObject, IsScriptWrapper, bool) {
- return IsJSPrimitiveWrapper(isolate) &&
- JSPrimitiveWrapper::cast(*this).value().IsScript(isolate);
+ return IsJSPrimitiveWrapper(cage_base) &&
+ JSPrimitiveWrapper::cast(*this).value().IsScript(cage_base);
}
DEF_GETTER(HeapObject, IsNumberWrapper, bool) {
- return IsJSPrimitiveWrapper(isolate) &&
- JSPrimitiveWrapper::cast(*this).value().IsNumber(isolate);
+ return IsJSPrimitiveWrapper(cage_base) &&
+ JSPrimitiveWrapper::cast(*this).value().IsNumber(cage_base);
}
DEF_GETTER(HeapObject, IsBigIntWrapper, bool) {
- return IsJSPrimitiveWrapper(isolate) &&
- JSPrimitiveWrapper::cast(*this).value().IsBigInt(isolate);
+ return IsJSPrimitiveWrapper(cage_base) &&
+ JSPrimitiveWrapper::cast(*this).value().IsBigInt(cage_base);
}
DEF_GETTER(HeapObject, IsSymbolWrapper, bool) {
- return IsJSPrimitiveWrapper(isolate) &&
- JSPrimitiveWrapper::cast(*this).value().IsSymbol(isolate);
+ return IsJSPrimitiveWrapper(cage_base) &&
+ JSPrimitiveWrapper::cast(*this).value().IsSymbol(cage_base);
}
-DEF_GETTER(HeapObject, IsStringSet, bool) { return IsHashTable(isolate); }
+DEF_GETTER(HeapObject, IsStringSet, bool) { return IsHashTable(cage_base); }
-DEF_GETTER(HeapObject, IsObjectHashSet, bool) { return IsHashTable(isolate); }
+DEF_GETTER(HeapObject, IsObjectHashSet, bool) { return IsHashTable(cage_base); }
DEF_GETTER(HeapObject, IsCompilationCacheTable, bool) {
- return IsHashTable(isolate);
+ return IsHashTable(cage_base);
}
-DEF_GETTER(HeapObject, IsMapCache, bool) { return IsHashTable(isolate); }
+DEF_GETTER(HeapObject, IsMapCache, bool) { return IsHashTable(cage_base); }
-DEF_GETTER(HeapObject, IsObjectHashTable, bool) { return IsHashTable(isolate); }
+DEF_GETTER(HeapObject, IsObjectHashTable, bool) {
+ return IsHashTable(cage_base);
+}
-DEF_GETTER(HeapObject, IsHashTableBase, bool) { return IsHashTable(isolate); }
+DEF_GETTER(HeapObject, IsHashTableBase, bool) { return IsHashTable(cage_base); }
+#if V8_ENABLE_WEBASSEMBLY
DEF_GETTER(HeapObject, IsWasmExceptionPackage, bool) {
// It is not possible to check for the existence of certain properties on the
// underlying {JSReceiver} here because that requires calling handlified code.
- return IsJSReceiver(isolate);
+ return IsJSReceiver(cage_base);
}
+#endif // V8_ENABLE_WEBASSEMBLY
bool Object::IsPrimitive() const {
if (IsSmi()) return true;
HeapObject this_heap_object = HeapObject::cast(*this);
- IsolateRoot isolate = GetIsolateForPtrCompr(this_heap_object);
- return this_heap_object.map(isolate).IsPrimitiveMap();
+ PtrComprCageBase cage_base = GetPtrComprCageBase(this_heap_object);
+ return this_heap_object.map(cage_base).IsPrimitiveMap();
}
-bool Object::IsPrimitive(IsolateRoot isolate) const {
- return IsSmi() || HeapObject::cast(*this).map(isolate).IsPrimitiveMap();
+bool Object::IsPrimitive(PtrComprCageBase cage_base) const {
+ return IsSmi() || HeapObject::cast(*this).map(cage_base).IsPrimitiveMap();
}
// static
@@ -385,24 +387,24 @@ Maybe<bool> Object::IsArray(Handle<Object> object) {
}
DEF_GETTER(HeapObject, IsUndetectable, bool) {
- return map(isolate).is_undetectable();
+ return map(cage_base).is_undetectable();
}
DEF_GETTER(HeapObject, IsAccessCheckNeeded, bool) {
- if (IsJSGlobalProxy(isolate)) {
+ if (IsJSGlobalProxy(cage_base)) {
const JSGlobalProxy proxy = JSGlobalProxy::cast(*this);
JSGlobalObject global = proxy.GetIsolate()->context().global_object();
return proxy.IsDetachedFrom(global);
}
- return map(isolate).is_access_check_needed();
+ return map(cage_base).is_access_check_needed();
}
-#define MAKE_STRUCT_PREDICATE(NAME, Name, name) \
- bool Object::Is##Name() const { \
- return IsHeapObject() && HeapObject::cast(*this).Is##Name(); \
- } \
- bool Object::Is##Name(IsolateRoot isolate) const { \
- return IsHeapObject() && HeapObject::cast(*this).Is##Name(isolate); \
+#define MAKE_STRUCT_PREDICATE(NAME, Name, name) \
+ bool Object::Is##Name() const { \
+ return IsHeapObject() && HeapObject::cast(*this).Is##Name(); \
+ } \
+ bool Object::Is##Name(PtrComprCageBase cage_base) const { \
+ return IsHeapObject() && HeapObject::cast(*this).Is##Name(cage_base); \
}
STRUCT_LIST(MAKE_STRUCT_PREDICATE)
#undef MAKE_STRUCT_PREDICATE
@@ -465,17 +467,17 @@ bool Object::FilterKey(PropertyFilter filter) {
return false;
}
-Representation Object::OptimalRepresentation(IsolateRoot isolate) const {
+Representation Object::OptimalRepresentation(PtrComprCageBase cage_base) const {
if (!FLAG_track_fields) return Representation::Tagged();
if (IsSmi()) {
return Representation::Smi();
}
HeapObject heap_object = HeapObject::cast(*this);
- if (FLAG_track_double_fields && heap_object.IsHeapNumber(isolate)) {
+ if (FLAG_track_double_fields && heap_object.IsHeapNumber(cage_base)) {
return Representation::Double();
} else if (FLAG_track_computed_fields &&
heap_object.IsUninitialized(
- heap_object.GetReadOnlyRoots(isolate))) {
+ heap_object.GetReadOnlyRoots(cage_base))) {
return Representation::None();
} else if (FLAG_track_heap_object_fields) {
return Representation::HeapObject();
@@ -484,9 +486,9 @@ Representation Object::OptimalRepresentation(IsolateRoot isolate) const {
}
}
-ElementsKind Object::OptimalElementsKind(IsolateRoot isolate) const {
+ElementsKind Object::OptimalElementsKind(PtrComprCageBase cage_base) const {
if (IsSmi()) return PACKED_SMI_ELEMENTS;
- if (IsNumber(isolate)) return PACKED_DOUBLE_ELEMENTS;
+ if (IsNumber(cage_base)) return PACKED_DOUBLE_ELEMENTS;
return PACKED_ELEMENTS;
}
@@ -629,9 +631,10 @@ void Object::InitExternalPointerField(size_t offset, Isolate* isolate,
i::InitExternalPointerField(field_address(offset), isolate, value, tag);
}
-Address Object::ReadExternalPointerField(size_t offset, IsolateRoot isolate,
+Address Object::ReadExternalPointerField(size_t offset,
+ PtrComprCageBase isolate_root,
ExternalPointerTag tag) const {
- return i::ReadExternalPointerField(field_address(offset), isolate, tag);
+ return i::ReadExternalPointerField(field_address(offset), isolate_root, tag);
}
void Object::WriteExternalPointerField(size_t offset, Isolate* isolate,
@@ -685,16 +688,16 @@ ReadOnlyRoots HeapObject::GetReadOnlyRoots() const {
return ReadOnlyHeap::GetReadOnlyRoots(*this);
}
-ReadOnlyRoots HeapObject::GetReadOnlyRoots(IsolateRoot isolate) const {
-#ifdef V8_COMPRESS_POINTERS
- DCHECK_NE(isolate.address(), 0);
- return ReadOnlyRoots(Isolate::FromRootAddress(isolate.address()));
+ReadOnlyRoots HeapObject::GetReadOnlyRoots(PtrComprCageBase cage_base) const {
+#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
+ DCHECK_NE(cage_base.address(), 0);
+ return ReadOnlyRoots(Isolate::FromRootAddress(cage_base.address()));
#else
return GetReadOnlyRoots();
#endif
}
-DEF_GETTER(HeapObject, map, Map) { return map_word(isolate).ToMap(); }
+DEF_GETTER(HeapObject, map, Map) { return map_word(cage_base).ToMap(); }
void HeapObject::set_map(Map value) {
#ifdef VERIFY_HEAP
@@ -713,7 +716,7 @@ void HeapObject::set_map(Map value) {
}
DEF_GETTER(HeapObject, synchronized_map, Map) {
- return synchronized_map_word(isolate).ToMap();
+ return synchronized_map_word(cage_base).ToMap();
}
void HeapObject::synchronized_set_map(Map value) {
@@ -759,7 +762,7 @@ ObjectSlot HeapObject::map_slot() const {
}
DEF_GETTER(HeapObject, map_word, MapWord) {
- return MapField::Relaxed_Load(isolate, *this);
+ return MapField::Relaxed_Load(cage_base, *this);
}
void HeapObject::set_map_word(MapWord map_word) {
@@ -767,7 +770,7 @@ void HeapObject::set_map_word(MapWord map_word) {
}
DEF_GETTER(HeapObject, synchronized_map_word, MapWord) {
- return MapField::Acquire_Load(isolate, *this);
+ return MapField::Acquire_Load(cage_base, *this);
}
void HeapObject::synchronized_set_map_word(MapWord map_word) {
@@ -853,9 +856,9 @@ String RegExpMatchInfo::LastSubject() {
return String::cast(get(kLastSubjectIndex));
}
-void RegExpMatchInfo::SetLastSubject(String value) {
+void RegExpMatchInfo::SetLastSubject(String value, WriteBarrierMode mode) {
DCHECK_GE(length(), kLastMatchOverhead);
- set(kLastSubjectIndex, value);
+ set(kLastSubjectIndex, value, mode);
}
Object RegExpMatchInfo::LastInput() {
@@ -863,9 +866,9 @@ Object RegExpMatchInfo::LastInput() {
return get(kLastInputIndex);
}
-void RegExpMatchInfo::SetLastInput(Object value) {
+void RegExpMatchInfo::SetLastInput(Object value, WriteBarrierMode mode) {
DCHECK_GE(length(), kLastMatchOverhead);
- set(kLastInputIndex, value);
+ set(kLastInputIndex, value, mode);
}
int RegExpMatchInfo::Capture(int i) {
diff --git a/deps/v8/src/objects/objects.cc b/deps/v8/src/objects/objects.cc
index 697ffdec088..18e659a910b 100644
--- a/deps/v8/src/objects/objects.cc
+++ b/deps/v8/src/objects/objects.cc
@@ -63,6 +63,7 @@
#include "src/objects/function-kind.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/instance-type.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/keys.h"
#include "src/objects/lookup-inl.h"
@@ -123,10 +124,12 @@
#include "src/strings/unicode-inl.h"
#include "src/utils/ostreams.h"
#include "src/utils/utils-inl.h"
-#include "src/wasm/wasm-engine.h"
-#include "src/wasm/wasm-objects.h"
#include "src/zone/zone.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace internal {
@@ -458,6 +461,13 @@ Handle<String> Object::NoSideEffectsToString(Isolate* isolate,
if (input->IsString() || input->IsNumber() || input->IsOddball()) {
return Object::ToString(isolate, input).ToHandleChecked();
+ } else if (input->IsJSProxy()) {
+ Handle<Object> currInput = input;
+ do {
+ HeapObject target = Handle<JSProxy>::cast(currInput)->target(isolate);
+ currInput = Handle<Object>(target, isolate);
+ } while (currInput->IsJSProxy());
+ return NoSideEffectsToString(isolate, currInput);
} else if (input->IsBigInt()) {
MaybeHandle<String> maybe_string =
BigInt::ToString(isolate, Handle<BigInt>::cast(input), 10, kDontThrow);
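
The new IsJSProxy branch above repeatedly replaces a proxy with its target until a non-proxy object remains, then formats that. A self-contained sketch of the unwrapping loop with a stand-in node type:

#include <cassert>

struct Node {
  bool is_proxy;
  Node* target;  // meaningful only when is_proxy is true
};

// Follow the target chain until a non-proxy object is reached.
Node* UnwrapProxies(Node* input) {
  while (input->is_proxy) input = input->target;
  return input;
}

int main() {
  Node plain{false, nullptr};
  Node inner{true, &plain};
  Node outer{true, &inner};
  assert(UnwrapProxies(&outer) == &plain);
  return 0;
}
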
@@ -1311,8 +1321,12 @@ Handle<SharedFunctionInfo> FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(
}
bool FunctionTemplateInfo::IsTemplateFor(Map map) {
- RuntimeCallTimerScope timer(GetIsolate(),
- RuntimeCallCounterId::kIsTemplateFor);
+ RuntimeCallTimerScope timer(
+ LocalHeap::Current() == nullptr
+ ? GetIsolate()->counters()->runtime_call_stats()
+ : LocalIsolate::FromHeap(LocalHeap::Current())->runtime_call_stats(),
+ RuntimeCallCounterId::kIsTemplateFor);
+
// There is a constraint on the object; check.
if (!map.IsJSObjectMap()) return false;
// Fetch the constructor function of the object.
@@ -1339,14 +1353,14 @@ bool FunctionTemplateInfo::IsTemplateFor(Map map) {
// static
FunctionTemplateRareData FunctionTemplateInfo::AllocateFunctionTemplateRareData(
Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info) {
- DCHECK(function_template_info->rare_data().IsUndefined(isolate));
+ DCHECK(function_template_info->rare_data(kAcquireLoad).IsUndefined(isolate));
Handle<Struct> struct_obj = isolate->factory()->NewStruct(
FUNCTION_TEMPLATE_RARE_DATA_TYPE, AllocationType::kOld);
Handle<FunctionTemplateRareData> rare_data =
i::Handle<FunctionTemplateRareData>::cast(struct_obj);
rare_data->set_c_function(Smi(0));
rare_data->set_c_signature(Smi(0));
- function_template_info->set_rare_data(*rare_data);
+ function_template_info->set_rare_data(*rare_data, kReleaseStore);
return *rare_data;
}
@@ -1960,6 +1974,10 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
case NAME_DICTIONARY_TYPE:
os << "<NameDictionary[" << FixedArray::cast(*this).length() << "]>";
break;
+ case SWISS_NAME_DICTIONARY_TYPE:
+ os << "<SwissNameDictionary["
+ << SwissNameDictionary::cast(*this).Capacity() << "]>";
+ break;
case GLOBAL_DICTIONARY_TYPE:
os << "<GlobalDictionary[" << FixedArray::cast(*this).length() << "]>";
break;
@@ -2320,9 +2338,11 @@ int HeapObject::SizeFromMap(Map map) const {
return CoverageInfo::SizeFor(
CoverageInfo::unchecked_cast(*this).slot_count());
}
+#if V8_ENABLE_WEBASSEMBLY
if (instance_type == WASM_ARRAY_TYPE) {
return WasmArray::GcSafeSizeFor(map, WasmArray::cast(*this).length());
}
+#endif // V8_ENABLE_WEBASSEMBLY
DCHECK_EQ(instance_type, EMBEDDER_DATA_ARRAY_TYPE);
return EmbedderDataArray::SizeFor(
EmbedderDataArray::unchecked_cast(*this).length());
@@ -2351,6 +2371,7 @@ bool HeapObject::NeedsRehashing(InstanceType instance_type) const {
case SMALL_ORDERED_HASH_MAP_TYPE:
case SMALL_ORDERED_HASH_SET_TYPE:
case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
+ case SWISS_NAME_DICTIONARY_TYPE:
case JS_MAP_TYPE:
case JS_SET_TYPE:
return true;
@@ -2374,6 +2395,7 @@ bool HeapObject::CanBeRehashed() const {
case GLOBAL_DICTIONARY_TYPE:
case NUMBER_DICTIONARY_TYPE:
case SIMPLE_NUMBER_DICTIONARY_TYPE:
+ case SWISS_NAME_DICTIONARY_TYPE:
return true;
case DESCRIPTOR_ARRAY_TYPE:
case STRONG_DESCRIPTOR_ARRAY_TYPE:
@@ -2399,6 +2421,9 @@ void HeapObject::RehashBasedOnMap(Isolate* isolate) {
case NAME_DICTIONARY_TYPE:
NameDictionary::cast(*this).Rehash(isolate);
break;
+ case SWISS_NAME_DICTIONARY_TYPE:
+ SwissNameDictionary::cast(*this).Rehash(isolate);
+ break;
case GLOBAL_DICTIONARY_TYPE:
GlobalDictionary::cast(*this).Rehash(isolate);
break;
@@ -2520,9 +2545,21 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
if ((maybe_attributes.FromJust() & READ_ONLY) != 0) {
return WriteToReadOnlyProperty(it, value, should_throw);
}
- if (maybe_attributes.FromJust() == ABSENT) break;
- *found = false;
- return Nothing<bool>();
+ // At this point we might have called the interceptor's query or getter
+ // callback. Assuming that the callbacks have side effects, we use
+ // Object::SetSuperProperty() which works properly regardless of
+ // whether the property was present on the receiver or not when
+ // storing to the receiver.
+ if (maybe_attributes.FromJust() == ABSENT) {
+ // Proceed lookup from the next state.
+ it->Next();
+ } else {
+ // Finish lookup in order to make Object::SetSuperProperty() store
+ // property to the receiver.
+ it->NotFound();
+ }
+ return Object::SetSuperProperty(it, value, store_origin,
+ should_throw);
}
break;
}
@@ -2597,6 +2634,8 @@ Maybe<bool> Object::SetProperty(LookupIterator* it, Handle<Object> value,
if (found) return result;
}
+ // TODO(ishell): refactor this: both SetProperty and SetSuperProperty have
+ // this piece of code.
// If the receiver is the JSGlobalObject, the store was contextual. In case
// the property did not exist yet on the global object itself, we have to
// throw a reference error in strict mode. In sloppy mode, we continue.
@@ -2640,6 +2679,8 @@ Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
}
Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(it->GetReceiver());
+ // Note, the callers rely on the fact that this code is redoing the full own
+ // lookup from scratch.
LookupIterator::Configuration c = LookupIterator::OWN;
LookupIterator own_lookup =
it->IsElement() ? LookupIterator(isolate, receiver, it->index(), c)
@@ -2702,6 +2743,25 @@ Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
}
}
+ // TODO(ishell): refactor this: both SetProperty and SetSuperProperty have
+ // this piece of code.
+ // If the receiver is the JSGlobalObject, the store was contextual. In case
+ // the property did not exist yet on the global object itself, we have to
+ // throw a reference error in strict mode. In sloppy mode, we continue.
+ if (receiver->IsJSGlobalObject() &&
+ (GetShouldThrow(isolate, should_throw) == ShouldThrow::kThrowOnError)) {
+ if (own_lookup.state() == LookupIterator::TRANSITION) {
+ // The property cell that we have created is garbage because we are going
+ // to throw now instead of putting it into the global dictionary. However,
+ // the cell might already have been stored into the feedback vector, so
+ // we must invalidate it nevertheless.
+ own_lookup.transition_cell()->ClearAndInvalidate(ReadOnlyRoots(isolate));
+ }
+ isolate->Throw(*isolate->factory()->NewReferenceError(
+ MessageTemplate::kNotDefined, own_lookup.GetName()));
+ return Nothing<bool>();
+ }
+
return AddDataProperty(&own_lookup, value, NONE, should_throw, store_origin);
}
@@ -3551,12 +3611,11 @@ Maybe<bool> JSProxy::SetPrivateSymbol(Isolate* isolate, Handle<JSProxy> proxy,
}
PropertyDetails details(kData, DONT_ENUM, PropertyConstness::kMutable);
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- Handle<OrderedNameDictionary> dict(proxy->property_dictionary_ordered(),
- isolate);
- Handle<OrderedNameDictionary> result =
- OrderedNameDictionary::Add(isolate, dict, private_name, value, details)
- .ToHandleChecked();
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ Handle<SwissNameDictionary> dict(proxy->property_dictionary_swiss(),
+ isolate);
+ Handle<SwissNameDictionary> result =
+ SwissNameDictionary::Add(isolate, dict, private_name, value, details);
if (!dict.is_identical_to(result)) proxy->SetProperties(*result);
} else {
Handle<NameDictionary> dict(proxy->property_dictionary(), isolate);
@@ -4294,7 +4353,7 @@ template Handle<DescriptorArray> DescriptorArray::Allocate(
LocalIsolate* isolate, int nof_descriptors, int slack,
AllocationType allocation);
-void DescriptorArray::Initialize(EnumCache enum_cache,
+void DescriptorArray::Initialize(EnumCache empty_enum_cache,
HeapObject undefined_value,
int nof_descriptors, int slack) {
DCHECK_GE(nof_descriptors, 0);
@@ -4304,13 +4363,13 @@ void DescriptorArray::Initialize(EnumCache enum_cache,
set_number_of_descriptors(nof_descriptors);
set_raw_number_of_marked_descriptors(0);
set_filler16bits(0);
- set_enum_cache(enum_cache);
+ set_enum_cache(empty_enum_cache, SKIP_WRITE_BARRIER);
MemsetTagged(GetDescriptorSlot(0), undefined_value,
number_of_all_descriptors() * kEntrySize);
}
void DescriptorArray::ClearEnumCache() {
- set_enum_cache(GetReadOnlyRoots().empty_enum_cache());
+ set_enum_cache(GetReadOnlyRoots().empty_enum_cache(), SKIP_WRITE_BARRIER);
}
void DescriptorArray::Replace(InternalIndex index, Descriptor* descriptor) {
@@ -4425,17 +4484,19 @@ Handle<Object> AccessorPair::GetComponent(Isolate* isolate,
Handle<NativeContext> native_context,
Handle<AccessorPair> accessor_pair,
AccessorComponent component) {
- Object accessor = accessor_pair->get(component);
- if (accessor.IsFunctionTemplateInfo()) {
- return ApiNatives::InstantiateFunction(
- isolate, native_context,
- handle(FunctionTemplateInfo::cast(accessor), isolate))
- .ToHandleChecked();
- }
- if (accessor.IsNull(isolate)) {
+ Handle<Object> accessor(accessor_pair->get(component), isolate);
+ if (accessor->IsFunctionTemplateInfo()) {
+ auto function = ApiNatives::InstantiateFunction(
+ isolate, native_context,
+ Handle<FunctionTemplateInfo>::cast(accessor))
+ .ToHandleChecked();
+ accessor_pair->set(component, *function);
+ return function;
+ }
+ if (accessor->IsNull(isolate)) {
return isolate->factory()->undefined_value();
}
- return handle(accessor, isolate);
+ return accessor;
}
#ifdef DEBUG
@@ -4726,8 +4787,10 @@ template <typename LocalIsolate>
// static
void Script::InitLineEnds(LocalIsolate* isolate, Handle<Script> script) {
if (!script->line_ends().IsUndefined(isolate)) return;
+#if V8_ENABLE_WEBASSEMBLY
DCHECK(script->type() != Script::TYPE_WASM ||
script->source_mapping_url().IsString());
+#endif // V8_ENABLE_WEBASSEMBLY
Object src_obj = script->source();
if (!src_obj.IsString()) {
@@ -4750,15 +4813,19 @@ template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void Script::InitLineEnds(
bool Script::GetPositionInfo(Handle<Script> script, int position,
PositionInfo* info, OffsetFlag offset_flag) {
+ bool init_line_ends = true;
+#if V8_ENABLE_WEBASSEMBLY
// For wasm, we do not create an artificial line_ends array, but do the
// translation directly.
- if (script->type() != Script::TYPE_WASM)
- InitLineEnds(script->GetIsolate(), script);
+ init_line_ends = script->type() != Script::TYPE_WASM;
+#endif // V8_ENABLE_WEBASSEMBLY
+ if (init_line_ends) InitLineEnds(script->GetIsolate(), script);
return script->GetPositionInfo(position, info, offset_flag);
}
bool Script::IsUserJavaScript() const { return type() == Script::TYPE_NORMAL; }
+#if V8_ENABLE_WEBASSEMBLY
bool Script::ContainsAsmModule() {
DisallowGarbageCollection no_gc;
SharedFunctionInfo::ScriptIterator iter(this->GetIsolate(), *this);
@@ -4768,6 +4835,7 @@ bool Script::ContainsAsmModule() {
}
return false;
}
+#endif // V8_ENABLE_WEBASSEMBLY
namespace {
@@ -4813,6 +4881,7 @@ bool Script::GetPositionInfo(int position, PositionInfo* info,
OffsetFlag offset_flag) const {
DisallowGarbageCollection no_gc;
+#if V8_ENABLE_WEBASSEMBLY
// For wasm, we use the byte offset as the column.
if (type() == Script::TYPE_WASM) {
DCHECK_LE(0, position);
@@ -4825,6 +4894,7 @@ bool Script::GetPositionInfo(int position, PositionInfo* info,
info->line_end = module->functions.back().code.end_offset();
return true;
}
+#endif // V8_ENABLE_WEBASSEMBLY
if (line_ends().IsUndefined()) {
// Slow mode: we do not have line_ends. We have to iterate through source.
@@ -5113,11 +5183,9 @@ bool JSArray::MayHaveReadOnlyLength(Map js_array_map) {
// dictionary properties. Since it's not configurable, it's guaranteed to be
// the first in the descriptor array.
InternalIndex first(0);
- DCHECK(js_array_map.instance_descriptors(kRelaxedLoad).GetKey(first) ==
+ DCHECK(js_array_map.instance_descriptors().GetKey(first) ==
js_array_map.GetReadOnlyRoots().length_string());
- return js_array_map.instance_descriptors(kRelaxedLoad)
- .GetDetails(first)
- .IsReadOnly();
+ return js_array_map.instance_descriptors().GetDetails(first).IsReadOnly();
}
bool JSArray::HasReadOnlyLength(Handle<JSArray> array) {
@@ -5254,8 +5322,8 @@ Handle<Object> JSPromise::Reject(Handle<JSPromise> promise,
if (isolate->debug()->is_active()) MoveMessageToPromise(isolate, promise);
if (debug_event) isolate->debug()->OnPromiseReject(promise, reason);
- isolate->RunAllPromiseHooks(PromiseHookType::kResolve, promise,
- isolate->factory()->undefined_value());
+ isolate->RunPromiseHook(PromiseHookType::kResolve, promise,
+ isolate->factory()->undefined_value());
// 1. Assert: The value of promise.[[PromiseState]] is "pending".
CHECK_EQ(Promise::kPending, promise->status());
@@ -5290,8 +5358,8 @@ MaybeHandle<Object> JSPromise::Resolve(Handle<JSPromise> promise,
DCHECK(
!reinterpret_cast<v8::Isolate*>(isolate)->GetCurrentContext().IsEmpty());
- isolate->RunAllPromiseHooks(PromiseHookType::kResolve, promise,
- isolate->factory()->undefined_value());
+ isolate->RunPromiseHook(PromiseHookType::kResolve, promise,
+ isolate->factory()->undefined_value());
// 7. If SameValue(resolution, promise) is true, then
if (promise.is_identical_to(resolution)) {
@@ -5534,7 +5602,8 @@ Handle<Derived> HashTable<Derived, Shape>::NewInternal(
}
template <typename Derived, typename Shape>
-void HashTable<Derived, Shape>::Rehash(IsolateRoot isolate, Derived new_table) {
+void HashTable<Derived, Shape>::Rehash(PtrComprCageBase cage_base,
+ Derived new_table) {
DisallowGarbageCollection no_gc;
WriteBarrierMode mode = new_table.GetWriteBarrierMode(no_gc);
@@ -5542,21 +5611,21 @@ void HashTable<Derived, Shape>::Rehash(IsolateRoot isolate, Derived new_table) {
// Copy prefix to new array.
for (int i = kPrefixStartIndex; i < kElementsStartIndex; i++) {
- new_table.set(i, get(isolate, i), mode);
+ new_table.set(i, get(cage_base, i), mode);
}
// Rehash the elements.
- ReadOnlyRoots roots = GetReadOnlyRoots(isolate);
+ ReadOnlyRoots roots = GetReadOnlyRoots(cage_base);
for (InternalIndex i : this->IterateEntries()) {
uint32_t from_index = EntryToIndex(i);
- Object k = this->get(isolate, from_index);
+ Object k = this->get(cage_base, from_index);
if (!IsKey(roots, k)) continue;
uint32_t hash = Shape::HashForObject(roots, k);
uint32_t insertion_index =
- EntryToIndex(new_table.FindInsertionEntry(isolate, roots, hash));
- new_table.set_key(insertion_index, get(isolate, from_index), mode);
+ EntryToIndex(new_table.FindInsertionEntry(cage_base, roots, hash));
+ new_table.set_key(insertion_index, get(cage_base, from_index), mode);
for (int j = 1; j < Shape::kEntrySize; j++) {
- new_table.set(insertion_index + j, get(isolate, from_index + j), mode);
+ new_table.set(insertion_index + j, get(cage_base, from_index + j), mode);
}
}
new_table.SetNumberOfElements(NumberOfElements());
@@ -5598,10 +5667,10 @@ void HashTable<Derived, Shape>::Swap(InternalIndex entry1, InternalIndex entry2,
}
template <typename Derived, typename Shape>
-void HashTable<Derived, Shape>::Rehash(IsolateRoot isolate) {
+void HashTable<Derived, Shape>::Rehash(PtrComprCageBase cage_base) {
DisallowGarbageCollection no_gc;
WriteBarrierMode mode = GetWriteBarrierMode(no_gc);
- ReadOnlyRoots roots = GetReadOnlyRoots(isolate);
+ ReadOnlyRoots roots = GetReadOnlyRoots(cage_base);
uint32_t capacity = Capacity();
bool done = false;
for (int probe = 1; !done; probe++) {
@@ -5610,7 +5679,7 @@ void HashTable<Derived, Shape>::Rehash(IsolateRoot isolate) {
done = true;
for (InternalIndex current(0); current.raw_value() < capacity;
/* {current} is advanced manually below, when appropriate.*/) {
- Object current_key = KeyAt(isolate, current);
+ Object current_key = KeyAt(cage_base, current);
if (!IsKey(roots, current_key)) {
++current; // Advance to next entry.
continue;
@@ -5620,7 +5689,7 @@ void HashTable<Derived, Shape>::Rehash(IsolateRoot isolate) {
++current; // Advance to next entry.
continue;
}
- Object target_key = KeyAt(isolate, target);
+ Object target_key = KeyAt(cage_base, target);
if (!IsKey(roots, target_key) ||
EntryForProbe(roots, target_key, probe, target) != target) {
// Put the current element into the correct position.
@@ -5640,7 +5709,7 @@ void HashTable<Derived, Shape>::Rehash(IsolateRoot isolate) {
HeapObject undefined = roots.undefined_value();
Derived* self = static_cast<Derived*>(this);
for (InternalIndex current : InternalIndex::Range(capacity)) {
- if (KeyAt(isolate, current) == the_hole) {
+ if (KeyAt(cage_base, current) == the_hole) {
self->set_key(EntryToIndex(current) + kEntryKeyIndex, undefined,
SKIP_WRITE_BARRIER);
}
@@ -5731,15 +5800,14 @@ Handle<Derived> HashTable<Derived, Shape>::Shrink(Isolate* isolate,
}
template <typename Derived, typename Shape>
-InternalIndex HashTable<Derived, Shape>::FindInsertionEntry(IsolateRoot isolate,
- ReadOnlyRoots roots,
- uint32_t hash) {
+InternalIndex HashTable<Derived, Shape>::FindInsertionEntry(
+ PtrComprCageBase cage_base, ReadOnlyRoots roots, uint32_t hash) {
uint32_t capacity = Capacity();
uint32_t count = 1;
// EnsureCapacity will guarantee the hash table is never full.
for (InternalIndex entry = FirstProbe(hash, capacity);;
entry = NextProbe(entry, count++, capacity)) {
- if (!IsKey(roots, KeyAt(isolate, entry))) return entry;
+ if (!IsKey(roots, KeyAt(cage_base, entry))) return entry;
}
}
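FindInsertionEntry above walks the probe sequence until it reaches a slot whose contents are not a real key; EnsureCapacity keeps the table from ever being full, so the loop terminates. A self-contained sketch of the same probing idea over a power-of-two table (the stride formula and the kEmpty sentinel are illustrative assumptions, not V8's exact constants):

#include <cstdint>
#include <vector>

constexpr uint32_t kEmpty = 0;  // stand-in for "not a key"

uint32_t FirstProbe(uint32_t hash, uint32_t capacity) {
  return hash & (capacity - 1);
}
uint32_t NextProbe(uint32_t entry, uint32_t count, uint32_t capacity) {
  return (entry + count) & (capacity - 1);  // growing stride between probes
}

// Returns the first free entry; the table is kept below full, so this halts.
uint32_t FindInsertionEntry(const std::vector<uint32_t>& keys, uint32_t hash) {
  uint32_t capacity = static_cast<uint32_t>(keys.size());
  uint32_t count = 1;
  for (uint32_t entry = FirstProbe(hash, capacity);;
       entry = NextProbe(entry, count++, capacity)) {
    if (keys[entry] == kEmpty) return entry;
  }
}

int main() {
  std::vector<uint32_t> keys(8, kEmpty);
  keys[3] = 42;                                      // occupied slot
  return FindInsertionEntry(keys, 3) == 4 ? 0 : 1;   // probes past it
}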
@@ -5911,6 +5979,13 @@ Handle<Derived> Dictionary<Derived, Shape>::Add(LocalIsolate* isolate,
return dictionary;
}
+template <typename Derived, typename Shape>
+Handle<Derived> Dictionary<Derived, Shape>::ShallowCopy(
+ Isolate* isolate, Handle<Derived> dictionary) {
+ return Handle<Derived>::cast(isolate->factory()->CopyFixedArrayWithMap(
+ dictionary, Derived::GetMap(ReadOnlyRoots(isolate))));
+}
+
// static
Handle<SimpleNumberDictionary> SimpleNumberDictionary::Set(
Isolate* isolate, Handle<SimpleNumberDictionary> dictionary, uint32_t key,
@@ -6040,14 +6115,14 @@ void ObjectHashTableBase<Derived, Shape>::FillEntriesWithHoles(
}
template <typename Derived, typename Shape>
-Object ObjectHashTableBase<Derived, Shape>::Lookup(IsolateRoot isolate,
+Object ObjectHashTableBase<Derived, Shape>::Lookup(PtrComprCageBase cage_base,
Handle<Object> key,
int32_t hash) {
DisallowGarbageCollection no_gc;
- ReadOnlyRoots roots = this->GetReadOnlyRoots(isolate);
+ ReadOnlyRoots roots = this->GetReadOnlyRoots(cage_base);
DCHECK(this->IsKey(roots, *key));
- InternalIndex entry = this->FindEntry(isolate, roots, key, hash);
+ InternalIndex entry = this->FindEntry(cage_base, roots, key, hash);
if (entry.is_not_found()) return roots.the_hole_value();
return this->get(Derived::EntryToIndex(entry) + 1);
}
@@ -6056,8 +6131,8 @@ template <typename Derived, typename Shape>
Object ObjectHashTableBase<Derived, Shape>::Lookup(Handle<Object> key) {
DisallowGarbageCollection no_gc;
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
- ReadOnlyRoots roots = this->GetReadOnlyRoots(isolate);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ ReadOnlyRoots roots = this->GetReadOnlyRoots(cage_base);
DCHECK(this->IsKey(roots, *key));
// If the object does not have an identity hash, it was never used as a key.
@@ -6065,13 +6140,13 @@ Object ObjectHashTableBase<Derived, Shape>::Lookup(Handle<Object> key) {
if (hash.IsUndefined(roots)) {
return roots.the_hole_value();
}
- return Lookup(isolate, key, Smi::ToInt(hash));
+ return Lookup(cage_base, key, Smi::ToInt(hash));
}
template <typename Derived, typename Shape>
Object ObjectHashTableBase<Derived, Shape>::Lookup(Handle<Object> key,
int32_t hash) {
- return Lookup(GetIsolateForPtrCompr(*this), key, hash);
+ return Lookup(GetPtrComprCageBase(*this), key, hash);
}
template <typename Derived, typename Shape>
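The IsolateRoot to PtrComprCageBase rename running through this file names what the parameter really is: the base address of the pointer-compression cage against which compressed tagged fields are decompressed. A conceptual sketch of that decompression, with a made-up base address and no V8 types:

#include <cstdint>

using Address = uint64_t;   // wide enough everywhere for this sketch
using Tagged32 = uint32_t;  // what an on-heap compressed field would hold

struct PtrComprCageBase {
  Address base;  // assumed aligned start of the region all objects live in
};

Tagged32 Compress(Address full) {
  return static_cast<Tagged32>(full);  // keep only the low 32 bits
}
Address Decompress(PtrComprCageBase cage, Tagged32 field) {
  return cage.base + field;            // rebase the offset within the cage
}

int main() {
  PtrComprCageBase cage{0x700000000000ull};
  Address object = cage.base + 0x1234;
  Tagged32 stored = Compress(object);
  return Decompress(cage, stored) == object ? 0 : 1;
}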
diff --git a/deps/v8/src/objects/objects.h b/deps/v8/src/objects/objects.h
index c68445597f5..e4532bb0e5e 100644
--- a/deps/v8/src/objects/objects.h
+++ b/deps/v8/src/objects/objects.h
@@ -279,7 +279,7 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
#define IS_TYPE_FUNCTION_DECL(Type) \
V8_INLINE bool Is##Type() const; \
- V8_INLINE bool Is##Type(IsolateRoot isolate) const;
+ V8_INLINE bool Is##Type(PtrComprCageBase cage_base) const;
OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
IS_TYPE_FUNCTION_DECL(HashTableBase)
@@ -307,7 +307,7 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
#define DECL_STRUCT_PREDICATE(NAME, Name, name) \
V8_INLINE bool Is##Name() const; \
- V8_INLINE bool Is##Name(IsolateRoot isolate) const;
+ V8_INLINE bool Is##Name(PtrComprCageBase cage_base) const;
STRUCT_LIST(DECL_STRUCT_PREDICATE)
#undef DECL_STRUCT_PREDICATE
@@ -322,9 +322,9 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
V8_EXPORT_PRIVATE bool ToInt32(int32_t* value);
inline bool ToUint32(uint32_t* value) const;
- inline Representation OptimalRepresentation(IsolateRoot isolate) const;
+ inline Representation OptimalRepresentation(PtrComprCageBase cage_base) const;
- inline ElementsKind OptimalElementsKind(IsolateRoot isolate) const;
+ inline ElementsKind OptimalElementsKind(PtrComprCageBase cage_base) const;
inline bool FitsRepresentation(Representation representation);
@@ -673,7 +673,8 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
inline void InitExternalPointerField(size_t offset, Isolate* isolate);
inline void InitExternalPointerField(size_t offset, Isolate* isolate,
Address value, ExternalPointerTag tag);
- inline Address ReadExternalPointerField(size_t offset, IsolateRoot isolate,
+ inline Address ReadExternalPointerField(size_t offset,
+ PtrComprCageBase isolate_root,
ExternalPointerTag tag) const;
inline void WriteExternalPointerField(size_t offset, Isolate* isolate,
Address value, ExternalPointerTag tag);
diff --git a/deps/v8/src/objects/oddball-inl.h b/deps/v8/src/objects/oddball-inl.h
index 4a022831bee..df7829e2b4d 100644
--- a/deps/v8/src/objects/oddball-inl.h
+++ b/deps/v8/src/objects/oddball-inl.h
@@ -37,7 +37,7 @@ Handle<Object> Oddball::ToNumber(Isolate* isolate, Handle<Oddball> input) {
}
DEF_GETTER(HeapObject, IsBoolean, bool) {
- return IsOddball(isolate) &&
+ return IsOddball(cage_base) &&
((Oddball::cast(*this).kind() & Oddball::kNotBooleanMask) == 0);
}
diff --git a/deps/v8/src/objects/ordered-hash-table.cc b/deps/v8/src/objects/ordered-hash-table.cc
index d9bc94b9af0..a19217878dc 100644
--- a/deps/v8/src/objects/ordered-hash-table.cc
+++ b/deps/v8/src/objects/ordered-hash-table.cc
@@ -438,38 +438,6 @@ InternalIndex OrderedNameDictionary::FindEntry(LocalIsolate* isolate,
return InternalIndex::NotFound();
}
-// TODO(emrich): This is almost an identical copy of
-// Dictionary<..>::SlowReverseLookup.
-// Consolidate both versions elsewhere (e.g., hash-table-utils)?
-Object OrderedNameDictionary::SlowReverseLookup(Isolate* isolate,
- Object value) {
- ReadOnlyRoots roots(isolate);
- for (InternalIndex i : IterateEntries()) {
- Object k;
- if (!ToKey(roots, i, &k)) continue;
- Object e = this->ValueAt(i);
- if (e == value) return k;
- }
- return roots.undefined_value();
-}
-
-// TODO(emrich): This is almost an identical copy of
-// HashTable<..>::NumberOfEnumerableProperties.
-// Consolidate both versions elsewhere (e.g., hash-table-utils)?
-int OrderedNameDictionary::NumberOfEnumerableProperties() {
- ReadOnlyRoots roots = this->GetReadOnlyRoots();
- int result = 0;
- for (InternalIndex i : this->IterateEntries()) {
- Object k;
- if (!this->ToKey(roots, i, &k)) continue;
- if (k.FilterKey(ENUMERABLE_STRINGS)) continue;
- PropertyDetails details = this->DetailsAt(i);
- PropertyAttributes attr = details.attributes();
- if ((attr & ONLY_ENUMERABLE) == 0) result++;
- }
- return result;
-}
-
template <typename LocalIsolate>
MaybeHandle<OrderedNameDictionary> OrderedNameDictionary::Add(
LocalIsolate* isolate, Handle<OrderedNameDictionary> table,
@@ -712,17 +680,9 @@ void SmallOrderedHashTable<Derived>::Initialize(Isolate* isolate,
memset(reinterpret_cast<byte*>(hashtable_start), kNotFound,
num_buckets + num_chains);
- if (Heap::InYoungGeneration(*this)) {
- MemsetTagged(RawField(DataTableStartOffset()),
- ReadOnlyRoots(isolate).the_hole_value(),
- capacity * Derived::kEntrySize);
- } else {
- for (int i = 0; i < capacity; i++) {
- for (int j = 0; j < Derived::kEntrySize; j++) {
- SetDataEntry(i, j, ReadOnlyRoots(isolate).the_hole_value());
- }
- }
- }
+ MemsetTagged(RawField(DataTableStartOffset()),
+ ReadOnlyRoots(isolate).the_hole_value(),
+ capacity * Derived::kEntrySize);
#ifdef DEBUG
for (int i = 0; i < num_buckets; ++i) {
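SmallOrderedHashTable::Initialize above drops the young-generation special case and always fills the data slots with the-hole in one bulk tagged fill. A trivial sketch of that uniform initialization; the sentinel value and slot layout are placeholders, not V8's MemsetTagged:

#include <algorithm>
#include <array>
#include <cstdint>

using Tagged = uintptr_t;
constexpr Tagged kTheHole = 0xdeadbeef;  // hypothetical sentinel value

int main() {
  constexpr int kCapacity = 4, kEntrySize = 2;
  std::array<Tagged, kCapacity * kEntrySize> data;
  // One bulk fill over every data slot, no per-entry or per-generation loop.
  std::fill(data.begin(), data.end(), kTheHole);
  return std::all_of(data.begin(), data.end(),
                     [](Tagged v) { return v == kTheHole; })
             ? 0
             : 1;
}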
diff --git a/deps/v8/src/objects/ordered-hash-table.h b/deps/v8/src/objects/ordered-hash-table.h
index ca1d29d2fd6..1746e2dc896 100644
--- a/deps/v8/src/objects/ordered-hash-table.h
+++ b/deps/v8/src/objects/ordered-hash-table.h
@@ -92,8 +92,6 @@ class OrderedHashTable : public FixedArray {
InternalIndex FindEntry(Isolate* isolate, Object key);
- Object SlowReverseLookup(Isolate* isolate, Object value);
-
int NumberOfElements() const {
return Smi::ToInt(get(NumberOfElementsIndex()));
}
@@ -784,10 +782,6 @@ class V8_EXPORT_PRIVATE OrderedNameDictionary
return FindEntry(isolate, *key);
}
- int NumberOfEnumerableProperties();
-
- Object SlowReverseLookup(Isolate* isolate, Object value);
-
static Handle<OrderedNameDictionary> DeleteEntry(
Isolate* isolate, Handle<OrderedNameDictionary> table,
InternalIndex entry);
diff --git a/deps/v8/src/objects/property-array-inl.h b/deps/v8/src/objects/property-array-inl.h
index e2e905fbb36..fe884b043fd 100644
--- a/deps/v8/src/objects/property-array-inl.h
+++ b/deps/v8/src/objects/property-array-inl.h
@@ -25,14 +25,14 @@ SMI_ACCESSORS(PropertyArray, length_and_hash, kLengthAndHashOffset)
SYNCHRONIZED_SMI_ACCESSORS(PropertyArray, length_and_hash, kLengthAndHashOffset)
Object PropertyArray::get(int index) const {
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
- return get(isolate, index);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return get(cage_base, index);
}
-Object PropertyArray::get(IsolateRoot isolate, int index) const {
+Object PropertyArray::get(PtrComprCageBase cage_base, int index) const {
DCHECK_LT(static_cast<unsigned>(index),
static_cast<unsigned>(this->length()));
- return TaggedField<Object>::Relaxed_Load(isolate, *this,
+ return TaggedField<Object>::Relaxed_Load(cage_base, *this,
OffsetOfElementAt(index));
}
diff --git a/deps/v8/src/objects/property-array.h b/deps/v8/src/objects/property-array.h
index da15e8d7320..f4cc5c9fb1c 100644
--- a/deps/v8/src/objects/property-array.h
+++ b/deps/v8/src/objects/property-array.h
@@ -30,7 +30,7 @@ class PropertyArray : public HeapObject {
inline int Hash() const;
inline Object get(int index) const;
- inline Object get(IsolateRoot isolate, int index) const;
+ inline Object get(PtrComprCageBase cage_base, int index) const;
inline void set(int index, Object value);
// Setter with explicit barrier mode.
diff --git a/deps/v8/src/objects/property-descriptor.cc b/deps/v8/src/objects/property-descriptor.cc
index e7bfd039de8..cde66262cf3 100644
--- a/deps/v8/src/objects/property-descriptor.cc
+++ b/deps/v8/src/objects/property-descriptor.cc
@@ -57,7 +57,7 @@ bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj,
// TODO(jkummerow): support dictionary properties?
if (map->is_dictionary_map()) return false;
Handle<DescriptorArray> descs =
- Handle<DescriptorArray>(map->instance_descriptors(kRelaxedLoad), isolate);
+ Handle<DescriptorArray>(map->instance_descriptors(isolate), isolate);
for (InternalIndex i : map->IterateOwnDescriptors()) {
PropertyDetails details = descs->GetDetails(i);
Handle<Object> value;
diff --git a/deps/v8/src/objects/property-details.h b/deps/v8/src/objects/property-details.h
index bab6e297e48..1b856a12574 100644
--- a/deps/v8/src/objects/property-details.h
+++ b/deps/v8/src/objects/property-details.h
@@ -32,6 +32,12 @@ enum PropertyAttributes {
// a non-existent property.
};
+// Number of distinct bits in PropertyAttributes.
+static const int kPropertyAttributesBitsCount = 3;
+
+static const int kPropertyAttributesCombinationsCount =
+ 1 << kPropertyAttributesBitsCount;
+
enum PropertyFilter {
ALL_PROPERTIES = 0,
ONLY_WRITABLE = 1,
@@ -63,6 +69,11 @@ STATIC_ASSERT(SKIP_STRINGS ==
STATIC_ASSERT(SKIP_SYMBOLS ==
static_cast<PropertyFilter>(v8::PropertyFilter::SKIP_SYMBOLS));
+// Assert that kPropertyAttributesBitsCount value matches the definition of
+// ALL_ATTRIBUTES_MASK.
+STATIC_ASSERT((ALL_ATTRIBUTES_MASK == (READ_ONLY | DONT_ENUM | DONT_DELETE)) ==
+ (kPropertyAttributesBitsCount == 3));
+
class Smi;
class TypeInfo;
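property-details.h gains a named bit count plus a STATIC_ASSERT that keeps it in sync with ALL_ATTRIBUTES_MASK. A compile-time check of the same kind, assuming the usual one-bit-per-attribute encoding and simplifying the assert to a direct mask comparison:

enum PropertyAttributes {
  NONE = 0,
  READ_ONLY = 1 << 0,
  DONT_ENUM = 1 << 1,
  DONT_DELETE = 1 << 2,
  ALL_ATTRIBUTES_MASK = READ_ONLY | DONT_ENUM | DONT_DELETE,
};

static const int kPropertyAttributesBitsCount = 3;
static const int kPropertyAttributesCombinationsCount =
    1 << kPropertyAttributesBitsCount;

// If someone adds a fourth attribute bit, these stop compiling.
static_assert(ALL_ATTRIBUTES_MASK == (1 << kPropertyAttributesBitsCount) - 1,
              "bit count out of sync with the attributes mask");
static_assert(kPropertyAttributesCombinationsCount == 8,
              "3 bits give 8 combinations");

int main() { return 0; }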
diff --git a/deps/v8/src/objects/property.cc b/deps/v8/src/objects/property.cc
index b0bb79c6016..c21a618cb19 100644
--- a/deps/v8/src/objects/property.cc
+++ b/deps/v8/src/objects/property.cc
@@ -75,10 +75,10 @@ Descriptor Descriptor::DataField(Handle<Name> key, int field_index,
Descriptor Descriptor::DataConstant(Handle<Name> key, Handle<Object> value,
PropertyAttributes attributes) {
- IsolateRoot isolate = GetIsolateForPtrCompr(*key);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*key);
return Descriptor(key, MaybeObjectHandle(value), kData, attributes,
kDescriptor, PropertyConstness::kConst,
- value->OptimalRepresentation(isolate), 0);
+ value->OptimalRepresentation(cage_base), 0);
}
Descriptor Descriptor::DataConstant(Isolate* isolate, Handle<Name> key,
diff --git a/deps/v8/src/objects/regexp-match-info.h b/deps/v8/src/objects/regexp-match-info.h
index 9a1b03828e2..9799c3282a0 100644
--- a/deps/v8/src/objects/regexp-match-info.h
+++ b/deps/v8/src/objects/regexp-match-info.h
@@ -37,11 +37,13 @@ class V8_EXPORT_PRIVATE RegExpMatchInfo : NON_EXPORTED_BASE(public FixedArray) {
// Returns the subject string of the last match.
inline String LastSubject();
- inline void SetLastSubject(String value);
+ inline void SetLastSubject(String value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Like LastSubject, but modifiable by the user.
inline Object LastInput();
- inline void SetLastInput(Object value);
+ inline void SetLastInput(Object value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Returns the i'th capture index, 0 <= i < NumberOfCaptures(). Capture(0) and
// Capture(1) determine the start- and endpoint of the match itself.
diff --git a/deps/v8/src/objects/scope-info-inl.h b/deps/v8/src/objects/scope-info-inl.h
index 6ba93dd80f0..a31f0e989bd 100644
--- a/deps/v8/src/objects/scope-info-inl.h
+++ b/deps/v8/src/objects/scope-info-inl.h
@@ -30,49 +30,7 @@ int ScopeInfo::Flags() const { return flags(); }
int ScopeInfo::ParameterCount() const { return parameter_count(); }
int ScopeInfo::ContextLocalCount() const { return context_local_count(); }
-Object ScopeInfo::get(int index) const {
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
- return get(isolate, index);
-}
-
-Object ScopeInfo::get(IsolateRoot isolate, int index) const {
- DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
- return TaggedField<Object>::Relaxed_Load(
- isolate, *this, FixedArray::OffsetOfElementAt(index));
-}
-
-void ScopeInfo::set(int index, Smi value) {
- DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
- DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
- DCHECK(Object(value).IsSmi());
- int offset = FixedArray::OffsetOfElementAt(index);
- RELAXED_WRITE_FIELD(*this, offset, value);
-}
-
-void ScopeInfo::set(int index, Object value, WriteBarrierMode mode) {
- DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
- DCHECK(IsScopeInfo());
- DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
- int offset = FixedArray::OffsetOfElementAt(index);
- RELAXED_WRITE_FIELD(*this, offset, value);
- CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode);
-}
-
-void ScopeInfo::CopyElements(Isolate* isolate, int dst_index, ScopeInfo src,
- int src_index, int len, WriteBarrierMode mode) {
- if (len == 0) return;
- DCHECK_LE(dst_index + len, length());
- DCHECK_LE(src_index + len, src.length());
- DisallowGarbageCollection no_gc;
-
- ObjectSlot dst_slot(RawFieldOfElementAt(dst_index));
- ObjectSlot src_slot(src.RawFieldOfElementAt(src_index));
- isolate->heap()->CopyRange(*this, dst_slot, src_slot, len, mode);
-}
-
-ObjectSlot ScopeInfo::RawFieldOfElementAt(int index) {
- return RawField(FixedArray::OffsetOfElementAt(index));
-}
+ObjectSlot ScopeInfo::data_start() { return RawField(OffsetOfElementAt(0)); }
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/scope-info.cc b/deps/v8/src/objects/scope-info.cc
index 642770a852f..308b57a309d 100644
--- a/deps/v8/src/objects/scope-info.cc
+++ b/deps/v8/src/objects/scope-info.cc
@@ -15,17 +15,12 @@
#include "src/objects/string-set-inl.h"
#include "src/roots/roots.h"
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
namespace v8 {
namespace internal {
-// An entry in ModuleVariableEntries consists of several slots:
-enum ModuleVariableEntryOffset {
- kModuleVariableNameOffset,
- kModuleVariableIndexOffset,
- kModuleVariablePropertiesOffset,
- kModuleVariableEntryLength // Sentinel value.
-};
-
#ifdef DEBUG
bool ScopeInfo::Equals(ScopeInfo other) const {
if (length() != other.length()) return false;
@@ -164,6 +159,12 @@ Handle<ScopeInfo> ScopeInfo::Create(LocalIsolate* isolate, Zone* zone,
scope->AsModuleScope()->module());
}
+// Make sure the Fields enum agrees with Torque-generated offsets.
+#define ASSERT_MATCHED_FIELD(name) \
+ STATIC_ASSERT(OffsetOfElementAt(k##name) == k##name##Offset);
+ FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(ASSERT_MATCHED_FIELD)
+#undef ASSERT_MATCHED_FIELD
+
const int length = kVariablePartIndex + 2 * context_local_count +
(should_save_class_variable_index ? 1 : 0) +
(has_receiver ? 1 : 0) +
@@ -189,7 +190,9 @@ Handle<ScopeInfo> ScopeInfo::Create(LocalIsolate* isolate, Zone* zone,
if (scope->is_function_scope()) {
DeclarationScope* function_scope = scope->AsDeclarationScope();
has_simple_parameters = function_scope->has_simple_parameters();
+#if V8_ENABLE_WEBASSEMBLY
is_asm_module = function_scope->is_asm_module();
+#endif // V8_ENABLE_WEBASSEMBLY
}
FunctionKind function_kind = kNormalFunction;
if (scope->is_declaration_scope()) {
@@ -228,6 +231,12 @@ Handle<ScopeInfo> ScopeInfo::Create(LocalIsolate* isolate, Zone* zone,
scope_info.set_parameter_count(parameter_count);
scope_info.set_context_local_count(context_local_count);
+ // Jump ahead to set the number of module variables so that we can use range
+ // DCHECKs in future steps.
+ if (scope->is_module_scope()) {
+ scope_info.set_module_variable_count(module_vars_count);
+ }
+
// Add context locals' names and info, module variables' names and info.
// Context locals are added using their index.
int context_local_base = index;
@@ -255,18 +264,26 @@ Handle<ScopeInfo> ScopeInfo::Create(LocalIsolate* isolate, Zone* zone,
break;
}
case VariableLocation::MODULE: {
- scope_info.set(module_var_entry + kModuleVariableNameOffset,
+ scope_info.set(module_var_entry +
+ TorqueGeneratedModuleVariableOffsets::kNameOffset /
+ kTaggedSize,
*var->name(), mode);
- scope_info.set(module_var_entry + kModuleVariableIndexOffset,
- Smi::FromInt(var->index()));
+ scope_info.set(
+ module_var_entry +
+ TorqueGeneratedModuleVariableOffsets::kIndexOffset /
+ kTaggedSize,
+ Smi::FromInt(var->index()));
uint32_t properties =
VariableModeBits::encode(var->mode()) |
InitFlagBit::encode(var->initialization_flag()) |
MaybeAssignedFlagBit::encode(var->maybe_assigned()) |
ParameterNumberBits::encode(ParameterNumberBits::kMax) |
IsStaticFlagBit::encode(var->is_static_flag());
- scope_info.set(module_var_entry + kModuleVariablePropertiesOffset,
- Smi::FromInt(properties));
+ scope_info.set(
+ module_var_entry +
+ TorqueGeneratedModuleVariableOffsets::kPropertiesOffset /
+ kTaggedSize,
+ Smi::FromInt(properties));
module_var_entry += kModuleVariableEntryLength;
break;
}
@@ -371,7 +388,8 @@ Handle<ScopeInfo> ScopeInfo::Create(LocalIsolate* isolate, Zone* zone,
DCHECK_EQ(index, scope_info.ModuleInfoIndex());
scope_info.set(index++, *module_info);
DCHECK_EQ(index, scope_info.ModuleVariableCountIndex());
- scope_info.set(index++, Smi::FromInt(module_vars_count));
+ // Module variable count was already written above.
+ index++;
DCHECK_EQ(index, scope_info.ModuleVariablesIndex());
// The variable entries themselves have already been written above.
index += kModuleVariableEntryLength * module_vars_count;
@@ -556,6 +574,53 @@ Handle<ScopeInfo> ScopeInfo::CreateForBootstrapping(Isolate* isolate,
return scope_info;
}
+Object ScopeInfo::get(int index) const {
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return get(cage_base, index);
+}
+
+Object ScopeInfo::get(PtrComprCageBase cage_base, int index) const {
+ DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
+ return TaggedField<Object>::Relaxed_Load(cage_base, *this,
+ OffsetOfElementAt(index));
+}
+
+void ScopeInfo::set(int index, Smi value) {
+ DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
+ DCHECK(Object(value).IsSmi());
+ int offset = OffsetOfElementAt(index);
+ RELAXED_WRITE_FIELD(*this, offset, value);
+}
+
+void ScopeInfo::set(int index, Object value, WriteBarrierMode mode) {
+ DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
+ int offset = OffsetOfElementAt(index);
+ RELAXED_WRITE_FIELD(*this, offset, value);
+ CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode);
+}
+
+void ScopeInfo::CopyElements(Isolate* isolate, int dst_index, ScopeInfo src,
+ int src_index, int len, WriteBarrierMode mode) {
+ if (len == 0) return;
+ DCHECK_LE(src_index + len, src.length());
+ DisallowGarbageCollection no_gc;
+
+ ObjectSlot dst_slot(RawFieldOfElementAt(dst_index));
+ ObjectSlot src_slot(src.RawFieldOfElementAt(src_index));
+ isolate->heap()->CopyRange(*this, dst_slot, src_slot, len, mode);
+}
+
+ObjectSlot ScopeInfo::RawFieldOfElementAt(int index) {
+ return RawField(OffsetOfElementAt(index));
+}
+
+int ScopeInfo::length() const {
+ // AllocatedSize() is generated by Torque and represents the size in bytes of
+ // the object, as computed from flags, context_local_count, and possibly
+ // module_variable_count. Convert that size into a number of slots.
+ return (AllocatedSize() - HeapObject::kHeaderSize) / kTaggedSize;
+}
+
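The new length(), SizeFor(), OffsetOfElementAt() and ConvertOffsetToIndex() all rely on the same arithmetic: slots start immediately after the HeapObject header and are one tagged pointer wide. A small constexpr sketch of that round trip; the header and pointer sizes below are illustrative, not the values of any particular build:

constexpr int kTaggedSize = 8;            // 64-bit, no pointer compression
constexpr int kHeapObjectHeaderSize = 8;  // hypothetical header size

constexpr int OffsetOfElementAt(int index) {
  return kHeapObjectHeaderSize + index * kTaggedSize;
}
constexpr int ConvertOffsetToIndex(int offset) {
  return (offset - kHeapObjectHeaderSize) / kTaggedSize;
}
constexpr int LengthFromAllocatedSize(int allocated_size) {
  return (allocated_size - kHeapObjectHeaderSize) / kTaggedSize;
}

static_assert(ConvertOffsetToIndex(OffsetOfElementAt(5)) == 5,
              "offset and index conversions round-trip");
static_assert(LengthFromAllocatedSize(OffsetOfElementAt(12)) == 12,
              "SizeFor(length) and length() are inverses");

int main() { return 0; }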
// static
Handle<ScopeInfo> ScopeInfo::RecreateWithBlockList(
Isolate* isolate, Handle<ScopeInfo> original, Handle<StringSet> blocklist) {
@@ -580,7 +645,7 @@ Handle<ScopeInfo> ScopeInfo::RecreateWithBlockList(
isolate, kVariablePartIndex, *original, kVariablePartIndex,
scope_info->LocalsBlockListIndex() - kVariablePartIndex,
WriteBarrierMode::UPDATE_WRITE_BARRIER);
- scope_info->set_locals_block_list(0, *blocklist);
+ scope_info->set_locals_block_list(*blocklist);
scope_info->CopyElements(
isolate, scope_info->LocalsBlockListIndex() + 1, *original,
scope_info->LocalsBlockListIndex(),
@@ -700,12 +765,12 @@ bool ScopeInfo::HasSharedFunctionName() const {
void ScopeInfo::SetFunctionName(Object name) {
DCHECK(HasFunctionName());
DCHECK(name.IsString() || name == SharedFunctionInfo::kNoSharedNameSentinel);
- set_function_variable_info_name(0, name);
+ set_function_variable_info_name(name);
}
void ScopeInfo::SetInferredFunctionName(String name) {
DCHECK(HasInferredFunctionName());
- set_inferred_function_name(0, name);
+ set_inferred_function_name(name);
}
bool ScopeInfo::HasOuterScopeInfo() const {
@@ -736,19 +801,19 @@ bool ScopeInfo::HasLocalsBlockList() const {
StringSet ScopeInfo::LocalsBlockList() const {
DCHECK(HasLocalsBlockList());
- return StringSet::cast(locals_block_list(0));
+ return StringSet::cast(locals_block_list());
}
bool ScopeInfo::HasContext() const { return ContextLength() > 0; }
Object ScopeInfo::FunctionName() const {
DCHECK(HasFunctionName());
- return function_variable_info_name(0);
+ return function_variable_info_name();
}
Object ScopeInfo::InferredFunctionName() const {
DCHECK(HasInferredFunctionName());
- return inferred_function_name(0);
+ return inferred_function_name();
}
String ScopeInfo::FunctionDebugName() const {
@@ -766,29 +831,29 @@ String ScopeInfo::FunctionDebugName() const {
int ScopeInfo::StartPosition() const {
DCHECK(HasPositionInfo());
- return position_info_start(0);
+ return position_info_start();
}
int ScopeInfo::EndPosition() const {
DCHECK(HasPositionInfo());
- return position_info_end(0);
+ return position_info_end();
}
void ScopeInfo::SetPositionInfo(int start, int end) {
DCHECK(HasPositionInfo());
DCHECK_LE(start, end);
- set_position_info_start(0, start);
- set_position_info_end(0, end);
+ set_position_info_start(start);
+ set_position_info_end(end);
}
ScopeInfo ScopeInfo::OuterScopeInfo() const {
DCHECK(HasOuterScopeInfo());
- return ScopeInfo::cast(outer_scope_info(0));
+ return ScopeInfo::cast(outer_scope_info());
}
SourceTextModuleInfo ScopeInfo::ModuleDescriptorInfo() const {
DCHECK(scope_type() == MODULE_SCOPE);
- return SourceTextModuleInfo::cast(module_info(0));
+ return SourceTextModuleInfo::cast(module_info());
}
String ScopeInfo::ContextLocalName(int var) const {
@@ -836,6 +901,11 @@ bool ScopeInfo::VariableIsSynthetic(String name) {
name.Equals(name.GetReadOnlyRoots().this_string());
}
+int ScopeInfo::ModuleVariableCount() const {
+ DCHECK_EQ(scope_type(), MODULE_SCOPE);
+ return module_variable_count();
+}
+
int ScopeInfo::ModuleIndex(String name, VariableMode* mode,
InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag) {
@@ -846,16 +916,14 @@ int ScopeInfo::ModuleIndex(String name, VariableMode* mode,
DCHECK_NOT_NULL(init_flag);
DCHECK_NOT_NULL(maybe_assigned_flag);
- int module_vars_count = module_variable_count(0);
- int entry = ModuleVariablesIndex();
+ int module_vars_count = module_variable_count();
for (int i = 0; i < module_vars_count; ++i) {
- String var_name = String::cast(get(entry + kModuleVariableNameOffset));
+ String var_name = module_variables_name(i);
if (name.Equals(var_name)) {
int index;
ModuleVariable(i, nullptr, &index, mode, init_flag, maybe_assigned_flag);
return index;
}
- entry += kModuleVariableEntryLength;
}
return 0;
@@ -875,11 +943,11 @@ int ScopeInfo::ContextSlotIndex(ScopeInfo scope_info, String name,
if (scope_info.IsEmpty()) return -1;
- int start = scope_info.ContextLocalNamesIndex();
- int end = start + scope_info.context_local_count();
- for (int i = start; i < end; ++i) {
- if (name != scope_info.get(i)) continue;
- int var = i - start;
+ int context_local_count = scope_info.context_local_count();
+ for (int var = 0; var < context_local_count; ++var) {
+ if (name != scope_info.context_local_names(var)) {
+ continue;
+ }
*mode = scope_info.ContextLocalMode(var);
*is_static_flag = scope_info.ContextLocalIsStaticFlag(var);
*init_flag = scope_info.ContextLocalInitFlag(var);
@@ -895,7 +963,7 @@ int ScopeInfo::ContextSlotIndex(ScopeInfo scope_info, String name,
int ScopeInfo::SavedClassVariableContextLocalIndex() const {
if (HasSavedClassVariableIndexBit::decode(Flags())) {
- int index = saved_class_variable_info(0);
+ int index = saved_class_variable_info();
return index - Context::MIN_CONTEXT_SLOTS;
}
return -1;
@@ -904,7 +972,7 @@ int ScopeInfo::SavedClassVariableContextLocalIndex() const {
int ScopeInfo::ReceiverContextSlotIndex() const {
if (ReceiverVariableBits::decode(Flags()) ==
VariableAllocationInfo::CONTEXT) {
- return receiver_info(0);
+ return receiver_info();
}
return -1;
}
@@ -914,7 +982,7 @@ int ScopeInfo::FunctionContextSlotIndex(String name) const {
if (FunctionVariableBits::decode(Flags()) ==
VariableAllocationInfo::CONTEXT &&
FunctionName() == name) {
- return function_variable_info_context_or_stack_slot_index(0);
+ return function_variable_info_context_or_stack_slot_index();
}
return -1;
}
@@ -1147,3 +1215,5 @@ FixedArray SourceTextModuleInfo::RegularExportExportNames(int i) const {
} // namespace internal
} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
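scope-info.cc now includes src/objects/object-macros.h as its last include and balances it with object-macros-undef.h at the bottom, the bracket pattern used for V8's guard-less macro headers so the helper macros do not leak into other translation units. A toy, single-file version of the same discipline (the macro and type names are invented for illustration):

// What a guard-less macro header would provide:
#define DECL_INT_ACCESSOR(name)           \
  int name##_ = 0;                        \
  int name() const { return name##_; }   \
  void set_##name(int v) { name##_ = v; }

struct Widget {
  DECL_INT_ACCESSOR(width)
  DECL_INT_ACCESSOR(height)
};

// What the matching -undef header would do, right after the last use:
#undef DECL_INT_ACCESSOR

int main() {
  Widget w;
  w.set_width(3);
  return w.width() == 3 ? 0 : 1;
}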
diff --git a/deps/v8/src/objects/scope-info.h b/deps/v8/src/objects/scope-info.h
index 5ee404e15bb..57e5d2e308f 100644
--- a/deps/v8/src/objects/scope-info.h
+++ b/deps/v8/src/objects/scope-info.h
@@ -38,26 +38,11 @@ class Zone;
// This object provides quick access to scope info details for runtime
// routines.
-class ScopeInfo : public TorqueGeneratedScopeInfo<ScopeInfo, FixedArrayBase> {
+class ScopeInfo : public TorqueGeneratedScopeInfo<ScopeInfo, HeapObject> {
public:
DEFINE_TORQUE_GENERATED_SCOPE_FLAGS()
DECL_PRINTER(ScopeInfo)
- DECL_VERIFIER(ScopeInfo)
-
- // For refactoring, clone some FixedArray member functions. Eventually this
- // class will stop pretending to be a FixedArray, but we're not quite there.
- inline Object get(int index) const;
- inline Object get(IsolateRoot isolate, int index) const;
- // Setter that doesn't need write barrier.
- inline void set(int index, Smi value);
- // Setter with explicit barrier mode.
- inline void set(int index, Object value,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline void CopyElements(Isolate* isolate, int dst_index, ScopeInfo src,
- int src_index, int len, WriteBarrierMode mode);
- inline ObjectSlot RawFieldOfElementAt(int index);
-
class BodyDescriptor;
// Return the type of this scope.
@@ -180,6 +165,8 @@ class ScopeInfo : public TorqueGeneratedScopeInfo<ScopeInfo, FixedArrayBase> {
InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag);
+ int ModuleVariableCount() const;
+
// Lookup support for serialized scope info. Returns the function context
// slot index if the function name is present and context-allocated (named
// function expressions, only), otherwise returns a value < 0. The name
@@ -272,18 +259,20 @@ class ScopeInfo : public TorqueGeneratedScopeInfo<ScopeInfo, FixedArrayBase> {
kVariablePartIndex
};
-// Make sure the Fields enum agrees with Torque-generated offsets.
-#define ASSERT_MATCHED_FIELD(name) \
- STATIC_ASSERT(FixedArray::OffsetOfElementAt(k##name) == k##name##Offset);
- FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(ASSERT_MATCHED_FIELD)
-#undef ASSERT_MATCHED_FIELD
-
STATIC_ASSERT(LanguageModeSize == 1 << LanguageModeBit::kSize);
STATIC_ASSERT(kLastFunctionKind <= FunctionKindBits::kMax);
bool IsEmpty() const;
+ // Returns the size in bytes for a ScopeInfo with |length| slots.
+ static constexpr int SizeFor(int length) { return OffsetOfElementAt(length); }
+
+ // Gives access to raw memory which stores the ScopeInfo's data.
+ inline ObjectSlot data_start();
+
private:
+ friend class WebSnapshotDeserializer;
+
int ContextLocalNamesIndex() const;
int ContextLocalInfosIndex() const;
int SavedClassVariableInfoIndex() const;
@@ -299,10 +288,33 @@ class ScopeInfo : public TorqueGeneratedScopeInfo<ScopeInfo, FixedArrayBase> {
static bool NeedsPositionInfo(ScopeType type);
- // Converts byte offsets within the object to FixedArray-style indices.
+ // Raw access by slot index. These functions rely on the fact that everything
+ // in ScopeInfo is tagged. Each slot is tagged-pointer sized. Slot 0 is
+ // 'flags', the first field defined by ScopeInfo after the standard-size
+ // HeapObject header.
+ V8_EXPORT_PRIVATE Object get(int index) const;
+ Object get(PtrComprCageBase cage_base, int index) const;
+ // Setter that doesn't need write barrier.
+ void set(int index, Smi value);
+ // Setter with explicit barrier mode.
+ void set(int index, Object value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ void CopyElements(Isolate* isolate, int dst_index, ScopeInfo src,
+ int src_index, int len, WriteBarrierMode mode);
+ ObjectSlot RawFieldOfElementAt(int index);
+ // The number of tagged-pointer-sized slots in the ScopeInfo after its
+ // standard HeapObject header.
+ V8_EXPORT_PRIVATE int length() const;
+
+ // Conversions between offset (bytes from the beginning of the object) and
+ // index (number of tagged-pointer-sized slots starting after the standard
+ // HeapObject header).
+ static constexpr int OffsetOfElementAt(int index) {
+ return HeapObject::kHeaderSize + index * kTaggedSize;
+ }
static constexpr int ConvertOffsetToIndex(int offset) {
- int index = (offset - FixedArray::kHeaderSize) / kTaggedSize;
- CONSTEXPR_DCHECK(FixedArray::OffsetOfElementAt(index) == offset);
+ int index = (offset - HeapObject::kHeaderSize) / kTaggedSize;
+ CONSTEXPR_DCHECK(OffsetOfElementAt(index) == offset);
return index;
}
@@ -322,8 +334,12 @@ class ScopeInfo : public TorqueGeneratedScopeInfo<ScopeInfo, FixedArrayBase> {
InitializationFlag* init_flag = nullptr,
MaybeAssignedFlag* maybe_assigned_flag = nullptr);
- static const int kFunctionNameEntries = 2;
- static const int kPositionInfoEntries = 2;
+ static const int kFunctionNameEntries =
+ TorqueGeneratedFunctionVariableInfoOffsets::kSize / kTaggedSize;
+ static const int kPositionInfoEntries =
+ TorqueGeneratedPositionInfoOffsets::kSize / kTaggedSize;
+ static const int kModuleVariableEntryLength =
+ TorqueGeneratedModuleVariableOffsets::kSize / kTaggedSize;
// Properties of variables.
DEFINE_TORQUE_GENERATED_VARIABLE_PROPERTIES()
diff --git a/deps/v8/src/objects/scope-info.tq b/deps/v8/src/objects/scope-info.tq
index c238d5309d1..ffa8546df67 100644
--- a/deps/v8/src/objects/scope-info.tq
+++ b/deps/v8/src/objects/scope-info.tq
@@ -99,7 +99,7 @@ struct ModuleVariable {
@generateCppClass
@generateBodyDescriptor
-extern class ScopeInfo extends FixedArrayBase {
+extern class ScopeInfo extends HeapObject {
const flags: SmiTagged<ScopeFlags>;
// The number of parameters. For non-function scopes this is 0.
@@ -121,50 +121,51 @@ extern class ScopeInfo extends FixedArrayBase {
// If the scope is a class scope and it has static private methods that
// may be accessed directly or through eval, one slot is reserved to hold
// the context slot index for the class variable.
- saved_class_variable_info[flags.has_saved_class_variable_index ? 1 : 0]: Smi;
+ saved_class_variable_info?[flags.has_saved_class_variable_index]: Smi;
// If the scope binds a "this" value, one slot is reserved to hold the
// context or stack slot index for the variable.
- receiver_info[
- flags.receiver_variable ==
- FromConstexpr<VariableAllocationInfo>(VariableAllocationInfo::STACK)
- || flags.receiver_variable ==
- FromConstexpr<VariableAllocationInfo>(VariableAllocationInfo::CONTEXT)
- ? 1 : 0]: Smi;
+ receiver_info?[
+ flags.receiver_variable ==
+ FromConstexpr<VariableAllocationInfo>(VariableAllocationInfo::STACK) ||
+ flags.receiver_variable ==
+ FromConstexpr<VariableAllocationInfo>(VariableAllocationInfo::CONTEXT)
+ ]: Smi;
// If the scope belongs to a named function expression this part contains
// information about the function variable. It always occupies two array
// slots: a. The name of the function variable.
// b. The context or stack slot index for the variable.
- function_variable_info[flags.function_variable != FromConstexpr<VariableAllocationInfo>(VariableAllocationInfo::NONE) ? 1 : 0]:
- FunctionVariableInfo;
+ function_variable_info?
+ [flags.function_variable !=
+ FromConstexpr<VariableAllocationInfo>(VariableAllocationInfo::NONE)]:
+ FunctionVariableInfo;
- inferred_function_name[flags.has_inferred_function_name ? 1 : 0]: String|
- Undefined;
+ inferred_function_name?[flags.has_inferred_function_name]: String|Undefined;
// Contains two slots with a) the startPosition and b) the endPosition if
// the scope belongs to a function or script.
- position_info[flags.scope_type == ScopeType::FUNCTION_SCOPE ||
- flags.scope_type == ScopeType::SCRIPT_SCOPE ||
- flags.scope_type == ScopeType::EVAL_SCOPE ||
- flags.scope_type == ScopeType::MODULE_SCOPE
- ? 1 : 0]: PositionInfo;
+ position_info?
+ [flags.scope_type == ScopeType::FUNCTION_SCOPE ||
+ flags.scope_type == ScopeType::SCRIPT_SCOPE ||
+ flags.scope_type == ScopeType::EVAL_SCOPE ||
+ flags.scope_type == ScopeType::MODULE_SCOPE]: PositionInfo;
- outer_scope_info[flags.has_outer_scope_info ? 1 : 0]: ScopeInfo|TheHole;
+ outer_scope_info?[flags.has_outer_scope_info]: ScopeInfo|TheHole;
// List of stack allocated local variables. Used by debug evaluate to properly
// abort variable lookup when a name clashes with a stack allocated local that
// can't be materialized.
- locals_block_list[flags.has_locals_block_list ? 1 : 0]: HashTable;
+ locals_block_list?[flags.has_locals_block_list]: HashTable;
// For a module scope, this part contains the SourceTextModuleInfo, the
// number of MODULE-allocated variables, and the metadata of those
// variables. For non-module scopes it is empty.
- module_info[flags.scope_type == ScopeType::MODULE_SCOPE ? 1 : 0]:
- SourceTextModuleInfo;
- const module_variable_count[flags.scope_type == ScopeType::MODULE_SCOPE ? 1 : 0]:
- Smi;
- module_variables[flags.scope_type == ScopeType::MODULE_SCOPE ? module_variable_count[0] : 0]:
+ module_info?
+ [flags.scope_type == ScopeType::MODULE_SCOPE]: SourceTextModuleInfo;
+ const module_variable_count?
+ [flags.scope_type == ScopeType::MODULE_SCOPE]: Smi;
+ module_variables[flags.scope_type == ScopeType::MODULE_SCOPE ? module_variable_count : 0]:
ModuleVariable;
}
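The Torque change above replaces the "? 1 : 0" indexed fields with optional fields ("field?[condition]"), so which slots exist, and therefore the object's size, is derived from the scope flags and the module variable count. A plain C++ sketch of size-from-flags layout; the fixed slot count is illustrative, while the three slots per module variable (name, index, properties) follow the entry layout shown in the diff:

constexpr int kTaggedSize = 8;  // illustrative

struct ScopeFlags {
  bool has_outer_scope_info;
  bool has_locals_block_list;
  bool is_module_scope;
};

constexpr int OptionalSlotCount(ScopeFlags f, int module_variable_count) {
  return (f.has_outer_scope_info ? 1 : 0) +
         (f.has_locals_block_list ? 1 : 0) +
         // a module scope adds module_info, the count, and 3 slots per entry
         (f.is_module_scope ? 2 + 3 * module_variable_count : 0);
}

constexpr int AllocatedSize(int fixed_slots, ScopeFlags f, int module_vars) {
  return (fixed_slots + OptionalSlotCount(f, module_vars)) * kTaggedSize;
}

static_assert(AllocatedSize(4, {true, false, true}, 2) == (4 + 1 + 8) * 8,
              "size is computed from flags, not stored as a length");

int main() { return 0; }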
diff --git a/deps/v8/src/objects/script-inl.h b/deps/v8/src/objects/script-inl.h
index 3865a7ccda8..b1e226a0465 100644
--- a/deps/v8/src/objects/script-inl.h
+++ b/deps/v8/src/objects/script-inl.h
@@ -23,12 +23,7 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(Script)
NEVER_READ_ONLY_SPACE_IMPL(Script)
-SMI_ACCESSORS(Script, type, kScriptTypeOffset)
-ACCESSORS_CHECKED(Script, eval_from_shared_or_wrapped_arguments, Object,
- kEvalFromSharedOrWrappedArgumentsOffset,
- this->type() != TYPE_WASM)
-SMI_ACCESSORS_CHECKED(Script, eval_from_position, kEvalFromPositionOffset,
- this->type() != TYPE_WASM)
+#if V8_ENABLE_WEBASSEMBLY
ACCESSORS_CHECKED(Script, wasm_breakpoint_infos, FixedArray,
kEvalFromSharedOrWrappedArgumentsOffset,
this->type() == TYPE_WASM)
@@ -36,6 +31,18 @@ ACCESSORS_CHECKED(Script, wasm_managed_native_module, Object,
kEvalFromPositionOffset, this->type() == TYPE_WASM)
ACCESSORS_CHECKED(Script, wasm_weak_instance_list, WeakArrayList,
kSharedFunctionInfosOffset, this->type() == TYPE_WASM)
+#define CHECK_SCRIPT_NOT_WASM this->type() != TYPE_WASM
+#else
+#define CHECK_SCRIPT_NOT_WASM true
+#endif // V8_ENABLE_WEBASSEMBLY
+
+SMI_ACCESSORS(Script, type, kScriptTypeOffset)
+ACCESSORS_CHECKED(Script, eval_from_shared_or_wrapped_arguments, Object,
+ kEvalFromSharedOrWrappedArgumentsOffset,
+ CHECK_SCRIPT_NOT_WASM)
+SMI_ACCESSORS_CHECKED(Script, eval_from_position, kEvalFromPositionOffset,
+ CHECK_SCRIPT_NOT_WASM)
+#undef CHECK_SCRIPT_NOT_WASM
bool Script::is_wrapped() const {
return eval_from_shared_or_wrapped_arguments().IsFixedArray();
@@ -67,19 +74,24 @@ FixedArray Script::wrapped_arguments() const {
}
DEF_GETTER(Script, shared_function_infos, WeakFixedArray) {
- return type() == TYPE_WASM
- ? ReadOnlyRoots(GetHeap()).empty_weak_fixed_array()
- : TaggedField<WeakFixedArray, kSharedFunctionInfosOffset>::load(
- *this);
+#if V8_ENABLE_WEBASSEMBLY
+ if (type() == TYPE_WASM) {
+ return ReadOnlyRoots(GetHeap()).empty_weak_fixed_array();
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ return TaggedField<WeakFixedArray, kSharedFunctionInfosOffset>::load(*this);
}
void Script::set_shared_function_infos(WeakFixedArray value,
WriteBarrierMode mode) {
+#if V8_ENABLE_WEBASSEMBLY
DCHECK_NE(TYPE_WASM, type());
+#endif // V8_ENABLE_WEBASSEMBLY
TaggedField<WeakFixedArray, kSharedFunctionInfosOffset>::store(*this, value);
CONDITIONAL_WRITE_BARRIER(*this, kSharedFunctionInfosOffset, value, mode);
}
+#if V8_ENABLE_WEBASSEMBLY
bool Script::has_wasm_breakpoint_infos() const {
return type() == TYPE_WASM && wasm_breakpoint_infos().length() > 0;
}
@@ -88,6 +100,13 @@ wasm::NativeModule* Script::wasm_native_module() const {
return Managed<wasm::NativeModule>::cast(wasm_managed_native_module()).raw();
}
+bool Script::break_on_entry() const { return BreakOnEntryBit::decode(flags()); }
+
+void Script::set_break_on_entry(bool value) {
+ set_flags(BreakOnEntryBit::update(flags(), value));
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
Script::CompilationType Script::compilation_type() {
return CompilationTypeBit::decode(flags());
}
@@ -107,12 +126,6 @@ void Script::set_is_repl_mode(bool value) {
set_flags(IsReplModeBit::update(flags(), value));
}
-bool Script::break_on_entry() const { return BreakOnEntryBit::decode(flags()); }
-
-void Script::set_break_on_entry(bool value) {
- set_flags(BreakOnEntryBit::update(flags(), value));
-}
-
ScriptOriginOptions Script::origin_options() {
return ScriptOriginOptions(OriginOptionsBits::decode(flags()));
}
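script-inl.h defines CHECK_SCRIPT_NOT_WASM differently depending on V8_ENABLE_WEBASSEMBLY, feeds it to the checked accessor macros, and #undefs it immediately afterwards. A toy version of that feature-dependent predicate, with a hypothetical TOY_ENABLE_WASM flag and a plain assert standing in for the ACCESSORS_CHECKED machinery:

#include <cassert>

#define TOY_ENABLE_WASM 1

struct ToyScript {
  int type = 2;  // 3 would mean "wasm" when the feature is enabled
#if TOY_ENABLE_WASM
#define TOY_NOT_WASM (type != 3)
#else
#define TOY_NOT_WASM true  // no wasm script type exists in this build
#endif
  int eval_position = -1;
  int eval_from_position() const {
    assert(TOY_NOT_WASM);  // checked accessor, like ACCESSORS_CHECKED
    return eval_position;
  }
#undef TOY_NOT_WASM  // keep the predicate macro local, as the diff does
};

int main() {
  ToyScript s;
  return s.eval_from_position() == -1 ? 0 : 1;
}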
diff --git a/deps/v8/src/objects/script.h b/deps/v8/src/objects/script.h
index b6da24372c5..e487da76497 100644
--- a/deps/v8/src/objects/script.h
+++ b/deps/v8/src/objects/script.h
@@ -35,7 +35,9 @@ class Script : public TorqueGeneratedScript<Script, Struct> {
TYPE_NATIVE = 0,
TYPE_EXTENSION = 1,
TYPE_NORMAL = 2,
+#if V8_ENABLE_WEBASSEMBLY
TYPE_WASM = 3,
+#endif // V8_ENABLE_WEBASSEMBLY
TYPE_INSPECTOR = 4
};
@@ -76,6 +78,7 @@ class Script : public TorqueGeneratedScript<Script, Struct> {
// function infos created from this script.
DECL_ACCESSORS(shared_function_infos, WeakFixedArray)
+#if V8_ENABLE_WEBASSEMBLY
// [wasm_breakpoint_infos]: the list of {BreakPointInfo} objects describing
// all WebAssembly breakpoints for modules/instances managed via this script.
// This must only be called if the type of this script is TYPE_WASM.
@@ -92,6 +95,17 @@ class Script : public TorqueGeneratedScript<Script, Struct> {
// This must only be called if the type of this script is TYPE_WASM.
DECL_ACCESSORS(wasm_weak_instance_list, WeakArrayList)
+ // [break_on_entry] (wasm only): whether an instrumentation breakpoint is set
+ // for this script; this information will be transferred to existing and
+ // future instances to make sure that we stop before executing any code in
+ // this wasm module.
+ inline bool break_on_entry() const;
+ inline void set_break_on_entry(bool value);
+
+ // Check if the script contains any Asm modules.
+ bool ContainsAsmModule();
+#endif // V8_ENABLE_WEBASSEMBLY
+
   // [compilation_type]: how the script was compiled. Encoded in the
// 'flags' field.
inline CompilationType compilation_type();
@@ -107,13 +121,6 @@ class Script : public TorqueGeneratedScript<Script, Struct> {
inline bool is_repl_mode() const;
inline void set_is_repl_mode(bool value);
- // [break_on_entry] (wasm only): whether an instrumentation breakpoint is set
- // for this script; this information will be transferred to existing and
- // future instances to make sure that we stop before executing any code in
- // this wasm module.
- inline bool break_on_entry() const;
- inline void set_break_on_entry(bool value);
-
// [origin_options]: optional attributes set by the embedder via ScriptOrigin,
// and used by the embedder to make decisions about the script. V8 just passes
// this through. Encoded in the 'flags' field.
@@ -129,9 +136,6 @@ class Script : public TorqueGeneratedScript<Script, Struct> {
// Retrieve source position from where eval was called.
static int GetEvalPosition(Isolate* isolate, Handle<Script> script);
- // Check if the script contains any Asm modules.
- bool ContainsAsmModule();
-
// Init line_ends array with source code positions of line ends.
template <typename LocalIsolate>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
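In the Script::Type enum above, TYPE_WASM keeps its explicit value 3 even though it is now guarded, so TYPE_INSPECTOR stays 4 whether or not WebAssembly is compiled in. The same idea in isolation, with a toy flag instead of V8_ENABLE_WEBASSEMBLY:

#define TOY_ENABLE_WASM 0

enum ToyScriptType {
  TYPE_NATIVE = 0,
  TYPE_EXTENSION = 1,
  TYPE_NORMAL = 2,
#if TOY_ENABLE_WASM
  TYPE_WASM = 3,
#endif
  TYPE_INSPECTOR = 4,  // pinned explicitly so it is 4 in both configurations
};

static_assert(TYPE_INSPECTOR == 4,
              "enumerator values stay stable when the wasm member is absent");

int main() { return 0; }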
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index b3884f44879..c125b01a3d0 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -15,8 +15,11 @@
#include "src/objects/scope-info.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/templates.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
+#endif // V8_ENABLE_WEBASSEMBLY
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -245,8 +248,10 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, has_duplicate_parameters,
BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, native,
SharedFunctionInfo::IsNativeBit)
+#if V8_ENABLE_WEBASSEMBLY
BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_asm_wasm_broken,
SharedFunctionInfo::IsAsmWasmBrokenBit)
+#endif // V8_ENABLE_WEBASSEMBLY
BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags,
requires_instance_members_initializer,
SharedFunctionInfo::RequiresInstanceMembersInitializerBit)
@@ -355,8 +360,10 @@ void SharedFunctionInfo::UpdateFunctionMapIndex() {
}
void SharedFunctionInfo::DontAdaptArguments() {
+#if V8_ENABLE_WEBASSEMBLY
// TODO(leszeks): Revise this DCHECK now that the code field is gone.
DCHECK(!HasWasmExportedFunctionData());
+#endif // V8_ENABLE_WEBASSEMBLY
set_internal_formal_parameter_count(kDontAdaptArgumentsSentinel);
}
@@ -618,10 +625,23 @@ void SharedFunctionInfo::flush_baseline_data() {
set_function_data(baseline_data().data(), kReleaseStore);
}
+#if V8_ENABLE_WEBASSEMBLY
bool SharedFunctionInfo::HasAsmWasmData() const {
return function_data(kAcquireLoad).IsAsmWasmData();
}
+bool SharedFunctionInfo::HasWasmExportedFunctionData() const {
+ return function_data(kAcquireLoad).IsWasmExportedFunctionData();
+}
+
+bool SharedFunctionInfo::HasWasmJSFunctionData() const {
+ return function_data(kAcquireLoad).IsWasmJSFunctionData();
+}
+
+bool SharedFunctionInfo::HasWasmCapiFunctionData() const {
+ return function_data(kAcquireLoad).IsWasmCapiFunctionData();
+}
+
AsmWasmData SharedFunctionInfo::asm_wasm_data() const {
DCHECK(HasAsmWasmData());
return AsmWasmData::cast(function_data(kAcquireLoad));
@@ -633,6 +653,23 @@ void SharedFunctionInfo::set_asm_wasm_data(AsmWasmData data) {
set_function_data(data, kReleaseStore);
}
+const wasm::WasmModule* SharedFunctionInfo::wasm_module() const {
+ if (!HasWasmExportedFunctionData()) return nullptr;
+ const WasmExportedFunctionData& function_data = wasm_exported_function_data();
+ const WasmInstanceObject& wasm_instance = function_data.instance();
+ const WasmModuleObject& wasm_module_object = wasm_instance.module_object();
+ return wasm_module_object.module();
+}
+
+const wasm::FunctionSig* SharedFunctionInfo::wasm_function_signature() const {
+ const wasm::WasmModule* module = wasm_module();
+ if (!module) return nullptr;
+ const WasmExportedFunctionData& function_data = wasm_exported_function_data();
+ DCHECK_LT(function_data.function_index(), module->functions.size());
+ return module->functions[function_data.function_index()].sig;
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
bool SharedFunctionInfo::HasBuiltinId() const {
return function_data(kAcquireLoad).IsSmi();
}
@@ -727,34 +764,6 @@ void UncompiledData::InitAfterBytecodeFlush(
set_end_position(end_position);
}
-bool SharedFunctionInfo::HasWasmExportedFunctionData() const {
- return function_data(kAcquireLoad).IsWasmExportedFunctionData();
-}
-
-bool SharedFunctionInfo::HasWasmJSFunctionData() const {
- return function_data(kAcquireLoad).IsWasmJSFunctionData();
-}
-
-const wasm::WasmModule* SharedFunctionInfo::wasm_module() const {
- if (!HasWasmExportedFunctionData()) return nullptr;
- const WasmExportedFunctionData& function_data = wasm_exported_function_data();
- const WasmInstanceObject& wasm_instance = function_data.instance();
- const WasmModuleObject& wasm_module_object = wasm_instance.module_object();
- return wasm_module_object.module();
-}
-
-const wasm::FunctionSig* SharedFunctionInfo::wasm_function_signature() const {
- const wasm::WasmModule* module = wasm_module();
- if (!module) return nullptr;
- const WasmExportedFunctionData& function_data = wasm_exported_function_data();
- DCHECK_LT(function_data.function_index(), module->functions.size());
- return module->functions[function_data.function_index()].sig;
-}
-
-bool SharedFunctionInfo::HasWasmCapiFunctionData() const {
- return function_data(kAcquireLoad).IsWasmCapiFunctionData();
-}
-
HeapObject SharedFunctionInfo::script() const {
HeapObject maybe_script = script_or_debug_info(kAcquireLoad);
if (maybe_script.IsDebugInfo()) {
@@ -822,14 +831,18 @@ bool SharedFunctionInfo::IsUserJavaScript() const {
}
bool SharedFunctionInfo::IsSubjectToDebugging() const {
- return IsUserJavaScript() && !HasAsmWasmData();
+#if V8_ENABLE_WEBASSEMBLY
+ if (HasAsmWasmData()) return false;
+#endif // V8_ENABLE_WEBASSEMBLY
+ return IsUserJavaScript();
}
bool SharedFunctionInfo::CanDiscardCompiled() const {
- bool can_decompile =
- (HasBytecodeArray() || HasAsmWasmData() ||
- HasUncompiledDataWithPreparseData() || HasBaselineData());
- return can_decompile;
+#if V8_ENABLE_WEBASSEMBLY
+ if (HasAsmWasmData()) return true;
+#endif // V8_ENABLE_WEBASSEMBLY
+ return HasBytecodeArray() || HasUncompiledDataWithPreparseData() ||
+ HasBaselineData();
}
bool SharedFunctionInfo::is_class_constructor() const {
diff --git a/deps/v8/src/objects/shared-function-info.cc b/deps/v8/src/objects/shared-function-info.cc
index 433c69de337..5f5917d64a9 100644
--- a/deps/v8/src/objects/shared-function-info.cc
+++ b/deps/v8/src/objects/shared-function-info.cc
@@ -78,43 +78,55 @@ Code SharedFunctionInfo::GetCode() const {
// Holding a Smi means we are a builtin.
DCHECK(HasBuiltinId());
return isolate->builtins()->builtin(builtin_id());
- } else if (data.IsBytecodeArray()) {
+ }
+ if (data.IsBytecodeArray()) {
// Having a bytecode array means we are a compiled, interpreted function.
DCHECK(HasBytecodeArray());
return isolate->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
- } else if (data.IsBaselineData()) {
+ }
+ if (data.IsBaselineData()) {
// Having BaselineData means we are a compiled, baseline function.
DCHECK(HasBaselineData());
return baseline_data().baseline_code();
- } else if (data.IsAsmWasmData()) {
+ }
+#if V8_ENABLE_WEBASSEMBLY
+ if (data.IsAsmWasmData()) {
// Having AsmWasmData means we are an asm.js/wasm function.
DCHECK(HasAsmWasmData());
return isolate->builtins()->builtin(Builtins::kInstantiateAsmJs);
- } else if (data.IsUncompiledData()) {
+ }
+ if (data.IsWasmExportedFunctionData()) {
+ // Having a WasmExportedFunctionData means the code is in there.
+ DCHECK(HasWasmExportedFunctionData());
+ return wasm_exported_function_data().wrapper_code();
+ }
+ if (data.IsWasmJSFunctionData()) {
+ return wasm_js_function_data().wrapper_code();
+ }
+ if (data.IsWasmCapiFunctionData()) {
+ return wasm_capi_function_data().wrapper_code();
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ if (data.IsUncompiledData()) {
// Having uncompiled data (with or without scope) means we need to compile.
DCHECK(HasUncompiledData());
return isolate->builtins()->builtin(Builtins::kCompileLazy);
- } else if (data.IsFunctionTemplateInfo()) {
+ }
+ if (data.IsFunctionTemplateInfo()) {
// Having a function template info means we are an API function.
DCHECK(IsApiFunction());
return isolate->builtins()->builtin(Builtins::kHandleApiCall);
- } else if (data.IsWasmExportedFunctionData()) {
- // Having a WasmExportedFunctionData means the code is in there.
- DCHECK(HasWasmExportedFunctionData());
- return wasm_exported_function_data().wrapper_code();
- } else if (data.IsInterpreterData()) {
+ }
+ if (data.IsInterpreterData()) {
Code code = InterpreterTrampoline();
DCHECK(code.IsCode());
DCHECK(code.is_interpreter_trampoline_builtin());
return code;
- } else if (data.IsWasmJSFunctionData()) {
- return wasm_js_function_data().wrapper_code();
- } else if (data.IsWasmCapiFunctionData()) {
- return wasm_capi_function_data().wrapper_code();
}
UNREACHABLE();
}
+#if V8_ENABLE_WEBASSEMBLY
WasmExportedFunctionData SharedFunctionInfo::wasm_exported_function_data()
const {
DCHECK(HasWasmExportedFunctionData());
@@ -130,6 +142,7 @@ WasmCapiFunctionData SharedFunctionInfo::wasm_capi_function_data() const {
DCHECK(HasWasmCapiFunctionData());
return WasmCapiFunctionData::cast(function_data(kAcquireLoad));
}
+#endif // V8_ENABLE_WEBASSEMBLY
SharedFunctionInfo::ScriptIterator::ScriptIterator(Isolate* isolate,
Script script)
@@ -238,10 +251,12 @@ CoverageInfo SharedFunctionInfo::GetCoverageInfo() const {
}
std::unique_ptr<char[]> SharedFunctionInfo::DebugNameCStr() {
+#if V8_ENABLE_WEBASSEMBLY
if (HasWasmExportedFunctionData()) {
return WasmExportedFunction::GetDebugName(
wasm_exported_function_data().sig());
}
+#endif // V8_ENABLE_WEBASSEMBLY
DisallowGarbageCollection no_gc;
String function_name = Name();
if (function_name.length() == 0) function_name = inferred_name();
@@ -251,12 +266,14 @@ std::unique_ptr<char[]> SharedFunctionInfo::DebugNameCStr() {
// static
Handle<String> SharedFunctionInfo::DebugName(
Handle<SharedFunctionInfo> shared) {
+#if V8_ENABLE_WEBASSEMBLY
if (shared->HasWasmExportedFunctionData()) {
return shared->GetIsolate()
->factory()
->NewStringFromUtf8(CStrVector(shared->DebugNameCStr().get()))
.ToHandleChecked();
}
+#endif // V8_ENABLE_WEBASSEMBLY
DisallowHeapAllocation no_gc;
String function_name = shared->Name();
if (function_name.length() == 0) function_name = shared->inferred_name();
@@ -597,12 +614,14 @@ int SharedFunctionInfo::StartPosition() const {
DCHECK_IMPLIES(HasBuiltinId(), builtin_id() != Builtins::kCompileLazy);
return 0;
}
+#if V8_ENABLE_WEBASSEMBLY
if (HasWasmExportedFunctionData()) {
WasmInstanceObject instance = wasm_exported_function_data().instance();
int func_index = wasm_exported_function_data().function_index();
auto& function = instance.module()->functions[func_index];
return static_cast<int>(function.code.offset());
}
+#endif // V8_ENABLE_WEBASSEMBLY
return kNoSourcePosition;
}
@@ -622,12 +641,14 @@ int SharedFunctionInfo::EndPosition() const {
DCHECK_IMPLIES(HasBuiltinId(), builtin_id() != Builtins::kCompileLazy);
return 0;
}
+#if V8_ENABLE_WEBASSEMBLY
if (HasWasmExportedFunctionData()) {
WasmInstanceObject instance = wasm_exported_function_data().instance();
int func_index = wasm_exported_function_data().function_index();
auto& function = instance.module()->functions[func_index];
return static_cast<int>(function.code.end_offset());
}
+#endif // V8_ENABLE_WEBASSEMBLY
return kNoSourcePosition;
}
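
The shared-function-info.cc hunks above turn the GetCode() else-if chain into early returns so that all WebAssembly-related branches can sit inside one #if V8_ENABLE_WEBASSEMBLY block. A minimal standalone sketch of that pattern, assuming hypothetical Kind/Code types and an ENABLE_WASM_EXAMPLE flag rather than the real V8 classes and build flag, could look like this:

#include <cassert>
#include <stdexcept>

enum class Kind { kBytecode, kAsmWasm, kUncompiled };

struct Code { int id; };

// Early-return dispatch: each case returns immediately, so the optional group
// (here the asm/wasm case) can be wrapped in a single preprocessor guard
// without breaking an if/else chain.
Code GetCode(Kind kind) {
  if (kind == Kind::kBytecode) {
    return Code{1};  // e.g. an interpreter entry trampoline
  }
#if defined(ENABLE_WASM_EXAMPLE)
  if (kind == Kind::kAsmWasm) {
    return Code{2};  // e.g. an asm.js instantiation builtin
  }
#endif
  if (kind == Kind::kUncompiled) {
    return Code{3};  // e.g. a lazy-compile builtin
  }
  throw std::logic_error("unreachable");
}

int main() {
  assert(GetCode(Kind::kBytecode).id == 1);
  assert(GetCode(Kind::kUncompiled).id == 3);
}

Grouping the returns this way means the guarded block can be removed entirely when the build flag is off, with no else chain left to stitch back together.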
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index 4318b23d32e..f9db1acbcf7 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -18,7 +18,6 @@
#include "src/objects/smi.h"
#include "src/objects/struct.h"
#include "src/roots/roots.h"
-#include "src/wasm/value-type.h"
#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
#include "torque-generated/bit-fields.h"
#include "torque-generated/field-offsets.h"
@@ -43,6 +42,8 @@ class WasmJSFunctionData;
namespace wasm {
struct WasmModule;
+class ValueType;
+using FunctionSig = Signature<ValueType>;
} // namespace wasm
#include "torque-generated/src/objects/shared-function-info-tq.inc"
@@ -314,10 +315,24 @@ class SharedFunctionInfo
inline void flush_baseline_data();
inline BytecodeArray GetActiveBytecodeArray() const;
inline void SetActiveBytecodeArray(BytecodeArray bytecode);
+
+#if V8_ENABLE_WEBASSEMBLY
inline bool HasAsmWasmData() const;
+ inline bool HasWasmExportedFunctionData() const;
+ inline bool HasWasmJSFunctionData() const;
+ inline bool HasWasmCapiFunctionData() const;
inline AsmWasmData asm_wasm_data() const;
inline void set_asm_wasm_data(AsmWasmData data);
+ V8_EXPORT_PRIVATE WasmExportedFunctionData
+ wasm_exported_function_data() const;
+ WasmJSFunctionData wasm_js_function_data() const;
+ WasmCapiFunctionData wasm_capi_function_data() const;
+
+ inline const wasm::WasmModule* wasm_module() const;
+ inline const wasm::FunctionSig* wasm_function_signature() const;
+#endif // V8_ENABLE_WEBASSEMBLY
+
// builtin_id corresponds to the auto-generated Builtins::Name id.
inline bool HasBuiltinId() const;
inline int builtin_id() const;
@@ -331,16 +346,6 @@ class SharedFunctionInfo
inline void set_uncompiled_data_with_preparse_data(
UncompiledDataWithPreparseData data);
inline bool HasUncompiledDataWithoutPreparseData() const;
- inline bool HasWasmExportedFunctionData() const;
- V8_EXPORT_PRIVATE WasmExportedFunctionData
- wasm_exported_function_data() const;
- inline bool HasWasmJSFunctionData() const;
- WasmJSFunctionData wasm_js_function_data() const;
- inline bool HasWasmCapiFunctionData() const;
- WasmCapiFunctionData wasm_capi_function_data() const;
-
- inline const wasm::WasmModule* wasm_module() const;
- inline const wasm::FunctionSig* wasm_function_signature() const;
// Clear out pre-parsed scope data from UncompiledDataWithPreparseData,
// turning it into UncompiledDataWithoutPreparseData.
@@ -446,8 +451,10 @@ class SharedFunctionInfo
// global object.
DECL_BOOLEAN_ACCESSORS(native)
+#if V8_ENABLE_WEBASSEMBLY
// Indicates that asm->wasm conversion failed and should not be re-attempted.
DECL_BOOLEAN_ACCESSORS(is_asm_wasm_broken)
+#endif // V8_ENABLE_WEBASSEMBLY
// Indicates that the function was created by the Function function.
// Though it's anonymous, toString should treat it as if it had the name
@@ -651,6 +658,8 @@ class SharedFunctionInfo
Isolate* isolate);
private:
+ friend class WebSnapshotDeserializer;
+
#ifdef VERIFY_HEAP
void SharedFunctionInfoVerify(ReadOnlyRoots roots);
#endif
diff --git a/deps/v8/src/objects/slots-inl.h b/deps/v8/src/objects/slots-inl.h
index 2943c117c74..c0d35c525fb 100644
--- a/deps/v8/src/objects/slots-inl.h
+++ b/deps/v8/src/objects/slots-inl.h
@@ -31,7 +31,7 @@ bool FullObjectSlot::contains_value(Address raw_value) const {
Object FullObjectSlot::operator*() const { return Object(*location()); }
-Object FullObjectSlot::load(IsolateRoot isolate) const { return **this; }
+Object FullObjectSlot::load(PtrComprCageBase cage_base) const { return **this; }
void FullObjectSlot::store(Object value) const { *location() = value.ptr(); }
@@ -39,7 +39,7 @@ Object FullObjectSlot::Acquire_Load() const {
return Object(base::AsAtomicPointer::Acquire_Load(location()));
}
-Object FullObjectSlot::Acquire_Load(IsolateRoot isolate) const {
+Object FullObjectSlot::Acquire_Load(PtrComprCageBase cage_base) const {
return Acquire_Load();
}
@@ -47,7 +47,7 @@ Object FullObjectSlot::Relaxed_Load() const {
return Object(base::AsAtomicPointer::Relaxed_Load(location()));
}
-Object FullObjectSlot::Relaxed_Load(IsolateRoot isolate) const {
+Object FullObjectSlot::Relaxed_Load(PtrComprCageBase cage_base) const {
return Relaxed_Load();
}
@@ -79,7 +79,7 @@ MaybeObject FullMaybeObjectSlot::operator*() const {
return MaybeObject(*location());
}
-MaybeObject FullMaybeObjectSlot::load(IsolateRoot isolate) const {
+MaybeObject FullMaybeObjectSlot::load(PtrComprCageBase cage_base) const {
return **this;
}
@@ -91,7 +91,8 @@ MaybeObject FullMaybeObjectSlot::Relaxed_Load() const {
return MaybeObject(base::AsAtomicPointer::Relaxed_Load(location()));
}
-MaybeObject FullMaybeObjectSlot::Relaxed_Load(IsolateRoot isolate) const {
+MaybeObject FullMaybeObjectSlot::Relaxed_Load(
+ PtrComprCageBase cage_base) const {
return Relaxed_Load();
}
@@ -113,7 +114,7 @@ HeapObjectReference FullHeapObjectSlot::operator*() const {
return HeapObjectReference(*location());
}
-HeapObjectReference FullHeapObjectSlot::load(IsolateRoot isolate) const {
+HeapObjectReference FullHeapObjectSlot::load(PtrComprCageBase cage_base) const {
return **this;
}
diff --git a/deps/v8/src/objects/slots.h b/deps/v8/src/objects/slots.h
index 2221fb41c86..69c6a8a80bc 100644
--- a/deps/v8/src/objects/slots.h
+++ b/deps/v8/src/objects/slots.h
@@ -110,13 +110,13 @@ class FullObjectSlot : public SlotBase<FullObjectSlot, Address> {
inline bool contains_value(Address raw_value) const;
inline Object operator*() const;
- inline Object load(IsolateRoot isolate) const;
+ inline Object load(PtrComprCageBase cage_base) const;
inline void store(Object value) const;
inline Object Acquire_Load() const;
- inline Object Acquire_Load(IsolateRoot isolate) const;
+ inline Object Acquire_Load(PtrComprCageBase cage_base) const;
inline Object Relaxed_Load() const;
- inline Object Relaxed_Load(IsolateRoot isolate) const;
+ inline Object Relaxed_Load(PtrComprCageBase cage_base) const;
inline void Relaxed_Store(Object value) const;
inline void Release_Store(Object value) const;
inline Object Relaxed_CompareAndSwap(Object old, Object target) const;
@@ -147,11 +147,11 @@ class FullMaybeObjectSlot
: SlotBase(slot.address()) {}
inline MaybeObject operator*() const;
- inline MaybeObject load(IsolateRoot isolate) const;
+ inline MaybeObject load(PtrComprCageBase cage_base) const;
inline void store(MaybeObject value) const;
inline MaybeObject Relaxed_Load() const;
- inline MaybeObject Relaxed_Load(IsolateRoot isolate) const;
+ inline MaybeObject Relaxed_Load(PtrComprCageBase cage_base) const;
inline void Relaxed_Store(MaybeObject value) const;
inline void Release_CompareAndSwap(MaybeObject old, MaybeObject target) const;
};
@@ -174,7 +174,7 @@ class FullHeapObjectSlot : public SlotBase<FullHeapObjectSlot, Address> {
: SlotBase(slot.address()) {}
inline HeapObjectReference operator*() const;
- inline HeapObjectReference load(IsolateRoot isolate) const;
+ inline HeapObjectReference load(PtrComprCageBase cage_base) const;
inline void store(HeapObjectReference value) const;
inline HeapObject ToHeapObject() const;
diff --git a/deps/v8/src/objects/stack-frame-info-inl.h b/deps/v8/src/objects/stack-frame-info-inl.h
index 2df1e97ada8..5f433ba9b40 100644
--- a/deps/v8/src/objects/stack-frame-info-inl.h
+++ b/deps/v8/src/objects/stack-frame-info-inl.h
@@ -22,12 +22,14 @@ namespace internal {
TQ_OBJECT_CONSTRUCTORS_IMPL(StackFrameInfo)
NEVER_READ_ONLY_SPACE_IMPL(StackFrameInfo)
+#if V8_ENABLE_WEBASSEMBLY
BOOL_GETTER(StackFrameInfo, flags, IsWasm, IsWasmBit::kShift)
BOOL_GETTER(StackFrameInfo, flags, IsAsmJsWasm, IsAsmJsWasmBit::kShift)
-BOOL_GETTER(StackFrameInfo, flags, IsStrict, IsStrictBit::kShift)
-BOOL_GETTER(StackFrameInfo, flags, IsConstructor, IsConstructorBit::kShift)
BOOL_GETTER(StackFrameInfo, flags, IsAsmJsAtNumberConversion,
IsAsmJsAtNumberConversionBit::kShift)
+#endif // V8_ENABLE_WEBASSEMBLY
+BOOL_GETTER(StackFrameInfo, flags, IsStrict, IsStrictBit::kShift)
+BOOL_GETTER(StackFrameInfo, flags, IsConstructor, IsConstructorBit::kShift)
BOOL_GETTER(StackFrameInfo, flags, IsAsync, IsAsyncBit::kShift)
} // namespace internal
diff --git a/deps/v8/src/objects/stack-frame-info.cc b/deps/v8/src/objects/stack-frame-info.cc
index dff2e8e7ecc..ef1135acbed 100644
--- a/deps/v8/src/objects/stack-frame-info.cc
+++ b/deps/v8/src/objects/stack-frame-info.cc
@@ -38,11 +38,17 @@ bool StackFrameInfo::IsEval() const {
}
bool StackFrameInfo::IsUserJavaScript() const {
- return !IsWasm() && GetSharedFunctionInfo().IsUserJavaScript();
+#if V8_ENABLE_WEBASSEMBLY
+ if (IsWasm()) return false;
+#endif // V8_ENABLE_WEBASSEMBLY
+ return GetSharedFunctionInfo().IsUserJavaScript();
}
bool StackFrameInfo::IsMethodCall() const {
- return !IsWasm() && !IsToplevel() && !IsConstructor();
+#if V8_ENABLE_WEBASSEMBLY
+ if (IsWasm()) return false;
+#endif // V8_ENABLE_WEBASSEMBLY
+ return !IsToplevel() && !IsConstructor();
}
bool StackFrameInfo::IsToplevel() const {
@@ -53,9 +59,11 @@ bool StackFrameInfo::IsToplevel() const {
// static
int StackFrameInfo::GetLineNumber(Handle<StackFrameInfo> info) {
Isolate* isolate = info->GetIsolate();
+#if V8_ENABLE_WEBASSEMBLY
if (info->IsWasm() && !info->IsAsmJsWasm()) {
return 1;
}
+#endif // V8_ENABLE_WEBASSEMBLY
Handle<Script> script;
if (GetScript(isolate, info).ToHandle(&script)) {
int position = GetSourcePosition(info);
@@ -68,9 +76,11 @@ int StackFrameInfo::GetLineNumber(Handle<StackFrameInfo> info) {
int StackFrameInfo::GetColumnNumber(Handle<StackFrameInfo> info) {
Isolate* isolate = info->GetIsolate();
int position = GetSourcePosition(info);
+#if V8_ENABLE_WEBASSEMBLY
if (info->IsWasm() && !info->IsAsmJsWasm()) {
return position + 1;
}
+#endif // V8_ENABLE_WEBASSEMBLY
Handle<Script> script;
if (GetScript(isolate, info).ToHandle(&script)) {
return Script::GetColumnNumber(script, position) + 1;
@@ -81,47 +91,53 @@ int StackFrameInfo::GetColumnNumber(Handle<StackFrameInfo> info) {
// static
int StackFrameInfo::GetEnclosingLineNumber(Handle<StackFrameInfo> info) {
Isolate* isolate = info->GetIsolate();
+#if V8_ENABLE_WEBASSEMBLY
if (info->IsWasm() && !info->IsAsmJsWasm()) {
return 1;
}
+#endif // V8_ENABLE_WEBASSEMBLY
Handle<Script> script;
- if (GetScript(isolate, info).ToHandle(&script)) {
- int position;
- if (info->IsAsmJsWasm()) {
- auto module = info->GetWasmInstance().module();
- auto func_index = info->GetWasmFunctionIndex();
- position = wasm::GetSourcePosition(module, func_index, 0,
- info->IsAsmJsAtNumberConversion());
- } else {
- position = info->GetSharedFunctionInfo().function_token_position();
- }
+ if (!GetScript(isolate, info).ToHandle(&script)) {
+ return Message::kNoLineNumberInfo;
+ }
+#if V8_ENABLE_WEBASSEMBLY
+ if (info->IsAsmJsWasm()) {
+ auto module = info->GetWasmInstance().module();
+ auto func_index = info->GetWasmFunctionIndex();
+ int position = wasm::GetSourcePosition(module, func_index, 0,
+ info->IsAsmJsAtNumberConversion());
return Script::GetLineNumber(script, position) + 1;
}
- return Message::kNoLineNumberInfo;
+#endif // V8_ENABLE_WEBASSEMBLY
+ int position = info->GetSharedFunctionInfo().function_token_position();
+ return Script::GetLineNumber(script, position) + 1;
}
// static
int StackFrameInfo::GetEnclosingColumnNumber(Handle<StackFrameInfo> info) {
Isolate* isolate = info->GetIsolate();
+#if V8_ENABLE_WEBASSEMBLY
if (info->IsWasm() && !info->IsAsmJsWasm()) {
auto module = info->GetWasmInstance().module();
auto func_index = info->GetWasmFunctionIndex();
return GetWasmFunctionOffset(module, func_index);
}
+#endif // V8_ENABLE_WEBASSEMBLY
Handle<Script> script;
- if (GetScript(isolate, info).ToHandle(&script)) {
- int position;
- if (info->IsAsmJsWasm()) {
- auto module = info->GetWasmInstance().module();
- auto func_index = info->GetWasmFunctionIndex();
- position = wasm::GetSourcePosition(module, func_index, 0,
- info->IsAsmJsAtNumberConversion());
- } else {
- position = info->GetSharedFunctionInfo().function_token_position();
- }
+ if (!GetScript(isolate, info).ToHandle(&script)) {
+ return Message::kNoColumnInfo;
+ }
+#if V8_ENABLE_WEBASSEMBLY
+ if (info->IsAsmJsWasm()) {
+ auto module = info->GetWasmInstance().module();
+ auto func_index = info->GetWasmFunctionIndex();
+ int position = wasm::GetSourcePosition(module, func_index, 0,
+ info->IsAsmJsAtNumberConversion());
return Script::GetColumnNumber(script, position) + 1;
}
- return Message::kNoColumnInfo;
+#endif // V8_ENABLE_WEBASSEMBLY
+ int position = info->GetSharedFunctionInfo().function_token_position();
+ return Script::GetColumnNumber(script, position) + 1;
}
int StackFrameInfo::GetScriptId() const {
@@ -145,6 +161,22 @@ Object StackFrameInfo::GetScriptNameOrSourceURL() const {
return ReadOnlyRoots(GetIsolate()).null_value();
}
+Object StackFrameInfo::GetScriptSource() const {
+ if (auto script = GetScript()) {
+ if (script->HasValidSource()) {
+ return script->source();
+ }
+ }
+ return ReadOnlyRoots(GetIsolate()).null_value();
+}
+
+Object StackFrameInfo::GetScriptSourceMappingURL() const {
+ if (auto script = GetScript()) {
+ return script->source_mapping_url();
+ }
+ return ReadOnlyRoots(GetIsolate()).null_value();
+}
+
namespace {
MaybeHandle<String> FormatEvalOrigin(Isolate* isolate, Handle<Script> script) {
@@ -213,6 +245,7 @@ Handle<PrimitiveHeapObject> StackFrameInfo::GetEvalOrigin(
// static
Handle<Object> StackFrameInfo::GetFunctionName(Handle<StackFrameInfo> info) {
Isolate* isolate = info->GetIsolate();
+#if V8_ENABLE_WEBASSEMBLY
if (info->IsWasm()) {
Handle<WasmModuleObject> module_object(
info->GetWasmInstance().module_object(), isolate);
@@ -223,12 +256,13 @@ Handle<Object> StackFrameInfo::GetFunctionName(Handle<StackFrameInfo> info) {
.ToHandle(&name)) {
return name;
}
- } else {
- Handle<JSFunction> function(JSFunction::cast(info->function()), isolate);
- Handle<String> name = JSFunction::GetDebugName(function);
- if (name->length() != 0) return name;
- if (info->IsEval()) return isolate->factory()->eval_string();
+ return isolate->factory()->null_value();
}
+#endif // V8_ENABLE_WEBASSEMBLY
+ Handle<JSFunction> function(JSFunction::cast(info->function()), isolate);
+ Handle<String> name = JSFunction::GetDebugName(function);
+ if (name->length() != 0) return name;
+ if (info->IsEval()) return isolate->factory()->eval_string();
return isolate->factory()->null_value();
}
@@ -240,7 +274,7 @@ PrimitiveHeapObject InferMethodNameFromFastObject(Isolate* isolate,
PrimitiveHeapObject name) {
ReadOnlyRoots roots(isolate);
Map map = receiver.map();
- DescriptorArray descriptors = map.instance_descriptors(kRelaxedLoad);
+ DescriptorArray descriptors = map.instance_descriptors(isolate);
for (auto i : map.IterateOwnDescriptors()) {
PrimitiveHeapObject key = descriptors.GetKey(i);
if (key.IsSymbol()) continue;
@@ -310,9 +344,9 @@ PrimitiveHeapObject InferMethodName(Isolate* isolate, JSReceiver receiver,
name = InferMethodNameFromDictionary(
isolate, JSGlobalObject::cast(object).global_dictionary(kAcquireLoad),
fun, name);
- } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ } else if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
name = InferMethodNameFromDictionary(
- isolate, object.property_dictionary_ordered(), fun, name);
+ isolate, object.property_dictionary_swiss(), fun, name);
} else {
name = InferMethodNameFromDictionary(
isolate, object.property_dictionary(), fun, name);
@@ -328,7 +362,10 @@ PrimitiveHeapObject InferMethodName(Isolate* isolate, JSReceiver receiver,
Handle<Object> StackFrameInfo::GetMethodName(Handle<StackFrameInfo> info) {
Isolate* isolate = info->GetIsolate();
Handle<Object> receiver_or_instance(info->receiver_or_instance(), isolate);
- if (info->IsWasm() || receiver_or_instance->IsNullOrUndefined(isolate)) {
+#if V8_ENABLE_WEBASSEMBLY
+ if (info->IsWasm()) return isolate->factory()->null_value();
+#endif // V8_ENABLE_WEBASSEMBLY
+ if (receiver_or_instance->IsNullOrUndefined(isolate)) {
return isolate->factory()->null_value();
}
@@ -403,6 +440,7 @@ Handle<Object> StackFrameInfo::GetTypeName(Handle<StackFrameInfo> info) {
return JSReceiver::GetConstructorName(receiver);
}
+#if V8_ENABLE_WEBASSEMBLY
uint32_t StackFrameInfo::GetWasmFunctionIndex() const {
DCHECK(IsWasm());
return Smi::ToInt(Smi::cast(function()));
@@ -414,6 +452,22 @@ WasmInstanceObject StackFrameInfo::GetWasmInstance() const {
}
// static
+Handle<Object> StackFrameInfo::GetWasmModuleName(Handle<StackFrameInfo> info) {
+ Isolate* isolate = info->GetIsolate();
+ if (info->IsWasm()) {
+ Handle<String> name;
+ auto module_object =
+ handle(info->GetWasmInstance().module_object(), isolate);
+ if (WasmModuleObject::GetModuleNameOrNull(isolate, module_object)
+ .ToHandle(&name)) {
+ return name;
+ }
+ }
+ return isolate->factory()->null_value();
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
+// static
int StackFrameInfo::GetSourcePosition(Handle<StackFrameInfo> info) {
if (info->flags() & kIsSourcePositionComputed) {
return info->code_offset_or_source_position();
@@ -431,6 +485,7 @@ int StackFrameInfo::GetSourcePosition(Handle<StackFrameInfo> info) {
bool StackFrameInfo::ComputeLocation(Handle<StackFrameInfo> info,
MessageLocation* location) {
Isolate* isolate = info->GetIsolate();
+#if V8_ENABLE_WEBASSEMBLY
if (info->IsWasm()) {
int pos = GetSourcePosition(info);
Handle<Script> script(info->GetWasmInstance().module_object().script(),
@@ -438,6 +493,7 @@ bool StackFrameInfo::ComputeLocation(Handle<StackFrameInfo> info,
*location = MessageLocation(script, pos, pos + 1);
return true;
}
+#endif // V8_ENABLE_WEBASSEMBLY
Handle<SharedFunctionInfo> shared(info->GetSharedFunctionInfo(), isolate);
if (!shared->IsSubjectToDebugging()) return false;
@@ -459,6 +515,7 @@ bool StackFrameInfo::ComputeLocation(Handle<StackFrameInfo> info,
int StackFrameInfo::ComputeSourcePosition(Handle<StackFrameInfo> info,
int offset) {
Isolate* isolate = info->GetIsolate();
+#if V8_ENABLE_WEBASSEMBLY
if (info->IsWasm()) {
auto code_ref = Managed<wasm::GlobalWasmCodeRef>::cast(info->code_object());
int byte_offset = code_ref.get()->code()->GetSourcePositionBefore(offset);
@@ -467,37 +524,27 @@ int StackFrameInfo::ComputeSourcePosition(Handle<StackFrameInfo> info,
return wasm::GetSourcePosition(module, func_index, byte_offset,
info->IsAsmJsAtNumberConversion());
}
+#endif // V8_ENABLE_WEBASSEMBLY
Handle<SharedFunctionInfo> shared(info->GetSharedFunctionInfo(), isolate);
SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared);
return AbstractCode::cast(info->code_object()).SourcePosition(offset);
}
-// static
-Handle<Object> StackFrameInfo::GetWasmModuleName(Handle<StackFrameInfo> info) {
- Isolate* isolate = info->GetIsolate();
- if (info->IsWasm()) {
- Handle<String> name;
- auto module_object =
- handle(info->GetWasmInstance().module_object(), isolate);
- if (WasmModuleObject::GetModuleNameOrNull(isolate, module_object)
- .ToHandle(&name)) {
- return name;
- }
- }
- return isolate->factory()->null_value();
-}
-
base::Optional<Script> StackFrameInfo::GetScript() const {
+#if V8_ENABLE_WEBASSEMBLY
if (IsWasm()) {
return GetWasmInstance().module_object().script();
}
+#endif // V8_ENABLE_WEBASSEMBLY
Object script = GetSharedFunctionInfo().script();
if (script.IsScript()) return Script::cast(script);
return base::nullopt;
}
SharedFunctionInfo StackFrameInfo::GetSharedFunctionInfo() const {
+#if V8_ENABLE_WEBASSEMBLY
DCHECK(!IsWasm());
+#endif // V8_ENABLE_WEBASSEMBLY
return JSFunction::cast(function()).shared();
}
@@ -659,6 +706,7 @@ void SerializeJSStackFrame(Isolate* isolate, Handle<StackFrameInfo> frame,
builder->AppendCString(")");
}
+#if V8_ENABLE_WEBASSEMBLY
bool IsAnonymousWasmScript(Isolate* isolate, Handle<Object> url) {
Handle<String> prefix =
isolate->factory()->NewStringFromStaticChars("wasm://wasm/");
@@ -703,19 +751,19 @@ void SerializeWasmStackFrame(Isolate* isolate, Handle<StackFrameInfo> frame,
if (has_name) builder->AppendCString(")");
}
+#endif // V8_ENABLE_WEBASSEMBLY
} // namespace
void SerializeStackFrameInfo(Isolate* isolate, Handle<StackFrameInfo> frame,
IncrementalStringBuilder* builder) {
- // Ordering here is important, as AsmJs frames are also marked as Wasm.
- if (frame->IsAsmJsWasm()) {
- SerializeJSStackFrame(isolate, frame, builder);
- } else if (frame->IsWasm()) {
+#if V8_ENABLE_WEBASSEMBLY
+ if (frame->IsWasm() && !frame->IsAsmJsWasm()) {
SerializeWasmStackFrame(isolate, frame, builder);
- } else {
- SerializeJSStackFrame(isolate, frame, builder);
+ return;
}
+#endif // V8_ENABLE_WEBASSEMBLY
+ SerializeJSStackFrame(isolate, frame, builder);
}
MaybeHandle<String> SerializeStackFrameInfo(Isolate* isolate,
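
The SerializeStackFrameInfo() rewrite above folds the three-way if/else into a single guarded special case: a frame takes the Wasm serializer only when it is Wasm and not asm.js-translated Wasm, and every other frame (including asm.js frames, which also report IsWasm) takes the JavaScript path. A small self-contained check of that equivalence, with plain bools and the made-up names OldDispatch/NewDispatch standing in for the frame flags and serializers, could read:

#include <cassert>
#include <initializer_list>

enum class Serializer { kJS, kWasm };

// Original dispatch: order matters because asm.js frames also report IsWasm.
Serializer OldDispatch(bool is_wasm, bool is_asm_js_wasm) {
  if (is_asm_js_wasm) return Serializer::kJS;
  if (is_wasm) return Serializer::kWasm;
  return Serializer::kJS;
}

// Rewritten dispatch: one guarded special case, then the common JS path.
Serializer NewDispatch(bool is_wasm, bool is_asm_js_wasm) {
  if (is_wasm && !is_asm_js_wasm) return Serializer::kWasm;
  return Serializer::kJS;
}

int main() {
  // The two formulations agree for every flag combination.
  for (bool is_wasm : {false, true}) {
    for (bool is_asm_js_wasm : {false, true}) {
      assert(OldDispatch(is_wasm, is_asm_js_wasm) ==
             NewDispatch(is_wasm, is_asm_js_wasm));
    }
  }
}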
diff --git a/deps/v8/src/objects/stack-frame-info.h b/deps/v8/src/objects/stack-frame-info.h
index 941b774f45b..783bbd33eb7 100644
--- a/deps/v8/src/objects/stack-frame-info.h
+++ b/deps/v8/src/objects/stack-frame-info.h
@@ -24,11 +24,14 @@ class StackFrameInfo
public:
NEVER_READ_ONLY_SPACE
+#if V8_ENABLE_WEBASSEMBLY
inline bool IsWasm() const;
inline bool IsAsmJsWasm() const;
+ inline bool IsAsmJsAtNumberConversion() const;
+#endif // V8_ENABLE_WEBASSEMBLY
+
inline bool IsStrict() const;
inline bool IsConstructor() const;
- inline bool IsAsmJsAtNumberConversion() const;
inline bool IsAsync() const;
bool IsEval() const;
bool IsUserJavaScript() const;
@@ -56,16 +59,20 @@ class StackFrameInfo
int GetScriptId() const;
Object GetScriptName() const;
Object GetScriptNameOrSourceURL() const;
+ Object GetScriptSource() const;
+ Object GetScriptSourceMappingURL() const;
static Handle<PrimitiveHeapObject> GetEvalOrigin(Handle<StackFrameInfo> info);
static Handle<Object> GetFunctionName(Handle<StackFrameInfo> info);
static Handle<Object> GetMethodName(Handle<StackFrameInfo> info);
static Handle<Object> GetTypeName(Handle<StackFrameInfo> info);
+#if V8_ENABLE_WEBASSEMBLY
// These methods are only valid for Wasm and asm.js Wasm frames.
uint32_t GetWasmFunctionIndex() const;
WasmInstanceObject GetWasmInstance() const;
static Handle<Object> GetWasmModuleName(Handle<StackFrameInfo> info);
+#endif // V8_ENABLE_WEBASSEMBLY
// Returns the 0-based source position, which is the offset into the
// Script in case of JavaScript and Asm.js, and the bytecode offset
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
index 5be0141ab83..912109b2e0a 100644
--- a/deps/v8/src/objects/string-inl.h
+++ b/deps/v8/src/objects/string-inl.h
@@ -9,6 +9,7 @@
#include "src/common/external-pointer-inl.h"
#include "src/common/external-pointer.h"
#include "src/common/globals.h"
+#include "src/execution/isolate-utils.h"
#include "src/handles/handles-inl.h"
#include "src/heap/factory.h"
#include "src/numbers/conversions-inl.h"
@@ -38,14 +39,15 @@ class V8_NODISCARD SharedStringAccessGuardIfNeeded {
// from a background thread.
explicit SharedStringAccessGuardIfNeeded(LocalIsolate* local_isolate) {
if (IsNeeded(local_isolate)) {
- mutex_guard.emplace(local_isolate->string_access());
+ mutex_guard.emplace(local_isolate->internalized_string_access());
}
}
// Slow version which gets the isolate from the String.
explicit SharedStringAccessGuardIfNeeded(String str) {
Isolate* isolate = GetIsolateIfNeeded(str);
- if (isolate != nullptr) mutex_guard.emplace(isolate->string_access());
+ if (isolate != nullptr)
+ mutex_guard.emplace(isolate->internalized_string_access());
}
static SharedStringAccessGuardIfNeeded NotNeeded() {
@@ -132,49 +134,51 @@ StringShape::StringShape(InstanceType t) : type_(static_cast<uint32_t>(t)) {
DCHECK_EQ(type_ & kIsNotStringMask, kStringTag);
}
-bool StringShape::IsInternalized() {
+bool StringShape::IsInternalized() const {
DCHECK(valid());
STATIC_ASSERT(kNotInternalizedTag != 0);
return (type_ & (kIsNotStringMask | kIsNotInternalizedMask)) ==
(kStringTag | kInternalizedTag);
}
-bool StringShape::IsCons() {
+bool StringShape::IsCons() const {
return (type_ & kStringRepresentationMask) == kConsStringTag;
}
-bool StringShape::IsThin() {
+bool StringShape::IsThin() const {
return (type_ & kStringRepresentationMask) == kThinStringTag;
}
-bool StringShape::IsSliced() {
+bool StringShape::IsSliced() const {
return (type_ & kStringRepresentationMask) == kSlicedStringTag;
}
-bool StringShape::IsIndirect() {
+bool StringShape::IsIndirect() const {
return (type_ & kIsIndirectStringMask) == kIsIndirectStringTag;
}
-bool StringShape::IsExternal() {
+bool StringShape::IsExternal() const {
return (type_ & kStringRepresentationMask) == kExternalStringTag;
}
-bool StringShape::IsSequential() {
+bool StringShape::IsSequential() const {
return (type_ & kStringRepresentationMask) == kSeqStringTag;
}
-bool StringShape::IsUncachedExternal() {
+bool StringShape::IsUncachedExternal() const {
return (type_ & kUncachedExternalStringMask) == kUncachedExternalStringTag;
}
-StringRepresentationTag StringShape::representation_tag() {
+StringRepresentationTag StringShape::representation_tag() const {
uint32_t tag = (type_ & kStringRepresentationMask);
return static_cast<StringRepresentationTag>(tag);
}
-uint32_t StringShape::encoding_tag() { return type_ & kStringEncodingMask; }
+uint32_t StringShape::encoding_tag() const {
+ return type_ & kStringEncodingMask;
+}
-uint32_t StringShape::full_representation_tag() {
+uint32_t StringShape::full_representation_tag() const {
return (type_ & (kStringRepresentationMask | kStringEncodingMask));
}
@@ -184,15 +188,15 @@ STATIC_ASSERT((kStringRepresentationMask | kStringEncodingMask) ==
STATIC_ASSERT(static_cast<uint32_t>(kStringEncodingMask) ==
Internals::kStringEncodingMask);
-bool StringShape::IsSequentialOneByte() {
+bool StringShape::IsSequentialOneByte() const {
return full_representation_tag() == (kSeqStringTag | kOneByteStringTag);
}
-bool StringShape::IsSequentialTwoByte() {
+bool StringShape::IsSequentialTwoByte() const {
return full_representation_tag() == (kSeqStringTag | kTwoByteStringTag);
}
-bool StringShape::IsExternalOneByte() {
+bool StringShape::IsExternalOneByte() const {
return full_representation_tag() == (kExternalStringTag | kOneByteStringTag);
}
@@ -201,7 +205,7 @@ STATIC_ASSERT((kExternalStringTag | kOneByteStringTag) ==
STATIC_ASSERT(v8::String::ONE_BYTE_ENCODING == kOneByteStringTag);
-bool StringShape::IsExternalTwoByte() {
+bool StringShape::IsExternalTwoByte() const {
return full_representation_tag() == (kExternalStringTag | kTwoByteStringTag);
}
@@ -270,12 +274,12 @@ inline TResult StringShape::DispatchToSpecificType(String str,
}
DEF_GETTER(String, IsOneByteRepresentation, bool) {
- uint32_t type = map(isolate).instance_type();
+ uint32_t type = map(cage_base).instance_type();
return (type & kStringEncodingMask) == kOneByteStringTag;
}
DEF_GETTER(String, IsTwoByteRepresentation, bool) {
- uint32_t type = map(isolate).instance_type();
+ uint32_t type = map(cage_base).instance_type();
return (type & kStringEncodingMask) == kTwoByteStringTag;
}
@@ -297,7 +301,7 @@ bool String::IsOneByteRepresentationUnderneath(String string) {
}
}
-uc32 FlatStringReader::Get(int index) {
+uc32 FlatStringReader::Get(int index) const {
if (is_one_byte_) {
return Get<uint8_t>(index);
} else {
@@ -306,7 +310,7 @@ uc32 FlatStringReader::Get(int index) {
}
template <typename Char>
-Char FlatStringReader::Get(int index) {
+Char FlatStringReader::Get(int index) const {
DCHECK_EQ(is_one_byte_, sizeof(Char) == 1);
DCHECK(0 <= index && index < length_);
if (sizeof(Char) == 1) {
@@ -400,7 +404,8 @@ class SeqSubStringKey final : public StringTableKey {
DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(*string_));
DisallowGarbageCollection no_gc;
return string.IsEqualTo<String::EqualityType::kNoLengthCheck>(
- Vector<const Char>(string_->GetChars(no_gc) + from_, length()));
+ Vector<const Char>(string_->GetChars(no_gc) + from_, length()),
+ isolate);
}
Handle<String> AsHandle(Isolate* isolate) {
@@ -431,7 +436,7 @@ class SeqSubStringKey final : public StringTableKey {
using SeqOneByteSubStringKey = SeqSubStringKey<SeqOneByteString>;
using SeqTwoByteSubStringKey = SeqSubStringKey<SeqTwoByteString>;
-bool String::Equals(String other) {
+bool String::Equals(String other) const {
if (other == *this) return true;
if (this->IsInternalizedString() && other.IsInternalizedString()) {
return false;
@@ -439,6 +444,7 @@ bool String::Equals(String other) {
return SlowEquals(other);
}
+// static
bool String::Equals(Isolate* isolate, Handle<String> one, Handle<String> two) {
if (one.is_identical_to(two)) return true;
if (one->IsInternalizedString() && two->IsInternalizedString()) {
@@ -450,19 +456,26 @@ bool String::Equals(Isolate* isolate, Handle<String> one, Handle<String> two) {
template <String::EqualityType kEqType, typename Char>
bool String::IsEqualTo(Vector<const Char> str, Isolate* isolate) const {
DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(*this));
- return IsEqualToImpl<kEqType>(str,
+ return IsEqualToImpl<kEqType>(str, isolate,
+ SharedStringAccessGuardIfNeeded::NotNeeded());
+}
+
+template <String::EqualityType kEqType, typename Char>
+bool String::IsEqualTo(Vector<const Char> str) const {
+ DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(*this));
+ return IsEqualToImpl<kEqType>(str, GetPtrComprCageBase(*this),
SharedStringAccessGuardIfNeeded::NotNeeded());
}
template <String::EqualityType kEqType, typename Char>
bool String::IsEqualTo(Vector<const Char> str, LocalIsolate* isolate) const {
SharedStringAccessGuardIfNeeded access_guard(isolate);
- return IsEqualToImpl<kEqType>(str, access_guard);
+ return IsEqualToImpl<kEqType>(str, isolate, access_guard);
}
template <String::EqualityType kEqType, typename Char>
bool String::IsEqualToImpl(
- Vector<const Char> str,
+ Vector<const Char> str, PtrComprCageBase cage_base,
const SharedStringAccessGuardIfNeeded& access_guard) const {
size_t len = str.size();
switch (kEqType) {
@@ -479,66 +492,93 @@ bool String::IsEqualToImpl(
DisallowGarbageCollection no_gc;
- class IsEqualToDispatcher : public AllStatic {
- public:
- static inline bool HandleSeqOneByteString(
- SeqOneByteString str, const Char* data, size_t len,
- const DisallowGarbageCollection& no_gc,
- const SharedStringAccessGuardIfNeeded& access_guard) {
- return CompareCharsEqual(str.GetChars(no_gc, access_guard), data, len);
- }
- static inline bool HandleSeqTwoByteString(
- SeqTwoByteString str, const Char* data, size_t len,
- const DisallowGarbageCollection& no_gc,
- const SharedStringAccessGuardIfNeeded& access_guard) {
- return CompareCharsEqual(str.GetChars(no_gc, access_guard), data, len);
- }
- static inline bool HandleExternalOneByteString(
- ExternalOneByteString str, const Char* data, size_t len,
- const DisallowGarbageCollection& no_gc,
- const SharedStringAccessGuardIfNeeded& access_guard) {
- return CompareCharsEqual(str.GetChars(), data, len);
- }
- static inline bool HandleExternalTwoByteString(
- ExternalTwoByteString str, const Char* data, size_t len,
- const DisallowGarbageCollection& no_gc,
- const SharedStringAccessGuardIfNeeded& access_guard) {
- return CompareCharsEqual(str.GetChars(), data, len);
- }
- static inline bool HandleConsString(
- ConsString str, const Char* data, size_t len,
- const DisallowGarbageCollection& no_gc,
- const SharedStringAccessGuardIfNeeded& access_guard) {
- UNREACHABLE();
- }
- static inline bool HandleSlicedString(
- SlicedString str, const Char* data, size_t len,
- const DisallowGarbageCollection& no_gc,
- const SharedStringAccessGuardIfNeeded& access_guard) {
- UNREACHABLE();
- }
- static inline bool HandleThinString(
- ThinString str, const Char* data, size_t len,
- const DisallowGarbageCollection& no_gc,
- const SharedStringAccessGuardIfNeeded& access_guard) {
- UNREACHABLE();
- }
- static inline bool HandleInvalidString(
- String str, const Char* data, size_t len,
- const DisallowGarbageCollection& no_gc,
- const SharedStringAccessGuardIfNeeded& access_guard) {
- UNREACHABLE();
+ int slice_offset = 0;
+ String string = *this;
+ const Char* data = str.data();
+ while (true) {
+ int32_t type = string.map(cage_base).instance_type();
+ switch (type & (kStringRepresentationMask | kStringEncodingMask)) {
+ case kSeqStringTag | kOneByteStringTag:
+ return CompareCharsEqual(
+ SeqOneByteString::cast(string).GetChars(no_gc, access_guard) +
+ slice_offset,
+ data, len);
+ case kSeqStringTag | kTwoByteStringTag:
+ return CompareCharsEqual(
+ SeqTwoByteString::cast(string).GetChars(no_gc, access_guard) +
+ slice_offset,
+ data, len);
+ case kExternalStringTag | kOneByteStringTag:
+ return CompareCharsEqual(
+ ExternalOneByteString::cast(string).GetChars() + slice_offset, data,
+ len);
+ case kExternalStringTag | kTwoByteStringTag:
+ return CompareCharsEqual(
+ ExternalTwoByteString::cast(string).GetChars() + slice_offset, data,
+ len);
+
+ case kSlicedStringTag | kOneByteStringTag:
+ case kSlicedStringTag | kTwoByteStringTag: {
+ SlicedString slicedString = SlicedString::cast(string);
+ slice_offset += slicedString.offset();
+ string = slicedString.parent(cage_base);
+ continue;
+ }
+
+ case kConsStringTag | kOneByteStringTag:
+ case kConsStringTag | kTwoByteStringTag: {
+ // The ConsString path is more complex and rare, so call out to an
+ // out-of-line handler.
+ return IsConsStringEqualToImpl<Char>(ConsString::cast(string),
+ slice_offset, str, cage_base,
+ access_guard);
+ }
+
+ case kThinStringTag | kOneByteStringTag:
+ case kThinStringTag | kTwoByteStringTag:
+ string = ThinString::cast(string).actual(cage_base);
+ continue;
+
+ default:
+ UNREACHABLE();
}
- };
+ }
+}
- return StringShape(*this).DispatchToSpecificType<IsEqualToDispatcher, bool>(
- *this, str.data(), len, no_gc, access_guard);
+// static
+template <typename Char>
+bool String::IsConsStringEqualToImpl(
+ ConsString string, int slice_offset, Vector<const Char> str,
+ PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) {
+ // Already checked the len in IsEqualToImpl. Check GE rather than EQ in case
+ // this is a prefix check.
+ DCHECK_GE(string.length(), str.size());
+
+ ConsStringIterator iter(ConsString::cast(string), slice_offset);
+ Vector<const Char> remaining_str = str;
+ for (String segment = iter.Next(&slice_offset); !segment.is_null();
+ segment = iter.Next(&slice_offset)) {
+ // Compare the individual segment against the appropriate subvector of the
+ // remaining string.
+ size_t len = std::min<size_t>(segment.length(), remaining_str.size());
+ Vector<const Char> sub_str = remaining_str.SubVector(0, len);
+ if (!segment.IsEqualToImpl<EqualityType::kNoLengthCheck>(sub_str, cage_base,
+ access_guard)) {
+ return false;
+ }
+ remaining_str += len;
+ if (remaining_str.empty()) break;
+ }
+ DCHECK_EQ(remaining_str.data(), str.end());
+ DCHECK_EQ(remaining_str.size(), 0);
+ return true;
}
bool String::IsOneByteEqualTo(Vector<const char> str) { return IsEqualTo(str); }
template <typename Char>
-const Char* String::GetChars(const DisallowGarbageCollection& no_gc) {
+const Char* String::GetChars(const DisallowGarbageCollection& no_gc) const {
DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(*this));
return StringShape(*this).IsExternal()
? CharTraits<Char>::ExternalString::cast(*this).GetChars()
@@ -548,7 +588,7 @@ const Char* String::GetChars(const DisallowGarbageCollection& no_gc) {
template <typename Char>
const Char* String::GetChars(
const DisallowGarbageCollection& no_gc,
- const SharedStringAccessGuardIfNeeded& access_guard) {
+ const SharedStringAccessGuardIfNeeded& access_guard) const {
return StringShape(*this).IsExternal()
? CharTraits<Char>::ExternalString::cast(*this).GetChars()
: CharTraits<Char>::String::cast(*this).GetChars(no_gc,
@@ -579,17 +619,17 @@ Handle<String> String::Flatten(LocalIsolate* isolate, Handle<String> string,
return string;
}
-uint16_t String::Get(int index, Isolate* isolate) {
+uint16_t String::Get(int index, Isolate* isolate) const {
DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(*this));
return GetImpl(index);
}
-uint16_t String::Get(int index, LocalIsolate* local_isolate) {
+uint16_t String::Get(int index, LocalIsolate* local_isolate) const {
SharedStringAccessGuardIfNeeded scope(local_isolate);
return GetImpl(index);
}
-uint16_t String::GetImpl(int index) {
+uint16_t String::GetImpl(int index) const {
DCHECK(index >= 0 && index < length());
class StringGetDispatcher : public AllStatic {
@@ -618,12 +658,12 @@ void String::Set(int index, uint16_t value) {
: SeqTwoByteString::cast(*this).SeqTwoByteStringSet(index, value);
}
-bool String::IsFlat() {
+bool String::IsFlat() const {
if (!StringShape(*this).IsCons()) return true;
return ConsString::cast(*this).second().length() == 0;
}
-String String::GetUnderlying() {
+String String::GetUnderlying() const {
// Giving direct access to the underlying string only makes sense if the
// wrapping string is already flattened.
DCHECK(this->IsFlat());
@@ -727,7 +767,7 @@ uint32_t String::ToValidIndex(Object number) {
return index;
}
-uint8_t SeqOneByteString::Get(int index) {
+uint8_t SeqOneByteString::Get(int index) const {
DCHECK(index >= 0 && index < length());
return ReadField<byte>(kHeaderSize + index * kCharSize);
}
@@ -737,11 +777,12 @@ void SeqOneByteString::SeqOneByteStringSet(int index, uint16_t value) {
WriteField<byte>(kHeaderSize + index * kCharSize, static_cast<byte>(value));
}
-Address SeqOneByteString::GetCharsAddress() {
+Address SeqOneByteString::GetCharsAddress() const {
return field_address(kHeaderSize);
}
-uint8_t* SeqOneByteString::GetChars(const DisallowGarbageCollection& no_gc) {
+uint8_t* SeqOneByteString::GetChars(
+ const DisallowGarbageCollection& no_gc) const {
USE(no_gc);
DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(*this));
return reinterpret_cast<uint8_t*>(GetCharsAddress());
@@ -749,17 +790,17 @@ uint8_t* SeqOneByteString::GetChars(const DisallowGarbageCollection& no_gc) {
uint8_t* SeqOneByteString::GetChars(
const DisallowGarbageCollection& no_gc,
- const SharedStringAccessGuardIfNeeded& access_guard) {
+ const SharedStringAccessGuardIfNeeded& access_guard) const {
USE(no_gc);
USE(access_guard);
return reinterpret_cast<uint8_t*>(GetCharsAddress());
}
-Address SeqTwoByteString::GetCharsAddress() {
+Address SeqTwoByteString::GetCharsAddress() const {
return field_address(kHeaderSize);
}
-uc16* SeqTwoByteString::GetChars(const DisallowGarbageCollection& no_gc) {
+uc16* SeqTwoByteString::GetChars(const DisallowGarbageCollection& no_gc) const {
USE(no_gc);
DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(*this));
return reinterpret_cast<uc16*>(GetCharsAddress());
@@ -767,13 +808,13 @@ uc16* SeqTwoByteString::GetChars(const DisallowGarbageCollection& no_gc) {
uc16* SeqTwoByteString::GetChars(
const DisallowGarbageCollection& no_gc,
- const SharedStringAccessGuardIfNeeded& access_guard) {
+ const SharedStringAccessGuardIfNeeded& access_guard) const {
USE(no_gc);
USE(access_guard);
return reinterpret_cast<uc16*>(GetCharsAddress());
}
-uint16_t SeqTwoByteString::Get(int index) {
+uint16_t SeqTwoByteString::Get(int index) const {
DCHECK(index >= 0 && index < length());
return ReadField<uint16_t>(kHeaderSize + index * kShortSize);
}
@@ -806,7 +847,7 @@ Object ConsString::unchecked_second() {
}
DEF_GETTER(ThinString, unchecked_actual, HeapObject) {
- return TaggedField<HeapObject, kActualOffset>::load(isolate, *this);
+ return TaggedField<HeapObject, kActualOffset>::load(cage_base, *this);
}
bool ExternalString::is_uncached() const {
@@ -821,7 +862,7 @@ void ExternalString::AllocateExternalPointerEntries(Isolate* isolate) {
}
DEF_GETTER(ExternalString, resource_as_address, Address) {
- return ReadExternalPointerField(kResourceOffset, isolate,
+ return ReadExternalPointerField(kResourceOffset, cage_base,
kExternalStringResourceTag);
}
@@ -869,7 +910,7 @@ DEF_GETTER(ExternalOneByteString, resource,
DEF_GETTER(ExternalOneByteString, mutable_resource,
ExternalOneByteString::Resource*) {
- return reinterpret_cast<Resource*>(resource_as_address(isolate));
+ return reinterpret_cast<Resource*>(resource_as_address(cage_base));
}
void ExternalOneByteString::update_data_cache(Isolate* isolate) {
@@ -900,7 +941,7 @@ void ExternalOneByteString::set_resource(
if (resource != nullptr) update_data_cache(isolate);
}
-const uint8_t* ExternalOneByteString::GetChars() {
+const uint8_t* ExternalOneByteString::GetChars() const {
DisallowGarbageCollection no_gc;
if (is_uncached()) {
if (resource()->IsCacheable()) {
@@ -922,7 +963,7 @@ const uint8_t* ExternalOneByteString::GetChars() {
return reinterpret_cast<const uint8_t*>(resource()->data());
}
-uint8_t ExternalOneByteString::Get(int index) {
+uint8_t ExternalOneByteString::Get(int index) const {
DCHECK(index >= 0 && index < length());
return GetChars()[index];
}
@@ -934,7 +975,7 @@ DEF_GETTER(ExternalTwoByteString, resource,
DEF_GETTER(ExternalTwoByteString, mutable_resource,
ExternalTwoByteString::Resource*) {
- return reinterpret_cast<Resource*>(resource_as_address(isolate));
+ return reinterpret_cast<Resource*>(resource_as_address(cage_base));
}
void ExternalTwoByteString::update_data_cache(Isolate* isolate) {
@@ -965,7 +1006,7 @@ void ExternalTwoByteString::set_resource(
if (resource != nullptr) update_data_cache(isolate);
}
-const uint16_t* ExternalTwoByteString::GetChars() {
+const uint16_t* ExternalTwoByteString::GetChars() const {
DisallowGarbageCollection no_gc;
if (is_uncached()) {
if (resource()->IsCacheable()) {
@@ -987,7 +1028,7 @@ const uint16_t* ExternalTwoByteString::GetChars() {
return resource()->data();
}
-uint16_t ExternalTwoByteString::Get(int index) {
+uint16_t ExternalTwoByteString::Get(int index) const {
DCHECK(index >= 0 && index < length());
return GetChars()[index];
}
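
The String::IsEqualToImpl() rewrite in the string-inl.h hunks above replaces the static dispatcher class with an explicit loop that switches on the representation tag, accumulating a slice offset and following sliced/thin indirections until it reaches a flat character sequence it can compare directly. The sketch below imitates that shape with a made-up Node type; kSeq/kSliced/kThin are stand-ins for illustration, not the real V8 tags:

#include <cassert>
#include <cstring>
#include <string>

// Toy string representation: either a flat character buffer, a slice into
// another node, or a thin forwarding pointer to another node.
struct Node {
  enum Kind { kSeq, kSliced, kThin } kind;
  std::string chars;             // only for kSeq
  const Node* target = nullptr;  // parent (kSliced) or actual (kThin)
  int offset = 0;                // only for kSliced
};

// Loop-based comparison: peel indirections, then compare the flat payload.
bool IsEqualTo(const Node* node, const char* data, size_t len) {
  int slice_offset = 0;
  while (true) {
    switch (node->kind) {
      case Node::kSeq:
        return node->chars.size() >= slice_offset + len &&
               std::memcmp(node->chars.data() + slice_offset, data, len) == 0;
      case Node::kSliced:
        slice_offset += node->offset;
        node = node->target;
        continue;
      case Node::kThin:
        node = node->target;
        continue;
    }
  }
}

int main() {
  Node base{Node::kSeq, "hello world"};
  Node slice{Node::kSliced, "", &base, /*offset=*/6};
  Node thin{Node::kThin, "", &slice};
  assert(IsEqualTo(&thin, "world", 5));
  assert(!IsEqualTo(&thin, "wor1d", 5));
}

The real implementation additionally distinguishes one-byte and two-byte encodings, handles external strings, and hands cons strings off to the out-of-line IsConsStringEqualToImpl() shown in the diff; the toy version leaves all of that out.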
diff --git a/deps/v8/src/objects/string-table.cc b/deps/v8/src/objects/string-table.cc
index 8d5b44c6c59..a5493761162 100644
--- a/deps/v8/src/objects/string-table.cc
+++ b/deps/v8/src/objects/string-table.cc
@@ -91,15 +91,15 @@ bool KeyIsMatch(LocalIsolate* isolate, StringTableKey* key, String string) {
class StringTable::Data {
public:
static std::unique_ptr<Data> New(int capacity);
- static std::unique_ptr<Data> Resize(IsolateRoot isolate,
+ static std::unique_ptr<Data> Resize(PtrComprCageBase cage_base,
std::unique_ptr<Data> data, int capacity);
OffHeapObjectSlot slot(InternalIndex index) const {
return OffHeapObjectSlot(&elements_[index.as_uint32()]);
}
- Object Get(IsolateRoot isolate, InternalIndex index) const {
- return slot(index).Acquire_Load(isolate);
+ Object Get(PtrComprCageBase cage_base, InternalIndex index) const {
+ return slot(index).Acquire_Load(cage_base);
}
void Set(InternalIndex index, String entry) {
@@ -139,7 +139,8 @@ class StringTable::Data {
InternalIndex FindEntry(LocalIsolate* isolate, StringTableKey* key,
uint32_t hash) const;
- InternalIndex FindInsertionEntry(IsolateRoot isolate, uint32_t hash) const;
+ InternalIndex FindInsertionEntry(PtrComprCageBase cage_base,
+ uint32_t hash) const;
template <typename LocalIsolate, typename StringTableKey>
InternalIndex FindEntryOrInsertionEntry(LocalIsolate* isolate,
@@ -157,7 +158,7 @@ class StringTable::Data {
Data* PreviousData() { return previous_data_.get(); }
void DropPreviousData() { previous_data_.reset(); }
- void Print(IsolateRoot isolate) const;
+ void Print(PtrComprCageBase cage_base) const;
size_t GetCurrentMemoryUsage() const;
private:
@@ -224,7 +225,7 @@ std::unique_ptr<StringTable::Data> StringTable::Data::New(int capacity) {
}
std::unique_ptr<StringTable::Data> StringTable::Data::Resize(
- IsolateRoot isolate, std::unique_ptr<Data> data, int capacity) {
+ PtrComprCageBase cage_base, std::unique_ptr<Data> data, int capacity) {
std::unique_ptr<Data> new_data(new (capacity) Data(capacity));
DCHECK_LT(data->number_of_elements(), new_data->capacity());
@@ -234,11 +235,12 @@ std::unique_ptr<StringTable::Data> StringTable::Data::Resize(
// Rehash the elements.
for (InternalIndex i : InternalIndex::Range(data->capacity())) {
- Object element = data->Get(isolate, i);
+ Object element = data->Get(cage_base, i);
if (element == empty_element() || element == deleted_element()) continue;
String string = String::cast(element);
uint32_t hash = string.hash();
- InternalIndex insertion_index = new_data->FindInsertionEntry(isolate, hash);
+ InternalIndex insertion_index =
+ new_data->FindInsertionEntry(cage_base, hash);
new_data->Set(insertion_index, string);
}
new_data->number_of_elements_ = data->number_of_elements();
@@ -265,7 +267,7 @@ InternalIndex StringTable::Data::FindEntry(LocalIsolate* isolate,
}
}
-InternalIndex StringTable::Data::FindInsertionEntry(IsolateRoot isolate,
+InternalIndex StringTable::Data::FindInsertionEntry(PtrComprCageBase cage_base,
uint32_t hash) const {
uint32_t count = 1;
// EnsureCapacity will guarantee the hash table is never full.
@@ -273,7 +275,7 @@ InternalIndex StringTable::Data::FindInsertionEntry(IsolateRoot isolate,
entry = NextProbe(entry, count++, capacity_)) {
// TODO(leszeks): Consider delaying the decompression until after the
// comparisons against empty/deleted.
- Object element = Get(isolate, entry);
+ Object element = Get(cage_base, entry);
if (element == empty_element() || element == deleted_element())
return entry;
}
@@ -314,11 +316,12 @@ void StringTable::Data::IterateElements(RootVisitor* visitor) {
visitor->VisitRootPointers(Root::kStringTable, nullptr, first_slot, end_slot);
}
-void StringTable::Data::Print(IsolateRoot isolate) const {
+void StringTable::Data::Print(PtrComprCageBase cage_base) const {
OFStream os(stdout);
os << "StringTable {" << std::endl;
for (InternalIndex i : InternalIndex::Range(capacity_)) {
- os << " " << i.as_uint32() << ": " << Brief(Get(isolate, i)) << std::endl;
+ os << " " << i.as_uint32() << ": " << Brief(Get(cage_base, i))
+ << std::endl;
}
os << "}" << std::endl;
}
@@ -530,7 +533,7 @@ template Handle<String> StringTable::LookupKey(LocalIsolate* isolate,
template Handle<String> StringTable::LookupKey(Isolate* isolate,
StringTableInsertionKey* key);
-StringTable::Data* StringTable::EnsureCapacity(IsolateRoot isolate,
+StringTable::Data* StringTable::EnsureCapacity(PtrComprCageBase cage_base,
int additional_elements) {
// This call is only allowed while the write mutex is held.
write_mutex_.AssertHeld();
@@ -560,7 +563,7 @@ StringTable::Data* StringTable::EnsureCapacity(IsolateRoot isolate,
if (new_capacity != -1) {
std::unique_ptr<Data> new_data =
- Data::Resize(isolate, std::unique_ptr<Data>(data), new_capacity);
+ Data::Resize(cage_base, std::unique_ptr<Data>(data), new_capacity);
// `new_data` is the new owner of `data`.
DCHECK_EQ(new_data->PreviousData(), data);
// Release-store the new data pointer as `data_`, so that it can be
@@ -669,8 +672,8 @@ Address StringTable::TryStringToIndexOrLookupExisting(Isolate* isolate,
isolate, string, source, start);
}
-void StringTable::Print(IsolateRoot isolate) const {
- data_.load(std::memory_order_acquire)->Print(isolate);
+void StringTable::Print(PtrComprCageBase cage_base) const {
+ data_.load(std::memory_order_acquire)->Print(cage_base);
}
size_t StringTable::GetCurrentMemoryUsage() const {
diff --git a/deps/v8/src/objects/string-table.h b/deps/v8/src/objects/string-table.h
index baf9518ea3f..fe87ce15f2b 100644
--- a/deps/v8/src/objects/string-table.h
+++ b/deps/v8/src/objects/string-table.h
@@ -72,7 +72,7 @@ class V8_EXPORT_PRIVATE StringTable {
static Address TryStringToIndexOrLookupExisting(Isolate* isolate,
Address raw_string);
- void Print(IsolateRoot isolate) const;
+ void Print(PtrComprCageBase cage_base) const;
size_t GetCurrentMemoryUsage() const;
// The following methods must be called either while holding the write lock,
@@ -84,7 +84,7 @@ class V8_EXPORT_PRIVATE StringTable {
private:
class Data;
- Data* EnsureCapacity(IsolateRoot isolate, int additional_elements);
+ Data* EnsureCapacity(PtrComprCageBase cage_base, int additional_elements);
std::atomic<Data*> data_;
// Write mutex is mutable so that readers of concurrently mutated values (e.g.
diff --git a/deps/v8/src/objects/string.cc b/deps/v8/src/objects/string.cc
index b9413618ee5..ffa1be3aa34 100644
--- a/deps/v8/src/objects/string.cc
+++ b/deps/v8/src/objects/string.cc
@@ -6,6 +6,7 @@
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
+#include "src/execution/isolate-utils.h"
#include "src/execution/thread-id.h"
#include "src/handles/handles-inl.h"
#include "src/heap/heap-inl.h"
@@ -171,7 +172,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
}
base::SharedMutexGuard<base::kExclusive> shared_mutex_guard(
- isolate->string_access());
+ isolate->internalized_string_access());
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
// string occupies is too small for a regular external string. Instead, we
@@ -249,7 +250,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
}
base::SharedMutexGuard<base::kExclusive> shared_mutex_guard(
- isolate->string_access());
+ isolate->internalized_string_access());
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
// string occupies is too small for a regular external string. Instead, we
@@ -773,7 +774,7 @@ template Handle<FixedArray> String::CalculateLineEnds(LocalIsolate* isolate,
Handle<String> src,
bool include_ending_line);
-bool String::SlowEquals(String other) {
+bool String::SlowEquals(String other) const {
DisallowGarbageCollection no_gc;
// Fast check: negative check with lengths.
int len = length();
@@ -825,6 +826,7 @@ bool String::SlowEquals(String other) {
return comparator.Equals(*this, other);
}
+// static
bool String::SlowEquals(Isolate* isolate, Handle<String> one,
Handle<String> two) {
// Fast check: negative check with lengths.
@@ -1285,7 +1287,10 @@ Object String::LastIndexOf(Isolate* isolate, Handle<Object> receiver,
}
bool String::HasOneBytePrefix(Vector<const char> str) {
- return IsEqualTo<EqualityType::kPrefix>(str);
+ DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(*this));
+ return IsEqualToImpl<EqualityType::kPrefix>(
+ str, GetPtrComprCageBase(*this),
+ SharedStringAccessGuardIfNeeded::NotNeeded());
}
namespace {
@@ -1437,7 +1442,7 @@ void SeqTwoByteString::clear_padding() {
SizeFor(length()) - data_size);
}
-uint16_t ConsString::Get(int index) {
+uint16_t ConsString::Get(int index) const {
DCHECK(index >= 0 && index < this->length());
// Check for a flattened cons string
@@ -1466,9 +1471,11 @@ uint16_t ConsString::Get(int index) {
UNREACHABLE();
}
-uint16_t ThinString::Get(int index) { return actual().Get(index); }
+uint16_t ThinString::Get(int index) const { return actual().Get(index); }
-uint16_t SlicedString::Get(int index) { return parent().Get(offset() + index); }
+uint16_t SlicedString::Get(int index) const {
+ return parent().Get(offset() + index);
+}
int ExternalString::ExternalPayloadSize() const {
int length_multiplier = IsTwoByteRepresentation() ? i::kShortSize : kCharSize;
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index 5f235fa3816..b8d47b5551f 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -44,25 +44,25 @@ class StringShape {
inline explicit StringShape(const String s);
inline explicit StringShape(Map s);
inline explicit StringShape(InstanceType t);
- inline bool IsSequential();
- inline bool IsExternal();
- inline bool IsCons();
- inline bool IsSliced();
- inline bool IsThin();
- inline bool IsIndirect();
- inline bool IsUncachedExternal();
- inline bool IsExternalOneByte();
- inline bool IsExternalTwoByte();
- inline bool IsSequentialOneByte();
- inline bool IsSequentialTwoByte();
- inline bool IsInternalized();
- inline StringRepresentationTag representation_tag();
- inline uint32_t encoding_tag();
- inline uint32_t full_representation_tag();
+ inline bool IsSequential() const;
+ inline bool IsExternal() const;
+ inline bool IsCons() const;
+ inline bool IsSliced() const;
+ inline bool IsThin() const;
+ inline bool IsIndirect() const;
+ inline bool IsUncachedExternal() const;
+ inline bool IsExternalOneByte() const;
+ inline bool IsExternalTwoByte() const;
+ inline bool IsSequentialOneByte() const;
+ inline bool IsSequentialTwoByte() const;
+ inline bool IsInternalized() const;
+ inline StringRepresentationTag representation_tag() const;
+ inline uint32_t encoding_tag() const;
+ inline uint32_t full_representation_tag() const;
#ifdef DEBUG
- inline uint32_t type() { return type_; }
+ inline uint32_t type() const { return type_; }
inline void invalidate() { valid_ = false; }
- inline bool valid() { return valid_; }
+ inline bool valid() const { return valid_; }
#else
inline void invalidate() {}
#endif
@@ -181,13 +181,13 @@ class String : public TorqueGeneratedString<String, Name> {
// SharedStringAccessGuard is not needed (i.e. on the main thread or on
// read-only strings).
template <typename Char>
- inline const Char* GetChars(const DisallowGarbageCollection& no_gc);
+ inline const Char* GetChars(const DisallowGarbageCollection& no_gc) const;
// Get chars from sequential or external strings.
template <typename Char>
inline const Char* GetChars(
const DisallowGarbageCollection& no_gc,
- const SharedStringAccessGuardIfNeeded& access_guard);
+ const SharedStringAccessGuardIfNeeded& access_guard) const;
// Returns the address of the character at an offset into this string.
// Requires: this->IsFlat()
@@ -217,8 +217,8 @@ class String : public TorqueGeneratedString<String, Name> {
// to this method are not efficient unless the string is flat.
// If it is called from a background thread, the LocalIsolate version should
// be used.
- V8_INLINE uint16_t Get(int index, Isolate* isolate = nullptr);
- V8_INLINE uint16_t Get(int index, LocalIsolate* local_isolate);
+ V8_INLINE uint16_t Get(int index, Isolate* isolate = nullptr) const;
+ V8_INLINE uint16_t Get(int index, LocalIsolate* local_isolate) const;
// ES6 section 7.1.3.1 ToNumber Applied to the String Type
static Handle<Object> ToNumber(Isolate* isolate, Handle<String> subject);
@@ -253,7 +253,7 @@ class String : public TorqueGeneratedString<String, Name> {
// Returns the parent of a sliced string or first part of a flat cons string.
// Requires: StringShape(this).IsIndirect() && this->IsFlat()
- inline String GetUnderlying();
+ inline String GetUnderlying() const;
// String relational comparison, implemented according to ES6 section 7.2.11
// Abstract Relational Comparison (step 5): The comparison of Strings uses a
@@ -314,7 +314,7 @@ class String : public TorqueGeneratedString<String, Name> {
int start_index = 0);
// String equality operations.
- inline bool Equals(String other);
+ inline bool Equals(String other) const;
inline static bool Equals(Isolate* isolate, Handle<String> one,
Handle<String> two);
@@ -326,8 +326,15 @@ class String : public TorqueGeneratedString<String, Name> {
// The Isolate is passed as "evidence" that this call is on the main thread,
// and to distinguish from the LocalIsolate overload.
template <EqualityType kEqType = EqualityType::kWholeString, typename Char>
- inline bool IsEqualTo(Vector<const Char> str,
- Isolate* isolate = nullptr) const;
+ inline bool IsEqualTo(Vector<const Char> str, Isolate* isolate) const;
+
+ // Check if this string matches the given vector of characters, either as a
+ // whole string or just a prefix.
+ //
+ // This is main-thread only, like the Isolate* overload, but additionally
+ // computes the PtrComprCageBase for IsEqualToImpl.
+ template <EqualityType kEqType = EqualityType::kWholeString, typename Char>
+ inline bool IsEqualTo(Vector<const Char> str) const;
// Check if this string matches the given vector of characters, either as a
// whole string or just a prefix.
@@ -412,7 +419,7 @@ class String : public TorqueGeneratedString<String, Name> {
DECL_PRINTER(String)
DECL_VERIFIER(String)
- inline bool IsFlat();
+ inline bool IsFlat() const;
// Max char codes.
static const int32_t kMaxOneByteCharCode = unibrow::Latin1::kMaxChar;
@@ -534,20 +541,27 @@ class String : public TorqueGeneratedString<String, Name> {
friend class InternalizedStringKey;
// Implementation of the Get() public methods. Do not use directly.
- V8_INLINE uint16_t GetImpl(int index);
+ V8_INLINE uint16_t GetImpl(int index) const;
// Implementation of the IsEqualTo() public methods. Do not use directly.
template <EqualityType kEqType, typename Char>
V8_INLINE bool IsEqualToImpl(
- Vector<const Char> str,
+ Vector<const Char> str, PtrComprCageBase cage_base,
const SharedStringAccessGuardIfNeeded& access_guard) const;
+ // Out-of-line IsEqualToImpl for ConsString.
+ template <typename Char>
+ V8_NOINLINE static bool IsConsStringEqualToImpl(
+ ConsString string, int slice_offset, Vector<const Char> str,
+ PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard);
+
V8_EXPORT_PRIVATE static Handle<String> SlowFlatten(
Isolate* isolate, Handle<ConsString> cons, AllocationType allocation);
// Slow case of String::Equals. This implementation works on any strings
// but it is most efficient on strings that are almost flat.
- V8_EXPORT_PRIVATE bool SlowEquals(String other);
+ V8_EXPORT_PRIVATE bool SlowEquals(String other) const;
V8_EXPORT_PRIVATE static bool SlowEquals(Isolate* isolate, Handle<String> one,
Handle<String> two);
@@ -619,20 +633,21 @@ class SeqOneByteString
using Char = uint8_t;
// Dispatched behavior.
- inline uint8_t Get(int index);
+ inline uint8_t Get(int index) const;
inline void SeqOneByteStringSet(int index, uint16_t value);
// Get the address of the characters in this string.
- inline Address GetCharsAddress();
+ inline Address GetCharsAddress() const;
// Get a pointer to the characters of the string. May only be called when a
// SharedStringAccessGuard is not needed (i.e. on the main thread or on
// read-only strings).
- inline uint8_t* GetChars(const DisallowGarbageCollection& no_gc);
+ inline uint8_t* GetChars(const DisallowGarbageCollection& no_gc) const;
// Get a pointer to the characters of the string.
- inline uint8_t* GetChars(const DisallowGarbageCollection& no_gc,
- const SharedStringAccessGuardIfNeeded& access_guard);
+ inline uint8_t* GetChars(
+ const DisallowGarbageCollection& no_gc,
+ const SharedStringAccessGuardIfNeeded& access_guard) const;
// Clear uninitialized padding space. This ensures that the snapshot content
// is deterministic.
@@ -664,20 +679,21 @@ class SeqTwoByteString
using Char = uint16_t;
// Dispatched behavior.
- inline uint16_t Get(int index);
+ inline uint16_t Get(int index) const;
inline void SeqTwoByteStringSet(int index, uint16_t value);
// Get the address of the characters in this string.
- inline Address GetCharsAddress();
+ inline Address GetCharsAddress() const;
// Get a pointer to the characters of the string. May only be called when a
// SharedStringAccessGuard is not needed (i.e. on the main thread or on
// read-only strings).
- inline uc16* GetChars(const DisallowGarbageCollection& no_gc);
+ inline uc16* GetChars(const DisallowGarbageCollection& no_gc) const;
// Get a pointer to the characters of the string.
- inline uc16* GetChars(const DisallowGarbageCollection& no_gc,
- const SharedStringAccessGuardIfNeeded& access_guard);
+ inline uc16* GetChars(
+ const DisallowGarbageCollection& no_gc,
+ const SharedStringAccessGuardIfNeeded& access_guard) const;
// Clear uninitialized padding space. This ensures that the snapshot content
// is deterministic.
@@ -720,7 +736,7 @@ class ConsString : public TorqueGeneratedConsString<ConsString, String> {
inline Object unchecked_second();
// Dispatched behavior.
- V8_EXPORT_PRIVATE uint16_t Get(int index);
+ V8_EXPORT_PRIVATE uint16_t Get(int index) const;
// Minimum length for a cons string.
static const int kMinLength = 13;
@@ -743,7 +759,7 @@ class ThinString : public TorqueGeneratedThinString<ThinString, String> {
public:
DECL_GETTER(unchecked_actual, HeapObject)
- V8_EXPORT_PRIVATE uint16_t Get(int index);
+ V8_EXPORT_PRIVATE uint16_t Get(int index) const;
DECL_VERIFIER(ThinString)
@@ -769,7 +785,7 @@ class SlicedString : public TorqueGeneratedSlicedString<SlicedString, String> {
inline void set_parent(String parent,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Dispatched behavior.
- V8_EXPORT_PRIVATE uint16_t Get(int index);
+ V8_EXPORT_PRIVATE uint16_t Get(int index) const;
// Minimum length for a sliced string.
static const int kMinLength = 13;
@@ -848,10 +864,10 @@ class ExternalOneByteString : public ExternalString {
// which the pointer cache has to be refreshed.
inline void update_data_cache(Isolate* isolate);
- inline const uint8_t* GetChars();
+ inline const uint8_t* GetChars() const;
// Dispatched behavior.
- inline uint8_t Get(int index);
+ inline uint8_t Get(int index) const;
DECL_CAST(ExternalOneByteString)
@@ -894,10 +910,10 @@ class ExternalTwoByteString : public ExternalString {
// which the pointer cache has to be refreshed.
inline void update_data_cache(Isolate* isolate);
- inline const uint16_t* GetChars();
+ inline const uint16_t* GetChars() const;
// Dispatched behavior.
- inline uint16_t Get(int index);
+ inline uint16_t Get(int index) const;
// For regexp code.
inline const uint16_t* ExternalTwoByteStringGetData(unsigned start);
@@ -927,9 +943,9 @@ class V8_EXPORT_PRIVATE FlatStringReader : public Relocatable {
public:
FlatStringReader(Isolate* isolate, Handle<String> str);
void PostGarbageCollection() override;
- inline uc32 Get(int index);
+ inline uc32 Get(int index) const;
template <typename Char>
- inline Char Get(int index);
+ inline Char Get(int index) const;
int length() { return length_; }
private:
diff --git a/deps/v8/src/objects/swiss-hash-table-helpers.h b/deps/v8/src/objects/swiss-hash-table-helpers.h
index db4b2d807e9..98a1abd39df 100644
--- a/deps/v8/src/objects/swiss-hash-table-helpers.h
+++ b/deps/v8/src/objects/swiss-hash-table-helpers.h
@@ -202,6 +202,11 @@ static_assert(kDeleted == -2,
// Table implementations rely on this being 7.
static constexpr int kH2Bits = 7;
+static constexpr int kNotFullMask = (1 << kH2Bits);
+static_assert(
+ kEmpty & kDeleted & kSentinel & kNotFullMask,
+ "Special markers need to have the MSB to make checking for them efficient");
+
// Extracts H1 from the given overall hash, which means discarding the lowest 7
// bits of the overall hash. H1 is used to determine the first group to probe.
inline static uint32_t H1(uint32_t hash) { return (hash >> kH2Bits); }
@@ -293,6 +298,9 @@ struct GroupPortableImpl {
: ctrl(base::ReadLittleEndianValue<uint64_t>(
reinterpret_cast<uintptr_t>(const_cast<ctrl_t*>(pos)))) {}
+ static constexpr uint64_t kMsbs = 0x8080808080808080ULL;
+ static constexpr uint64_t kLsbs = 0x0101010101010101ULL;
+
// Returns a bitmask representing the positions of slots that match |hash|.
BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
// For the technique, see:
@@ -308,22 +316,18 @@ struct GroupPortableImpl {
// v = 0x1716151413121110
// hash = 0x12
// retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
- constexpr uint64_t msbs = 0x8080808080808080ULL;
- constexpr uint64_t lsbs = 0x0101010101010101ULL;
- auto x = ctrl ^ (lsbs * hash);
- return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
+ auto x = ctrl ^ (kLsbs * hash);
+ return BitMask<uint64_t, kWidth, 3>((x - kLsbs) & ~x & kMsbs);
}
// Returns a bitmask representing the positions of empty slots.
BitMask<uint64_t, kWidth, 3> MatchEmpty() const {
- constexpr uint64_t msbs = 0x8080808080808080ULL;
- return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) & msbs);
+ return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) & kMsbs);
}
// Returns a bitmask representing the positions of empty or deleted slots.
BitMask<uint64_t, kWidth, 3> MatchEmptyOrDeleted() const {
- constexpr uint64_t msbs = 0x8080808080808080ULL;
- return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) & msbs);
+ return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) & kMsbs);
}
// Returns the number of trailing empty or deleted elements in the group.
@@ -336,10 +340,8 @@ struct GroupPortableImpl {
}
void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
- constexpr uint64_t msbs = 0x8080808080808080ULL;
- constexpr uint64_t lsbs = 0x0101010101010101ULL;
- auto x = ctrl & msbs;
- auto res = (~x + (x >> 7)) & ~lsbs;
+ auto x = ctrl & kMsbs;
+ auto res = (~x + (x >> 7)) & ~kLsbs;
base::WriteLittleEndianValue(reinterpret_cast<uint64_t*>(dst), res);
}
@@ -347,11 +349,29 @@ struct GroupPortableImpl {
};
// Determine which Group implementation SwissNameDictionary uses.
+#if defined(V8_ENABLE_SWISS_NAME_DICTIONARY) && DEBUG
+// TODO(v8:11388) If v8_enable_swiss_name_dictionary is enabled, we are supposed
+// to use SwissNameDictionary as the dictionary backing store. If we want to use
+// the SIMD version of SwissNameDictionary, that would require us to compile SSE
+// instructions into the snapshot that exceed the minimum requirements for V8
+// SSE support. Therefore, this fails a DCHECK. However, given the experimental
+// nature of v8_enable_swiss_name_dictionary mode, we only expect this to be run
+// by developers/bots that always have the necessary instructions. This means
+// that if v8_enable_swiss_name_dictionary is enabled and debug mode isn't, we
+// ignore the DCHECK that would fail in debug mode. However, if both
+// v8_enable_swiss_name_dictionary and debug mode are enabled, we must fall back
+// to the non-SSE implementation. Given that V8 requires SSE2, there should be a
+// solution that doesn't require the workaround present here. Instead, the
+// backend should only use SSE2 when compiling the SIMD version of
+// SwissNameDictionary into the builtin.
+using Group = GroupPortableImpl;
+#else
#if SWISS_TABLE_HAVE_SSE2
using Group = GroupSse2Impl;
#else
using Group = GroupPortableImpl;
#endif
+#endif
#undef SWISS_TABLE_HAVE_SSE2
#undef SWISS_TABLE_HAVE_SSE3
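
The kMsbs/kLsbs hoisting above feeds GroupPortableImpl's SWAR ("SIMD within a register") match: XOR-ing the control word with the broadcast hash fragment turns matching bytes into zero, and (x - kLsbs) & ~x & kMsbs then raises the most significant bit of exactly those zero bytes, which is also why the special markers need their MSB set (see the new static_assert). The following standalone C++ sketch demonstrates the same trick on made-up control bytes; it is illustrative only and not part of this patch.

// swar_match_demo.cc - standalone sketch of the byte-wise match trick used by
// GroupPortableImpl::Match above. Assumes a little-endian host.
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  constexpr uint64_t kLsbs = 0x0101010101010101ULL;  // 0x01 in every byte
  constexpr uint64_t kMsbs = 0x8080808080808080ULL;  // 0x80 in every byte

  // Eight control bytes; the ones at index 2 and 5 carry the value we seek.
  const uint8_t ctrl_bytes[8] = {0x10, 0x11, 0x12, 0x15, 0x14, 0x12, 0x16, 0x17};
  uint64_t ctrl;
  std::memcpy(&ctrl, ctrl_bytes, sizeof(ctrl));

  const uint64_t h2 = 0x12;  // 7-bit hash fragment we are looking for
  // Matching bytes become zero after the XOR; the subtract/AND sequence then
  // sets the MSB of exactly those bytes. A borrow can produce a false positive
  // directly after a true match (a byte differing only in its lowest bit);
  // callers recheck the key, so that is harmless.
  const uint64_t x = ctrl ^ (kLsbs * h2);
  const uint64_t match = (x - kLsbs) & ~x & kMsbs;

  for (int i = 0; i < 8; i++) {
    if (match & (0x80ULL << (8 * i))) std::printf("byte %d matches\n", i);
  }
  return 0;  // prints "byte 2 matches" and "byte 5 matches"
}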
diff --git a/deps/v8/src/objects/swiss-hash-table-helpers.tq b/deps/v8/src/objects/swiss-hash-table-helpers.tq
new file mode 100644
index 00000000000..627fde72979
--- /dev/null
+++ b/deps/v8/src/objects/swiss-hash-table-helpers.tq
@@ -0,0 +1,174 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Note that most structs and macros in this file have 1:1 C++ counterparts in
+// the corresponding .h file.
+
+#include 'src/objects/swiss-hash-table-helpers.h'
+
+namespace swiss_table {
+
+const kGroupWidth:
+ constexpr int32 generates 'swiss_table::Group::kWidth';
+
+const kUseSIMD:
+ constexpr bool generates 'swiss_table::Group::kWidth == 16';
+
+namespace ctrl {
+const kEmpty: constexpr uint8
+ generates 'static_cast<uint8_t>(swiss_table::Ctrl::kEmpty)';
+
+const kDeleted: constexpr uint8
+ generates 'static_cast<uint8_t>(swiss_table::Ctrl::kDeleted)';
+}
+
+const kH2Bits: constexpr int32 generates 'swiss_table::kH2Bits';
+const kH2Mask:
+ constexpr uint32 generates '((1 << swiss_table::kH2Bits) - 1)';
+
+extern macro LoadSwissNameDictionaryCtrlTableGroup(intptr): uint64;
+
+// Counterpart to swiss_table::ProbeSequence in C++ implementation.
+struct ProbeSequence {
+ macro Next() {
+ this.index = this.index + Unsigned(FromConstexpr<int32>(kGroupWidth));
+ this.offset = (this.offset + this.index) & this.mask;
+ }
+
+ macro Offset(index: int32): uint32 {
+ return (this.offset + Unsigned(index)) & this.mask;
+ }
+
+ mask: uint32;
+ offset: uint32;
+ index: uint32;
+}
+
+macro ClearLowestSetBit<T: type>(value: T): T {
+ return value & (value - FromConstexpr<T>(1));
+}
+
+const kByteMaskShift: uint64 = 3;
+
+// Counterpart to swiss_table::BitMask<uint64_t, kWidth, 3>, as used by
+// swiss_table::GroupPortableImpl in C++ implementation.
+struct ByteMask {
+ macro HasBitsSet(): bool {
+ return this.mask != FromConstexpr<uint64>(0);
+ }
+
+ macro LowestBitSet(): int32 {
+ return Convert<int32>(
+ CountTrailingZeros64(this.mask) >> Signed(kByteMaskShift));
+ }
+
+ // Counterpart to operator++() in C++ version.
+ macro ClearLowestSetBit() {
+ this.mask = ClearLowestSetBit<uint64>(this.mask);
+ }
+
+ mask: uint64;
+}
+
+// Counterpart to swiss_table::BitMask<uint32t, kWidth, 0>, as used by
+// swiss_table::GroupSse2Impl in C++ implementation.
+struct BitMask {
+ macro HasBitsSet(): bool {
+ return this.mask != FromConstexpr<uint32>(0);
+ }
+
+ macro LowestBitSet(): int32 {
+ return Convert<int32>(CountTrailingZeros32(this.mask));
+ }
+
+ // Counterpart to operator++() in C++ version.
+ macro ClearLowestSetBit() {
+ this.mask = ClearLowestSetBit<uint32>(this.mask);
+ }
+
+ mask: uint32;
+}
+
+macro H1(hash: uint32): uint32 {
+ return hash >>> Unsigned(FromConstexpr<int32>(kH2Bits));
+}
+
+macro H2(hash: uint32): uint32 {
+ return hash & kH2Mask;
+}
+
+const kLsbs: constexpr uint64
+ generates 'swiss_table::GroupPortableImpl::kLsbs';
+const kMsbs: constexpr uint64
+ generates 'swiss_table::GroupPortableImpl::kMsbs';
+
+// Counterpart to swiss_table::GroupPortableImpl in C++.
+struct GroupPortableImpl {
+ macro Match(h2: uint32): ByteMask {
+ const x = Word64Xor(this.ctrl, (kLsbs * Convert<uint64>(h2)));
+ const result = (x - kLsbs) & ~x & kMsbs;
+ return ByteMask{mask: result};
+ }
+
+ macro MatchEmpty(): ByteMask {
+ const result = ((this.ctrl & (~this.ctrl << 6)) & kMsbs);
+ return ByteMask{mask: result};
+ }
+
+ const ctrl: uint64;
+}
+
+// Counterpart to swiss_table::GroupSse2Impl in C++. Note that the name is
+// chosen for consistency; this struct is not actually SSE-specific.
+struct GroupSse2Impl {
+ macro Match(h2: uint32): BitMask {
+ // Fill 16 8-bit lanes with |h2|:
+ const searchPattern = I8x16Splat(Signed(h2));
+ // Create a 128 bit mask such that in each of the 16 8-bit lanes, the MSB
+ // indicates whether or not the corresponding lanes of |this.ctrl| and
+ // |searchPattern| have the same value:
+ const matches128 = I8x16Eq(searchPattern, this.ctrl);
+ // Turn the 128 bit mask into a 32 bit one, by turning the MSB of the i-th
+ // lane into the i-th bit in the output mask:
+ const matches32 = Unsigned(I8x16BitMask(matches128));
+ return BitMask{mask: matches32};
+ }
+
+ macro MatchEmpty(): BitMask {
+ // TODO(v8:11330) The C++ implementation in
+ // swiss_table::GroupSse2Impl::MatchEmpty utilizes a special trick that is
+ // possible due to kEmpty being -128 and allows shaving off one SSE
+ // instruction. This depends on having access to _mm_cmpeq_epi8 aka PCMPEQB,
+ // which the V8 backend currently doesn't expose.
+
+ // Fill 16 8-bit lanes with |kEmpty|:
+ const searchPattern =
+ I8x16Splat(Convert<int32>(FromConstexpr<uint8>(ctrl::kEmpty)));
+ // Create a 128 bit mask such that in each of the 16 8-bit lanes, the MSB
+ // indicates whether or not the corresponding lanes of |this.ctrl| contains
+ // |kEmpty|:
+ const matches128 = I8x16Eq(searchPattern, this.ctrl);
+ // Turn the 128 bit mask into a 32 bit one, by turning the MSB of the i-th
+ // lane into the i-th bit in the output mask:
+ const matches32 = Unsigned(I8x16BitMask(matches128));
+ return BitMask{mask: matches32};
+ }
+
+ const ctrl: I8X16;
+}
+
+struct GroupPortableLoader {
+ macro LoadGroup(ctrlPtr: intptr): GroupPortableImpl {
+ return GroupPortableImpl{
+ ctrl: LoadSwissNameDictionaryCtrlTableGroup(ctrlPtr)
+ };
+ }
+}
+
+struct GroupSse2Loader {
+ macro LoadGroup(ctrlPtr: intptr): GroupSse2Impl {
+ return GroupSse2Impl{ctrl: Convert<I8X16>(LoadSimd128(ctrlPtr))};
+ }
+}
+}
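
The GroupSse2Impl macros above express the 16-wide match as I8x16Splat, I8x16Eq and I8x16BitMask. For readers who think in intrinsics, the same splat/compare/movemask sequence can be sketched with plain SSE2; this is only an illustration of the technique, not code from this patch, and MatchH2 is a made-up helper name.

// sse2_group_match_demo.cc - sketch of a 16-byte group match via SSE2,
// mirroring the splat/compare/movemask steps of the Torque macros above.
#include <emmintrin.h>  // SSE2 intrinsics

#include <cstdint>
#include <cstdio>

// Returns a 16-bit mask with bit i set iff ctrl[i] == h2.
static uint32_t MatchH2(const uint8_t* ctrl, uint8_t h2) {
  const __m128i group = _mm_loadu_si128(reinterpret_cast<const __m128i*>(ctrl));
  const __m128i pattern = _mm_set1_epi8(static_cast<char>(h2));  // splat
  const __m128i equal = _mm_cmpeq_epi8(group, pattern);          // per-lane ==
  return static_cast<uint32_t>(_mm_movemask_epi8(equal));        // MSBs -> bits
}

int main() {
  uint8_t ctrl[16] = {0};
  ctrl[3] = 0x2a;
  ctrl[9] = 0x2a;
  std::printf("mask = 0x%04x\n", MatchH2(ctrl, 0x2a));  // 0x0208: bits 3 and 9
  return 0;
}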
diff --git a/deps/v8/src/objects/swiss-name-dictionary-inl.h b/deps/v8/src/objects/swiss-name-dictionary-inl.h
index e6264a0bc3d..343abfc8cc3 100644
--- a/deps/v8/src/objects/swiss-name-dictionary-inl.h
+++ b/deps/v8/src/objects/swiss-name-dictionary-inl.h
@@ -49,19 +49,19 @@ void SwissNameDictionary::SetCapacity(int capacity) {
}
int SwissNameDictionary::NumberOfElements() {
- return GetMetaTableField(kMetaTableElementCountOffset);
+ return GetMetaTableField(kMetaTableElementCountFieldIndex);
}
int SwissNameDictionary::NumberOfDeletedElements() {
- return GetMetaTableField(kMetaTableDeletedElementCountOffset);
+ return GetMetaTableField(kMetaTableDeletedElementCountFieldIndex);
}
void SwissNameDictionary::SetNumberOfElements(int elements) {
- SetMetaTableField(kMetaTableElementCountOffset, elements);
+ SetMetaTableField(kMetaTableElementCountFieldIndex, elements);
}
void SwissNameDictionary::SetNumberOfDeletedElements(int deleted_elements) {
- SetMetaTableField(kMetaTableDeletedElementCountOffset, deleted_elements);
+ SetMetaTableField(kMetaTableDeletedElementCountFieldIndex, deleted_elements);
}
int SwissNameDictionary::UsedCapacity() {
@@ -132,7 +132,7 @@ int SwissNameDictionary::CapacityFor(int at_least_space_for) {
int SwissNameDictionary::EntryForEnumerationIndex(int enumeration_index) {
DCHECK_LT(enumeration_index, UsedCapacity());
- return GetMetaTableField(kMetaTableEnumerationTableStartOffset +
+ return GetMetaTableField(kMetaTableEnumerationDataStartIndex +
enumeration_index);
}
@@ -142,7 +142,7 @@ void SwissNameDictionary::SetEntryForEnumerationIndex(int enumeration_index,
DCHECK_LT(static_cast<unsigned>(entry), static_cast<unsigned>(Capacity()));
DCHECK(IsFull(GetCtrl(entry)));
- SetMetaTableField(kMetaTableEnumerationTableStartOffset + enumeration_index,
+ SetMetaTableField(kMetaTableEnumerationDataStartIndex + enumeration_index,
entry);
}
@@ -219,15 +219,15 @@ InternalIndex SwissNameDictionary::FindEntry(LocalIsolate* isolate,
}
Object SwissNameDictionary::LoadFromDataTable(int entry, int data_offset) {
- return LoadFromDataTable(GetIsolateForPtrCompr(*this), entry, data_offset);
+ return LoadFromDataTable(GetPtrComprCageBase(*this), entry, data_offset);
}
-Object SwissNameDictionary::LoadFromDataTable(IsolateRoot isolate, int entry,
- int data_offset) {
+Object SwissNameDictionary::LoadFromDataTable(PtrComprCageBase cage_base,
+ int entry, int data_offset) {
DCHECK_LT(static_cast<unsigned>(entry), static_cast<unsigned>(Capacity()));
int offset = DataTableStartOffset() +
(entry * kDataTableEntryCount + data_offset) * kTaggedSize;
- return TaggedField<Object>::Relaxed_Load(isolate, *this, offset);
+ return TaggedField<Object>::Relaxed_Load(cage_base, *this, offset);
}
void SwissNameDictionary::StoreToDataTable(int entry, int data_offset,
@@ -317,6 +317,21 @@ PropertyDetails SwissNameDictionary::DetailsAt(InternalIndex entry) {
return DetailsAt(entry.as_int());
}
+// static
+template <typename LocalIsolate>
+Handle<SwissNameDictionary> SwissNameDictionary::EnsureGrowable(
+ LocalIsolate* isolate, Handle<SwissNameDictionary> table) {
+ int capacity = table->Capacity();
+
+ if (table->UsedCapacity() < MaxUsableCapacity(capacity)) {
+ // We have room for at least one more entry, nothing to do.
+ return table;
+ }
+
+ int new_capacity = capacity == 0 ? kInitialCapacity : capacity * 2;
+ return Rehash(isolate, table, new_capacity);
+}
+
swiss_table::ctrl_t SwissNameDictionary::GetCtrl(int entry) {
DCHECK_LT(static_cast<unsigned>(entry), static_cast<unsigned>(Capacity()));
@@ -352,6 +367,25 @@ void SwissNameDictionary::SetCtrl(int entry, ctrl_t h) {
ctrl[copy_entry] = h;
}
+// static
+inline int SwissNameDictionary::FindFirstEmpty(uint32_t hash) {
+ // See SwissNameDictionary::FindEntry for description of probing algorithm.
+
+ auto seq = probe(hash, Capacity());
+ while (true) {
+ Group g{CtrlTable() + seq.offset()};
+ auto mask = g.MatchEmpty();
+ if (mask) {
+ // Note that picking the lowest bit set here means using the leftmost
+ // empty bucket in the group. Here, "left" means smaller entry/bucket
+ // index.
+ return seq.offset(mask.LowestBitSet());
+ }
+ seq.next();
+ DCHECK_LT(seq.index(), Capacity());
+ }
+}
+
void SwissNameDictionary::SetMetaTableField(int field_index, int value) {
// See the STATIC_ASSERTs on |kMax1ByteMetaTableCapacity| and
// |kMax2ByteMetaTableCapacity| in the .cc file for an explanation of these
@@ -453,6 +487,57 @@ bool SwissNameDictionary::ToKey(ReadOnlyRoots roots, InternalIndex entry,
return ToKey(roots, entry.as_int(), out_key);
}
+// static
+template <typename LocalIsolate>
+Handle<SwissNameDictionary> SwissNameDictionary::Add(
+ LocalIsolate* isolate, Handle<SwissNameDictionary> original_table,
+ Handle<Name> key, Handle<Object> value, PropertyDetails details,
+ InternalIndex* entry_out) {
+ DCHECK(original_table->FindEntry(isolate, *key).is_not_found());
+
+ Handle<SwissNameDictionary> table = EnsureGrowable(isolate, original_table);
+
+ int nof = table->NumberOfElements();
+ int nod = table->NumberOfDeletedElements();
+ int new_enum_index = nof + nod;
+
+ int new_entry = table->AddInternal(*key, *value, details);
+
+ table->SetNumberOfElements(nof + 1);
+ table->SetEntryForEnumerationIndex(new_enum_index, new_entry);
+
+ if (entry_out) {
+ *entry_out = InternalIndex(new_entry);
+ }
+
+ return table;
+}
+
+int SwissNameDictionary::AddInternal(Name key, Object value,
+ PropertyDetails details) {
+ DisallowHeapAllocation no_gc;
+
+ DCHECK(key.IsUniqueName());
+ DCHECK_LE(UsedCapacity(), MaxUsableCapacity(Capacity()));
+
+ uint32_t hash = key.hash();
+
+ // For now we don't re-use deleted buckets (due to enumeration table
+ // complications), which is why we only look for empty buckets here, not
+ // deleted ones.
+ int target = FindFirstEmpty(hash);
+
+ SetCtrl(target, swiss_table::H2(hash));
+ SetKey(target, key);
+ ValueAtPut(target, value);
+ DetailsAtPut(target, details);
+
+ // Note that we do not update the number of elements or the enumeration table
+ // in this function.
+
+ return target;
+}
+
template <typename LocalIsolate>
void SwissNameDictionary::Initialize(LocalIsolate* isolate,
ByteArray meta_table, int capacity) {
@@ -651,7 +736,7 @@ SwissNameDictionary::probe(uint32_t hash, int capacity) {
ACCESSORS_CHECKED2(SwissNameDictionary, meta_table, ByteArray,
MetaTablePointerOffset(), true,
- value.length() >= kMetaTableEnumerationTableStartOffset)
+ value.length() >= kMetaTableEnumerationDataStartIndex)
} // namespace internal
} // namespace v8
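
FindEntry and FindFirstEmpty above both walk the control table one group at a time, starting at the bucket selected by H1(hash) and advancing by a growing multiple of the group width (the probe() helper and ProbeSequence). With a power-of-two capacity this triangular step eventually visits every group. The minimal sketch below restates the probing pattern; the constants are illustrative and the loop is cut short instead of stopping at a match or an empty slot as the real code does.

// probe_demo.cc - sketch of the group-wise probing used by FindEntry /
// FindFirstEmpty above. Capacity must be a power of two for the mask to work.
#include <cstdint>
#include <cstdio>

constexpr uint32_t kH2Bits = 7;
constexpr uint32_t kGroupWidth = 8;  // portable group width, for illustration

uint32_t H1(uint32_t hash) { return hash >> kH2Bits; }               // start group
uint32_t H2(uint32_t hash) { return hash & ((1u << kH2Bits) - 1); }  // 7-bit tag

int main() {
  const uint32_t capacity = 32;
  const uint32_t mask = capacity - 1;
  const uint32_t hash = 0xDEADBEEFu;

  uint32_t offset = H1(hash) & mask;
  uint32_t index = 0;
  std::printf("control byte tag: 0x%02x\n", H2(hash));
  // The real loop stops as soon as a group reports a key match or an empty
  // slot; here we just show the first few probe positions.
  for (int step = 0; step < 4; ++step) {
    std::printf("probe group starting at bucket %u\n", offset);
    index += kGroupWidth;              // 8, 16, 24, ...
    offset = (offset + index) & mask;  // triangular (quadratic) stride
  }
  return 0;
}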
diff --git a/deps/v8/src/objects/swiss-name-dictionary.cc b/deps/v8/src/objects/swiss-name-dictionary.cc
index 89053d18180..5b567aeaeed 100644
--- a/deps/v8/src/objects/swiss-name-dictionary.cc
+++ b/deps/v8/src/objects/swiss-name-dictionary.cc
@@ -10,6 +10,274 @@
namespace v8 {
namespace internal {
+// static
+Handle<SwissNameDictionary> SwissNameDictionary::DeleteEntry(
+ Isolate* isolate, Handle<SwissNameDictionary> table, InternalIndex entry) {
+ // GetCtrl() does the bounds check.
+ DCHECK(IsFull(table->GetCtrl(entry.as_int())));
+
+ int i = entry.as_int();
+
+ table->SetCtrl(i, Ctrl::kDeleted);
+ table->ClearDataTableEntry(isolate, i);
+ // We leave the PropertyDetails unchanged because they are not relevant for
+ // GC.
+
+ int nof = table->NumberOfElements();
+ table->SetNumberOfElements(nof - 1);
+ int nod = table->NumberOfDeletedElements();
+ table->SetNumberOfDeletedElements(nod + 1);
+
+ // TODO(v8:11388) Abseil's flat_hash_map doesn't shrink on deletion, but may
+ // decide on addition to do an in-place rehash to remove deleted elements. We
+ // shrink on deletion here to follow what NameDictionary and
+ // OrderedNameDictionary do. We should investigate which approach works
+ // better.
+ return Shrink(isolate, table);
+}
+
+// static
+template <typename LocalIsolate>
+Handle<SwissNameDictionary> SwissNameDictionary::Rehash(
+ LocalIsolate* isolate, Handle<SwissNameDictionary> table,
+ int new_capacity) {
+ DCHECK(IsValidCapacity(new_capacity));
+ DCHECK_LE(table->NumberOfElements(), MaxUsableCapacity(new_capacity));
+ ReadOnlyRoots roots(isolate);
+
+ Handle<SwissNameDictionary> new_table =
+ isolate->factory()->NewSwissNameDictionaryWithCapacity(
+ new_capacity, Heap::InYoungGeneration(*table) ? AllocationType::kYoung
+ : AllocationType::kOld);
+
+ DisallowHeapAllocation no_gc;
+
+ int new_enum_index = 0;
+ new_table->SetNumberOfElements(table->NumberOfElements());
+ for (int enum_index = 0; enum_index < table->UsedCapacity(); ++enum_index) {
+ int entry = table->EntryForEnumerationIndex(enum_index);
+
+ Object key;
+
+ if (table->ToKey(roots, entry, &key)) {
+ Object value = table->ValueAtRaw(entry);
+ PropertyDetails details = table->DetailsAt(entry);
+
+ int new_entry = new_table->AddInternal(Name::cast(key), value, details);
+
+      // TODO(v8:11388) Investigate ways of hoisting the branching needed to
+ // select the correct meta table entry size (based on the capacity of the
+ // table) out of the loop.
+ new_table->SetEntryForEnumerationIndex(new_enum_index, new_entry);
+ ++new_enum_index;
+ }
+ }
+
+ new_table->SetHash(table->Hash());
+ return new_table;
+}
+
+bool SwissNameDictionary::EqualsForTesting(SwissNameDictionary other) {
+ if (Capacity() != other.Capacity() ||
+ NumberOfElements() != other.NumberOfElements() ||
+ NumberOfDeletedElements() != other.NumberOfDeletedElements() ||
+ Hash() != other.Hash()) {
+ return false;
+ }
+
+ for (int i = 0; i < Capacity() + kGroupWidth; i++) {
+ if (CtrlTable()[i] != other.CtrlTable()[i]) {
+ return false;
+ }
+ }
+ for (int i = 0; i < Capacity(); i++) {
+ if (KeyAt(i) != other.KeyAt(i) || ValueAtRaw(i) != other.ValueAtRaw(i)) {
+ return false;
+ }
+ if (IsFull(GetCtrl(i))) {
+ if (DetailsAt(i) != other.DetailsAt(i)) return false;
+ }
+ }
+ for (int i = 0; i < UsedCapacity(); i++) {
+ if (EntryForEnumerationIndex(i) != other.EntryForEnumerationIndex(i)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// static
+Handle<SwissNameDictionary> SwissNameDictionary::ShallowCopy(
+ Isolate* isolate, Handle<SwissNameDictionary> table) {
+ // TODO(v8:11388) Consider doing some cleanup during copying: For example, we
+ // could turn kDeleted into kEmpty in certain situations. But this would
+ // require tidying up the enumeration table in a similar fashion as would be
+ // required when trying to re-use deleted entries.
+
+ if (table->Capacity() == 0) {
+ return table;
+ }
+
+ int capacity = table->Capacity();
+ int used_capacity = table->UsedCapacity();
+
+ Handle<SwissNameDictionary> new_table =
+ isolate->factory()->NewSwissNameDictionaryWithCapacity(
+ capacity, Heap::InYoungGeneration(*table) ? AllocationType::kYoung
+ : AllocationType::kOld);
+
+ new_table->SetHash(table->Hash());
+
+ DisallowGarbageCollection no_gc;
+ WriteBarrierMode mode = new_table->GetWriteBarrierMode(no_gc);
+
+ if (mode == WriteBarrierMode::SKIP_WRITE_BARRIER) {
+ // Copy data table and ctrl table, which are stored next to each other.
+ void* original_start =
+ reinterpret_cast<void*>(table->field_address(DataTableStartOffset()));
+ void* new_table_start = reinterpret_cast<void*>(
+ new_table->field_address(DataTableStartOffset()));
+ size_t bytes_to_copy = DataTableSize(capacity) + CtrlTableSize(capacity);
+ DCHECK(DataTableEndOffset(capacity) == CtrlTableStartOffset(capacity));
+ MemCopy(new_table_start, original_start, bytes_to_copy);
+ } else {
+ DCHECK_EQ(UPDATE_WRITE_BARRIER, mode);
+
+ // We may have to trigger write barriers when copying the data table.
+ for (int i = 0; i < capacity; ++i) {
+ Object key = table->KeyAt(i);
+ Object value = table->ValueAtRaw(i);
+
+ // Cannot use SetKey/ValueAtPut because they don't accept the hole as data
+ // to store.
+ new_table->StoreToDataTable(i, kDataTableKeyEntryIndex, key);
+ new_table->StoreToDataTable(i, kDataTableValueEntryIndex, value);
+ }
+
+ void* original_ctrl_table = table->CtrlTable();
+ void* new_ctrl_table = new_table->CtrlTable();
+ MemCopy(new_ctrl_table, original_ctrl_table, CtrlTableSize(capacity));
+ }
+
+ // PropertyDetails table may contain uninitialized data for unused slots.
+ for (int i = 0; i < capacity; ++i) {
+ if (IsFull(table->GetCtrl(i))) {
+ new_table->DetailsAtPut(i, table->DetailsAt(i));
+ }
+ }
+
+ // Meta table is only initialized for the first 2 + UsedCapacity() entries,
+ // where size of each entry depends on table capacity.
+ int size_per_meta_table_entry = MetaTableSizePerEntryFor(capacity);
+ int meta_table_used_bytes = (2 + used_capacity) * size_per_meta_table_entry;
+ new_table->meta_table().copy_in(0, table->meta_table().GetDataStartAddress(),
+ meta_table_used_bytes);
+
+ return new_table;
+}
+
+// static
+Handle<SwissNameDictionary> SwissNameDictionary::Shrink(
+ Isolate* isolate, Handle<SwissNameDictionary> table) {
+ // TODO(v8:11388) We're using the same logic to decide whether or not to
+ // shrink as OrderedNameDictionary and NameDictionary here. We should compare
+ // this with the logic used by Abseil's flat_hash_map, which has a heuristic
+ // for triggering an (in-place) rehash on addition, but never shrinks the
+  // table. Abseil's heuristic doesn't take the number of deleted elements into
+ // account, because it doesn't track that.
+
+ int nof = table->NumberOfElements();
+ int capacity = table->Capacity();
+ if (nof >= (capacity >> 2)) return table;
+ int new_capacity = std::max(capacity / 2, kInitialCapacity);
+ return Rehash(isolate, table, new_capacity);
+}
+
+// TODO(v8:11388) Copying all data into a std::vector and then re-adding into
+// the table doesn't seem like a good algorithm. Abseil's Swiss Tables come with
+// a clever algorithm for re-hashing in place: It first changes the control
+// table, effectively changing the roles of full, empty and deleted buckets. It
+// then moves each entry to its new bucket by swapping entries (see
+// drop_deletes_without_resize in Abseil's raw_hash_set.h). This algorithm could
+// generally be adapted to work on our insertion order preserving implementation,
+// too. However, it would require a mapping from hash table buckets back to
+// enumeration indices. This could either be created in this function
+// (requiring a vector with Capacity() entries and a separate pass over the
+// enumeration table) or by creating this backwards mapping ahead of time and
+// storing it somewhere in the main table or the meta table, for those
+// SwissNameDictionaries that we know will be in-place rehashed, most notably
+// those stored in the snapshot.
+void SwissNameDictionary::Rehash(Isolate* isolate) {
+ DisallowHeapAllocation no_gc;
+
+ struct Entry {
+ Name key;
+ Object value;
+ PropertyDetails details;
+ };
+
+ if (Capacity() == 0) return;
+
+ Entry dummy{Name(), Object(), PropertyDetails::Empty()};
+ std::vector<Entry> data(NumberOfElements(), dummy);
+
+ ReadOnlyRoots roots(isolate);
+ int data_index = 0;
+ for (int enum_index = 0; enum_index < UsedCapacity(); ++enum_index) {
+ int entry = EntryForEnumerationIndex(enum_index);
+ Object key;
+ if (!ToKey(roots, entry, &key)) continue;
+
+ data[data_index++] =
+ Entry{Name::cast(key), ValueAtRaw(entry), DetailsAt(entry)};
+ }
+
+ Initialize(isolate, meta_table(), Capacity());
+
+ int new_enum_index = 0;
+ SetNumberOfElements(static_cast<int>(data.size()));
+ for (Entry& e : data) {
+ int new_entry = AddInternal(e.key, e.value, e.details);
+
+    // TODO(v8:11388) Investigate ways of hoisting the branching needed to
+ // select the correct meta table entry size (based on the capacity of the
+ // table) out of the loop.
+ SetEntryForEnumerationIndex(new_enum_index, new_entry);
+ ++new_enum_index;
+ }
+}
+
+// TODO(emrich,v8:11388): This is almost an identical copy of
+// HashTable<..>::NumberOfEnumerableProperties. Consolidate both versions
+// elsewhere (e.g., hash-table-utils)?
+int SwissNameDictionary::NumberOfEnumerableProperties() {
+ ReadOnlyRoots roots = this->GetReadOnlyRoots();
+ int result = 0;
+ for (InternalIndex i : this->IterateEntries()) {
+ Object k;
+ if (!this->ToKey(roots, i, &k)) continue;
+ if (k.FilterKey(ENUMERABLE_STRINGS)) continue;
+ PropertyDetails details = this->DetailsAt(i);
+ PropertyAttributes attr = details.attributes();
+ if ((attr & ONLY_ENUMERABLE) == 0) result++;
+ }
+ return result;
+}
+
+// TODO(emrich, v8:11388): This is almost an identical copy of
+// Dictionary<..>::SlowReverseLookup. Consolidate both versions elsewhere (e.g.,
+// hash-table-utils)?
+Object SwissNameDictionary::SlowReverseLookup(Isolate* isolate, Object value) {
+ ReadOnlyRoots roots(isolate);
+ for (InternalIndex i : IterateEntries()) {
+ Object k;
+ if (!ToKey(roots, i, &k)) continue;
+ Object e = this->ValueAt(i);
+ if (e == value) return k;
+ }
+ return roots.undefined_value();
+}
+
// The largest value we ever have to store in the enumeration table is
// Capacity() - 1. The largest value we ever have to store for the present or
// deleted element count is MaxUsableCapacity(Capacity()). All data in the
@@ -26,12 +294,21 @@ STATIC_ASSERT(SwissNameDictionary::MaxUsableCapacity(
SwissNameDictionary::kMax2ByteMetaTableCapacity) <=
std::numeric_limits<uint16_t>::max());
-template void SwissNameDictionary::Initialize(Isolate* isolate,
- ByteArray meta_table,
- int capacity);
-template void SwissNameDictionary::Initialize(LocalIsolate* isolate,
- ByteArray meta_table,
- int capacity);
+template V8_EXPORT_PRIVATE void SwissNameDictionary::Initialize(
+ Isolate* isolate, ByteArray meta_table, int capacity);
+template V8_EXPORT_PRIVATE void SwissNameDictionary::Initialize(
+ LocalIsolate* isolate, ByteArray meta_table, int capacity);
+
+template V8_EXPORT_PRIVATE Handle<SwissNameDictionary>
+SwissNameDictionary::Rehash(LocalIsolate* isolate,
+ Handle<SwissNameDictionary> table,
+ int new_capacity);
+template V8_EXPORT_PRIVATE Handle<SwissNameDictionary>
+SwissNameDictionary::Rehash(Isolate* isolate, Handle<SwissNameDictionary> table,
+ int new_capacity);
+
+constexpr int SwissNameDictionary::kInitialCapacity;
+constexpr int SwissNameDictionary::kGroupWidth;
} // namespace internal
} // namespace v8
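
EnsureGrowable (in swiss-name-dictionary-inl.h above) and Shrink are the two halves of the resizing policy: double the capacity once the used capacity (present plus deleted buckets) reaches MaxUsableCapacity, and halve it once fewer than a quarter of the buckets hold present elements, never dropping below the initial capacity. The standalone sketch below restates those thresholds; MaxUsableCapacity is simplified (the small-capacity special case is dropped) and the code is illustrative, not the V8 implementation.

// resize_policy_demo.cc - sketch of the grow/shrink thresholds used by
// EnsureGrowable and Shrink above.
#include <algorithm>
#include <cstdio>

constexpr int kInitialCapacity = 4;

// Simplified: capacity - capacity / 8, ignoring the small-capacity special case.
int MaxUsableCapacity(int capacity) { return capacity - capacity / 8; }

// Capacity to rehash to before an insertion (unchanged if there is still room).
int GrowTarget(int capacity, int used_capacity) {
  if (used_capacity < MaxUsableCapacity(capacity)) return capacity;
  return capacity == 0 ? kInitialCapacity : capacity * 2;
}

// Capacity to rehash to after a deletion (unchanged while at least a quarter full).
int ShrinkTarget(int capacity, int number_of_elements) {
  if (number_of_elements >= (capacity >> 2)) return capacity;
  return std::max(capacity / 2, kInitialCapacity);
}

int main() {
  std::printf("%d\n", GrowTarget(0, 0));     // 4: first insertion allocates
  std::printf("%d\n", GrowTarget(16, 13));   // 16: 13 < 14, still room
  std::printf("%d\n", GrowTarget(16, 14));   // 32: used capacity hit the limit
  std::printf("%d\n", ShrinkTarget(32, 7));  // 16: fewer than 8 present entries
  std::printf("%d\n", ShrinkTarget(8, 1));   // 4: clamped to the initial capacity
  return 0;
}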
diff --git a/deps/v8/src/objects/swiss-name-dictionary.h b/deps/v8/src/objects/swiss-name-dictionary.h
index 40466c441c9..9ab225dd349 100644
--- a/deps/v8/src/objects/swiss-name-dictionary.h
+++ b/deps/v8/src/objects/swiss-name-dictionary.h
@@ -68,11 +68,23 @@ namespace internal {
// contains the number of the bucket representing the i-th entry of the
// table in enumeration order. Entries may contain uninitialized data if the
// corresponding bucket hasn't been used before.
-class SwissNameDictionary : public HeapObject {
+class V8_EXPORT_PRIVATE SwissNameDictionary : public HeapObject {
public:
using Group = swiss_table::Group;
template <typename LocalIsolate>
+ inline static Handle<SwissNameDictionary> Add(
+ LocalIsolate* isolate, Handle<SwissNameDictionary> table,
+ Handle<Name> key, Handle<Object> value, PropertyDetails details,
+ InternalIndex* entry_out = nullptr);
+
+ static Handle<SwissNameDictionary> Shrink(Isolate* isolate,
+ Handle<SwissNameDictionary> table);
+
+ static Handle<SwissNameDictionary> DeleteEntry(
+ Isolate* isolate, Handle<SwissNameDictionary> table, InternalIndex entry);
+
+ template <typename LocalIsolate>
inline InternalIndex FindEntry(LocalIsolate* isolate, Object key);
// This is to make the interfaces of NameDictionary::FindEntry and
@@ -100,12 +112,31 @@ class SwissNameDictionary : public HeapObject {
inline int Capacity();
inline int UsedCapacity();
+ int NumberOfEnumerableProperties();
+
+ static Handle<SwissNameDictionary> ShallowCopy(
+ Isolate* isolate, Handle<SwissNameDictionary> table);
+
+ // Strict in the sense that it checks that all used/initialized memory in
+ // |this| and |other| is the same. The only exceptions are the meta table
+ // pointer (which must differ between the two tables) and PropertyDetails of
+ // deleted entries (which reside in initialized memory, but are not compared).
+ bool EqualsForTesting(SwissNameDictionary other);
+
template <typename LocalIsolate>
void Initialize(LocalIsolate* isolate, ByteArray meta_table, int capacity);
+ template <typename LocalIsolate>
+ static Handle<SwissNameDictionary> Rehash(LocalIsolate* isolate,
+ Handle<SwissNameDictionary> table,
+ int new_capacity);
+ void Rehash(Isolate* isolate);
+
inline void SetHash(int hash);
inline int Hash();
+ Object SlowReverseLookup(Isolate* isolate, Object value);
+
class IndexIterator {
public:
inline IndexIterator(Handle<SwissNameDictionary> dict, int start);
@@ -168,7 +199,12 @@ class SwissNameDictionary : public HeapObject {
// Indicates that IterateEntries() returns entries ordered.
static constexpr bool kIsOrderedDictionaryType = true;
+ // Only used in CSA/Torque, where indices are actual integers. In C++,
+ // InternalIndex::NotFound() is always used instead.
+ static constexpr int kNotFoundSentinel = -1;
+
static const int kGroupWidth = Group::kWidth;
+ static const bool kUseSIMD = kGroupWidth == 16;
class BodyDescriptor;
@@ -182,9 +218,17 @@ class SwissNameDictionary : public HeapObject {
static constexpr int kDataTableKeyEntryIndex = 0;
static constexpr int kDataTableValueEntryIndex = kDataTableKeyEntryIndex + 1;
- static constexpr int kMetaTableElementCountOffset = 0;
- static constexpr int kMetaTableDeletedElementCountOffset = 1;
- static constexpr int kMetaTableEnumerationTableStartOffset = 2;
+ // Field indices describing the layout of the meta table: A field index of i
+ // means that the corresponding meta table entry resides at an offset of {i *
+ // sizeof(uintX_t)} bytes from the beginning of the meta table. Here, the X in
+ // uintX_t can be 8, 16, or 32, and depends on the capacity of the overall
+ // SwissNameDictionary. See the section "Meta table" in the comment at the
+ // beginning of the SwissNameDictionary class in this file.
+ static constexpr int kMetaTableElementCountFieldIndex = 0;
+ static constexpr int kMetaTableDeletedElementCountFieldIndex = 1;
+ // Field index of the first entry of the enumeration table (which is part of
+ // the meta table).
+ static constexpr int kMetaTableEnumerationDataStartIndex = 2;
// The maximum capacity of any SwissNameDictionary whose meta table can use 1
// byte per entry.
@@ -218,6 +262,10 @@ class SwissNameDictionary : public HeapObject {
using ctrl_t = swiss_table::ctrl_t;
using Ctrl = swiss_table::Ctrl;
+ template <typename LocalIsolate>
+ inline static Handle<SwissNameDictionary> EnsureGrowable(
+ LocalIsolate* isolate, Handle<SwissNameDictionary> table);
+
// Returns table of byte-encoded PropertyDetails (without enumeration index
// stored in PropertyDetails).
inline uint8_t* PropertyDetailsTable();
@@ -235,6 +283,13 @@ class SwissNameDictionary : public HeapObject {
inline bool ToKey(ReadOnlyRoots roots, int entry, Object* out_key);
+ inline int FindFirstEmpty(uint32_t hash);
+ // Adds |key| -> (|value|, |details|) as a new mapping to the table, which
+ // must have sufficient room. Returns the entry (= bucket) used by the new
+ // mapping. Does not update the number of present entries or the
+ // enumeration table.
+ inline int AddInternal(Name key, Object value, PropertyDetails details);
+
// Use |set_ctrl| for modifications whenever possible, since that function
// correctly maintains the copy of the first group at the end of the ctrl
// table.
@@ -251,7 +306,8 @@ class SwissNameDictionary : public HeapObject {
inline ctrl_t GetCtrl(int entry);
inline Object LoadFromDataTable(int entry, int data_offset);
- inline Object LoadFromDataTable(IsolateRoot root, int entry, int data_offset);
+ inline Object LoadFromDataTable(PtrComprCageBase cage_base, int entry,
+ int data_offset);
inline void StoreToDataTable(int entry, int data_offset, Object data);
inline void StoreToDataTableNoBarrier(int entry, int data_offset,
Object data);
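
The renamed kMetaTable...FieldIndex constants above are field indices rather than byte offsets: the byte offset additionally depends on the per-entry width of the meta table, which is 1, 2, or 4 bytes depending on the dictionary's capacity. The short sketch below shows that mapping; the threshold values are stand-ins patterned after kMax1ByteMetaTableCapacity and kMax2ByteMetaTableCapacity, and the code is illustrative only.

// meta_table_offset_demo.cc - sketch of how a meta table field index maps to a
// byte offset once the per-entry width is derived from the capacity. The
// thresholds are assumed stand-ins for kMax1ByteMetaTableCapacity and
// kMax2ByteMetaTableCapacity.
#include <cstdio>

int MetaTableSizePerEntryFor(int capacity) {
  if (capacity <= 256) return 1;    // every stored value fits in a uint8_t
  if (capacity <= 65536) return 2;  // fits in a uint16_t
  return 4;                         // otherwise uint32_t
}

// Field 0: element count, field 1: deleted element count, fields 2..: the
// enumeration table (kMetaTableEnumerationDataStartIndex == 2).
int MetaTableFieldByteOffset(int capacity, int field_index) {
  return field_index * MetaTableSizePerEntryFor(capacity);
}

int main() {
  std::printf("%d\n", MetaTableFieldByteOffset(128, 2));     // 2 (1 byte/entry)
  std::printf("%d\n", MetaTableFieldByteOffset(1024, 2));    // 4 (2 bytes/entry)
  std::printf("%d\n", MetaTableFieldByteOffset(100000, 2));  // 8 (4 bytes/entry)
  return 0;
}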
diff --git a/deps/v8/src/objects/swiss-name-dictionary.tq b/deps/v8/src/objects/swiss-name-dictionary.tq
index 575d8bab461..ff648a9a889 100644
--- a/deps/v8/src/objects/swiss-name-dictionary.tq
+++ b/deps/v8/src/objects/swiss-name-dictionary.tq
@@ -10,6 +10,310 @@ extern class SwissNameDictionary extends HeapObject {
const capacity: int32;
meta_table: ByteArray;
data_table[Convert<intptr>(capacity) * 2]: JSAny|TheHole;
- ctrl_table[Convert<intptr>(capacity) + kSwissNameDictionaryGroupWidth]: uint8;
+ ctrl_table[Convert<intptr>(capacity) + swiss_table::kGroupWidth]: uint8;
property_details_table[Convert<intptr>(capacity)]: uint8;
}
+
+namespace swiss_table {
+
+const kDataTableEntryCount: constexpr intptr
+ generates 'SwissNameDictionary::kDataTableEntryCount';
+
+const kMax1ByteMetaTableCapacity: constexpr int32
+ generates 'SwissNameDictionary::kMax1ByteMetaTableCapacity';
+
+const kMax2ByteMetaTableCapacity: constexpr int32
+ generates 'SwissNameDictionary::kMax2ByteMetaTableCapacity';
+
+const kNotFoundSentinel:
+ constexpr int32 generates 'SwissNameDictionary::kNotFoundSentinel';
+
+extern macro LoadSwissNameDictionaryKey(SwissNameDictionary, intptr): Name;
+
+extern macro StoreSwissNameDictionaryKeyAndValue(
+ SwissNameDictionary, intptr, Object, Object);
+
+extern macro SwissNameDictionarySetCtrl(
+ SwissNameDictionary, intptr, intptr, uint8);
+
+extern macro StoreSwissNameDictionaryPropertyDetails(
+ SwissNameDictionary, intptr, intptr, uint8);
+
+extern macro
+SwissNameDictionaryIncreaseElementCountOrBailout(
+ ByteArray, intptr, uint32): uint32 labels Bailout;
+
+extern macro
+StoreSwissNameDictionaryEnumToEntryMapping(
+ SwissNameDictionary, intptr, intptr, int32);
+
+extern macro
+SwissNameDictionaryUpdateCountsForDeletion(ByteArray, intptr): uint32;
+
+namespace runtime {
+extern runtime SwissTableFindEntry(NoContext, SwissNameDictionary, Name): Smi;
+
+extern runtime SwissTableAdd(
+ NoContext, SwissNameDictionary, Name, Object, Smi): SwissNameDictionary;
+
+extern runtime ShrinkSwissNameDictionary(
+ NoContext, SwissNameDictionary): SwissNameDictionary;
+}
+
+// Counterpart for SwissNameDictionary::CapacityFor in C++.
+@export
+macro SwissNameDictionaryCapacityFor(atLeastSpaceFor: intptr): intptr {
+ if (atLeastSpaceFor <= 4) {
+ if (atLeastSpaceFor == 0) {
+ return 0;
+ } else if (atLeastSpaceFor < kSwissNameDictionaryInitialCapacity) {
+ return 4;
+ } else if (FromConstexpr<bool>(kGroupWidth == 16)) {
+ assert(atLeastSpaceFor == 4);
+ return 4;
+ } else if (FromConstexpr<bool>(kGroupWidth == 8)) {
+ assert(atLeastSpaceFor == 4);
+ return 8;
+ }
+ }
+
+ const nonNormalized = atLeastSpaceFor + atLeastSpaceFor / 7;
+ return IntPtrRoundUpToPowerOfTwo32(nonNormalized);
+}
+
+// Counterpart for SwissNameDictionary::MaxUsableCapacity in C++.
+@export
+macro SwissNameDictionaryMaxUsableCapacity(capacity: intptr): intptr {
+ assert(capacity == 0 || capacity >= kSwissNameDictionaryInitialCapacity);
+ if (FromConstexpr<bool>(kGroupWidth == 8) && capacity == 4) {
+ // If the group size is 16 we can fully utilize capacity 4: There will be
+ // enough kEmpty entries in the ctrl table.
+ return 3;
+ }
+ return capacity - capacity / 8;
+}
+
+// Counterpart for SwissNameDictionary::SizeFor in C++.
+@export
+macro SwissNameDictionarySizeFor(capacity: intptr): intptr {
+ const constant: constexpr int32 = kHeapObjectHeaderSize + 8 + kTaggedSize;
+ const dynamic: intptr =
+ capacity * FromConstexpr<intptr>(2 * kTaggedSize + 2) +
+ FromConstexpr<intptr>(kGroupWidth);
+ return constant + dynamic;
+}
+
+// Counterpart for SwissNameDictionary::MetaTableSizePerEntryFor in C++.
+@export
+macro SwissNameDictionaryMetaTableSizePerEntryFor(capacity: intptr): intptr {
+ if (capacity <= kMax1ByteMetaTableCapacity) {
+ return 1;
+ } else if (capacity <= kMax2ByteMetaTableCapacity) {
+ return 2;
+ } else {
+ return 4;
+ }
+}
+
+// Counterpart for SwissNameDictionary::MetaTableSizeFor in C++.
+@export
+macro SwissNameDictionaryMetaTableSizeFor(capacity: intptr): intptr {
+ const perEntry: intptr =
+ SwissNameDictionaryMetaTableSizePerEntryFor(capacity);
+ const maxUsable: intptr =
+ Convert<intptr>(SwissNameDictionaryMaxUsableCapacity(capacity));
+
+ return (2 + maxUsable) * perEntry;
+}
+
+//
+// Offsets. MT stands for "minus tag"
+//
+
+const kDataTableStartOffsetMT: constexpr intptr
+ generates 'SwissNameDictionary::DataTableStartOffset() - kHeapObjectTag';
+
+@export
+macro SwissNameDictionaryDataTableStartOffsetMT(): intptr {
+ return kDataTableStartOffsetMT;
+}
+
+@export
+macro SwissNameDictionaryCtrlTableStartOffsetMT(capacity: intptr): intptr {
+ return kDataTableStartOffsetMT +
+ kDataTableEntryCount * FromConstexpr<intptr>(kTaggedSize) * capacity;
+}
+
+macro Probe(hash: uint32, mask: uint32): ProbeSequence {
+ // Mask must be a power of 2 minus 1.
+ assert(((mask + 1) & mask) == 0);
+
+ return ProbeSequence{mask: mask, offset: H1(hash) & mask, index: 0};
+}
+
+macro FindEntry<GroupLoader: type>(
+ table: SwissNameDictionary, key: Name): never labels
+Found(intptr), NotFound {
+ const hash: uint32 = LoadNameHash(key);
+ const capacity: int32 = table.capacity;
+ const nonZeroCapacity: int32 = capacity | Convert<int32>(capacity == 0);
+ const mask: uint32 = Unsigned(nonZeroCapacity - 1);
+
+ const ctrlTableStart: intptr =
+ SwissNameDictionaryCtrlTableStartOffsetMT(Convert<intptr>(capacity)) +
+ BitcastTaggedToWord(table);
+
+ let seq = Probe(hash, mask);
+ while (true) {
+ const group =
+ GroupLoader{}.LoadGroup(ctrlTableStart + Convert<intptr>(seq.offset));
+ let match = group.Match(H2(hash));
+ while (match.HasBitsSet()) {
+ const inGroupIndex = match.LowestBitSet();
+ const candidateEntry = Convert<intptr>(seq.Offset(inGroupIndex));
+ const candidateKey: Object =
+ LoadSwissNameDictionaryKey(table, candidateEntry);
+ if (TaggedEqual(key, candidateKey)) {
+ goto Found(candidateEntry);
+ }
+ match.ClearLowestSetBit();
+ }
+ if (group.MatchEmpty().HasBitsSet()) {
+ goto NotFound;
+ }
+ seq.Next();
+ }
+
+ unreachable;
+}
+
+macro FindFirstEmpty<GroupLoader: type>(
+ table: SwissNameDictionary, capacity: intptr, hash: uint32): int32 {
+ const nonZeroCapacity: int32 =
+ Convert<int32>(capacity) | Convert<int32>(capacity == 0);
+ const mask: uint32 = Unsigned(nonZeroCapacity - 1);
+
+ const ctrlTableStart: intptr =
+ SwissNameDictionaryCtrlTableStartOffsetMT(capacity) +
+ BitcastTaggedToWord(table);
+
+ let seq = Probe(hash, mask);
+ while (true) {
+ const group =
+ GroupLoader{}.LoadGroup(ctrlTableStart + Convert<intptr>(seq.offset));
+ const match = group.MatchEmpty();
+ if (match.HasBitsSet()) {
+ const inGroupIndex = match.LowestBitSet();
+ return Signed(seq.Offset(inGroupIndex));
+ }
+ seq.Next();
+ }
+
+ unreachable;
+}
+
+macro Add<GroupLoader: type>(
+ table: SwissNameDictionary, key: Name, value: Object,
+ propertyDetails: uint8)
+ labels Bailout {
+ const capacity: intptr = Convert<intptr>(table.capacity);
+ const maxUsable: uint32 =
+ Unsigned(Convert<int32>(SwissNameDictionaryMaxUsableCapacity(capacity)));
+
+ try {
+ // We read the used capacity (present + deleted elements), compare it
+ // against the max usable capacity to determine if a bailout is necessary,
+ // and in case of no bailout increase the present element count all in one
+ // go using the following macro. This way we don't have to do the branching
+ // needed for meta table accesses multiple times.
+ const used: uint32 = SwissNameDictionaryIncreaseElementCountOrBailout(
+ table.meta_table, capacity, maxUsable) otherwise Bailout;
+
+ const hash: uint32 = LoadNameHash(key);
+ const newEntry32 = FindFirstEmpty<GroupLoader>(table, capacity, hash);
+ const newEntry = Convert<intptr>(newEntry32);
+
+ StoreSwissNameDictionaryKeyAndValue(table, newEntry, key, value);
+
+ StoreSwissNameDictionaryEnumToEntryMapping(
+ table, capacity, Convert<intptr>(used), newEntry32);
+
+ const h2 = Convert<uint8>(Convert<intptr>(H2(hash)));
+ SwissNameDictionarySetCtrl(table, capacity, newEntry, h2);
+
+ StoreSwissNameDictionaryPropertyDetails(
+ table, capacity, newEntry, propertyDetails);
+ } label Bailout {
+ goto Bailout;
+ }
+}
+
+@export
+macro SwissNameDictionaryDelete(table: SwissNameDictionary, entry: intptr)
+ labels
+ Shrunk(SwissNameDictionary) {
+ const capacity = Convert<intptr>(table.capacity);
+
+ // Update present and deleted element counts at once, without needing to do
+ // the meta table access related branching more than once.
+ const newElementCount =
+ SwissNameDictionaryUpdateCountsForDeletion(table.meta_table, capacity);
+
+ StoreSwissNameDictionaryKeyAndValue(table, entry, TheHole, TheHole);
+
+ const kDeleted = FromConstexpr<uint8>(ctrl::kDeleted);
+ SwissNameDictionarySetCtrl(table, capacity, entry, kDeleted);
+
+ // Same logic for deciding when to shrink as in SwissNameDictionary::Delete.
+ if (Convert<intptr>(Signed(newElementCount)) < (capacity >> 2)) {
+ const shrunkTable = runtime::ShrinkSwissNameDictionary(kNoContext, table);
+ goto Shrunk(shrunkTable);
+ }
+}
+
+// TODO(v8:11330) Ideally, we would like to implement
+// CodeStubAssembler::SwissNameDictionaryFindEntry in Torque and do the
+// necessary switching between the two implementations with if(kUseSimd) {...}
+// else {...}. However, Torque currently generates a call to
+// CodeAssembler::Branch which cannot guarantee that code for the "bad" path is
+// not generated, even if the branch can be resolved at compile time. This means
+// that we end up trying to generate unused code using unsupported instructions.
+@export
+macro SwissNameDictionaryFindEntrySIMD(table: SwissNameDictionary, key: Name):
+ never labels Found(intptr), NotFound {
+ FindEntry<GroupSse2Loader>(table, key)
+ otherwise Found, NotFound;
+}
+
+@export
+macro SwissNameDictionaryFindEntryPortable(
+ table: SwissNameDictionary, key: Name): never labels
+Found(intptr),
+ NotFound {
+ FindEntry<GroupPortableLoader>(table, key)
+ otherwise Found, NotFound;
+}
+
+// TODO(v8:11330) Ideally, we would like to implement
+// CodeStubAssembler::SwissNameDictionaryAdd in Torque and do the necessary
+// switching between the two implementations with if(kUseSimd) {...} else {...}.
+// However, Torque currently generates a call to CodeAssembler::Branch which
+// cannot guarantee that code for the "bad" path is not generated, even if the
+// branch can be resolved at compile time. This means that we end up trying to
+// generate unused code using unsupported instructions.
+@export
+macro SwissNameDictionaryAddSIMD(
+ table: SwissNameDictionary, key: Name, value: Object,
+ propertyDetails: uint8) labels Bailout {
+ Add<GroupSse2Loader>(table, key, value, propertyDetails)
+ otherwise Bailout;
+}
+
+@export
+macro SwissNameDictionaryAddPortable(
+ table: SwissNameDictionary, key: Name, value: Object,
+ propertyDetails: uint8) labels Bailout {
+ Add<GroupPortableLoader>(table, key, value, propertyDetails)
+ otherwise Bailout;
+}
+}
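
SwissNameDictionaryCapacityFor above adds roughly one slot of headroom per seven requested and rounds the result up to a power of two so that the probing mask (capacity - 1) works. The sketch below covers only the general case (the special handling of at most four requested slots is omitted), and RoundUpToPowerOfTwo32 is a plain bit-twiddling stand-in for the IntPtrRoundUpToPowerOfTwo32 intrinsic.

// capacity_for_demo.cc - sketch of the general-case capacity computation
// mirrored by SwissNameDictionaryCapacityFor above. Illustrative only.
#include <cstdint>
#include <cstdio>

// Rounds a positive 32-bit value up to the next power of two.
uint32_t RoundUpToPowerOfTwo32(uint32_t v) {
  v--;
  v |= v >> 1;
  v |= v >> 2;
  v |= v >> 4;
  v |= v >> 8;
  v |= v >> 16;
  return v + 1;
}

int CapacityFor(int at_least_space_for) {
  // 1/7 extra headroom, then round up so that (capacity - 1) is a valid mask.
  const int non_normalized = at_least_space_for + at_least_space_for / 7;
  return static_cast<int>(
      RoundUpToPowerOfTwo32(static_cast<uint32_t>(non_normalized)));
}

int main() {
  std::printf("%d\n", CapacityFor(5));   // 8
  std::printf("%d\n", CapacityFor(14));  // 16 (14 + 2 = 16)
  std::printf("%d\n", CapacityFor(15));  // 32 (15 + 2 = 17, next power of two)
  return 0;
}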
diff --git a/deps/v8/src/objects/tagged-field-inl.h b/deps/v8/src/objects/tagged-field-inl.h
index eaaa5574314..513f6a02d97 100644
--- a/deps/v8/src/objects/tagged-field-inl.h
+++ b/deps/v8/src/objects/tagged-field-inl.h
@@ -61,10 +61,10 @@ T TaggedField<T, kFieldOffset>::load(HeapObject host, int offset) {
// static
template <typename T, int kFieldOffset>
-T TaggedField<T, kFieldOffset>::load(IsolateRoot isolate, HeapObject host,
- int offset) {
+T TaggedField<T, kFieldOffset>::load(PtrComprCageBase cage_base,
+ HeapObject host, int offset) {
Tagged_t value = *location(host, offset);
- return T(tagged_to_full(isolate, value));
+ return T(tagged_to_full(cage_base, value));
}
// static
@@ -96,10 +96,10 @@ T TaggedField<T, kFieldOffset>::Relaxed_Load(HeapObject host, int offset) {
// static
template <typename T, int kFieldOffset>
-T TaggedField<T, kFieldOffset>::Relaxed_Load(IsolateRoot isolate,
+T TaggedField<T, kFieldOffset>::Relaxed_Load(PtrComprCageBase cage_base,
HeapObject host, int offset) {
AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location(host, offset));
- return T(tagged_to_full(isolate, value));
+ return T(tagged_to_full(cage_base, value));
}
// static
@@ -125,10 +125,10 @@ T TaggedField<T, kFieldOffset>::Acquire_Load(HeapObject host, int offset) {
// static
template <typename T, int kFieldOffset>
-T TaggedField<T, kFieldOffset>::Acquire_Load(IsolateRoot isolate,
+T TaggedField<T, kFieldOffset>::Acquire_Load(PtrComprCageBase cage_base,
HeapObject host, int offset) {
AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location(host, offset));
- return T(tagged_to_full(isolate, value));
+ return T(tagged_to_full(cage_base, value));
}
// static
diff --git a/deps/v8/src/objects/tagged-field.h b/deps/v8/src/objects/tagged-field.h
index 1c96cc9d92b..e3950fa0af3 100644
--- a/deps/v8/src/objects/tagged-field.h
+++ b/deps/v8/src/objects/tagged-field.h
@@ -38,20 +38,21 @@ class TaggedField : public AllStatic {
static inline Address address(HeapObject host, int offset = 0);
static inline T load(HeapObject host, int offset = 0);
- static inline T load(IsolateRoot isolate, HeapObject host, int offset = 0);
+ static inline T load(PtrComprCageBase cage_base, HeapObject host,
+ int offset = 0);
static inline void store(HeapObject host, T value);
static inline void store(HeapObject host, int offset, T value);
static inline T Relaxed_Load(HeapObject host, int offset = 0);
- static inline T Relaxed_Load(IsolateRoot isolate, HeapObject host,
+ static inline T Relaxed_Load(PtrComprCageBase cage_base, HeapObject host,
int offset = 0);
static inline void Relaxed_Store(HeapObject host, T value);
- static void Relaxed_Store(HeapObject host, int offset, T value);
+ static inline void Relaxed_Store(HeapObject host, int offset, T value);
static inline T Acquire_Load(HeapObject host, int offset = 0);
- static inline T Acquire_Load(IsolateRoot isolate, HeapObject host,
+ static inline T Acquire_Load(PtrComprCageBase cage_base, HeapObject host,
int offset = 0);
static inline void Release_Store(HeapObject host, T value);
diff --git a/deps/v8/src/objects/templates-inl.h b/deps/v8/src/objects/templates-inl.h
index 613a4279a46..d5a08fd88eb 100644
--- a/deps/v8/src/objects/templates-inl.h
+++ b/deps/v8/src/objects/templates-inl.h
@@ -37,14 +37,35 @@ BOOL_ACCESSORS(FunctionTemplateInfo, flag, remove_prototype,
BOOL_ACCESSORS(FunctionTemplateInfo, flag, do_not_cache, DoNotCacheBit::kShift)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, accept_any_receiver,
AcceptAnyReceiverBit::kShift)
+BOOL_ACCESSORS(FunctionTemplateInfo, flag, published, PublishedBit::kShift)
+// TODO(nicohartmann@, v8:11122): Let Torque generate this accessor.
RELEASE_ACQUIRE_ACCESSORS(FunctionTemplateInfo, call_code, HeapObject,
kCallCodeOffset)
+// TODO(nicohartmann@, v8:11122): Let Torque generate this accessor.
+HeapObject FunctionTemplateInfo::rare_data(AcquireLoadTag) const {
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return rare_data(cage_base, kAcquireLoad);
+}
+HeapObject FunctionTemplateInfo::rare_data(PtrComprCageBase cage_base,
+ AcquireLoadTag) const {
+ HeapObject value =
+ TaggedField<HeapObject>::Acquire_Load(cage_base, *this, kRareDataOffset);
+ DCHECK(value.IsUndefined() || value.IsFunctionTemplateRareData());
+ return value;
+}
+void FunctionTemplateInfo::set_rare_data(HeapObject value, ReleaseStoreTag,
+ WriteBarrierMode mode) {
+ DCHECK(value.IsUndefined() || value.IsFunctionTemplateRareData());
+ RELEASE_WRITE_FIELD(*this, kRareDataOffset, value);
+ CONDITIONAL_WRITE_BARRIER(*this, kRareDataOffset, value, mode);
+}
+
// static
FunctionTemplateRareData FunctionTemplateInfo::EnsureFunctionTemplateRareData(
Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info) {
- HeapObject extra = function_template_info->rare_data(isolate);
+ HeapObject extra = function_template_info->rare_data(isolate, kAcquireLoad);
if (extra.IsUndefined(isolate)) {
return AllocateFunctionTemplateRareData(isolate, function_template_info);
} else {
@@ -54,8 +75,8 @@ FunctionTemplateRareData FunctionTemplateInfo::EnsureFunctionTemplateRareData(
#define RARE_ACCESSORS(Name, CamelName, Type, Default) \
DEF_GETTER(FunctionTemplateInfo, Get##CamelName, Type) { \
- HeapObject extra = rare_data(isolate); \
- HeapObject undefined = GetReadOnlyRoots(isolate).undefined_value(); \
+ HeapObject extra = rare_data(cage_base, kAcquireLoad); \
+ HeapObject undefined = GetReadOnlyRoots(cage_base).undefined_value(); \
return extra == undefined ? Default \
: FunctionTemplateRareData::cast(extra).Name(); \
} \
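
The hand-written rare_data accessors above pair a release store (set_rare_data) with an acquire load so that a thread which observes the newly published FunctionTemplateRareData also observes its initialized fields, which matters once the published flag allows concurrent readers such as TurboFan. Below is a minimal illustration of that publication pattern using std::atomic instead of V8's TaggedField machinery; the payload type and field are made up.

// acquire_release_demo.cc - sketch of the release-store / acquire-load pairing
// that the rare_data accessors above rely on. Not V8 code; RareData is a
// made-up stand-in for FunctionTemplateRareData.
#include <atomic>
#include <cstdio>
#include <thread>

struct RareData {
  int prototype_template = 42;  // stands in for the real rare fields
};

std::atomic<RareData*> rare_data{nullptr};

void Publisher() {
  RareData* data = new RareData();                   // initialize fully first,
  rare_data.store(data, std::memory_order_release);  // then publish
}

void Consumer() {
  RareData* data = rare_data.load(std::memory_order_acquire);
  if (data != nullptr) {
    // The acquire load makes the initialization in Publisher visible here.
    std::printf("%d\n", data->prototype_template);
  }
}

int main() {
  std::thread publisher(Publisher);
  std::thread consumer(Consumer);
  publisher.join();
  consumer.join();
  delete rare_data.load();
  return 0;
}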
diff --git a/deps/v8/src/objects/templates.h b/deps/v8/src/objects/templates.h
index 13d68ef3918..966b81167c4 100644
--- a/deps/v8/src/objects/templates.h
+++ b/deps/v8/src/objects/templates.h
@@ -87,8 +87,16 @@ class FunctionTemplateInfo
DECL_RARE_ACCESSORS(c_signature, CSignature, Object)
#undef DECL_RARE_ACCESSORS
+ // TODO(nicohartmann@, v8:11122): Let Torque generate the following accessor.
DECL_RELEASE_ACQUIRE_ACCESSORS(call_code, HeapObject)
+ // TODO(nicohartmann@, v8:11122): Let Torque generate the following accessor.
+ inline HeapObject rare_data(AcquireLoadTag) const;
+ inline HeapObject rare_data(PtrComprCageBase cage_base, AcquireLoadTag) const;
+ inline void set_rare_data(
+ HeapObject value, ReleaseStoreTag,
+ WriteBarrierMode mode = WriteBarrierMode::UPDATE_WRITE_BARRIER);
+
// Begin flag bits ---------------------
DECL_BOOLEAN_ACCESSORS(undetectable)
@@ -109,6 +117,12 @@ class FunctionTemplateInfo
// If not set an access may be performed on calling the associated JSFunction.
DECL_BOOLEAN_ACCESSORS(accept_any_receiver)
+
+  // This flag is used to check that the FunctionTemplateInfo instance is not
+  // changed after it becomes visible to TurboFan (either by being set in a
+  // SharedFunctionInfo or in an accessor), because TF relies on the instance
+  // being immutable to read it safely from a concurrent thread.
+ DECL_BOOLEAN_ACCESSORS(published)
// End flag bits ---------------------
// Dispatched behavior.
diff --git a/deps/v8/src/objects/templates.tq b/deps/v8/src/objects/templates.tq
index d26b6dd5b70..e952747ecf7 100644
--- a/deps/v8/src/objects/templates.tq
+++ b/deps/v8/src/objects/templates.tq
@@ -35,6 +35,7 @@ bitfield struct FunctionTemplateInfoFlags extends uint31 {
remove_prototype: bool: 1 bit;
do_not_cache: bool: 1 bit;
accept_any_receiver: bool: 1 bit;
+ published: bool: 1 bit;
}
@generateCppClass
@@ -51,7 +52,7 @@ extern class FunctionTemplateInfo extends TemplateInfo {
// If any of the setters declared by DECL_RARE_ACCESSORS are used then a
// FunctionTemplateRareData will be stored here. Until then this contains
// undefined.
- @acquireRead @releaseWrite rare_data: FunctionTemplateRareData|Undefined;
+ rare_data: FunctionTemplateRareData|Undefined;
shared_function_info: SharedFunctionInfo|Undefined;
// Internal field to store a flag bitfield.
flag: SmiTagged<FunctionTemplateInfoFlags>;
diff --git a/deps/v8/src/objects/transitions-inl.h b/deps/v8/src/objects/transitions-inl.h
index fbdde538bed..6178e018b6c 100644
--- a/deps/v8/src/objects/transitions-inl.h
+++ b/deps/v8/src/objects/transitions-inl.h
@@ -113,7 +113,7 @@ PropertyDetails TransitionsAccessor::GetSimpleTargetDetails(Map transition) {
// static
Name TransitionsAccessor::GetSimpleTransitionKey(Map transition) {
InternalIndex descriptor = transition.LastAdded();
- return transition.instance_descriptors(kRelaxedLoad).GetKey(descriptor);
+ return transition.instance_descriptors().GetKey(descriptor);
}
// static
diff --git a/deps/v8/src/objects/transitions.cc b/deps/v8/src/objects/transitions.cc
index 75ca763a1b4..ac908030a2d 100644
--- a/deps/v8/src/objects/transitions.cc
+++ b/deps/v8/src/objects/transitions.cc
@@ -145,7 +145,7 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
// If an existing entry was found, overwrite it and return.
if (index != kNotFound) {
base::SharedMutexGuard<base::kExclusive> shared_mutex_guard(
- isolate_->transition_array_access());
+ isolate_->full_transition_array_access());
array.SetRawTarget(index, HeapObjectReference::Weak(*target));
return;
}
@@ -158,7 +158,7 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
// If there is enough capacity, insert new entry into the existing array.
if (new_nof <= array.Capacity()) {
base::SharedMutexGuard<base::kExclusive> shared_mutex_guard(
- isolate_->transition_array_access());
+ isolate_->full_transition_array_access());
array.SetNumberOfTransitions(new_nof);
for (int i = number_of_transitions; i > insertion_index; --i) {
array.SetKey(i, array.GetKey(i - 1));
@@ -231,7 +231,7 @@ Map TransitionsAccessor::SearchTransition(Name name, PropertyKind kind,
}
case kFullTransitionArray: {
base::SharedMutexGuardIf<base::kShared> scope(
- isolate_->transition_array_access(), concurrent_access_);
+ isolate_->full_transition_array_access(), concurrent_access_);
return transitions().SearchAndGetTarget(kind, name, attributes);
}
}
@@ -270,6 +270,34 @@ MaybeHandle<Map> TransitionsAccessor::FindTransitionToDataProperty(
return Handle<Map>(target, isolate_);
}
+void TransitionsAccessor::ForEachTransitionTo(
+ Name name, const ForEachTransitionCallback& callback,
+ DisallowGarbageCollection* no_gc) {
+ DCHECK(name.IsUniqueName());
+ switch (encoding()) {
+ case kPrototypeInfo:
+ case kUninitialized:
+ case kMigrationTarget:
+ return;
+ case kWeakRef: {
+ Map target = Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
+ InternalIndex descriptor = target.LastAdded();
+ DescriptorArray descriptors = target.instance_descriptors(kRelaxedLoad);
+ Name key = descriptors.GetKey(descriptor);
+ if (key == name) {
+ callback(target);
+ }
+ return;
+ }
+ case kFullTransitionArray: {
+ base::SharedMutexGuardIf<base::kShared> scope(
+ isolate_->full_transition_array_access(), concurrent_access_);
+ return transitions().ForEachTransitionTo(name, callback);
+ }
+ }
+ UNREACHABLE();
+}
+
bool TransitionsAccessor::CanHaveMoreTransitions() {
if (map_.is_dictionary_map()) return false;
if (encoding() == kFullTransitionArray) {
@@ -529,8 +557,8 @@ void TransitionsAccessor::CheckNewTransitionsAreConsistent(
TransitionArray new_transitions = TransitionArray::cast(transitions);
for (int i = 0; i < old_transitions.number_of_transitions(); i++) {
Map target = old_transitions.GetTarget(i);
- if (target.instance_descriptors(kRelaxedLoad) ==
- map_.instance_descriptors(kRelaxedLoad)) {
+ if (target.instance_descriptors(isolate_) ==
+ map_.instance_descriptors(isolate_)) {
Name key = old_transitions.GetKey(i);
int new_target_index;
if (IsSpecialTransition(ReadOnlyRoots(isolate_), key)) {
@@ -613,6 +641,21 @@ Map TransitionArray::SearchAndGetTarget(PropertyKind kind, Name name,
return SearchDetailsAndGetTarget(transition, kind, attributes);
}
+void TransitionArray::ForEachTransitionTo(
+ Name name, const ForEachTransitionCallback& callback) {
+ int transition = SearchName(name, nullptr);
+ if (transition == kNotFound) return;
+
+ int nof_transitions = number_of_transitions();
+ DCHECK(transition < nof_transitions);
+ Name key = GetKey(transition);
+ for (; transition < nof_transitions && GetKey(transition) == key;
+ transition++) {
+ Map target = GetTarget(transition);
+ callback(target);
+ }
+}
+
void TransitionArray::Sort() {
DisallowGarbageCollection no_gc;
// In-place insertion sort.
diff --git a/deps/v8/src/objects/transitions.h b/deps/v8/src/objects/transitions.h
index 4f992bc6cfd..237cfcd0efa 100644
--- a/deps/v8/src/objects/transitions.h
+++ b/deps/v8/src/objects/transitions.h
@@ -19,6 +19,9 @@
namespace v8 {
namespace internal {
+// Finds all transitions with the given name and calls the callback.
+using ForEachTransitionCallback = std::function<void(Map)>;
+
// TransitionsAccessor is a helper class to encapsulate access to the various
// ways a Map can store transitions to other maps in its respective field at
// Map::kTransitionsOrPrototypeInfo.
@@ -68,6 +71,14 @@ class V8_EXPORT_PRIVATE TransitionsAccessor {
return FindTransitionToDataProperty(name, kFieldOnly);
}
+  // Finds all transitions with the given name and calls the callback.
+  // Neither GCs nor operations requiring the
+  // Isolate::full_transition_array_access lock are allowed inside the
+  // callback. If any GC- or lock-requiring processing is necessary, it has to
+  // be done outside of the callback.
+ void ForEachTransitionTo(Name name, const ForEachTransitionCallback& callback,
+ DisallowGarbageCollection* no_gc);
+
inline Handle<String> ExpectedTransitionKey();
inline Handle<Map> ExpectedTransitionTarget();
@@ -320,6 +331,10 @@ class TransitionArray : public WeakFixedArray {
Map SearchDetailsAndGetTarget(int transition, PropertyKind kind,
PropertyAttributes attributes);
+  // Finds all transitions with the given name and calls the callback.
+ void ForEachTransitionTo(Name name,
+ const ForEachTransitionCallback& callback);
+
inline int number_of_transitions() const;
static bool CompactPrototypeTransitionArray(Isolate* isolate,
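
The ForEachTransitionTo methods added above invoke the callback once per transition stored under the given name. The following standalone sketch, which is not part of the patch, shows the same "visit every entry with a matching key" pattern over a std::multimap, with string keys and ints standing in for the real Name and Map types (the actual implementation scans a sorted TransitionArray under a shared lock).

#include <cassert>
#include <functional>
#include <map>
#include <string>

// Stand-ins for v8::internal::Name and Map, for illustration only.
using Name = std::string;
using Map = int;
using ForEachTransitionCallback = std::function<void(Map)>;

// Calls |callback| for every transition recorded under |name|.
void ForEachTransitionTo(const std::multimap<Name, Map>& transitions,
                         const Name& name,
                         const ForEachTransitionCallback& callback) {
  auto range = transitions.equal_range(name);
  for (auto it = range.first; it != range.second; ++it) callback(it->second);
}

int main() {
  std::multimap<Name, Map> transitions{{"x", 1}, {"x", 2}, {"y", 3}};
  int visited = 0;
  ForEachTransitionTo(transitions, "x", [&](Map) { ++visited; });
  assert(visited == 2);
  return 0;
}
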
diff --git a/deps/v8/src/objects/value-serializer.cc b/deps/v8/src/objects/value-serializer.cc
index 246281e4e2b..43d946943b0 100644
--- a/deps/v8/src/objects/value-serializer.cc
+++ b/deps/v8/src/objects/value-serializer.cc
@@ -18,6 +18,7 @@
#include "src/heap/factory.h"
#include "src/numbers/conversions.h"
#include "src/objects/heap-number-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/js-regexp-inl.h"
@@ -30,10 +31,10 @@
#include "src/objects/smi.h"
#include "src/objects/transitions-inl.h"
#include "src/snapshot/code-serializer.h"
-#include "src/wasm/wasm-engine.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-objects-inl.h"
-#include "src/wasm/wasm-result.h"
-#include "src/wasm/wasm-serialization.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -571,6 +572,7 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
return WriteJSArrayBufferView(JSArrayBufferView::cast(*receiver));
case JS_ERROR_TYPE:
return WriteJSError(Handle<JSObject>::cast(receiver));
+#if V8_ENABLE_WEBASSEMBLY
case WASM_MODULE_OBJECT_TYPE:
return WriteWasmModule(Handle<WasmModuleObject>::cast(receiver));
case WASM_MEMORY_OBJECT_TYPE: {
@@ -580,6 +582,7 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
}
break;
}
+#endif // V8_ENABLE_WEBASSEMBLY
default:
break;
}
@@ -602,11 +605,9 @@ Maybe<bool> ValueSerializer::WriteJSObject(Handle<JSObject> object) {
uint32_t properties_written = 0;
bool map_changed = false;
for (InternalIndex i : map->IterateOwnDescriptors()) {
- Handle<Name> key(map->instance_descriptors(kRelaxedLoad).GetKey(i),
- isolate_);
+ Handle<Name> key(map->instance_descriptors(isolate_).GetKey(i), isolate_);
if (!key->IsString()) continue;
- PropertyDetails details =
- map->instance_descriptors(kRelaxedLoad).GetDetails(i);
+ PropertyDetails details = map->instance_descriptors(isolate_).GetDetails(i);
if (details.IsDontEnum()) continue;
Handle<Object> value;
@@ -985,6 +986,7 @@ Maybe<bool> ValueSerializer::WriteJSError(Handle<JSObject> error) {
return ThrowIfOutOfMemory();
}
+#if V8_ENABLE_WEBASSEMBLY
Maybe<bool> ValueSerializer::WriteWasmModule(Handle<WasmModuleObject> object) {
if (delegate_ == nullptr) {
ThrowDataCloneError(MessageTemplate::kDataCloneError, object);
@@ -1019,6 +1021,7 @@ Maybe<bool> ValueSerializer::WriteWasmMemory(Handle<WasmMemoryObject> object) {
WriteZigZag<int32_t>(object->maximum_pages());
return WriteJSReceiver(Handle<JSReceiver>(object->array_buffer(), isolate_));
}
+#endif // V8_ENABLE_WEBASSEMBLY
Maybe<bool> ValueSerializer::WriteHostObject(Handle<JSObject> object) {
WriteTag(SerializationTag::kHostObject);
@@ -1100,6 +1103,15 @@ ValueDeserializer::ValueDeserializer(Isolate* isolate,
id_map_(isolate->global_handles()->Create(
ReadOnlyRoots(isolate_).empty_fixed_array())) {}
+ValueDeserializer::ValueDeserializer(Isolate* isolate, const uint8_t* data,
+ size_t size)
+ : isolate_(isolate),
+ delegate_(nullptr),
+ position_(data),
+ end_(data + size),
+ id_map_(isolate->global_handles()->Create(
+ ReadOnlyRoots(isolate_).empty_fixed_array())) {}
+
ValueDeserializer::~ValueDeserializer() {
GlobalHandles::Destroy(id_map_.location());
@@ -1344,10 +1356,12 @@ MaybeHandle<Object> ValueDeserializer::ReadObjectInternal() {
}
case SerializationTag::kError:
return ReadJSError();
+#if V8_ENABLE_WEBASSEMBLY
case SerializationTag::kWasmModuleTransfer:
return ReadWasmModuleTransfer();
case SerializationTag::kWasmMemoryTransfer:
return ReadWasmMemory();
+#endif // V8_ENABLE_WEBASSEMBLY
case SerializationTag::kHostObject:
return ReadHostObject();
default:
@@ -1930,6 +1944,7 @@ MaybeHandle<Object> ValueDeserializer::ReadJSError() {
return error;
}
+#if V8_ENABLE_WEBASSEMBLY
MaybeHandle<JSObject> ValueDeserializer::ReadWasmModuleTransfer() {
uint32_t transfer_id = 0;
Local<Value> module_value;
@@ -1978,6 +1993,7 @@ MaybeHandle<WasmMemoryObject> ValueDeserializer::ReadWasmMemory() {
AddObjectWithID(id, result);
return result;
}
+#endif // V8_ENABLE_WEBASSEMBLY
MaybeHandle<JSObject> ValueDeserializer::ReadHostObject() {
if (!delegate_) return MaybeHandle<JSObject>();
@@ -2003,8 +2019,7 @@ static void CommitProperties(Handle<JSObject> object, Handle<Map> map,
DCHECK(!object->map().is_dictionary_map());
DisallowGarbageCollection no_gc;
- DescriptorArray descriptors =
- object->map().instance_descriptors(kRelaxedLoad);
+ DescriptorArray descriptors = object->map().instance_descriptors();
for (InternalIndex i : InternalIndex::Range(properties.size())) {
// Initializing store.
object->WriteToField(i, descriptors.GetDetails(i),
@@ -2026,8 +2041,7 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
bool transitioning = true;
Handle<Map> map(object->map(), isolate_);
DCHECK(!map->is_dictionary_map());
- DCHECK_EQ(0,
- map->instance_descriptors(kRelaxedLoad).number_of_descriptors());
+ DCHECK_EQ(0, map->instance_descriptors(isolate_).number_of_descriptors());
std::vector<Handle<Object>> properties;
properties.reserve(8);
@@ -2078,11 +2092,11 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
if (transitioning) {
InternalIndex descriptor(properties.size());
PropertyDetails details =
- target->instance_descriptors(kRelaxedLoad).GetDetails(descriptor);
+ target->instance_descriptors(isolate_).GetDetails(descriptor);
Representation expected_representation = details.representation();
if (value->FitsRepresentation(expected_representation)) {
if (expected_representation.IsHeapObject() &&
- !target->instance_descriptors(kRelaxedLoad)
+ !target->instance_descriptors(isolate_)
.GetFieldType(descriptor)
.NowContains(value)) {
Handle<FieldType> value_type =
@@ -2091,7 +2105,7 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
details.constness(), expected_representation,
value_type);
}
- DCHECK(target->instance_descriptors(kRelaxedLoad)
+ DCHECK(target->instance_descriptors(isolate_)
.GetFieldType(descriptor)
.NowContains(value));
properties.push_back(value);
diff --git a/deps/v8/src/objects/value-serializer.h b/deps/v8/src/objects/value-serializer.h
index e06badece38..d4304ab22c5 100644
--- a/deps/v8/src/objects/value-serializer.h
+++ b/deps/v8/src/objects/value-serializer.h
@@ -94,6 +94,8 @@ class ValueSerializer {
void SetTreatArrayBufferViewsAsHostObjects(bool mode);
private:
+ friend class WebSnapshotSerializer;
+
// Managing allocations of the internal buffer.
Maybe<bool> ExpandBuffer(size_t required_capacity);
@@ -129,10 +131,12 @@ class ValueSerializer {
V8_WARN_UNUSED_RESULT;
Maybe<bool> WriteJSArrayBufferView(JSArrayBufferView array_buffer);
Maybe<bool> WriteJSError(Handle<JSObject> error) V8_WARN_UNUSED_RESULT;
+#if V8_ENABLE_WEBASSEMBLY
Maybe<bool> WriteWasmModule(Handle<WasmModuleObject> object)
V8_WARN_UNUSED_RESULT;
Maybe<bool> WriteWasmMemory(Handle<WasmMemoryObject> object)
V8_WARN_UNUSED_RESULT;
+#endif // V8_ENABLE_WEBASSEMBLY
Maybe<bool> WriteHostObject(Handle<JSObject> object) V8_WARN_UNUSED_RESULT;
/*
@@ -180,6 +184,7 @@ class ValueDeserializer {
public:
ValueDeserializer(Isolate* isolate, Vector<const uint8_t> data,
v8::ValueDeserializer::Delegate* delegate);
+ ValueDeserializer(Isolate* isolate, const uint8_t* data, size_t size);
~ValueDeserializer();
ValueDeserializer(const ValueDeserializer&) = delete;
ValueDeserializer& operator=(const ValueDeserializer&) = delete;
@@ -228,6 +233,8 @@ class ValueDeserializer {
bool ReadRawBytes(size_t length, const void** data) V8_WARN_UNUSED_RESULT;
private:
+ friend class WebSnapshotDeserializer;
+
// Reading the wire format.
Maybe<SerializationTag> PeekTag() const V8_WARN_UNUSED_RESULT;
void ConsumeTag(SerializationTag peeked_tag);
@@ -274,8 +281,10 @@ class ValueDeserializer {
MaybeHandle<JSArrayBufferView> ReadJSArrayBufferView(
Handle<JSArrayBuffer> buffer) V8_WARN_UNUSED_RESULT;
MaybeHandle<Object> ReadJSError() V8_WARN_UNUSED_RESULT;
+#if V8_ENABLE_WEBASSEMBLY
MaybeHandle<JSObject> ReadWasmModuleTransfer() V8_WARN_UNUSED_RESULT;
MaybeHandle<WasmMemoryObject> ReadWasmMemory() V8_WARN_UNUSED_RESULT;
+#endif // V8_ENABLE_WEBASSEMBLY
MaybeHandle<JSObject> ReadHostObject() V8_WARN_UNUSED_RESULT;
/*
diff --git a/deps/v8/src/objects/visitors.h b/deps/v8/src/objects/visitors.h
index 8ae05388491..1111bf25129 100644
--- a/deps/v8/src/objects/visitors.h
+++ b/deps/v8/src/objects/visitors.h
@@ -23,7 +23,7 @@ class CodeDataContainer;
V(kStrongRootList, "(Strong roots)") \
V(kSmiRootList, "(Smi roots)") \
V(kBootstrapper, "(Bootstrapper)") \
- V(kTop, "(Isolate)") \
+ V(kStackRoots, "(Stack roots)") \
V(kRelocatable, "(Relocatable)") \
V(kDebug, "(Debugger)") \
V(kCompilationCache, "(Compilation cache)") \
diff --git a/deps/v8/src/parsing/OWNERS b/deps/v8/src/parsing/OWNERS
index 9d54af5f2d6..a0077986c61 100644
--- a/deps/v8/src/parsing/OWNERS
+++ b/deps/v8/src/parsing/OWNERS
@@ -1,7 +1,5 @@
-adamk@chromium.org
gsathya@chromium.org
leszeks@chromium.org
-littledan@chromium.org
marja@chromium.org
neis@chromium.org
verwaest@chromium.org
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index 5e8d79028f3..69d18ef2b2b 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -36,7 +36,6 @@ UnoptimizedCompileFlags::UnoptimizedCompileFlags(Isolate* isolate,
set_collect_source_positions(!FLAG_enable_lazy_source_positions ||
isolate->NeedsDetailedOptimizedCodeLineInfo());
set_allow_harmony_top_level_await(FLAG_harmony_top_level_await);
- set_allow_harmony_logical_assignment(FLAG_harmony_logical_assignment);
}
// static
@@ -50,7 +49,9 @@ UnoptimizedCompileFlags UnoptimizedCompileFlags::ForFunctionCompile(
flags.SetFlagsForFunctionFromScript(script);
flags.set_allow_lazy_parsing(true);
+#if V8_ENABLE_WEBASSEMBLY
flags.set_is_asm_wasm_broken(shared.is_asm_wasm_broken());
+#endif // V8_ENABLE_WEBASSEMBLY
flags.set_is_repl_mode(shared.is_repl_mode());
// CollectTypeProfile uses its own feedback slots. If we have existing
@@ -194,7 +195,9 @@ ParseInfo::ParseInfo(const UnoptimizedCompileFlags flags,
source_range_map_(nullptr),
literal_(nullptr),
allow_eval_cache_(false),
+#if V8_ENABLE_WEBASSEMBLY
contains_asm_module_(false),
+#endif // V8_ENABLE_WEBASSEMBLY
language_mode_(flags.outer_language_mode()) {
if (flags.block_coverage_enabled()) {
AllocateSourceRangeMap();
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index 6fc5e3163eb..2068847efbc 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -63,8 +63,7 @@ class Zone;
V(is_oneshot_iife, bool, 1, _) \
V(collect_source_positions, bool, 1, _) \
V(allow_harmony_top_level_await, bool, 1, _) \
- V(is_repl_mode, bool, 1, _) \
- V(allow_harmony_logical_assignment, bool, 1, _)
+ V(is_repl_mode, bool, 1, _)
class V8_EXPORT_PRIVATE UnoptimizedCompileFlags {
public:
@@ -255,8 +254,12 @@ class V8_EXPORT_PRIVATE ParseInfo {
// Accessor methods for output flags.
bool allow_eval_cache() const { return allow_eval_cache_; }
void set_allow_eval_cache(bool value) { allow_eval_cache_ = value; }
+
+#if V8_ENABLE_WEBASSEMBLY
bool contains_asm_module() const { return contains_asm_module_; }
void set_contains_asm_module(bool value) { contains_asm_module_ = value; }
+#endif // V8_ENABLE_WEBASSEMBLY
+
LanguageMode language_mode() const { return language_mode_; }
void set_language_mode(LanguageMode value) { language_mode_ = value; }
@@ -348,7 +351,9 @@ class V8_EXPORT_PRIVATE ParseInfo {
//----------- Output of parsing and scope analysis ------------------------
FunctionLiteral* literal_;
bool allow_eval_cache_ : 1;
+#if V8_ENABLE_WEBASSEMBLY
bool contains_asm_module_ : 1;
+#endif // V8_ENABLE_WEBASSEMBLY
LanguageMode language_mode_ : 1;
};
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index be96f61eeb9..db0966803b1 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -1186,7 +1186,6 @@ class ParserBase {
BlockT ParseClassStaticBlock(ClassInfo* class_info);
ObjectLiteralPropertyT ParseObjectPropertyDefinition(
ParsePropertyInfo* prop_info, bool* has_seen_proto);
- // TODO(syg): Remove has_spread once SpreadCallNew is removed.
void ParseArguments(
ExpressionListT* args, bool* has_spread,
ParsingArrowHeadFlag maybe_arrow = kCertainlyNotArrowHead);
@@ -1453,8 +1452,9 @@ class ParserBase {
// Convenience method which determines the type of return statement to emit
// depending on the current function type.
- inline StatementT BuildReturnStatement(ExpressionT expr, int pos,
- int end_pos = kNoSourcePosition) {
+ inline StatementT BuildReturnStatement(
+ ExpressionT expr, int pos,
+ int end_pos = ReturnStatement::kFunctionLiteralReturnPosition) {
if (impl()->IsNull(expr)) {
expr = factory()->NewUndefinedLiteral(kNoSourcePosition);
} else if (is_async_generator()) {
@@ -2852,10 +2852,6 @@ ParserBase<Impl>::ParseAssignmentExpressionCoverGrammar() {
Token::Value op = peek();
if (!Token::IsArrowOrAssignmentOp(op)) return expression;
- if (Token::IsLogicalAssignmentOp(op) &&
- !flags().allow_harmony_logical_assignment()) {
- return expression;
- }
// Arrow functions.
if (V8_UNLIKELY(op == Token::ARROW)) {
@@ -3568,11 +3564,7 @@ ParserBase<Impl>::ParseMemberWithPresentNewPrefixesExpression() {
bool has_spread;
ParseArguments(&args, &has_spread);
- if (has_spread) {
- result = impl()->SpreadCallNew(result, args, new_pos);
- } else {
- result = factory()->NewCallNew(result, args, new_pos);
- }
+ result = factory()->NewCallNew(result, args, new_pos, has_spread);
}
// The expression can still continue with . or [ after the arguments.
return ParseMemberExpressionContinuation(result);
@@ -3586,7 +3578,7 @@ ParserBase<Impl>::ParseMemberWithPresentNewPrefixesExpression() {
// NewExpression without arguments.
ExpressionListT args(pointer_buffer());
- return factory()->NewCallNew(result, args, new_pos);
+ return factory()->NewCallNew(result, args, new_pos, false);
}
template <typename Impl>
@@ -5032,14 +5024,18 @@ void ParserBase<Impl>::ParseStatementList(StatementListT* body,
while (peek() == Token::STRING) {
bool use_strict = false;
+#if V8_ENABLE_WEBASSEMBLY
bool use_asm = false;
+#endif // V8_ENABLE_WEBASSEMBLY
Scanner::Location token_loc = scanner()->peek_location();
if (scanner()->NextLiteralExactlyEquals("use strict")) {
use_strict = true;
+#if V8_ENABLE_WEBASSEMBLY
} else if (scanner()->NextLiteralExactlyEquals("use asm")) {
use_asm = true;
+#endif // V8_ENABLE_WEBASSEMBLY
}
StatementT stat = ParseStatementListItem();
@@ -5061,9 +5057,11 @@ void ParserBase<Impl>::ParseStatementList(StatementListT* body,
"use strict");
return;
}
+#if V8_ENABLE_WEBASSEMBLY
} else if (use_asm) {
// Directive "use asm".
impl()->SetAsmModule();
+#endif // V8_ENABLE_WEBASSEMBLY
} else {
// Possibly an unknown directive.
// Should not change mode, but will increment usage counters
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index 4e9428c1c38..92e11f6b9cc 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -490,12 +490,14 @@ void Parser::DeserializeScopeChain(
namespace {
void MaybeResetCharacterStream(ParseInfo* info, FunctionLiteral* literal) {
+#if V8_ENABLE_WEBASSEMBLY
// Don't reset the character stream if there is an asm.js module since it will
// be used again by the asm-parser.
if (info->contains_asm_module()) {
if (FLAG_stress_validate_asm) return;
if (literal != nullptr && literal->scope()->ContainsAsmModule()) return;
}
+#endif // V8_ENABLE_WEBASSEMBLY
info->ResetCharacterStream();
}
@@ -751,8 +753,8 @@ void Parser::ParseWrapped(Isolate* isolate, ParseInfo* info,
kNoSourcePosition, FunctionSyntaxKind::kWrapped, LanguageMode::kSloppy,
arguments_for_wrapped_function);
- Statement* return_statement = factory()->NewReturnStatement(
- function_literal, kNoSourcePosition, kNoSourcePosition);
+ Statement* return_statement =
+ factory()->NewReturnStatement(function_literal, kNoSourcePosition);
body->Add(return_statement);
}
@@ -1998,8 +2000,8 @@ void Parser::ParseAndRewriteAsyncGeneratorFunctionBody(
Expression* reject_call = factory()->NewCallRuntime(
Runtime::kInlineAsyncGeneratorReject, reject_args, kNoSourcePosition);
- catch_block = IgnoreCompletion(
- factory()->NewReturnStatement(reject_call, kNoSourcePosition));
+ catch_block = IgnoreCompletion(factory()->NewReturnStatement(
+ reject_call, kNoSourcePosition, kNoSourcePosition));
}
{
@@ -2866,8 +2868,8 @@ Block* Parser::BuildRejectPromiseOnException(Block* inner_block,
reject_promise = factory()->NewCallRuntime(
Runtime::kInlineAsyncFunctionReject, args, kNoSourcePosition);
}
- Block* catch_block = IgnoreCompletion(
- factory()->NewReturnStatement(reject_promise, kNoSourcePosition));
+ Block* catch_block = IgnoreCompletion(factory()->NewReturnStatement(
+ reject_promise, kNoSourcePosition, kNoSourcePosition));
// Treat the exception for REPL mode scripts as UNCAUGHT. This will
// keep the corresponding JSMessageObject alive on the Isolate. The
@@ -3343,19 +3345,6 @@ Expression* Parser::CloseTemplateLiteral(TemplateLiteralState* state, int start,
}
}
-namespace {
-
-bool OnlyLastArgIsSpread(const ScopedPtrList<Expression>& args) {
- for (int i = 0; i < args.length() - 1; i++) {
- if (args.at(i)->IsSpread()) {
- return false;
- }
- }
- return args.at(args.length() - 1)->IsSpread();
-}
-
-} // namespace
-
ArrayLiteral* Parser::ArrayLiteralFromListWithSpread(
const ScopedPtrList<Expression>& list) {
// If there's only a single spread argument, a fast path using CallWithSpread
@@ -3372,21 +3361,6 @@ ArrayLiteral* Parser::ArrayLiteralFromListWithSpread(
return factory()->NewArrayLiteral(list, first_spread, kNoSourcePosition);
}
-Expression* Parser::SpreadCallNew(Expression* function,
- const ScopedPtrList<Expression>& args_list,
- int pos) {
- // TODO(syg): Handle all spread cases in BytecodeGenerator.
- if (OnlyLastArgIsSpread(args_list)) {
- // Handle in BytecodeGenerator.
- return factory()->NewCallNew(function, args_list, pos);
- }
- ScopedPtrList<Expression> args(pointer_buffer());
- args.Add(function);
- args.Add(ArrayLiteralFromListWithSpread(args_list));
-
- return factory()->NewCallRuntime(Context::REFLECT_CONSTRUCT_INDEX, args, pos);
-}
-
void Parser::SetLanguageMode(Scope* scope, LanguageMode mode) {
v8::Isolate::UseCounterFeature feature;
if (is_sloppy(mode))
@@ -3399,6 +3373,7 @@ void Parser::SetLanguageMode(Scope* scope, LanguageMode mode) {
scope->SetLanguageMode(mode);
}
+#if V8_ENABLE_WEBASSEMBLY
void Parser::SetAsmModule() {
// Store the usage count; The actual use counter on the isolate is
// incremented after parsing is done.
@@ -3407,6 +3382,7 @@ void Parser::SetAsmModule() {
scope()->AsDeclarationScope()->set_is_asm_module();
info_->set_contains_asm_module(true);
}
+#endif // V8_ENABLE_WEBASSEMBLY
Expression* Parser::ExpressionListToExpression(
const ScopedPtrList<Expression>& args) {
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index 027867584b6..4ede3035036 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -493,12 +493,12 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
ArrayLiteral* ArrayLiteralFromListWithSpread(
const ScopedPtrList<Expression>& list);
- Expression* SpreadCallNew(Expression* function,
- const ScopedPtrList<Expression>& args, int pos);
Expression* RewriteSuperCall(Expression* call_expression);
void SetLanguageMode(Scope* scope, LanguageMode mode);
+#if V8_ENABLE_WEBASSEMBLY
void SetAsmModule();
+#endif // V8_ENABLE_WEBASSEMBLY
Expression* RewriteSpreads(ArrayLiteral* lit);
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index cee84c1944a..93f5dc5cf21 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -662,7 +662,7 @@ class PreParserFactory {
}
PreParserExpression NewCallNew(const PreParserExpression& expression,
const PreParserExpressionList& arguments,
- int pos) {
+ int pos, bool has_spread) {
return PreParserExpression::Default();
}
PreParserStatement NewReturnStatement(
@@ -1050,10 +1050,6 @@ class PreParser : public ParserBase<PreParser> {
}
V8_INLINE void SetAsmModule() {}
- V8_INLINE PreParserExpression
- SpreadCallNew(const PreParserExpression& function,
- const PreParserExpressionList& args, int pos);
-
V8_INLINE void PrepareGeneratorVariables() {}
V8_INLINE void RewriteAsyncFunctionBody(
const PreParserScopedStatementList* body, PreParserStatement block,
@@ -1678,12 +1674,6 @@ class PreParser : public ParserBase<PreParser> {
std::vector<void*> preparse_data_builder_buffer_;
};
-PreParserExpression PreParser::SpreadCallNew(
- const PreParserExpression& function, const PreParserExpressionList& args,
- int pos) {
- return factory()->NewCallNew(function, args, pos);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index 7eedc0c05c8..434b83676c0 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -412,6 +412,104 @@ bool BufferedUtf16CharacterStream::ReadBlock() {
}
// ----------------------------------------------------------------------------
+// Windows1252CharacterStream - chunked streaming of windows-1252 data.
+//
+// Similar to BufferedCharacterStream, but translates the windows-1252 code
+// points that are incompatible with their latin-1 equivalents.
+
+namespace {
+
+static const uc16 kWindows1252ToUC16[256] = {
+ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, // 00-07
+ 0x0008, 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x000E, 0x000F, // 08-0F
+ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, // 10-17
+ 0x0018, 0x0019, 0x001A, 0x001B, 0x001C, 0x001D, 0x001E, 0x001F, // 18-1F
+ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, // 20-27
+ 0x0028, 0x0029, 0x002A, 0x002B, 0x002C, 0x002D, 0x002E, 0x002F, // 28-2F
+ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, // 30-37
+ 0x0038, 0x0039, 0x003A, 0x003B, 0x003C, 0x003D, 0x003E, 0x003F, // 38-3F
+ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, // 40-47
+ 0x0048, 0x0049, 0x004A, 0x004B, 0x004C, 0x004D, 0x004E, 0x004F, // 48-4F
+ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, // 50-57
+ 0x0058, 0x0059, 0x005A, 0x005B, 0x005C, 0x005D, 0x005E, 0x005F, // 58-5F
+ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, // 60-67
+ 0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F, // 68-6F
+ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, // 70-77
+ 0x0078, 0x0079, 0x007A, 0x007B, 0x007C, 0x007D, 0x007E, 0x007F, // 78-7F
+ 0x20AC, 0x0081, 0x201A, 0x0192, 0x201E, 0x2026, 0x2020, 0x2021, // 80-87
+ 0x02C6, 0x2030, 0x0160, 0x2039, 0x0152, 0x008D, 0x017D, 0x008F, // 88-8F
+ 0x0090, 0x2018, 0x2019, 0x201C, 0x201D, 0x2022, 0x2013, 0x2014, // 90-97
+ 0x02DC, 0x2122, 0x0161, 0x203A, 0x0153, 0x009D, 0x017E, 0x0178, // 98-9F
+ 0x00A0, 0x00A1, 0x00A2, 0x00A3, 0x00A4, 0x00A5, 0x00A6, 0x00A7, // A0-A7
+ 0x00A8, 0x00A9, 0x00AA, 0x00AB, 0x00AC, 0x00AD, 0x00AE, 0x00AF, // A8-AF
+ 0x00B0, 0x00B1, 0x00B2, 0x00B3, 0x00B4, 0x00B5, 0x00B6, 0x00B7, // B0-B7
+ 0x00B8, 0x00B9, 0x00BA, 0x00BB, 0x00BC, 0x00BD, 0x00BE, 0x00BF, // B8-BF
+ 0x00C0, 0x00C1, 0x00C2, 0x00C3, 0x00C4, 0x00C5, 0x00C6, 0x00C7, // C0-C7
+ 0x00C8, 0x00C9, 0x00CA, 0x00CB, 0x00CC, 0x00CD, 0x00CE, 0x00CF, // C8-CF
+ 0x00D0, 0x00D1, 0x00D2, 0x00D3, 0x00D4, 0x00D5, 0x00D6, 0x00D7, // D0-D7
+ 0x00D8, 0x00D9, 0x00DA, 0x00DB, 0x00DC, 0x00DD, 0x00DE, 0x00DF, // D8-DF
+ 0x00E0, 0x00E1, 0x00E2, 0x00E3, 0x00E4, 0x00E5, 0x00E6, 0x00E7, // E0-E7
+ 0x00E8, 0x00E9, 0x00EA, 0x00EB, 0x00EC, 0x00ED, 0x00EE, 0x00EF, // E8-EF
+ 0x00F0, 0x00F1, 0x00F2, 0x00F3, 0x00F4, 0x00F5, 0x00F6, 0x00F7, // F0-F7
+ 0x00F8, 0x00F9, 0x00FA, 0x00FB, 0x00FC, 0x00FD, 0x00FE, 0x00FF // F8-FF
+};
+
+} // namespace
+
+class Windows1252CharacterStream final : public Utf16CharacterStream {
+ public:
+ Windows1252CharacterStream(
+ size_t pos, ScriptCompiler::ExternalSourceStream* source_stream)
+ : byte_stream_(source_stream) {
+ buffer_pos_ = pos;
+ }
+
+ bool can_be_cloned() const final {
+ return ChunkedStream<uint16_t>::kCanBeCloned;
+ }
+
+ std::unique_ptr<Utf16CharacterStream> Clone() const override {
+ CHECK(can_be_cloned());
+ return std::unique_ptr<Utf16CharacterStream>(
+ new Windows1252CharacterStream(*this));
+ }
+
+ protected:
+ bool ReadBlock() final {
+ size_t position = pos();
+ buffer_pos_ = position;
+ buffer_start_ = &buffer_[0];
+ buffer_cursor_ = buffer_start_;
+
+ DisallowGarbageCollection no_gc;
+ Range<uint8_t> range =
+ byte_stream_.GetDataAt(position, runtime_call_stats(), &no_gc);
+ if (range.length() == 0) {
+ buffer_end_ = buffer_start_;
+ return false;
+ }
+
+ size_t length = std::min({kBufferSize, range.length()});
+ std::transform(range.start, range.start + length, &buffer_[0],
+ [](uint8_t c) { return kWindows1252ToUC16[c]; });
+ buffer_end_ = &buffer_[length];
+ return true;
+ }
+
+ bool can_access_heap() const final {
+ return ChunkedStream<uint8_t>::kCanAccessHeap;
+ }
+
+ private:
+ Windows1252CharacterStream(const Windows1252CharacterStream& other)
+ V8_NOEXCEPT : byte_stream_(other.byte_stream_) {}
+
+ static const size_t kBufferSize = 512;
+ uc16 buffer_[kBufferSize];
+ ChunkedStream<uint8_t> byte_stream_;
+};
+
+// ----------------------------------------------------------------------------
// Utf8ExternalStreamingStream - chunked streaming of Utf-8 data.
//
// This implementation is fairly complex, since data arrives in chunks which
@@ -833,6 +931,9 @@ Utf16CharacterStream* ScannerStream::For(
case v8::ScriptCompiler::StreamedSource::ONE_BYTE:
return new BufferedCharacterStream<ChunkedStream>(static_cast<size_t>(0),
source_stream);
+ case v8::ScriptCompiler::StreamedSource::WINDOWS_1252:
+ return new Windows1252CharacterStream(static_cast<size_t>(0),
+ source_stream);
case v8::ScriptCompiler::StreamedSource::UTF8:
return new Utf8ExternalStreamingStream(source_stream);
}
diff --git a/deps/v8/src/profiler/OWNERS b/deps/v8/src/profiler/OWNERS
index 28a7353ef46..5b1221db01b 100644
--- a/deps/v8/src/profiler/OWNERS
+++ b/deps/v8/src/profiler/OWNERS
@@ -1,4 +1,5 @@
-alph@chromium.org
-petermarshall@chromium.org
+bmeurer@chromium.org
+cbruni@chromium.org
+yangguo@chromium.org
per-file *heap*=ulan@chromium.org
diff --git a/deps/v8/src/profiler/cpu-profiler-inl.h b/deps/v8/src/profiler/cpu-profiler-inl.h
index 3e2e1e56c20..220f879fd66 100644
--- a/deps/v8/src/profiler/cpu-profiler-inl.h
+++ b/deps/v8/src/profiler/cpu-profiler-inl.h
@@ -47,13 +47,17 @@ void ReportBuiltinEventRecord::UpdateCodeMap(CodeMap* code_map) {
CodeEntry* entry = code_map->FindEntry(instruction_start);
if (entry) {
entry->SetBuiltinId(builtin_id);
- } else if (builtin_id == Builtins::kGenericJSToWasmWrapper) {
+ return;
+ }
+#if V8_ENABLE_WEBASSEMBLY
+ if (builtin_id == Builtins::kGenericJSToWasmWrapper) {
// Make sure to add the generic js-to-wasm wrapper builtin, because that
// one is supposed to show up in profiles.
entry = new CodeEntry(CodeEventListener::BUILTIN_TAG,
Builtins::name(builtin_id));
code_map->AddCode(instruction_start, entry, instruction_size);
}
+#endif // V8_ENABLE_WEBASSEMBLY
}
TickSample* SamplingEventsProcessor::StartTickSample() {
@@ -64,8 +68,9 @@ TickSample* SamplingEventsProcessor::StartTickSample() {
return &evt->sample;
}
-void BytecodeFlushEventRecord::UpdateCodeMap(CodeMap* code_map) {
- code_map->ClearCodesInRange(instruction_start, instruction_start + 1);
+void CodeDeleteEventRecord::UpdateCodeMap(CodeMap* code_map) {
+ bool removed = code_map->RemoveCode(entry);
+ CHECK(removed);
}
void SamplingEventsProcessor::FinishTickSample() {
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index a161f4bce42..eba513b39d5 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -20,7 +20,10 @@
#include "src/profiler/profiler-stats.h"
#include "src/profiler/symbolizer.h"
#include "src/utils/locked-queue-inl.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-engine.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -72,7 +75,9 @@ ProfilingScope::ProfilingScope(Isolate* isolate, ProfilerListener* listener)
profiler_count++;
isolate_->set_num_cpu_profilers(profiler_count);
isolate_->set_is_profiling(true);
+#if V8_ENABLE_WEBASSEMBLY
isolate_->wasm_engine()->EnableCodeLogging(isolate_);
+#endif // V8_ENABLE_WEBASSEMBLY
Logger* logger = isolate_->logger();
logger->AddCodeEventListener(listener_);
@@ -196,7 +201,7 @@ void ProfilerEventsProcessor::CodeEventHandler(
case CodeEventRecord::CODE_CREATION:
case CodeEventRecord::CODE_MOVE:
case CodeEventRecord::CODE_DISABLE_OPT:
- case CodeEventRecord::BYTECODE_FLUSH:
+ case CodeEventRecord::CODE_DELETE:
Enqueue(evt_rec);
break;
case CodeEventRecord::CODE_DEOPT: {
@@ -323,12 +328,16 @@ void* SamplingEventsProcessor::operator new(size_t size) {
void SamplingEventsProcessor::operator delete(void* ptr) { AlignedFree(ptr); }
ProfilerCodeObserver::ProfilerCodeObserver(Isolate* isolate)
- : isolate_(isolate), code_map_(strings_), processor_(nullptr) {
+ : isolate_(isolate),
+ code_map_(strings_),
+ weak_code_registry_(isolate),
+ processor_(nullptr) {
CreateEntriesForRuntimeCallStats();
LogBuiltins();
}
void ProfilerCodeObserver::ClearCodeMap() {
+ weak_code_registry_.Clear();
code_map_.Clear();
// We don't currently expect any references to refcounted strings to be
// maintained with zero profiles after the code map is cleared.
@@ -508,9 +517,9 @@ void CpuProfiler::EnableLogging() {
if (profiling_scope_) return;
if (!profiler_listener_) {
- profiler_listener_.reset(
- new ProfilerListener(isolate_, code_observer_.get(),
- *code_observer_->strings(), naming_mode_));
+ profiler_listener_.reset(new ProfilerListener(
+ isolate_, code_observer_.get(), *code_observer_->strings(),
+ *code_observer_->weak_code_registry(), naming_mode_));
}
profiling_scope_.reset(
new ProfilingScope(isolate_, profiler_listener_.get()));
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index 25084a42657..d605a8c3d3d 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -35,7 +35,7 @@ class Symbolizer;
V(CODE_DISABLE_OPT, CodeDisableOptEventRecord) \
V(CODE_DEOPT, CodeDeoptEventRecord) \
V(REPORT_BUILTIN, ReportBuiltinEventRecord) \
- V(BYTECODE_FLUSH, BytecodeFlushEventRecord)
+ V(CODE_DELETE, CodeDeleteEventRecord)
class CodeEventRecord {
public:
@@ -112,9 +112,9 @@ class TickSampleEventRecord {
TickSample sample;
};
-class BytecodeFlushEventRecord : public CodeEventRecord {
+class CodeDeleteEventRecord : public CodeEventRecord {
public:
- Address instruction_start;
+ CodeEntry* entry;
V8_INLINE void UpdateCodeMap(CodeMap* code_map);
};
@@ -255,6 +255,7 @@ class V8_EXPORT_PRIVATE ProfilerCodeObserver : public CodeEventObserver {
void CodeEventHandler(const CodeEventsContainer& evt_rec) override;
CodeMap* code_map() { return &code_map_; }
StringsStorage* strings() { return &strings_; }
+ WeakCodeRegistry* weak_code_registry() { return &weak_code_registry_; }
void ClearCodeMap();
@@ -279,6 +280,7 @@ class V8_EXPORT_PRIVATE ProfilerCodeObserver : public CodeEventObserver {
Isolate* const isolate_;
StringsStorage strings_;
CodeMap code_map_;
+ WeakCodeRegistry weak_code_registry_;
ProfilerEventsProcessor* processor_;
};
diff --git a/deps/v8/src/profiler/heap-profiler.cc b/deps/v8/src/profiler/heap-profiler.cc
index e62f0b22e78..8a7ed34d46e 100644
--- a/deps/v8/src/profiler/heap-profiler.cc
+++ b/deps/v8/src/profiler/heap-profiler.cc
@@ -8,6 +8,7 @@
#include "src/debug/debug.h"
#include "src/heap/combined-heap.h"
#include "src/heap/heap-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "src/profiler/allocation-tracker.h"
#include "src/profiler/heap-snapshot-generator-inl.h"
#include "src/profiler/sampling-heap-profiler.h"
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 436dbe77976..9cc26fa3e20 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -635,7 +635,7 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject object) {
} else if (object.IsContext()) {
return AddEntry(object, HeapEntry::kObject, "system / Context");
} else if (object.IsFixedArray() || object.IsFixedDoubleArray() ||
- object.IsByteArray() || object.IsScopeInfo()) {
+ object.IsByteArray()) {
return AddEntry(object, HeapEntry::kArray, "");
} else if (object.IsHeapNumber()) {
return AddEntry(object, HeapEntry::kHeapNumber, "number");
@@ -1078,7 +1078,7 @@ void V8HeapExplorer::ExtractMapReferences(HeapEntry* entry, Map map) {
Map::kTransitionsOrPrototypeInfoOffset);
}
}
- DescriptorArray descriptors = map.instance_descriptors(kRelaxedLoad);
+ DescriptorArray descriptors = map.instance_descriptors();
TagObject(descriptors, "(map descriptors)");
SetInternalReference(entry, "descriptors", descriptors,
Map::kInstanceDescriptorsOffset);
@@ -1184,10 +1184,17 @@ void V8HeapExplorer::ExtractCodeReferences(HeapEntry* entry, Code code) {
TagObject(code.deoptimization_data(), "(code deopt data)");
SetInternalReference(entry, "deoptimization_data", code.deoptimization_data(),
Code::kDeoptimizationDataOffset);
- TagObject(code.source_position_table(), "(source position table)");
- SetInternalReference(entry, "source_position_table",
- code.source_position_table(),
- Code::kSourcePositionTableOffset);
+ if (code.kind() == CodeKind::BASELINE) {
+ TagObject(code.bytecode_offset_table(), "(bytecode offset table)");
+ SetInternalReference(entry, "bytecode_offset_table",
+ code.bytecode_offset_table(),
+ Code::kPositionTableOffset);
+ } else {
+ TagObject(code.source_position_table(), "(source position table)");
+ SetInternalReference(entry, "source_position_table",
+ code.source_position_table(),
+ Code::kPositionTableOffset);
+ }
}
void V8HeapExplorer::ExtractCellReferences(HeapEntry* entry, Cell cell) {
@@ -1333,7 +1340,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject js_obj,
HeapEntry* entry) {
Isolate* isolate = js_obj.GetIsolate();
if (js_obj.HasFastProperties()) {
- DescriptorArray descs = js_obj.map().instance_descriptors(kRelaxedLoad);
+ DescriptorArray descs = js_obj.map().instance_descriptors(isolate);
for (InternalIndex i : js_obj.map().IterateOwnDescriptors()) {
PropertyDetails details = descs.GetDetails(i);
switch (details.location()) {
@@ -1370,8 +1377,12 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject js_obj,
PropertyDetails details = cell.property_details();
SetDataOrAccessorPropertyReference(details.kind(), entry, name, value);
}
- } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- OrderedNameDictionary dictionary = js_obj.property_dictionary_ordered();
+ } else if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ // SwissNameDictionary::IterateEntries creates a Handle, which should not
+ // leak out of here.
+ HandleScope scope(isolate);
+
+ SwissNameDictionary dictionary = js_obj.property_dictionary_swiss();
ReadOnlyRoots roots(isolate);
for (InternalIndex i : dictionary.IterateEntries()) {
Object k = dictionary.KeyAt(i);
@@ -1497,10 +1508,10 @@ class RootsReferencesExtractor : public RootVisitor {
OffHeapObjectSlot start,
OffHeapObjectSlot end) override {
DCHECK_EQ(root, Root::kStringTable);
- IsolateRoot isolate = Isolate::FromHeap(explorer_->heap_);
+ PtrComprCageBase cage_base = Isolate::FromHeap(explorer_->heap_);
for (OffHeapObjectSlot p = start; p < end; ++p) {
explorer_->SetGcSubrootReference(root, description, visiting_weak_roots_,
- p.load(isolate));
+ p.load(cage_base));
}
}
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index 9f150f1e48f..375079de3e8 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -314,7 +314,6 @@ CpuProfileNode::SourceType ProfileNode::source_type() const {
case CodeEventListener::SCRIPT_TAG:
case CodeEventListener::LAZY_COMPILE_TAG:
case CodeEventListener::FUNCTION_TAG:
- case CodeEventListener::INTERPRETED_FUNCTION_TAG:
return CpuProfileNode::kScript;
case CodeEventListener::BUILTIN_TAG:
case CodeEventListener::HANDLER_TAG:
@@ -335,7 +334,6 @@ CpuProfileNode::SourceType ProfileNode::source_type() const {
case CodeEventListener::SHARED_FUNC_MOVE_EVENT:
case CodeEventListener::SNAPSHOT_CODE_NAME_EVENT:
case CodeEventListener::TICK_EVENT:
- case CodeEventListener::BYTECODE_FLUSH_EVENT:
case CodeEventListener::NUMBER_OF_LOG_EVENTS:
return CpuProfileNode::kInternal;
}
@@ -756,8 +754,24 @@ void CodeMap::Clear() {
}
void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
- ClearCodesInRange(addr, addr + size);
code_map_.emplace(addr, CodeEntryMapInfo{entry, size});
+ entry->set_instruction_start(addr);
+}
+
+bool CodeMap::RemoveCode(CodeEntry* entry) {
+ auto range = code_map_.equal_range(entry->instruction_start());
+ for (auto i = range.first; i != range.second; ++i) {
+ if (i->second.entry == entry) {
+ if (!entry->used()) {
+ DeleteCodeEntry(entry);
+ } else {
+ used_entries_.push_back(entry);
+ }
+ code_map_.erase(i);
+ return true;
+ }
+ }
+ return false;
}
void CodeMap::ClearCodesInRange(Address start, Address end) {
@@ -778,6 +792,9 @@ void CodeMap::ClearCodesInRange(Address start, Address end) {
}
CodeEntry* CodeMap::FindEntry(Address addr, Address* out_instruction_start) {
+ // Note that an address may correspond to multiple CodeEntry objects. An
+ // arbitrary selection is made (as per multimap spec) in the event of a
+ // collision.
auto it = code_map_.upper_bound(addr);
if (it == code_map_.begin()) return nullptr;
--it;
@@ -791,13 +808,25 @@ CodeEntry* CodeMap::FindEntry(Address addr, Address* out_instruction_start) {
void CodeMap::MoveCode(Address from, Address to) {
if (from == to) return;
- auto it = code_map_.find(from);
- if (it == code_map_.end()) return;
- CodeEntryMapInfo info = it->second;
- code_map_.erase(it);
- DCHECK(from + info.size <= to || to + info.size <= from);
- ClearCodesInRange(to, to + info.size);
- code_map_.emplace(to, info);
+
+ auto range = code_map_.equal_range(from);
+  // Iterate over a fixed element count instead of iterating until
+  // |range.second|, because |range.second| may no longer point one past the
+  // end of the range of equal keys once the loop inserts entries at |to|.
+ size_t distance = std::distance(range.first, range.second);
+ auto it = range.first;
+ while (distance--) {
+ CodeEntryMapInfo& info = it->second;
+ DCHECK(info.entry);
+ DCHECK_EQ(info.entry->instruction_start(), from);
+ info.entry->set_instruction_start(to);
+
+ DCHECK(from + info.size <= to || to + info.size <= from);
+ code_map_.emplace(to, info);
+ it++;
+ }
+
+ code_map_.erase(range.first, it);
}
void CodeMap::DeleteCodeEntry(CodeEntry* entry) {
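
CodeMap::MoveCode above walks the equal_range of the old address but counts elements rather than comparing against range.second, because the entries the loop inserts under the new address can land positionally before that iterator. A standalone sketch of the counted traversal over a plain std::multimap, not part of the patch:

#include <cassert>
#include <iterator>
#include <map>
#include <string>

// Moves every value stored under |from| to |to|, counting elements so the
// loop never revisits the entries it just inserted under the new key.
void MoveAll(std::multimap<int, std::string>& m, int from, int to) {
  if (from == to) return;
  auto range = m.equal_range(from);
  auto distance = std::distance(range.first, range.second);
  auto it = range.first;
  while (distance--) {
    m.emplace(to, it->second);
    ++it;
  }
  m.erase(range.first, it);
}

int main() {
  std::multimap<int, std::string> code_map{{10, "a"}, {10, "b"}, {20, "c"}};
  MoveAll(code_map, 10, 15);
  assert(code_map.count(10) == 0);
  assert(code_map.count(15) == 2);
  return 0;
}
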
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index 44a40074454..551dfdf5917 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -72,6 +72,11 @@ class CodeEntry {
CodeType code_type = CodeType::JS);
CodeEntry(const CodeEntry&) = delete;
CodeEntry& operator=(const CodeEntry&) = delete;
+ ~CodeEntry() {
+ // No alive handles should be associated with the CodeEntry at time of
+ // destruction.
+ DCHECK(!heap_object_location_);
+ }
const char* name() const { return name_; }
const char* resource_name() const { return resource_name_; }
@@ -116,6 +121,13 @@ class CodeEntry {
}
}
+ // Returns the start address of the instruction segment represented by this
+ // CodeEntry. Used as a key in the containing CodeMap.
+ Address instruction_start() const { return instruction_start_; }
+ void set_instruction_start(Address address) { instruction_start_ = address; }
+
+ Address** heap_object_location_address() { return &heap_object_location_; }
+
void FillFunctionInfo(SharedFunctionInfo shared);
void SetBuiltinId(Builtins::Name id);
@@ -214,6 +226,8 @@ class CodeEntry {
int position_;
std::unique_ptr<SourcePositionTable> line_info_;
std::unique_ptr<RareData> rare_data_;
+ Address instruction_start_ = kNullAddress;
+ Address* heap_object_location_ = nullptr;
};
struct CodeEntryAndLineNumber {
@@ -420,9 +434,13 @@ class V8_EXPORT_PRIVATE CodeMap {
void AddCode(Address addr, CodeEntry* entry, unsigned size);
void MoveCode(Address from, Address to);
+ // Attempts to remove the given CodeEntry from the CodeMap.
+ // Returns true iff the entry was found and removed.
+ bool RemoveCode(CodeEntry*);
void ClearCodesInRange(Address start, Address end);
CodeEntry* FindEntry(Address addr, Address* out_instruction_start = nullptr);
void Print();
+ size_t size() const { return code_map_.size(); }
void Clear();
@@ -434,7 +452,7 @@ class V8_EXPORT_PRIVATE CodeMap {
void DeleteCodeEntry(CodeEntry*);
- std::map<Address, CodeEntryMapInfo> code_map_;
+ std::multimap<Address, CodeEntryMapInfo> code_map_;
std::deque<CodeEntry*> used_entries_; // Entries that are no longer in the
// map, but used by a profile.
StringsStorage& function_and_resource_names_;
diff --git a/deps/v8/src/profiler/profiler-listener.cc b/deps/v8/src/profiler/profiler-listener.cc
index a851a970197..8b253fb4729 100644
--- a/deps/v8/src/profiler/profiler-listener.cc
+++ b/deps/v8/src/profiler/profiler-listener.cc
@@ -19,7 +19,10 @@
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/profile-generator-inl.h"
#include "src/utils/vector.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -27,10 +30,12 @@ namespace internal {
ProfilerListener::ProfilerListener(Isolate* isolate,
CodeEventObserver* observer,
StringsStorage& function_and_resource_names,
+ WeakCodeRegistry& weak_code_registry,
CpuProfilingNamingMode naming_mode)
: isolate_(isolate),
observer_(observer),
function_and_resource_names_(function_and_resource_names),
+ weak_code_registry_(weak_code_registry),
naming_mode_(naming_mode) {}
ProfilerListener::~ProfilerListener() = default;
@@ -45,6 +50,7 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, nullptr);
rec->instruction_size = code->InstructionSize();
+ weak_code_registry_.Track(rec->entry, code);
DispatchCodeEvent(evt_rec);
}
@@ -58,6 +64,7 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, nullptr);
rec->instruction_size = code->InstructionSize();
+ weak_code_registry_.Track(rec->entry, code);
DispatchCodeEvent(evt_rec);
}
@@ -72,9 +79,10 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
GetName(InferScriptName(*script_name, *shared)),
CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, nullptr);
- DCHECK(!code->IsCode());
+ DCHECK_IMPLIES(code->IsCode(), code->kind() == CodeKind::BASELINE);
rec->entry->FillFunctionInfo(*shared);
rec->instruction_size = code->InstructionSize();
+ weak_code_registry_.Track(rec->entry, code);
DispatchCodeEvent(evt_rec);
}
@@ -116,13 +124,18 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
is_shared_cross_origin = script->origin_options().IsSharedCrossOrigin();
- // TODO(v8:11429,cbruni): improve iteration for baseline code
bool is_baseline = abstract_code->kind() == CodeKind::BASELINE;
Handle<ByteArray> source_position_table(
- abstract_code->source_position_table(), isolate_);
+ abstract_code->SourcePositionTable(*shared), isolate_);
+ std::unique_ptr<baseline::BytecodeOffsetIterator> baseline_iterator =
+ nullptr;
if (is_baseline) {
- source_position_table = handle(
- shared->GetBytecodeArray(isolate_).SourcePositionTable(), isolate_);
+ Handle<BytecodeArray> bytecodes(shared->GetBytecodeArray(isolate_),
+ isolate_);
+ Handle<ByteArray> bytecode_offsets(
+ abstract_code->GetCode().bytecode_offset_table(), isolate_);
+ baseline_iterator = std::make_unique<baseline::BytecodeOffsetIterator>(
+ bytecode_offsets, bytecodes);
}
// Add each position to the source position table and store inlining stacks
// for inline positions. We store almost the same information in the
@@ -136,10 +149,9 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
int code_offset = it.code_offset();
if (is_baseline) {
// Use the bytecode offset to calculate pc offset for baseline code.
- // TODO(v8:11429,cbruni): Speed this up.
- code_offset = static_cast<int>(
- abstract_code->GetCode().GetBaselinePCForBytecodeOffset(code_offset,
- false));
+ baseline_iterator->AdvanceToBytecodeOffset(code_offset);
+ code_offset =
+ static_cast<int>(baseline_iterator->current_pc_start_offset());
}
if (inlining_id == SourcePosition::kNotInlined) {
@@ -213,9 +225,11 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
rec->entry->FillFunctionInfo(*shared);
rec->instruction_size = abstract_code->InstructionSize();
+ weak_code_registry_.Track(rec->entry, abstract_code);
DispatchCodeEvent(evt_rec);
}
+#if V8_ENABLE_WEBASSEMBLY
void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
const wasm::WasmCode* code,
wasm::WasmName name,
@@ -224,18 +238,15 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->instruction_start = code->instruction_start();
- // Wasm modules always have a source URL. Asm.js modules never have one.
- DCHECK_EQ(code->native_module()->module()->origin == wasm::kWasmOrigin,
- source_url != nullptr);
- rec->entry = new CodeEntry(
- tag, GetName(name),
- source_url ? GetName(source_url) : CodeEntry::kEmptyResourceName, 1,
- code_offset + 1, nullptr, true, CodeEntry::CodeType::WASM);
+ rec->entry =
+ new CodeEntry(tag, GetName(name), GetName(source_url), 1, code_offset + 1,
+ nullptr, true, CodeEntry::CodeType::WASM);
rec->entry->set_script_id(script_id);
rec->entry->set_position(code_offset);
rec->instruction_size = code->instructions().length();
DispatchCodeEvent(evt_rec);
}
+#endif // V8_ENABLE_WEBASSEMBLY
void ProfilerListener::CallbackEvent(Handle<Name> name, Address entry_point) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
@@ -278,6 +289,7 @@ void ProfilerListener::RegExpCodeCreateEvent(Handle<AbstractCode> code,
CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, nullptr);
rec->instruction_size = code->InstructionSize();
+ weak_code_registry_.Track(rec->entry, code);
DispatchCodeEvent(evt_rec);
}
@@ -319,14 +331,16 @@ void ProfilerListener::CodeDeoptEvent(Handle<Code> code, DeoptimizeKind kind,
DispatchCodeEvent(evt_rec);
}
-void ProfilerListener::BytecodeFlushEvent(Address compiled_data_start) {
- CodeEventsContainer evt_rec(CodeEventRecord::BYTECODE_FLUSH);
- BytecodeFlushEventRecord* rec = &evt_rec.BytecodeFlushEventRecord_;
- rec->instruction_start = compiled_data_start + BytecodeArray::kHeaderSize;
+void ProfilerListener::WeakCodeClearEvent() { weak_code_registry_.Sweep(this); }
+void ProfilerListener::OnHeapObjectDeletion(CodeEntry* entry) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_DELETE);
+ evt_rec.CodeDeleteEventRecord_.entry = entry;
DispatchCodeEvent(evt_rec);
}
+void ProfilerListener::CodeSweepEvent() { weak_code_registry_.Sweep(this); }
+
const char* ProfilerListener::GetName(Vector<const char> name) {
// TODO(all): Change {StringsStorage} to accept non-null-terminated strings.
OwnedVector<char> null_terminated = OwnedVector<char>::New(name.size() + 1);
diff --git a/deps/v8/src/profiler/profiler-listener.h b/deps/v8/src/profiler/profiler-listener.h
index d4fd34a0065..49e7db32baa 100644
--- a/deps/v8/src/profiler/profiler-listener.h
+++ b/deps/v8/src/profiler/profiler-listener.h
@@ -11,6 +11,7 @@
#include "include/v8-profiler.h"
#include "src/logging/code-events.h"
#include "src/profiler/profile-generator.h"
+#include "src/profiler/weak-code-registry.h"
namespace v8 {
namespace internal {
@@ -24,10 +25,12 @@ class CodeEventObserver {
virtual ~CodeEventObserver() = default;
};
-class V8_EXPORT_PRIVATE ProfilerListener : public CodeEventListener {
+class V8_EXPORT_PRIVATE ProfilerListener : public CodeEventListener,
+ public WeakCodeRegistry::Listener {
public:
ProfilerListener(Isolate*, CodeEventObserver*,
StringsStorage& function_and_resource_names,
+ WeakCodeRegistry& weak_code_registry,
CpuProfilingNamingMode mode = kDebugNaming);
~ProfilerListener() override;
ProfilerListener(const ProfilerListener&) = delete;
@@ -43,9 +46,11 @@ class V8_EXPORT_PRIVATE ProfilerListener : public CodeEventListener {
void CodeCreateEvent(LogEventsAndTags tag, Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared,
Handle<Name> script_name, int line, int column) override;
+#if V8_ENABLE_WEBASSEMBLY
void CodeCreateEvent(LogEventsAndTags tag, const wasm::WasmCode* code,
wasm::WasmName name, const char* source_url,
int code_offset, int script_id) override;
+#endif // V8_ENABLE_WEBASSEMBLY
void CallbackEvent(Handle<Name> name, Address entry_point) override;
void GetterCallbackEvent(Handle<Name> name, Address entry_point) override;
@@ -62,7 +67,12 @@ class V8_EXPORT_PRIVATE ProfilerListener : public CodeEventListener {
void CodeDependencyChangeEvent(Handle<Code> code,
Handle<SharedFunctionInfo> sfi,
const char* reason) override {}
- void BytecodeFlushEvent(Address compiled_data_start) override;
+ void WeakCodeClearEvent() override;
+
+ void OnHeapObjectDeletion(CodeEntry*) override;
+
+ // Invoked after a mark-sweep cycle.
+ void CodeSweepEvent();
const char* GetName(Name name) {
return function_and_resource_names_.GetName(name);
@@ -92,6 +102,7 @@ class V8_EXPORT_PRIVATE ProfilerListener : public CodeEventListener {
Isolate* isolate_;
CodeEventObserver* observer_;
StringsStorage& function_and_resource_names_;
+ WeakCodeRegistry& weak_code_registry_;
const CpuProfilingNamingMode naming_mode_;
};
diff --git a/deps/v8/src/profiler/weak-code-registry.cc b/deps/v8/src/profiler/weak-code-registry.cc
new file mode 100644
index 00000000000..2918e1ca827
--- /dev/null
+++ b/deps/v8/src/profiler/weak-code-registry.cc
@@ -0,0 +1,62 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/profiler/weak-code-registry.h"
+
+#include "src/handles/global-handles.h"
+#include "src/objects/instance-type-inl.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+void Untrack(CodeEntry* entry) {
+ if (Address** heap_object_location_address =
+ entry->heap_object_location_address()) {
+ GlobalHandles::Destroy(*heap_object_location_address);
+ *heap_object_location_address = nullptr;
+ }
+}
+
+} // namespace
+
+void WeakCodeRegistry::Track(CodeEntry* entry, Handle<AbstractCode> code) {
+ DCHECK(!*entry->heap_object_location_address());
+ DisallowGarbageCollection no_gc;
+ Handle<AbstractCode> handle = isolate_->global_handles()->Create(*code);
+
+ Address** heap_object_location_address =
+ entry->heap_object_location_address();
+ *heap_object_location_address = handle.location();
+ GlobalHandles::MakeWeak(heap_object_location_address);
+
+ entries_.push_back(entry);
+}
+
+void WeakCodeRegistry::Sweep(WeakCodeRegistry::Listener* listener) {
+ std::vector<CodeEntry*> alive_entries;
+ for (CodeEntry* entry : entries_) {
+ // Mark the CodeEntry as being deleted on the heap if the heap object
+ // location was nulled, indicating the object was freed.
+ if (!*entry->heap_object_location_address()) {
+ if (listener) {
+ listener->OnHeapObjectDeletion(entry);
+ }
+ } else {
+ alive_entries.push_back(entry);
+ }
+ }
+ entries_ = std::move(alive_entries);
+}
+
+void WeakCodeRegistry::Clear() {
+ for (CodeEntry* entry : entries_) {
+ Untrack(entry);
+ }
+ entries_.clear();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/profiler/weak-code-registry.h b/deps/v8/src/profiler/weak-code-registry.h
new file mode 100644
index 00000000000..5e6cc1a0795
--- /dev/null
+++ b/deps/v8/src/profiler/weak-code-registry.h
@@ -0,0 +1,46 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PROFILER_WEAK_CODE_REGISTRY_H_
+#define V8_PROFILER_WEAK_CODE_REGISTRY_H_
+
+#include <vector>
+
+#include "src/execution/isolate.h"
+#include "src/objects/objects.h"
+#include "src/profiler/profile-generator.h"
+
+namespace v8 {
+namespace internal {
+
+class V8_EXPORT_PRIVATE WeakCodeRegistry {
+ public:
+ struct Listener {
+ virtual void OnHeapObjectDeletion(CodeEntry* entry) = 0;
+ };
+
+ explicit WeakCodeRegistry(Isolate* isolate) : isolate_(isolate) {}
+ ~WeakCodeRegistry() { Clear(); }
+
+ void Track(CodeEntry* entry, Handle<AbstractCode> code);
+
+ // Removes all dead code objects from the registry, invoking the provided
+ // listener (if set) for each CodeEntry whose code object is no longer
+ // referenced on the heap.
+ void Sweep(Listener* listener);
+
+ // Removes all heap object tracking from stored CodeEntries.
+ void Clear();
+
+ private:
+ Isolate* const isolate_;
+ // Invariant: Entries will always be removed here before the CodeMap is
+ // destroyed. CodeEntries should not be freed while their heap objects exist.
+ std::vector<CodeEntry*> entries_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PROFILER_WEAK_CODE_REGISTRY_H_
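
The registry above pairs Track() with a later Sweep(): tracked entries whose code objects the GC has reclaimed are reported to the listener and dropped, while live entries are kept for the next cycle. A minimal, self-contained sketch of that protocol, with a plain bool standing in for the weak global handle and all Toy* names invented:

// Hypothetical model of the registry's protocol; the real registry stores
// weak GlobalHandles, here a bool stands in for "the heap object is alive".
#include <iostream>
#include <vector>

struct ToyCodeEntry {
  const char* name;
  bool heap_object_alive = true;  // models *heap_object_location() != nullptr
};

class ToyWeakCodeRegistry {
 public:
  struct Listener {
    virtual void OnHeapObjectDeletion(ToyCodeEntry* entry) = 0;
  };

  // Track(): remember the entry; the GC (simulated by the caller here) may
  // later clear entry->heap_object_alive, like a weak handle being reset.
  void Track(ToyCodeEntry* entry) { entries_.push_back(entry); }

  // Sweep(): drop dead entries, notifying the listener for each one, and
  // keep the live ones for the next cycle.
  void Sweep(Listener* listener) {
    std::vector<ToyCodeEntry*> alive;
    for (ToyCodeEntry* entry : entries_) {
      if (!entry->heap_object_alive) {
        if (listener) listener->OnHeapObjectDeletion(entry);
      } else {
        alive.push_back(entry);
      }
    }
    entries_ = std::move(alive);
  }

 private:
  std::vector<ToyCodeEntry*> entries_;
};

struct PrintingListener : ToyWeakCodeRegistry::Listener {
  void OnHeapObjectDeletion(ToyCodeEntry* entry) override {
    std::cout << "code entry dead: " << entry->name << "\n";
  }
};

int main() {
  ToyCodeEntry a{"f"}, b{"g"};
  ToyWeakCodeRegistry registry;
  registry.Track(&a);
  registry.Track(&b);
  b.heap_object_alive = false;  // simulate the GC freeing g's code object
  PrintingListener listener;
  registry.Sweep(&listener);    // prints "code entry dead: g"; keeps f
  return 0;
}
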
diff --git a/deps/v8/src/protobuf/OWNERS b/deps/v8/src/protobuf/OWNERS
index 507f904088e..7c8128c2f2e 100644
--- a/deps/v8/src/protobuf/OWNERS
+++ b/deps/v8/src/protobuf/OWNERS
@@ -1 +1 @@
-petermarshall@chromium.org
+cbruni@chromium.org
diff --git a/deps/v8/src/regexp/OWNERS b/deps/v8/src/regexp/OWNERS
index 3322bb95056..3279279fb57 100644
--- a/deps/v8/src/regexp/OWNERS
+++ b/deps/v8/src/regexp/OWNERS
@@ -1,2 +1,2 @@
jgruber@chromium.org
-yangguo@chromium.org
+pthier@chromium.org
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index d8871904b86..0441fe29763 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -1407,16 +1407,7 @@ void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
ExternalReference::re_check_stack_guard_state(isolate());
__ Mov(scratch, check_stack_guard_state);
- {
- UseScratchRegisterScope temps(masm_);
- Register scratch = temps.AcquireX();
-
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(Builtins::kDirectCEntry);
-
- __ Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- __ Call(scratch);
- }
+ __ CallBuiltin(Builtins::kDirectCEntry);
// The input string may have been moved in memory, we need to reload it.
__ Peek(input_start(), kSystemPointerSize);
@@ -1579,6 +1570,8 @@ void RegExpMacroAssemblerARM64::CallIf(Label* to, Condition condition) {
void RegExpMacroAssemblerARM64::RestoreLinkRegister() {
+ // TODO(v8:10026): Remove when we stop compacting for code objects that are
+ // active on the call stack.
__ Pop<TurboAssembler::kAuthLR>(padreg, lr);
__ Add(lr, lr, Operand(masm_->CodeObject()));
}
diff --git a/deps/v8/src/regexp/regexp-dotprinter.cc b/deps/v8/src/regexp/regexp-dotprinter.cc
index 7cf1e82c4d0..4412960b3dc 100644
--- a/deps/v8/src/regexp/regexp-dotprinter.cc
+++ b/deps/v8/src/regexp/regexp-dotprinter.cc
@@ -13,8 +13,6 @@ namespace internal {
// -------------------------------------------------------------------
// Dot/dotty output
-#ifdef DEBUG
-
class DotPrinterImpl : public NodeVisitor {
public:
explicit DotPrinterImpl(std::ostream& os) : os_(os) {}
@@ -239,14 +237,10 @@ void DotPrinterImpl::VisitAction(ActionNode* that) {
Visit(successor);
}
-#endif // DEBUG
-
void DotPrinter::DotPrint(const char* label, RegExpNode* node) {
-#ifdef DEBUG
StdoutStream os;
DotPrinterImpl printer(os);
printer.PrintNode(label, node);
-#endif // DEBUG
}
} // namespace internal
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
index d1feec4c33d..73d85d95263 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
@@ -13,11 +13,8 @@ namespace internal {
RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer(
Isolate* isolate, RegExpMacroAssembler* assembler)
: RegExpMacroAssembler(isolate, assembler->zone()), assembler_(assembler) {
- IrregexpImplementation type = assembler->Implementation();
- DCHECK_LT(type, 9);
- const char* impl_names[] = {"IA32", "ARM", "ARM64", "MIPS", "S390",
- "PPC", "X64", "X87", "Bytecode"};
- PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
+ PrintF("RegExpMacroAssembler%s();\n",
+ ImplementationToString(assembler->Implementation()));
}
RegExpMacroAssemblerTracer::~RegExpMacroAssemblerTracer() = default;
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.h b/deps/v8/src/regexp/regexp-macro-assembler.h
index 3c2c06f64b5..789624299b4 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler.h
@@ -39,19 +39,33 @@ class RegExpMacroAssembler {
static constexpr int kUseCharactersValue = -1;
+#define IMPLEMENTATIONS_LIST(V) \
+ V(IA32) \
+ V(ARM) \
+ V(ARM64) \
+ V(MIPS) \
+ V(RISCV) \
+ V(S390) \
+ V(PPC) \
+ V(X64) \
+ V(Bytecode)
+
enum IrregexpImplementation {
- kIA32Implementation,
- kARMImplementation,
- kARM64Implementation,
- kMIPSImplementation,
- kRISCVImplementation,
- kS390Implementation,
- kPPCImplementation,
- kX64Implementation,
- kX87Implementation,
- kBytecodeImplementation
+#define V(Name) k##Name##Implementation,
+ IMPLEMENTATIONS_LIST(V)
+#undef V
};
+ inline const char* ImplementationToString(IrregexpImplementation impl) {
+ static const char* const kNames[] = {
+#define V(Name) #Name,
+ IMPLEMENTATIONS_LIST(V)
+#undef V
+ };
+ return kNames[impl];
+ }
+#undef IMPLEMENTATIONS_LIST
+
enum StackCheckFlag {
kNoStackLimitCheck = false,
kCheckStackLimit = true
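
The IMPLEMENTATIONS_LIST change above applies the X-macro idiom: a single list expands into both the enum and the matching name table, so the two cannot drift apart the way the old hand-written impl_names array in regexp-macro-assembler-tracer.cc could. A tiny standalone sketch of the same idiom, with made-up names:

#include <cstdio>

#define COLOR_LIST(V) \
  V(Red)              \
  V(Green)            \
  V(Blue)

enum Color {
#define V(Name) k##Name,
  COLOR_LIST(V)
#undef V
};

inline const char* ColorToString(Color c) {
  static const char* const kNames[] = {
#define V(Name) #Name,
      COLOR_LIST(V)
#undef V
  };
  return kNames[c];
}

int main() {
  std::printf("%s\n", ColorToString(kGreen));  // prints "Green"
  return 0;
}
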
diff --git a/deps/v8/src/regexp/regexp-stack.cc b/deps/v8/src/regexp/regexp-stack.cc
index 9a80f6f211f..6d73b7c03d6 100644
--- a/deps/v8/src/regexp/regexp-stack.cc
+++ b/deps/v8/src/regexp/regexp-stack.cc
@@ -12,8 +12,7 @@ namespace internal {
RegExpStackScope::RegExpStackScope(Isolate* isolate)
: regexp_stack_(isolate->regexp_stack()) {
- // Initialize, if not already initialized.
- regexp_stack_->EnsureCapacity(0);
+ DCHECK(regexp_stack_->IsValid());
// Irregexp is not reentrant in several ways; in particular, the
// RegExpStackScope is not reentrant since the destructor frees allocated
// memory. Protect against reentrancy here.
@@ -80,8 +79,8 @@ void RegExpStack::ThreadLocal::FreeAndInvalidate() {
Address RegExpStack::EnsureCapacity(size_t size) {
if (size > kMaximumStackSize) return kNullAddress;
- if (size < kMinimumDynamicStackSize) size = kMinimumDynamicStackSize;
if (thread_local_.memory_size_ < size) {
+ if (size < kMinimumDynamicStackSize) size = kMinimumDynamicStackSize;
byte* new_memory = NewArray<byte>(size);
if (thread_local_.memory_size_ > 0) {
// Copy original memory into top of new memory.
diff --git a/deps/v8/src/regexp/regexp-stack.h b/deps/v8/src/regexp/regexp-stack.h
index d527f0e0fd3..adca683ff89 100644
--- a/deps/v8/src/regexp/regexp-stack.h
+++ b/deps/v8/src/regexp/regexp-stack.h
@@ -133,6 +133,9 @@ class RegExpStack {
// you have to call EnsureCapacity before using it again.
void Reset();
+ // Whether the ThreadLocal storage is valid, i.e. has not been invalidated.
+ bool IsValid() const { return thread_local_.memory_ != nullptr; }
+
ThreadLocal thread_local_;
Isolate* isolate_;
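
Together, the two regexp-stack hunks above make EnsureCapacity apply the kMinimumDynamicStackSize floor only when the stack actually has to grow, and let RegExpStackScope assert validity instead of re-initializing. A simplified sketch of the growth logic, with invented constants and std::vector standing in for the raw allocation:

#include <cstddef>
#include <cstdint>
#include <vector>

// Invented stand-ins for the real limits.
constexpr size_t kMinimumDynamicStackSize = 1 * 1024;    // 1 KB
constexpr size_t kMaximumStackSize = 64 * 1024 * 1024;   // 64 MB

class ToyStack {
 public:
  // Returns the address one past the usable memory, or nullptr on failure,
  // mirroring the shape of RegExpStack::EnsureCapacity.
  uint8_t* EnsureCapacity(size_t size) {
    if (size > kMaximumStackSize) return nullptr;
    if (memory_.size() < size) {
      // The minimum only matters when we actually (re)allocate; a request
      // that already fits no longer gets rounded up.
      if (size < kMinimumDynamicStackSize) size = kMinimumDynamicStackSize;
      memory_.resize(size);  // std::vector keeps the old contents
    }
    return memory_.data() + memory_.size();
  }

 private:
  std::vector<uint8_t> memory_;
};

int main() {
  ToyStack stack;
  return stack.EnsureCapacity(16) != nullptr ? 0 : 1;  // grows to the 1 KB floor
}
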
diff --git a/deps/v8/src/regexp/regexp-utils.cc b/deps/v8/src/regexp/regexp-utils.cc
index 07d1b5d8f38..8bb243d6110 100644
--- a/deps/v8/src/regexp/regexp-utils.cc
+++ b/deps/v8/src/regexp/regexp-utils.cc
@@ -173,8 +173,8 @@ bool RegExpUtils::IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj) {
// with the init order in the bootstrapper).
InternalIndex kExecIndex(JSRegExp::kExecFunctionDescriptorIndex);
DCHECK_EQ(*(isolate->factory()->exec_string()),
- proto_map.instance_descriptors(kRelaxedLoad).GetKey(kExecIndex));
- if (proto_map.instance_descriptors(kRelaxedLoad)
+ proto_map.instance_descriptors(isolate).GetKey(kExecIndex));
+ if (proto_map.instance_descriptors(isolate)
.GetDetails(kExecIndex)
.constness() != PropertyConstness::kConst) {
return false;
diff --git a/deps/v8/src/regexp/regexp.cc b/deps/v8/src/regexp/regexp.cc
index 5f83269a8f1..e4ddb3ecb2b 100644
--- a/deps/v8/src/regexp/regexp.cc
+++ b/deps/v8/src/regexp/regexp.cc
@@ -6,6 +6,7 @@
#include "src/codegen/compilation-cache.h"
#include "src/diagnostics/code-tracer.h"
+#include "src/execution/interrupts-scope.h"
#include "src/heap/heap-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/regexp/experimental/experimental.h"
@@ -820,6 +821,8 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
return false;
}
+ if (FLAG_trace_regexp_graph) DotPrinter::DotPrint("Start", data->node);
+
// Create the correct assembler for the architecture.
std::unique_ptr<RegExpMacroAssembler> macro_assembler;
if (data->compilation_target == RegExpCompilationTarget::kNative) {
diff --git a/deps/v8/src/roots/OWNERS b/deps/v8/src/roots/OWNERS
index aaffe920bb0..9fa42ec3a0a 100644
--- a/deps/v8/src/roots/OWNERS
+++ b/deps/v8/src/roots/OWNERS
@@ -5,5 +5,4 @@ ishell@chromium.org
jgruber@chromium.org
jkummerow@chromium.org
marja@chromium.org
-sigurds@chromium.org
ulan@chromium.org
diff --git a/deps/v8/src/roots/roots.h b/deps/v8/src/roots/roots.h
index 547cb0cc8ca..64758f5efaf 100644
--- a/deps/v8/src/roots/roots.h
+++ b/deps/v8/src/roots/roots.h
@@ -110,7 +110,7 @@ class Symbol;
V(Map, source_text_module_map, SourceTextModuleMap) \
V(Map, swiss_name_dictionary_map, SwissNameDictionaryMap) \
V(Map, synthetic_module_map, SyntheticModuleMap) \
- V(Map, wasm_type_info_map, WasmTypeInfoMap) \
+ IF_WASM(V, Map, wasm_type_info_map, WasmTypeInfoMap) \
V(Map, weak_fixed_array_map, WeakFixedArrayMap) \
V(Map, weak_array_list_map, WeakArrayListMap) \
V(Map, ephemeron_hash_table_map, EphemeronHashTableMap) \
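
The roots.h hunk swaps a plain V(...) entry for IF_WASM(V, ...), which contributes the entry to the list only in WebAssembly-enabled builds. The helper macro itself is not shown in this diff; the sketch below uses the usual shape of such a conditional list-entry macro, with invented names, to illustrate how the expansion works:

#include <cstdio>

#ifndef ENABLE_FEATURE
#define ENABLE_FEATURE 1  // pretend the optional feature is compiled in
#endif

// The idiom: expand to V(...) only when the feature is enabled.
#if ENABLE_FEATURE
#define IF_FEATURE(V, ...) V(__VA_ARGS__)
#else
#define IF_FEATURE(V, ...)
#endif

#define ROOT_LIST(V)            \
  V(first_root)                 \
  IF_FEATURE(V, feature_root)   \
  V(last_root)

int main() {
#define V(name) std::printf(#name "\n");
  ROOT_LIST(V)  // prints first_root, feature_root, last_root when enabled
#undef V
  return 0;
}
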
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 87456ad3a5f..120f2441a79 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -117,7 +117,7 @@ namespace {
template <typename Dictionary>
Handle<Name> KeyToName(Isolate* isolate, Handle<Object> key) {
- STATIC_ASSERT((std::is_same<Dictionary, OrderedNameDictionary>::value ||
+ STATIC_ASSERT((std::is_same<Dictionary, SwissNameDictionary>::value ||
std::is_same<Dictionary, NameDictionary>::value));
DCHECK(key->IsName());
return Handle<Name>::cast(key);
@@ -190,8 +190,7 @@ Handle<Dictionary> ShallowCopyDictionaryTemplate(
Isolate* isolate, Handle<Dictionary> dictionary_template) {
Handle<Map> dictionary_map(dictionary_template->map(), isolate);
Handle<Dictionary> dictionary =
- Handle<Dictionary>::cast(isolate->factory()->CopyFixedArrayWithMap(
- dictionary_template, dictionary_map));
+ Dictionary::ShallowCopy(isolate, dictionary_template);
// Clone all AccessorPairs in the dictionary.
for (InternalIndex i : dictionary->IterateEntries()) {
Object value = dictionary->ValueAt(i);
@@ -529,9 +528,9 @@ bool InitClassPrototype(Isolate* isolate,
// Class prototypes do not have a name accessor.
const bool install_name_accessor = false;
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- Handle<OrderedNameDictionary> properties_dictionary_template =
- Handle<OrderedNameDictionary>::cast(properties_template);
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ Handle<SwissNameDictionary> properties_dictionary_template =
+ Handle<SwissNameDictionary>::cast(properties_template);
return AddDescriptorsByTemplate(
isolate, map, properties_dictionary_template,
elements_dictionary_template, computed_properties, prototype,
@@ -590,9 +589,9 @@ bool InitClassConstructor(
// All class constructors have a name accessor.
const bool install_name_accessor = true;
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- Handle<OrderedNameDictionary> properties_dictionary_template =
- Handle<OrderedNameDictionary>::cast(properties_template);
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ Handle<SwissNameDictionary> properties_dictionary_template =
+ Handle<SwissNameDictionary>::cast(properties_template);
return AddDescriptorsByTemplate(
isolate, map, properties_dictionary_template,
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 0897f685fcc..090a9261c44 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -204,18 +204,18 @@ RUNTIME_FUNCTION(Runtime_InstantiateAsmJs) {
memory = args.at<JSArrayBuffer>(3);
}
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- if (shared->HasAsmWasmData()) {
#if V8_ENABLE_WEBASSEMBLY
+ if (shared->HasAsmWasmData()) {
Handle<AsmWasmData> data(shared->asm_wasm_data(), isolate);
MaybeHandle<Object> result = AsmJs::InstantiateAsmWasm(
isolate, shared, data, stdlib, foreign, memory);
if (!result.is_null()) return *result.ToHandleChecked();
-#endif
// Remove wasm data, mark as broken for asm->wasm, replace function code
// with UncompiledData, and return a smi 0 to indicate failure.
SharedFunctionInfo::DiscardCompiled(isolate, shared);
}
shared->set_is_asm_wasm_broken(true);
+#endif
DCHECK(function->code() ==
isolate->builtins()->builtin(Builtins::kInstantiateAsmJs));
function->set_code(isolate->builtins()->builtin(Builtins::kCompileLazy));
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index e7ebfa2c189..4ffcaf23978 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -10,19 +10,19 @@
#include "src/debug/debug-evaluate.h"
#include "src/debug/debug-frames.h"
#include "src/debug/debug-scopes.h"
-#include "src/debug/debug-wasm-objects.h"
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
#include "src/execution/arguments-inl.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
-#include "src/interpreter/bytecode-array-accessor.h"
+#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
#include "src/logging/counters.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/heap-object-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/js-generator-inl.h"
#include "src/objects/js-promise-inl.h"
@@ -30,7 +30,11 @@
#include "src/runtime/runtime.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
+
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/debug/debug-wasm-objects.h"
#include "src/wasm/wasm-objects-inl.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -221,13 +225,16 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
factory->NewJSArrayWithElements(bound_arguments);
result->set(5, *arguments_array);
return factory->NewJSArrayWithElements(result);
- } else if (object->IsJSMapIterator()) {
+ }
+ if (object->IsJSMapIterator()) {
Handle<JSMapIterator> iterator = Handle<JSMapIterator>::cast(object);
return GetIteratorInternalProperties(isolate, iterator);
- } else if (object->IsJSSetIterator()) {
+ }
+ if (object->IsJSSetIterator()) {
Handle<JSSetIterator> iterator = Handle<JSSetIterator>::cast(object);
return GetIteratorInternalProperties(isolate, iterator);
- } else if (object->IsJSGeneratorObject()) {
+ }
+ if (object->IsJSGeneratorObject()) {
Handle<JSGeneratorObject> generator =
Handle<JSGeneratorObject>::cast(object);
@@ -257,7 +264,8 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
result->set(4, *receiver);
result->set(5, generator->receiver());
return factory->NewJSArrayWithElements(result);
- } else if (object->IsJSPromise()) {
+ }
+ if (object->IsJSPromise()) {
Handle<JSPromise> promise = Handle<JSPromise>::cast(object);
const char* status = JSPromise::Status(promise->status());
Handle<FixedArray> result = factory->NewFixedArray(2 * 2);
@@ -276,7 +284,8 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
result->set(2, *promise_value);
result->set(3, *value_obj);
return factory->NewJSArrayWithElements(result);
- } else if (object->IsJSProxy()) {
+ }
+ if (object->IsJSProxy()) {
Handle<JSProxy> js_proxy = Handle<JSProxy>::cast(object);
Handle<FixedArray> result = factory->NewFixedArray(3 * 2);
@@ -295,7 +304,8 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
result->set(4, *is_revoked_str);
result->set(5, isolate->heap()->ToBoolean(js_proxy->IsRevoked()));
return factory->NewJSArrayWithElements(result);
- } else if (object->IsJSPrimitiveWrapper()) {
+ }
+ if (object->IsJSPrimitiveWrapper()) {
Handle<JSPrimitiveWrapper> js_value =
Handle<JSPrimitiveWrapper>::cast(object);
@@ -305,7 +315,8 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
result->set(0, *primitive_value);
result->set(1, js_value->value());
return factory->NewJSArrayWithElements(result);
- } else if (object->IsJSArrayBuffer()) {
+ }
+ if (object->IsJSArrayBuffer()) {
Handle<JSArrayBuffer> js_array_buffer = Handle<JSArrayBuffer>::cast(object);
if (js_array_buffer->was_detached()) {
// Mark a detached JSArrayBuffer and such and don't even try to
@@ -366,6 +377,7 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
result->set(index++, *buffer_data_str);
result->set(index++, *buffer_data_obj);
+#if V8_ENABLE_WEBASSEMBLY
Handle<Symbol> memory_symbol = factory->array_buffer_wasm_memory_symbol();
Handle<Object> memory_object =
JSObject::GetDataProperty(js_array_buffer, memory_symbol);
@@ -377,15 +389,20 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
result->set(index++, *buffer_memory_str);
result->set(index++, *buffer_memory_obj);
}
+#endif // V8_ENABLE_WEBASSEMBLY
return factory->NewJSArrayWithElements(result, PACKED_ELEMENTS, index);
- } else if (object->IsWasmInstanceObject()) {
+ }
+#if V8_ENABLE_WEBASSEMBLY
+ if (object->IsWasmInstanceObject()) {
return GetWasmInstanceObjectInternalProperties(
Handle<WasmInstanceObject>::cast(object));
- } else if (object->IsWasmModuleObject()) {
+ }
+ if (object->IsWasmModuleObject()) {
return GetWasmModuleObjectInternalProperties(
Handle<WasmModuleObject>::cast(object));
}
+#endif // V8_ENABLE_WEBASSEMBLY
return factory->NewJSArray(0);
}
@@ -563,10 +580,12 @@ namespace {
int ScriptLinePosition(Handle<Script> script, int line) {
if (line < 0) return -1;
+#if V8_ENABLE_WEBASSEMBLY
if (script->type() == Script::TYPE_WASM) {
// Wasm positions are relative to the start of the module.
return 0;
}
+#endif // V8_ENABLE_WEBASSEMBLY
Script::InitLineEnds(script->GetIsolate(), script);
@@ -603,12 +622,16 @@ Handle<Object> GetJSPositionInfo(Handle<Script> script, int position,
return isolate->factory()->null_value();
}
+#if V8_ENABLE_WEBASSEMBLY
+ const bool is_wasm_script = script->type() == Script::TYPE_WASM;
+#else
+ const bool is_wasm_script = false;
+#endif // V8_ENABLE_WEBASSEMBLY
Handle<String> sourceText =
- script->type() == Script::TYPE_WASM
- ? isolate->factory()->empty_string()
- : isolate->factory()->NewSubString(
- handle(String::cast(script->source()), isolate),
- info.line_start, info.line_end);
+ is_wasm_script ? isolate->factory()->empty_string()
+ : isolate->factory()->NewSubString(
+ handle(String::cast(script->source()), isolate),
+ info.line_start, info.line_end);
Handle<JSObject> jsinfo =
isolate->factory()->NewJSObject(isolate->object_function());
@@ -918,7 +941,7 @@ RUNTIME_FUNCTION(Runtime_ProfileCreateSnapshotDataBlob) {
// Track the embedded blob size as well.
{
- i::EmbeddedData d = i::EmbeddedData::FromBlob();
+ i::EmbeddedData d = i::EmbeddedData::FromBlob(isolate);
PrintF("Embedded blob is %d bytes\n",
static_cast<int>(d.code_size() + d.data_size()));
}
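
The GetJSPositionInfo hunk above folds the V8_ENABLE_WEBASSEMBLY check into a single is_wasm_script boolean so the string-slicing code below it needs no further preprocessor branches. A standalone sketch of that shape, with an invented feature macro:

#include <string>

#ifndef TOY_ENABLE_WASM
#define TOY_ENABLE_WASM 0  // invented stand-in for V8_ENABLE_WEBASSEMBLY
#endif

// Evaluate the build-time condition once, up front...
std::string SourceLineForDisplay(const std::string& source,
                                 bool script_is_wasm) {
#if TOY_ENABLE_WASM
  const bool is_wasm_script = script_is_wasm;
#else
  const bool is_wasm_script = false;
  (void)script_is_wasm;  // unused when Wasm support is compiled out
#endif
  // ...so the logic below compiles identically in both configurations.
  return is_wasm_script ? std::string() : source;
}

int main() { return SourceLineForDisplay("let x = 1;", false).empty() ? 1 : 0; }
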
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 3eebf507f0f..245d1fd77e1 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -337,14 +337,30 @@ RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptFromBytecode) {
function->shared().is_compiled_scope(isolate));
JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
DCHECK(is_compiled_scope.is_compiled());
- if (FLAG_sparkplug) {
- Compiler::CompileBaseline(isolate, function, Compiler::CLEAR_EXCEPTION,
- &is_compiled_scope);
- }
// Also initialize the invocation count here. This is only really needed for
// OSR. When we OSR functions with lazy feedback allocation we want to have
// a non zero invocation count so we can inline functions.
function->feedback_vector().set_invocation_count(1);
+ if (FLAG_sparkplug) {
+ if (Compiler::CompileBaseline(isolate, function,
+ Compiler::CLEAR_EXCEPTION,
+ &is_compiled_scope)) {
+ if (FLAG_use_osr) {
+ JavaScriptFrameIterator it(isolate);
+ DCHECK(it.frame()->is_unoptimized());
+ UnoptimizedFrame* frame = UnoptimizedFrame::cast(it.frame());
+ if (FLAG_trace_osr) {
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(
+ scope.file(),
+ "[OSR - Entry at OSR bytecode offset %d into baseline code]\n",
+ frame->GetBytecodeOffset());
+ }
+ frame->GetBytecodeArray().set_osr_loop_nesting_level(
+ AbstractCode::kMaxLoopNestingMarker);
+ }
+ }
+ }
return ReadOnlyRoots(isolate).undefined_value();
}
{
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index d0ca45d15a7..5ee7bcd4c53 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -111,8 +111,7 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
if (!copy->IsJSArray(isolate)) {
if (copy->HasFastProperties(isolate)) {
Handle<DescriptorArray> descriptors(
- copy->map(isolate).instance_descriptors(isolate, kRelaxedLoad),
- isolate);
+ copy->map(isolate).instance_descriptors(isolate), isolate);
for (InternalIndex i : copy->map(isolate).IterateOwnDescriptors()) {
PropertyDetails details = descriptors->GetDetails(i);
DCHECK_EQ(kField, details.location());
@@ -133,9 +132,9 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
}
}
} else {
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- Handle<OrderedNameDictionary> dict(
- copy->property_dictionary_ordered(isolate), isolate);
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ Handle<SwissNameDictionary> dict(
+ copy->property_dictionary_swiss(isolate), isolate);
for (InternalIndex i : dict->IterateEntries()) {
Object raw = dict->ValueAt(i);
if (!raw.IsJSObject(isolate)) continue;
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index af7a26e869a..a98ad2e6dc4 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -18,6 +18,7 @@
#include "src/objects/js-array-inl.h"
#include "src/objects/property-descriptor-object.h"
#include "src/objects/property-descriptor.h"
+#include "src/objects/swiss-name-dictionary-inl.h"
#include "src/runtime/runtime-utils.h"
#include "src/runtime/runtime.h"
@@ -94,6 +95,51 @@ MaybeHandle<Object> Runtime::HasProperty(Isolate* isolate,
namespace {
+void GeneralizeAllTransitionsToFieldAsMutable(Isolate* isolate, Handle<Map> map,
+ Handle<Name> name) {
+ InternalIndex descriptor(map->NumberOfOwnDescriptors());
+
+ Handle<Map> target_maps[kPropertyAttributesCombinationsCount];
+ int target_maps_count = 0;
+
+ // Collect all outgoing field transitions.
+ {
+ DisallowGarbageCollection no_gc;
+ TransitionsAccessor transitions(isolate, *map, &no_gc);
+ transitions.ForEachTransitionTo(
+ *name,
+ [&](Map target) {
+ DCHECK_EQ(descriptor, target.LastAdded());
+ DCHECK_EQ(*name, target.GetLastDescriptorName(isolate));
+ PropertyDetails details = target.GetLastDescriptorDetails(isolate);
+ // Currently, we track constness only for fields.
+ if (details.kind() == kData &&
+ details.constness() == PropertyConstness::kConst) {
+ target_maps[target_maps_count++] = handle(target, isolate);
+ }
+ DCHECK_IMPLIES(details.kind() == kAccessor,
+ details.constness() == PropertyConstness::kConst);
+ },
+ &no_gc);
+ CHECK_LE(target_maps_count, kPropertyAttributesCombinationsCount);
+ }
+
+ for (int i = 0; i < target_maps_count; i++) {
+ Handle<Map> target = target_maps[i];
+ PropertyDetails details =
+ target->instance_descriptors(isolate).GetDetails(descriptor);
+ Handle<FieldType> field_type(
+ target->instance_descriptors(isolate).GetFieldType(descriptor),
+ isolate);
+ Map::GeneralizeField(isolate, target, descriptor,
+ PropertyConstness::kMutable, details.representation(),
+ field_type);
+ DCHECK_EQ(PropertyConstness::kMutable, target->instance_descriptors(isolate)
+ .GetDetails(descriptor)
+ .constness());
+ }
+}
+
bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
Handle<Object> raw_key) {
// This implements a special case for fast property deletion: when the
@@ -103,6 +149,8 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
// (1) The receiver must be a regular object and the key a unique name.
Handle<Map> receiver_map(receiver->map(), isolate);
if (receiver_map->IsSpecialReceiverMap()) return false;
+ DCHECK(receiver_map->IsJSObjectMap());
+
if (!raw_key->IsUniqueName()) return false;
Handle<Name> key = Handle<Name>::cast(raw_key);
// (2) The property to be deleted must be the last property.
@@ -110,7 +158,7 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
if (nof == 0) return false;
InternalIndex descriptor(nof - 1);
Handle<DescriptorArray> descriptors(
- receiver_map->instance_descriptors(kRelaxedLoad), isolate);
+ receiver_map->instance_descriptors(isolate), isolate);
if (descriptors->GetKey(descriptor) != *key) return false;
// (3) The property to be deleted must be deletable.
PropertyDetails details = descriptors->GetDetails(descriptor);
@@ -125,26 +173,6 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
// Preconditions successful. No more bailouts after this point.
- // If the {descriptor} was "const" so far, we need to update the
- // {receiver_map} here, otherwise we could get the constants wrong, i.e.
- //
- // o.x = 1;
- // delete o.x;
- // o.x = 2;
- //
- // could trick V8 into thinking that `o.x` is still 1 even after the second
- // assignment.
- if (details.constness() == PropertyConstness::kConst &&
- details.location() == kField) {
- Handle<FieldType> field_type(descriptors->GetFieldType(descriptor),
- isolate);
- Map::GeneralizeField(isolate, receiver_map, descriptor,
- PropertyConstness::kMutable, details.representation(),
- field_type);
- DCHECK_EQ(PropertyConstness::kMutable,
- descriptors->GetDetails(descriptor).constness());
- }
-
// Zap the property to avoid keeping objects alive. Zapping is not necessary
// for properties stored in the descriptor array.
if (details.location() == kField) {
@@ -191,6 +219,30 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
receiver->HeapObjectVerify(isolate);
receiver->property_array().PropertyArrayVerify(isolate);
#endif
+
+ // If the {descriptor} was "const" so far, we need to update the
+ // {receiver_map} here, otherwise we could get the constants wrong, i.e.
+ //
+ // o.x = 1;
+ // [change o.x's attributes or reconfigure property kind]
+ // delete o.x;
+ // o.x = 2;
+ //
+ // could trick V8 into thinking that `o.x` is still 1 even after the second
+ // assignment.
+
+ // Step 1: Migrate object to an up-to-date shape.
+ if (parent_map->is_deprecated()) {
+ JSObject::MigrateInstance(isolate, Handle<JSObject>::cast(receiver));
+ parent_map = handle(receiver->map(), isolate);
+ }
+
+ // Step 2: Mark outgoing transitions from the up-to-date version of the
+ // parent_map to same property name of any kind or attributes as mutable.
+ // Also migrate object to the up-to-date map to make the object shapes
+ // converge sooner.
+ GeneralizeAllTransitionsToFieldAsMutable(isolate, parent_map, key);
+
return true;
}
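
The relocated comment above explains why constness must be dropped when a field is deleted: anything that cached the "constant" value could otherwise keep using it after a later reassignment. A toy, self-contained model of that hazard (all names invented; the real mechanism generalizes map transitions rather than clearing an explicit cache):

#include <cassert>
#include <optional>
#include <string>
#include <unordered_map>

struct ToyObject {
  std::unordered_map<std::string, int> fields;
  std::unordered_map<std::string, bool> is_const;
};

struct ConstFieldCache {
  std::optional<int> cached_x;

  int LoadX(const ToyObject& o) {
    if (cached_x) return *cached_x;            // trusts "x never changes"
    int value = o.fields.at("x");
    if (o.is_const.at("x")) cached_x = value;  // cache only constant fields
    return value;
  }
  void OnConstnessCleared() { cached_x.reset(); }
};

int main() {
  ToyObject o;
  ConstFieldCache cache;

  o.fields["x"] = 1;            // o.x = 1;
  o.is_const["x"] = true;
  assert(cache.LoadX(o) == 1);

  o.fields.erase("x");          // delete o.x;
  o.is_const["x"] = false;      // the step the runtime must not skip
  cache.OnConstnessCleared();

  o.fields["x"] = 2;            // o.x = 2;
  assert(cache.LoadX(o) == 2);  // correct only because the cache was reset
  return 0;
}
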
@@ -324,7 +376,7 @@ RUNTIME_FUNCTION(Runtime_ObjectHasOwnProperty) {
Map map = js_obj->map();
if (!map.IsJSGlobalProxyMap() &&
- (key.is_element() && key.index() <= JSArray::kMaxArrayIndex
+ (key.is_element() && key.index() <= JSObject::kMaxElementIndex
? !map.has_indexed_interceptor()
: !map.has_named_interceptor())) {
return ReadOnlyRoots(isolate).false_value();
@@ -400,12 +452,11 @@ RUNTIME_FUNCTION(Runtime_AddDictionaryProperty) {
PropertyDetails property_details(
kData, NONE, PropertyDetails::kConstIfDictConstnessTracking);
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- Handle<OrderedNameDictionary> dictionary(
- receiver->property_dictionary_ordered(), isolate);
- dictionary = OrderedNameDictionary::Add(isolate, dictionary, name, value,
- property_details)
- .ToHandleChecked();
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ Handle<SwissNameDictionary> dictionary(
+ receiver->property_dictionary_swiss(), isolate);
+ dictionary = SwissNameDictionary::Add(isolate, dictionary, name, value,
+ property_details);
receiver->SetProperties(*dictionary);
} else {
Handle<NameDictionary> dictionary(receiver->property_dictionary(), isolate);
@@ -680,9 +731,8 @@ RUNTIME_FUNCTION(Runtime_GetProperty) {
}
} else if (!holder->HasFastProperties()) {
// Attempt dictionary lookup.
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- OrderedNameDictionary dictionary =
- holder->property_dictionary_ordered();
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ SwissNameDictionary dictionary = holder->property_dictionary_swiss();
InternalIndex entry = dictionary.FindEntry(isolate, *key);
if (entry.is_found() &&
(dictionary.DetailsAt(entry).kind() == kData)) {
@@ -810,24 +860,20 @@ RUNTIME_FUNCTION(Runtime_DeleteProperty) {
static_cast<LanguageMode>(language_mode));
}
-RUNTIME_FUNCTION(Runtime_ShrinkPropertyDictionary) {
+RUNTIME_FUNCTION(Runtime_ShrinkNameDictionary) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- Handle<OrderedNameDictionary> dictionary(
- receiver->property_dictionary_ordered(), isolate);
- Handle<OrderedNameDictionary> new_properties =
- OrderedNameDictionary::Shrink(isolate, dictionary);
- receiver->SetProperties(*new_properties);
- } else {
- Handle<NameDictionary> dictionary(receiver->property_dictionary(), isolate);
- Handle<NameDictionary> new_properties =
- NameDictionary::Shrink(isolate, dictionary);
- receiver->SetProperties(*new_properties);
- }
+ CONVERT_ARG_HANDLE_CHECKED(NameDictionary, dictionary, 0);
+
+ return *NameDictionary::Shrink(isolate, dictionary);
+}
+
+RUNTIME_FUNCTION(Runtime_ShrinkSwissNameDictionary) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(SwissNameDictionary, dictionary, 0);
- return Smi::zero();
+ return *SwissNameDictionary::Shrink(isolate, dictionary);
}
// ES6 section 12.9.3, operator in.
@@ -1365,5 +1411,122 @@ RUNTIME_FUNCTION(Runtime_AddPrivateField) {
return ReadOnlyRoots(isolate).undefined_value();
}
+// TODO(v8:11330) This is only here while the CSA/Torque implementation of
+// SwissNameDictionary is work in progress.
+RUNTIME_FUNCTION(Runtime_SwissTableAllocate) {
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, at_least_space_for, 0);
+
+ return *isolate->factory()->NewSwissNameDictionary(
+ at_least_space_for->value(), AllocationType::kYoung);
+}
+
+// TODO(v8:11330) This is only here while the CSA/Torque implementation of
+// SwissNameDictionary is work in progress.
+RUNTIME_FUNCTION(Runtime_SwissTableAdd) {
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(SwissNameDictionary, table, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, details_smi, 3);
+
+ DCHECK(key->IsUniqueName());
+
+ return *SwissNameDictionary::Add(isolate, table, key, value,
+ PropertyDetails{*details_smi});
+}
+
+// TODO(v8:11330) This is only here while the CSA/Torque implementation of
+// SwissNameDictionary is work in progress.
+RUNTIME_FUNCTION(Runtime_SwissTableFindEntry) {
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(SwissNameDictionary, table, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
+
+ InternalIndex index = table->FindEntry(isolate, *key);
+ return Smi::FromInt(index.is_found()
+ ? index.as_int()
+ : SwissNameDictionary::kNotFoundSentinel);
+}
+
+// TODO(v8:11330) This is only here while the CSA/Torque implementation of
+// SwissNameDictionary is work in progress.
+RUNTIME_FUNCTION(Runtime_SwissTableUpdate) {
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(SwissNameDictionary, table, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, index, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, details_smi, 3);
+
+ InternalIndex i(Smi::ToInt(*index));
+
+ table->ValueAtPut(i, *value);
+ table->DetailsAtPut(i, PropertyDetails{*details_smi});
+
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+// TODO(v8:11330) This is only here while the CSA/Torque implementation of
+// SwissNameDictionary is work in progress.
+RUNTIME_FUNCTION(Runtime_SwissTableDelete) {
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(SwissNameDictionary, table, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, entry, 1);
+
+ InternalIndex i(Smi::ToInt(*entry));
+
+ return *SwissNameDictionary::DeleteEntry(isolate, table, i);
+}
+
+// TODO(v8:11330) This is only here while the CSA/Torque implementation of
+// SwissNameDictionary is work in progress.
+RUNTIME_FUNCTION(Runtime_SwissTableEquals) {
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(SwissNameDictionary, table, 0);
+ CONVERT_ARG_HANDLE_CHECKED(SwissNameDictionary, other, 1);
+
+ return Smi::FromInt(table->EqualsForTesting(*other));
+}
+
+// TODO(v8:11330) This is only here while the CSA/Torque implementation of
+// SwissNameDictionary is work in progress.
+RUNTIME_FUNCTION(Runtime_SwissTableElementsCount) {
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(SwissNameDictionary, table, 0);
+
+ return Smi::FromInt(table->NumberOfElements());
+}
+
+// TODO(v8:11330) This is only here while the CSA/Torque implementation of
+// SwissNameDictionary is work in progress.
+RUNTIME_FUNCTION(Runtime_SwissTableKeyAt) {
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(SwissNameDictionary, table, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, entry, 1);
+
+ return table->KeyAt(InternalIndex(Smi::ToInt(*entry)));
+}
+
+// TODO(v8:11330) This is only here while the CSA/Torque implementation of
+// SwissNameDictionary is work in progress.
+RUNTIME_FUNCTION(Runtime_SwissTableValueAt) {
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(SwissNameDictionary, table, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, entry, 1);
+
+ return table->ValueAt(InternalIndex(Smi::ToInt(*entry)));
+}
+
+// TODO(v8:11330) This is only here while the CSA/Torque implementation of
+// SwissNameDictionary is work in progress.
+RUNTIME_FUNCTION(Runtime_SwissTableDetailsAt) {
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(SwissNameDictionary, table, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, entry, 1);
+
+ PropertyDetails d = table->DetailsAt(InternalIndex(Smi::ToInt(*entry)));
+ return d.AsSmi();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-promise.cc b/deps/v8/src/runtime/runtime-promise.cc
index 54adc4c920f..c1ee96facca 100644
--- a/deps/v8/src/runtime/runtime-promise.cc
+++ b/deps/v8/src/runtime/runtime-promise.cc
@@ -29,8 +29,8 @@ RUNTIME_FUNCTION(Runtime_PromiseRejectEventFromStack) {
// undefined, which we interpret as being a caught exception event.
rejected_promise = isolate->GetPromiseOnStackOnThrow();
}
- isolate->RunAllPromiseHooks(PromiseHookType::kResolve, promise,
- isolate->factory()->undefined_value());
+ isolate->RunPromiseHook(PromiseHookType::kResolve, promise,
+ isolate->factory()->undefined_value());
isolate->debug()->OnPromiseReject(rejected_promise, value);
// Report only if we don't actually have a handler.
@@ -142,7 +142,7 @@ Handle<JSPromise> AwaitPromisesInitCommon(Isolate* isolate,
// hook for the throwaway promise (passing the {promise} as its
// parent).
Handle<JSPromise> throwaway = isolate->factory()->NewJSPromiseWithoutHook();
- isolate->RunAllPromiseHooks(PromiseHookType::kInit, throwaway, promise);
+ isolate->RunPromiseHook(PromiseHookType::kInit, throwaway, promise);
// On inspector side we capture async stack trace and store it by
// outer_promise->async_task_id when async function is suspended first time.
@@ -204,7 +204,7 @@ RUNTIME_FUNCTION(Runtime_AwaitPromisesInitOld) {
// Fire the init hook for the wrapper promise (that we created for the
// {value} previously).
- isolate->RunAllPromiseHooks(PromiseHookType::kInit, promise, outer_promise);
+ isolate->RunPromiseHook(PromiseHookType::kInit, promise, outer_promise);
return *AwaitPromisesInitCommon(isolate, value, promise, outer_promise,
reject_handler, is_predicted_as_caught);
}
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index 403d83bef9f..d18602f58f7 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -4,6 +4,7 @@
#include <functional>
+#include "src/base/small-vector.h"
#include "src/common/message-template.h"
#include "src/execution/arguments-inl.h"
#include "src/execution/isolate-inl.h"
@@ -17,13 +18,21 @@
#include "src/runtime/runtime-utils.h"
#include "src/strings/string-builder-inl.h"
#include "src/strings/string-search.h"
-#include "src/zone/zone-chunk-list.h"
namespace v8 {
namespace internal {
namespace {
+// Fairly arbitrary, but intended to fit:
+//
+// - captures
+// - results
+// - parsed replacement pattern parts
+//
+// for small, common cases.
+constexpr int kStaticVectorSlots = 8;
+
// Returns -1 for failure.
uint32_t GetArgcForReplaceCallable(uint32_t num_captures,
bool has_named_captures) {
@@ -67,9 +76,6 @@ int LookupNamedCapture(const std::function<bool(String)>& name_matches,
class CompiledReplacement {
public:
- explicit CompiledReplacement(Zone* zone)
- : parts_(zone), replacement_substrings_(zone) {}
-
// Return whether the replacement is simple.
bool Compile(Isolate* isolate, Handle<JSRegExp> regexp,
Handle<String> replacement, int capture_count,
@@ -143,8 +149,7 @@ class CompiledReplacement {
};
template <typename Char>
- bool ParseReplacementPattern(ZoneChunkList<ReplacementPart>* parts,
- Vector<Char> characters,
+ bool ParseReplacementPattern(Vector<Char> characters,
FixedArray capture_name_map, int capture_count,
int subject_length) {
// Equivalent to String::GetSubstitution, except that this method converts
@@ -164,7 +169,7 @@ class CompiledReplacement {
case '$':
if (i > last) {
// There is a substring before. Include the first "$".
- parts->push_back(
+ parts_.emplace_back(
ReplacementPart::ReplacementSubString(last, next_index));
last = next_index + 1; // Continue after the second "$".
} else {
@@ -175,25 +180,28 @@ class CompiledReplacement {
break;
case '`':
if (i > last) {
- parts->push_back(ReplacementPart::ReplacementSubString(last, i));
+ parts_.emplace_back(
+ ReplacementPart::ReplacementSubString(last, i));
}
- parts->push_back(ReplacementPart::SubjectPrefix());
+ parts_.emplace_back(ReplacementPart::SubjectPrefix());
i = next_index;
last = i + 1;
break;
case '\'':
if (i > last) {
- parts->push_back(ReplacementPart::ReplacementSubString(last, i));
+ parts_.emplace_back(
+ ReplacementPart::ReplacementSubString(last, i));
}
- parts->push_back(ReplacementPart::SubjectSuffix(subject_length));
+ parts_.emplace_back(ReplacementPart::SubjectSuffix(subject_length));
i = next_index;
last = i + 1;
break;
case '&':
if (i > last) {
- parts->push_back(ReplacementPart::ReplacementSubString(last, i));
+ parts_.emplace_back(
+ ReplacementPart::ReplacementSubString(last, i));
}
- parts->push_back(ReplacementPart::SubjectMatch());
+ parts_.emplace_back(ReplacementPart::SubjectMatch());
i = next_index;
last = i + 1;
break;
@@ -226,11 +234,11 @@ class CompiledReplacement {
}
if (capture_ref > 0) {
if (i > last) {
- parts->push_back(
+ parts_.emplace_back(
ReplacementPart::ReplacementSubString(last, i));
}
DCHECK(capture_ref <= capture_count);
- parts->push_back(ReplacementPart::SubjectCapture(capture_ref));
+ parts_.emplace_back(ReplacementPart::SubjectCapture(capture_ref));
last = next_index + 1;
}
i = next_index;
@@ -281,9 +289,10 @@ class CompiledReplacement {
(1 <= capture_index && capture_index <= capture_count));
if (i > last) {
- parts->push_back(ReplacementPart::ReplacementSubString(last, i));
+ parts_.emplace_back(
+ ReplacementPart::ReplacementSubString(last, i));
}
- parts->push_back(
+ parts_.emplace_back(
(capture_index == -1)
? ReplacementPart::EmptyReplacement()
: ReplacementPart::SubjectCapture(capture_index));
@@ -302,14 +311,15 @@ class CompiledReplacement {
// Replacement is simple. Do not use Apply to do the replacement.
return true;
} else {
- parts->push_back(ReplacementPart::ReplacementSubString(last, length));
+ parts_.emplace_back(
+ ReplacementPart::ReplacementSubString(last, length));
}
}
return false;
}
- ZoneChunkList<ReplacementPart> parts_;
- ZoneVector<Handle<String>> replacement_substrings_;
+ base::SmallVector<ReplacementPart, kStaticVectorSlots> parts_;
+ base::SmallVector<Handle<String>, kStaticVectorSlots> replacement_substrings_;
};
bool CompiledReplacement::Compile(Isolate* isolate, Handle<JSRegExp> regexp,
@@ -331,14 +341,13 @@ bool CompiledReplacement::Compile(Isolate* isolate, Handle<JSRegExp> regexp,
bool simple;
if (content.IsOneByte()) {
- simple = ParseReplacementPattern(&parts_, content.ToOneByteVector(),
- capture_name_map, capture_count,
- subject_length);
+ simple =
+ ParseReplacementPattern(content.ToOneByteVector(), capture_name_map,
+ capture_count, subject_length);
} else {
DCHECK(content.IsTwoByte());
- simple = ParseReplacementPattern(&parts_, content.ToUC16Vector(),
- capture_name_map, capture_count,
- subject_length);
+ simple = ParseReplacementPattern(content.ToUC16Vector(), capture_name_map,
+ capture_count, subject_length);
}
if (simple) return true;
}
@@ -350,13 +359,13 @@ bool CompiledReplacement::Compile(Isolate* isolate, Handle<JSRegExp> regexp,
if (tag <= 0) { // A replacement string slice.
int from = -tag;
int to = part.data;
- replacement_substrings_.push_back(
+ replacement_substrings_.emplace_back(
isolate->factory()->NewSubString(replacement, from, to));
part.tag = REPLACEMENT_SUBSTRING;
part.data = substring_index;
substring_index++;
} else if (tag == REPLACEMENT_STRING) {
- replacement_substrings_.push_back(replacement);
+ replacement_substrings_.emplace_back(replacement);
part.data = substring_index;
substring_index++;
}
@@ -511,12 +520,15 @@ std::vector<int>* GetRewoundRegexpIndicesList(Isolate* isolate) {
void TruncateRegexpIndicesList(Isolate* isolate) {
// Same size as smallest zone segment, preserving behavior from the
// runtime zone.
- static const int kMaxRegexpIndicesListCapacity = 8 * KB;
- std::vector<int>* indicies = isolate->regexp_indices();
- if (indicies->capacity() > kMaxRegexpIndicesListCapacity) {
+ // TODO(jgruber): Consider removing the reusable regexp_indices list and
+ // simply allocating a new list each time. It feels like we're needlessly
+ // optimizing an edge case.
+ static const int kMaxRegexpIndicesListCapacity = 8 * KB / kIntSize;
+ std::vector<int>* indices = isolate->regexp_indices();
+ if (indices->capacity() > kMaxRegexpIndicesListCapacity) {
// Throw away backing storage.
- indicies->clear();
- indicies->shrink_to_fit();
+ indices->clear();
+ indices->shrink_to_fit();
}
}
} // namespace
@@ -616,9 +628,7 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithString(
return ReadOnlyRoots(isolate).exception();
}
- // CompiledReplacement uses zone allocation.
- Zone zone(isolate->allocator(), ZONE_NAME);
- CompiledReplacement compiled_replacement(&zone);
+ CompiledReplacement compiled_replacement;
const bool simple_replace = compiled_replacement.Compile(
isolate, regexp, replacement, capture_count, subject_length);
@@ -1048,8 +1058,7 @@ class VectorBackedMatch : public String::Match {
public:
VectorBackedMatch(Isolate* isolate, Handle<String> subject,
Handle<String> match, int match_position,
- ZoneVector<Handle<Object>>* captures,
- Handle<Object> groups_obj)
+ Vector<Handle<Object>> captures, Handle<Object> groups_obj)
: isolate_(isolate),
match_(match),
match_position_(match_position),
@@ -1075,10 +1084,10 @@ class VectorBackedMatch : public String::Match {
bool HasNamedCaptures() override { return has_named_captures_; }
- int CaptureCount() override { return static_cast<int>(captures_->size()); }
+ int CaptureCount() override { return captures_.length(); }
MaybeHandle<String> GetCapture(int i, bool* capture_exists) override {
- Handle<Object> capture_obj = captures_->at(i);
+ Handle<Object> capture_obj = captures_[i];
if (capture_obj->IsUndefined(isolate_)) {
*capture_exists = false;
return isolate_->factory()->empty_string();
@@ -1109,7 +1118,7 @@ class VectorBackedMatch : public String::Match {
Handle<String> subject_;
Handle<String> match_;
const int match_position_;
- ZoneVector<Handle<Object>>* captures_;
+ Vector<Handle<Object>> captures_;
bool has_named_captures_;
Handle<JSReceiver> groups_obj_;
@@ -1827,8 +1836,7 @@ RUNTIME_FUNCTION(Runtime_RegExpReplaceRT) {
RegExpUtils::SetLastIndex(isolate, recv, 0));
}
- Zone zone(isolate->allocator(), ZONE_NAME);
- ZoneVector<Handle<Object>> results(&zone);
+ base::SmallVector<Handle<Object>, kStaticVectorSlots> results;
while (true) {
Handle<Object> result;
@@ -1838,7 +1846,7 @@ RUNTIME_FUNCTION(Runtime_RegExpReplaceRT) {
if (result->IsNull(isolate)) break;
- results.push_back(result);
+ results.emplace_back(result);
if (!global) break;
Handle<Object> match_obj;
@@ -1893,7 +1901,7 @@ RUNTIME_FUNCTION(Runtime_RegExpReplaceRT) {
std::min(PositiveNumberToUint32(*position_obj), length);
// Do not reserve capacity since captures_length is user-controlled.
- ZoneVector<Handle<Object>> captures(&zone);
+ base::SmallVector<Handle<Object>, kStaticVectorSlots> captures;
for (uint32_t n = 0; n < captures_length; n++) {
Handle<Object> capture;
@@ -1904,7 +1912,7 @@ RUNTIME_FUNCTION(Runtime_RegExpReplaceRT) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, capture,
Object::ToString(isolate, capture));
}
- captures.push_back(capture);
+ captures.emplace_back(capture);
}
Handle<Object> groups_obj = isolate->factory()->undefined_value();
@@ -1950,7 +1958,7 @@ RUNTIME_FUNCTION(Runtime_RegExpReplaceRT) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, groups_obj, Object::ToObject(isolate, groups_obj));
}
- VectorBackedMatch m(isolate, string, match, position, &captures,
+ VectorBackedMatch m(isolate, string, match, position, VectorOf(captures),
groups_obj);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, replacement, String::GetSubstitution(isolate, &m, replace));
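
The runtime-regexp.cc changes replace zone-backed lists with base::SmallVector<T, kStaticVectorSlots>, which keeps up to kStaticVectorSlots elements in inline storage so the common small case avoids heap allocation entirely. A toy illustration of that small-buffer idea (deliberately simplified; not V8's actual implementation):

#include <cstddef>
#include <cstdio>
#include <vector>

// The first N elements live in inline storage; anything beyond spills to a
// heap-backed vector. No destructor/reserve handling: illustration only.
template <typename T, size_t N>
class ToySmallVector {
 public:
  void push_back(const T& value) {
    if (size_ < N) {
      inline_[size_] = value;
    } else {
      overflow_.push_back(value);
    }
    ++size_;
  }
  const T& operator[](size_t i) const {
    return i < N ? inline_[i] : overflow_[i - N];
  }
  size_t size() const { return size_; }

 private:
  T inline_[N] = {};
  std::vector<T> overflow_;
  size_t size_ = 0;
};

int main() {
  constexpr int kStaticVectorSlots = 8;
  ToySmallVector<int, kStaticVectorSlots> parts;
  for (int i = 0; i < 10; ++i) parts.push_back(i);  // first 8 stay inline
  std::printf("%zu %d %d\n", parts.size(), parts[0], parts[9]);  // 10 0 9
  return 0;
}
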
diff --git a/deps/v8/src/runtime/runtime-test-wasm.cc b/deps/v8/src/runtime/runtime-test-wasm.cc
new file mode 100644
index 00000000000..9c07441de55
--- /dev/null
+++ b/deps/v8/src/runtime/runtime-test-wasm.cc
@@ -0,0 +1,488 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/memory.h"
+#include "src/base/platform/mutex.h"
+#include "src/execution/arguments-inl.h"
+#include "src/execution/frames-inl.h"
+#include "src/logging/counters.h"
+#include "src/objects/smi.h"
+#include "src/runtime/runtime-utils.h"
+#include "src/trap-handler/trap-handler.h"
+#include "src/utils/ostreams.h"
+#include "src/wasm/memory-tracing.h"
+#include "src/wasm/module-compiler.h"
+#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-engine.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects-inl.h"
+#include "src/wasm/wasm-serialization.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+struct WasmCompileControls {
+ uint32_t MaxWasmBufferSize = std::numeric_limits<uint32_t>::max();
+ bool AllowAnySizeForAsync = true;
+};
+using WasmCompileControlsMap = std::map<v8::Isolate*, WasmCompileControls>;
+
+// We need per-isolate controls, because we sometimes run tests in multiple
+// isolates concurrently. Methods need to hold the accompanying mutex on access.
+// To avoid upsetting the static initializer count, we lazy initialize this.
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(WasmCompileControlsMap,
+ GetPerIsolateWasmControls)
+base::LazyMutex g_PerIsolateWasmControlsMutex = LAZY_MUTEX_INITIALIZER;
+
+bool IsWasmCompileAllowed(v8::Isolate* isolate, v8::Local<v8::Value> value,
+ bool is_async) {
+ base::MutexGuard guard(g_PerIsolateWasmControlsMutex.Pointer());
+ DCHECK_GT(GetPerIsolateWasmControls()->count(isolate), 0);
+ const WasmCompileControls& ctrls = GetPerIsolateWasmControls()->at(isolate);
+ return (is_async && ctrls.AllowAnySizeForAsync) ||
+ (value->IsArrayBuffer() && value.As<v8::ArrayBuffer>()->ByteLength() <=
+ ctrls.MaxWasmBufferSize) ||
+ (value->IsArrayBufferView() &&
+ value.As<v8::ArrayBufferView>()->ByteLength() <=
+ ctrls.MaxWasmBufferSize);
+}
+
+// Use the compile controls for instantiation, too
+bool IsWasmInstantiateAllowed(v8::Isolate* isolate,
+ v8::Local<v8::Value> module_or_bytes,
+ bool is_async) {
+ base::MutexGuard guard(g_PerIsolateWasmControlsMutex.Pointer());
+ DCHECK_GT(GetPerIsolateWasmControls()->count(isolate), 0);
+ const WasmCompileControls& ctrls = GetPerIsolateWasmControls()->at(isolate);
+ if (is_async && ctrls.AllowAnySizeForAsync) return true;
+ if (!module_or_bytes->IsWasmModuleObject()) {
+ return IsWasmCompileAllowed(isolate, module_or_bytes, is_async);
+ }
+ v8::Local<v8::WasmModuleObject> module =
+ v8::Local<v8::WasmModuleObject>::Cast(module_or_bytes);
+ return static_cast<uint32_t>(
+ module->GetCompiledModule().GetWireBytesRef().size()) <=
+ ctrls.MaxWasmBufferSize;
+}
+
+v8::Local<v8::Value> NewRangeException(v8::Isolate* isolate,
+ const char* message) {
+ return v8::Exception::RangeError(
+ v8::String::NewFromOneByte(isolate,
+ reinterpret_cast<const uint8_t*>(message))
+ .ToLocalChecked());
+}
+
+void ThrowRangeException(v8::Isolate* isolate, const char* message) {
+ isolate->ThrowException(NewRangeException(isolate, message));
+}
+
+bool WasmModuleOverride(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (IsWasmCompileAllowed(args.GetIsolate(), args[0], false)) return false;
+ ThrowRangeException(args.GetIsolate(), "Sync compile not allowed");
+ return true;
+}
+
+bool WasmInstanceOverride(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (IsWasmInstantiateAllowed(args.GetIsolate(), args[0], false)) return false;
+ ThrowRangeException(args.GetIsolate(), "Sync instantiate not allowed");
+ return true;
+}
+
+} // namespace
+
+// Configures the per-isolate Wasm compile controls (maximum buffer size for
+// synchronous compilation, and whether async compilation is unrestricted).
+RUNTIME_FUNCTION(Runtime_SetWasmCompileControls) {
+ HandleScope scope(isolate);
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ CHECK_EQ(args.length(), 2);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, block_size, 0);
+ CONVERT_BOOLEAN_ARG_CHECKED(allow_async, 1);
+ base::MutexGuard guard(g_PerIsolateWasmControlsMutex.Pointer());
+ WasmCompileControls& ctrl = (*GetPerIsolateWasmControls())[v8_isolate];
+ ctrl.AllowAnySizeForAsync = allow_async;
+ ctrl.MaxWasmBufferSize = static_cast<uint32_t>(block_size->value());
+ v8_isolate->SetWasmModuleCallback(WasmModuleOverride);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_SetWasmInstantiateControls) {
+ HandleScope scope(isolate);
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ CHECK_EQ(args.length(), 0);
+ v8_isolate->SetWasmInstanceCallback(WasmInstanceOverride);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+namespace {
+
+void PrintIndentation(int stack_size) {
+ const int max_display = 80;
+ if (stack_size <= max_display) {
+ PrintF("%4d:%*s", stack_size, stack_size, "");
+ } else {
+ PrintF("%4d:%*s", stack_size, max_display, "...");
+ }
+}
+
+int WasmStackSize(Isolate* isolate) {
+ // TODO(wasm): Fix this for mixed JS/Wasm stacks with both --trace and
+ // --trace-wasm.
+ int n = 0;
+ for (StackTraceFrameIterator it(isolate); !it.done(); it.Advance()) {
+ if (it.is_wasm()) n++;
+ }
+ return n;
+}
+
+} // namespace
+
+RUNTIME_FUNCTION(Runtime_WasmTraceEnter) {
+ HandleScope shs(isolate);
+ DCHECK_EQ(0, args.length());
+ PrintIndentation(WasmStackSize(isolate));
+
+ // Find the caller wasm frame.
+ wasm::WasmCodeRefScope wasm_code_ref_scope;
+ StackTraceFrameIterator it(isolate);
+ DCHECK(!it.done());
+ DCHECK(it.is_wasm());
+ WasmFrame* frame = WasmFrame::cast(it.frame());
+
+ // Find the function name.
+ int func_index = frame->function_index();
+ const wasm::WasmModule* module = frame->wasm_instance().module();
+ wasm::ModuleWireBytes wire_bytes =
+ wasm::ModuleWireBytes(frame->native_module()->wire_bytes());
+ wasm::WireBytesRef name_ref =
+ module->lazily_generated_names.LookupFunctionName(
+ wire_bytes, func_index, VectorOf(module->export_table));
+ wasm::WasmName name = wire_bytes.GetNameOrNull(name_ref);
+
+ wasm::WasmCode* code = frame->wasm_code();
+ PrintF(code->is_liftoff() ? "~" : "*");
+
+ if (name.empty()) {
+ PrintF("wasm-function[%d] {\n", func_index);
+ } else {
+ PrintF("wasm-function[%d] \"%.*s\" {\n", func_index, name.length(),
+ name.begin());
+ }
+
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_WasmTraceExit) {
+ HandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(Smi, value_addr_smi, 0);
+
+ PrintIndentation(WasmStackSize(isolate));
+ PrintF("}");
+
+ // Find the caller wasm frame.
+ wasm::WasmCodeRefScope wasm_code_ref_scope;
+ StackTraceFrameIterator it(isolate);
+ DCHECK(!it.done());
+ DCHECK(it.is_wasm());
+ WasmFrame* frame = WasmFrame::cast(it.frame());
+ int func_index = frame->function_index();
+ const wasm::FunctionSig* sig =
+ frame->wasm_instance().module()->functions[func_index].sig;
+
+ size_t num_returns = sig->return_count();
+ if (num_returns == 1) {
+ wasm::ValueType return_type = sig->GetReturn(0);
+ switch (return_type.kind()) {
+ case wasm::kI32: {
+ int32_t value = base::ReadUnalignedValue<int32_t>(value_addr_smi.ptr());
+ PrintF(" -> %d\n", value);
+ break;
+ }
+ case wasm::kI64: {
+ int64_t value = base::ReadUnalignedValue<int64_t>(value_addr_smi.ptr());
+ PrintF(" -> %" PRId64 "\n", value);
+ break;
+ }
+ case wasm::kF32: {
+ float_t value = base::ReadUnalignedValue<float_t>(value_addr_smi.ptr());
+ PrintF(" -> %f\n", value);
+ break;
+ }
+ case wasm::kF64: {
+ double_t value =
+ base::ReadUnalignedValue<double_t>(value_addr_smi.ptr());
+ PrintF(" -> %f\n", value);
+ break;
+ }
+ default:
+ PrintF(" -> Unsupported type\n");
+ break;
+ }
+ } else {
+ // TODO(wasm): Handle multiple return values.
+ PrintF("\n");
+ }
+
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_IsAsmWasmCode) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(JSFunction, function, 0);
+ if (!function.shared().HasAsmWasmData()) {
+ return ReadOnlyRoots(isolate).false_value();
+ }
+ if (function.shared().HasBuiltinId() &&
+ function.shared().builtin_id() == Builtins::kInstantiateAsmJs) {
+ // Hasn't been compiled yet.
+ return ReadOnlyRoots(isolate).false_value();
+ }
+ return ReadOnlyRoots(isolate).true_value();
+}
+
+namespace {
+
+bool DisallowWasmCodegenFromStringsCallback(v8::Local<v8::Context> context,
+ v8::Local<v8::String> source) {
+ return false;
+}
+
+} // namespace
+
+RUNTIME_FUNCTION(Runtime_DisallowWasmCodegen) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_BOOLEAN_ARG_CHECKED(flag, 0);
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ v8_isolate->SetAllowWasmCodeGenerationCallback(
+ flag ? DisallowWasmCodegenFromStringsCallback : nullptr);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_IsWasmCode) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(JSFunction, function, 0);
+ bool is_js_to_wasm =
+ function.code().kind() == CodeKind::JS_TO_WASM_FUNCTION ||
+ (function.code().is_builtin() &&
+ function.code().builtin_index() == Builtins::kGenericJSToWasmWrapper);
+ return isolate->heap()->ToBoolean(is_js_to_wasm);
+}
+
+RUNTIME_FUNCTION(Runtime_IsWasmTrapHandlerEnabled) {
+ DisallowGarbageCollection no_gc;
+ DCHECK_EQ(0, args.length());
+ return isolate->heap()->ToBoolean(trap_handler::IsTrapHandlerEnabled());
+}
+
+RUNTIME_FUNCTION(Runtime_IsThreadInWasm) {
+ DisallowGarbageCollection no_gc;
+ DCHECK_EQ(0, args.length());
+ return isolate->heap()->ToBoolean(trap_handler::IsThreadInWasm());
+}
+
+RUNTIME_FUNCTION(Runtime_GetWasmRecoveredTrapCount) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+ size_t trap_count = trap_handler::GetRecoveredTrapCount();
+ return *isolate->factory()->NewNumberFromSize(trap_count);
+}
+
+RUNTIME_FUNCTION(Runtime_GetWasmExceptionId) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(WasmExceptionPackage, exception, 0);
+ CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 1);
+ Handle<Object> tag =
+ WasmExceptionPackage::GetExceptionTag(isolate, exception);
+ CHECK(tag->IsWasmExceptionTag());
+ Handle<FixedArray> exceptions_table(instance->exceptions_table(), isolate);
+ for (int index = 0; index < exceptions_table->length(); ++index) {
+ if (exceptions_table->get(index) == *tag) return Smi::FromInt(index);
+ }
+ UNREACHABLE();
+}
+
+RUNTIME_FUNCTION(Runtime_GetWasmExceptionValues) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(WasmExceptionPackage, exception, 0);
+ Handle<Object> values_obj =
+ WasmExceptionPackage::GetExceptionValues(isolate, exception);
+ CHECK(values_obj->IsFixedArray()); // Only called with correct input.
+ Handle<FixedArray> values = Handle<FixedArray>::cast(values_obj);
+ return *isolate->factory()->NewJSArrayWithElements(values);
+}
+
+// Wait until the given module is fully tiered up, then serialize it into an
+// array buffer.
+RUNTIME_FUNCTION(Runtime_SerializeWasmModule) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(WasmModuleObject, module_obj, 0);
+
+ wasm::NativeModule* native_module = module_obj->native_module();
+ native_module->compilation_state()->WaitForTopTierFinished();
+ DCHECK(!native_module->compilation_state()->failed());
+
+ wasm::WasmSerializer wasm_serializer(native_module);
+ size_t byte_length = wasm_serializer.GetSerializedNativeModuleSize();
+
+ Handle<JSArrayBuffer> array_buffer =
+ isolate->factory()
+ ->NewJSArrayBufferAndBackingStore(byte_length,
+ InitializedFlag::kUninitialized)
+ .ToHandleChecked();
+
+ CHECK(wasm_serializer.SerializeNativeModule(
+ {static_cast<uint8_t*>(array_buffer->backing_store()), byte_length}));
+ return *array_buffer;
+}
+
+// Take an array buffer and attempt to reconstruct a compiled wasm module.
+// Return undefined if unsuccessful.
+RUNTIME_FUNCTION(Runtime_DeserializeWasmModule) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, buffer, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, wire_bytes, 1);
+ CHECK(!buffer->was_detached());
+ CHECK(!wire_bytes->WasDetached());
+
+ Handle<JSArrayBuffer> wire_bytes_buffer = wire_bytes->GetBuffer();
+ Vector<const uint8_t> wire_bytes_vec{
+ reinterpret_cast<const uint8_t*>(wire_bytes_buffer->backing_store()) +
+ wire_bytes->byte_offset(),
+ wire_bytes->byte_length()};
+ Vector<uint8_t> buffer_vec{
+ reinterpret_cast<uint8_t*>(buffer->backing_store()),
+ buffer->byte_length()};
+
+ // Note that {wasm::DeserializeNativeModule} will allocate. We assume the
+ // JSArrayBuffer backing store doesn't get relocated.
+ MaybeHandle<WasmModuleObject> maybe_module_object =
+ wasm::DeserializeNativeModule(isolate, buffer_vec, wire_bytes_vec, {});
+ Handle<WasmModuleObject> module_object;
+ if (!maybe_module_object.ToHandle(&module_object)) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+ return *module_object;
+}
+
+RUNTIME_FUNCTION(Runtime_WasmGetNumberOfInstances) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(WasmModuleObject, module_obj, 0);
+ int instance_count = 0;
+ WeakArrayList weak_instance_list =
+ module_obj->script().wasm_weak_instance_list();
+ for (int i = 0; i < weak_instance_list.length(); ++i) {
+ if (weak_instance_list.Get(i)->IsWeak()) instance_count++;
+ }
+ return Smi::FromInt(instance_count);
+}
+
+RUNTIME_FUNCTION(Runtime_WasmNumCodeSpaces) {
+ DCHECK_EQ(1, args.length());
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, argument, 0);
+ Handle<WasmModuleObject> module;
+ if (argument->IsWasmInstanceObject()) {
+ module = handle(Handle<WasmInstanceObject>::cast(argument)->module_object(),
+ isolate);
+ } else if (argument->IsWasmModuleObject()) {
+ module = Handle<WasmModuleObject>::cast(argument);
+ }
+ size_t num_spaces =
+ module->native_module()->GetNumberOfCodeSpacesForTesting();
+ return *isolate->factory()->NewNumberFromSize(num_spaces);
+}
+
+RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(Smi, info_addr, 0);
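+ // The Smi argument encodes a raw pointer, here pointing at a
+ // wasm::MemoryTracingInfo describing the memory access being traced.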
+
+ wasm::MemoryTracingInfo* info =
+ reinterpret_cast<wasm::MemoryTracingInfo*>(info_addr.ptr());
+
+ // Find the caller wasm frame.
+ wasm::WasmCodeRefScope wasm_code_ref_scope;
+ StackTraceFrameIterator it(isolate);
+ DCHECK(!it.done());
+ DCHECK(it.is_wasm());
+ WasmFrame* frame = WasmFrame::cast(it.frame());
+
+ uint8_t* mem_start = reinterpret_cast<uint8_t*>(
+ frame->wasm_instance().memory_object().array_buffer().backing_store());
+ int func_index = frame->function_index();
+ int pos = frame->position();
+ // TODO(titzer): eliminate dependency on WasmModule definition here.
+ int func_start =
+ frame->wasm_instance().module()->functions[func_index].code.offset();
+ wasm::ExecutionTier tier = frame->wasm_code()->is_liftoff()
+ ? wasm::ExecutionTier::kLiftoff
+ : wasm::ExecutionTier::kTurbofan;
+ wasm::TraceMemoryOperation(tier, info, func_index, pos - func_start,
+ mem_start);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_WasmTierUpFunction) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
+ CONVERT_SMI_ARG_CHECKED(function_index, 1);
+ auto* native_module = instance->module_object().native_module();
+ isolate->wasm_engine()->CompileFunction(
+ isolate, native_module, function_index, wasm::ExecutionTier::kTurbofan);
+ CHECK(!native_module->compilation_state()->failed());
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_WasmTierDown) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+ isolate->wasm_engine()->TierDownAllModulesPerIsolate(isolate);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_WasmTierUp) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+ isolate->wasm_engine()->TierUpAllModulesPerIsolate(isolate);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_IsLiftoffFunction) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ CHECK(WasmExportedFunction::IsWasmExportedFunction(*function));
+ Handle<WasmExportedFunction> exp_fun =
+ Handle<WasmExportedFunction>::cast(function);
+ wasm::NativeModule* native_module =
+ exp_fun->instance().module_object().native_module();
+ uint32_t func_index = exp_fun->function_index();
+ wasm::WasmCodeRefScope code_ref_scope;
+ wasm::WasmCode* code = native_module->GetCode(func_index);
+ return isolate->heap()->ToBoolean(code && code->is_liftoff());
+}
+
+RUNTIME_FUNCTION(Runtime_FreezeWasmLazyCompilation) {
+ DCHECK_EQ(1, args.length());
+ DisallowGarbageCollection no_gc;
+ CONVERT_ARG_CHECKED(WasmInstanceObject, instance, 0);
+
+ instance.module_object().native_module()->set_lazy_compile_frozen(true);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 802c7f29979..0dd7368e39e 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <memory>
-#include <sstream>
-
#include "src/api/api-inl.h"
#include "src/base/platform/mutex.h"
#include "src/codegen/assembler-inl.h"
@@ -30,88 +27,15 @@
#include "src/regexp/regexp.h"
#include "src/runtime/runtime-utils.h"
#include "src/snapshot/snapshot.h"
-#include "src/trap-handler/trap-handler.h"
-#include "src/utils/ostreams.h"
-#include "src/wasm/memory-tracing.h"
-#include "src/wasm/module-compiler.h"
-#include "src/wasm/wasm-code-manager.h"
+
+#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-engine.h"
-#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects-inl.h"
-#include "src/wasm/wasm-serialization.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
namespace {
-struct WasmCompileControls {
- uint32_t MaxWasmBufferSize = std::numeric_limits<uint32_t>::max();
- bool AllowAnySizeForAsync = true;
-};
-using WasmCompileControlsMap = std::map<v8::Isolate*, WasmCompileControls>;
-
-// We need per-isolate controls, because we sometimes run tests in multiple
-// isolates concurrently. Methods need to hold the accompanying mutex on access.
-// To avoid upsetting the static initializer count, we lazy initialize this.
-DEFINE_LAZY_LEAKY_OBJECT_GETTER(WasmCompileControlsMap,
- GetPerIsolateWasmControls)
-base::LazyMutex g_PerIsolateWasmControlsMutex = LAZY_MUTEX_INITIALIZER;
-
-bool IsWasmCompileAllowed(v8::Isolate* isolate, v8::Local<v8::Value> value,
- bool is_async) {
- base::MutexGuard guard(g_PerIsolateWasmControlsMutex.Pointer());
- DCHECK_GT(GetPerIsolateWasmControls()->count(isolate), 0);
- const WasmCompileControls& ctrls = GetPerIsolateWasmControls()->at(isolate);
- return (is_async && ctrls.AllowAnySizeForAsync) ||
- (value->IsArrayBuffer() && value.As<v8::ArrayBuffer>()->ByteLength() <=
- ctrls.MaxWasmBufferSize) ||
- (value->IsArrayBufferView() &&
- value.As<v8::ArrayBufferView>()->ByteLength() <=
- ctrls.MaxWasmBufferSize);
-}
-
-// Use the compile controls for instantiation, too
-bool IsWasmInstantiateAllowed(v8::Isolate* isolate,
- v8::Local<v8::Value> module_or_bytes,
- bool is_async) {
- base::MutexGuard guard(g_PerIsolateWasmControlsMutex.Pointer());
- DCHECK_GT(GetPerIsolateWasmControls()->count(isolate), 0);
- const WasmCompileControls& ctrls = GetPerIsolateWasmControls()->at(isolate);
- if (is_async && ctrls.AllowAnySizeForAsync) return true;
- if (!module_or_bytes->IsWasmModuleObject()) {
- return IsWasmCompileAllowed(isolate, module_or_bytes, is_async);
- }
- v8::Local<v8::WasmModuleObject> module =
- v8::Local<v8::WasmModuleObject>::Cast(module_or_bytes);
- return static_cast<uint32_t>(
- module->GetCompiledModule().GetWireBytesRef().size()) <=
- ctrls.MaxWasmBufferSize;
-}
-
-v8::Local<v8::Value> NewRangeException(v8::Isolate* isolate,
- const char* message) {
- return v8::Exception::RangeError(
- v8::String::NewFromOneByte(isolate,
- reinterpret_cast<const uint8_t*>(message))
- .ToLocalChecked());
-}
-
-void ThrowRangeException(v8::Isolate* isolate, const char* message) {
- isolate->ThrowException(NewRangeException(isolate, message));
-}
-
-bool WasmModuleOverride(const v8::FunctionCallbackInfo<v8::Value>& args) {
- if (IsWasmCompileAllowed(args.GetIsolate(), args[0], false)) return false;
- ThrowRangeException(args.GetIsolate(), "Sync compile not allowed");
- return true;
-}
-
-bool WasmInstanceOverride(const v8::FunctionCallbackInfo<v8::Value>& args) {
- if (IsWasmInstantiateAllowed(args.GetIsolate(), args[0], false)) return false;
- ThrowRangeException(args.GetIsolate(), "Sync instantiate not allowed");
- return true;
-}
-
V8_WARN_UNUSED_RESULT Object CrashUnlessFuzzing(Isolate* isolate) {
CHECK(FLAG_fuzzing);
return ReadOnlyRoots(isolate).undefined_value();
@@ -307,7 +231,9 @@ Object OptimizeFunctionOnNextCall(RuntimeArguments& args, Isolate* isolate,
return CrashUnlessFuzzing(isolate);
}
+#if V8_ENABLE_WEBASSEMBLY
if (function->shared().HasAsmWasmData()) return CrashUnlessFuzzing(isolate);
+#endif // V8_ENABLE_WEBASSEMBLY
if (FLAG_testing_d8_test_runner) {
PendingOptimizationTable::MarkedForOptimization(isolate, function);
@@ -387,8 +313,12 @@ bool EnsureFeedbackVector(Isolate* isolate, Handle<JSFunction> function) {
RUNTIME_FUNCTION(Runtime_CompileBaseline) {
HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ if (args.length() != 1) {
+ return CrashUnlessFuzzing(isolate);
+ }
+ CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
+ if (!function_object->IsJSFunction()) return CrashUnlessFuzzing(isolate);
+ Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
IsCompiledScope is_compiled_scope =
function->shared(isolate).is_compiled_scope(isolate);
@@ -461,7 +391,9 @@ RUNTIME_FUNCTION(Runtime_PrepareFunctionForOptimization) {
return CrashUnlessFuzzing(isolate);
}
+#if V8_ENABLE_WEBASSEMBLY
if (function->shared().HasAsmWasmData()) return CrashUnlessFuzzing(isolate);
+#endif // V8_ENABLE_WEBASSEMBLY
// Hold onto the bytecode array between marking and optimization to ensure
// it's not flushed.
@@ -708,28 +640,6 @@ RUNTIME_FUNCTION(Runtime_ClearFunctionFeedback) {
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_SetWasmCompileControls) {
- HandleScope scope(isolate);
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- CHECK_EQ(args.length(), 2);
- CONVERT_ARG_HANDLE_CHECKED(Smi, block_size, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(allow_async, 1);
- base::MutexGuard guard(g_PerIsolateWasmControlsMutex.Pointer());
- WasmCompileControls& ctrl = (*GetPerIsolateWasmControls())[v8_isolate];
- ctrl.AllowAnySizeForAsync = allow_async;
- ctrl.MaxWasmBufferSize = static_cast<uint32_t>(block_size->value());
- v8_isolate->SetWasmModuleCallback(WasmModuleOverride);
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_SetWasmInstantiateControls) {
- HandleScope scope(isolate);
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- CHECK_EQ(args.length(), 0);
- v8_isolate->SetWasmInstanceCallback(WasmInstanceOverride);
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_NotifyContextDisposed) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
@@ -1045,109 +955,6 @@ RUNTIME_FUNCTION(Runtime_TraceExit) {
return obj; // return TOS
}
-namespace {
-
-int WasmStackSize(Isolate* isolate) {
- // TODO(wasm): Fix this for mixed JS/Wasm stacks with both --trace and
- // --trace-wasm.
- int n = 0;
- for (StackTraceFrameIterator it(isolate); !it.done(); it.Advance()) {
- if (it.is_wasm()) n++;
- }
- return n;
-}
-
-} // namespace
-
-RUNTIME_FUNCTION(Runtime_WasmTraceEnter) {
- HandleScope shs(isolate);
- DCHECK_EQ(0, args.length());
- PrintIndentation(WasmStackSize(isolate));
-
- // Find the caller wasm frame.
- wasm::WasmCodeRefScope wasm_code_ref_scope;
- StackTraceFrameIterator it(isolate);
- DCHECK(!it.done());
- DCHECK(it.is_wasm());
- WasmFrame* frame = WasmFrame::cast(it.frame());
-
- // Find the function name.
- int func_index = frame->function_index();
- const wasm::WasmModule* module = frame->wasm_instance().module();
- wasm::ModuleWireBytes wire_bytes =
- wasm::ModuleWireBytes(frame->native_module()->wire_bytes());
- wasm::WireBytesRef name_ref =
- module->lazily_generated_names.LookupFunctionName(
- wire_bytes, func_index, VectorOf(module->export_table));
- wasm::WasmName name = wire_bytes.GetNameOrNull(name_ref);
-
- wasm::WasmCode* code = frame->wasm_code();
- PrintF(code->is_liftoff() ? "~" : "*");
-
- if (name.empty()) {
- PrintF("wasm-function[%d] {\n", func_index);
- } else {
- PrintF("wasm-function[%d] \"%.*s\" {\n", func_index, name.length(),
- name.begin());
- }
-
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_WasmTraceExit) {
- HandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Smi, value_addr_smi, 0);
-
- PrintIndentation(WasmStackSize(isolate));
- PrintF("}");
-
- // Find the caller wasm frame.
- wasm::WasmCodeRefScope wasm_code_ref_scope;
- StackTraceFrameIterator it(isolate);
- DCHECK(!it.done());
- DCHECK(it.is_wasm());
- WasmFrame* frame = WasmFrame::cast(it.frame());
- int func_index = frame->function_index();
- const wasm::FunctionSig* sig =
- frame->wasm_instance().module()->functions[func_index].sig;
-
- size_t num_returns = sig->return_count();
- if (num_returns == 1) {
- wasm::ValueType return_type = sig->GetReturn(0);
- switch (return_type.kind()) {
- case wasm::kI32: {
- int32_t value = ReadUnalignedValue<int32_t>(value_addr_smi.ptr());
- PrintF(" -> %d\n", value);
- break;
- }
- case wasm::kI64: {
- int64_t value = ReadUnalignedValue<int64_t>(value_addr_smi.ptr());
- PrintF(" -> %" PRId64 "\n", value);
- break;
- }
- case wasm::kF32: {
- float_t value = ReadUnalignedValue<float_t>(value_addr_smi.ptr());
- PrintF(" -> %f\n", value);
- break;
- }
- case wasm::kF64: {
- double_t value = ReadUnalignedValue<double_t>(value_addr_smi.ptr());
- PrintF(" -> %f\n", value);
- break;
- }
- default:
- PrintF(" -> Unsupported type\n");
- break;
- }
- } else {
- // TODO(wasm) Handle multiple return values.
- PrintF("\n");
- }
-
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_HaveSameMap) {
SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
@@ -1183,21 +990,6 @@ RUNTIME_FUNCTION(Runtime_InYoungGeneration) {
return isolate->heap()->ToBoolean(ObjectInYoungGeneration(obj));
}
-RUNTIME_FUNCTION(Runtime_IsAsmWasmCode) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(JSFunction, function, 0);
- if (!function.shared().HasAsmWasmData()) {
- return ReadOnlyRoots(isolate).false_value();
- }
- if (function.shared().HasBuiltinId() &&
- function.shared().builtin_id() == Builtins::kInstantiateAsmJs) {
- // Hasn't been compiled yet.
- return ReadOnlyRoots(isolate).false_value();
- }
- return ReadOnlyRoots(isolate).true_value();
-}
-
namespace {
v8::ModifyCodeGenerationFromStringsResult DisallowCodegenFromStringsCallback(
@@ -1206,11 +998,6 @@ v8::ModifyCodeGenerationFromStringsResult DisallowCodegenFromStringsCallback(
return {false, {}};
}
-bool DisallowWasmCodegenFromStringsCallback(v8::Local<v8::Context> context,
- v8::Local<v8::String> source) {
- return false;
-}
-
} // namespace
RUNTIME_FUNCTION(Runtime_DisallowCodegenFromStrings) {
@@ -1223,72 +1010,6 @@ RUNTIME_FUNCTION(Runtime_DisallowCodegenFromStrings) {
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_DisallowWasmCodegen) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_BOOLEAN_ARG_CHECKED(flag, 0);
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- v8_isolate->SetAllowWasmCodeGenerationCallback(
- flag ? DisallowWasmCodegenFromStringsCallback : nullptr);
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_IsWasmCode) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(JSFunction, function, 0);
- bool is_js_to_wasm =
- function.code().kind() == CodeKind::JS_TO_WASM_FUNCTION ||
- (function.code().is_builtin() &&
- function.code().builtin_index() == Builtins::kGenericJSToWasmWrapper);
- return isolate->heap()->ToBoolean(is_js_to_wasm);
-}
-
-RUNTIME_FUNCTION(Runtime_IsWasmTrapHandlerEnabled) {
- DisallowGarbageCollection no_gc;
- DCHECK_EQ(0, args.length());
- return isolate->heap()->ToBoolean(trap_handler::IsTrapHandlerEnabled());
-}
-
-RUNTIME_FUNCTION(Runtime_IsThreadInWasm) {
- DisallowGarbageCollection no_gc;
- DCHECK_EQ(0, args.length());
- return isolate->heap()->ToBoolean(trap_handler::IsThreadInWasm());
-}
-
-RUNTIME_FUNCTION(Runtime_GetWasmRecoveredTrapCount) {
- HandleScope scope(isolate);
- DCHECK_EQ(0, args.length());
- size_t trap_count = trap_handler::GetRecoveredTrapCount();
- return *isolate->factory()->NewNumberFromSize(trap_count);
-}
-
-RUNTIME_FUNCTION(Runtime_GetWasmExceptionId) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmExceptionPackage, exception, 0);
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 1);
- Handle<Object> tag =
- WasmExceptionPackage::GetExceptionTag(isolate, exception);
- CHECK(tag->IsWasmExceptionTag());
- Handle<FixedArray> exceptions_table(instance->exceptions_table(), isolate);
- for (int index = 0; index < exceptions_table->length(); ++index) {
- if (exceptions_table->get(index) == *tag) return Smi::FromInt(index);
- }
- UNREACHABLE();
-}
-
-RUNTIME_FUNCTION(Runtime_GetWasmExceptionValues) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmExceptionPackage, exception, 0);
- Handle<Object> values_obj =
- WasmExceptionPackage::GetExceptionValues(isolate, exception);
- CHECK(values_obj->IsFixedArray()); // Only called with correct input.
- Handle<FixedArray> values = Handle<FixedArray>::cast(values_obj);
- return *isolate->factory()->NewJSArrayWithElements(values);
-}
-
RUNTIME_FUNCTION(Runtime_RegexpHasBytecode) {
SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
@@ -1455,61 +1176,6 @@ RUNTIME_FUNCTION(Runtime_SerializeDeserializeNow) {
return ReadOnlyRoots(isolate).undefined_value();
}
-// Wait until the given module is fully tiered up, then serialize it into an
-// array buffer.
-RUNTIME_FUNCTION(Runtime_SerializeWasmModule) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmModuleObject, module_obj, 0);
-
- wasm::NativeModule* native_module = module_obj->native_module();
- native_module->compilation_state()->WaitForTopTierFinished();
- DCHECK(!native_module->compilation_state()->failed());
-
- wasm::WasmSerializer wasm_serializer(native_module);
- size_t byte_length = wasm_serializer.GetSerializedNativeModuleSize();
-
- Handle<JSArrayBuffer> array_buffer =
- isolate->factory()
- ->NewJSArrayBufferAndBackingStore(byte_length,
- InitializedFlag::kUninitialized)
- .ToHandleChecked();
-
- CHECK(wasm_serializer.SerializeNativeModule(
- {static_cast<uint8_t*>(array_buffer->backing_store()), byte_length}));
- return *array_buffer;
-}
-
-// Take an array buffer and attempt to reconstruct a compiled wasm module.
-// Return undefined if unsuccessful.
-RUNTIME_FUNCTION(Runtime_DeserializeWasmModule) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, buffer, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, wire_bytes, 1);
- CHECK(!buffer->was_detached());
- CHECK(!wire_bytes->WasDetached());
-
- Handle<JSArrayBuffer> wire_bytes_buffer = wire_bytes->GetBuffer();
- Vector<const uint8_t> wire_bytes_vec{
- reinterpret_cast<const uint8_t*>(wire_bytes_buffer->backing_store()) +
- wire_bytes->byte_offset(),
- wire_bytes->byte_length()};
- Vector<uint8_t> buffer_vec{
- reinterpret_cast<uint8_t*>(buffer->backing_store()),
- buffer->byte_length()};
-
- // Note that {wasm::DeserializeNativeModule} will allocate. We assume the
- // JSArrayBuffer backing store doesn't get relocated.
- MaybeHandle<WasmModuleObject> maybe_module_object =
- wasm::DeserializeNativeModule(isolate, buffer_vec, wire_bytes_vec, {});
- Handle<WasmModuleObject> module_object;
- if (!maybe_module_object.ToHandle(&module_object)) {
- return ReadOnlyRoots(isolate).undefined_value();
- }
- return *module_object;
-}
-
RUNTIME_FUNCTION(Runtime_HeapObjectVerify) {
HandleScope shs(isolate);
DCHECK_EQ(1, args.length());
@@ -1539,106 +1205,6 @@ RUNTIME_FUNCTION(Runtime_TypedArrayMaxLength) {
return *isolate->factory()->NewNumber(JSTypedArray::kMaxLength);
}
-RUNTIME_FUNCTION(Runtime_WasmGetNumberOfInstances) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmModuleObject, module_obj, 0);
- int instance_count = 0;
- WeakArrayList weak_instance_list =
- module_obj->script().wasm_weak_instance_list();
- for (int i = 0; i < weak_instance_list.length(); ++i) {
- if (weak_instance_list.Get(i)->IsWeak()) instance_count++;
- }
- return Smi::FromInt(instance_count);
-}
-
-RUNTIME_FUNCTION(Runtime_WasmNumCodeSpaces) {
- DCHECK_EQ(1, args.length());
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, argument, 0);
- Handle<WasmModuleObject> module;
- if (argument->IsWasmInstanceObject()) {
- module = handle(Handle<WasmInstanceObject>::cast(argument)->module_object(),
- isolate);
- } else if (argument->IsWasmModuleObject()) {
- module = Handle<WasmModuleObject>::cast(argument);
- }
- size_t num_spaces =
- module->native_module()->GetNumberOfCodeSpacesForTesting();
- return *isolate->factory()->NewNumberFromSize(num_spaces);
-}
-
-RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Smi, info_addr, 0);
-
- wasm::MemoryTracingInfo* info =
- reinterpret_cast<wasm::MemoryTracingInfo*>(info_addr.ptr());
-
- // Find the caller wasm frame.
- wasm::WasmCodeRefScope wasm_code_ref_scope;
- StackTraceFrameIterator it(isolate);
- DCHECK(!it.done());
- DCHECK(it.is_wasm());
- WasmFrame* frame = WasmFrame::cast(it.frame());
-
- uint8_t* mem_start = reinterpret_cast<uint8_t*>(
- frame->wasm_instance().memory_object().array_buffer().backing_store());
- int func_index = frame->function_index();
- int pos = frame->position();
- // TODO(titzer): eliminate dependency on WasmModule definition here.
- int func_start =
- frame->wasm_instance().module()->functions[func_index].code.offset();
- wasm::ExecutionTier tier = frame->wasm_code()->is_liftoff()
- ? wasm::ExecutionTier::kLiftoff
- : wasm::ExecutionTier::kTurbofan;
- wasm::TraceMemoryOperation(tier, info, func_index, pos - func_start,
- mem_start);
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_WasmTierUpFunction) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- CONVERT_SMI_ARG_CHECKED(function_index, 1);
- auto* native_module = instance->module_object().native_module();
- isolate->wasm_engine()->CompileFunction(
- isolate, native_module, function_index, wasm::ExecutionTier::kTurbofan);
- CHECK(!native_module->compilation_state()->failed());
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_WasmTierDown) {
- HandleScope scope(isolate);
- DCHECK_EQ(0, args.length());
- isolate->wasm_engine()->TierDownAllModulesPerIsolate(isolate);
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_WasmTierUp) {
- HandleScope scope(isolate);
- DCHECK_EQ(0, args.length());
- isolate->wasm_engine()->TierUpAllModulesPerIsolate(isolate);
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_IsLiftoffFunction) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- CHECK(WasmExportedFunction::IsWasmExportedFunction(*function));
- Handle<WasmExportedFunction> exp_fun =
- Handle<WasmExportedFunction>::cast(function);
- wasm::NativeModule* native_module =
- exp_fun->instance().module_object().native_module();
- uint32_t func_index = exp_fun->function_index();
- wasm::WasmCodeRefScope code_ref_scope;
- wasm::WasmCode* code = native_module->GetCode(func_index);
- return isolate->heap()->ToBoolean(code && code->is_liftoff());
-}
-
RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTracking) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -1649,15 +1215,6 @@ RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTracking) {
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_FreezeWasmLazyCompilation) {
- DCHECK_EQ(1, args.length());
- DisallowGarbageCollection no_gc;
- CONVERT_ARG_CHECKED(WasmInstanceObject, instance, 0);
-
- instance.module_object().native_module()->set_lazy_compile_frozen(true);
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_TurbofanStaticAssert) {
SealHandleScope shs(isolate);
// Always lowered to StaticAssert node in Turbofan, so we never get here in
@@ -1687,9 +1244,11 @@ RUNTIME_FUNCTION(Runtime_EnableCodeLoggingForTesting) {
Handle<SharedFunctionInfo> shared,
Handle<Name> script_name, int line, int column) final {
}
+#if V8_ENABLE_WEBASSEMBLY
void CodeCreateEvent(LogEventsAndTags tag, const wasm::WasmCode* code,
wasm::WasmName name, const char* source_url,
int code_offset, int script_id) final {}
+#endif // V8_ENABLE_WEBASSEMBLY
void CallbackEvent(Handle<Name> name, Address entry_point) final {}
void GetterCallbackEvent(Handle<Name> name, Address entry_point) final {}
@@ -1706,12 +1265,14 @@ RUNTIME_FUNCTION(Runtime_EnableCodeLoggingForTesting) {
void CodeDependencyChangeEvent(Handle<Code> code,
Handle<SharedFunctionInfo> shared,
const char* reason) final {}
- void BytecodeFlushEvent(Address compiled_data_start) final {}
+ void WeakCodeClearEvent() final {}
bool is_listening_to_code_events() final { return true; }
};
static base::LeakyObject<NoopListener> noop_listener;
+#if V8_ENABLE_WEBASSEMBLY
isolate->wasm_engine()->EnableCodeLogging(isolate);
+#endif // V8_ENABLE_WEBASSEMBLY
isolate->code_event_dispatcher()->AddListener(noop_listener.get());
return ReadOnlyRoots(isolate).undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime-trace.cc b/deps/v8/src/runtime/runtime-trace.cc
index 8cd141d33e4..3b9a039670f 100644
--- a/deps/v8/src/runtime/runtime-trace.cc
+++ b/deps/v8/src/runtime/runtime-trace.cc
@@ -41,8 +41,8 @@ void AdvanceToOffsetForTracing(
}
void PrintRegisters(UnoptimizedFrame* frame, std::ostream& os, bool is_input,
- interpreter::BytecodeArrayAccessor&
- bytecode_accessor, // NOLINT(runtime/references)
+ interpreter::BytecodeArrayIterator&
+ bytecode_iterator, // NOLINT(runtime/references)
Handle<Object> accumulator) {
static const char kAccumulator[] = "accumulator";
static const int kRegFieldWidth = static_cast<int>(sizeof(kAccumulator) - 1);
@@ -54,7 +54,7 @@ void PrintRegisters(UnoptimizedFrame* frame, std::ostream& os, bool is_input,
os << (is_input ? kInputColourCode : kOutputColourCode);
}
- interpreter::Bytecode bytecode = bytecode_accessor.current_bytecode();
+ interpreter::Bytecode bytecode = bytecode_iterator.current_bytecode();
// Print accumulator.
if ((is_input && interpreter::Bytecodes::ReadsAccumulator(bytecode)) ||
@@ -75,14 +75,14 @@ void PrintRegisters(UnoptimizedFrame* frame, std::ostream& os, bool is_input,
: interpreter::Bytecodes::IsRegisterOutputOperandType(operand_type);
if (should_print) {
interpreter::Register first_reg =
- bytecode_accessor.GetRegisterOperand(operand_index);
- int range = bytecode_accessor.GetRegisterOperandRange(operand_index);
+ bytecode_iterator.GetRegisterOperand(operand_index);
+ int range = bytecode_iterator.GetRegisterOperandRange(operand_index);
for (int reg_index = first_reg.index();
reg_index < first_reg.index() + range; reg_index++) {
Object reg_object = frame->ReadInterpreterRegister(reg_index);
os << " [ " << std::setw(kRegFieldWidth)
<< interpreter::Register(reg_index).ToString(
- bytecode_accessor.bytecode_array()->parameter_count())
+ bytecode_iterator.bytecode_array()->parameter_count())
<< kArrowDirection;
reg_object.ShortPrint(os);
os << " ]" << std::endl;
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index 9ba26c23ad0..40af2938351 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -30,15 +30,17 @@ namespace internal {
namespace {
-template <typename FrameType, StackFrame::Type... skipped_frame_types>
+template <typename FrameType>
class FrameFinder {
- static_assert(sizeof...(skipped_frame_types) > 0,
- "Specify at least one frame to skip");
-
public:
- explicit FrameFinder(Isolate* isolate)
+ explicit FrameFinder(Isolate* isolate,
+ std::initializer_list<StackFrame::Type>
+ skipped_frame_types = {StackFrame::EXIT})
: frame_iterator_(isolate, isolate->thread_local_top()) {
- for (auto type : {skipped_frame_types...}) {
+ // We skip at least one frame.
+ DCHECK_LT(0, skipped_frame_types.size());
+
+ for (auto type : skipped_frame_types) {
DCHECK_EQ(type, frame_iterator_.frame()->type());
USE(type);
frame_iterator_.Advance();
@@ -54,9 +56,7 @@ class FrameFinder {
};
WasmInstanceObject GetWasmInstanceOnStackTop(Isolate* isolate) {
- return FrameFinder<WasmFrame, StackFrame::EXIT>(isolate)
- .frame()
- ->wasm_instance();
+ return FrameFinder<WasmFrame>(isolate).frame()->wasm_instance();
}
Context GetNativeContextFromWasmInstanceOnStackTop(Isolate* isolate) {
@@ -127,6 +127,7 @@ RUNTIME_FUNCTION(Runtime_WasmMemoryGrow) {
isolate, handle(instance->memory_object(), isolate), delta_pages);
// The WasmMemoryGrow builtin which calls this runtime function expects us to
// always return a Smi.
+ DCHECK(!isolate->has_pending_exception());
return Smi::FromInt(ret);
}
@@ -211,7 +212,7 @@ RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
CONVERT_SMI_ARG_CHECKED(func_index, 1);
#ifdef DEBUG
- FrameFinder<WasmCompileLazyFrame, StackFrame::EXIT> frame_finder(isolate);
+ FrameFinder<WasmCompileLazyFrame> frame_finder(isolate);
DCHECK_EQ(*instance, frame_finder.frame()->wasm_instance());
#endif
@@ -300,7 +301,7 @@ RUNTIME_FUNCTION(Runtime_WasmTriggerTierUp) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- FrameFinder<WasmFrame, StackFrame::EXIT> frame_finder(isolate);
+ FrameFinder<WasmFrame> frame_finder(isolate);
int func_index = frame_finder.frame()->function_index();
auto* native_module = instance->module_object().native_module();
@@ -498,14 +499,13 @@ RUNTIME_FUNCTION(Runtime_WasmTableCopy) {
RUNTIME_FUNCTION(Runtime_WasmTableGrow) {
ClearThreadInWasmScope flag_scope(isolate);
HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- auto instance =
- Handle<WasmInstanceObject>(GetWasmInstanceOnStackTop(isolate), isolate);
- CONVERT_UINT32_ARG_CHECKED(table_index, 0);
- CONVERT_ARG_CHECKED(Object, value_raw, 1);
+ DCHECK_EQ(4, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
+ CONVERT_UINT32_ARG_CHECKED(table_index, 1);
+ CONVERT_ARG_CHECKED(Object, value_raw, 2);
// TODO(wasm): Manually box because parameters are not visited yet.
Handle<Object> value(value_raw, isolate);
- CONVERT_UINT32_ARG_CHECKED(delta, 2);
+ CONVERT_UINT32_ARG_CHECKED(delta, 3);
Handle<WasmTableObject> table(
WasmTableObject::cast(instance->tables().get(table_index)), isolate);
@@ -517,15 +517,14 @@ RUNTIME_FUNCTION(Runtime_WasmTableGrow) {
RUNTIME_FUNCTION(Runtime_WasmTableFill) {
ClearThreadInWasmScope flag_scope(isolate);
HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
- auto instance =
- Handle<WasmInstanceObject>(GetWasmInstanceOnStackTop(isolate), isolate);
- CONVERT_UINT32_ARG_CHECKED(table_index, 0);
- CONVERT_UINT32_ARG_CHECKED(start, 1);
- CONVERT_ARG_CHECKED(Object, value_raw, 2);
+ DCHECK_EQ(5, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
+ CONVERT_UINT32_ARG_CHECKED(table_index, 1);
+ CONVERT_UINT32_ARG_CHECKED(start, 2);
+ CONVERT_ARG_CHECKED(Object, value_raw, 3);
// TODO(wasm): Manually box because parameters are not visited yet.
Handle<Object> value(value_raw, isolate);
- CONVERT_UINT32_ARG_CHECKED(count, 3);
+ CONVERT_UINT32_ARG_CHECKED(count, 4);
Handle<WasmTableObject> table(
WasmTableObject::cast(instance->tables().get(table_index)), isolate);
@@ -551,14 +550,12 @@ RUNTIME_FUNCTION(Runtime_WasmDebugBreak) {
ClearThreadInWasmScope flag_scope(isolate);
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
- FrameFinder<WasmFrame, StackFrame::EXIT, StackFrame::WASM_DEBUG_BREAK>
- frame_finder(isolate);
- auto instance = handle(frame_finder.frame()->wasm_instance(), isolate);
- auto script = handle(instance->module_object().script(), isolate);
+ FrameFinder<WasmFrame> frame_finder(
+ isolate, {StackFrame::EXIT, StackFrame::WASM_DEBUG_BREAK});
WasmFrame* frame = frame_finder.frame();
- int position = frame->position();
- auto frame_id = frame->id();
- auto* debug_info = frame->native_module()->GetDebugInfo();
+ auto instance = handle(frame->wasm_instance(), isolate);
+ auto script = handle(instance->module_object().script(), isolate);
+ auto* debug_info = instance->module_object().native_module()->GetDebugInfo();
isolate->set_context(instance->native_context());
// Stepping can repeatedly create code, and code GC requires stack guards to
@@ -570,11 +567,12 @@ RUNTIME_FUNCTION(Runtime_WasmDebugBreak) {
DebugScope debug_scope(isolate->debug());
// Check for instrumentation breakpoint.
- DCHECK_EQ(script->break_on_entry(), instance->break_on_entry());
+ DCHECK_EQ(script->break_on_entry(), !!instance->break_on_entry());
if (script->break_on_entry()) {
MaybeHandle<FixedArray> maybe_on_entry_breakpoints =
- WasmScript::CheckBreakPoints(
- isolate, script, WasmScript::kOnEntryBreakpointPosition, frame_id);
+ WasmScript::CheckBreakPoints(isolate, script,
+ WasmScript::kOnEntryBreakpointPosition,
+ frame->id());
script->set_break_on_entry(false);
// Update the "break_on_entry" flag on all live instances.
i::WeakArrayList weak_instance_list = script->wasm_weak_instance_list();
@@ -607,7 +605,8 @@ RUNTIME_FUNCTION(Runtime_WasmDebugBreak) {
// Check whether we hit a breakpoint.
Handle<FixedArray> breakpoints;
- if (WasmScript::CheckBreakPoints(isolate, script, position, frame_id)
+ if (WasmScript::CheckBreakPoints(isolate, script, frame->position(),
+ frame->id())
.ToHandle(&breakpoints)) {
debug_info->ClearStepping(isolate);
StepAction step_action = isolate->debug()->last_step_action();
@@ -616,8 +615,14 @@ RUNTIME_FUNCTION(Runtime_WasmDebugBreak) {
// We hit one or several breakpoints. Notify the debug listeners.
isolate->debug()->OnDebugBreak(breakpoints, step_action);
}
+ return ReadOnlyRoots(isolate).undefined_value();
}
+ // We did not hit a breakpoint. If we are in stepping code, but the user did
+ // not request stepping, clear this (to save further calls into this runtime
+ // function).
+ debug_info->ClearStepping(frame);
+
return ReadOnlyRoots(isolate).undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime.cc b/deps/v8/src/runtime/runtime.cc
index 4dcb99f0a15..c1b287cae37 100644
--- a/deps/v8/src/runtime/runtime.cc
+++ b/deps/v8/src/runtime/runtime.cc
@@ -138,8 +138,10 @@ bool Runtime::NeedsExactContext(FunctionId id) {
case Runtime::kThrowThrowMethodMissing:
case Runtime::kThrowTypeError:
case Runtime::kThrowUnsupportedSuperError:
+#if V8_ENABLE_WEBASSEMBLY
case Runtime::kThrowWasmError:
case Runtime::kThrowWasmStackOverflow:
+#endif // V8_ENABLE_WEBASSEMBLY
return false;
default:
return true;
@@ -173,8 +175,10 @@ bool Runtime::IsNonReturning(FunctionId id) {
case Runtime::kThrowSymbolAsyncIteratorInvalid:
case Runtime::kThrowTypeError:
case Runtime::kThrowConstAssignError:
+#if V8_ENABLE_WEBASSEMBLY
case Runtime::kThrowWasmError:
case Runtime::kThrowWasmStackOverflow:
+#endif // V8_ENABLE_WEBASSEMBLY
return true;
default:
return false;
@@ -215,6 +219,8 @@ bool Runtime::IsAllowListedForFuzzing(FunctionId id) {
case Runtime::kHeapObjectVerify:
case Runtime::kIsBeingInterpreted:
return !FLAG_allow_natives_for_differential_fuzzing;
+ case Runtime::kCompileBaseline:
+ return FLAG_sparkplug;
default:
return false;
}
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 912808ab3c6..578156f0942 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -335,7 +335,8 @@ namespace internal {
F(SetNamedProperty, 3, 1) \
F(SetOwnPropertyIgnoreAttributes, 4, 1) \
F(StoreDataPropertyInLiteral, 3, 1) \
- F(ShrinkPropertyDictionary, 1, 1) \
+ F(ShrinkNameDictionary, 1, 1) \
+ F(ShrinkSwissNameDictionary, 1, 1) \
F(ToFastProperties, 1, 1) \
I(ToLength, 1, 1) \
F(ToName, 1, 1) \
@@ -343,7 +344,17 @@ namespace internal {
F(ToNumeric, 1, 1) \
I(ToObject, 1, 1) \
I(ToString, 1, 1) \
- F(TryMigrateInstance, 1, 1)
+ F(TryMigrateInstance, 1, 1) \
+ F(SwissTableAdd, 4, 1) \
+ F(SwissTableAllocate, 1, 1) \
+ F(SwissTableDelete, 2, 1) \
+ F(SwissTableDetailsAt, 2, 1) \
+ F(SwissTableElementsCount, 1, 1) \
+ F(SwissTableEquals, 2, 1) \
+ F(SwissTableFindEntry, 2, 1) \
+ F(SwissTableUpdate, 4, 1) \
+ F(SwissTableValueAt, 2, 1) \
+ F(SwissTableKeyAt, 2, 1)
#define FOR_EACH_INTRINSIC_OPERATORS(F, I) \
F(Add, 2, 1) \
@@ -449,119 +460,97 @@ namespace internal {
F(SymbolDescriptiveString, 1, 1) \
F(SymbolIsPrivate, 1, 1)
-#define FOR_EACH_INTRINSIC_TEST(F, I) \
- F(Abort, 1, 1) \
- F(AbortJS, 1, 1) \
- F(AbortCSAAssert, 1, 1) \
- F(ArraySpeciesProtector, 0, 1) \
- F(ClearFunctionFeedback, 1, 1) \
- F(ClearMegamorphicStubCache, 0, 1) \
- F(CompleteInobjectSlackTracking, 1, 1) \
- F(ConstructConsString, 2, 1) \
- F(ConstructDouble, 2, 1) \
- F(ConstructSlicedString, 2, 1) \
- F(DebugPrint, 1, 1) \
- F(DebugPrintPtr, 1, 1) \
- F(DebugTrace, 0, 1) \
- F(DebugTrackRetainingPath, -1, 1) \
- F(DeoptimizeFunction, 1, 1) \
- F(DeserializeWasmModule, 2, 1) \
- F(DisallowCodegenFromStrings, 1, 1) \
- F(DisallowWasmCodegen, 1, 1) \
- F(DisassembleFunction, 1, 1) \
- F(DynamicCheckMapsEnabled, 0, 1) \
- F(IsTopTierTurboprop, 0, 1) \
- F(IsMidTierTurboprop, 0, 1) \
- F(EnableCodeLoggingForTesting, 0, 1) \
- F(EnsureFeedbackVectorForFunction, 1, 1) \
- F(FreezeWasmLazyCompilation, 1, 1) \
- F(GetCallable, 0, 1) \
- F(GetInitializerFunction, 1, 1) \
- F(GetOptimizationStatus, -1, 1) \
- F(GetUndetectable, 0, 1) \
- F(GetWasmExceptionId, 2, 1) \
- F(GetWasmExceptionValues, 1, 1) \
- F(GetWasmRecoveredTrapCount, 0, 1) \
- F(GlobalPrint, 1, 1) \
- F(HasDictionaryElements, 1, 1) \
- F(HasDoubleElements, 1, 1) \
- F(HasElementsInALargeObjectSpace, 1, 1) \
- F(HasFastElements, 1, 1) \
- F(HasFastProperties, 1, 1) \
- F(HasOwnConstDataProperty, 2, 1) \
- F(HasFixedBigInt64Elements, 1, 1) \
- F(HasFixedBigUint64Elements, 1, 1) \
- F(HasFixedFloat32Elements, 1, 1) \
- F(HasFixedFloat64Elements, 1, 1) \
- F(HasFixedInt16Elements, 1, 1) \
- F(HasFixedInt32Elements, 1, 1) \
- F(HasFixedInt8Elements, 1, 1) \
- F(HasFixedUint16Elements, 1, 1) \
- F(HasFixedUint32Elements, 1, 1) \
- F(HasFixedUint8ClampedElements, 1, 1) \
- F(HasFixedUint8Elements, 1, 1) \
- F(HasHoleyElements, 1, 1) \
- F(HasObjectElements, 1, 1) \
- F(HasPackedElements, 1, 1) \
- F(HasSloppyArgumentsElements, 1, 1) \
- F(HasSmiElements, 1, 1) \
- F(HasSmiOrObjectElements, 1, 1) \
- F(HaveSameMap, 2, 1) \
- F(HeapObjectVerify, 1, 1) \
- F(ICsAreEnabled, 0, 1) \
- F(InLargeObjectSpace, 1, 1) \
- F(InYoungGeneration, 1, 1) \
- F(IsAsmWasmCode, 1, 1) \
- F(IsBeingInterpreted, 0, 1) \
- F(IsConcurrentRecompilationSupported, 0, 1) \
- F(IsDictPropertyConstTrackingEnabled, 0, 1) \
- F(IsLiftoffFunction, 1, 1) \
- F(IsThreadInWasm, 0, 1) \
- F(IsWasmCode, 1, 1) \
- F(IsWasmTrapHandlerEnabled, 0, 1) \
- F(RegexpHasBytecode, 2, 1) \
- F(RegexpHasNativeCode, 2, 1) \
- F(RegexpTypeTag, 1, 1) \
- F(RegexpIsUnmodified, 1, 1) \
- F(MapIteratorProtector, 0, 1) \
- F(ArrayIteratorProtector, 0, 1) \
- F(NeverOptimizeFunction, 1, 1) \
- F(NotifyContextDisposed, 0, 1) \
- F(OptimizeFunctionOnNextCall, -1, 1) \
- F(TierupFunctionOnNextCall, -1, 1) \
- F(OptimizeOsr, -1, 1) \
- F(NewRegExpWithBacktrackLimit, 3, 1) \
- F(PrepareFunctionForOptimization, -1, 1) \
- F(PrintWithNameForAssert, 2, 1) \
- F(RunningInSimulator, 0, 1) \
- F(RuntimeEvaluateREPL, 1, 1) \
- F(SerializeDeserializeNow, 0, 1) \
- F(SerializeWasmModule, 1, 1) \
- F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
- F(SetForceSlowPath, 1, 1) \
- F(SetIteratorProtector, 0, 1) \
- F(SetWasmCompileControls, 2, 1) \
- F(SetWasmInstantiateControls, 0, 1) \
- F(SimulateNewspaceFull, 0, 1) \
- F(ScheduleGCInStackCheck, 0, 1) \
- F(StringIteratorProtector, 0, 1) \
- F(SystemBreak, 0, 1) \
- F(TraceEnter, 0, 1) \
- F(TraceExit, 1, 1) \
- F(TurbofanStaticAssert, 1, 1) \
- F(TypedArraySpeciesProtector, 0, 1) \
- F(UnblockConcurrentRecompilation, 0, 1) \
- F(WasmGetNumberOfInstances, 1, 1) \
- F(WasmNumCodeSpaces, 1, 1) \
- F(WasmTierDown, 0, 1) \
- F(WasmTierUp, 0, 1) \
- F(WasmTierUpFunction, 2, 1) \
- F(WasmTraceEnter, 0, 1) \
- F(WasmTraceExit, 1, 1) \
- F(WasmTraceMemory, 1, 1) \
- I(DeoptimizeNow, 0, 1) \
- F(PromiseSpeciesProtector, 0, 1) \
- F(IsConcatSpreadableProtector, 0, 1) \
+#define FOR_EACH_INTRINSIC_TEST(F, I) \
+ F(Abort, 1, 1) \
+ F(AbortJS, 1, 1) \
+ F(AbortCSAAssert, 1, 1) \
+ F(ArraySpeciesProtector, 0, 1) \
+ F(ClearFunctionFeedback, 1, 1) \
+ F(ClearMegamorphicStubCache, 0, 1) \
+ F(CompleteInobjectSlackTracking, 1, 1) \
+ F(ConstructConsString, 2, 1) \
+ F(ConstructDouble, 2, 1) \
+ F(ConstructSlicedString, 2, 1) \
+ F(DebugPrint, 1, 1) \
+ F(DebugPrintPtr, 1, 1) \
+ F(DebugTrace, 0, 1) \
+ F(DebugTrackRetainingPath, -1, 1) \
+ F(DeoptimizeFunction, 1, 1) \
+ F(DisallowCodegenFromStrings, 1, 1) \
+ F(DisassembleFunction, 1, 1) \
+ F(DynamicCheckMapsEnabled, 0, 1) \
+ F(IsTopTierTurboprop, 0, 1) \
+ F(IsMidTierTurboprop, 0, 1) \
+ F(EnableCodeLoggingForTesting, 0, 1) \
+ F(EnsureFeedbackVectorForFunction, 1, 1) \
+ F(GetCallable, 0, 1) \
+ F(GetInitializerFunction, 1, 1) \
+ F(GetOptimizationStatus, -1, 1) \
+ F(GetUndetectable, 0, 1) \
+ F(GlobalPrint, 1, 1) \
+ F(HasDictionaryElements, 1, 1) \
+ F(HasDoubleElements, 1, 1) \
+ F(HasElementsInALargeObjectSpace, 1, 1) \
+ F(HasFastElements, 1, 1) \
+ F(HasFastProperties, 1, 1) \
+ F(HasOwnConstDataProperty, 2, 1) \
+ F(HasFixedBigInt64Elements, 1, 1) \
+ F(HasFixedBigUint64Elements, 1, 1) \
+ F(HasFixedFloat32Elements, 1, 1) \
+ F(HasFixedFloat64Elements, 1, 1) \
+ F(HasFixedInt16Elements, 1, 1) \
+ F(HasFixedInt32Elements, 1, 1) \
+ F(HasFixedInt8Elements, 1, 1) \
+ F(HasFixedUint16Elements, 1, 1) \
+ F(HasFixedUint32Elements, 1, 1) \
+ F(HasFixedUint8ClampedElements, 1, 1) \
+ F(HasFixedUint8Elements, 1, 1) \
+ F(HasHoleyElements, 1, 1) \
+ F(HasObjectElements, 1, 1) \
+ F(HasPackedElements, 1, 1) \
+ F(HasSloppyArgumentsElements, 1, 1) \
+ F(HasSmiElements, 1, 1) \
+ F(HasSmiOrObjectElements, 1, 1) \
+ F(HaveSameMap, 2, 1) \
+ F(HeapObjectVerify, 1, 1) \
+ F(ICsAreEnabled, 0, 1) \
+ F(InLargeObjectSpace, 1, 1) \
+ F(InYoungGeneration, 1, 1) \
+ F(IsBeingInterpreted, 0, 1) \
+ F(IsConcurrentRecompilationSupported, 0, 1) \
+ F(IsDictPropertyConstTrackingEnabled, 0, 1) \
+ F(RegexpHasBytecode, 2, 1) \
+ F(RegexpHasNativeCode, 2, 1) \
+ F(RegexpTypeTag, 1, 1) \
+ F(RegexpIsUnmodified, 1, 1) \
+ F(MapIteratorProtector, 0, 1) \
+ F(ArrayIteratorProtector, 0, 1) \
+ F(NeverOptimizeFunction, 1, 1) \
+ F(NotifyContextDisposed, 0, 1) \
+ F(OptimizeFunctionOnNextCall, -1, 1) \
+ F(TierupFunctionOnNextCall, -1, 1) \
+ F(OptimizeOsr, -1, 1) \
+ F(NewRegExpWithBacktrackLimit, 3, 1) \
+ F(PrepareFunctionForOptimization, -1, 1) \
+ F(PrintWithNameForAssert, 2, 1) \
+ F(RunningInSimulator, 0, 1) \
+ F(RuntimeEvaluateREPL, 1, 1) \
+ F(SerializeDeserializeNow, 0, 1) \
+ F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
+ F(SetForceSlowPath, 1, 1) \
+ F(SetIteratorProtector, 0, 1) \
+ F(SimulateNewspaceFull, 0, 1) \
+ F(ScheduleGCInStackCheck, 0, 1) \
+ F(StringIteratorProtector, 0, 1) \
+ F(SystemBreak, 0, 1) \
+ F(TraceEnter, 0, 1) \
+ F(TraceExit, 1, 1) \
+ F(TurbofanStaticAssert, 1, 1) \
+ F(TypedArraySpeciesProtector, 0, 1) \
+ F(UnblockConcurrentRecompilation, 0, 1) \
+ I(DeoptimizeNow, 0, 1) \
+ F(PromiseSpeciesProtector, 0, 1) \
+ F(IsConcatSpreadableProtector, 0, 1) \
F(RegExpSpeciesProtector, 0, 1)
#define FOR_EACH_INTRINSIC_TYPEDARRAY(F, I) \
@@ -571,31 +560,55 @@ namespace internal {
F(TypedArraySet, 2, 1) \
F(TypedArraySortFast, 1, 1)
-#define FOR_EACH_INTRINSIC_WASM(F, I) \
- F(ThrowWasmError, 1, 1) \
- F(ThrowWasmStackOverflow, 0, 1) \
- F(WasmI32AtomicWait, 4, 1) \
- F(WasmI64AtomicWait, 5, 1) \
- F(WasmAtomicNotify, 3, 1) \
- F(WasmMemoryGrow, 2, 1) \
- F(WasmStackGuard, 0, 1) \
- F(WasmThrow, 2, 1) \
- F(WasmReThrow, 1, 1) \
- F(WasmThrowJSTypeError, 0, 1) \
- F(WasmRefFunc, 1, 1) \
- F(WasmFunctionTableGet, 3, 1) \
- F(WasmFunctionTableSet, 4, 1) \
- F(WasmTableInit, 6, 1) \
- F(WasmTableCopy, 6, 1) \
- F(WasmTableGrow, 3, 1) \
- F(WasmTableFill, 4, 1) \
- F(WasmIsValidRefValue, 3, 1) \
- F(WasmCompileLazy, 2, 1) \
- F(WasmCompileWrapper, 2, 1) \
- F(WasmTriggerTierUp, 1, 1) \
- F(WasmDebugBreak, 0, 1) \
+#define FOR_EACH_INTRINSIC_WASM(F, I) \
+ F(ThrowWasmError, 1, 1) \
+ F(ThrowWasmStackOverflow, 0, 1) \
+ F(WasmI32AtomicWait, 4, 1) \
+ F(WasmI64AtomicWait, 5, 1) \
+ F(WasmAtomicNotify, 3, 1) \
+ F(WasmMemoryGrow, 2, 1) \
+ F(WasmStackGuard, 0, 1) \
+ F(WasmThrow, 2, 1) \
+ F(WasmReThrow, 1, 1) \
+ F(WasmThrowJSTypeError, 0, 1) \
+ F(WasmRefFunc, 1, 1) \
+ F(WasmFunctionTableGet, 3, 1) \
+ F(WasmFunctionTableSet, 4, 1) \
+ F(WasmTableInit, 6, 1) \
+ F(WasmTableCopy, 6, 1) \
+ F(WasmTableGrow, 4, 1) \
+ F(WasmTableFill, 5, 1) \
+ F(WasmIsValidRefValue, 3, 1) \
+ F(WasmCompileLazy, 2, 1) \
+ F(WasmCompileWrapper, 2, 1) \
+ F(WasmTriggerTierUp, 1, 1) \
+ F(WasmDebugBreak, 0, 1) \
F(WasmAllocateRtt, 2, 1)
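+
+// Wasm intrinsics that are only used from tests. Like the list above, they
+// are registered only when V8_ENABLE_WEBASSEMBLY is set (see the IF_WASM
+// wrapper further down).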
+#define FOR_EACH_INTRINSIC_WASM_TEST(F, I) \
+ F(DeserializeWasmModule, 2, 1) \
+ F(DisallowWasmCodegen, 1, 1) \
+ F(FreezeWasmLazyCompilation, 1, 1) \
+ F(GetWasmExceptionId, 2, 1) \
+ F(GetWasmExceptionValues, 1, 1) \
+ F(GetWasmRecoveredTrapCount, 0, 1) \
+ F(IsAsmWasmCode, 1, 1) \
+ F(IsLiftoffFunction, 1, 1) \
+ F(IsThreadInWasm, 0, 1) \
+ F(IsWasmCode, 1, 1) \
+ F(IsWasmTrapHandlerEnabled, 0, 1) \
+ F(SerializeWasmModule, 1, 1) \
+ F(SetWasmCompileControls, 2, 1) \
+ F(SetWasmInstantiateControls, 0, 1) \
+ F(WasmGetNumberOfInstances, 1, 1) \
+ F(WasmNumCodeSpaces, 1, 1) \
+ F(WasmTierDown, 0, 1) \
+ F(WasmTierUp, 0, 1) \
+ F(WasmTierUpFunction, 2, 1) \
+ F(WasmTraceEnter, 0, 1) \
+ F(WasmTraceExit, 1, 1) \
+ F(WasmTraceMemory, 1, 1)
+
#define FOR_EACH_INTRINSIC_WEAKREF(F, I) \
F(JSFinalizationRegistryRegisterWeakCellWithUnregisterToken, 4, 1) \
F(JSWeakRefAddToKeptObjects, 1, 1) \
@@ -661,7 +674,8 @@ namespace internal {
FOR_EACH_INTRINSIC_SYMBOL(F, I) \
FOR_EACH_INTRINSIC_TEST(F, I) \
FOR_EACH_INTRINSIC_TYPEDARRAY(F, I) \
- FOR_EACH_INTRINSIC_WASM(F, I) \
+ IF_WASM(FOR_EACH_INTRINSIC_WASM, F, I) \
+ IF_WASM(FOR_EACH_INTRINSIC_WASM_TEST, F, I) \
FOR_EACH_INTRINSIC_WEAKREF(F, I)
// Defines the list of all intrinsics, coming in 2 flavors, either returning an
@@ -761,9 +775,9 @@ class Runtime : public AllStatic {
// Get the runtime intrinsic function table.
static const Function* RuntimeFunctionTable(Isolate* isolate);
- V8_WARN_UNUSED_RESULT static Maybe<bool> DeleteObjectProperty(
- Isolate* isolate, Handle<JSReceiver> receiver, Handle<Object> key,
- LanguageMode language_mode);
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static Maybe<bool>
+ DeleteObjectProperty(Isolate* isolate, Handle<JSReceiver> receiver,
+ Handle<Object> key, LanguageMode language_mode);
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
SetObjectProperty(Isolate* isolate, Handle<Object> object, Handle<Object> key,
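The IF_WASM wrapper used in the intrinsic list above expands its wrapped list only when WebAssembly support is compiled in, so the wasm and wasm-test intrinsics drop out of wasm-less builds entirely. A minimal sketch of the idea, assuming a definition along these lines in V8's common headers (the actual macro may differ, e.g. by routing through an EXPAND helper for MSVC):

    #if V8_ENABLE_WEBASSEMBLY
    #define IF_WASM(V, ...) V(__VA_ARGS__)  // keep the wrapped intrinsic list
    #else
    #define IF_WASM(V, ...)                 // drop it when wasm is compiled out
    #endif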
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index 1e5c51c0728..a4641baabf2 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -56,9 +56,11 @@ ScriptCompiler::CachedData* CodeSerializer::Serialize(
script->name().ShortPrint();
PrintF("]\n");
}
+#if V8_ENABLE_WEBASSEMBLY
// TODO(7110): Enable serialization of Asm modules once the AsmWasmData is
// context independent.
if (script->ContainsAsmModule()) return nullptr;
+#endif // V8_ENABLE_WEBASSEMBLY
// Serialize code object.
Handle<String> source(String::cast(script->source()), isolate);
@@ -158,9 +160,12 @@ void CodeSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
if (obj->IsSharedFunctionInfo()) {
Handle<SharedFunctionInfo> sfi = Handle<SharedFunctionInfo>::cast(obj);
+ DCHECK(!sfi->IsApiFunction());
+#if V8_ENABLE_WEBASSEMBLY
// TODO(7110): Enable serializing of Asm modules once the AsmWasmData
// is context independent.
- DCHECK(!sfi->IsApiFunction() && !sfi->HasAsmWasmData());
+ DCHECK(!sfi->HasAsmWasmData());
+#endif // V8_ENABLE_WEBASSEMBLY
DebugInfo debug_info;
BytecodeArray debug_bytecode_array;
@@ -252,9 +257,8 @@ void CreateInterpreterDataForDeserializedCode(Isolate* isolate,
int line_num = script->GetLineNumber(info->StartPosition()) + 1;
int column_num = script->GetColumnNumber(info->StartPosition()) + 1;
PROFILE(isolate,
- CodeCreateEvent(CodeEventListener::INTERPRETED_FUNCTION_TAG,
- abstract_code, info, name_handle, line_num,
- column_num));
+ CodeCreateEvent(CodeEventListener::FUNCTION_TAG, abstract_code,
+ info, name_handle, line_num, column_num));
}
}
#endif // V8_TARGET_ARCH_ARM
@@ -385,11 +389,13 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
script->GetLineNumber(shared_info->StartPosition()) + 1;
int column_num =
script->GetColumnNumber(shared_info->StartPosition()) + 1;
- PROFILE(isolate,
- CodeCreateEvent(
- CodeEventListener::SCRIPT_TAG,
- handle(shared_info->abstract_code(isolate), isolate),
- shared_info, name, line_num, column_num));
+ PROFILE(
+ isolate,
+ CodeCreateEvent(
+ shared_info->is_toplevel() ? CodeEventListener::SCRIPT_TAG
+ : CodeEventListener::FUNCTION_TAG,
+ handle(shared_info->abstract_code(isolate), isolate),
+ shared_info, name, line_num, column_num));
}
}
}
diff --git a/deps/v8/src/snapshot/context-deserializer.cc b/deps/v8/src/snapshot/context-deserializer.cc
index 5ae6dcd0eb7..04756b5ffe8 100644
--- a/deps/v8/src/snapshot/context-deserializer.cc
+++ b/deps/v8/src/snapshot/context-deserializer.cc
@@ -7,6 +7,7 @@
#include "src/api/api-inl.h"
#include "src/common/assert-scope.h"
#include "src/heap/heap-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/slots.h"
#include "src/snapshot/snapshot.h"
diff --git a/deps/v8/src/snapshot/context-serializer.cc b/deps/v8/src/snapshot/context-serializer.cc
index 80059e1e951..4cf57defa07 100644
--- a/deps/v8/src/snapshot/context-serializer.cc
+++ b/deps/v8/src/snapshot/context-serializer.cc
@@ -177,6 +177,9 @@ void ContextSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
Handle<JSFunction> closure = Handle<JSFunction>::cast(obj);
closure->ResetIfBytecodeFlushed();
if (closure->is_compiled()) {
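+ // Baseline (Sparkplug) code is not serialized; drop it so the closure is
+ // restored from its bytecode when the snapshot is deserialized.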
+ if (closure->shared().HasBaselineData()) {
+ closure->shared().flush_baseline_data();
+ }
closure->set_code(closure->shared().GetCode(), kReleaseStore);
}
}
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index c378653a270..ecfa889f1e3 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -690,7 +690,7 @@ void Deserializer::RelocInfoVisitor::VisitOffHeapTarget(Code host,
DCHECK(Builtins::IsBuiltinId(builtin_index));
CHECK_NOT_NULL(isolate()->embedded_blob_code());
- EmbeddedData d = EmbeddedData::FromBlob();
+ EmbeddedData d = EmbeddedData::FromBlob(isolate());
Address address = d.InstructionStartOfBuiltin(builtin_index);
CHECK_NE(kNullAddress, address);
diff --git a/deps/v8/src/snapshot/embedded/embedded-data.cc b/deps/v8/src/snapshot/embedded/embedded-data.cc
index 03702bf3313..2a0549cfbb7 100644
--- a/deps/v8/src/snapshot/embedded/embedded-data.cc
+++ b/deps/v8/src/snapshot/embedded/embedded-data.cc
@@ -13,19 +13,12 @@
namespace v8 {
namespace internal {
-// static
-bool InstructionStream::PcIsOffHeap(Isolate* isolate, Address pc) {
- const Address start =
- reinterpret_cast<Address>(isolate->embedded_blob_code());
- return start <= pc && pc < start + isolate->embedded_blob_code_size();
-}
+namespace {
-// static
-Code InstructionStream::TryLookupCode(Isolate* isolate, Address address) {
- if (!PcIsOffHeap(isolate, address)) return Code();
+Builtins::Name TryLookupCode(const EmbeddedData& d, Address address) {
+ if (!d.IsInCodeRange(address)) return Builtins::kNoBuiltinId;
- EmbeddedData d = EmbeddedData::FromBlob();
- if (address < d.InstructionStartOfBuiltin(0)) return Code();
+ if (address < d.InstructionStartOfBuiltin(0)) return Builtins::kNoBuiltinId;
// Note: Addresses within the padding section between builtins (i.e. within
// start + size <= address < start + padded_size) are interpreted as belonging
@@ -42,13 +35,67 @@ Code InstructionStream::TryLookupCode(Isolate* isolate, Address address) {
} else if (address >= end) {
l = mid + 1;
} else {
- return isolate->builtins()->builtin(mid);
+ return static_cast<Builtins::Name>(mid);
}
}
UNREACHABLE();
}
+} // namespace
+
+// static
+bool InstructionStream::PcIsOffHeap(Isolate* isolate, Address pc) {
+ // Mksnapshot calls this while the embedded blob is not available yet.
+ if (isolate->embedded_blob_code() == nullptr) return false;
+ DCHECK_NOT_NULL(Isolate::CurrentEmbeddedBlobCode());
+
+ if (EmbeddedData::FromBlob(isolate).IsInCodeRange(pc)) return true;
+ return isolate->is_short_builtin_calls_enabled() &&
+ EmbeddedData::FromBlob().IsInCodeRange(pc);
+}
+
+// static
+bool InstructionStream::TryGetAddressForHashing(Isolate* isolate,
+ Address address,
+ uint32_t* hashable_address) {
+ // Mksnapshot calls this while the embedded blob is not available yet.
+ if (isolate->embedded_blob_code() == nullptr) return false;
+ DCHECK_NOT_NULL(Isolate::CurrentEmbeddedBlobCode());
+
+ EmbeddedData d = EmbeddedData::FromBlob(isolate);
+ if (d.IsInCodeRange(address)) {
+ *hashable_address = d.AddressForHashing(address);
+ return true;
+ }
+
+ if (isolate->is_short_builtin_calls_enabled()) {
+ d = EmbeddedData::FromBlob();
+ if (d.IsInCodeRange(address)) {
+ *hashable_address = d.AddressForHashing(address);
+ return true;
+ }
+ }
+ return false;
+}
+
+// static
+Builtins::Name InstructionStream::TryLookupCode(Isolate* isolate,
+ Address address) {
+ // Mksnapshot calls this while the embedded blob is not available yet.
+ if (isolate->embedded_blob_code() == nullptr) return Builtins::kNoBuiltinId;
+ DCHECK_NOT_NULL(Isolate::CurrentEmbeddedBlobCode());
+
+ Builtins::Name builtin =
+ i::TryLookupCode(EmbeddedData::FromBlob(isolate), address);
+
+ if (isolate->is_short_builtin_calls_enabled() &&
+ !Builtins::IsBuiltinId(builtin)) {
+ builtin = i::TryLookupCode(EmbeddedData::FromBlob(), address);
+ }
+ return builtin;
+}
+
// static
void InstructionStream::CreateOffHeapInstructionStream(Isolate* isolate,
uint8_t** code,
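
Taken together, the rewritten embedded-data helpers consult the Isolate's own blob first and only fall back to the process-wide blob when short builtin calls are enabled, using a binary search over each blob's builtin instruction-start offsets. A compact, self-contained sketch of the same two-tier lookup; Blob and kNoBuiltinId here are illustrative stand-ins, not the V8 types:

#include <cstdint>
#include <vector>

using Address = std::uintptr_t;
constexpr int kNoBuiltinId = -1;

struct Blob {
  Address code_start = 0;
  Address code_size = 0;
  std::vector<Address> starts;       // starts[i] = instruction start of builtin i
  std::vector<Address> padded_ends;  // padded_ends[i] = end including padding

  bool IsInCodeRange(Address pc) const {
    return code_start <= pc && pc < code_start + code_size;
  }
};

// Binary search mirroring the anonymous-namespace helper above: addresses that
// fall in the padding between builtins resolve to the preceding builtin.
int TryLookup(const Blob& d, Address pc) {
  if (d.starts.empty() || !d.IsInCodeRange(pc) || pc < d.starts.front()) {
    return kNoBuiltinId;
  }
  int l = 0;
  int r = static_cast<int>(d.starts.size());
  while (l < r) {
    int mid = (l + r) / 2;
    if (pc < d.starts[mid]) {
      r = mid;
    } else if (pc >= d.padded_ends[mid]) {
      l = mid + 1;
    } else {
      return mid;
    }
  }
  return kNoBuiltinId;
}

// Isolate-local blob first; the global blob is a fallback only when short
// builtin calls are enabled (two instruction streams may be live).
int TryLookupWithFallback(const Blob& local, const Blob& global, Address pc,
                          bool short_builtin_calls_enabled) {
  int id = TryLookup(local, pc);
  if (id == kNoBuiltinId && short_builtin_calls_enabled) {
    id = TryLookup(global, pc);
  }
  return id;
}
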
diff --git a/deps/v8/src/snapshot/embedded/embedded-data.h b/deps/v8/src/snapshot/embedded/embedded-data.h
index d8d2dd822de..6518c38d025 100644
--- a/deps/v8/src/snapshot/embedded/embedded-data.h
+++ b/deps/v8/src/snapshot/embedded/embedded-data.h
@@ -23,8 +23,15 @@ class InstructionStream final : public AllStatic {
// Returns true, iff the given pc points into an off-heap instruction stream.
static bool PcIsOffHeap(Isolate* isolate, Address pc);
- // Returns the corresponding Code object if it exists, and nullptr otherwise.
- static Code TryLookupCode(Isolate* isolate, Address address);
+  // If the address belongs to the embedded code blob, predictably converts
+  // it to a uint32 offset from the blob start and returns true; returns
+  // false otherwise.
+ static bool TryGetAddressForHashing(Isolate* isolate, Address address,
+ uint32_t* hashable_address);
+
+ // Returns the corresponding builtin ID if lookup succeeds, and kNoBuiltinId
+ // otherwise.
+ static Builtins::Name TryLookupCode(Isolate* isolate, Address address);
// During snapshot creation, we first create an executable off-heap area
// containing all off-heap code. The area is guaranteed to be contiguous.
@@ -60,6 +67,32 @@ class EmbeddedData final {
const uint8_t* data() const { return data_; }
uint32_t data_size() const { return data_size_; }
+ bool IsInCodeRange(Address pc) const {
+ Address start = reinterpret_cast<Address>(code_);
+ return (start <= pc) && (pc < start + code_size_);
+ }
+
+ // When short builtin calls optimization is enabled for the Isolate, there
+ // will be two builtins instruction streams executed: the embedded one and
+ // the one un-embedded into the per-Isolate code range. In most of the cases,
+ // the per-Isolate instructions will be used but in some cases (like builtin
+ // calls from Wasm) the embedded instruction stream could be used.
+  // If the requested PC belongs to the embedded code blob, that blob is
+  // returned; otherwise the per-Isolate blob is returned.
+ // See http://crbug.com/v8/11527 for details.
+ inline static EmbeddedData GetEmbeddedDataForPC(Isolate* isolate,
+ Address maybe_builtin_pc) {
+ EmbeddedData d = EmbeddedData::FromBlob(isolate);
+ if (isolate->is_short_builtin_calls_enabled() &&
+ !d.IsInCodeRange(maybe_builtin_pc)) {
+ EmbeddedData global_d = EmbeddedData::FromBlob();
+ // If the pc does not belong to the embedded code blob we should be using
+ // the un-embedded one.
+ if (global_d.IsInCodeRange(maybe_builtin_pc)) return global_d;
+ }
+ return d;
+ }
+
void Dispose() {
delete[] code_;
code_ = nullptr;
@@ -77,8 +110,8 @@ class EmbeddedData final {
uint32_t MetadataSizeOfBuiltin(int i) const;
uint32_t AddressForHashing(Address addr) {
+ DCHECK(IsInCodeRange(addr));
Address start = reinterpret_cast<Address>(code_);
- DCHECK(base::IsInRange(addr, start, start + code_size_));
return static_cast<uint32_t>(addr - start);
}
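
The IsInCodeRange/AddressForHashing pair in the header above lets callers hash code addresses independently of where the blob happens to be mapped: an address inside the blob is reduced to its 32-bit offset from the blob start. A small sketch of that idea, again assuming a made-up Blob type:

#include <cassert>
#include <cstdint>

struct BlobStandIn {
  std::uintptr_t code_start;
  std::uint32_t code_size;

  bool IsInCodeRange(std::uintptr_t pc) const {
    return code_start <= pc && pc < code_start + code_size;
  }

  // Blob-relative offset: stable across runs even though code_start is not.
  std::uint32_t AddressForHashing(std::uintptr_t pc) const {
    assert(IsInCodeRange(pc));
    return static_cast<std::uint32_t>(pc - code_start);
  }
};

// Mirrors InstructionStream::TryGetAddressForHashing: report success only if
// the address is actually inside the blob.
bool TryGetAddressForHashing(const BlobStandIn& d, std::uintptr_t pc,
                             std::uint32_t* out) {
  if (!d.IsInCodeRange(pc)) return false;
  *out = d.AddressForHashing(pc);
  return true;
}
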
diff --git a/deps/v8/src/snapshot/embedded/embedded-file-writer-interface.h b/deps/v8/src/snapshot/embedded/embedded-file-writer-interface.h
new file mode 100644
index 00000000000..0873eecbb75
--- /dev/null
+++ b/deps/v8/src/snapshot/embedded/embedded-file-writer-interface.h
@@ -0,0 +1,56 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_EMBEDDED_EMBEDDED_FILE_WRITER_INTERFACE_H_
+#define V8_SNAPSHOT_EMBEDDED_EMBEDDED_FILE_WRITER_INTERFACE_H_
+
+#include <string>
+
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+namespace internal {
+
+class Builtins;
+
+#if defined(V8_OS_WIN64)
+namespace win64_unwindinfo {
+class BuiltinUnwindInfo;
+}
+#endif // V8_OS_WIN64
+
+static constexpr char kDefaultEmbeddedVariant[] = "Default";
+
+struct LabelInfo {
+ int offset;
+ std::string name;
+};
+
+// Detailed source-code information about builtins can only be obtained by
+// registration on the isolate during compilation.
+class EmbeddedFileWriterInterface {
+ public:
+ // We maintain a database of filenames to synthetic IDs.
+ virtual int LookupOrAddExternallyCompiledFilename(const char* filename) = 0;
+ virtual const char* GetExternallyCompiledFilename(int index) const = 0;
+ virtual int GetExternallyCompiledFilenameCount() const = 0;
+
+ // The isolate will call the method below just prior to replacing the
+ // compiled builtin Code objects with trampolines.
+ virtual void PrepareBuiltinSourcePositionMap(Builtins* builtins) = 0;
+
+ virtual void PrepareBuiltinLabelInfoMap(int create_offset,
+ int invoke_offset) = 0;
+
+#if defined(V8_OS_WIN64)
+ virtual void SetBuiltinUnwindData(
+ int builtin_index,
+ const win64_unwindinfo::BuiltinUnwindInfo& unwinding_info) = 0;
+#endif // V8_OS_WIN64
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_EMBEDDED_EMBEDDED_FILE_WRITER_INTERFACE_H_
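
The filename methods on the newly extracted interface describe a small interning scheme: each externally compiled filename gets a dense synthetic ID so source-position data can refer to files by index. A hedged sketch of one way to satisfy that contract; FileNameTable is hypothetical and not the real EmbeddedFileWriter:

#include <string>
#include <unordered_map>
#include <vector>

class FileNameTable {
 public:
  // Returns the existing ID for a filename or assigns the next dense one.
  int LookupOrAdd(const std::string& filename) {
    auto it = ids_.find(filename);
    if (it != ids_.end()) return it->second;
    int id = static_cast<int>(names_.size());
    ids_.emplace(filename, id);
    names_.push_back(filename);
    return id;
  }
  const std::string& Get(int index) const { return names_.at(index); }
  int Count() const { return static_cast<int>(names_.size()); }

 private:
  std::unordered_map<std::string, int> ids_;
  std::vector<std::string> names_;
};
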
diff --git a/deps/v8/src/snapshot/embedded/embedded-file-writer.cc b/deps/v8/src/snapshot/embedded/embedded-file-writer.cc
index 6bf5f84088e..0df841261dd 100644
--- a/deps/v8/src/snapshot/embedded/embedded-file-writer.cc
+++ b/deps/v8/src/snapshot/embedded/embedded-file-writer.cc
@@ -289,9 +289,9 @@ void EmbeddedFileWriter::PrepareBuiltinSourcePositionMap(Builtins* builtins) {
// Verify that the code object is still the "real code" and not a
// trampoline (which wouldn't have source positions).
DCHECK(!code.is_off_heap_trampoline());
- std::vector<unsigned char> data(
- code.SourcePositionTable().GetDataStartAddress(),
- code.SourcePositionTable().GetDataEndAddress());
+ ByteArray source_position_table = code.source_position_table();
+ std::vector<unsigned char> data(source_position_table.GetDataStartAddress(),
+ source_position_table.GetDataEndAddress());
source_positions_[i] = data;
}
}
diff --git a/deps/v8/src/snapshot/embedded/embedded-file-writer.h b/deps/v8/src/snapshot/embedded/embedded-file-writer.h
index 6e7ec59f44a..0650f070a24 100644
--- a/deps/v8/src/snapshot/embedded/embedded-file-writer.h
+++ b/deps/v8/src/snapshot/embedded/embedded-file-writer.h
@@ -12,6 +12,7 @@
#include "src/common/globals.h"
#include "src/snapshot/embedded/embedded-data.h"
+#include "src/snapshot/embedded/embedded-file-writer-interface.h"
#include "src/snapshot/embedded/platform-embedded-file-writer-base.h"
#if defined(V8_OS_WIN64)
@@ -21,37 +22,6 @@
namespace v8 {
namespace internal {
-
-static constexpr char kDefaultEmbeddedVariant[] = "Default";
-
-struct LabelInfo {
- int offset;
- std::string name;
-};
-
-// Detailed source-code information about builtins can only be obtained by
-// registration on the isolate during compilation.
-class EmbeddedFileWriterInterface {
- public:
- // We maintain a database of filenames to synthetic IDs.
- virtual int LookupOrAddExternallyCompiledFilename(const char* filename) = 0;
- virtual const char* GetExternallyCompiledFilename(int index) const = 0;
- virtual int GetExternallyCompiledFilenameCount() const = 0;
-
- // The isolate will call the method below just prior to replacing the
- // compiled builtin Code objects with trampolines.
- virtual void PrepareBuiltinSourcePositionMap(Builtins* builtins) = 0;
-
- virtual void PrepareBuiltinLabelInfoMap(int create_offset,
- int invoke_offset) = 0;
-
-#if defined(V8_OS_WIN64)
- virtual void SetBuiltinUnwindData(
- int builtin_index,
- const win64_unwindinfo::BuiltinUnwindInfo& unwinding_info) = 0;
-#endif // V8_OS_WIN64
-};
-
// Generates the embedded.S file which is later compiled into the final v8
// binary. Its contents are exported through two symbols:
//
diff --git a/deps/v8/src/snapshot/object-deserializer.cc b/deps/v8/src/snapshot/object-deserializer.cc
index 5747f705ae5..929996ee106 100644
--- a/deps/v8/src/snapshot/object-deserializer.cc
+++ b/deps/v8/src/snapshot/object-deserializer.cc
@@ -8,6 +8,7 @@
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/objects/allocation-site-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/objects.h"
#include "src/objects/slots.h"
#include "src/snapshot/code-serializer.h"
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index 720ffbe7415..89c5485d62c 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -1003,11 +1003,12 @@ void Serializer::ObjectSerializer::VisitOffHeapTarget(Code host,
Address addr = rinfo->target_off_heap_target();
CHECK_NE(kNullAddress, addr);
- Code target = InstructionStream::TryLookupCode(isolate(), addr);
- CHECK(Builtins::IsIsolateIndependentBuiltin(target));
+ Builtins::Name builtin = InstructionStream::TryLookupCode(isolate(), addr);
+ CHECK(Builtins::IsBuiltinId(builtin));
+ CHECK(Builtins::IsIsolateIndependent(builtin));
sink_->Put(kOffHeapTarget, "OffHeapTarget");
- sink_->PutInt(target.builtin_index(), "builtin index");
+ sink_->PutInt(builtin, "builtin index");
}
void Serializer::ObjectSerializer::VisitCodeTarget(Code host,
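
Recording the off-heap target as a builtin ID rather than a Code object keeps the snapshot payload a plain integer that the reading side can resolve against its own builtin table. A toy round-trip under that assumption; Sink and the table are placeholders, not the real snapshot classes:

#include <cstddef>
#include <vector>

struct Sink {
  std::vector<int> ints;
  void PutInt(int v) { ints.push_back(v); }
  int GetInt(std::size_t i) const { return ints.at(i); }
};

int main() {
  const char* builtin_table[] = {"Abort", "ArrayPush", "ToNumber"};
  Sink sink;
  int builtin_id = 2;       // what TryLookupCode would have produced
  sink.PutInt(builtin_id);  // serialize just the stable index
  // Deserialize: map the index back through the reader's own table.
  const char* resolved = builtin_table[sink.GetInt(0)];
  (void)resolved;  // "ToNumber"
}
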
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index d523401942b..3743fa02c90 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -124,10 +124,12 @@ class CodeAddressMap : public CodeEventLogger {
address_to_name_map_.Insert(code->address(), name, length);
}
+#if V8_ENABLE_WEBASSEMBLY
void LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) override {
UNREACHABLE();
}
+#endif // V8_ENABLE_WEBASSEMBLY
NameMap address_to_name_map_;
};
@@ -150,9 +152,18 @@ class ObjectCacheIndexMap {
return find_result.already_exists;
}
- private:
- DISALLOW_GARBAGE_COLLECTION(no_gc_)
+ bool Lookup(Handle<HeapObject> obj, int* index_out) const {
+ int* index = map_.Find(obj);
+ if (index == nullptr) {
+ return false;
+ }
+ *index_out = *index;
+ return true;
+ }
+ int size() const { return next_index_; }
+
+ private:
IdentityMap<int, base::DefaultAllocationPolicy> map_;
int next_index_;
};
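
ObjectCacheIndexMap gains a read-only Lookup next to the existing LookupOrInsert, so callers can probe cache membership without allocating a new index. A sketch of the same pattern with an ordinary hash map standing in for IdentityMap:

#include <unordered_map>

class IndexMapStandIn {
 public:
  // Returns true if the key was already present; *index_out is its slot.
  bool LookupOrInsert(const void* key, int* index_out) {
    auto result = map_.emplace(key, next_index_);
    if (result.second) ++next_index_;
    *index_out = result.first->second;
    return !result.second;
  }

  // Read-only probe: never grows the map.
  bool Lookup(const void* key, int* index_out) const {
    auto it = map_.find(key);
    if (it == map_.end()) return false;
    *index_out = it->second;
    return true;
  }

  int size() const { return next_index_; }

 private:
  std::unordered_map<const void*, int> map_;
  int next_index_ = 0;
};
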
diff --git a/deps/v8/src/snapshot/snapshot.cc b/deps/v8/src/snapshot/snapshot.cc
index 360bddc4b9a..b78e6a70d6e 100644
--- a/deps/v8/src/snapshot/snapshot.cc
+++ b/deps/v8/src/snapshot/snapshot.cc
@@ -260,9 +260,14 @@ void Snapshot::ClearReconstructableDataForSerialization(
}
#ifdef DEBUG
if (clear_recompilable_data) {
+#if V8_ENABLE_WEBASSEMBLY
DCHECK(fun.shared().HasWasmExportedFunctionData() ||
fun.shared().HasBuiltinId() || fun.shared().IsApiFunction() ||
fun.shared().HasUncompiledDataWithoutPreparseData());
+#else
+ DCHECK(fun.shared().HasBuiltinId() || fun.shared().IsApiFunction() ||
+ fun.shared().HasUncompiledDataWithoutPreparseData());
+#endif // V8_ENABLE_WEBASSEMBLY
}
#endif // DEBUG
}
diff --git a/deps/v8/src/strings/OWNERS b/deps/v8/src/strings/OWNERS
index ac020e24a94..1e47ce760ff 100644
--- a/deps/v8/src/strings/OWNERS
+++ b/deps/v8/src/strings/OWNERS
@@ -1,4 +1,3 @@
-bmeurer@chromium.org
jkummerow@chromium.org
leszeks@chromium.org
verwaest@chromium.org
diff --git a/deps/v8/src/strings/string-stream.cc b/deps/v8/src/strings/string-stream.cc
index 84756896af4..ed1939ae0f9 100644
--- a/deps/v8/src/strings/string-stream.cc
+++ b/deps/v8/src/strings/string-stream.cc
@@ -298,7 +298,7 @@ void StringStream::PrintName(Object name) {
void StringStream::PrintUsingMap(JSObject js_object) {
Map map = js_object.map();
- DescriptorArray descs = map.instance_descriptors(kRelaxedLoad);
+ DescriptorArray descs = map.instance_descriptors(js_object.GetIsolate());
for (InternalIndex i : map.IterateOwnDescriptors()) {
PropertyDetails details = descs.GetDetails(i);
if (details.location() == kField) {
diff --git a/deps/v8/src/third_party/siphash/OWNERS b/deps/v8/src/third_party/siphash/OWNERS
index f0ea8b5f4f9..81e8577125f 100644
--- a/deps/v8/src/third_party/siphash/OWNERS
+++ b/deps/v8/src/third_party/siphash/OWNERS
@@ -1,2 +1 @@
-sigurds@chromium.org
verwaest@chromium.org
diff --git a/deps/v8/src/third_party/utf8-decoder/OWNERS b/deps/v8/src/third_party/utf8-decoder/OWNERS
index c008e4cbce8..88ceed43c20 100644
--- a/deps/v8/src/third_party/utf8-decoder/OWNERS
+++ b/deps/v8/src/third_party/utf8-decoder/OWNERS
@@ -1,2 +1 @@
-mathias@chromium.org
marja@chromium.org
diff --git a/deps/v8/src/torque/ast.h b/deps/v8/src/torque/ast.h
index db4f80c32d7..2e1aed3ec9b 100644
--- a/deps/v8/src/torque/ast.h
+++ b/deps/v8/src/torque/ast.h
@@ -924,9 +924,18 @@ struct Annotation {
base::Optional<AnnotationParameter> param;
};
+struct ClassFieldIndexInfo {
+ // The expression that can compute how many items are in the indexed field.
+ Expression* expr;
+
+ // Whether the field was declared as optional, meaning it can only hold zero
+ // or one values, and thus should not require an index expression to access.
+ bool optional;
+};
+
struct ClassFieldExpression {
NameAndTypeExpression name_and_type;
- base::Optional<Expression*> index;
+ base::Optional<ClassFieldIndexInfo> index;
std::vector<ConditionalAnnotation> conditions;
bool weak;
bool const_qualified;
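
The new ClassFieldIndexInfo records not only the length expression but whether the field was spelled with the optional syntax: an optional field is an indexed field whose length is zero or one, so generated accessors read slot 0 and take no index argument. A condensed sketch of how a generator might branch on that flag; the names here are illustrative, not Torque's:

#include <string>

struct IndexInfoStandIn {
  std::string length_expr;  // e.g. "has_extra_slot ? 1 : 0"
  bool optional;
};

// Only truly indexed fields get an `int i` parameter.
std::string AccessorSignature(const std::string& name,
                              const IndexInfoStandIn* index) {
  bool indexed = index != nullptr && !index->optional;
  return name + "(" + (indexed ? "int i" : "") + ") const";
}

// Optional fields are always accessed as element 0 of their slice.
std::string ElementIndex(const IndexInfoStandIn* index) {
  if (index == nullptr) return "";
  return index->optional ? "0" : "i";
}
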
diff --git a/deps/v8/src/torque/cc-generator.cc b/deps/v8/src/torque/cc-generator.cc
index 5eea56654db..0dea634ba47 100644
--- a/deps/v8/src/torque/cc-generator.cc
+++ b/deps/v8/src/torque/cc-generator.cc
@@ -386,10 +386,10 @@ void CCGenerator::EmitInstruction(const LoadReferenceInstruction& instruction,
out() << " " << result_name << " = ";
if (instruction.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
// Currently, all of the tagged loads we emit are for smi values, so there
- // is no point in providing an IsolateRoot. If at some point we start
+    // is no point in providing a PtrComprCageBase. If at some point we start
// emitting loads for tagged fields which might be HeapObjects, then we
- // should plumb an IsolateRoot through the generated functions that need
- // it.
+    // should plumb a PtrComprCageBase through the generated functions that
+ // need it.
if (!instruction.type->IsSubtypeOf(TypeOracle::GetSmiType())) {
Error(
"Not supported in C++ output: LoadReference on non-smi tagged "
@@ -472,33 +472,38 @@ void CCGenerator::EmitInstruction(const StoreBitFieldInstruction& instruction,
ReportError("Not supported in C++ output: StoreBitField");
}
-// static
-void CCGenerator::EmitCCValue(VisitResult result,
- const Stack<std::string>& values,
- std::ostream& out) {
+namespace {
+
+void CollectAllFields(const VisitResult& result,
+ const Stack<std::string>& values,
+ std::vector<std::string>& all_fields) {
if (!result.IsOnStack()) {
- out << result.constexpr_value();
+ all_fields.push_back(result.constexpr_value());
} else if (auto struct_type = result.type()->StructSupertype()) {
- out << "std::tuple_cat(";
- bool first = true;
- for (auto& field : (*struct_type)->fields()) {
- if (!first) {
- out << ", ";
- }
- first = false;
- if (!field.name_and_type.type->IsStructType()) {
- out << "std::make_tuple(";
- }
- EmitCCValue(ProjectStructField(result, field.name_and_type.name), values,
- out);
- if (!field.name_and_type.type->IsStructType()) {
- out << ")";
- }
+ for (const Field& field : (*struct_type)->fields()) {
+ CollectAllFields(ProjectStructField(result, field.name_and_type.name),
+ values, all_fields);
}
- out << ")";
} else {
DCHECK_EQ(1, result.stack_range().Size());
- out << values.Peek(result.stack_range().begin());
+ all_fields.push_back(values.Peek(result.stack_range().begin()));
+ }
+}
+
+} // namespace
+
+// static
+void CCGenerator::EmitCCValue(VisitResult result,
+ const Stack<std::string>& values,
+ std::ostream& out) {
+ std::vector<std::string> all_fields;
+ CollectAllFields(result, values, all_fields);
+ if (all_fields.size() == 1) {
+ out << all_fields[0];
+ } else {
+ out << "std::make_tuple(";
+ PrintCommaSeparatedList(out, all_fields);
+ out << ")";
}
}
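
EmitCCValue now flattens a possibly nested struct value into one flat field list before printing, so single-field results are printed bare and multi-field results become a single std::make_tuple call instead of nested std::tuple_cat expressions. A freestanding sketch of the flattening step; FieldNode stands in for VisitResult and struct projection:

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

struct FieldNode {
  std::string leaf;               // set for leaf values
  std::vector<FieldNode> fields;  // non-empty for struct-like values
};

void CollectAllFields(const FieldNode& node, std::vector<std::string>& out) {
  if (node.fields.empty()) {
    out.push_back(node.leaf);
    return;
  }
  for (const FieldNode& f : node.fields) CollectAllFields(f, out);
}

std::string EmitValue(const FieldNode& node) {
  std::vector<std::string> all;
  CollectAllFields(node, all);
  if (all.size() == 1) return all[0];
  std::string out = "std::make_tuple(";
  for (std::size_t i = 0; i < all.size(); ++i) {
    if (i > 0) out += ", ";
    out += all[i];
  }
  return out + ")";
}

int main() {
  FieldNode a{"a", {}}, b{"b", {}}, c{"c", {}};
  FieldNode inner{"", {b, c}};
  FieldNode root{"", {a, inner}};
  std::cout << EmitValue(root) << "\n";  // std::make_tuple(a, b, c)
}
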
diff --git a/deps/v8/src/torque/class-debug-reader-generator.cc b/deps/v8/src/torque/class-debug-reader-generator.cc
index fe2a85fcd0f..19ac671bd79 100644
--- a/deps/v8/src/torque/class-debug-reader-generator.cc
+++ b/deps/v8/src/torque/class-debug-reader-generator.cc
@@ -545,19 +545,18 @@ void ImplementationVisitor::GenerateClassDebugReaders(
h_contents
<< "\n#include \"tools/debug_helper/debug-helper-internal.h\"\n\n";
- h_contents << "// Unset a windgi.h macro that causes conflicts.\n";
- h_contents << "#ifdef GetBValue\n";
- h_contents << "#undef GetBValue\n";
- h_contents << "#endif\n\n";
-
- for (const std::string& include_path : GlobalContext::CppIncludes()) {
- cc_contents << "#include " << StringLiteralQuote(include_path) << "\n";
- }
- cc_contents << "#include \"torque-generated/" << file_name << ".h\"\n";
- cc_contents << "#include \"torque-generated/"
- << "debug-macros"
- << ".h\"\n";
- cc_contents << "#include \"include/v8-internal.h\"\n\n";
+ const char* kWingdiWorkaround =
+ "// Unset a wingdi.h macro that causes conflicts.\n"
+ "#ifdef GetBValue\n"
+ "#undef GetBValue\n"
+ "#endif\n\n";
+
+ h_contents << kWingdiWorkaround;
+
+ cc_contents << "#include \"torque-generated/" << file_name << ".h\"\n\n";
+ cc_contents << "#include \"src/objects/all-objects-inl.h\"\n";
+ cc_contents << "#include \"torque-generated/debug-macros.h\"\n\n";
+ cc_contents << kWingdiWorkaround;
cc_contents << "namespace i = v8::internal;\n\n";
NamespaceScope h_namespaces(h_contents,
diff --git a/deps/v8/src/torque/constants.h b/deps/v8/src/torque/constants.h
index bd720bf0ac2..2a4e6fcb5b3 100644
--- a/deps/v8/src/torque/constants.h
+++ b/deps/v8/src/torque/constants.h
@@ -49,6 +49,7 @@ static const char* const BUILTIN_POINTER_TYPE_STRING = "BuiltinPtr";
static const char* const INTPTR_TYPE_STRING = "intptr";
static const char* const UINTPTR_TYPE_STRING = "uintptr";
static const char* const INT64_TYPE_STRING = "int64";
+static const char* const UINT64_TYPE_STRING = "uint64";
static const char* const INT31_TYPE_STRING = "int31";
static const char* const INT32_TYPE_STRING = "int32";
static const char* const UINT31_TYPE_STRING = "uint31";
diff --git a/deps/v8/src/torque/csa-generator.cc b/deps/v8/src/torque/csa-generator.cc
index 9254a7ea6b2..2f840c1918b 100644
--- a/deps/v8/src/torque/csa-generator.cc
+++ b/deps/v8/src/torque/csa-generator.cc
@@ -279,6 +279,10 @@ void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
out() << "ca_.Int32Constant";
} else if (return_type->IsSubtypeOf(TypeOracle::GetUint32Type())) {
out() << "ca_.Uint32Constant";
+ } else if (return_type->IsSubtypeOf(TypeOracle::GetInt64Type())) {
+ out() << "ca_.Int64Constant";
+ } else if (return_type->IsSubtypeOf(TypeOracle::GetUint64Type())) {
+ out() << "ca_.Uint64Constant";
} else if (return_type->IsSubtypeOf(TypeOracle::GetBoolType())) {
out() << "ca_.BoolConstant";
} else {
@@ -747,7 +751,7 @@ void CSAGenerator::EmitInstruction(const CallRuntimeInstruction& instruction,
void CSAGenerator::EmitInstruction(const BranchInstruction& instruction,
Stack<std::string>* stack) {
out() << " ca_.Branch(" << stack->Pop() << ", &"
- << BlockName(instruction.if_true) << ", std::vector<Node*>{";
+ << BlockName(instruction.if_true) << ", std::vector<compiler::Node*>{";
const auto& true_definitions = instruction.if_true->InputDefinitions();
DCHECK_EQ(stack->Size(), true_definitions.Size());
@@ -760,7 +764,8 @@ void CSAGenerator::EmitInstruction(const BranchInstruction& instruction,
}
}
- out() << "}, &" << BlockName(instruction.if_false) << ", std::vector<Node*>{";
+ out() << "}, &" << BlockName(instruction.if_false)
+ << ", std::vector<compiler::Node*>{";
const auto& false_definitions = instruction.if_false->InputDefinitions();
DCHECK_EQ(stack->Size(), false_definitions.Size());
diff --git a/deps/v8/src/torque/declaration-visitor.cc b/deps/v8/src/torque/declaration-visitor.cc
index faf46b18e97..71cde509635 100644
--- a/deps/v8/src/torque/declaration-visitor.cc
+++ b/deps/v8/src/torque/declaration-visitor.cc
@@ -141,7 +141,7 @@ void DeclarationVisitor::Visit(ExternalRuntimeDeclaration* decl) {
ReportError(
"runtime functions can only return strong tagged values, but "
"found type ",
- signature.return_type);
+ *signature.return_type);
}
for (const Type* parameter_type : signature.parameter_types.types) {
if (!parameter_type->IsSubtypeOf(TypeOracle::GetStrongTaggedType())) {
diff --git a/deps/v8/src/torque/global-context.cc b/deps/v8/src/torque/global-context.cc
index 35ddb1d2e26..a70e8ec41fc 100644
--- a/deps/v8/src/torque/global-context.cc
+++ b/deps/v8/src/torque/global-context.cc
@@ -14,6 +14,7 @@ DEFINE_CONTEXTUAL_VARIABLE(TargetArchitecture)
GlobalContext::GlobalContext(Ast ast)
: collect_language_server_data_(false),
force_assert_statements_(false),
+ annotate_ir_(false),
ast_(std::move(ast)) {
CurrentScope::Scope current_scope(nullptr);
CurrentSourcePosition::Scope current_source_position(
diff --git a/deps/v8/src/torque/global-context.h b/deps/v8/src/torque/global-context.h
index cd6ddef8b22..403502b67bf 100644
--- a/deps/v8/src/torque/global-context.h
+++ b/deps/v8/src/torque/global-context.h
@@ -54,6 +54,8 @@ class GlobalContext : public ContextualClass<GlobalContext> {
static bool force_assert_statements() {
return Get().force_assert_statements_;
}
+ static void SetAnnotateIR() { Get().annotate_ir_ = true; }
+ static bool annotate_ir() { return Get().annotate_ir_; }
static Ast* ast() { return &Get().ast_; }
static std::string MakeUniqueName(const std::string& base) {
return base + "_" + std::to_string(Get().fresh_ids_[base]++);
@@ -106,6 +108,7 @@ class GlobalContext : public ContextualClass<GlobalContext> {
private:
bool collect_language_server_data_;
bool force_assert_statements_;
+ bool annotate_ir_;
Namespace* default_namespace_;
Ast ast_;
std::vector<std::unique_ptr<Declarable>> declarables_;
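
The annotate_ir_ flag follows the same pattern as the other GlobalContext options: set once (presumably from a new command-line switch) and then queried by the code generators, which the operator<< printers added to the Torque instructions later in this patch make useful for echoing each IR instruction as a comment into the generated output. A generic sketch of that kind of gate, with invented names:

#include <iostream>
#include <sstream>

struct OptionsStandIn {
  bool annotate_ir = false;
};

// Emits a readable form of the instruction as a comment when annotation is on;
// relies on an operator<< for the instruction type, as added in this patch.
template <typename Instruction>
void EmitWithAnnotation(const OptionsStandIn& opts, const Instruction& insn,
                        std::ostream& out) {
  if (opts.annotate_ir) out << "  // " << insn << "\n";
  // ... emit the actual lowering of the instruction here ...
}

struct FakeInstruction {
  friend std::ostream& operator<<(std::ostream& os, const FakeInstruction&) {
    return os << "Goto block 3";
  }
};

int main() {
  OptionsStandIn opts;
  opts.annotate_ir = true;
  std::ostringstream out;
  EmitWithAnnotation(opts, FakeInstruction{}, out);
  std::cout << out.str();  // "  // Goto block 3"
}
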
diff --git a/deps/v8/src/torque/implementation-visitor.cc b/deps/v8/src/torque/implementation-visitor.cc
index 8769e8c9a97..a2cf0fee866 100644
--- a/deps/v8/src/torque/implementation-visitor.cc
+++ b/deps/v8/src/torque/implementation-visitor.cc
@@ -20,6 +20,7 @@
#include "src/torque/type-inference.h"
#include "src/torque/type-visitor.h"
#include "src/torque/types.h"
+#include "src/torque/utils.h"
namespace v8 {
namespace internal {
@@ -154,6 +155,8 @@ void ImplementationVisitor::BeginDebugMacrosFile() {
std::ostream& header = debug_macros_h_;
source << "#include \"torque-generated/debug-macros.h\"\n\n";
+ source << "#include \"src/objects/swiss-name-dictionary.h\"\n";
+ source << "#include \"src/objects/ordered-hash-table.h\"\n";
source << "#include \"tools/debug_helper/debug-macro-shims.h\"\n";
source << "#include \"include/v8-internal.h\"\n";
source << "\n";
@@ -166,13 +169,12 @@ void ImplementationVisitor::BeginDebugMacrosFile() {
const char* kHeaderDefine = "V8_GEN_TORQUE_GENERATED_DEBUG_MACROS_H_";
header << "#ifndef " << kHeaderDefine << "\n";
header << "#define " << kHeaderDefine << "\n\n";
- header << "#include \"src/builtins/torque-csa-header-includes.h\"\n";
header << "#include \"tools/debug_helper/debug-helper-internal.h\"\n";
header << "\n";
header << "namespace v8 {\n"
<< "namespace internal {\n"
- << "namespace debug_helper_internal{\n"
+ << "namespace debug_helper_internal {\n"
<< "\n";
}
@@ -1381,10 +1383,19 @@ InitializerResults ImplementationVisitor::VisitInitializerResults(
}
LocationReference ImplementationVisitor::GenerateFieldReference(
- VisitResult object, const Field& field, const ClassType* class_type) {
+ VisitResult object, const Field& field, const ClassType* class_type,
+ bool treat_optional_as_indexed) {
if (field.index.has_value()) {
- return LocationReference::HeapSlice(
+ LocationReference slice = LocationReference::HeapSlice(
GenerateCall(class_type->GetSliceMacroName(field), {{object}, {}}));
+ if (field.index->optional && !treat_optional_as_indexed) {
+ // This field was declared using optional syntax, so any reference to it
+ // is implicitly a reference to the first item.
+ return GenerateReferenceToItemInHeapSlice(
+ slice, {TypeOracle::GetConstInt31Type(), "0"});
+ } else {
+ return slice;
+ }
}
DCHECK(field.offset.has_value());
StackRange result_range = assembler().TopRange(0);
@@ -1481,18 +1492,25 @@ VisitResult ImplementationVisitor::GenerateArrayLength(VisitResult object,
if (field.name_and_type.name == f.name_and_type.name) {
before_current = false;
}
+ // We can't generate field references eagerly here, because some preceding
+ // fields might be optional, and attempting to get a reference to an
+ // optional field can crash the program if the field isn't present.
+ // Instead, we use the lazy form of LocalValue to only generate field
+ // references if they are used in the length expression.
bindings.insert(
{f.name_and_type.name,
f.const_qualified
? (before_current
- ? LocalValue{GenerateFieldReference(object, f, class_type)}
+ ? LocalValue{[=]() {
+ return GenerateFieldReference(object, f, class_type);
+ }}
: LocalValue("Array lengths may only refer to fields "
"defined earlier"))
: LocalValue(
"Non-const fields cannot be used for array lengths.")});
}
return stack_scope.Yield(
- GenerateArrayLength(*field.index, class_type->nspace(), bindings));
+ GenerateArrayLength(field.index->expr, class_type->nspace(), bindings));
}
VisitResult ImplementationVisitor::GenerateArrayLength(
@@ -1515,7 +1533,7 @@ VisitResult ImplementationVisitor::GenerateArrayLength(
"Non-const fields cannot be used for array lengths.")});
}
return stack_scope.Yield(
- GenerateArrayLength(*field.index, class_type->nspace(), bindings));
+ GenerateArrayLength(field.index->expr, class_type->nspace(), bindings));
}
LayoutForInitialization ImplementationVisitor::GenerateLayoutForInitialization(
@@ -2284,22 +2302,27 @@ LocationReference ImplementationVisitor::GetLocationReference(
LocationReference reference = GetLocationReference(expr->array);
VisitResult index = Visit(expr->index);
if (reference.IsHeapSlice()) {
- Arguments arguments{{index}, {}};
- const StructType* slice_type =
- *reference.heap_slice().type()->StructSupertype();
- Method* method = LookupMethod("AtIndex", slice_type, arguments, {});
- // The reference has to be treated like a normal value when calling methods
- // on the underlying slice implementation.
- LocationReference slice_value = LocationReference::Temporary(
- reference.GetVisitResult(), "slice as value");
- return LocationReference::HeapReference(
- GenerateCall(method, std::move(slice_value), arguments, {}, false));
+ return GenerateReferenceToItemInHeapSlice(reference, index);
} else {
return LocationReference::ArrayAccess(GenerateFetchFromLocation(reference),
index);
}
}
+LocationReference ImplementationVisitor::GenerateReferenceToItemInHeapSlice(
+ LocationReference slice, VisitResult index) {
+ DCHECK(slice.IsHeapSlice());
+ Arguments arguments{{index}, {}};
+ const StructType* slice_type = *slice.heap_slice().type()->StructSupertype();
+ Method* method = LookupMethod("AtIndex", slice_type, arguments, {});
+ // The reference has to be treated like a normal value when calling methods
+ // on the underlying slice implementation.
+ LocationReference slice_value =
+ LocationReference::Temporary(slice.GetVisitResult(), "slice as value");
+ return LocationReference::HeapReference(
+ GenerateCall(method, std::move(slice_value), arguments, {}, false));
+}
+
LocationReference ImplementationVisitor::GetLocationReference(
IdentifierExpression* expr) {
if (expr->namespace_qualification.empty()) {
@@ -2665,7 +2688,7 @@ VisitResult ImplementationVisitor::GenerateCall(
if (!this_value.type()->IsSubtypeOf(method->aggregate_type())) {
ReportError("this parameter must be a subtype of ",
*method->aggregate_type(), " but it is of type ",
- this_value.type());
+ *this_value.type());
}
} else {
AddCallParameter(callable, this_value, method->aggregate_type(),
@@ -2997,6 +3020,21 @@ VisitResult ImplementationVisitor::GenerateCall(
assembler().Emit(MakeLazyNodeInstruction{getter, return_type,
constexpr_arguments_for_getter});
return VisitResult(return_type, assembler().TopRange(1));
+ } else if (intrinsic->ExternalName() == "%FieldSlice") {
+ const Type* type = specialization_types[0];
+ const ClassType* class_type = ClassType::DynamicCast(type);
+ if (!class_type) {
+ ReportError("%FieldSlice must take a class type parameter");
+ }
+ const Field& field =
+ class_type->LookupField(StringLiteralUnquote(constexpr_arguments[0]));
+ LocationReference ref = GenerateFieldReference(
+ VisitResult(type, argument_range), field, class_type,
+ /*treat_optional_as_indexed=*/true);
+ if (!ref.IsHeapSlice()) {
+ ReportError("%FieldSlice expected an indexed or optional field");
+ }
+ return ref.heap_slice();
} else {
assembler().Emit(CallIntrinsicInstruction{intrinsic, specialization_types,
constexpr_arguments});
@@ -3885,7 +3923,7 @@ base::Optional<std::vector<Field>> GetOrderedUniqueIndexFields(
std::set<std::string> index_names;
for (const Field& field : type.ComputeAllFields()) {
if (field.index) {
- auto name_and_type = ExtractSimpleFieldArraySize(type, *field.index);
+ auto name_and_type = ExtractSimpleFieldArraySize(type, field.index->expr);
if (!name_and_type) {
return base::nullopt;
}
@@ -4004,7 +4042,7 @@ void CppClassGenerator::GenerateClass() {
for (const Field& field : type_->ComputeAllFields()) {
if (field.index) {
auto index_name_and_type =
- *ExtractSimpleFieldArraySize(*type_, *field.index);
+ *ExtractSimpleFieldArraySize(*type_, field.index->expr);
size_t field_size = 0;
std::tie(field_size, std::ignore) = field.GetFieldSizeInformation();
hdr_ << " size += " << index_name_and_type.name << " * "
@@ -4123,7 +4161,7 @@ void GenerateBoundsDCheck(std::ostream& os, const std::string& index,
os << " DCHECK_GE(" << index << ", 0);\n";
std::string length_expression;
if (base::Optional<NameAndType> array_length =
- ExtractSimpleFieldArraySize(*type, *f.index)) {
+ ExtractSimpleFieldArraySize(*type, f.index->expr)) {
length_expression = "this ->" + array_length->name + "()";
} else {
// The length is element 2 in the flattened field slice.
@@ -4164,7 +4202,7 @@ void CppClassGenerator::GenerateFieldAccessors(
return;
}
- bool indexed = class_field.index.has_value();
+ bool indexed = class_field.index && !class_field.index->optional;
std::string type_name = GetTypeNameForAccessor(innermost_field);
bool can_contain_heap_objects = CanContainHeapObjects(field_type);
@@ -4185,8 +4223,9 @@ void CppClassGenerator::GenerateFieldAccessors(
hdr_ << " inline " << type_name << " " << name << "("
<< (indexed ? "int i" : "") << ") const;\n";
if (can_contain_heap_objects) {
- hdr_ << " inline " << type_name << " " << name << "(IsolateRoot isolate"
- << (indexed ? ", int i" : "") << ") const;\n";
+ hdr_ << " inline " << type_name << " " << name
+ << "(PtrComprCageBase cage_base" << (indexed ? ", int i" : "")
+ << ") const;\n";
}
hdr_ << " inline void set_" << name << "(" << (indexed ? "int i, " : "")
<< type_name << " value"
@@ -4195,14 +4234,14 @@ void CppClassGenerator::GenerateFieldAccessors(
: "")
<< ");\n\n";
- // For tagged data, generate the extra getter that derives an IsolateRoot from
- // the current object's pointer.
+  // For tagged data, generate the extra getter that derives a PtrComprCageBase
+ // from the current object's pointer.
if (can_contain_heap_objects) {
inl_ << "template <class D, class P>\n";
inl_ << type_name << " " << gen_name_ << "<D, P>::" << name << "("
<< (indexed ? "int i" : "") << ") const {\n";
- inl_ << " IsolateRoot isolate = GetIsolateForPtrCompr(*this);\n";
- inl_ << " return " << gen_name_ << "::" << name << "(isolate"
+ inl_ << " PtrComprCageBase cage_base = GetPtrComprCageBase(*this);\n";
+ inl_ << " return " << gen_name_ << "::" << name << "(cage_base"
<< (indexed ? ", i" : "") << ");\n";
inl_ << "}\n";
}
@@ -4210,7 +4249,7 @@ void CppClassGenerator::GenerateFieldAccessors(
// Generate the getter implementation.
inl_ << "template <class D, class P>\n";
inl_ << type_name << " " << gen_name_ << "<D, P>::" << name << "(";
- if (can_contain_heap_objects) inl_ << "IsolateRoot isolate";
+ if (can_contain_heap_objects) inl_ << "PtrComprCageBase cage_base";
if (can_contain_heap_objects && indexed) inl_ << ", ";
if (indexed) inl_ << "int i";
inl_ << ") const {\n";
@@ -4288,9 +4327,10 @@ void CppClassGenerator::EmitLoadFieldStatement(
std::string offset = field_offset;
if (class_field.index) {
- GenerateBoundsDCheck(inl_, "i", type_, class_field);
- inl_ << " int offset = " << field_offset << " + i * " << class_field_size
- << ";\n";
+ const char* index = class_field.index->optional ? "0" : "i";
+ GenerateBoundsDCheck(inl_, index, type_, class_field);
+ inl_ << " int offset = " << field_offset << " + " << index << " * "
+ << class_field_size << ";\n";
offset = "offset";
}
@@ -4322,10 +4362,11 @@ void CppClassGenerator::EmitLoadFieldStatement(
bool is_smi = field_type->IsSubtypeOf(TypeOracle::GetSmiType());
const std::string load_type = is_smi ? "Smi" : type_name;
const char* postfix = is_smi ? ".value()" : "";
- const char* optional_isolate = is_smi ? "" : "isolate, ";
+ const char* optional_cage_base = is_smi ? "" : "cage_base, ";
inl_ << "TaggedField<" << load_type << ">::" << load << "("
- << optional_isolate << "*this, " << offset << ")" << postfix << ";\n";
+ << optional_cage_base << "*this, " << offset << ")" << postfix
+ << ";\n";
}
if (CanContainHeapObjects(field_type)) {
@@ -4353,9 +4394,10 @@ void CppClassGenerator::EmitStoreFieldStatement(
std::string offset = field_offset;
if (class_field.index) {
- GenerateBoundsDCheck(inl_, "i", type_, class_field);
- inl_ << " int offset = " << field_offset << " + i * " << class_field_size
- << ";\n";
+ const char* index = class_field.index->optional ? "0" : "i";
+ GenerateBoundsDCheck(inl_, index, type_, class_field);
+ inl_ << " int offset = " << field_offset << " + " << index << " * "
+ << class_field_size << ";\n";
offset = "offset";
}
@@ -4929,14 +4971,10 @@ void ImplementationVisitor::GenerateClassVerifiers(
IfDefScope verify_heap_h(h_contents, "VERIFY_HEAP");
IfDefScope verify_heap_cc(cc_contents, "VERIFY_HEAP");
- cc_contents << "\n#include \"src/objects/objects.h\"\n";
+ h_contents << "#include \"src/base/macros.h\"\n\n";
- for (const std::string& include_path : GlobalContext::CppIncludes()) {
- cc_contents << "#include " << StringLiteralQuote(include_path) << "\n";
- }
- cc_contents << "#include \"torque-generated/" << file_name << ".h\"\n";
- cc_contents << "#include "
- "\"src/objects/all-objects-inl.h\"\n";
+ cc_contents << "#include \"torque-generated/" << file_name << ".h\"\n\n";
+ cc_contents << "#include \"src/objects/all-objects-inl.h\"\n";
IncludeObjectMacrosScope object_macros(cc_contents);
@@ -5123,6 +5161,24 @@ void ImplementationVisitor::GenerateExportedMacrosAssembler(
WriteFile(output_directory + "/" + file_name + ".cc", cc_contents.str());
}
+namespace {
+
+void CollectAllFields(const std::string& path, const Field& field,
+ std::vector<std::string>& result) {
+ if (field.name_and_type.type->StructSupertype()) {
+ std::string next_path = path + field.name_and_type.name + ".";
+ const StructType* struct_type =
+ StructType::DynamicCast(field.name_and_type.type);
+ for (const auto& inner_field : struct_type->fields()) {
+ CollectAllFields(next_path, inner_field, result);
+ }
+ } else {
+ result.push_back(path + field.name_and_type.name);
+ }
+}
+
+} // namespace
+
void ImplementationVisitor::GenerateCSATypes(
const std::string& output_directory) {
std::string file_name = "csa-types";
@@ -5153,20 +5209,13 @@ void ImplementationVisitor::GenerateCSATypes(
first = false;
h_contents << type->GetGeneratedTypeName();
}
- h_contents << "> Flatten() const {\n"
- << " return std::tuple_cat(";
- first = true;
+ std::vector<std::string> all_fields;
for (auto& field : struct_type->fields()) {
- if (!first) {
- h_contents << ", ";
- }
- first = false;
- if (field.name_and_type.type->StructSupertype()) {
- h_contents << field.name_and_type.name << ".Flatten()";
- } else {
- h_contents << "std::make_tuple(" << field.name_and_type.name << ")";
- }
+ CollectAllFields("", field, all_fields);
}
+ h_contents << "> Flatten() const {\n"
+ " return std::make_tuple(";
+ PrintCommaSeparatedList(h_contents, all_fields);
h_contents << ");\n";
h_contents << " }\n";
h_contents << "};\n";
@@ -5200,6 +5249,7 @@ void ReportAllUnusedMacros() {
"FromConstexpr<"};
const std::string name = macro->ReadableName();
const bool ignore =
+ StartsWithSingleUnderscore(name) ||
std::any_of(ignored_prefixes.begin(), ignored_prefixes.end(),
[&name](const std::string& prefix) {
return StringStartsWith(name, prefix);
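
The array-length bindings above become lazy: instead of eagerly generating a reference to every earlier field (which can crash when an optional field is absent), each binding stores a std::function and only materializes the reference if the length expression actually names that field. A minimal sketch of that deferral, independent of the Torque types:

#include <functional>
#include <iostream>
#include <optional>
#include <string>

class LazyBindingStandIn {
 public:
  explicit LazyBindingStandIn(std::string ready) : value_(std::move(ready)) {}
  explicit LazyBindingStandIn(std::function<std::string()> lazy)
      : lazy_(std::move(lazy)) {}

  // Potentially unsafe or expensive work only happens on first use.
  std::string Get() {
    if (!value_) value_ = (*lazy_)();
    return *value_;
  }

 private:
  std::optional<std::string> value_;
  std::optional<std::function<std::string()>> lazy_;
};

int main() {
  bool generated = false;
  LazyBindingStandIn binding{[&generated]() {
    generated = true;
    return std::string("field_reference");
  }};
  std::cout << generated << "\n";      // 0: nothing generated yet
  std::cout << binding.Get() << "\n";  // field_reference
  std::cout << generated << "\n";      // 1
}
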
diff --git a/deps/v8/src/torque/implementation-visitor.h b/deps/v8/src/torque/implementation-visitor.h
index 8b32de07889..f5bc53b8ca7 100644
--- a/deps/v8/src/torque/implementation-visitor.h
+++ b/deps/v8/src/torque/implementation-visitor.h
@@ -234,7 +234,7 @@ template <class T>
class BindingsManager {
public:
base::Optional<Binding<T>*> TryLookup(const std::string& name) {
- if (name.length() >= 2 && name[0] == '_' && name[1] != '_') {
+ if (StartsWithSingleUnderscore(name)) {
Error("Trying to reference '", name, "' which is marked as unused.")
.Throw();
}
@@ -361,6 +361,8 @@ class LocalValue {
: value(std::move(reference)) {}
explicit LocalValue(std::string inaccessible_explanation)
: inaccessible_explanation(std::move(inaccessible_explanation)) {}
+ explicit LocalValue(std::function<LocationReference()> lazy)
+ : lazy(std::move(lazy)) {}
LocationReference GetLocationReference(Binding<LocalValue>* binding) {
if (value) {
@@ -370,16 +372,19 @@ class LocalValue {
return LocationReference::VariableAccess(ref.GetVisitResult(), binding);
}
return ref;
+ } else if (lazy) {
+ return (*lazy)();
} else {
Error("Cannot access ", binding->name(), ": ", inaccessible_explanation)
.Throw();
}
}
- bool IsAccessible() const { return value.has_value(); }
+ bool IsAccessibleNonLazy() const { return value.has_value(); }
private:
base::Optional<LocationReference> value;
+ base::Optional<std::function<LocationReference()>> lazy;
std::string inaccessible_explanation;
};
@@ -400,7 +405,7 @@ template <>
inline bool Binding<LocalValue>::CheckWritten() const {
// Do the check only for non-const variables and non struct types.
auto binding = *manager_->current_bindings_[name_];
- if (!binding->IsAccessible()) return false;
+ if (!binding->IsAccessibleNonLazy()) return false;
const LocationReference& ref = binding->GetLocationReference(binding);
if (!ref.IsVariableAccess()) return false;
return !ref.GetVisitResult().type()->StructSupertype();
@@ -469,9 +474,9 @@ class ImplementationVisitor {
InitializerResults VisitInitializerResults(
const ClassType* class_type,
const std::vector<NameAndExpression>& expressions);
- LocationReference GenerateFieldReference(VisitResult object,
- const Field& field,
- const ClassType* class_type);
+ LocationReference GenerateFieldReference(
+ VisitResult object, const Field& field, const ClassType* class_type,
+ bool treat_optional_as_indexed = false);
LocationReference GenerateFieldReferenceForInit(
VisitResult object, const Field& field,
const LayoutForInitialization& layout);
@@ -502,6 +507,8 @@ class ImplementationVisitor {
bool ignore_stuct_field_constness = false,
base::Optional<SourcePosition> pos = {});
LocationReference GetLocationReference(ElementAccessExpression* expr);
+ LocationReference GenerateReferenceToItemInHeapSlice(LocationReference slice,
+ VisitResult index);
VisitResult GenerateFetchFromLocation(const LocationReference& reference);
diff --git a/deps/v8/src/torque/instructions.cc b/deps/v8/src/torque/instructions.cc
index ea7676ea440..52f0f819765 100644
--- a/deps/v8/src/torque/instructions.cc
+++ b/deps/v8/src/torque/instructions.cc
@@ -129,6 +129,11 @@ DefinitionLocation NamespaceConstantInstruction::GetValueDefinition(
return DefinitionLocation::Instruction(this, index);
}
+std::ostream& operator<<(std::ostream& os,
+ const NamespaceConstantInstruction& instruction) {
+ return os << "NamespaceConstant " << instruction.constant->external_name();
+}
+
void InstructionBase::InvalidateTransientTypes(
Stack<const Type*>* stack) const {
auto current = stack->begin();
@@ -183,6 +188,22 @@ DefinitionLocation CallIntrinsicInstruction::GetValueDefinition(
return DefinitionLocation::Instruction(this, index);
}
+std::ostream& operator<<(std::ostream& os,
+ const CallIntrinsicInstruction& instruction) {
+ os << "CallIntrinsic " << instruction.intrinsic->ReadableName();
+ if (!instruction.specialization_types.empty()) {
+ os << "<";
+ PrintCommaSeparatedList(
+ os, instruction.specialization_types,
+ [](const Type* type) -> const Type& { return *type; });
+ os << ">";
+ }
+ os << "(";
+ PrintCommaSeparatedList(os, instruction.constexpr_arguments);
+ os << ")";
+ return os;
+}
+
void CallCsaMacroInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
std::vector<const Type*> parameter_types =
@@ -243,6 +264,18 @@ DefinitionLocation CallCsaMacroInstruction::GetValueDefinition(
return DefinitionLocation::Instruction(this, index);
}
+std::ostream& operator<<(std::ostream& os,
+ const CallCsaMacroInstruction& instruction) {
+ os << "CallCsaMacro " << instruction.macro->ReadableName();
+ os << "(";
+ PrintCommaSeparatedList(os, instruction.constexpr_arguments);
+ os << ")";
+ if (instruction.catch_block) {
+ os << ", catch block " << (*instruction.catch_block)->id();
+ }
+ return os;
+}
+
void CallCsaMacroAndBranchInstruction::TypeInstruction(
Stack<const Type*>* stack, ControlFlowGraph* cfg) const {
std::vector<const Type*> parameter_types =
@@ -363,6 +396,26 @@ CallCsaMacroAndBranchInstruction::GetExceptionObjectDefinition() const {
return DefinitionLocation::Instruction(this, GetValueDefinitionCount());
}
+std::ostream& operator<<(std::ostream& os,
+ const CallCsaMacroAndBranchInstruction& instruction) {
+ os << "CallCsaMacroAndBranch " << instruction.macro->ReadableName();
+ os << "(";
+ PrintCommaSeparatedList(os, instruction.constexpr_arguments);
+ os << ")";
+ if (instruction.return_continuation) {
+ os << ", return continuation " << (*instruction.return_continuation)->id();
+ }
+ if (!instruction.label_blocks.empty()) {
+ os << ", label blocks ";
+ PrintCommaSeparatedList(os, instruction.label_blocks,
+ [](Block* block) { return block->id(); });
+ }
+ if (instruction.catch_block) {
+ os << ", catch block " << (*instruction.catch_block)->id();
+ }
+ return os;
+}
+
void CallBuiltinInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
std::vector<const Type*> argument_types = stack->PopMany(argc);
@@ -447,6 +500,19 @@ DefinitionLocation CallBuiltinPointerInstruction::GetValueDefinition(
return DefinitionLocation::Instruction(this, index);
}
+std::ostream& operator<<(std::ostream& os,
+ const CallBuiltinInstruction& instruction) {
+ os << "CallBuiltin " << instruction.builtin->ReadableName()
+ << ", argc: " << instruction.argc;
+ if (instruction.is_tailcall) {
+ os << ", is_tailcall";
+ }
+ if (instruction.catch_block) {
+ os << ", catch block " << (*instruction.catch_block)->id();
+ }
+ return os;
+}
+
void CallRuntimeInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
std::vector<const Type*> argument_types = stack->PopMany(argc);
@@ -507,6 +573,19 @@ CallRuntimeInstruction::GetExceptionObjectDefinition() const {
return DefinitionLocation::Instruction(this, GetValueDefinitionCount());
}
+std::ostream& operator<<(std::ostream& os,
+ const CallRuntimeInstruction& instruction) {
+ os << "CallRuntime " << instruction.runtime_function->ReadableName()
+ << ", argc: " << instruction.argc;
+ if (instruction.is_tailcall) {
+ os << ", is_tailcall";
+ }
+ if (instruction.catch_block) {
+ os << ", catch block " << (*instruction.catch_block)->id();
+ }
+ return os;
+}
+
void BranchInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
const Type* condition_type = stack->Pop();
@@ -524,6 +603,12 @@ void BranchInstruction::RecomputeDefinitionLocations(
if_false->MergeInputDefinitions(*locations, worklist);
}
+std::ostream& operator<<(std::ostream& os,
+ const BranchInstruction& instruction) {
+ return os << "Branch true: " << instruction.if_true->id()
+ << ", false: " << instruction.if_false->id();
+}
+
void ConstexprBranchInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
if_true->SetInputTypes(*stack);
@@ -536,6 +621,13 @@ void ConstexprBranchInstruction::RecomputeDefinitionLocations(
if_false->MergeInputDefinitions(*locations, worklist);
}
+std::ostream& operator<<(std::ostream& os,
+ const ConstexprBranchInstruction& instruction) {
+ return os << "ConstexprBranch " << instruction.condition
+ << ", true: " << instruction.if_true->id()
+ << ", false: " << instruction.if_false->id();
+}
+
void GotoInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
destination->SetInputTypes(*stack);
@@ -546,6 +638,10 @@ void GotoInstruction::RecomputeDefinitionLocations(
destination->MergeInputDefinitions(*locations, worklist);
}
+std::ostream& operator<<(std::ostream& os, const GotoInstruction& instruction) {
+ return os << "Goto " << instruction.destination->id();
+}
+
void GotoExternalInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
if (variable_names.size() != stack->Size()) {
@@ -693,6 +789,16 @@ DefinitionLocation MakeLazyNodeInstruction::GetValueDefinition() const {
return DefinitionLocation::Instruction(this, 0);
}
+std::ostream& operator<<(std::ostream& os,
+ const MakeLazyNodeInstruction& instruction) {
+ os << "MakeLazyNode " << instruction.macro->ReadableName() << ", "
+ << *instruction.result_type;
+ for (const std::string& arg : instruction.constexpr_arguments) {
+ os << ", " << arg;
+ }
+ return os;
+}
+
bool CallRuntimeInstruction::IsBlockTerminator() const {
return is_tailcall || runtime_function->signature().return_type ==
TypeOracle::GetNeverType();
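
Each of the new printers follows the same shape: stream a mnemonic, then any constexpr arguments, then optional pieces such as a catch block only when present. A standalone example of that idiom using invented types rather than the Torque instruction structs:

#include <cstddef>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

struct CallLikeInstruction {
  std::string name;
  std::vector<std::string> constexpr_arguments;
  std::optional<int> catch_block_id;
};

std::ostream& operator<<(std::ostream& os, const CallLikeInstruction& insn) {
  os << "Call " << insn.name << "(";
  for (std::size_t i = 0; i < insn.constexpr_arguments.size(); ++i) {
    if (i > 0) os << ", ";
    os << insn.constexpr_arguments[i];
  }
  os << ")";
  if (insn.catch_block_id) os << ", catch block " << *insn.catch_block_id;
  return os;
}

int main() {
  CallLikeInstruction insn{"LoadField", {"\"offset\"", "8"}, 4};
  std::cout << insn << "\n";  // Call LoadField("offset", 8), catch block 4
}
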
diff --git a/deps/v8/src/torque/instructions.h b/deps/v8/src/torque/instructions.h
index 85fd7f897cd..88736a4ace9 100644
--- a/deps/v8/src/torque/instructions.h
+++ b/deps/v8/src/torque/instructions.h
@@ -288,6 +288,15 @@ struct PeekInstruction : InstructionBase {
base::Optional<const Type*> widened_type;
};
+inline std::ostream& operator<<(std::ostream& os,
+ const PeekInstruction& instruction) {
+ os << "Peek " << instruction.slot;
+ if (instruction.widened_type) {
+ os << ", " << **instruction.widened_type;
+ }
+ return os;
+}
+
struct PokeInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
@@ -298,6 +307,15 @@ struct PokeInstruction : InstructionBase {
base::Optional<const Type*> widened_type;
};
+inline std::ostream& operator<<(std::ostream& os,
+ const PokeInstruction& instruction) {
+ os << "Poke " << instruction.slot;
+ if (instruction.widened_type) {
+ os << ", " << **instruction.widened_type;
+ }
+ return os;
+}
+
// Preserve the top {preserved_slots} number of slots, and delete
// {deleted_slots} number or slots below.
struct DeleteRangeInstruction : InstructionBase {
@@ -307,6 +325,11 @@ struct DeleteRangeInstruction : InstructionBase {
StackRange range;
};
+inline std::ostream& operator<<(std::ostream& os,
+ const DeleteRangeInstruction& instruction) {
+ return os << "DeleteRange " << instruction.range;
+}
+
struct PushUninitializedInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
explicit PushUninitializedInstruction(const Type* type) : type(type) {}
@@ -316,6 +339,11 @@ struct PushUninitializedInstruction : InstructionBase {
const Type* type;
};
+inline std::ostream& operator<<(
+ std::ostream& os, const PushUninitializedInstruction& instruction) {
+ return os << "PushUninitialized " << *instruction.type;
+}
+
struct PushBuiltinPointerInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
PushBuiltinPointerInstruction(std::string external_name, const Type* type)
@@ -329,6 +357,13 @@ struct PushBuiltinPointerInstruction : InstructionBase {
const Type* type;
};
+inline std::ostream& operator<<(
+ std::ostream& os, const PushBuiltinPointerInstruction& instruction) {
+ return os << "PushBuiltinPointer "
+ << StringLiteralQuote(instruction.external_name) << ", "
+ << *instruction.type;
+}
+
struct NamespaceConstantInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
explicit NamespaceConstantInstruction(NamespaceConstant* constant)
@@ -340,6 +375,9 @@ struct NamespaceConstantInstruction : InstructionBase {
NamespaceConstant* constant;
};
+std::ostream& operator<<(std::ostream& os,
+ const NamespaceConstantInstruction& instruction);
+
struct LoadReferenceInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
explicit LoadReferenceInstruction(const Type* type) : type(type) {}
@@ -349,12 +387,22 @@ struct LoadReferenceInstruction : InstructionBase {
const Type* type;
};
+inline std::ostream& operator<<(std::ostream& os,
+ const LoadReferenceInstruction& instruction) {
+ return os << "LoadReference " << *instruction.type;
+}
+
struct StoreReferenceInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
explicit StoreReferenceInstruction(const Type* type) : type(type) {}
const Type* type;
};
+inline std::ostream& operator<<(std::ostream& os,
+ const StoreReferenceInstruction& instruction) {
+ return os << "StoreReference " << *instruction.type;
+}
+
// Pops a bitfield struct; pushes a bitfield value extracted from it.
struct LoadBitFieldInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
@@ -368,6 +416,12 @@ struct LoadBitFieldInstruction : InstructionBase {
BitField bit_field;
};
+inline std::ostream& operator<<(std::ostream& os,
+ const LoadBitFieldInstruction& instruction) {
+ return os << "LoadBitField " << *instruction.bit_field_struct_type << ", "
+ << instruction.bit_field.name_and_type.name;
+}
+
// Pops a bitfield value and a bitfield struct; pushes a new bitfield struct
// containing the updated value.
struct StoreBitFieldInstruction : InstructionBase {
@@ -386,6 +440,16 @@ struct StoreBitFieldInstruction : InstructionBase {
bool starts_as_zero;
};
+inline std::ostream& operator<<(std::ostream& os,
+ const StoreBitFieldInstruction& instruction) {
+ os << "StoreBitField " << *instruction.bit_field_struct_type << ", "
+ << instruction.bit_field.name_and_type.name;
+ if (instruction.starts_as_zero) {
+ os << ", starts_as_zero";
+ }
+ return os;
+}
+
struct CallIntrinsicInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
CallIntrinsicInstruction(Intrinsic* intrinsic,
@@ -403,6 +467,9 @@ struct CallIntrinsicInstruction : InstructionBase {
std::vector<std::string> constexpr_arguments;
};
+std::ostream& operator<<(std::ostream& os,
+ const CallIntrinsicInstruction& instruction);
+
struct CallCsaMacroInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
CallCsaMacroInstruction(Macro* macro,
@@ -424,6 +491,9 @@ struct CallCsaMacroInstruction : InstructionBase {
base::Optional<Block*> catch_block;
};
+std::ostream& operator<<(std::ostream& os,
+ const CallCsaMacroInstruction& instruction);
+
struct CallCsaMacroAndBranchInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
CallCsaMacroAndBranchInstruction(Macro* macro,
@@ -458,6 +528,9 @@ struct CallCsaMacroAndBranchInstruction : InstructionBase {
base::Optional<Block*> catch_block;
};
+std::ostream& operator<<(std::ostream& os,
+ const CallCsaMacroAndBranchInstruction& instruction);
+
struct MakeLazyNodeInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
MakeLazyNodeInstruction(Macro* macro, const Type* result_type,
@@ -473,6 +546,9 @@ struct MakeLazyNodeInstruction : InstructionBase {
std::vector<std::string> constexpr_arguments;
};
+std::ostream& operator<<(std::ostream& os,
+ const MakeLazyNodeInstruction& instruction);
+
struct CallBuiltinInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
bool IsBlockTerminator() const override { return is_tailcall; }
@@ -496,6 +572,9 @@ struct CallBuiltinInstruction : InstructionBase {
base::Optional<Block*> catch_block;
};
+std::ostream& operator<<(std::ostream& os,
+ const CallBuiltinInstruction& instruction);
+
struct CallBuiltinPointerInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
bool IsBlockTerminator() const override { return is_tailcall; }
@@ -511,6 +590,16 @@ struct CallBuiltinPointerInstruction : InstructionBase {
size_t argc;
};
+inline std::ostream& operator<<(
+ std::ostream& os, const CallBuiltinPointerInstruction& instruction) {
+ os << "CallBuiltinPointer " << *instruction.type
+ << ", argc: " << instruction.argc;
+ if (instruction.is_tailcall) {
+ os << ", is_tailcall";
+ }
+ return os;
+}
+
struct CallRuntimeInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
bool IsBlockTerminator() const override;
@@ -535,6 +624,9 @@ struct CallRuntimeInstruction : InstructionBase {
base::Optional<Block*> catch_block;
};
+std::ostream& operator<<(std::ostream& os,
+ const CallRuntimeInstruction& instruction);
+
struct BranchInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
bool IsBlockTerminator() const override { return true; }
@@ -550,6 +642,9 @@ struct BranchInstruction : InstructionBase {
Block* if_false;
};
+std::ostream& operator<<(std::ostream& os,
+ const BranchInstruction& instruction);
+
struct ConstexprBranchInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
bool IsBlockTerminator() const override { return true; }
@@ -567,6 +662,9 @@ struct ConstexprBranchInstruction : InstructionBase {
Block* if_false;
};
+std::ostream& operator<<(std::ostream& os,
+ const ConstexprBranchInstruction& instruction);
+
struct GotoInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
bool IsBlockTerminator() const override { return true; }
@@ -579,6 +677,8 @@ struct GotoInstruction : InstructionBase {
Block* destination;
};
+std::ostream& operator<<(std::ostream& os, const GotoInstruction& instruction);
+
struct GotoExternalInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
bool IsBlockTerminator() const override { return true; }
@@ -592,6 +692,15 @@ struct GotoExternalInstruction : InstructionBase {
std::vector<std::string> variable_names;
};
+inline std::ostream& operator<<(std::ostream& os,
+ const GotoExternalInstruction& instruction) {
+ os << "GotoExternal " << instruction.destination;
+ for (const std::string& name : instruction.variable_names) {
+ os << ", " << name;
+ }
+ return os;
+}
+
struct ReturnInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
explicit ReturnInstruction(size_t count) : count(count) {}
@@ -600,6 +709,11 @@ struct ReturnInstruction : InstructionBase {
size_t count; // How many values to return.
};
+inline std::ostream& operator<<(std::ostream& os,
+ const ReturnInstruction& instruction) {
+ return os << "Return count: " << instruction.count;
+}
+
struct PrintConstantStringInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
explicit PrintConstantStringInstruction(std::string message)
@@ -608,17 +722,39 @@ struct PrintConstantStringInstruction : InstructionBase {
std::string message;
};
+inline std::ostream& operator<<(
+ std::ostream& os, const PrintConstantStringInstruction& instruction) {
+ return os << "PrintConstantString "
+ << StringLiteralQuote(instruction.message);
+}
+
struct AbortInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
enum class Kind { kDebugBreak, kUnreachable, kAssertionFailure };
bool IsBlockTerminator() const override { return kind != Kind::kDebugBreak; }
explicit AbortInstruction(Kind kind, std::string message = "")
: kind(kind), message(std::move(message)) {}
+ static const char* KindToString(Kind kind) {
+ switch (kind) {
+ case Kind::kDebugBreak:
+ return "kDebugBreak";
+ case Kind::kUnreachable:
+ return "kUnreachable";
+ case Kind::kAssertionFailure:
+ return "kAssertionFailure";
+ }
+ }
Kind kind;
std::string message;
};
+inline std::ostream& operator<<(std::ostream& os,
+ const AbortInstruction& instruction) {
+ return os << "Abort " << AbortInstruction::KindToString(instruction.kind)
+ << ", " << StringLiteralQuote(instruction.message);
+}
+
struct UnsafeCastInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
explicit UnsafeCastInstruction(const Type* destination_type)
@@ -629,6 +765,11 @@ struct UnsafeCastInstruction : InstructionBase {
const Type* destination_type;
};
+inline std::ostream& operator<<(std::ostream& os,
+ const UnsafeCastInstruction& instruction) {
+ return os << "UnsafeCast " << *instruction.destination_type;
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/torque/torque-code-generator.cc b/deps/v8/src/torque/torque-code-generator.cc
index 46763be4682..9108d9a7318 100644
--- a/deps/v8/src/torque/torque-code-generator.cc
+++ b/deps/v8/src/torque/torque-code-generator.cc
@@ -4,6 +4,8 @@
#include "src/torque/torque-code-generator.h"
+#include "src/torque/global-context.h"
+
namespace v8 {
namespace internal {
namespace torque {
@@ -31,8 +33,11 @@ void TorqueCodeGenerator::EmitInstruction(const Instruction& instruction,
#endif
switch (instruction.kind()) {
-#define ENUM_ITEM(T) \
- case InstructionKind::k##T: \
+#define ENUM_ITEM(T) \
+ case InstructionKind::k##T: \
+ if (GlobalContext::annotate_ir()) { \
+ EmitIRAnnotation(instruction.Cast<T>(), stack); \
+ } \
return EmitInstruction(instruction.Cast<T>(), stack);
TORQUE_INSTRUCTION_LIST(ENUM_ITEM)
#undef ENUM_ITEM
diff --git a/deps/v8/src/torque/torque-code-generator.h b/deps/v8/src/torque/torque-code-generator.h
index ddbd5309c9e..ed9e70f377c 100644
--- a/deps/v8/src/torque/torque-code-generator.h
+++ b/deps/v8/src/torque/torque-code-generator.h
@@ -74,6 +74,12 @@ class TorqueCodeGenerator {
void EmitInstruction(const Instruction& instruction,
Stack<std::string>* stack);
+ template <typename T>
+ void EmitIRAnnotation(const T& instruction, Stack<std::string>* stack) {
+ out() << " // " << instruction
+ << ", starting stack size: " << stack->Size() << "\n";
+ }
+
#define EMIT_INSTRUCTION_DECLARATION(T) \
void EmitInstruction(const T& instruction, Stack<std::string>* stack);
TORQUE_BACKEND_AGNOSTIC_INSTRUCTION_LIST(EMIT_INSTRUCTION_DECLARATION)
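As an aside on the mechanism above: with IR annotation enabled, every instruction is first streamed through its operator<< and written as a comment into the generated file. The following is a minimal standalone sketch of that pattern, not V8 code; FakeReturnInstruction and the free-standing EmitIRAnnotation here are invented for illustration.

#include <cstddef>
#include <iostream>
#include <sstream>

// Stand-in for a Torque instruction; the real ones live in instructions.h.
struct FakeReturnInstruction {
  size_t count;
};

// Same shape as the operator<< overloads added to instructions.h above.
std::ostream& operator<<(std::ostream& os, const FakeReturnInstruction& insn) {
  return os << "Return count: " << insn.count;
}

// Analogue of TorqueCodeGenerator::EmitIRAnnotation: prefix the instruction
// with "// " and append the current stack size.
template <typename T>
void EmitIRAnnotation(std::ostream& out, const T& insn, size_t stack_size) {
  out << "  // " << insn << ", starting stack size: " << stack_size << "\n";
}

int main() {
  std::ostringstream generated;
  EmitIRAnnotation(generated, FakeReturnInstruction{2}, 5);
  std::cout << generated.str();  // "  // Return count: 2, starting stack size: 5"
}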
diff --git a/deps/v8/src/torque/torque-compiler.cc b/deps/v8/src/torque/torque-compiler.cc
index 6da18dd526e..64bad91cab8 100644
--- a/deps/v8/src/torque/torque-compiler.cc
+++ b/deps/v8/src/torque/torque-compiler.cc
@@ -53,6 +53,9 @@ void CompileCurrentAst(TorqueCompilerOptions options) {
if (options.force_assert_statements) {
GlobalContext::SetForceAssertStatements();
}
+ if (options.annotate_ir) {
+ GlobalContext::SetAnnotateIR();
+ }
TargetArchitecture::Scope target_architecture(options.force_32bit_output);
TypeOracle::Scope type_oracle;
CurrentScope::Scope current_namespace(GlobalContext::GetDefaultNamespace());
diff --git a/deps/v8/src/torque/torque-compiler.h b/deps/v8/src/torque/torque-compiler.h
index df81d60d3ee..0e8f3b42ae0 100644
--- a/deps/v8/src/torque/torque-compiler.h
+++ b/deps/v8/src/torque/torque-compiler.h
@@ -30,6 +30,9 @@ struct TorqueCompilerOptions {
// architectures. Note that this is not needed in Chromium/V8 land, since we
// always build with the same bit width as the target architecture.
bool force_32bit_output = false;
+
+ // Adds extra comments in the output that show the Torque intermediate representation.
+ bool annotate_ir = false;
};
struct TorqueCompilerResult {
diff --git a/deps/v8/src/torque/torque-parser.cc b/deps/v8/src/torque/torque-parser.cc
index 9a81d402ead..cab0182677a 100644
--- a/deps/v8/src/torque/torque-parser.cc
+++ b/deps/v8/src/torque/torque-parser.cc
@@ -1987,10 +1987,27 @@ base::Optional<ParseResult> MakeClassField(ParseResultIterator* child_results) {
auto weak = child_results->NextAs<bool>();
auto const_qualified = child_results->NextAs<bool>();
auto name = child_results->NextAs<Identifier*>();
+ auto optional = child_results->NextAs<bool>();
auto index = child_results->NextAs<base::Optional<Expression*>>();
+ if (optional && !index) {
+ Error(
+ "Fields using optional specifier must also provide an expression "
+ "indicating the condition for whether the field is present");
+ }
+ base::Optional<ClassFieldIndexInfo> index_info;
+ if (index) {
+ if (optional) {
+ // Internally, an optional field is just an indexed field where the count
+ // is zero or one.
+ index = MakeNode<ConditionalExpression>(
+ *index, MakeNode<NumberLiteralExpression>(1),
+ MakeNode<NumberLiteralExpression>(0));
+ }
+ index_info = ClassFieldIndexInfo{*index, optional};
+ }
auto type = child_results->NextAs<TypeExpression*>();
return ParseResult{ClassFieldExpression{{name, type},
- index,
+ index_info,
std::move(conditions),
weak,
const_qualified,
@@ -2268,7 +2285,8 @@ struct TorqueGrammar : Grammar {
// Result: ClassFieldExpression
Symbol classField = {
Rule({annotations, CheckIf(Token("weak")), CheckIf(Token("const")), &name,
- optionalArraySpecifier, Token(":"), &type, Token(";")},
+ CheckIf(Token("?")), optionalArraySpecifier, Token(":"), &type,
+ Token(";")},
MakeClassField)};
// Result: StructFieldExpression
diff --git a/deps/v8/src/torque/torque.cc b/deps/v8/src/torque/torque.cc
index ad7551f8aa4..4e71c430140 100644
--- a/deps/v8/src/torque/torque.cc
+++ b/deps/v8/src/torque/torque.cc
@@ -34,6 +34,8 @@ int WrappedMain(int argc, const char** argv) {
options.v8_root = std::string(argv[++i]);
} else if (argument == "-m32") {
options.force_32bit_output = true;
+ } else if (argument == "-annotate-ir") {
+ options.annotate_ir = true;
} else {
// Otherwise it's a .tq file. Remember it for compilation.
files.emplace_back(std::move(argument));
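For context, the flag handling added here can be exercised in isolation. The sketch below mirrors only the two flags visible in this hunk (-m32 and -annotate-ir) against a local Options struct; it is not the real TorqueCompilerOptions, and the real WrappedMain accepts additional arguments.

#include <iostream>
#include <string>
#include <vector>

// Local stand-in for the option bits touched by this patch.
struct Options {
  bool force_32bit_output = false;
  bool annotate_ir = false;
};

Options ParseArgs(int argc, const char** argv, std::vector<std::string>* files) {
  Options options;
  for (int i = 1; i < argc; ++i) {
    std::string argument(argv[i]);
    if (argument == "-m32") {
      options.force_32bit_output = true;
    } else if (argument == "-annotate-ir") {
      options.annotate_ir = true;
    } else {
      // Otherwise it's a .tq file. Remember it for compilation.
      files->push_back(std::move(argument));
    }
  }
  return options;
}

int main(int argc, const char** argv) {
  std::vector<std::string> files;
  Options options = ParseArgs(argc, argv, &files);
  std::cout << "annotate_ir=" << options.annotate_ir
            << ", sources=" << files.size() << "\n";
}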
diff --git a/deps/v8/src/torque/type-oracle.h b/deps/v8/src/torque/type-oracle.h
index e0d67415012..e184bc0f72c 100644
--- a/deps/v8/src/torque/type-oracle.h
+++ b/deps/v8/src/torque/type-oracle.h
@@ -267,6 +267,14 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return Get().GetBuiltinType(UINTPTR_TYPE_STRING);
}
+ static const Type* GetInt64Type() {
+ return Get().GetBuiltinType(INT64_TYPE_STRING);
+ }
+
+ static const Type* GetUint64Type() {
+ return Get().GetBuiltinType(UINT64_TYPE_STRING);
+ }
+
static const Type* GetInt32Type() {
return Get().GetBuiltinType(INT32_TYPE_STRING);
}
diff --git a/deps/v8/src/torque/type-visitor.cc b/deps/v8/src/torque/type-visitor.cc
index 2673faf32d3..3b94d6a512f 100644
--- a/deps/v8/src/torque/type-visitor.cc
+++ b/deps/v8/src/torque/type-visitor.cc
@@ -420,7 +420,7 @@ void TypeVisitor::VisitClassFieldsAndMethods(
ReportError("in-object properties cannot be weak");
}
}
- base::Optional<Expression*> array_length = field_expression.index;
+ base::Optional<ClassFieldIndexInfo> array_length = field_expression.index;
const Field& field = class_type->RegisterField(
{field_expression.name_and_type.name->pos,
class_type,
@@ -440,7 +440,8 @@ void TypeVisitor::VisitClassFieldsAndMethods(
field.ValidateAlignment(class_offset +
field_size * ResidueClass::Unknown());
- if (auto literal = NumberLiteralExpression::DynamicCast(*field.index)) {
+ if (auto literal =
+ NumberLiteralExpression::DynamicCast(field.index->expr)) {
size_t value = static_cast<size_t>(literal->number);
if (value != literal->number) {
Error("non-integral array length").Position(field.pos);
diff --git a/deps/v8/src/torque/types.cc b/deps/v8/src/torque/types.cc
index ff2531ebd15..5ea7fe73caf 100644
--- a/deps/v8/src/torque/types.cc
+++ b/deps/v8/src/torque/types.cc
@@ -743,12 +743,16 @@ void ClassType::GenerateAccessors() {
continue;
}
+ // An explicit index is only used for indexed fields not marked as optional.
+ // Optional fields implicitly load or store item zero.
+ bool use_index = field.index && !field.index->optional;
+
// Load accessor
std::string load_macro_name = "Load" + this->name() + camel_field_name;
Signature load_signature;
load_signature.parameter_names.push_back(MakeNode<Identifier>("o"));
load_signature.parameter_types.types.push_back(this);
- if (field.index) {
+ if (use_index) {
load_signature.parameter_names.push_back(MakeNode<Identifier>("i"));
load_signature.parameter_types.types.push_back(
TypeOracle::GetIntPtrType());
@@ -758,7 +762,7 @@ void ClassType::GenerateAccessors() {
Expression* load_expression =
MakeFieldAccessExpression(parameter, field.name_and_type.name);
- if (field.index) {
+ if (use_index) {
load_expression =
MakeNode<ElementAccessExpression>(load_expression, index);
}
@@ -773,7 +777,7 @@ void ClassType::GenerateAccessors() {
Signature store_signature;
store_signature.parameter_names.push_back(MakeNode<Identifier>("o"));
store_signature.parameter_types.types.push_back(this);
- if (field.index) {
+ if (use_index) {
store_signature.parameter_names.push_back(MakeNode<Identifier>("i"));
store_signature.parameter_types.types.push_back(
TypeOracle::GetIntPtrType());
@@ -785,7 +789,7 @@ void ClassType::GenerateAccessors() {
store_signature.return_type = TypeOracle::GetVoidType();
Expression* store_expression =
MakeFieldAccessExpression(parameter, field.name_and_type.name);
- if (field.index) {
+ if (use_index) {
store_expression =
MakeNode<ElementAccessExpression>(store_expression, index);
}
@@ -806,23 +810,23 @@ void ClassType::GenerateSliceAccessor(size_t field_index) {
//
// If the field has a known offset (in this example, 16):
// FieldSliceClassNameFieldName(o: ClassName) {
- // return torque_internal::unsafe::New{Const,Mutable}Slice<FieldType>(
- // object: o,
- // offset: 16,
- // length: torque_internal::%IndexedFieldLength<ClassName>(
- // o, "field_name")
+ // return torque_internal::unsafe::New{Const,Mutable}Slice<FieldType>(
+ // /*object:*/ o,
+ // /*offset:*/ 16,
+ // /*length:*/ torque_internal::%IndexedFieldLength<ClassName>(
+ // o, "field_name")
// );
// }
//
// If the field has an unknown offset, and the previous field is named p, and
// an item in the previous field has size 4:
// FieldSliceClassNameFieldName(o: ClassName) {
- // const previous = &o.p;
- // return torque_internal::unsafe::New{Const,Mutable}Slice<FieldType>(
- // object: o,
- // offset: previous.offset + 4 * previous.length,
- // length: torque_internal::%IndexedFieldLength<ClassName>(
- // o, "field_name")
+ // const previous = %FieldSlice<ClassName>(o, "p");
+ // return torque_internal::unsafe::New{Const,Mutable}Slice<FieldType>(
+ // /*object:*/ o,
+ // /*offset:*/ previous.offset + 4 * previous.length,
+ // /*length:*/ torque_internal::%IndexedFieldLength<ClassName>(
+ // o, "field_name")
// );
// }
const Field& field = fields_[field_index];
@@ -849,14 +853,14 @@ void ClassType::GenerateSliceAccessor(size_t field_index) {
const Field* previous = GetFieldPreceding(field_index);
DCHECK_NOT_NULL(previous);
- // o.p
- Expression* previous_expression =
- MakeFieldAccessExpression(parameter, previous->name_and_type.name);
-
- // &o.p
- previous_expression = MakeCallExpression("&", {previous_expression});
+ // %FieldSlice<ClassName>(o, "p")
+ Expression* previous_expression = MakeCallExpression(
+ MakeIdentifierExpression({"torque_internal"}, "%FieldSlice",
+ {MakeNode<PrecomputedTypeExpression>(this)}),
+ {parameter, MakeNode<StringLiteralExpression>(
+ StringLiteralQuote(previous->name_and_type.name))});
- // const previous = &o.p;
+ // const previous = %FieldSlice<ClassName>(o, "p");
Statement* define_previous =
MakeConstDeclarationStatement("previous", previous_expression);
statements.push_back(define_previous);
@@ -896,10 +900,10 @@ void ClassType::GenerateSliceAccessor(size_t field_index) {
StringLiteralQuote(field.name_and_type.name))});
// torque_internal::unsafe::New{Const,Mutable}Slice<FieldType>(
- // object: o,
- // offset: <<offset_expression>>,
- // length: torque_internal::%IndexedFieldLength<ClassName>(
- // o, "field_name")
+ // /*object:*/ o,
+ // /*offset:*/ <<offset_expression>>,
+ // /*length:*/ torque_internal::%IndexedFieldLength<ClassName>(
+ // o, "field_name")
// )
IdentifierExpression* new_struct = MakeIdentifierExpression(
{"torque_internal", "unsafe"},
diff --git a/deps/v8/src/torque/types.h b/deps/v8/src/torque/types.h
index ac6c5e3263f..e231fb9431d 100644
--- a/deps/v8/src/torque/types.h
+++ b/deps/v8/src/torque/types.h
@@ -215,7 +215,7 @@ struct Field {
SourcePosition pos;
const AggregateType* aggregate;
- base::Optional<Expression*> index;
+ base::Optional<ClassFieldIndexInfo> index;
NameAndType name_and_type;
// The byte offset of this field from the beginning of the containing class or
@@ -785,6 +785,20 @@ inline std::ostream& operator<<(std::ostream& os, const Type& t) {
return os;
}
+template <bool success = false>
+std::ostream& operator<<(std::ostream& os, const Type* t) {
+ static_assert(success,
+ "Using Type* with an ostream is usually a mistake. Did you "
+ "mean to use Type& instead? If you actually intended to print "
+ "a pointer, use static_cast<const void*>.");
+ return os;
+}
+
+// Don't emit an error if a Type* is printed due to CHECK macros.
+inline std::ostream& operator<<(base::CheckMessageStream& os, const Type* t) {
+ return os << static_cast<const void*>(t);
+}
+
class VisitResult {
public:
VisitResult() = default;
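The Type* stream overload above uses a template-plus-static_assert trick so that the error only fires when the overload is actually instantiated, i.e. when someone writes `os << some_type_ptr`. A self-contained sketch of the same pattern, using an invented Widget type instead of Torque's Type:

#include <iostream>

struct Widget {};

inline std::ostream& operator<<(std::ostream& os, const Widget&) {
  return os << "Widget";
}

// Poisoned overload: the static_assert depends on the template parameter, so
// it is only evaluated if this overload is selected and instantiated.
template <bool success = false>
std::ostream& operator<<(std::ostream& os, const Widget*) {
  static_assert(success,
                "Streaming a Widget* is usually a mistake; dereference it, or "
                "cast to const void* to print the address.");
  return os;
}

int main() {
  Widget w;
  std::cout << w << "\n";                              // fine: prints "Widget"
  std::cout << static_cast<const void*>(&w) << "\n";   // fine: prints an address
  // std::cout << &w << "\n";  // would fail to compile with the message above
}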
diff --git a/deps/v8/src/torque/utils.cc b/deps/v8/src/torque/utils.cc
index 6118341e1e0..ab88f6aef66 100644
--- a/deps/v8/src/torque/utils.cc
+++ b/deps/v8/src/torque/utils.cc
@@ -192,19 +192,28 @@ bool IsKeywordLikeName(const std::string& s) {
// Untagged/MachineTypes like 'int32', 'intptr' etc. follow a 'all-lowercase'
// naming convention and are those exempt from the normal type convention.
bool IsMachineType(const std::string& s) {
- static const char* const machine_types[]{
- VOID_TYPE_STRING, NEVER_TYPE_STRING,
- INT8_TYPE_STRING, UINT8_TYPE_STRING,
- INT16_TYPE_STRING, UINT16_TYPE_STRING,
- INT31_TYPE_STRING, UINT31_TYPE_STRING,
- INT32_TYPE_STRING, UINT32_TYPE_STRING,
- INT64_TYPE_STRING, INTPTR_TYPE_STRING,
- UINTPTR_TYPE_STRING, FLOAT32_TYPE_STRING,
- FLOAT64_TYPE_STRING, FLOAT64_OR_HOLE_TYPE_STRING,
- BOOL_TYPE_STRING, "string",
- BINT_TYPE_STRING, CHAR8_TYPE_STRING,
- CHAR16_TYPE_STRING};
-
+ static const char* const machine_types[]{VOID_TYPE_STRING,
+ NEVER_TYPE_STRING,
+ INT8_TYPE_STRING,
+ UINT8_TYPE_STRING,
+ INT16_TYPE_STRING,
+ UINT16_TYPE_STRING,
+ INT31_TYPE_STRING,
+ UINT31_TYPE_STRING,
+ INT32_TYPE_STRING,
+ UINT32_TYPE_STRING,
+ INT64_TYPE_STRING,
+ UINT64_TYPE_STRING,
+ INTPTR_TYPE_STRING,
+ UINTPTR_TYPE_STRING,
+ FLOAT32_TYPE_STRING,
+ FLOAT64_TYPE_STRING,
+ FLOAT64_OR_HOLE_TYPE_STRING,
+ BOOL_TYPE_STRING,
+ "string",
+ BINT_TYPE_STRING,
+ CHAR8_TYPE_STRING,
+ CHAR16_TYPE_STRING};
return std::find(std::begin(machine_types), std::end(machine_types), s) !=
std::end(machine_types);
}
@@ -313,6 +322,10 @@ std::string UnderlinifyPath(std::string path) {
return path;
}
+bool StartsWithSingleUnderscore(const std::string& str) {
+ return str.length() >= 2 && str[0] == '_' && str[1] != '_';
+}
+
void ReplaceFileContentsIfDifferent(const std::string& file_path,
const std::string& contents) {
std::ifstream old_contents_stream(file_path.c_str());
diff --git a/deps/v8/src/torque/utils.h b/deps/v8/src/torque/utils.h
index 0198ca2b5fd..327e1946c52 100644
--- a/deps/v8/src/torque/utils.h
+++ b/deps/v8/src/torque/utils.h
@@ -105,6 +105,8 @@ std::string SnakeifyString(const std::string& camel_string);
std::string DashifyString(const std::string& underscore_string);
std::string UnderlinifyPath(std::string path);
+bool StartsWithSingleUnderscore(const std::string& str);
+
void ReplaceFileContentsIfDifferent(const std::string& file_path,
const std::string& contents);
diff --git a/deps/v8/src/tracing/OWNERS b/deps/v8/src/tracing/OWNERS
index 6afd4d0fee4..7c8128c2f2e 100644
--- a/deps/v8/src/tracing/OWNERS
+++ b/deps/v8/src/tracing/OWNERS
@@ -1,2 +1 @@
-alph@chromium.org
-petermarshall@chromium.org
+cbruni@chromium.org
diff --git a/deps/v8/src/tracing/trace-categories.h b/deps/v8/src/tracing/trace-categories.h
index 11b786ffbb3..e2f03c201dd 100644
--- a/deps/v8/src/tracing/trace-categories.h
+++ b/deps/v8/src/tracing/trace-categories.h
@@ -28,11 +28,9 @@ PERFETTO_DEFINE_TEST_CATEGORY_PREFIXES("v8-cat", "cat", "v8.Test2");
// clang-format off
PERFETTO_DEFINE_CATEGORIES(
perfetto::Category("cppgc"),
- perfetto::Category("V8.HandleInterrupts"),
perfetto::Category("v8"),
perfetto::Category("v8.console"),
perfetto::Category("v8.execute"),
- perfetto::Category("v8.runtime"),
perfetto::Category("v8.wasm"),
perfetto::Category::Group("devtools.timeline,v8"),
perfetto::Category::Group("devtools.timeline,"
diff --git a/deps/v8/src/trap-handler/OWNERS b/deps/v8/src/trap-handler/OWNERS
index 7035a46ab6c..40e2deff77d 100644
--- a/deps/v8/src/trap-handler/OWNERS
+++ b/deps/v8/src/trap-handler/OWNERS
@@ -1,5 +1,5 @@
-titzer@chromium.org
ahaas@chromium.org
+thibaudm@chromium.org
# Changes to this directory should also be reviewed by:
#
diff --git a/deps/v8/src/utils/utils.h b/deps/v8/src/utils/utils.h
index e22fbd547e1..b0d7910aa99 100644
--- a/deps/v8/src/utils/utils.h
+++ b/deps/v8/src/utils/utils.h
@@ -213,6 +213,13 @@ Wide AddLong(Narrow a, Narrow b) {
return static_cast<Wide>(a) + static_cast<Wide>(b);
}
+template <typename T>
+inline T RoundingAverageUnsigned(T a, T b) {
+ static_assert(std::is_unsigned<T>::value, "Only for unsigned types");
+ static_assert(sizeof(T) < sizeof(uint64_t), "Must be smaller than uint64_t");
+ return (static_cast<uint64_t>(a) + static_cast<uint64_t>(b) + 1) >> 1;
+}
+
// Helper macros for defining a contiguous sequence of field offset constants.
// Example: (backslashes at the ends of respective lines of this multi-line
// macro definition are omitted here to please the compiler)
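To see why the helper above widens to uint64_t before adding: for 32-bit inputs near the maximum, a + b + 1 would overflow in 32 bits but fits comfortably in 64. A small self-contained check, duplicating the helper (with an explicit narrowing cast added) so it compiles on its own:

#include <cassert>
#include <cstdint>
#include <type_traits>

// Rounding average without intermediate overflow, as in the new utils.h helper.
template <typename T>
T RoundingAverageUnsigned(T a, T b) {
  static_assert(std::is_unsigned<T>::value, "Only for unsigned types");
  static_assert(sizeof(T) < sizeof(uint64_t), "Must be smaller than uint64_t");
  return static_cast<T>(
      (static_cast<uint64_t>(a) + static_cast<uint64_t>(b) + 1) >> 1);
}

int main() {
  // (3 + 4 + 1) >> 1 == 4: ties round up.
  assert(RoundingAverageUnsigned<uint8_t>(3, 4) == 4);
  // 0xFFFFFFFF + 0xFFFFFFFE + 1 == 0x1FFFFFFFE only fits in 64 bits; the
  // rounded average is 0xFFFFFFFF.
  assert(RoundingAverageUnsigned<uint32_t>(0xFFFFFFFFu, 0xFFFFFFFEu) == 0xFFFFFFFFu);
}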
diff --git a/deps/v8/src/wasm/OWNERS b/deps/v8/src/wasm/OWNERS
index 38224181e92..d0de7de9357 100644
--- a/deps/v8/src/wasm/OWNERS
+++ b/deps/v8/src/wasm/OWNERS
@@ -1,10 +1,7 @@
ahaas@chromium.org
bbudge@chromium.org
-binji@chromium.org
clemensb@chromium.org
gdeepti@chromium.org
jkummerow@chromium.org
thibaudm@chromium.org
zhin@chromium.org
-
-per-file wasm-js.*=adamk@chromium.org
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index b8c4911722c..62917ab0a34 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -544,7 +544,7 @@ int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
}
bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
- return kind == kS128 || is_reference_type(kind);
+ return kind == kS128 || is_reference(kind);
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
@@ -716,6 +716,14 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
offset_imm, LoadType::kI32Load, pinned);
}
+void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
+ int32_t offset_imm) {
+ UseScratchRegisterScope temps(this);
+ MemOperand src_op =
+ liftoff::GetMemOp(this, &temps, src_addr, no_reg, offset_imm);
+ ldr(dst, src_op);
+}
+
void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
@@ -759,13 +767,8 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc, bool is_load_mem) {
- // If offset_imm cannot be converted to int32 safely, we abort as a separate
- // check should cause this code to never be executed.
- // TODO(7881): Support when >2GB is required.
- if (!is_uint31(offset_imm)) {
- TurboAssembler::Abort(AbortReason::kOffsetOutOfRange);
- return;
- }
+ // Offsets >=2GB are statically OOB on 32-bit systems.
+ DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
liftoff::LoadInternal(this, dst, src_addr, offset_reg,
static_cast<int32_t>(offset_imm), type, pinned,
protected_load_pc, is_load_mem);
@@ -775,13 +778,8 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc, bool is_store_mem) {
- // If offset_imm cannot be converted to int32 safely, we abort as a separate
- // check should cause this code to never be executed.
- // TODO(7881): Support when >2GB is required.
- if (!is_uint31(offset_imm)) {
- TurboAssembler::Abort(AbortReason::kOffsetOutOfRange);
- return;
- }
+ // Offsets >=2GB are statically OOB on 32-bit systems.
+ DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
UseScratchRegisterScope temps(this);
if (type.value() == StoreType::kF64Store) {
Register actual_dst_addr = liftoff::CalculateActualAddress(
@@ -1373,7 +1371,7 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
DCHECK_NE(dst, src);
- DCHECK(kind == kI32 || is_reference_type(kind));
+ DCHECK(kind == kI32 || is_reference(kind));
TurboAssembler::Move(dst, src);
}
@@ -2193,9 +2191,8 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
DCHECK_EQ(kind, kI32);
cmp(lhs, Operand(0));
} else {
- DCHECK(kind == kI32 ||
- (is_reference_type(kind) &&
- (liftoff_cond == kEqual || liftoff_cond == kUnequal)));
+ DCHECK(kind == kI32 || (is_reference(kind) && (liftoff_cond == kEqual ||
+ liftoff_cond == kUnequal)));
cmp(lhs, rhs);
}
b(label, cond);
@@ -2600,23 +2597,20 @@ void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
LiftoffRegister src) {
- LowDwVfpRegister src_d = LowDwVfpRegister::from_code(src.low_fp().code());
- vcvt_f64_s32(dst.low_fp(), src_d.low());
- vcvt_f64_s32(dst.high_fp(), src_d.high());
+ F64x2ConvertLowI32x4S(liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src));
}
void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
LiftoffRegister src) {
- LowDwVfpRegister src_d = LowDwVfpRegister::from_code(src.low_fp().code());
- vcvt_f64_u32(dst.low_fp(), src_d.low());
- vcvt_f64_u32(dst.high_fp(), src_d.high());
+ F64x2ConvertLowI32x4U(liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src));
}
void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
- LowDwVfpRegister src_d = LowDwVfpRegister::from_code(src.low_fp().code());
- vcvt_f64_f32(dst.low_fp(), src_d.low());
- vcvt_f64_f32(dst.high_fp(), src_d.high());
+ F64x2PromoteLowF32x4(liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src));
}
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
@@ -2838,9 +2832,9 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
liftoff::GetSimd128Register(src));
}
-void LiftoffAssembler::emit_v64x2_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- V64x2AllTrue(dst.gp(), liftoff::GetSimd128Register(src));
+ I64x2AllTrue(dst.gp(), liftoff::GetSimd128Register(src));
}
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
@@ -3016,7 +3010,7 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
liftoff::GetSimd128Register(src));
}
-void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
UseScratchRegisterScope temps(this);
DwVfpRegister scratch = temps.AcquireD();
@@ -3151,12 +3145,14 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i32x4.extadd_pairwise_i16x8_s");
+ vpaddl(NeonS16, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src));
}
void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i32x4.extadd_pairwise_i16x8_u");
+ vpaddl(NeonU16, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src));
}
void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_s(LiftoffRegister dst,
@@ -3198,7 +3194,7 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
liftoff::GetSimd128Register(src));
}
-void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
UseScratchRegisterScope temps(this);
DwVfpRegister scratch = temps.AcquireD();
@@ -3369,12 +3365,14 @@ void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i16x8.extadd_pairwise_i8x16_s");
+ vpaddl(NeonS8, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src));
}
void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i16x8.extadd_pairwise_i8x16_u");
+ vpaddl(NeonU8, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src));
}
void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
@@ -3505,7 +3503,7 @@ void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
liftoff::EmitAnyTrue(this, dst, src);
}
-void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
UseScratchRegisterScope temps(this);
DwVfpRegister scratch = temps.AcquireD();
@@ -3611,12 +3609,6 @@ void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
-void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- vmul(Neon8, liftoff::GetSimd128Register(dst),
- liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
-}
-
void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -3771,7 +3763,8 @@ void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i64x2_ne");
+ I64x2Ne(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(lhs),
+ liftoff::GetSimd128Register(rhs));
}
void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
@@ -4025,7 +4018,7 @@ void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i64x2.abs");
+ I64x2Abs(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(src));
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
@@ -4173,7 +4166,7 @@ void LiftoffAssembler::CallC(const ValueKindSig* sig,
}
// Load potential output value from the buffer on the stack.
- if (out_argument_kind != kStmt) {
+ if (out_argument_kind != kVoid) {
switch (out_argument_kind) {
case kI32:
ldr(result_reg->gp(), MemOperand(sp));
@@ -4234,8 +4227,15 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
add(sp, sp, Operand(size));
}
-void LiftoffStackSlots::Construct() {
+void LiftoffStackSlots::Construct(int param_slots) {
+ DCHECK_LT(0, slots_.size());
+ SortInPushOrder();
+ int last_stack_slot = param_slots;
for (auto& slot : slots_) {
+ const int stack_slot = slot.dst_slot_;
+ int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
+ DCHECK_LT(0, stack_decrement);
+ last_stack_slot = stack_slot;
const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack: {
@@ -4247,6 +4247,7 @@ void LiftoffStackSlots::Construct() {
case kF32:
case kRef:
case kOptRef: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
UseScratchRegisterScope temps(asm_);
Register scratch = temps.Acquire();
asm_->ldr(scratch,
@@ -4254,12 +4255,14 @@ void LiftoffStackSlots::Construct() {
asm_->Push(scratch);
} break;
case kF64: {
+ asm_->AllocateStackSpace(stack_decrement - kDoubleSize);
UseScratchRegisterScope temps(asm_);
DwVfpRegister scratch = temps.AcquireD();
asm_->vldr(scratch, liftoff::GetStackSlot(slot.src_offset_));
asm_->vpush(scratch);
} break;
case kS128: {
+ asm_->AllocateStackSpace(stack_decrement - kSimd128Size);
MemOperand mem_op = liftoff::GetStackSlot(slot.src_offset_);
UseScratchRegisterScope temps(asm_);
Register addr = liftoff::CalculateActualAddress(
@@ -4274,7 +4277,9 @@ void LiftoffStackSlots::Construct() {
}
break;
}
- case LiftoffAssembler::VarState::kRegister:
+ case LiftoffAssembler::VarState::kRegister: {
+ int pushed_bytes = SlotSizeInBytes(slot);
+ asm_->AllocateStackSpace(stack_decrement - pushed_bytes);
switch (src.kind()) {
case kI64: {
LiftoffRegister reg =
@@ -4299,7 +4304,9 @@ void LiftoffStackSlots::Construct() {
UNREACHABLE();
}
break;
+ }
case LiftoffAssembler::VarState::kIntConst: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
DCHECK(src.kind() == kI32 || src.kind() == kI64);
UseScratchRegisterScope temps(asm_);
Register scratch = temps.Acquire();
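The padding logic added to Construct above can be looked at in isolation: for each slot, stack_decrement is the total distance the stack pointer moves for that slot (padding plus the bytes the push itself writes), and AllocateStackSpace receives that amount minus the pushed size. The sketch below uses made-up slot numbers and assumes kSystemPointerSize = 4, as on arm; it is not the assembler code itself.

#include <cstdio>
#include <vector>

constexpr int kSystemPointerSize = 4;  // 32-bit arm

// Per-slot stack decrements, mirroring:
//   stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
std::vector<int> StackDecrements(int param_slots, const std::vector<int>& dst_slots) {
  std::vector<int> decrements;
  int last_stack_slot = param_slots;
  for (int stack_slot : dst_slots) {  // dst_slots already sorted in push order
    decrements.push_back((last_stack_slot - stack_slot) * kSystemPointerSize);
    last_stack_slot = stack_slot;
  }
  return decrements;
}

int main() {
  // 4 parameter slots; values land in slots 3, 2 and 0. The slot-0 value also
  // covers slot 1 (e.g. a 64-bit value), so its decrement spans two slots.
  for (int d : StackDecrements(4, {3, 2, 0})) std::printf("%d ", d);
  std::printf("\n");  // prints: 4 4 8
}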
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index a2fe80891c1..39ef8528e52 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -109,11 +109,15 @@ inline CPURegister AcquireByType(UseScratchRegisterScope* temps,
case kI32:
return temps->AcquireW();
case kI64:
+ case kRef:
+ case kOptRef:
return temps->AcquireX();
case kF32:
return temps->AcquireS();
case kF64:
return temps->AcquireD();
+ case kS128:
+ return temps->AcquireQ();
default:
UNREACHABLE();
}
@@ -369,7 +373,7 @@ int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
}
bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
- return kind == kS128 || is_reference_type(kind);
+ return kind == kS128 || is_reference(kind);
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
@@ -440,6 +444,14 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
LoadTaggedPointerField(dst, src_op);
}
+void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
+ int32_t offset_imm) {
+ UseScratchRegisterScope temps(this);
+ MemOperand src_op =
+ liftoff::GetMemOp(this, &temps, src_addr, no_reg, offset_imm);
+ Ldr(dst.X(), src_op);
+}
+
void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
@@ -869,7 +881,7 @@ void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
if (kind == kI32) {
Mov(dst.W(), src.W());
} else {
- DCHECK(kI64 == kind || is_reference_type(kind));
+ DCHECK(kI64 == kind || is_reference(kind));
Mov(dst.X(), src.X());
}
}
@@ -1845,14 +1857,14 @@ void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
LiftoffRegister src) {
- Sxtl(dst.fp(), src.fp().V2S());
- Scvtf(dst.fp(), dst.fp());
+ Sxtl(dst.fp().V2D(), src.fp().V2S());
+ Scvtf(dst.fp().V2D(), dst.fp().V2D());
}
void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
LiftoffRegister src) {
- Uxtl(dst.fp(), src.fp().V2S());
- Ucvtf(dst.fp(), dst.fp());
+ Uxtl(dst.fp().V2D(), src.fp().V2S());
+ Ucvtf(dst.fp().V2D(), dst.fp().V2D());
}
void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
@@ -2010,9 +2022,9 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
Neg(dst.fp().V2D(), src.fp().V2D());
}
-void LiftoffAssembler::emit_v64x2_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- V64x2AllTrue(dst.gp(), src.fp());
+ I64x2AllTrue(dst.gp(), src.fp());
}
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2161,7 +2173,7 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
Neg(dst.fp().V4S(), src.fp().V4S());
}
-void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue(this, dst, src, kFormat4S);
}
@@ -2273,12 +2285,12 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i32x4.extadd_pairwise_i16x8_s");
+ Saddlp(dst.fp().V4S(), src.fp().V8H());
}
void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i32x4.extadd_pairwise_i16x8_u");
+ Uaddlp(dst.fp().V4S(), src.fp().V8H());
}
void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_s(LiftoffRegister dst,
@@ -2337,7 +2349,7 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
Neg(dst.fp().V8H(), src.fp().V8H());
}
-void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue(this, dst, src, kFormat8H);
}
@@ -2543,7 +2555,7 @@ void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
liftoff::EmitAnyTrue(this, dst, src);
}
-void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue(this, dst, src, kFormat16B);
}
@@ -2633,11 +2645,6 @@ void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
Uqsub(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
}
-void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- Mul(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
-}
-
void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2989,7 +2996,7 @@ void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
LiftoffRegister src) {
- Fcvtzs(dst.fp().V2D(), src.fp().V2D());
+ Fcvtzu(dst.fp().V2D(), src.fp().V2D());
Uqxtn(dst.fp().V2S(), dst.fp().V2D());
}
@@ -3023,12 +3030,12 @@ void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i16x8.extadd_pairwise_i8x16_s");
+ Saddlp(dst.fp().V8H(), src.fp().V16B());
}
void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i16x8.extadd_pairwise_i8x16_u");
+ Uaddlp(dst.fp().V8H(), src.fp().V16B());
}
void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
@@ -3068,7 +3075,7 @@ void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i64x2.abs");
+ Abs(dst.fp().V2D(), src.fp().V2D());
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
@@ -3160,7 +3167,7 @@ void LiftoffAssembler::CallC(const ValueKindSig* sig,
}
// Load potential output value from the buffer on the stack.
- if (out_argument_kind != kStmt) {
+ if (out_argument_kind != kVoid) {
Peek(liftoff::GetRegFromType(*next_result_reg, out_argument_kind), 0);
}
@@ -3214,16 +3221,12 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
Drop(size, 1);
}
-void LiftoffStackSlots::Construct() {
- size_t num_slots = 0;
- for (auto& slot : slots_) {
- num_slots += slot.src_.kind() == kS128 ? 2 : 1;
- }
+void LiftoffStackSlots::Construct(int param_slots) {
+ DCHECK_LT(0, slots_.size());
// The stack pointer is required to be quadword aligned.
- asm_->Claim(RoundUp(num_slots, 2));
- size_t poke_offset = num_slots * kXRegSize;
+ asm_->Claim(RoundUp(param_slots, 2));
for (auto& slot : slots_) {
- poke_offset -= slot.src_.kind() == kS128 ? kXRegSize * 2 : kXRegSize;
+ int poke_offset = slot.dst_slot_ * kSystemPointerSize;
switch (slot.src_.loc()) {
case LiftoffAssembler::VarState::kStack: {
UseScratchRegisterScope temps(asm_);
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index e0c27b25e81..83b00d4a2ad 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -98,6 +98,10 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
Operand dst(base, offset);
switch (kind) {
case kI32:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
assm->mov(dst, src.gp());
break;
case kI64:
@@ -118,27 +122,30 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
}
}
-inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
+inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind,
+ int padding = 0) {
switch (kind) {
case kI32:
case kRef:
case kOptRef:
+ assm->AllocateStackSpace(padding);
assm->push(reg.gp());
break;
case kI64:
+ assm->AllocateStackSpace(padding);
assm->push(reg.high_gp());
assm->push(reg.low_gp());
break;
case kF32:
- assm->AllocateStackSpace(sizeof(float));
+ assm->AllocateStackSpace(sizeof(float) + padding);
assm->movss(Operand(esp, 0), reg.fp());
break;
case kF64:
- assm->AllocateStackSpace(sizeof(double));
+ assm->AllocateStackSpace(sizeof(double) + padding);
assm->movsd(Operand(esp, 0), reg.fp());
break;
case kS128:
- assm->AllocateStackSpace(sizeof(double) * 2);
+ assm->AllocateStackSpace(sizeof(double) * 2 + padding);
assm->movdqu(Operand(esp, 0), reg.fp());
break;
default:
@@ -262,12 +269,11 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
}
int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
- return is_reference_type(kind) ? kSystemPointerSize
- : element_size_bytes(kind);
+ return is_reference(kind) ? kSystemPointerSize : element_size_bytes(kind);
}
bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
- return is_reference_type(kind);
+ return is_reference(kind);
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
@@ -340,6 +346,11 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
static_cast<uint32_t>(offset_imm), LoadType::kI32Load, pinned);
}
+void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
+ int32_t offset_imm) {
+ mov(dst, Operand(src_addr, offset_imm));
+}
+
void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
@@ -378,13 +389,8 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc, bool is_load_mem) {
- if (offset_imm > static_cast<uint32_t>(std::numeric_limits<int32_t>::max())) {
- // We do not generate code here, because such an offset should never pass
- // the bounds check. However, the spec requires us to compile code with such
- // an offset.
- Trap();
- return;
- }
+ // Offsets >=2GB are statically OOB on 32-bit systems.
+ DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
DCHECK_EQ(type.value_type() == kWasmI64, dst.is_gp_pair());
Operand src_op = offset_reg == no_reg
? Operand(src_addr, offset_imm)
@@ -460,6 +466,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc, bool is_store_mem) {
DCHECK_EQ(type.value_type() == kWasmI64, src.is_gp_pair());
+ // Offsets >=2GB are statically OOB on 32-bit systems.
DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
Operand dst_op = offset_reg == no_reg
? Operand(dst_addr, offset_imm)
@@ -1120,7 +1127,7 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
DCHECK_NE(dst, src);
- DCHECK(kI32 == kind || is_reference_type(kind));
+ DCHECK(kI32 == kind || is_reference(kind));
mov(dst, src);
}
@@ -2882,13 +2889,9 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- XMMRegister mask = liftoff::kScratchDoubleReg;
- // Out-of-range indices should return 0, add 112 (0x70) so that any value > 15
- // saturates to 128 (top bit set), so pshufb will zero that lane.
- TurboAssembler::Move(mask, uint32_t{0x70707070});
- Pshufd(mask, mask, uint8_t{0x0});
- Paddusb(mask, rhs.fp());
- Pshufb(dst.fp(), lhs.fp(), mask);
+ Register scratch = GetUnusedRegister(RegClass::kGpReg, {}).gp();
+ I8x16Swizzle(dst.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg,
+ scratch);
}
void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
@@ -2910,21 +2913,21 @@ void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
Movd(dst.fp(), src.gp());
- Pshuflw(dst.fp(), dst.fp(), 0);
- Pshufd(dst.fp(), dst.fp(), 0);
+ Pshuflw(dst.fp(), dst.fp(), uint8_t{0});
+ Pshufd(dst.fp(), dst.fp(), uint8_t{0});
}
void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
Movd(dst.fp(), src.gp());
- Pshufd(dst.fp(), dst.fp(), 0);
+ Pshufd(dst.fp(), dst.fp(), uint8_t{0});
}
void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
Pinsrd(dst.fp(), src.low_gp(), 0);
Pinsrd(dst.fp(), src.high_gp(), 1);
- Pshufd(dst.fp(), dst.fp(), 0x44);
+ Pshufd(dst.fp(), dst.fp(), uint8_t{0x44});
}
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
@@ -3145,7 +3148,7 @@ void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
} else if (CpuFeatures::IsSupported(SSE4_2)) {
// 2. SSE4_2, dst == lhs.
if (dst != lhs) {
- movdqa(dst.fp(), lhs.fp());
+ movaps(dst.fp(), lhs.fp());
}
I64x2GtS(dst.fp(), dst.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
} else {
@@ -3174,7 +3177,7 @@ void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegList::ForRegs(lhs));
// macro-assembler uses kScratchDoubleReg, so don't use it.
I64x2GeS(tmp.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
- movdqa(dst.fp(), tmp.fp());
+ movaps(dst.fp(), tmp.fp());
} else {
I64x2GeS(dst.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
}
@@ -3290,11 +3293,11 @@ void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
LiftoffRegister mask) {
- // Ensure that we don't overwrite any inputs with the movdqu below.
+ // Ensure that we don't overwrite any inputs with the movaps below.
DCHECK_NE(dst, src1);
DCHECK_NE(dst, src2);
if (!CpuFeatures::IsSupported(AVX) && dst != mask) {
- movdqu(dst.fp(), mask.fp());
+ movaps(dst.fp(), mask.fp());
S128Select(dst.fp(), dst.fp(), src1.fp(), src2.fp(),
liftoff::kScratchDoubleReg);
} else {
@@ -3319,7 +3322,7 @@ void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
liftoff::EmitAnyTrue(this, dst, src);
}
-void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqb>(this, dst, src);
}
@@ -3350,7 +3353,7 @@ void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
vpand(dst.fp(), lhs.fp(), liftoff::kScratchDoubleReg);
} else {
if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- pand(dst.fp(), liftoff::kScratchDoubleReg);
+ andps(dst.fp(), liftoff::kScratchDoubleReg);
}
sub(tmp.gp(), Immediate(8));
Movd(tmp_simd.fp(), tmp.gp());
@@ -3412,7 +3415,7 @@ void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
mov(tmp, mask);
Movd(liftoff::kScratchDoubleReg, tmp);
- Pshufd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, 0);
+ Pshufd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, uint8_t{0});
Pand(dst.fp(), liftoff::kScratchDoubleReg);
}
@@ -3457,68 +3460,6 @@ void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
rhs);
}
-void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- static constexpr RegClass tmp_rc = reg_class_for(kS128);
- LiftoffRegister tmp =
- GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs));
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- // I16x8 view of I8x16
- // left = AAaa AAaa ... AAaa AAaa
- // right= BBbb BBbb ... BBbb BBbb
- // t = 00AA 00AA ... 00AA 00AA
- // s = 00BB 00BB ... 00BB 00BB
- vpsrlw(tmp.fp(), lhs.fp(), 8);
- vpsrlw(liftoff::kScratchDoubleReg, rhs.fp(), 8);
- // t = I16x8Mul(t0, t1)
- //    => __PP __PP ...  __PP  __PP
- vpmullw(tmp.fp(), tmp.fp(), liftoff::kScratchDoubleReg);
- // s = left * 256
- vpsllw(liftoff::kScratchDoubleReg, lhs.fp(), 8);
- // dst = I16x8Mul(left * 256, right)
- //    => pp__ pp__ ...  pp__  pp__
- vpmullw(dst.fp(), liftoff::kScratchDoubleReg, rhs.fp());
- // dst = I16x8Shr(dst, 8)
- //    => 00pp 00pp ...  00pp  00pp
- vpsrlw(dst.fp(), dst.fp(), 8);
- // t = I16x8Shl(t, 8)
- //    => PP00 PP00 ...  PP00  PP00
- vpsllw(tmp.fp(), tmp.fp(), 8);
- // dst = I16x8Or(dst, t)
- //    => PPpp PPpp ...  PPpp  PPpp
- vpor(dst.fp(), dst.fp(), tmp.fp());
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- // I16x8 view of I8x16
- // left = AAaa AAaa ... AAaa AAaa
- // right= BBbb BBbb ... BBbb BBbb
- // t = 00AA 00AA ... 00AA 00AA
- // s = 00BB 00BB ... 00BB 00BB
- movaps(tmp.fp(), dst.fp());
- movaps(liftoff::kScratchDoubleReg, rhs.fp());
- psrlw(tmp.fp(), 8);
- psrlw(liftoff::kScratchDoubleReg, 8);
- // dst = left * 256
- psllw(dst.fp(), 8);
- // t = I16x8Mul(t, s)
- //    => __PP __PP ...  __PP  __PP
- pmullw(tmp.fp(), liftoff::kScratchDoubleReg);
- // dst = I16x8Mul(left * 256, right)
- //    => pp__ pp__ ...  pp__  pp__
- pmullw(dst.fp(), rhs.fp());
- // t = I16x8Shl(t, 8)
- //    => PP00 PP00 ...  PP00  PP00
- psllw(tmp.fp(), 8);
- // dst = I16x8Shr(dst, 8)
- //    => 00pp 00pp ...  00pp  00pp
- psrlw(dst.fp(), 8);
- // dst = I16x8Or(dst, t)
- //    => PPpp PPpp ...  PPpp  PPpp
- por(dst.fp(), tmp.fp());
- }
-}
-
void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -3558,7 +3499,7 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
}
}
-void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqw>(this, dst, src);
}
@@ -3699,29 +3640,27 @@ void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
- I16x8ExtMul(dst.fp(), src1.fp(), src2.fp(), liftoff::kScratchDoubleReg,
- /*low=*/true, /*is_signed=*/true);
+ I16x8ExtMulLow(dst.fp(), src1.fp(), src2.fp(), liftoff::kScratchDoubleReg,
+ /*is_signed=*/true);
}
void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_u(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
- I16x8ExtMul(dst.fp(), src1.fp(), src2.fp(), liftoff::kScratchDoubleReg,
- /*low=*/true, /*is_signed=*/false);
+ I16x8ExtMulLow(dst.fp(), src1.fp(), src2.fp(), liftoff::kScratchDoubleReg,
+ /*is_signed=*/false);
}
void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
- I16x8ExtMul(dst.fp(), src1.fp(), src2.fp(), liftoff::kScratchDoubleReg,
- /*low=*/false, /*is_signed=*/true);
+ I16x8ExtMulHighS(dst.fp(), src1.fp(), src2.fp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
- I16x8ExtMul(dst.fp(), src1.fp(), src2.fp(), liftoff::kScratchDoubleReg,
- /*low=*/false, /*is_signed=*/false);
+ I16x8ExtMulHighU(dst.fp(), src1.fp(), src2.fp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
@@ -3741,7 +3680,7 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
}
}
-void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqd>(this, dst, src);
}
@@ -3922,7 +3861,7 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
}
}
-void LiftoffAssembler::emit_v64x2_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqq>(this, dst, src, SSE4_1);
}
@@ -4193,7 +4132,7 @@ void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
Cmpunordps(dst.fp(), dst.fp(), liftoff::kScratchDoubleReg);
Orps(liftoff::kScratchDoubleReg, dst.fp());
Psrld(dst.fp(), dst.fp(), byte{10});
- Andnps(dst.fp(), liftoff::kScratchDoubleReg);
+ Andnps(dst.fp(), dst.fp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
@@ -4224,7 +4163,7 @@ void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
// Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
Cmpunordps(dst.fp(), dst.fp(), liftoff::kScratchDoubleReg);
Psrld(dst.fp(), dst.fp(), byte{10});
- Andnps(dst.fp(), liftoff::kScratchDoubleReg);
+ Andnps(dst.fp(), dst.fp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
@@ -4424,7 +4363,7 @@ void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
movaps(liftoff::kScratchDoubleReg, src.fp());
cmpeqps(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
if (dst.fp() != src.fp()) movaps(dst.fp(), src.fp());
- pand(dst.fp(), liftoff::kScratchDoubleReg);
+ andps(dst.fp(), liftoff::kScratchDoubleReg);
}
// Set top bit if >= 0 (but not -0.0!).
Pxor(liftoff::kScratchDoubleReg, dst.fp());
@@ -4913,7 +4852,7 @@ void LiftoffAssembler::CallC(const ValueKindSig* sig,
}
// Load potential output value from the buffer on the stack.
- if (out_argument_kind != kStmt) {
+ if (out_argument_kind != kVoid) {
liftoff::Load(this, *next_result_reg, esp, 0, out_argument_kind);
}
@@ -4967,36 +4906,49 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
add(esp, Immediate(size));
}
-void LiftoffStackSlots::Construct() {
+void LiftoffStackSlots::Construct(int param_slots) {
+ DCHECK_LT(0, slots_.size());
+ SortInPushOrder();
+ int last_stack_slot = param_slots;
for (auto& slot : slots_) {
+ const int stack_slot = slot.dst_slot_;
+ int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
+ DCHECK_LT(0, stack_decrement);
+ last_stack_slot = stack_slot;
const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack:
// The combination of AllocateStackSpace and 2 movdqu is usually smaller
// in code size than doing 4 pushes.
if (src.kind() == kS128) {
- asm_->AllocateStackSpace(sizeof(double) * 2);
+ asm_->AllocateStackSpace(stack_decrement);
asm_->movdqu(liftoff::kScratchDoubleReg,
liftoff::GetStackSlot(slot.src_offset_));
asm_->movdqu(Operand(esp, 0), liftoff::kScratchDoubleReg);
break;
}
if (src.kind() == kF64) {
+ asm_->AllocateStackSpace(stack_decrement - kDoubleSize);
DCHECK_EQ(kLowWord, slot.half_);
asm_->push(liftoff::GetHalfStackSlot(slot.src_offset_, kHighWord));
+ stack_decrement = kSystemPointerSize;
}
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
asm_->push(liftoff::GetHalfStackSlot(slot.src_offset_, slot.half_));
break;
case LiftoffAssembler::VarState::kRegister:
if (src.kind() == kI64) {
liftoff::push(
asm_, slot.half_ == kLowWord ? src.reg().low() : src.reg().high(),
- kI32);
+ kI32, stack_decrement - kSystemPointerSize);
} else {
- liftoff::push(asm_, src.reg(), src.kind());
+ int pushed_bytes = SlotSizeInBytes(slot);
+ liftoff::push(asm_, src.reg(), src.kind(),
+ stack_decrement - pushed_bytes);
}
break;
case LiftoffAssembler::VarState::kIntConst:
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
// The high word is the sign extension of the low word.
asm_->push(Immediate(slot.half_ == kLowWord ? src.i32_const()
: src.i32_const() >> 31));
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
index 11b2e4993c8..9ed45932b3e 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
@@ -48,9 +48,9 @@ constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
#elif V8_TARGET_ARCH_ARM
-// r7: cp, r10: root, r11: fp, r12: ip, r13: sp, r14: lr, r15: pc.
+// r10: root, r11: fp, r12: ip, r13: sp, r14: lr, r15: pc.
constexpr RegList kLiftoffAssemblerGpCacheRegs =
- Register::ListOf(r0, r1, r2, r3, r4, r5, r6, r8, r9);
+ Register::ListOf(r0, r1, r2, r3, r4, r5, r6, r7, r8, r9);
// d13: zero, d14-d15: scratch
constexpr RegList kLiftoffAssemblerFpCacheRegs = LowDwVfpRegister::ListOf(
@@ -58,11 +58,11 @@ constexpr RegList kLiftoffAssemblerFpCacheRegs = LowDwVfpRegister::ListOf(
#elif V8_TARGET_ARCH_ARM64
-// x16: ip0, x17: ip1, x18: platform register, x26: root, x27: cp, x29: fp,
+// x16: ip0, x17: ip1, x18: platform register, x26: root, x28: base, x29: fp,
// x30: lr, x31: xzr.
constexpr RegList kLiftoffAssemblerGpCacheRegs =
CPURegister::ListOf(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12,
- x13, x14, x15, x19, x20, x21, x22, x23, x24, x25, x28);
+ x13, x14, x15, x19, x20, x21, x22, x23, x24, x25, x27);
// d15: fp_zero, d30-d31: macro-assembler scratch V Registers.
constexpr RegList kLiftoffAssemblerFpCacheRegs = CPURegister::ListOf(
@@ -72,7 +72,7 @@ constexpr RegList kLiftoffAssemblerFpCacheRegs = CPURegister::ListOf(
#elif V8_TARGET_ARCH_S390X
constexpr RegList kLiftoffAssemblerGpCacheRegs =
- Register::ListOf(r2, r3, r4, r5, r6, r7, r8);
+ Register::ListOf(r2, r3, r4, r5, r6, r7, r8, cp);
constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12);
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index 3a8d7ba01eb..a544460ab98 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -25,7 +25,9 @@ namespace wasm {
using VarState = LiftoffAssembler::VarState;
using ValueKindSig = LiftoffAssembler::ValueKindSig;
-constexpr ValueKind LiftoffAssembler::kIntPtr;
+constexpr ValueKind LiftoffAssembler::kPointerKind;
+constexpr ValueKind LiftoffAssembler::kTaggedKind;
+constexpr ValueKind LiftoffAssembler::kSmiKind;
namespace {
@@ -91,21 +93,6 @@ class StackTransferRecipe {
DCHECK(load_dst_regs_.is_empty());
}
-#if DEBUG
- bool CheckCompatibleStackSlotTypes(ValueKind dst, ValueKind src) {
- if (is_object_reference_type(dst)) {
- // Since Liftoff doesn't do accurate type tracking (e.g. on loop back
- // edges), we only care that pointer types stay amongst pointer types.
- // It's fine if ref/optref overwrite each other.
- DCHECK(is_object_reference_type(src));
- } else {
- // All other types (primitive numbers, RTTs, bottom/stmt) must be equal.
- DCHECK_EQ(dst, src);
- }
- return true; // Dummy so this can be called via DCHECK.
- }
-#endif
-
V8_INLINE void TransferStackSlot(const VarState& dst, const VarState& src) {
DCHECK(CheckCompatibleStackSlotTypes(dst.kind(), src.kind()));
if (dst.is_reg()) {
@@ -533,7 +520,7 @@ void LiftoffAssembler::CacheState::GetTaggedSlotsForOOLCode(
ZoneVector<int>* slots, LiftoffRegList* spills,
SpillLocation spill_location) {
for (const auto& slot : stack_state) {
- if (!is_reference_type(slot.kind())) continue;
+ if (!is_reference(slot.kind())) continue;
if (spill_location == SpillLocation::kTopOfStack && slot.is_reg()) {
// Registers get spilled just before the call to the runtime. In {spills}
@@ -550,10 +537,22 @@ void LiftoffAssembler::CacheState::GetTaggedSlotsForOOLCode(
void LiftoffAssembler::CacheState::DefineSafepoint(Safepoint& safepoint) {
for (const auto& slot : stack_state) {
- DCHECK(!slot.is_reg());
+ if (is_reference(slot.kind())) {
+ DCHECK(slot.is_stack());
+ safepoint.DefinePointerSlot(GetSafepointIndexForStackSlot(slot));
+ }
+ }
+}
- if (is_reference_type(slot.kind())) {
+void LiftoffAssembler::CacheState::DefineSafepointWithCalleeSavedRegisters(
+ Safepoint& safepoint) {
+ for (const auto& slot : stack_state) {
+ if (!is_reference(slot.kind())) continue;
+ if (slot.is_stack()) {
safepoint.DefinePointerSlot(GetSafepointIndexForStackSlot(slot));
+ } else {
+ DCHECK(slot.is_reg());
+ safepoint.DefineRegister(slot.reg().gp().code());
}
}
}
@@ -582,8 +581,8 @@ LiftoffAssembler::LiftoffAssembler(std::unique_ptr<AssemblerBuffer> buffer)
}
LiftoffAssembler::~LiftoffAssembler() {
- if (num_locals_ > kInlineLocalTypes) {
- base::Free(more_local_types_);
+ if (num_locals_ > kInlineLocalKinds) {
+ base::Free(more_local_kinds_);
}
}
@@ -642,6 +641,15 @@ void LiftoffAssembler::DropValues(int count) {
}
}
+void LiftoffAssembler::DropValue(int depth) {
+ auto* dropped = cache_state_.stack_state.begin() + depth;
+ if (dropped->is_reg()) {
+ cache_state_.dec_used(dropped->reg());
+ }
+ std::copy(dropped + 1, cache_state_.stack_state.end(), dropped);
+ cache_state_.stack_state.pop_back();
+}
+
void LiftoffAssembler::PrepareLoopArgs(int num) {
for (int i = 0; i < num; ++i) {
VarState& slot = cache_state_.stack_state.end()[-1 - i];
@@ -700,15 +708,15 @@ void LiftoffAssembler::MergeFullStackWith(CacheState& target,
transfers.TransferStackSlot(target.stack_state[i], source.stack_state[i]);
}
+ // Full stack merging is only done for forward jumps, so we can just clear the
+ // instance cache register at the target in case of mismatch.
if (source.cached_instance != target.cached_instance) {
- // Backward jumps (to loop headers) do not have a cached instance anyway, so
- // ignore this. On forward jumps, just reset the cached instance in the
- // target state.
target.ClearCachedInstanceRegister();
}
}
-void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity) {
+void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity,
+ JumpDirection jump_direction) {
// Before: ----------------|----- (discarded) ----|--- arity ---|
// ^target_stack_height ^stack_base ^stack_height
// After: ----|-- arity --|
@@ -730,11 +738,21 @@ void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity) {
cache_state_.stack_state[stack_base + i]);
}
- if (cache_state_.cached_instance != target.cached_instance) {
- // Backward jumps (to loop headers) do not have a cached instance anyway, so
- // ignore this. On forward jumps, just reset the cached instance in the
- // target state.
- target.ClearCachedInstanceRegister();
+ if (cache_state_.cached_instance != target.cached_instance &&
+ target.cached_instance != no_reg) {
+ if (jump_direction == kForwardJump) {
+ // On forward jumps, just reset the cached instance in the target state.
+ target.ClearCachedInstanceRegister();
+ } else {
+ // On backward jumps, we already generated code assuming that the instance
+ // is available in that register. Thus move it there.
+ if (cache_state_.cached_instance == no_reg) {
+ LoadInstanceFromFrame(target.cached_instance);
+ } else {
+ Move(target.cached_instance, cache_state_.cached_instance,
+ kPointerKind);
+ }
+ }
}
}
@@ -785,7 +803,7 @@ void LiftoffAssembler::ClearRegister(
if (reg != *use) continue;
if (replacement == no_reg) {
replacement = GetUnusedRegister(kGpReg, pinned).gp();
- Move(replacement, reg, LiftoffAssembler::kIntPtr);
+ Move(replacement, reg, kPointerKind);
}
// We cannot leave this loop early. There may be multiple uses of {reg}.
*use = replacement;
@@ -799,8 +817,9 @@ void PrepareStackTransfers(const ValueKindSig* sig,
LiftoffStackSlots* stack_slots,
StackTransferRecipe* stack_transfers,
LiftoffRegList* param_regs) {
- // Process parameters backwards, such that pushes of caller frame slots are
- // in the correct order.
+ // Process parameters backwards, to reduce the amount of Slot sorting for
+ // the most common case - a normal Wasm Call. Slots will be mostly unsorted
+ // in the Builtin call case.
uint32_t call_desc_input_idx =
static_cast<uint32_t>(call_descriptor->InputCount());
uint32_t num_params = static_cast<uint32_t>(sig->parameter_count());
@@ -834,7 +853,8 @@ void PrepareStackTransfers(const ValueKindSig* sig,
}
} else {
DCHECK(loc.IsCallerFrameSlot());
- stack_slots->Add(slot, stack_offset, half);
+ int param_offset = -loc.GetLocation() - 1;
+ stack_slots->Add(slot, stack_offset, half, param_offset);
}
}
}
@@ -851,10 +871,10 @@ void LiftoffAssembler::PrepareBuiltinCall(
PrepareStackTransfers(sig, call_descriptor, params.begin(), &stack_slots,
&stack_transfers, &param_regs);
SpillAllRegisters();
- // Create all the slots.
- // Builtin stack parameters are pushed in reversed order.
- stack_slots.Reverse();
- stack_slots.Construct();
+ int param_slots = static_cast<int>(call_descriptor->ParameterSlotCount());
+ if (param_slots > 0) {
+ stack_slots.Construct(param_slots);
+ }
// Execute the stack transfers before filling the instance register.
stack_transfers.Execute();
@@ -894,9 +914,11 @@ void LiftoffAssembler::PrepareCall(const ValueKindSig* sig,
param_regs.set(instance_reg);
if (target_instance && *target_instance != instance_reg) {
stack_transfers.MoveRegister(LiftoffRegister(instance_reg),
- LiftoffRegister(*target_instance), kIntPtr);
+ LiftoffRegister(*target_instance),
+ kPointerKind);
}
+ int param_slots = static_cast<int>(call_descriptor->ParameterSlotCount());
if (num_params) {
uint32_t param_base = cache_state_.stack_height() - num_params;
PrepareStackTransfers(sig, call_descriptor,
@@ -912,17 +934,19 @@ void LiftoffAssembler::PrepareCall(const ValueKindSig* sig,
if (!free_regs.is_empty()) {
LiftoffRegister new_target = free_regs.GetFirstRegSet();
stack_transfers.MoveRegister(new_target, LiftoffRegister(*target),
- kIntPtr);
+ kPointerKind);
*target = new_target.gp();
} else {
- stack_slots.Add(LiftoffAssembler::VarState(LiftoffAssembler::kIntPtr,
- LiftoffRegister(*target), 0));
+ stack_slots.Add(VarState(kPointerKind, LiftoffRegister(*target), 0),
+ param_slots);
+ param_slots++;
*target = no_reg;
}
}
- // Create all the slots.
- stack_slots.Construct();
+ if (param_slots > 0) {
+ stack_slots.Construct(param_slots);
+ }
// Execute the stack transfers before filling the instance register.
stack_transfers.Execute();
// Pop parameters from the value stack.
@@ -977,7 +1001,7 @@ void LiftoffAssembler::FinishCall(const ValueKindSig* sig,
reg_pair[1].gp()));
}
}
- int return_slots = static_cast<int>(call_descriptor->StackReturnCount());
+ int return_slots = static_cast<int>(call_descriptor->ReturnSlotCount());
RecordUsedSpillOffset(TopSpillOffset() + return_slots * kSystemPointerSize);
}
@@ -1203,10 +1227,10 @@ void LiftoffAssembler::SpillRegister(LiftoffRegister reg) {
void LiftoffAssembler::set_num_locals(uint32_t num_locals) {
DCHECK_EQ(0, num_locals_); // only call this once.
num_locals_ = num_locals;
- if (num_locals > kInlineLocalTypes) {
- more_local_types_ = reinterpret_cast<ValueKind*>(
+ if (num_locals > kInlineLocalKinds) {
+ more_local_kinds_ = reinterpret_cast<ValueKind*>(
base::Malloc(num_locals * sizeof(ValueKind)));
- DCHECK_NOT_NULL(more_local_types_);
+ DCHECK_NOT_NULL(more_local_kinds_);
}
}
@@ -1223,6 +1247,21 @@ std::ostream& operator<<(std::ostream& os, VarState slot) {
UNREACHABLE();
}
+#if DEBUG
+bool CheckCompatibleStackSlotTypes(ValueKind a, ValueKind b) {
+ if (is_object_reference(a)) {
+ // Since Liftoff doesn't do accurate type tracking (e.g. on loop back
+ // edges), we only care that pointer types stay amongst pointer types.
+ // It's fine if ref/optref overwrite each other.
+ DCHECK(is_object_reference(b));
+ } else {
+ // All other types (primitive numbers, RTTs, bottom/stmt) must be equal.
+ DCHECK_EQ(a, b);
+ }
+ return true; // Dummy so this can be called via DCHECK.
+}
+#endif
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 13c0d45c1e0..3090bc81659 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -73,7 +73,11 @@ class LiftoffAssembler : public TurboAssembler {
// Each slot in our stack frame currently has exactly 8 bytes.
static constexpr int kStackSlotSize = 8;
- static constexpr ValueKind kIntPtr = kSystemPointerSize == 8 ? kI64 : kI32;
+ static constexpr ValueKind kPointerKind =
+ kSystemPointerSize == kInt32Size ? kI32 : kI64;
+ static constexpr ValueKind kTaggedKind =
+ kTaggedSize == kInt32Size ? kI32 : kI64;
+ static constexpr ValueKind kSmiKind = kTaggedKind;
using ValueKindSig = Signature<ValueKind>;
@@ -189,6 +193,8 @@ class LiftoffAssembler : public TurboAssembler {
void DefineSafepoint(Safepoint& safepoint);
+ void DefineSafepointWithCalleeSavedRegisters(Safepoint& safepoint);
+
base::SmallVector<VarState, 8> stack_state;
LiftoffRegList used_registers;
uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
@@ -271,11 +277,17 @@ class LiftoffAssembler : public TurboAssembler {
Register TrySetCachedInstanceRegister(LiftoffRegList pinned) {
DCHECK_EQ(no_reg, cached_instance);
- LiftoffRegList candidates = kGpCacheRegList.MaskOut(pinned);
- if (!has_unused_register(candidates)) return no_reg;
- SetInstanceCacheRegister(unused_register(candidates).gp());
- DCHECK_NE(no_reg, cached_instance);
- return cached_instance;
+ LiftoffRegList available_regs =
+ kGpCacheRegList.MaskOut(pinned).MaskOut(used_registers);
+ if (available_regs.is_empty()) return no_reg;
+ // Prefer the {kWasmInstanceRegister}, because that's where the instance
+ // initially is, and where it needs to be for calls.
+ Register new_cache_reg = available_regs.has(kWasmInstanceRegister)
+ ? kWasmInstanceRegister
+ : available_regs.GetFirstRegSet().gp();
+ SetInstanceCacheRegister(new_cache_reg);
+ DCHECK_EQ(new_cache_reg, cached_instance);
+ return new_cache_reg;
}
void ClearCachedInstanceRegister() {
@@ -410,6 +422,10 @@ class LiftoffAssembler : public TurboAssembler {
void DropValues(int count);
+ // Careful: this indexes "from the other end", i.e. depth=0 is the value
+ // at the bottom of the stack!
+ void DropValue(int depth);
+
// Ensure that the loop inputs are either in a register or spilled to the
// stack, so that we can merge different values on the back-edge.
void PrepareLoopArgs(int num);
@@ -434,6 +450,16 @@ class LiftoffAssembler : public TurboAssembler {
cache_state_.stack_state.emplace_back(kind, reg, NextSpillOffset(kind));
}
+ // Assumes that the exception is in {kReturnRegister0}. This is where the
+ // exception is stored by the unwinder after a throwing call.
+ void PushException() {
+ LiftoffRegister reg{kReturnRegister0};
+ // This is used after a call, so {kReturnRegister0} is not used yet.
+ DCHECK(cache_state_.is_free(reg));
+ cache_state_.inc_used(reg);
+ cache_state_.stack_state.emplace_back(kRef, reg, NextSpillOffset(kRef));
+ }
+
void PushConstant(ValueKind kind, int32_t i32_const) {
DCHECK(kind == kI32 || kind == kI64);
cache_state_.stack_state.emplace_back(kind, i32_const,
@@ -497,8 +523,9 @@ class LiftoffAssembler : public TurboAssembler {
void MaterializeMergedConstants(uint32_t arity);
+ enum JumpDirection { kForwardJump, kBackwardJump };
void MergeFullStackWith(CacheState& target, const CacheState& source);
- void MergeStackWith(CacheState& target, uint32_t arity);
+ void MergeStackWith(CacheState& target, uint32_t arity, JumpDirection);
void Spill(VarState* slot);
void SpillLocals();
@@ -610,6 +637,8 @@ class LiftoffAssembler : public TurboAssembler {
inline void LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg, int32_t offset_imm,
LiftoffRegList pinned);
+ inline void LoadFullPointer(Register dst, Register src_addr,
+ int32_t offset_imm);
enum SkipWriteBarrier : bool {
kSkipWriteBarrier = true,
kNoSkipWriteBarrier = false
@@ -621,10 +650,10 @@ class LiftoffAssembler : public TurboAssembler {
inline void LoadFixedArrayLengthAsInt32(LiftoffRegister dst, Register array,
LiftoffRegList pinned) {
int offset = FixedArray::kLengthOffset - kHeapObjectTag;
- LoadTaggedSignedAsInt32(dst, array, offset, pinned);
+ LoadSmiAsInt32(dst, array, offset, pinned);
}
- inline void LoadTaggedSignedAsInt32(LiftoffRegister dst, Register src_addr,
- int32_t offset, LiftoffRegList pinned) {
+ inline void LoadSmiAsInt32(LiftoffRegister dst, Register src_addr,
+ int32_t offset, LiftoffRegList pinned) {
if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
DCHECK_EQ(kSmiShiftSize + kSmiTagSize, 4 * kBitsPerByte);
@@ -1026,7 +1055,7 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister src2, LiftoffRegister mask);
inline void emit_i8x16_neg(LiftoffRegister dst, LiftoffRegister src);
inline void emit_v128_anytrue(LiftoffRegister dst, LiftoffRegister src);
- inline void emit_v8x16_alltrue(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i8x16_alltrue(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i8x16_bitmask(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
@@ -1052,8 +1081,6 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister rhs);
inline void emit_i8x16_sub_sat_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs);
inline void emit_i8x16_min_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i8x16_min_u(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1063,7 +1090,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i8x16_max_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i16x8_neg(LiftoffRegister dst, LiftoffRegister src);
- inline void emit_v16x8_alltrue(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i16x8_alltrue(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i16x8_bitmask(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
@@ -1119,7 +1146,7 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister src1,
LiftoffRegister src2);
inline void emit_i32x4_neg(LiftoffRegister dst, LiftoffRegister src);
- inline void emit_v32x4_alltrue(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i32x4_alltrue(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i32x4_bitmask(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
@@ -1166,7 +1193,7 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister src1,
LiftoffRegister src2);
inline void emit_i64x2_neg(LiftoffRegister dst, LiftoffRegister src);
- inline void emit_v64x2_alltrue(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i64x2_alltrue(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1368,7 +1395,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void DropStackSlotsAndRet(uint32_t num_stack_slots);
// Execute a C call. Arguments are pushed to the stack and a pointer to this
- // region is passed to the C function. If {out_argument_kind != kStmt},
+ // region is passed to the C function. If {out_argument_kind != kVoid},
// this is the return value of the C function, stored in {rets[0]}. Further
// outputs (specified in {sig->returns()}) are read from the buffer and stored
// in the remaining {rets} registers.
@@ -1400,16 +1427,16 @@ class LiftoffAssembler : public TurboAssembler {
int GetTotalFrameSize() const { return max_used_spill_offset_; }
- ValueKind local_type(uint32_t index) {
+ ValueKind local_kind(uint32_t index) {
DCHECK_GT(num_locals_, index);
ValueKind* locals =
- num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_;
+ num_locals_ <= kInlineLocalKinds ? local_kinds_ : more_local_kinds_;
return locals[index];
}
- void set_local_type(uint32_t index, ValueKind kind) {
+ void set_local_kind(uint32_t index, ValueKind kind) {
ValueKind* locals =
- num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_;
+ num_locals_ <= kInlineLocalKinds ? local_kinds_ : more_local_kinds_;
locals[index] = kind;
}
@@ -1432,10 +1459,10 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister LoadI64HalfIntoRegister(VarState slot, RegPairHalf half);
uint32_t num_locals_ = 0;
- static constexpr uint32_t kInlineLocalTypes = 16;
+ static constexpr uint32_t kInlineLocalKinds = 16;
union {
- ValueKind local_types_[kInlineLocalTypes];
- ValueKind* more_local_types_;
+ ValueKind local_kinds_[kInlineLocalKinds];
+ ValueKind* more_local_kinds_;
};
static_assert(sizeof(ValueKind) == 1,
"Reconsider this inlining if ValueKind gets bigger");
@@ -1565,32 +1592,58 @@ class LiftoffStackSlots {
LiftoffStackSlots& operator=(const LiftoffStackSlots&) = delete;
void Add(const LiftoffAssembler::VarState& src, uint32_t src_offset,
- RegPairHalf half) {
- slots_.emplace_back(src, src_offset, half);
+ RegPairHalf half, int dst_slot) {
+ DCHECK_LE(0, dst_slot);
+ slots_.emplace_back(src, src_offset, half, dst_slot);
}
- void Add(const LiftoffAssembler::VarState& src) { slots_.emplace_back(src); }
- void Reverse() { std::reverse(slots_.begin(), slots_.end()); }
+ void Add(const LiftoffAssembler::VarState& src, int dst_slot) {
+ DCHECK_LE(0, dst_slot);
+ slots_.emplace_back(src, dst_slot);
+ }
+
+ void SortInPushOrder() {
+ std::sort(slots_.begin(), slots_.end(), [](const Slot& a, const Slot& b) {
+ return a.dst_slot_ > b.dst_slot_;
+ });
+ }
- inline void Construct();
+ inline void Construct(int param_slots);
private:
+ // A logical slot, which may occupy multiple stack slots.
struct Slot {
Slot(const LiftoffAssembler::VarState& src, uint32_t src_offset,
- RegPairHalf half)
- : src_(src), src_offset_(src_offset), half_(half) {}
- explicit Slot(const LiftoffAssembler::VarState& src)
- : src_(src), half_(kLowWord) {}
+ RegPairHalf half, int dst_slot)
+ : src_(src),
+ src_offset_(src_offset),
+ half_(half),
+ dst_slot_(dst_slot) {}
+ Slot(const LiftoffAssembler::VarState& src, int dst_slot)
+ : src_(src), half_(kLowWord), dst_slot_(dst_slot) {}
LiftoffAssembler::VarState src_;
uint32_t src_offset_ = 0;
RegPairHalf half_;
+ int dst_slot_ = 0;
};
+ // Returns the size in bytes of the given logical slot.
+ static int SlotSizeInBytes(const Slot& slot) {
+ const ValueKind kind = slot.src_.kind();
+ if (kind == kS128) return kSimd128Size;
+ if (kind == kF64) return kDoubleSize;
+ return kSystemPointerSize;
+ }
+
base::SmallVector<Slot, 8> slots_;
LiftoffAssembler* const asm_;
};
+#if DEBUG
+bool CheckCompatibleStackSlotTypes(ValueKind a, ValueKind b);
+#endif
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 01264e4e38e..a26df172252 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -4,6 +4,7 @@
#include "src/wasm/baseline/liftoff-compiler.h"
+#include "src/base/enum-set.h"
#include "src/base/optional.h"
#include "src/base/platform/wrappers.h"
#include "src/codegen/assembler-inl.h"
@@ -86,13 +87,12 @@ struct assert_field_size {
constexpr LoadType::LoadTypeValue kPointerLoadType =
kSystemPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load;
-constexpr ValueKind kPointerValueType = kSystemPointerSize == 8 ? kI64 : kI32;
+constexpr ValueKind kPointerKind = LiftoffAssembler::kPointerKind;
+constexpr ValueKind kSmiKind = LiftoffAssembler::kSmiKind;
+constexpr ValueKind kTaggedKind = LiftoffAssembler::kTaggedKind;
-#if V8_TARGET_ARCH_32_BIT || defined(V8_COMPRESS_POINTERS)
-constexpr ValueKind kSmiValueType = kI32;
-#else
-constexpr ValueKind kSmiValueType = kI64;
-#endif
+// Used to construct fixed-size signatures: MakeSig::Returns(...).Params(...);
+using MakeSig = FixedSizeSignature<ValueKind>;
#if V8_TARGET_ARCH_ARM64
// On ARM64, the Assembler keeps track of pointers to Labels to resolve
@@ -213,21 +213,17 @@ class DebugSideTableBuilder {
};
// Adds a new entry in regular code.
- void NewEntry(int pc_offset, Vector<LiftoffAssembler::VarState> stack_state,
- AssumeSpilling assume_spilling) {
- entries_.emplace_back(
- pc_offset, static_cast<int>(stack_state.size()),
- GetChangedStackValues(last_values_, stack_state, assume_spilling));
+ void NewEntry(int pc_offset, Vector<DebugSideTable::Entry::Value> values) {
+ entries_.emplace_back(pc_offset, static_cast<int>(values.size()),
+ GetChangedStackValues(last_values_, values));
}
// Adds a new entry for OOL code, and returns a pointer to a builder for
// modifying that entry.
- EntryBuilder* NewOOLEntry(Vector<LiftoffAssembler::VarState> stack_state,
- AssumeSpilling assume_spilling) {
+ EntryBuilder* NewOOLEntry(Vector<DebugSideTable::Entry::Value> values) {
constexpr int kNoPcOffsetYet = -1;
- ool_entries_.emplace_back(
- kNoPcOffsetYet, static_cast<int>(stack_state.size()),
- GetChangedStackValues(last_ool_values_, stack_state, assume_spilling));
+ ool_entries_.emplace_back(kNoPcOffsetYet, static_cast<int>(values.size()),
+ GetChangedStackValues(last_ool_values_, values));
return &ool_entries_.back();
}
@@ -260,40 +256,16 @@ class DebugSideTableBuilder {
private:
static std::vector<Value> GetChangedStackValues(
std::vector<Value>& last_values,
- Vector<LiftoffAssembler::VarState> stack_state,
- AssumeSpilling assume_spilling) {
+ Vector<DebugSideTable::Entry::Value> values) {
std::vector<Value> changed_values;
int old_stack_size = static_cast<int>(last_values.size());
- last_values.resize(stack_state.size());
+ last_values.resize(values.size());
int index = 0;
- for (const auto& slot : stack_state) {
- Value new_value;
- new_value.index = index;
- new_value.kind = slot.kind();
- switch (slot.loc()) {
- case kIntConst:
- new_value.storage = Entry::kConstant;
- new_value.i32_const = slot.i32_const();
- break;
- case kRegister:
- DCHECK_NE(kDidSpill, assume_spilling);
- if (assume_spilling == kAllowRegisters) {
- new_value.storage = Entry::kRegister;
- new_value.reg_code = slot.reg().liftoff_code();
- break;
- }
- DCHECK_EQ(kAssumeSpilling, assume_spilling);
- V8_FALLTHROUGH;
- case kStack:
- new_value.storage = Entry::kStack;
- new_value.stack_offset = slot.offset();
- break;
- }
-
- if (index >= old_stack_size || last_values[index] != new_value) {
- changed_values.push_back(new_value);
- last_values[index] = new_value;
+ for (const auto& value : values) {
+ if (index >= old_stack_size || last_values[index] != value) {
+ changed_values.push_back(value);
+ last_values[index] = value;
}
++index;
}
@@ -361,13 +333,6 @@ void CheckBailoutAllowed(LiftoffBailoutReason reason, const char* detail,
// https://v8.dev/docs/wasm-shipping-checklist. Some are not though. They are
// listed here explicitly, with a bug assigned to each of them.
- // TODO(6020): Fully implement SIMD in Liftoff.
- STATIC_ASSERT(kStagedFeatures.has_simd());
- if (reason == kSimd) {
- DCHECK(env->enabled_features.has_simd());
- return;
- }
-
// TODO(7581): Fully implement reftypes in Liftoff.
STATIC_ASSERT(kStagedFeatures.has_reftypes());
if (reason == kRefTypes) {
@@ -375,12 +340,6 @@ void CheckBailoutAllowed(LiftoffBailoutReason reason, const char* detail,
return;
}
- // TODO(v8:8091): Implement exception handling in Liftoff.
- if (reason == kExceptionHandling) {
- DCHECK(env->enabled_features.has_eh());
- return;
- }
-
// Otherwise, bailout is not allowed.
FATAL("Liftoff bailout should not happen. Cause: %s\n", detail);
}
@@ -397,10 +356,22 @@ class LiftoffCompiler {
LiftoffAssembler::CacheState state;
};
+ struct TryInfo {
+ TryInfo() = default;
+ LiftoffAssembler::CacheState catch_state;
+ Label catch_label;
+ bool catch_reached = false;
+ bool in_handler = false;
+ int32_t previous_catch = -1;
+ };
+
struct Control : public ControlBase<Value, validate> {
std::unique_ptr<ElseState> else_state;
LiftoffAssembler::CacheState label_state;
MovableLabel label;
+ std::unique_ptr<TryInfo> try_info;
+ // Number of exceptions on the stack below this control.
+ int num_exceptions = 0;
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(Control);
@@ -438,6 +409,7 @@ class LiftoffCompiler {
WasmCode::RuntimeStubId stub;
WasmCodePosition position;
LiftoffRegList regs_to_save;
+ Register cached_instance;
OutOfLineSafepointInfo* safepoint_info;
uint32_t pc; // for trap handler.
// These two pointers will only be used for debug code:
@@ -457,6 +429,7 @@ class LiftoffCompiler {
s, // stub
pos, // position
{}, // regs_to_save
+ no_reg, // cached_instance
safepoint_info, // safepoint_info
pc, // pc
spilled_registers, // spilled_registers
@@ -465,7 +438,7 @@ class LiftoffCompiler {
}
static OutOfLineCode StackCheck(
WasmCodePosition pos, LiftoffRegList regs_to_save,
- SpilledRegistersForInspection* spilled_regs,
+ Register cached_instance, SpilledRegistersForInspection* spilled_regs,
OutOfLineSafepointInfo* safepoint_info,
DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder) {
return {
@@ -474,6 +447,7 @@ class LiftoffCompiler {
WasmCode::kWasmStackGuard, // stub
pos, // position
regs_to_save, // regs_to_save
+ cached_instance, // cached_instance
safepoint_info, // safepoint_info
0, // pc
spilled_regs, // spilled_registers
@@ -502,7 +476,8 @@ class LiftoffCompiler {
safepoint_table_builder_(compilation_zone_),
next_breakpoint_ptr_(breakpoints.begin()),
next_breakpoint_end_(breakpoints.end()),
- dead_breakpoint_(dead_breakpoint) {
+ dead_breakpoint_(dead_breakpoint),
+ handlers_(compilation_zone) {
if (breakpoints.empty()) {
next_breakpoint_ptr_ = next_breakpoint_end_ = nullptr;
}
@@ -513,7 +488,7 @@ class LiftoffCompiler {
void GetCode(CodeDesc* desc) {
asm_.GetCode(nullptr, desc, &safepoint_table_builder_,
- Assembler::kNoHandlerTable);
+ handler_table_offset_);
}
OwnedVector<uint8_t> GetSourcePositionTable() {
@@ -547,17 +522,27 @@ class LiftoffCompiler {
return true;
}
- bool CheckSupportedType(FullDecoder* decoder, ValueKind kind,
- const char* context) {
- LiftoffBailoutReason bailout_reason = kOtherReason;
+ V8_INLINE bool CheckSupportedType(FullDecoder* decoder, ValueKind kind,
+ const char* context) {
+ if (V8_LIKELY(supported_types_.contains(kind))) return true;
+ return MaybeBailoutForUnsupportedType(decoder, kind, context);
+ }
+
+ V8_NOINLINE bool MaybeBailoutForUnsupportedType(FullDecoder* decoder,
+ ValueKind kind,
+ const char* context) {
+ DCHECK(!supported_types_.contains(kind));
+
+ // Lazily update {supported_types_}; then check again.
+ if (CpuFeatures::SupportsWasmSimd128()) supported_types_.Add(kS128);
+ if (FLAG_experimental_liftoff_extern_ref) {
+ supported_types_.Add(kExternRefSupported);
+ }
+ if (supported_types_.contains(kind)) return true;
+
+ LiftoffBailoutReason bailout_reason;
switch (kind) {
- case kI32:
- case kI64:
- case kF32:
- case kF64:
- return true;
case kS128:
- if (CpuFeatures::SupportsWasmSimd128()) return true;
bailout_reason = kMissingCPUFeature;
break;
case kRef:
@@ -566,11 +551,9 @@ class LiftoffCompiler {
case kRttWithDepth:
case kI8:
case kI16:
- if (FLAG_experimental_liftoff_extern_ref) return true;
bailout_reason = kRefTypes;
break;
- case kBottom:
- case kStmt:
+ default:
UNREACHABLE();
}
EmbeddedVector<char, 128> buffer;
@@ -592,6 +575,7 @@ class LiftoffCompiler {
Control* c = decoder->control_at(i);
Unuse(c->label.get());
if (c->else_state) Unuse(c->else_state->label.get());
+ if (c->try_info != nullptr) Unuse(&c->try_info->catch_label);
}
for (auto& ool : out_of_line_code_) Unuse(ool.label.get());
#endif
@@ -606,10 +590,19 @@ class LiftoffCompiler {
__ set_num_locals(num_locals);
for (int i = 0; i < num_locals; ++i) {
ValueKind kind = decoder->local_type(i).kind();
- __ set_local_type(i, kind);
+ __ set_local_kind(i, kind);
}
}
+ // TODO(ahaas): Make this function constexpr once GCC allows it.
+ LiftoffRegList RegsUnusedByParams() {
+ LiftoffRegList regs = kGpCacheRegList;
+ for (auto reg : kGpParamRegisters) {
+ regs.clear(reg);
+ }
+ return regs;
+ }
+
// Returns the number of inputs processed (1 or 2).
uint32_t ProcessParameter(ValueKind kind, uint32_t input_idx) {
const bool needs_pair = needs_gp_reg_pair(kind);
@@ -624,7 +617,12 @@ class LiftoffCompiler {
location.AsRegister());
}
DCHECK(location.IsCallerFrameSlot());
- LiftoffRegister reg = __ GetUnusedRegister(rc, pinned);
+ // For reference type parameters we have to use registers that were not
+ // used for parameters because some reference type stack parameters may
+ // get processed before some value type register parameters.
+ LiftoffRegister reg = is_reference(reg_kind)
+ ? __ GetUnusedRegister(RegsUnusedByParams())
+ : __ GetUnusedRegister(rc, pinned);
__ LoadCallerFrameSlot(reg, -location.AsCallerFrameSlot(), reg_kind);
return reg;
};
@@ -642,12 +640,23 @@ class LiftoffCompiler {
return needs_pair ? 2 : 1;
}
- void StackCheck(WasmCodePosition position) {
+ void StackCheck(FullDecoder* decoder, WasmCodePosition position) {
DEBUG_CODE_COMMENT("stack check");
if (!FLAG_wasm_stack_checks || !env_->runtime_exception_support) return;
+
+ // Loading the limit address can change the stack state, hence do this
+ // before storing information about registers.
+ Register limit_address = __ GetUnusedRegister(kGpReg, {}).gp();
+ LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kSystemPointerSize,
+ {});
+
LiftoffRegList regs_to_save = __ cache_state()->used_registers;
+ // The cached instance will be reloaded separately.
+ if (__ cache_state()->cached_instance != no_reg) {
+ DCHECK(regs_to_save.has(__ cache_state()->cached_instance));
+ regs_to_save.clear(__ cache_state()->cached_instance);
+ }
SpilledRegistersForInspection* spilled_regs = nullptr;
- Register limit_address = __ GetUnusedRegister(kGpReg, {}).gp();
OutOfLineSafepointInfo* safepoint_info =
compilation_zone_->New<OutOfLineSafepointInfo>(compilation_zone_);
@@ -661,17 +670,11 @@ class LiftoffCompiler {
spilled_regs = GetSpilledRegistersForInspection();
}
out_of_line_code_.push_back(OutOfLineCode::StackCheck(
- position, regs_to_save, spilled_regs, safepoint_info,
- RegisterOOLDebugSideTableEntry()));
+ position, regs_to_save, __ cache_state()->cached_instance, spilled_regs,
+ safepoint_info, RegisterOOLDebugSideTableEntry(decoder)));
OutOfLineCode& ool = out_of_line_code_.back();
- LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kSystemPointerSize,
- {});
__ StackCheck(ool.label.get(), limit_address);
__ bind(ool.continuation.get());
- // If the stack check triggers, we lose the cached instance register.
- // TODO(clemensb): Restore that register in the OOL code so it's always
- // available at the beginning of the actual function code.
- __ cache_state()->ClearCachedInstanceRegister();
}
bool SpillLocalsInitially(FullDecoder* decoder, uint32_t num_params) {
@@ -686,7 +689,7 @@ class LiftoffCompiler {
// because other types cannot be initialized to constants.
for (uint32_t param_idx = num_params; param_idx < __ num_locals();
++param_idx) {
- ValueKind kind = __ local_type(param_idx);
+ ValueKind kind = __ local_kind(param_idx);
if (kind != kI32 && kind != kI64) return true;
}
return false;
@@ -708,7 +711,7 @@ class LiftoffCompiler {
void StartFunctionBody(FullDecoder* decoder, Control* block) {
for (uint32_t i = 0; i < __ num_locals(); ++i) {
- if (!CheckSupportedType(decoder, __ local_type(i), "param")) return;
+ if (!CheckSupportedType(decoder, __ local_kind(i), "param")) return;
}
// Parameter 0 is the instance parameter.
@@ -745,7 +748,7 @@ class LiftoffCompiler {
// Input 0 is the code target, 1 is the instance. First parameter at 2.
uint32_t input_idx = kInstanceParameterIndex + 1;
for (uint32_t param_idx = 0; param_idx < num_params; ++param_idx) {
- input_idx += ProcessParameter(__ local_type(param_idx), input_idx);
+ input_idx += ProcessParameter(__ local_kind(param_idx), input_idx);
}
int params_size = __ TopSpillOffset();
DCHECK_EQ(input_idx, descriptor_->InputCount());
@@ -755,7 +758,7 @@ class LiftoffCompiler {
if (SpillLocalsInitially(decoder, num_params)) {
for (uint32_t param_idx = num_params; param_idx < __ num_locals();
++param_idx) {
- ValueKind kind = __ local_type(param_idx);
+ ValueKind kind = __ local_kind(param_idx);
__ PushStack(kind);
}
int spill_size = __ TopSpillOffset() - params_size;
@@ -763,7 +766,7 @@ class LiftoffCompiler {
} else {
for (uint32_t param_idx = num_params; param_idx < __ num_locals();
++param_idx) {
- ValueKind kind = __ local_type(param_idx);
+ ValueKind kind = __ local_kind(param_idx);
__ PushConstant(kind, int32_t{0});
}
}
@@ -773,8 +776,8 @@ class LiftoffCompiler {
Register null_ref_reg = no_reg;
for (uint32_t local_index = num_params; local_index < __ num_locals();
++local_index) {
- ValueKind kind = __ local_type(local_index);
- if (is_reference_type(kind)) {
+ ValueKind kind = __ local_kind(local_index);
+ if (is_reference(kind)) {
if (null_ref_reg == no_reg) {
null_ref_reg = __ GetUnusedRegister(kGpReg, {}).gp();
LoadNullValue(null_ref_reg, {});
@@ -792,7 +795,7 @@ class LiftoffCompiler {
// The function-prologue stack check is associated with position 0, which
// is never a position of any instruction in the function.
- StackCheck(0);
+ StackCheck(decoder, 0);
if (FLAG_wasm_dynamic_tiering) {
// TODO(arobin): Avoid spilling registers unconditionally.
@@ -863,7 +866,7 @@ class LiftoffCompiler {
DEBUG_CODE_COMMENT("leave frame");
__ LeaveFrame(StackFrame::WASM);
__ DropStackSlotsAndRet(
- static_cast<uint32_t>(descriptor_->StackParameterCount()));
+ static_cast<uint32_t>(descriptor_->ParameterSlotCount()));
return;
}
@@ -914,6 +917,9 @@ class LiftoffCompiler {
__ Fill(entry.reg, entry.offset, entry.kind);
}
}
+ if (ool->cached_instance != no_reg) {
+ __ LoadInstanceFromFrame(ool->cached_instance);
+ }
__ emit_jump(ool->continuation.get());
} else {
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
@@ -933,9 +939,18 @@ class LiftoffCompiler {
__ PatchPrepareStackFrame(pc_offset_stack_frame_construction_);
__ FinishCode();
safepoint_table_builder_.Emit(&asm_, __ GetTotalFrameSlotCountForGC());
+ // Emit the handler table.
+ if (!handlers_.empty()) {
+ handler_table_offset_ = HandlerTable::EmitReturnTableStart(&asm_);
+ for (auto& handler : handlers_) {
+ HandlerTable::EmitReturnEntry(&asm_, handler.pc_offset,
+ handler.handler.get()->pos());
+ }
+ }
__ MaybeEmitOutOfLineConstantPool();
// The previous calls may have also generated a bailout.
DidAssemblerBailout(decoder);
+ DCHECK_EQ(num_exceptions_, 0);
}
void OnFirstError(FullDecoder* decoder) {
@@ -1028,12 +1043,18 @@ class LiftoffCompiler {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
__ CallRuntimeStub(WasmCode::kWasmDebugBreak);
- // TODO(ahaas): Define a proper safepoint here.
- safepoint_table_builder_.DefineSafepoint(&asm_);
- RegisterDebugSideTableEntry(DebugSideTableBuilder::kAllowRegisters);
+ DefineSafepointWithCalleeSavedRegisters();
+ RegisterDebugSideTableEntry(decoder,
+ DebugSideTableBuilder::kAllowRegisters);
}
- void Block(FullDecoder* decoder, Control* block) {}
+ void PushControl(Control* block) {
+ // The Liftoff stack includes implicit exception refs stored for catch
+ // blocks, so that they can be rethrown.
+ block->num_exceptions = num_exceptions_;
+ }
+
+ void Block(FullDecoder* decoder, Control* block) { PushControl(block); }
void Loop(FullDecoder* decoder, Control* loop) {
// Before entering a loop, spill all locals to the stack, in order to free
@@ -1042,8 +1063,6 @@ class LiftoffCompiler {
// TODO(clemensb): Come up with a better strategy here, involving
// pre-analysis of the function.
__ SpillLocals();
- // Same for the cached instance register.
- __ cache_state()->ClearCachedInstanceRegister();
__ PrepareLoopArgs(loop->start_merge.arity);
@@ -1054,29 +1073,158 @@ class LiftoffCompiler {
loop->label_state.Split(*__ cache_state());
// Execute a stack check in the loop header.
- StackCheck(decoder->position());
+ StackCheck(decoder, decoder->position());
+
+ PushControl(loop);
}
void Try(FullDecoder* decoder, Control* block) {
- unsupported(decoder, kExceptionHandling, "try");
+ block->try_info = std::make_unique<TryInfo>();
+ block->try_info->previous_catch = current_catch_;
+ current_catch_ = static_cast<int32_t>(decoder->control_depth() - 1);
+ PushControl(block);
+ }
+
+ // Load the property in {kReturnRegister0}.
+ LiftoffRegister GetExceptionProperty(LiftoffAssembler::VarState& exception,
+ RootIndex root_index) {
+ DCHECK(root_index == RootIndex::kwasm_exception_tag_symbol ||
+ root_index == RootIndex::kwasm_exception_values_symbol);
+
+ LiftoffRegList pinned;
+ LiftoffRegister tag_symbol_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LoadExceptionSymbol(tag_symbol_reg.gp(), pinned, root_index);
+ LiftoffRegister context_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(context_reg.gp(), NativeContext, pinned);
+
+ LiftoffAssembler::VarState tag_symbol(kPointerKind, tag_symbol_reg, 0);
+ LiftoffAssembler::VarState context(kPointerKind, context_reg, 0);
+
+ CallRuntimeStub(WasmCode::kWasmGetOwnProperty,
+ MakeSig::Returns(kPointerKind)
+ .Params(kPointerKind, kPointerKind, kPointerKind),
+ {exception, tag_symbol, context}, kNoSourcePosition);
+
+ return LiftoffRegister(kReturnRegister0);
}
void CatchException(FullDecoder* decoder,
const ExceptionIndexImmediate<validate>& imm,
Control* block, Vector<Value> values) {
- unsupported(decoder, kExceptionHandling, "catch");
+ DCHECK(block->is_try_catch());
+ current_catch_ = block->try_info->previous_catch; // Pop try scope.
+ __ emit_jump(block->label.get());
+
+ // The catch block is unreachable if no possible throws in the try block
+ // exist. We only build a landing pad if some node in the try block can
+ // (possibly) throw. Otherwise the catch environments remain empty.
+ if (!block->try_info->catch_reached) {
+ block->reachability = kSpecOnlyReachable;
+ return;
+ }
+
+ // This is the last use of this label. Re-use the field for the label of the
+ // next catch block, and jump there if the tag does not match.
+ __ bind(&block->try_info->catch_label);
+ new (&block->try_info->catch_label) Label();
+
+ __ cache_state()->Split(block->try_info->catch_state);
+
+ DEBUG_CODE_COMMENT("load caught exception tag");
+ DCHECK_EQ(__ cache_state()->stack_state.back().kind(), kRef);
+ LiftoffRegister caught_tag =
+ GetExceptionProperty(__ cache_state()->stack_state.back(),
+ RootIndex::kwasm_exception_tag_symbol);
+ LiftoffRegList pinned;
+ pinned.set(caught_tag);
+
+ DEBUG_CODE_COMMENT("load expected exception tag");
+ Register imm_tag = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(imm_tag, ExceptionsTable, pinned);
+ __ LoadTaggedPointer(
+ imm_tag, imm_tag, no_reg,
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), {});
+
+ DEBUG_CODE_COMMENT("compare tags");
+ Label caught;
+ __ emit_cond_jump(kEqual, &caught, kI32, imm_tag, caught_tag.gp());
+ // The tags don't match, merge the current state into the catch state and
+ // jump to the next handler.
+ __ MergeFullStackWith(block->try_info->catch_state, *__ cache_state());
+ __ emit_jump(&block->try_info->catch_label);
+
+ __ bind(&caught);
+ if (!block->try_info->in_handler) {
+ block->try_info->in_handler = true;
+ num_exceptions_++;
+ }
+ GetExceptionValues(decoder, __ cache_state()->stack_state.back(),
+ imm.exception);
+ }
+
+ void Rethrow(FullDecoder* decoder,
+ const LiftoffAssembler::VarState& exception) {
+ DCHECK_EQ(exception.kind(), kRef);
+ CallRuntimeStub(WasmCode::kWasmRethrow, MakeSig::Params(kPointerKind),
+ {exception}, decoder->position());
}
void Delegate(FullDecoder* decoder, uint32_t depth, Control* block) {
- unsupported(decoder, kExceptionHandling, "delegate");
+ DCHECK_EQ(block, decoder->control_at(0));
+ Control* target = decoder->control_at(depth);
+ DCHECK(block->is_incomplete_try());
+ __ bind(&block->try_info->catch_label);
+ if (block->try_info->catch_reached) {
+ __ cache_state()->Steal(block->try_info->catch_state);
+ if (depth == decoder->control_depth() - 1) {
+ // Delegate to the caller, do not emit a landing pad.
+ Rethrow(decoder, __ cache_state()->stack_state.back());
+ } else {
+ DCHECK(target->is_incomplete_try());
+ if (!target->try_info->catch_reached) {
+ target->try_info->catch_state.InitMerge(
+ *__ cache_state(), __ num_locals(), 1,
+ target->stack_depth + target->num_exceptions);
+ target->try_info->catch_reached = true;
+ }
+ __ MergeStackWith(target->try_info->catch_state, 1,
+ LiftoffAssembler::kForwardJump);
+ __ emit_jump(&target->try_info->catch_label);
+ }
+ }
+ current_catch_ = block->try_info->previous_catch;
}
- void Rethrow(FullDecoder* decoder, Control* block) {
- unsupported(decoder, kExceptionHandling, "rethrow");
+ void Rethrow(FullDecoder* decoder, Control* try_block) {
+ int index = try_block->try_info->catch_state.stack_height() - 1;
+ auto& exception = __ cache_state()->stack_state[index];
+ Rethrow(decoder, exception);
+ EmitLandingPad(decoder);
}
void CatchAll(FullDecoder* decoder, Control* block) {
- unsupported(decoder, kExceptionHandling, "catch-all");
+ DCHECK(block->is_try_catchall() || block->is_try_catch() ||
+ block->is_try_unwind());
+ DCHECK_EQ(decoder->control_at(0), block);
+
+ current_catch_ = block->try_info->previous_catch; // Pop try scope.
+
+ // The catch block is unreachable if no possible throws in the try block
+ // exist. We only build a landing pad if some node in the try block can
+ // (possibly) throw. Otherwise the catch environments remain empty.
+ if (!block->try_info->catch_reached) {
+ decoder->SetSucceedingCodeDynamicallyUnreachable();
+ return;
+ }
+
+ __ bind(&block->try_info->catch_label);
+ __ cache_state()->Steal(block->try_info->catch_state);
+ if (!block->try_info->in_handler) {
+ block->try_info->in_handler = true;
+ num_exceptions_++;
+ }
}
void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
@@ -1092,14 +1240,27 @@ class LiftoffCompiler {
// Store the state (after popping the value) for executing the else branch.
if_block->else_state->state.Split(*__ cache_state());
+
+ PushControl(if_block);
}
void FallThruTo(FullDecoder* decoder, Control* c) {
- if (c->end_merge.reached) {
- __ MergeFullStackWith(c->label_state, *__ cache_state());
+ if (!c->end_merge.reached) {
+ c->label_state.InitMerge(*__ cache_state(), __ num_locals(),
+ c->end_merge.arity,
+ c->stack_depth + c->num_exceptions);
+ }
+ DCHECK(!c->is_try_catchall());
+ if (c->is_try_catch()) {
+ // Drop the implicit exception ref.
+ DCHECK_EQ(c->label_state.stack_height() + 1,
+ __ cache_state()->stack_height());
+ __ MergeStackWith(c->label_state, c->br_merge()->arity,
+ LiftoffAssembler::kForwardJump);
} else {
- c->label_state.Split(*__ cache_state());
+ __ MergeFullStackWith(c->label_state, *__ cache_state());
}
+ __ emit_jump(c->label.get());
TraceCacheState(decoder);
}
@@ -1122,7 +1283,8 @@ class LiftoffCompiler {
// state, then merge the if state into that.
DCHECK_EQ(c->start_merge.arity, c->end_merge.arity);
c->label_state.InitMerge(c->else_state->state, __ num_locals(),
- c->start_merge.arity, c->stack_depth);
+ c->start_merge.arity,
+ c->stack_depth + c->num_exceptions);
__ MergeFullStackWith(c->label_state, *__ cache_state());
__ emit_jump(c->label.get());
// Merge the else state into the end state.
@@ -1136,11 +1298,34 @@ class LiftoffCompiler {
}
}
+ void FinishTry(FullDecoder* decoder, Control* c) {
+ DCHECK(c->is_try_catch() || c->is_try_catchall() || c->is_try_unwind());
+ if (!c->end_merge.reached) {
+ if (c->try_info->catch_reached) {
+ // Drop the implicit exception ref.
+ __ DropValue(__ num_locals() + c->stack_depth + c->num_exceptions);
+ }
+ // Else we did not enter the catch state, continue with the current state.
+ } else {
+ if (c->reachable()) {
+ __ MergeStackWith(c->label_state, c->br_merge()->arity,
+ LiftoffAssembler::kForwardJump);
+ }
+ __ cache_state()->Steal(c->label_state);
+ }
+ if (c->try_info->catch_reached) {
+ num_exceptions_--;
+ }
+ }
+
void PopControl(FullDecoder* decoder, Control* c) {
if (c->is_loop()) return; // A loop just falls through.
if (c->is_onearmed_if()) {
// Special handling for one-armed ifs.
FinishOneArmedIf(decoder, c);
+ } else if (c->is_try_catch() || c->is_try_catchall() ||
+ c->is_try_unwind()) {
+ FinishTry(decoder, c);
} else if (c->end_merge.reached) {
// There is a merge already. Merge our state into that, then continue with
// that state.
@@ -1170,7 +1355,7 @@ class LiftoffCompiler {
param_bytes += element_size_bytes(param_kind);
}
int out_arg_bytes =
- out_argument_kind == kStmt ? 0 : element_size_bytes(out_argument_kind);
+ out_argument_kind == kVoid ? 0 : element_size_bytes(out_argument_kind);
int stack_bytes = std::max(param_bytes, out_arg_bytes);
__ CallC(sig, arg_regs, result_regs, out_argument_kind, stack_bytes,
ext_ref);
@@ -1241,48 +1426,45 @@ class LiftoffCompiler {
auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
if ((asm_.*emit_fn)(dst.fp(), src.fp())) return;
ExternalReference ext_ref = fallback_fn();
- ValueKind sig_reps[] = {kind};
- ValueKindSig sig(0, 1, sig_reps);
+ auto sig = MakeSig::Params(kind);
GenerateCCall(&dst, &sig, kind, &src, ext_ref);
};
EmitUnOp<kind, kind>(emit_with_c_fallback);
}
enum TypeConversionTrapping : bool { kCanTrap = true, kNoTrap = false };
- template <ValueKind dst_type, ValueKind src_kind,
+ template <ValueKind dst_kind, ValueKind src_kind,
TypeConversionTrapping can_trap>
- void EmitTypeConversion(WasmOpcode opcode, ExternalReference (*fallback_fn)(),
- WasmCodePosition trap_position) {
+ void EmitTypeConversion(FullDecoder* decoder, WasmOpcode opcode,
+ ExternalReference (*fallback_fn)()) {
static constexpr RegClass src_rc = reg_class_for(src_kind);
- static constexpr RegClass dst_rc = reg_class_for(dst_type);
+ static constexpr RegClass dst_rc = reg_class_for(dst_kind);
LiftoffRegister src = __ PopToRegister();
LiftoffRegister dst = src_rc == dst_rc
? __ GetUnusedRegister(dst_rc, {src}, {})
: __ GetUnusedRegister(dst_rc, {});
- DCHECK_EQ(!!can_trap, trap_position > 0);
- Label* trap = can_trap ? AddOutOfLineTrap(
- trap_position,
- WasmCode::kThrowWasmTrapFloatUnrepresentable)
- : nullptr;
+ Label* trap =
+ can_trap ? AddOutOfLineTrap(
+ decoder, WasmCode::kThrowWasmTrapFloatUnrepresentable)
+ : nullptr;
if (!__ emit_type_conversion(opcode, dst, src, trap)) {
DCHECK_NOT_NULL(fallback_fn);
ExternalReference ext_ref = fallback_fn();
if (can_trap) {
// External references for potentially trapping conversions return int.
- ValueKind sig_reps[] = {kI32, src_kind};
- ValueKindSig sig(1, 1, sig_reps);
+ auto sig = MakeSig::Returns(kI32).Params(src_kind);
LiftoffRegister ret_reg =
__ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst));
LiftoffRegister dst_regs[] = {ret_reg, dst};
- GenerateCCall(dst_regs, &sig, dst_type, &src, ext_ref);
+ GenerateCCall(dst_regs, &sig, dst_kind, &src, ext_ref);
__ emit_cond_jump(kEqual, trap, kI32, ret_reg.gp());
} else {
- ValueKind sig_reps[] = {src_kind};
- ValueKindSig sig(0, 1, sig_reps);
- GenerateCCall(&dst, &sig, dst_type, &src, ext_ref);
+ ValueKind sig_kinds[] = {src_kind};
+ ValueKindSig sig(0, 1, sig_kinds);
+ GenerateCCall(&dst, &sig, dst_kind, &src, ext_ref);
}
}
- __ PushRegister(dst_type, dst);
+ __ PushRegister(dst_kind, dst);
}
void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
@@ -1300,10 +1482,10 @@ class LiftoffCompiler {
case kExpr##opcode: \
return EmitFloatUnOpWithCFallback<k##kind>(&LiftoffAssembler::emit_##fn, \
&ExternalReference::wasm_##fn);
-#define CASE_TYPE_CONVERSION(opcode, dst_type, src_kind, ext_ref, can_trap) \
+#define CASE_TYPE_CONVERSION(opcode, dst_kind, src_kind, ext_ref, can_trap) \
case kExpr##opcode: \
- return EmitTypeConversion<k##dst_type, k##src_kind, can_trap>( \
- kExpr##opcode, ext_ref, can_trap ? decoder->position() : 0);
+ return EmitTypeConversion<k##dst_kind, k##src_kind, can_trap>( \
+ decoder, kExpr##opcode, ext_ref);
switch (opcode) {
CASE_I32_UNOP(I32Clz, i32_clz)
CASE_I32_UNOP(I32Ctz, i32_ctz)
@@ -1391,9 +1573,8 @@ class LiftoffCompiler {
return EmitUnOp<kI32, kI32>(
[=](LiftoffRegister dst, LiftoffRegister src) {
if (__ emit_i32_popcnt(dst.gp(), src.gp())) return;
- ValueKind sig_i_i_reps[] = {kI32, kI32};
- ValueKindSig sig_i_i(1, 1, sig_i_i_reps);
- GenerateCCall(&dst, &sig_i_i, kStmt, &src,
+ auto sig = MakeSig::Returns(kI32).Params(kI32);
+ GenerateCCall(&dst, &sig, kVoid, &src,
ExternalReference::wasm_word32_popcnt());
});
case kExprI64Popcnt:
@@ -1401,10 +1582,9 @@ class LiftoffCompiler {
[=](LiftoffRegister dst, LiftoffRegister src) {
if (__ emit_i64_popcnt(dst, src)) return;
// The c function returns i32. We will zero-extend later.
- ValueKind sig_i_l_reps[] = {kI32, kI64};
- ValueKindSig sig_i_l(1, 1, sig_i_l_reps);
+ auto sig = MakeSig::Returns(kI32).Params(kI64);
LiftoffRegister c_call_dst = kNeedI64RegPair ? dst.low() : dst;
- GenerateCCall(&c_call_dst, &sig_i_l, kStmt, &src,
+ GenerateCCall(&c_call_dst, &sig, kVoid, &src,
ExternalReference::wasm_word64_popcnt());
// Now zero-extend the result to i64.
__ emit_type_conversion(kExprI64UConvertI32, dst, c_call_dst,
@@ -1492,9 +1672,7 @@ class LiftoffCompiler {
__ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst, ret));
LiftoffRegister arg_regs[] = {lhs, rhs};
LiftoffRegister result_regs[] = {ret, dst};
- ValueKind sig_kinds[] = {kI32, kI64, kI64};
- // <i64, i64> -> i32 (with i64 output argument)
- ValueKindSig sig(1, 2, sig_kinds);
+ auto sig = MakeSig::Returns(kI32).Params(kI64, kI64);
GenerateCCall(result_regs, &sig, kI64, arg_regs, ext_ref);
__ LoadConstant(tmp, WasmValue(int32_t{0}));
__ emit_cond_jump(kEqual, trap_by_zero, kI32, ret.gp(), tmp.gp());
@@ -1533,10 +1711,10 @@ class LiftoffCompiler {
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
LiftoffRegister args[] = {lhs, rhs}; \
auto ext_ref = ExternalReference::ext_ref_fn(); \
- ValueKind sig_reps[] = {k##kind, k##kind, k##kind}; \
+ ValueKind sig_kinds[] = {k##kind, k##kind, k##kind}; \
const bool out_via_stack = k##kind == kI64; \
- ValueKindSig sig(out_via_stack ? 0 : 1, 2, sig_reps); \
- ValueKind out_arg_kind = out_via_stack ? kI64 : kStmt; \
+ ValueKindSig sig(out_via_stack ? 0 : 1, 2, sig_kinds); \
+ ValueKind out_arg_kind = out_via_stack ? kI64 : kVoid; \
GenerateCCall(&dst, &sig, out_arg_kind, args, ext_ref); \
});
switch (opcode) {
@@ -1706,51 +1884,47 @@ class LiftoffCompiler {
return EmitBinOp<kI32, kI32>([this, decoder](LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- WasmCodePosition position = decoder->position();
- AddOutOfLineTrap(position, WasmCode::kThrowWasmTrapDivByZero);
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapDivByZero);
// Adding the second trap might invalidate the pointer returned for
// the first one, thus get both pointers afterwards.
- AddOutOfLineTrap(position,
- WasmCode::kThrowWasmTrapDivUnrepresentable);
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapDivUnrepresentable);
Label* div_by_zero = out_of_line_code_.end()[-2].label.get();
Label* div_unrepresentable = out_of_line_code_.end()[-1].label.get();
__ emit_i32_divs(dst.gp(), lhs.gp(), rhs.gp(), div_by_zero,
div_unrepresentable);
});
case kExprI32DivU:
- return EmitBinOp<kI32, kI32>(
- [this, decoder](LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- Label* div_by_zero = AddOutOfLineTrap(
- decoder->position(), WasmCode::kThrowWasmTrapDivByZero);
- __ emit_i32_divu(dst.gp(), lhs.gp(), rhs.gp(), div_by_zero);
- });
+ return EmitBinOp<kI32, kI32>([this, decoder](LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Label* div_by_zero =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapDivByZero);
+ __ emit_i32_divu(dst.gp(), lhs.gp(), rhs.gp(), div_by_zero);
+ });
case kExprI32RemS:
- return EmitBinOp<kI32, kI32>(
- [this, decoder](LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- Label* rem_by_zero = AddOutOfLineTrap(
- decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
- __ emit_i32_rems(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero);
- });
+ return EmitBinOp<kI32, kI32>([this, decoder](LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Label* rem_by_zero =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapRemByZero);
+ __ emit_i32_rems(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero);
+ });
case kExprI32RemU:
- return EmitBinOp<kI32, kI32>(
- [this, decoder](LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- Label* rem_by_zero = AddOutOfLineTrap(
- decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
- __ emit_i32_remu(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero);
- });
+ return EmitBinOp<kI32, kI32>([this, decoder](LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Label* rem_by_zero =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapRemByZero);
+ __ emit_i32_remu(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero);
+ });
case kExprI64DivS:
return EmitBinOp<kI64, kI64>([this, decoder](LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- WasmCodePosition position = decoder->position();
- AddOutOfLineTrap(position, WasmCode::kThrowWasmTrapDivByZero);
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapDivByZero);
// Adding the second trap might invalidate the pointer returned for
// the first one, thus get both pointers afterwards.
- AddOutOfLineTrap(position,
- WasmCode::kThrowWasmTrapDivUnrepresentable);
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapDivUnrepresentable);
Label* div_by_zero = out_of_line_code_.end()[-2].label.get();
Label* div_unrepresentable = out_of_line_code_.end()[-1].label.get();
if (!__ emit_i64_divs(dst, lhs, rhs, div_by_zero,
@@ -1764,30 +1938,30 @@ class LiftoffCompiler {
return EmitBinOp<kI64, kI64>([this, decoder](LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- Label* div_by_zero = AddOutOfLineTrap(
- decoder->position(), WasmCode::kThrowWasmTrapDivByZero);
+ Label* div_by_zero =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapDivByZero);
if (!__ emit_i64_divu(dst, lhs, rhs, div_by_zero)) {
ExternalReference ext_ref = ExternalReference::wasm_uint64_div();
EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, div_by_zero);
}
});
case kExprI64RemS:
- return EmitBinOp<kI64, kI64>(
- [this, decoder](LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- Label* rem_by_zero = AddOutOfLineTrap(
- decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
- if (!__ emit_i64_rems(dst, lhs, rhs, rem_by_zero)) {
- ExternalReference ext_ref = ExternalReference::wasm_int64_mod();
- EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero);
- }
- });
+ return EmitBinOp<kI64, kI64>([this, decoder](LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Label* rem_by_zero =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapRemByZero);
+ if (!__ emit_i64_rems(dst, lhs, rhs, rem_by_zero)) {
+ ExternalReference ext_ref = ExternalReference::wasm_int64_mod();
+ EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero);
+ }
+ });
case kExprI64RemU:
return EmitBinOp<kI64, kI64>([this, decoder](LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- Label* rem_by_zero = AddOutOfLineTrap(
- decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
+ Label* rem_by_zero =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapRemByZero);
if (!__ emit_i64_remu(dst, lhs, rhs, rem_by_zero)) {
ExternalReference ext_ref = ExternalReference::wasm_uint64_mod();
EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero);
@@ -1847,17 +2021,11 @@ class LiftoffCompiler {
}
void RefFunc(FullDecoder* decoder, uint32_t function_index, Value* result) {
- WasmCode::RuntimeStubId target = WasmCode::kWasmRefFunc;
- compiler::CallDescriptor* call_descriptor =
- GetBuiltinCallDescriptor<WasmRefFuncDescriptor>(compilation_zone_);
- ValueKind sig_reps[] = {kRef, kI32};
- ValueKindSig sig(1, 1, sig_reps);
LiftoffRegister func_index_reg = __ GetUnusedRegister(kGpReg, {});
__ LoadConstant(func_index_reg, WasmValue(function_index));
LiftoffAssembler::VarState func_index_var(kI32, func_index_reg, 0);
- __ PrepareBuiltinCall(&sig, call_descriptor, {func_index_var});
- __ CallRuntimeStub(target);
- DefineSafepoint();
+ CallRuntimeStub(WasmCode::kWasmRefFunc, MakeSig::Returns(kRef).Params(kI32),
+ {func_index_var}, decoder->position());
__ PushRegister(kRef, LiftoffRegister(kReturnRegister0));
}
@@ -1895,7 +2063,7 @@ class LiftoffCompiler {
DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
Register param_reg = descriptor.GetRegisterParameter(0);
if (info.gp() != param_reg) {
- __ Move(param_reg, info.gp(), LiftoffAssembler::kIntPtr);
+ __ Move(param_reg, info.gp(), kPointerKind);
}
source_position_table_builder_.AddPosition(
@@ -1906,14 +2074,14 @@ class LiftoffCompiler {
__ DeallocateStackSlot(sizeof(int64_t));
}
- void DoReturn(FullDecoder* decoder) {
+ void DoReturn(FullDecoder* decoder, uint32_t /* drop_values */) {
if (FLAG_trace_wasm) TraceFunctionExit(decoder);
size_t num_returns = decoder->sig_->return_count();
if (num_returns > 0) __ MoveToReturnLocations(decoder->sig_, descriptor_);
DEBUG_CODE_COMMENT("leave frame");
__ LeaveFrame(StackFrame::WASM);
__ DropStackSlotsAndRet(
- static_cast<uint32_t>(descriptor_->StackParameterCount()));
+ static_cast<uint32_t>(descriptor_->ParameterSlotCount()));
}
void LocalGet(FullDecoder* decoder, Value* result,
@@ -1951,7 +2119,7 @@ class LiftoffCompiler {
state.dec_used(slot_reg);
dst_slot->MakeStack();
}
- DCHECK_EQ(kind, __ local_type(local_index));
+ DCHECK_EQ(kind, __ local_kind(local_index));
RegClass rc = reg_class_for(kind);
LiftoffRegister dst_reg = __ GetUnusedRegister(rc, {});
__ Fill(dst_reg, src_slot.offset(), kind);
@@ -2055,7 +2223,7 @@ class LiftoffCompiler {
return;
}
- if (is_reference_type(kind)) {
+ if (is_reference(kind)) {
if (global->mutability && global->imported) {
LiftoffRegList pinned;
Register base = no_reg;
@@ -2098,7 +2266,7 @@ class LiftoffCompiler {
return;
}
- if (is_reference_type(kind)) {
+ if (is_reference(kind)) {
if (global->mutability && global->imported) {
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
@@ -2137,27 +2305,19 @@ class LiftoffCompiler {
LiftoffRegister table_index_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
__ LoadConstant(table_index_reg, WasmValue(imm.index));
- LiftoffAssembler::VarState table_index(kPointerValueType, table_index_reg,
- 0);
+ LiftoffAssembler::VarState table_index(kPointerKind, table_index_reg, 0);
LiftoffAssembler::VarState index = __ cache_state()->stack_state.back();
- WasmCode::RuntimeStubId target = WasmCode::kWasmTableGet;
- compiler::CallDescriptor* call_descriptor =
- GetBuiltinCallDescriptor<WasmTableGetDescriptor>(compilation_zone_);
-
ValueKind result_kind = env_->module->tables[imm.index].type.kind();
- ValueKind sig_reps[] = {result_kind, kI32, kI32};
- ValueKindSig sig(1, 2, sig_reps);
-
- __ PrepareBuiltinCall(&sig, call_descriptor, {table_index, index});
- __ CallRuntimeStub(target);
- DefineSafepoint();
+ CallRuntimeStub(WasmCode::kWasmTableGet,
+ MakeSig::Returns(result_kind).Params(kI32, kI32),
+ {table_index, index}, decoder->position());
// Pop parameters from the value stack.
__ cache_state()->stack_state.pop_back(1);
- RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
+ RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
__ PushRegister(result_kind, LiftoffRegister(kReturnRegister0));
}
@@ -2169,37 +2329,54 @@ class LiftoffCompiler {
LiftoffRegister table_index_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
__ LoadConstant(table_index_reg, WasmValue(imm.index));
- LiftoffAssembler::VarState table_index(kPointerValueType, table_index_reg,
- 0);
+ LiftoffAssembler::VarState table_index(kPointerKind, table_index_reg, 0);
LiftoffAssembler::VarState value = __ cache_state()->stack_state.end()[-1];
LiftoffAssembler::VarState index = __ cache_state()->stack_state.end()[-2];
- WasmCode::RuntimeStubId target = WasmCode::kWasmTableSet;
- compiler::CallDescriptor* call_descriptor =
- GetBuiltinCallDescriptor<WasmTableSetDescriptor>(compilation_zone_);
-
ValueKind table_kind = env_->module->tables[imm.index].type.kind();
- ValueKind sig_reps[] = {kI32, kI32, table_kind};
- ValueKindSig sig(0, 3, sig_reps);
- __ PrepareBuiltinCall(&sig, call_descriptor, {table_index, index, value});
- __ CallRuntimeStub(target);
- DefineSafepoint();
+ CallRuntimeStub(WasmCode::kWasmTableSet,
+ MakeSig::Params(kI32, kI32, table_kind),
+ {table_index, index, value}, decoder->position());
// Pop parameters from the value stack.
__ cache_state()->stack_state.pop_back(2);
- RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
+ RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
}
- void Unreachable(FullDecoder* decoder) {
- Label* unreachable_label = AddOutOfLineTrap(
- decoder->position(), WasmCode::kThrowWasmTrapUnreachable);
- __ emit_jump(unreachable_label);
+ WasmCode::RuntimeStubId GetRuntimeStubIdForTrapReason(TrapReason reason) {
+ switch (reason) {
+#define RUNTIME_STUB_FOR_TRAP(trap_reason) \
+ case k##trap_reason: \
+ return WasmCode::kThrowWasm##trap_reason;
+
+ FOREACH_WASM_TRAPREASON(RUNTIME_STUB_FOR_TRAP)
+#undef RUNTIME_STUB_FOR_TRAP
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ void Trap(FullDecoder* decoder, TrapReason reason) {
+ Label* trap_label =
+ AddOutOfLineTrap(decoder, GetRuntimeStubIdForTrapReason(reason));
+ __ emit_jump(trap_label);
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
+ void AssertNull(FullDecoder* decoder, const Value& arg, Value* result) {
+ LiftoffRegList pinned;
+ LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
+ Label* trap_label =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapNullDereference);
+ LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
+ LoadNullValue(null.gp(), pinned);
+ __ emit_cond_jump(kUnequal, trap_label, kOptRef, obj.gp(), null.gp());
+ __ PushRegister(kOptRef, obj);
+ }
+
void NopForTestingUnsupportedInLiftoff(FullDecoder* decoder) {
unsupported(decoder, kOtherReason, "testing opcode");
}
@@ -2231,17 +2408,20 @@ class LiftoffCompiler {
void BrImpl(Control* target) {
if (!target->br_merge()->reached) {
- target->label_state.InitMerge(*__ cache_state(), __ num_locals(),
- target->br_merge()->arity,
- target->stack_depth);
+ target->label_state.InitMerge(
+ *__ cache_state(), __ num_locals(), target->br_merge()->arity,
+ target->stack_depth + target->num_exceptions);
}
- __ MergeStackWith(target->label_state, target->br_merge()->arity);
+ __ MergeStackWith(target->label_state, target->br_merge()->arity,
+ target->is_loop() ? LiftoffAssembler::kBackwardJump
+ : LiftoffAssembler::kForwardJump);
__ jmp(target->label.get());
}
- void BrOrRet(FullDecoder* decoder, uint32_t depth) {
+ void BrOrRet(FullDecoder* decoder, uint32_t depth,
+ uint32_t /* drop_values */) {
if (depth == decoder->control_depth() - 1) {
- DoReturn(decoder);
+ DoReturn(decoder, 0);
} else {
BrImpl(decoder->control_at(depth));
}
@@ -2275,7 +2455,7 @@ class LiftoffCompiler {
outstanding_op_ = kNoOutstandingOp;
}
- BrOrRet(decoder, depth);
+ BrOrRet(decoder, depth, 0);
__ bind(&cont_false);
}
@@ -2288,7 +2468,7 @@ class LiftoffCompiler {
__ jmp(label.get());
} else {
__ bind(label.get());
- BrOrRet(decoder, br_depth);
+ BrOrRet(decoder, br_depth, 0);
}
}
@@ -2353,7 +2533,8 @@ class LiftoffCompiler {
if (c->reachable()) {
if (!c->end_merge.reached) {
c->label_state.InitMerge(*__ cache_state(), __ num_locals(),
- c->end_merge.arity, c->stack_depth);
+ c->end_merge.arity,
+ c->stack_depth + c->num_exceptions);
}
__ MergeFullStackWith(c->label_state, *__ cache_state());
__ emit_jump(c->label.get());
@@ -2378,8 +2559,8 @@ class LiftoffCompiler {
return spilled;
}
- Label* AddOutOfLineTrap(WasmCodePosition position,
- WasmCode::RuntimeStubId stub, uint32_t pc = 0) {
+ Label* AddOutOfLineTrap(FullDecoder* decoder, WasmCode::RuntimeStubId stub,
+ uint32_t pc = 0) {
DCHECK(FLAG_wasm_bounds_checks);
OutOfLineSafepointInfo* safepoint_info = nullptr;
if (V8_UNLIKELY(for_debugging_)) {
@@ -2394,10 +2575,10 @@ class LiftoffCompiler {
LiftoffAssembler::CacheState::SpillLocation::kStackSlots);
}
out_of_line_code_.push_back(OutOfLineCode::Trap(
- stub, position,
+ stub, decoder->position(),
V8_UNLIKELY(for_debugging_) ? GetSpilledRegistersForInspection()
: nullptr,
- safepoint_info, pc, RegisterOOLDebugSideTableEntry()));
+ safepoint_info, pc, RegisterOOLDebugSideTableEntry(decoder)));
return out_of_line_code_.back().label.get();
}
@@ -2409,10 +2590,7 @@ class LiftoffCompiler {
Register BoundsCheckMem(FullDecoder* decoder, uint32_t access_size,
uint64_t offset, LiftoffRegister index,
LiftoffRegList pinned, ForceCheck force_check) {
- // If the offset does not fit in a uintptr_t, this can never succeed on this
- // machine.
const bool statically_oob =
- offset > std::numeric_limits<uintptr_t>::max() ||
!base::IsInBounds<uintptr_t>(offset, access_size,
env_->max_memory_size);
@@ -2435,9 +2613,9 @@ class LiftoffCompiler {
// TODO(wasm): This adds protected instruction information for the jump
// instruction we are about to generate. It would be better to just not add
// protected instruction info when the pc is 0.
- Label* trap_label = AddOutOfLineTrap(
- decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds,
- env_->use_trap_handler ? __ pc_offset() : 0);
+ Label* trap_label =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
+ env_->use_trap_handler ? __ pc_offset() : 0);
if (statically_oob) {
__ emit_jump(trap_label);
@@ -2469,7 +2647,7 @@ class LiftoffCompiler {
// the end offset against the actual memory size, which is not known at
// compile time. Otherwise, only one check is required (see below).
if (end_offset > env_->min_memory_size) {
- __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kPointerValueType,
+ __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kPointerKind,
end_offset_reg.gp(), mem_size.gp());
}
@@ -2479,7 +2657,7 @@ class LiftoffCompiler {
__ emit_ptrsize_sub(effective_size_reg.gp(), mem_size.gp(),
end_offset_reg.gp());
- __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kPointerValueType,
+ __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kPointerKind,
index_ptrsize, effective_size_reg.gp());
return index_ptrsize;
}
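Editor's note: BoundsCheckMem relies on an overflow-safe containment check (base::IsInBounds) for the static part, and on an end_offset comparison against the dynamic memory size otherwise. A self-contained sketch of the same arithmetic; is_in_bounds below is an illustrative stand-in, not the real base::IsInBounds:

#include <cstdint>
#include <iostream>

// True iff [offset, offset + size) lies inside [0, upper_bound),
// without overflowing in the addition.
static bool is_in_bounds(uint64_t offset, uint64_t size, uint64_t upper_bound) {
  return size <= upper_bound && offset <= upper_bound - size;
}

int main() {
  constexpr uint64_t kMemSize = 0x10000;  // one 64 KiB wasm page
  std::cout << is_in_bounds(0xFFFC, 4, kMemSize) << "\n";        // 1: last word
  std::cout << is_in_bounds(0xFFFD, 4, kMemSize) << "\n";        // 0: one byte past the end
  std::cout << is_in_bounds(~uint64_t{0}, 4, kMemSize) << "\n";  // 0: addition would overflow
  return 0;
}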
@@ -2487,8 +2665,8 @@ class LiftoffCompiler {
void AlignmentCheckMem(FullDecoder* decoder, uint32_t access_size,
uintptr_t offset, Register index,
LiftoffRegList pinned) {
- Label* trap_label = AddOutOfLineTrap(
- decoder->position(), WasmCode::kThrowWasmTrapUnalignedAccess, 0);
+ Label* trap_label =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapUnalignedAccess, 0);
Register address = __ GetUnusedRegister(kGpReg, pinned).gp();
const uint32_t align_mask = access_size - 1;
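Editor's note: the align_mask trick above only works because access_size is a power of two; an address is aligned exactly when its low bits under that mask are zero. A minimal sketch with a hypothetical is_aligned helper:

#include <cstdint>
#include <iostream>

// access_size must be a power of two (1, 2, 4, 8, 16).
static bool is_aligned(uint64_t effective_address, uint32_t access_size) {
  const uint32_t align_mask = access_size - 1;
  return (effective_address & align_mask) == 0;
}

int main() {
  std::cout << is_aligned(0x1000, 8) << "\n";  // 1
  std::cout << is_aligned(0x1004, 8) << "\n";  // 0: only 4-byte aligned
  std::cout << is_aligned(0x1004, 4) << "\n";  // 1
  return 0;
}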
@@ -2555,7 +2733,7 @@ class LiftoffCompiler {
DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
Register param_reg = descriptor.GetRegisterParameter(0);
if (info.gp() != param_reg) {
- __ Move(param_reg, info.gp(), LiftoffAssembler::kIntPtr);
+ __ Move(param_reg, info.gp(), kPointerKind);
}
source_position_table_builder_.AddPosition(__ pc_offset(),
@@ -2578,7 +2756,7 @@ class LiftoffCompiler {
pinned->clear(LiftoffRegister{old_index});
index = pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp();
if (index != old_index) {
- __ Move(index, old_index, kPointerValueType);
+ __ Move(index, old_index, kPointerKind);
}
}
Register tmp = __ GetUnusedRegister(kGpReg, *pinned).gp();
@@ -2589,12 +2767,6 @@ class LiftoffCompiler {
return index;
}
- void Prefetch(FullDecoder* decoder,
- const MemoryAccessImmediate<validate>& imm,
- const Value& index_val, bool temporal) {
- unsupported(decoder, kSimd, "simd prefetch");
- }
-
void LoadMem(FullDecoder* decoder, LoadType type,
const MemoryAccessImmediate<validate>& imm,
const Value& index_val, Value* result) {
@@ -2616,8 +2788,7 @@ class LiftoffCompiler {
uint32_t protected_load_pc = 0;
__ Load(value, addr, index, offset, type, pinned, &protected_load_pc, true);
if (env_->use_trap_handler) {
- AddOutOfLineTrap(decoder->position(),
- WasmCode::kThrowWasmTrapMemOutOfBounds,
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
protected_load_pc);
}
__ PushRegister(kind, value);
@@ -2660,8 +2831,7 @@ class LiftoffCompiler {
&protected_load_pc);
if (env_->use_trap_handler) {
- AddOutOfLineTrap(decoder->position(),
- WasmCode::kThrowWasmTrapMemOutOfBounds,
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
protected_load_pc);
}
__ PushRegister(kS128, value);
@@ -2702,8 +2872,7 @@ class LiftoffCompiler {
__ LoadLane(result, value, addr, index, offset, type, laneidx,
&protected_load_pc);
if (env_->use_trap_handler) {
- AddOutOfLineTrap(decoder->position(),
- WasmCode::kThrowWasmTrapMemOutOfBounds,
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
protected_load_pc);
}
@@ -2739,8 +2908,7 @@ class LiftoffCompiler {
__ Store(addr, index, offset, value, type, outer_pinned,
&protected_store_pc, true);
if (env_->use_trap_handler) {
- AddOutOfLineTrap(decoder->position(),
- WasmCode::kThrowWasmTrapMemOutOfBounds,
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
protected_store_pc);
}
if (FLAG_trace_wasm_memory) {
@@ -2769,8 +2937,7 @@ class LiftoffCompiler {
uint32_t protected_store_pc = 0;
__ StoreLane(addr, index, offset, value, type, lane, &protected_store_pc);
if (env_->use_trap_handler) {
- AddOutOfLineTrap(decoder->position(),
- WasmCode::kThrowWasmTrapMemOutOfBounds,
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
protected_store_pc);
}
if (FLAG_trace_wasm_memory) {
@@ -2800,11 +2967,23 @@ class LiftoffCompiler {
LiftoffRegister input = pinned.set(__ PopToRegister());
__ SpillAllRegisters();
- constexpr Register kGpReturnReg = kGpReturnRegisters[0];
- static_assert(kLiftoffAssemblerGpCacheRegs & kGpReturnReg.bit(),
- "first return register is a cache register (needs more "
- "complex code here otherwise)");
- LiftoffRegister result = pinned.set(LiftoffRegister(kGpReturnReg));
+ LiftoffRegister result = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+
+ Label done;
+
+ if (env_->module->is_memory64) {
+ // If the high word is not 0, this will always fail (would grow by
+ // >=256TB). The int32_t value will be sign-extended below.
+ __ LoadConstant(result, WasmValue(int32_t{-1}));
+ if (kNeedI64RegPair) {
+ __ emit_cond_jump(kUnequal /* neq */, &done, kI32, input.high_gp());
+ input = input.low();
+ } else {
+ LiftoffRegister high_word = __ GetUnusedRegister(kGpReg, pinned);
+ __ emit_i64_shri(high_word, input, 32);
+ __ emit_cond_jump(kUnequal /* neq */, &done, kI32, high_word.gp());
+ }
+ }
WasmMemoryGrowDescriptor descriptor;
DCHECK_EQ(0, descriptor.GetStackParameterCount());
@@ -2816,66 +2995,140 @@ class LiftoffCompiler {
__ CallRuntimeStub(WasmCode::kWasmMemoryGrow);
DefineSafepoint();
- RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
+ RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
if (kReturnRegister0 != result.gp()) {
__ Move(result.gp(), kReturnRegister0, kI32);
}
- __ PushRegister(kI32, result);
+ __ bind(&done);
+
+ if (env_->module->is_memory64) {
+ LiftoffRegister result64 = result;
+ if (kNeedI64RegPair) result64 = __ GetUnusedRegister(kGpRegPair, pinned);
+ __ emit_type_conversion(kExprI64SConvertI32, result64, result, nullptr);
+ __ PushRegister(kI64, result64);
+ } else {
+ __ PushRegister(kI32, result);
+ }
+ }
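Editor's note: for memory64, MemoryGrow above first rejects any grow delta whose high word is non-zero (such a grow can never succeed) and otherwise passes the low 32 bits to the existing 32-bit builtin, widening the i32 result back to i64. A standalone sketch of that control flow; grow_pages_32 is a made-up stand-in for the runtime stub:

#include <cstdint>
#include <iostream>

// Hypothetical 32-bit grow: returns the old size in pages, or -1 on failure.
static int32_t grow_pages_32(uint32_t delta_pages) {
  static uint32_t current_pages = 1;
  if (current_pages + delta_pages > 0x10000) return -1;  // arbitrary cap for the sketch
  int32_t old = static_cast<int32_t>(current_pages);
  current_pages += delta_pages;
  return old;
}

static int64_t grow_pages_64(uint64_t delta_pages) {
  // High word non-zero: growing by >= 2^32 pages can never succeed.
  if ((delta_pages >> 32) != 0) return -1;
  int32_t result = grow_pages_32(static_cast<uint32_t>(delta_pages));
  return static_cast<int64_t>(result);  // sign-extend, as the i64 push does
}

int main() {
  std::cout << grow_pages_64(1) << "\n";                  // 1 (old size in pages)
  std::cout << grow_pages_64(uint64_t{1} << 32) << "\n";  // -1
  return 0;
}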
+
+ OwnedVector<DebugSideTable::Entry::Value> GetCurrentDebugSideTableEntries(
+ FullDecoder* decoder,
+ DebugSideTableBuilder::AssumeSpilling assume_spilling) {
+ auto& stack_state = __ cache_state()->stack_state;
+ auto values = OwnedVector<DebugSideTable::Entry::Value>::NewForOverwrite(
+ stack_state.size());
+
+ // For function calls, the decoder still has the arguments on the stack, but
+ // Liftoff already popped them. Hence {decoder->stack_size()} can be bigger
+ // than expected. Just ignore that and use the lower part only.
+ DCHECK_LE(stack_state.size() - num_exceptions_,
+ decoder->num_locals() + decoder->stack_size());
+ int index = 0;
+ int decoder_stack_index = decoder->stack_size();
+ // Iterate the operand stack control block by control block, so that we can
+ // handle the implicit exception value for try blocks.
+ for (int j = decoder->control_depth() - 1; j >= 0; j--) {
+ Control* control = decoder->control_at(j);
+ Control* next_control = j > 0 ? decoder->control_at(j - 1) : nullptr;
+ int end_index = next_control
+ ? next_control->stack_depth + __ num_locals() +
+ next_control->num_exceptions
+ : __ cache_state()->stack_height();
+ bool exception = control->is_try_catch() || control->is_try_catchall() ||
+ control->is_try_unwind();
+ for (; index < end_index; ++index) {
+ auto& slot = stack_state[index];
+ auto& value = values[index];
+ value.index = index;
+ ValueType type =
+ index < static_cast<int>(__ num_locals())
+ ? decoder->local_type(index)
+ : exception ? ValueType::Ref(HeapType::kExtern, kNonNullable)
+ : decoder->stack_value(decoder_stack_index--)->type;
+ DCHECK(CheckCompatibleStackSlotTypes(slot.kind(), type.kind()));
+ value.type = type;
+ switch (slot.loc()) {
+ case kIntConst:
+ value.storage = DebugSideTable::Entry::kConstant;
+ value.i32_const = slot.i32_const();
+ break;
+ case kRegister:
+ DCHECK_NE(DebugSideTableBuilder::kDidSpill, assume_spilling);
+ if (assume_spilling == DebugSideTableBuilder::kAllowRegisters) {
+ value.storage = DebugSideTable::Entry::kRegister;
+ value.reg_code = slot.reg().liftoff_code();
+ break;
+ }
+ DCHECK_EQ(DebugSideTableBuilder::kAssumeSpilling, assume_spilling);
+ V8_FALLTHROUGH;
+ case kStack:
+ value.storage = DebugSideTable::Entry::kStack;
+ value.stack_offset = slot.offset();
+ break;
+ }
+ exception = false;
+ }
+ }
+ DCHECK_EQ(values.size(), index);
+ return values;
}
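Editor's note: GetCurrentDebugSideTableEntries maps every Liftoff stack slot onto one of three storage descriptions (constant, register, or stack offset), falling through from register to stack when spilling must be assumed. A reduced sketch of just that classification, with made-up Slot/Entry types rather than the V8 ones:

#include <cstdint>
#include <iostream>

enum class Loc { kConst, kRegister, kStack };
enum class Storage { kConstant, kRegister, kStack };

struct Slot { Loc loc; int32_t i32_const; int reg_code; int stack_offset; };
struct Entry { Storage storage; int32_t i32_const; int reg_code; int stack_offset; };

static Entry Describe(const Slot& slot, bool assume_spilling) {
  Entry e{Storage::kStack, 0, -1, -1};
  switch (slot.loc) {
    case Loc::kConst:
      e.storage = Storage::kConstant;
      e.i32_const = slot.i32_const;
      break;
    case Loc::kRegister:
      if (!assume_spilling) {  // register contents are still meaningful
        e.storage = Storage::kRegister;
        e.reg_code = slot.reg_code;
        break;
      }
      [[fallthrough]];         // otherwise describe the spill slot instead
    case Loc::kStack:
      e.storage = Storage::kStack;
      e.stack_offset = slot.stack_offset;
      break;
  }
  return e;
}

int main() {
  Slot in_reg{Loc::kRegister, 0, /*reg_code=*/3, /*stack_offset=*/16};
  std::cout << static_cast<int>(Describe(in_reg, false).storage) << "\n";  // 1: register
  std::cout << static_cast<int>(Describe(in_reg, true).storage) << "\n";   // 2: stack
  return 0;
}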
void RegisterDebugSideTableEntry(
+ FullDecoder* decoder,
DebugSideTableBuilder::AssumeSpilling assume_spilling) {
if (V8_LIKELY(!debug_sidetable_builder_)) return;
- debug_sidetable_builder_->NewEntry(__ pc_offset(),
- VectorOf(__ cache_state()->stack_state),
- assume_spilling);
+ debug_sidetable_builder_->NewEntry(
+ __ pc_offset(),
+ GetCurrentDebugSideTableEntries(decoder, assume_spilling).as_vector());
}
- DebugSideTableBuilder::EntryBuilder* RegisterOOLDebugSideTableEntry() {
+ DebugSideTableBuilder::EntryBuilder* RegisterOOLDebugSideTableEntry(
+ FullDecoder* decoder) {
if (V8_LIKELY(!debug_sidetable_builder_)) return nullptr;
return debug_sidetable_builder_->NewOOLEntry(
- VectorOf(__ cache_state()->stack_state),
- DebugSideTableBuilder::kAssumeSpilling);
+ GetCurrentDebugSideTableEntries(decoder,
+ DebugSideTableBuilder::kAssumeSpilling)
+ .as_vector());
}
- enum CallKind : bool { kReturnCall = true, kNoReturnCall = false };
+ enum TailCall : bool { kTailCall = true, kNoTailCall = false };
void CallDirect(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[], Value[]) {
- CallDirect(decoder, imm, args, nullptr, kNoReturnCall);
+ CallDirect(decoder, imm, args, nullptr, kNoTailCall);
}
void CallIndirect(FullDecoder* decoder, const Value& index_val,
const CallIndirectImmediate<validate>& imm,
const Value args[], Value returns[]) {
- CallIndirect(decoder, index_val, imm, kNoReturnCall);
+ CallIndirect(decoder, index_val, imm, kNoTailCall);
}
void CallRef(FullDecoder* decoder, const Value& func_ref,
const FunctionSig* sig, uint32_t sig_index, const Value args[],
Value returns[]) {
- CallRef(decoder, func_ref.type, sig, kNoReturnCall);
+ CallRef(decoder, func_ref.type, sig, kNoTailCall);
}
void ReturnCall(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[]) {
- CallDirect(decoder, imm, args, nullptr, kReturnCall);
+ CallDirect(decoder, imm, args, nullptr, kTailCall);
}
void ReturnCallIndirect(FullDecoder* decoder, const Value& index_val,
const CallIndirectImmediate<validate>& imm,
const Value args[]) {
- CallIndirect(decoder, index_val, imm, kReturnCall);
+ CallIndirect(decoder, index_val, imm, kTailCall);
}
void ReturnCallRef(FullDecoder* decoder, const Value& func_ref,
const FunctionSig* sig, uint32_t sig_index,
const Value args[]) {
- CallRef(decoder, func_ref.type, sig, kReturnCall);
+ CallRef(decoder, func_ref.type, sig, kTailCall);
}
void BrOnNull(FullDecoder* decoder, const Value& ref_object, uint32_t depth) {
@@ -2894,7 +3147,7 @@ class LiftoffCompiler {
__ emit_cond_jump(kUnequal, &cont_false, ref_object.type.kind(), ref.gp(),
null);
- BrOrRet(decoder, depth);
+ BrOrRet(decoder, depth, 0);
__ bind(&cont_false);
__ PushRegister(kRef, ref);
}
@@ -2951,8 +3204,7 @@ class LiftoffCompiler {
LiftoffRegister dst = __ GetUnusedRegister(rc, {src}, {});
if (!(asm_.*emit_fn)(dst, src)) {
// Return v128 via stack for ARM.
- ValueKind sig_v_s_reps[] = {kS128};
- ValueKindSig sig_v_s(0, 1, sig_v_s_reps);
+ auto sig_v_s = MakeSig::Params(kS128);
GenerateCCall(&dst, &sig_v_s, kS128, &src, ext_ref());
}
__ PushRegister(kS128, dst);
@@ -3054,6 +3306,8 @@ class LiftoffCompiler {
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_ge_u);
case wasm::kExprI64x2Eq:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_eq);
+ case wasm::kExprI64x2Ne:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_ne);
case wasm::kExprI64x2LtS:
return EmitBinOp<kS128, kS128, true>(
&LiftoffAssembler::emit_i64x2_gt_s);
@@ -3102,8 +3356,8 @@ class LiftoffCompiler {
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_neg);
case wasm::kExprV128AnyTrue:
return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v128_anytrue);
- case wasm::kExprV8x16AllTrue:
- return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v8x16_alltrue);
+ case wasm::kExprI8x16AllTrue:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i8x16_alltrue);
case wasm::kExprI8x16BitMask:
return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i8x16_bitmask);
case wasm::kExprI8x16Shl:
@@ -3127,8 +3381,6 @@ class LiftoffCompiler {
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_sub_sat_s);
case wasm::kExprI8x16SubSatU:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_sub_sat_u);
- case wasm::kExprI8x16Mul:
- return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_mul);
case wasm::kExprI8x16MinS:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_min_s);
case wasm::kExprI8x16MinU:
@@ -3139,8 +3391,8 @@ class LiftoffCompiler {
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_max_u);
case wasm::kExprI16x8Neg:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_neg);
- case wasm::kExprV16x8AllTrue:
- return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v16x8_alltrue);
+ case wasm::kExprI16x8AllTrue:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i16x8_alltrue);
case wasm::kExprI16x8BitMask:
return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i16x8_bitmask);
case wasm::kExprI16x8Shl:
@@ -3197,8 +3449,8 @@ class LiftoffCompiler {
&LiftoffAssembler::emit_i16x8_q15mulr_sat_s);
case wasm::kExprI32x4Neg:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_neg);
- case wasm::kExprV32x4AllTrue:
- return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v32x4_alltrue);
+ case wasm::kExprI32x4AllTrue:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i32x4_alltrue);
case wasm::kExprI32x4BitMask:
return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i32x4_bitmask);
case wasm::kExprI32x4Shl:
@@ -3247,8 +3499,8 @@ class LiftoffCompiler {
&LiftoffAssembler::emit_i32x4_extmul_high_i16x8_u);
case wasm::kExprI64x2Neg:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_neg);
- case wasm::kExprV64x2AllTrue:
- return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v64x2_alltrue);
+ case wasm::kExprI64x2AllTrue:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i64x2_alltrue);
case wasm::kExprI64x2Shl:
return EmitSimdShiftOp(&LiftoffAssembler::emit_i64x2_shl,
&LiftoffAssembler::emit_i64x2_shli);
@@ -3430,6 +3682,24 @@ class LiftoffCompiler {
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_abs);
case wasm::kExprI64x2Abs:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_abs);
+ case wasm::kExprF64x2ConvertLowI32x4S:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_f64x2_convert_low_i32x4_s);
+ case wasm::kExprF64x2ConvertLowI32x4U:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_f64x2_convert_low_i32x4_u);
+ case wasm::kExprF64x2PromoteLowF32x4:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_f64x2_promote_low_f32x4);
+ case wasm::kExprF32x4DemoteF64x2Zero:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_f32x4_demote_f64x2_zero);
+ case wasm::kExprI32x4TruncSatF64x2SZero:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero);
+ case wasm::kExprI32x4TruncSatF64x2UZero:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero);
default:
unsupported(decoder, kSimd, "simd");
}
@@ -3448,11 +3718,11 @@ class LiftoffCompiler {
__ PushRegister(result_kind, dst);
}
- template <ValueKind src2_type, typename EmitFn>
+ template <ValueKind src2_kind, typename EmitFn>
void EmitSimdReplaceLaneOp(EmitFn fn,
const SimdLaneImmediate<validate>& imm) {
static constexpr RegClass src1_rc = reg_class_for(kS128);
- static constexpr RegClass src2_rc = reg_class_for(src2_type);
+ static constexpr RegClass src2_rc = reg_class_for(src2_kind);
static constexpr RegClass result_rc = reg_class_for(kS128);
// On backends which need fp pair, src1_rc and result_rc end up being
// kFpRegPair, which is != kFpReg, but we still want to pin src2 when it is
@@ -3597,12 +3867,223 @@ class LiftoffCompiler {
tmp_reg, pinned, LiftoffAssembler::kSkipWriteBarrier);
}
+ void Store64BitExceptionValue(Register values_array, int* index_in_array,
+ LiftoffRegister value, LiftoffRegList pinned) {
+ if (kNeedI64RegPair) {
+ Store32BitExceptionValue(values_array, index_in_array, value.low_gp(),
+ pinned);
+ Store32BitExceptionValue(values_array, index_in_array, value.high_gp(),
+ pinned);
+ } else {
+ Store32BitExceptionValue(values_array, index_in_array, value.gp(),
+ pinned);
+ __ emit_i64_shri(value, value, 32);
+ Store32BitExceptionValue(values_array, index_in_array, value.gp(),
+ pinned);
+ }
+ }
+
+ void Load16BitExceptionValue(LiftoffRegister dst,
+ LiftoffRegister values_array, uint32_t* index,
+ LiftoffRegList pinned) {
+ __ LoadSmiAsInt32(
+ dst, values_array.gp(),
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(*index), pinned);
+ (*index)++;
+ }
+
+ void Load32BitExceptionValue(Register dst, LiftoffRegister values_array,
+ uint32_t* index, LiftoffRegList pinned) {
+ LiftoffRegister upper = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ Load16BitExceptionValue(upper, values_array, index, pinned);
+ __ emit_i32_shli(upper.gp(), upper.gp(), 16);
+ Load16BitExceptionValue(LiftoffRegister(dst), values_array, index, pinned);
+ __ emit_i32_or(dst, upper.gp(), dst);
+ }
+
+ void Load64BitExceptionValue(LiftoffRegister dst,
+ LiftoffRegister values_array, uint32_t* index,
+ LiftoffRegList pinned) {
+ if (kNeedI64RegPair) {
+ Load32BitExceptionValue(dst.high_gp(), values_array, index, pinned);
+ Load32BitExceptionValue(dst.low_gp(), values_array, index, pinned);
+ } else {
+ Load16BitExceptionValue(dst, values_array, index, pinned);
+ __ emit_i64_shli(dst, dst, 48);
+ LiftoffRegister tmp_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ Load16BitExceptionValue(tmp_reg, values_array, index, pinned);
+ __ emit_i64_shli(tmp_reg, tmp_reg, 32);
+ __ emit_i64_or(dst, tmp_reg, dst);
+ Load16BitExceptionValue(tmp_reg, values_array, index, pinned);
+ __ emit_i64_shli(tmp_reg, tmp_reg, 16);
+ __ emit_i64_or(dst, tmp_reg, dst);
+ Load16BitExceptionValue(tmp_reg, values_array, index, pinned);
+ __ emit_i64_or(dst, tmp_reg, dst);
+ }
+ }
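Editor's note: the Load/Store*ExceptionValue helpers above encode wasm values into the exception's FixedArray as 16-bit chunks (two per 32-bit value, four per 64-bit value) and reassemble them with shifts and ors on the way out; the real code additionally walks the array backwards while throwing. A self-contained round-trip sketch of the 16-bit packing, ignoring that index bookkeeping and the Smi wrapping:

#include <cassert>
#include <cstdint>
#include <vector>

static void store64(std::vector<uint16_t>* arr, uint64_t value) {
  // Highest 16 bits first, matching the load order below.
  for (int shift = 48; shift >= 0; shift -= 16) {
    arr->push_back(static_cast<uint16_t>(value >> shift));
  }
}

static uint64_t load64(const std::vector<uint16_t>& arr, size_t* index) {
  uint64_t value = 0;
  for (int shift = 48; shift >= 0; shift -= 16) {
    value |= static_cast<uint64_t>(arr[(*index)++]) << shift;
  }
  return value;
}

int main() {
  std::vector<uint16_t> values;
  store64(&values, 0x0123456789ABCDEFull);
  size_t index = 0;
  assert(load64(values, &index) == 0x0123456789ABCDEFull);
  assert(index == 4);  // an i64 occupies four 16-bit entries
  return 0;
}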
+
void StoreExceptionValue(ValueType type, Register values_array,
int* index_in_array, LiftoffRegList pinned) {
- // TODO(clemensb): Handle more types.
- DCHECK_EQ(kWasmI32, type);
LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
- Store32BitExceptionValue(values_array, index_in_array, value.gp(), pinned);
+ switch (type.kind()) {
+ case kI32:
+ Store32BitExceptionValue(values_array, index_in_array, value.gp(),
+ pinned);
+ break;
+ case kF32: {
+ LiftoffRegister gp_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ emit_type_conversion(kExprI32ReinterpretF32, gp_reg, value, nullptr);
+ Store32BitExceptionValue(values_array, index_in_array, gp_reg.gp(),
+ pinned);
+ break;
+ }
+ case kI64:
+ Store64BitExceptionValue(values_array, index_in_array, value, pinned);
+ break;
+ case kF64: {
+ LiftoffRegister tmp_reg =
+ pinned.set(__ GetUnusedRegister(reg_class_for(kI64), pinned));
+ __ emit_type_conversion(kExprI64ReinterpretF64, tmp_reg, value,
+ nullptr);
+ Store64BitExceptionValue(values_array, index_in_array, tmp_reg, pinned);
+ break;
+ }
+ case kS128: {
+ LiftoffRegister tmp_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ for (int i : {3, 2, 1, 0}) {
+ __ emit_i32x4_extract_lane(tmp_reg, value, i);
+ Store32BitExceptionValue(values_array, index_in_array, tmp_reg.gp(),
+ pinned);
+ }
+ break;
+ }
+ case wasm::kRef:
+ case wasm::kOptRef:
+ case wasm::kRtt:
+ case wasm::kRttWithDepth: {
+ --(*index_in_array);
+ __ StoreTaggedPointer(
+ values_array, no_reg,
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(
+ *index_in_array),
+ value, pinned);
+ break;
+ }
+ case wasm::kI8:
+ case wasm::kI16:
+ case wasm::kVoid:
+ case wasm::kBottom:
+ UNREACHABLE();
+ }
+ }
+
+ void LoadExceptionValue(ValueKind kind, LiftoffRegister values_array,
+ uint32_t* index, LiftoffRegList pinned) {
+ RegClass rc = reg_class_for(kind);
+ LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
+ switch (kind) {
+ case kI32:
+ Load32BitExceptionValue(value.gp(), values_array, index, pinned);
+ break;
+ case kF32: {
+ LiftoffRegister tmp_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ Load32BitExceptionValue(tmp_reg.gp(), values_array, index, pinned);
+ __ emit_type_conversion(kExprF32ReinterpretI32, value, tmp_reg,
+ nullptr);
+ break;
+ }
+ case kI64:
+ Load64BitExceptionValue(value, values_array, index, pinned);
+ break;
+ case kF64: {
+ RegClass rc = reg_class_for(kI64);
+ LiftoffRegister tmp_reg = pinned.set(__ GetUnusedRegister(rc, pinned));
+ Load64BitExceptionValue(tmp_reg, values_array, index, pinned);
+ __ emit_type_conversion(kExprF64ReinterpretI64, value, tmp_reg,
+ nullptr);
+ break;
+ }
+ case kS128: {
+ LiftoffRegister tmp_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ Load32BitExceptionValue(tmp_reg.gp(), values_array, index, pinned);
+ __ emit_i32x4_splat(value, tmp_reg);
+ for (int lane : {1, 2, 3}) {
+ Load32BitExceptionValue(tmp_reg.gp(), values_array, index, pinned);
+ __ emit_i32x4_replace_lane(value, value, tmp_reg, lane);
+ }
+ break;
+ }
+ case wasm::kRef:
+ case wasm::kOptRef:
+ case wasm::kRtt:
+ case wasm::kRttWithDepth: {
+ __ LoadTaggedPointer(
+ value.gp(), values_array.gp(), no_reg,
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(*index),
+ pinned);
+ (*index)++;
+ break;
+ }
+ case wasm::kI8:
+ case wasm::kI16:
+ case wasm::kVoid:
+ case wasm::kBottom:
+ UNREACHABLE();
+ }
+ __ PushRegister(kind, value);
+ }
+
+ void GetExceptionValues(FullDecoder* decoder,
+ LiftoffAssembler::VarState& exception_var,
+ const WasmException* exception) {
+ LiftoffRegList pinned;
+ DEBUG_CODE_COMMENT("get exception values");
+ LiftoffRegister values_array = GetExceptionProperty(
+ exception_var, RootIndex::kwasm_exception_values_symbol);
+ pinned.set(values_array);
+ uint32_t index = 0;
+ const WasmExceptionSig* sig = exception->sig;
+ for (ValueType param : sig->parameters()) {
+ LoadExceptionValue(param.kind(), values_array, &index, pinned);
+ }
+ DCHECK_EQ(index, WasmExceptionPackage::GetEncodedSize(exception));
+ }
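Editor's note: the DCHECK above compares the running index against WasmExceptionPackage::GetEncodedSize. Judging from the load/store helpers in this patch, the encoded size is two array slots per 32-bit value, four per 64-bit value, eight per s128, and one tagged slot per reference; the sketch below recomputes that sum under those assumptions (Kind and EncodedSize are illustrative names, not V8's):

#include <cstdint>
#include <iostream>
#include <vector>

enum class Kind { kI32, kF32, kI64, kF64, kS128, kRef };

static uint32_t EncodedSize(const std::vector<Kind>& sig) {
  uint32_t size = 0;
  for (Kind k : sig) {
    switch (k) {
      case Kind::kI32: case Kind::kF32: size += 2; break;
      case Kind::kI64: case Kind::kF64: size += 4; break;
      case Kind::kS128:                 size += 8; break;
      case Kind::kRef:                  size += 1; break;
    }
  }
  return size;
}

int main() {
  std::cout << EncodedSize({Kind::kI32, Kind::kI64, Kind::kRef}) << "\n";  // 7
  return 0;
}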
+
+ void EmitLandingPad(FullDecoder* decoder) {
+ if (current_catch_ == -1) return;
+ MovableLabel handler;
+ int handler_offset = __ pc_offset();
+
+ // If we return from the throwing code normally, just skip over the handler.
+ Label skip_handler;
+ __ emit_jump(&skip_handler);
+
+ // Handler: merge into the catch state, and jump to the catch body.
+ __ bind(handler.get());
+ __ ExceptionHandler();
+ __ PushException();
+ handlers_.push_back({std::move(handler), handler_offset});
+ Control* current_try =
+ decoder->control_at(decoder->control_depth() - 1 - current_catch_);
+ DCHECK_NOT_NULL(current_try->try_info);
+ if (!current_try->try_info->catch_reached) {
+ current_try->try_info->catch_state.InitMerge(
+ *__ cache_state(), __ num_locals(), 1,
+ current_try->stack_depth + current_try->num_exceptions);
+ current_try->try_info->catch_reached = true;
+ }
+ __ MergeStackWith(current_try->try_info->catch_state, 1,
+ LiftoffAssembler::kForwardJump);
+ __ emit_jump(&current_try->try_info->catch_label);
+
+ __ bind(&skip_handler);
+ // Drop the exception.
+ __ DropValues(1);
}
void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>& imm,
@@ -3616,26 +4097,14 @@ class LiftoffCompiler {
__ LoadConstant(encoded_size_reg, WasmValue(encoded_size));
// Call the WasmAllocateFixedArray builtin to create the values array.
- DEBUG_CODE_COMMENT("call WasmAllocateFixedArray builtin");
- compiler::CallDescriptor* create_values_descriptor =
- GetBuiltinCallDescriptor<WasmAllocateFixedArrayDescriptor>(
- compilation_zone_);
-
- ValueKind create_values_sig_reps[] = {kPointerValueType,
- LiftoffAssembler::kIntPtr};
- ValueKindSig create_values_sig(1, 1, create_values_sig_reps);
-
- __ PrepareBuiltinCall(
- &create_values_sig, create_values_descriptor,
- {LiftoffAssembler::VarState{kSmiValueType,
- LiftoffRegister{encoded_size_reg}, 0}});
- __ CallRuntimeStub(WasmCode::kWasmAllocateFixedArray);
- DefineSafepoint();
+ CallRuntimeStub(WasmCode::kWasmAllocateFixedArray,
+ MakeSig::Returns(kPointerKind).Params(kPointerKind),
+ {LiftoffAssembler::VarState{
+ kSmiKind, LiftoffRegister{encoded_size_reg}, 0}},
+ decoder->position());
// The FixedArray for the exception values is now in the first gp return
// register.
- DCHECK_EQ(kReturnRegister0.code(),
- create_values_descriptor->GetReturnLocation(0).AsRegister());
LiftoffRegister values_array{kReturnRegister0};
pinned.set(values_array);
@@ -3647,46 +4116,29 @@ class LiftoffCompiler {
for (size_t param_idx = sig->parameter_count(); param_idx > 0;
--param_idx) {
ValueType type = sig->GetParam(param_idx - 1);
- if (type != kWasmI32) {
- unsupported(decoder, kExceptionHandling,
- "unsupported type in exception payload");
- return;
- }
StoreExceptionValue(type, values_array.gp(), &index, pinned);
}
DCHECK_EQ(0, index);
// Load the exception tag.
DEBUG_CODE_COMMENT("load exception tag");
- Register exception_tag =
- pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_TAGGED_PTR_INSTANCE_FIELD(exception_tag, ExceptionsTable, pinned);
+ LiftoffRegister exception_tag =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(exception_tag.gp(), ExceptionsTable, pinned);
__ LoadTaggedPointer(
- exception_tag, exception_tag, no_reg,
+ exception_tag.gp(), exception_tag.gp(), no_reg,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), {});
// Finally, call WasmThrow.
- DEBUG_CODE_COMMENT("call WasmThrow builtin");
- compiler::CallDescriptor* throw_descriptor =
- GetBuiltinCallDescriptor<WasmThrowDescriptor>(compilation_zone_);
-
- ValueKind throw_sig_reps[] = {kPointerValueType, kPointerValueType};
- ValueKindSig throw_sig(0, 2, throw_sig_reps);
-
- __ PrepareBuiltinCall(
- &throw_sig, throw_descriptor,
- {LiftoffAssembler::VarState{kPointerValueType,
- LiftoffRegister{exception_tag}, 0},
- LiftoffAssembler::VarState{kPointerValueType, values_array, 0}});
- source_position_table_builder_.AddPosition(
- __ pc_offset(), SourcePosition(decoder->position()), true);
- __ CallRuntimeStub(WasmCode::kWasmThrow);
- DefineSafepoint();
- }
+ CallRuntimeStub(WasmCode::kWasmThrow,
+ MakeSig::Params(kPointerKind, kPointerKind),
+ {LiftoffAssembler::VarState{kPointerKind, exception_tag, 0},
+ LiftoffAssembler::VarState{kPointerKind, values_array, 0}},
+ decoder->position());
- void Rethrow(FullDecoder* decoder, const Value& exception) {
- unsupported(decoder, kExceptionHandling, "rethrow");
+ EmitLandingPad(decoder);
}
+
void AtomicStoreMem(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm) {
LiftoffRegList pinned;
@@ -3840,16 +4292,29 @@ class LiftoffCompiler {
#endif
}
- template <typename BuiltinDescriptor>
- compiler::CallDescriptor* GetBuiltinCallDescriptor(Zone* zone) {
- BuiltinDescriptor interface_descriptor;
- return compiler::Linkage::GetStubCallDescriptor(
- zone, // zone
+ void CallRuntimeStub(WasmCode::RuntimeStubId stub_id, const ValueKindSig& sig,
+ std::initializer_list<LiftoffAssembler::VarState> params,
+ int position) {
+ DEBUG_CODE_COMMENT(
+ // NOLINTNEXTLINE(whitespace/braces)
+ (std::string{"call builtin: "} + GetRuntimeStubName(stub_id)).c_str());
+ auto interface_descriptor = Builtins::CallInterfaceDescriptorFor(
+ RuntimeStubIdToBuiltinName(stub_id));
+ auto* call_descriptor = compiler::Linkage::GetStubCallDescriptor(
+ compilation_zone_, // zone
interface_descriptor, // descriptor
interface_descriptor.GetStackParameterCount(), // stack parameter count
compiler::CallDescriptor::kNoFlags, // flags
compiler::Operator::kNoProperties, // properties
StubCallMode::kCallWasmRuntimeStub); // stub call mode
+
+ __ PrepareBuiltinCall(&sig, call_descriptor, params);
+ if (position != kNoSourcePosition) {
+ source_position_table_builder_.AddPosition(
+ __ pc_offset(), SourcePosition(position), true);
+ }
+ __ CallRuntimeStub(stub_id);
+ DefineSafepoint();
}
void AtomicWait(FullDecoder* decoder, ValueKind kind,
@@ -3885,45 +4350,19 @@ class LiftoffCompiler {
// above in {AddMemoryMasking}.
index.MakeRegister(LiftoffRegister(index_plus_offset));
- WasmCode::RuntimeStubId target;
- compiler::CallDescriptor* call_descriptor;
- if (kind == kI32) {
- if (kNeedI64RegPair) {
- target = WasmCode::kWasmI32AtomicWait32;
- call_descriptor =
- GetBuiltinCallDescriptor<WasmI32AtomicWait32Descriptor>(
- compilation_zone_);
- } else {
- target = WasmCode::kWasmI32AtomicWait64;
- call_descriptor =
- GetBuiltinCallDescriptor<WasmI32AtomicWait64Descriptor>(
- compilation_zone_);
- }
- } else {
- if (kNeedI64RegPair) {
- target = WasmCode::kWasmI64AtomicWait32;
- call_descriptor =
- GetBuiltinCallDescriptor<WasmI64AtomicWait32Descriptor>(
- compilation_zone_);
- } else {
- target = WasmCode::kWasmI64AtomicWait64;
- call_descriptor =
- GetBuiltinCallDescriptor<WasmI64AtomicWait64Descriptor>(
- compilation_zone_);
- }
- }
-
- ValueKind sig_reps[] = {kPointerValueType, kind, kI64};
- ValueKindSig sig(0, 3, sig_reps);
+ static constexpr WasmCode::RuntimeStubId kTargets[2][2]{
+ // 64 bit systems (kNeedI64RegPair == false):
+ {WasmCode::kWasmI64AtomicWait64, WasmCode::kWasmI32AtomicWait64},
+ // 32 bit systems (kNeedI64RegPair == true):
+ {WasmCode::kWasmI64AtomicWait32, WasmCode::kWasmI32AtomicWait32}};
+ auto target = kTargets[kNeedI64RegPair][kind == kI32];
- __ PrepareBuiltinCall(&sig, call_descriptor,
- {index, expected_value, timeout});
- __ CallRuntimeStub(target);
- DefineSafepoint();
+ CallRuntimeStub(target, MakeSig::Params(kPointerKind, kind, kI64),
+ {index, expected_value, timeout}, decoder->position());
// Pop parameters from the value stack.
__ DropValues(3);
- RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
+ RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
__ PushRegister(kI32, LiftoffRegister(kReturnRegister0));
}
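Editor's note: the AtomicWait change replaces four nested if/else branches with a constexpr 2x2 table indexed by two booleans (register-pair platform, i32 vs. i64 wait). The same selection pattern in isolation, with placeholder stub names:

#include <iostream>

enum Stub { kI64Wait64, kI32Wait64, kI64Wait32, kI32Wait32 };

int main() {
  // Rows: whether i64 needs a register pair (i.e. a 32-bit target);
  // columns: whether the wait operates on an i32 value.
  static constexpr Stub kTargets[2][2] = {
      {kI64Wait64, kI32Wait64},   // 64-bit platforms
      {kI64Wait32, kI32Wait32}};  // 32-bit platforms
  constexpr bool kNeedI64RegPair = false;  // pretend we target a 64-bit platform
  bool is_i32 = true;
  Stub target = kTargets[kNeedI64RegPair][is_i32];  // bools index as 0/1
  std::cout << target << "\n";  // prints 1 (kI32Wait64)
  return 0;
}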
@@ -3949,22 +4388,17 @@ class LiftoffCompiler {
__ emit_ptrsize_addi(index_plus_offset, index_plus_offset, offset);
}
- ValueKind sig_reps[] = {kI32, kPointerValueType, kI32};
- ValueKindSig sig(1, 2, sig_reps);
- auto call_descriptor =
- GetBuiltinCallDescriptor<WasmAtomicNotifyDescriptor>(compilation_zone_);
-
LiftoffAssembler::VarState count = __ cache_state()->stack_state.end()[-1];
LiftoffAssembler::VarState index = __ cache_state()->stack_state.end()[-2];
index.MakeRegister(LiftoffRegister(index_plus_offset));
- __ PrepareBuiltinCall(&sig, call_descriptor, {index, count});
- __ CallRuntimeStub(WasmCode::kWasmAtomicNotify);
- DefineSafepoint();
+ CallRuntimeStub(WasmCode::kWasmAtomicNotify,
+ MakeSig::Returns(kI32).Params(kPointerKind, kI32),
+ {index, count}, decoder->position());
// Pop parameters from the value stack.
__ DropValues(2);
- RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
+ RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
__ PushRegister(kI32, LiftoffRegister(kReturnRegister0));
}
@@ -4112,16 +4546,16 @@ class LiftoffCompiler {
__ LoadConstant(segment_index, WasmValue(imm.data_segment_index));
ExternalReference ext_ref = ExternalReference::wasm_memory_init();
- ValueKind sig_reps[] = {kI32, kPointerValueType, kI32, kI32, kI32, kI32};
- ValueKindSig sig(1, 5, sig_reps);
+ auto sig =
+ MakeSig::Returns(kI32).Params(kPointerKind, kI32, kI32, kI32, kI32);
LiftoffRegister args[] = {LiftoffRegister(instance), dst, src,
segment_index, size};
// We don't need the instance anymore after the call. We can use the
// register for the result.
LiftoffRegister result(instance);
- GenerateCCall(&result, &sig, kStmt, args, ext_ref);
- Label* trap_label = AddOutOfLineTrap(
- decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds);
+ GenerateCCall(&result, &sig, kVoid, args, ext_ref);
+ Label* trap_label =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds);
__ emit_cond_jump(kEqual, trap_label, kI32, result.gp());
}
@@ -4155,15 +4589,14 @@ class LiftoffCompiler {
Register instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
__ FillInstanceInto(instance);
ExternalReference ext_ref = ExternalReference::wasm_memory_copy();
- ValueKind sig_reps[] = {kI32, kPointerValueType, kI32, kI32, kI32};
- ValueKindSig sig(1, 4, sig_reps);
+ auto sig = MakeSig::Returns(kI32).Params(kPointerKind, kI32, kI32, kI32);
LiftoffRegister args[] = {LiftoffRegister(instance), dst, src, size};
// We don't need the instance anymore after the call. We can use the
// register for the result.
LiftoffRegister result(instance);
- GenerateCCall(&result, &sig, kStmt, args, ext_ref);
- Label* trap_label = AddOutOfLineTrap(
- decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds);
+ GenerateCCall(&result, &sig, kVoid, args, ext_ref);
+ Label* trap_label =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds);
__ emit_cond_jump(kEqual, trap_label, kI32, result.gp());
}
@@ -4177,22 +4610,20 @@ class LiftoffCompiler {
Register instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
__ FillInstanceInto(instance);
ExternalReference ext_ref = ExternalReference::wasm_memory_fill();
- ValueKind sig_reps[] = {kI32, kPointerValueType, kI32, kI32, kI32};
- ValueKindSig sig(1, 4, sig_reps);
+ auto sig = MakeSig::Returns(kI32).Params(kPointerKind, kI32, kI32, kI32);
LiftoffRegister args[] = {LiftoffRegister(instance), dst, value, size};
// We don't need the instance anymore after the call. We can use the
// register for the result.
LiftoffRegister result(instance);
- GenerateCCall(&result, &sig, kStmt, args, ext_ref);
- Label* trap_label = AddOutOfLineTrap(
- decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds);
+ GenerateCCall(&result, &sig, kVoid, args, ext_ref);
+ Label* trap_label =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds);
__ emit_cond_jump(kEqual, trap_label, kI32, result.gp());
}
void LoadSmi(LiftoffRegister reg, int value) {
Address smi_value = Smi::FromInt(value).ptr();
- using smi_type =
- std::conditional_t<kSmiValueType == kI32, int32_t, int64_t>;
+ using smi_type = std::conditional_t<kSmiKind == kI32, int32_t, int64_t>;
__ LoadConstant(reg, WasmValue{static_cast<smi_type>(smi_value)});
}
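Editor's note: LoadSmi materializes Smi::FromInt(value).ptr() as a raw word constant sized by kSmiKind, and callers such as TableGrow undo the encoding with SmiUntag. A sketch of the general tagged-small-integer scheme (payload shifted up, low tag bit 0 marking a Smi); the actual shift in V8 depends on pointer compression, so treat the constants here as illustrative only:

#include <cassert>
#include <cstdint>

constexpr int kSmiTagSize = 1;
constexpr intptr_t kSmiTag = 0;

static intptr_t SmiFromInt(int32_t value) {
  return (static_cast<intptr_t>(value) << kSmiTagSize) | kSmiTag;
}

static int32_t SmiUntag(intptr_t smi) {
  assert((smi & ((intptr_t{1} << kSmiTagSize) - 1)) == kSmiTag);
  return static_cast<int32_t>(smi >> kSmiTagSize);  // arithmetic shift restores sign
}

int main() {
  assert(SmiUntag(SmiFromInt(42)) == 42);
  assert(SmiUntag(SmiFromInt(-7)) == -7);
  return 0;
}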
@@ -4203,35 +4634,27 @@ class LiftoffCompiler {
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LoadSmi(table_index_reg, imm.table.index);
- LiftoffAssembler::VarState table_index(kPointerValueType, table_index_reg,
- 0);
+ LiftoffAssembler::VarState table_index(kPointerKind, table_index_reg, 0);
LiftoffRegister segment_index_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LoadSmi(segment_index_reg, imm.elem_segment_index);
- LiftoffAssembler::VarState segment_index(kPointerValueType,
- segment_index_reg, 0);
+ LiftoffAssembler::VarState segment_index(kPointerKind, segment_index_reg,
+ 0);
LiftoffAssembler::VarState size = __ cache_state()->stack_state.end()[-1];
LiftoffAssembler::VarState src = __ cache_state()->stack_state.end()[-2];
LiftoffAssembler::VarState dst = __ cache_state()->stack_state.end()[-3];
- WasmCode::RuntimeStubId target = WasmCode::kWasmTableInit;
- compiler::CallDescriptor* call_descriptor =
- GetBuiltinCallDescriptor<WasmTableInitDescriptor>(compilation_zone_);
-
- ValueKind sig_reps[] = {kI32, kI32, kI32, kSmiValueType, kSmiValueType};
- ValueKindSig sig(0, 5, sig_reps);
-
- __ PrepareBuiltinCall(&sig, call_descriptor,
- {dst, src, size, table_index, segment_index});
- __ CallRuntimeStub(target);
- DefineSafepoint();
+ CallRuntimeStub(WasmCode::kWasmTableInit,
+ MakeSig::Params(kI32, kI32, kI32, kSmiKind, kSmiKind),
+ {dst, src, size, table_index, segment_index},
+ decoder->position());
// Pop parameters from the value stack.
__ cache_state()->stack_state.pop_back(3);
- RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
+ RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
}
void ElemDrop(FullDecoder* decoder, const ElemDropImmediate<validate>& imm) {
@@ -4260,66 +4683,113 @@ class LiftoffCompiler {
LiftoffRegister table_dst_index_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LoadSmi(table_dst_index_reg, imm.table_dst.index);
- LiftoffAssembler::VarState table_dst_index(kPointerValueType,
+ LiftoffAssembler::VarState table_dst_index(kPointerKind,
table_dst_index_reg, 0);
LiftoffRegister table_src_index_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LoadSmi(table_src_index_reg, imm.table_src.index);
- LiftoffAssembler::VarState table_src_index(kPointerValueType,
+ LiftoffAssembler::VarState table_src_index(kPointerKind,
table_src_index_reg, 0);
LiftoffAssembler::VarState size = __ cache_state()->stack_state.end()[-1];
LiftoffAssembler::VarState src = __ cache_state()->stack_state.end()[-2];
LiftoffAssembler::VarState dst = __ cache_state()->stack_state.end()[-3];
- WasmCode::RuntimeStubId target = WasmCode::kWasmTableCopy;
- compiler::CallDescriptor* call_descriptor =
- GetBuiltinCallDescriptor<WasmTableCopyDescriptor>(compilation_zone_);
-
- ValueKind sig_reps[] = {kI32, kI32, kI32, kSmiValueType, kSmiValueType};
- ValueKindSig sig(0, 5, sig_reps);
-
- __ PrepareBuiltinCall(&sig, call_descriptor,
- {dst, src, size, table_dst_index, table_src_index});
- __ CallRuntimeStub(target);
- DefineSafepoint();
+ CallRuntimeStub(WasmCode::kWasmTableCopy,
+ MakeSig::Params(kI32, kI32, kI32, kSmiKind, kSmiKind),
+ {dst, src, size, table_dst_index, table_src_index},
+ decoder->position());
// Pop parameters from the value stack.
__ cache_state()->stack_state.pop_back(3);
- RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
+ RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
}
void TableGrow(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
- const Value& value, const Value& delta, Value* result) {
- unsupported(decoder, kRefTypes, "table.grow");
+ const Value&, const Value&, Value* result) {
+ LiftoffRegList pinned;
+
+ LiftoffRegister table_index_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LoadSmi(table_index_reg, imm.index);
+ LiftoffAssembler::VarState table_index(kPointerKind, table_index_reg, 0);
+
+ LiftoffAssembler::VarState delta = __ cache_state()->stack_state.end()[-1];
+ LiftoffAssembler::VarState value = __ cache_state()->stack_state.end()[-2];
+
+ CallRuntimeStub(
+ WasmCode::kWasmTableGrow,
+ MakeSig::Returns(kSmiKind).Params(kSmiKind, kI32, kTaggedKind),
+ {table_index, delta, value}, decoder->position());
+
+ // Pop parameters from the value stack.
+ __ cache_state()->stack_state.pop_back(2);
+
+ RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
+ __ SmiUntag(kReturnRegister0);
+ __ PushRegister(kI32, LiftoffRegister(kReturnRegister0));
}
void TableSize(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
- Value* result) {
- unsupported(decoder, kRefTypes, "table.size");
+ Value*) {
+ // We have to look up instance->tables[table_index].length.
+
+ LiftoffRegList pinned;
+    // Load the instance's tables array.
+ Register tables = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(tables, Tables, pinned);
+
+ Register table = tables;
+ __ LoadTaggedPointer(
+ table, tables, no_reg,
+ ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), pinned);
+
+ int length_field_size = WasmTableObject::kCurrentLengthOffsetEnd -
+ WasmTableObject::kCurrentLengthOffset + 1;
+
+ Register result = table;
+ __ Load(LiftoffRegister(result), table, no_reg,
+ wasm::ObjectAccess::ToTagged(WasmTableObject::kCurrentLengthOffset),
+ length_field_size == 4 ? LoadType::kI32Load : LoadType::kI64Load,
+ pinned);
+
+ __ SmiUntag(result);
+ __ PushRegister(kI32, LiftoffRegister(result));
}
void TableFill(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
- const Value& start, const Value& value, const Value& count) {
- unsupported(decoder, kRefTypes, "table.fill");
+ const Value&, const Value&, const Value&) {
+ LiftoffRegList pinned;
+
+ LiftoffRegister table_index_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LoadSmi(table_index_reg, imm.index);
+ LiftoffAssembler::VarState table_index(kPointerKind, table_index_reg, 0);
+
+ LiftoffAssembler::VarState count = __ cache_state()->stack_state.end()[-1];
+ LiftoffAssembler::VarState value = __ cache_state()->stack_state.end()[-2];
+ LiftoffAssembler::VarState start = __ cache_state()->stack_state.end()[-3];
+
+ CallRuntimeStub(WasmCode::kWasmTableFill,
+ MakeSig::Params(kSmiKind, kI32, kI32, kTaggedKind),
+ {table_index, start, count, value}, decoder->position());
+
+ // Pop parameters from the value stack.
+ __ cache_state()->stack_state.pop_back(3);
+
+ RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
}
void StructNew(FullDecoder* decoder,
const StructIndexImmediate<validate>& imm, const Value& rtt,
bool initial_values_on_stack) {
- WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateStructWithRtt;
- compiler::CallDescriptor* call_descriptor =
- GetBuiltinCallDescriptor<WasmAllocateStructWithRttDescriptor>(
- compilation_zone_);
- ValueKind sig_reps[] = {kRef, rtt.type.kind()};
- ValueKindSig sig(1, 1, sig_reps);
LiftoffAssembler::VarState rtt_value =
__ cache_state()->stack_state.end()[-1];
- __ PrepareBuiltinCall(&sig, call_descriptor, {rtt_value});
- __ CallRuntimeStub(target);
- DefineSafepoint();
+ CallRuntimeStub(WasmCode::kWasmAllocateStructWithRtt,
+ MakeSig::Returns(kRef).Params(rtt.type.kind()), {rtt_value},
+ decoder->position());
// Drop the RTT.
__ cache_state()->stack_state.pop_back(1);
@@ -4386,13 +4856,13 @@ class LiftoffCompiler {
}
void ArrayNew(FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm,
- ValueKind rtt_type, bool initial_value_on_stack) {
+ ValueKind rtt_kind, bool initial_value_on_stack) {
// Max length check.
{
LiftoffRegister length =
__ LoadToRegister(__ cache_state()->stack_state.end()[-2], {});
- Label* trap_label = AddOutOfLineTrap(
- decoder->position(), WasmCode::kThrowWasmTrapArrayOutOfBounds);
+ Label* trap_label =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapArrayOutOfBounds);
__ emit_i32_cond_jumpi(kUnsignedGreaterThan, trap_label, length.gp(),
static_cast<int>(wasm::kV8MaxWasmArrayLength));
}
@@ -4400,12 +4870,6 @@ class LiftoffCompiler {
int elem_size = element_size_bytes(elem_kind);
// Allocate the array.
{
- WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateArrayWithRtt;
- compiler::CallDescriptor* call_descriptor =
- GetBuiltinCallDescriptor<WasmAllocateArrayWithRttDescriptor>(
- compilation_zone_);
- ValueKind sig_reps[] = {kRef, rtt_type, kI32, kI32};
- ValueKindSig sig(1, 3, sig_reps);
LiftoffAssembler::VarState rtt_var =
__ cache_state()->stack_state.end()[-1];
LiftoffAssembler::VarState length_var =
@@ -4413,10 +4877,11 @@ class LiftoffCompiler {
LiftoffRegister elem_size_reg = __ GetUnusedRegister(kGpReg, {});
__ LoadConstant(elem_size_reg, WasmValue(elem_size));
LiftoffAssembler::VarState elem_size_var(kI32, elem_size_reg, 0);
- __ PrepareBuiltinCall(&sig, call_descriptor,
- {rtt_var, length_var, elem_size_var});
- __ CallRuntimeStub(target);
- DefineSafepoint();
+
+ CallRuntimeStub(WasmCode::kWasmAllocateArrayWithRtt,
+ MakeSig::Returns(kRef).Params(rtt_kind, kI32, kI32),
+ {rtt_var, length_var, elem_size_var},
+ decoder->position());
// Drop the RTT.
__ cache_state()->stack_state.pop_back(1);
}
@@ -4483,7 +4948,8 @@ class LiftoffCompiler {
if (elem_size_shift != 0) {
__ emit_i32_shli(index.gp(), index.gp(), elem_size_shift);
}
- LiftoffRegister value = __ GetUnusedRegister(kGpReg, {array}, pinned);
+ LiftoffRegister value =
+ __ GetUnusedRegister(reg_class_for(elem_kind), pinned);
LoadObjectField(value, array.gp(), index.gp(),
wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize),
elem_kind, is_signed, pinned);
@@ -4495,6 +4961,8 @@ class LiftoffCompiler {
const Value& index_val, const Value& value_val) {
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
+ DCHECK_EQ(reg_class_for(imm.array_type->element_type().kind()),
+ value.reg_class());
LiftoffRegister index = pinned.set(__ PopToModifiableRegister(pinned));
LiftoffRegister array = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, array.gp(), pinned, array_obj.type);
@@ -4571,23 +5039,19 @@ class LiftoffCompiler {
void RttSub(FullDecoder* decoder, uint32_t type_index, const Value& parent,
Value* result) {
ValueKind parent_value_kind = parent.type.kind();
- ValueKind rtt_value_type = kRttWithDepth;
- WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateRtt;
- compiler::CallDescriptor* call_descriptor =
- GetBuiltinCallDescriptor<WasmAllocateRttDescriptor>(compilation_zone_);
- ValueKind sig_reps[] = {rtt_value_type, kI32, parent_value_kind};
- ValueKindSig sig(1, 2, sig_reps);
+ ValueKind rtt_value_kind = kRttWithDepth;
LiftoffAssembler::VarState parent_var =
__ cache_state()->stack_state.end()[-1];
LiftoffRegister type_reg = __ GetUnusedRegister(kGpReg, {});
__ LoadConstant(type_reg, WasmValue(type_index));
LiftoffAssembler::VarState type_var(kI32, type_reg, 0);
- __ PrepareBuiltinCall(&sig, call_descriptor, {type_var, parent_var});
- __ CallRuntimeStub(target);
- DefineSafepoint();
+ CallRuntimeStub(
+ WasmCode::kWasmAllocateRtt,
+ MakeSig::Returns(rtt_value_kind).Params(kI32, parent_value_kind),
+ {type_var, parent_var}, decoder->position());
// Drop the parent RTT.
__ cache_state()->stack_state.pop_back(1);
- __ PushRegister(rtt_value_type, LiftoffRegister(kReturnRegister0));
+ __ PushRegister(rtt_value_kind, LiftoffRegister(kReturnRegister0));
}
enum NullSucceeds : bool { // --
@@ -4666,17 +5130,11 @@ class LiftoffCompiler {
// Preserve {obj_reg} across the call.
LiftoffRegList saved_regs = LiftoffRegList::ForRegs(obj_reg);
__ PushRegisters(saved_regs);
- WasmCode::RuntimeStubId target = WasmCode::kWasmSubtypeCheck;
- compiler::CallDescriptor* call_descriptor =
- GetBuiltinCallDescriptor<WasmSubtypeCheckDescriptor>(
- compilation_zone_);
- ValueKind sig_reps[] = {kI32, kOptRef, rtt.type.kind()};
- ValueKindSig sig(1, 2, sig_reps);
- LiftoffAssembler::VarState rtt_state(kPointerValueType, rtt_reg, 0);
- LiftoffAssembler::VarState tmp1_state(kPointerValueType, tmp1, 0);
- __ PrepareBuiltinCall(&sig, call_descriptor, {tmp1_state, rtt_state});
- __ CallRuntimeStub(target);
- DefineSafepoint();
+ LiftoffAssembler::VarState rtt_state(kPointerKind, rtt_reg, 0);
+ LiftoffAssembler::VarState tmp1_state(kPointerKind, tmp1, 0);
+ CallRuntimeStub(WasmCode::kWasmSubtypeCheck,
+ MakeSig::Returns(kI32).Params(kOptRef, rtt.type.kind()),
+ {tmp1_state, rtt_state}, decoder->position());
__ PopRegisters(saved_regs);
__ Move(tmp1.gp(), kReturnRegister0, kI32);
__ emit_i32_cond_jumpi(kEqual, no_match, tmp1.gp(), 0);
@@ -4708,8 +5166,8 @@ class LiftoffCompiler {
void RefCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
Value* result) {
- Label* trap_label = AddOutOfLineTrap(decoder->position(),
- WasmCode::kThrowWasmTrapIllegalCast);
+ Label* trap_label =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapIllegalCast);
LiftoffRegister obj_reg =
SubtypeCheck(decoder, obj, rtt, trap_label, kNullSucceeds);
__ PushRegister(obj.type.kind(), obj_reg);
@@ -4729,7 +5187,7 @@ class LiftoffCompiler {
SubtypeCheck(decoder, obj, rtt, &cont_false, kNullFails);
__ PushRegister(rtt.type.is_bottom() ? kBottom : obj.type.kind(), obj_reg);
- BrOrRet(decoder, depth);
+ BrOrRet(decoder, depth, 0);
__ bind(&cont_false);
// Drop the branch's value, restore original value.
@@ -4844,8 +5302,8 @@ class LiftoffCompiler {
template <TypeChecker type_checker>
void AbstractTypeCast(const Value& object, FullDecoder* decoder,
ValueKind result_kind) {
- Label* trap_label = AddOutOfLineTrap(decoder->position(),
- WasmCode::kThrowWasmTrapIllegalCast);
+ Label* trap_label =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapIllegalCast);
Label match;
LiftoffRegister obj_reg =
(this->*type_checker)(object, trap_label, {}, no_reg);
@@ -4883,7 +5341,7 @@ class LiftoffCompiler {
__ bind(&match);
__ PushRegister(result_kind, obj_reg);
- BrOrRet(decoder, br_depth);
+ BrOrRet(decoder, br_depth, 0);
__ bind(&no_match);
// Drop the branch's value, restore original value.
@@ -4925,7 +5383,7 @@ class LiftoffCompiler {
void CallDirect(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
- const Value args[], Value returns[], CallKind call_kind) {
+ const Value args[], Value returns[], TailCall tail_call) {
ValueKindSig* sig = MakeKindSig(compilation_zone_, imm.sig);
for (ValueKind ret : sig->returns()) {
if (!CheckSupportedType(decoder, ret, "return")) return;
@@ -4958,9 +5416,9 @@ class LiftoffCompiler {
Register* explicit_instance = &imported_function_ref;
__ PrepareCall(sig, call_descriptor, &target, explicit_instance);
- if (call_kind == kReturnCall) {
+ if (tail_call) {
__ PrepareTailCall(
- static_cast<int>(call_descriptor->StackParameterCount()),
+ static_cast<int>(call_descriptor->ParameterSlotCount()),
static_cast<int>(
call_descriptor->GetStackParameterDelta(descriptor_)));
__ TailCallIndirect(target);
@@ -4974,10 +5432,10 @@ class LiftoffCompiler {
__ PrepareCall(sig, call_descriptor);
// Just encode the function index. This will be patched at instantiation.
Address addr = static_cast<Address>(imm.index);
- if (call_kind == kReturnCall) {
+ if (tail_call) {
DCHECK(descriptor_->CanTailCall(call_descriptor));
__ PrepareTailCall(
- static_cast<int>(call_descriptor->StackParameterCount()),
+ static_cast<int>(call_descriptor->ParameterSlotCount()),
static_cast<int>(
call_descriptor->GetStackParameterDelta(descriptor_)));
__ TailCallNativeWasmCode(addr);
@@ -4988,19 +5446,18 @@ class LiftoffCompiler {
}
}
- DefineSafepoint();
- RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
-
- __ FinishCall(sig, call_descriptor);
+ if (!tail_call) {
+ DefineSafepoint();
+ RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
+ EmitLandingPad(decoder);
+ __ FinishCall(sig, call_descriptor);
+ }
}
void CallIndirect(FullDecoder* decoder, const Value& index_val,
const CallIndirectImmediate<validate>& imm,
- CallKind call_kind) {
+ TailCall tail_call) {
ValueKindSig* sig = MakeKindSig(compilation_zone_, imm.sig);
- if (imm.table_index != 0) {
- return unsupported(decoder, kRefTypes, "table index != 0");
- }
for (ValueKind ret : sig->returns()) {
if (!CheckSupportedType(decoder, ret, "return")) return;
}
@@ -5013,10 +5470,23 @@ class LiftoffCompiler {
Register table = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
Register tmp_const = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
Register scratch = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ Register indirect_function_table = no_reg;
+ if (imm.table_index != 0) {
+ Register indirect_function_tables =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(indirect_function_tables,
+ IndirectFunctionTables, pinned);
+
+ indirect_function_table = indirect_function_tables;
+ __ LoadTaggedPointer(
+ indirect_function_table, indirect_function_tables, no_reg,
+ ObjectAccess::ElementOffsetInTaggedFixedArray(imm.table_index),
+ pinned);
+ }
// Bounds check against the table size.
- Label* invalid_func_label = AddOutOfLineTrap(
- decoder->position(), WasmCode::kThrowWasmTrapTableOutOfBounds);
+ Label* invalid_func_label =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapTableOutOfBounds);
uint32_t canonical_sig_num =
env_->module->canonicalized_type_ids[imm.sig_index];
@@ -5025,8 +5495,15 @@ class LiftoffCompiler {
// Compare against table size stored in
// {instance->indirect_function_table_size}.
- LOAD_INSTANCE_FIELD(tmp_const, IndirectFunctionTableSize, kUInt32Size,
- pinned);
+ if (imm.table_index == 0) {
+ LOAD_INSTANCE_FIELD(tmp_const, IndirectFunctionTableSize, kUInt32Size,
+ pinned);
+ } else {
+ __ Load(
+ LiftoffRegister(tmp_const), indirect_function_table, no_reg,
+ wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kSizeOffset),
+ LoadType::kI32Load, pinned);
+ }
__ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kI32, index,
tmp_const);
@@ -5054,21 +5531,29 @@ class LiftoffCompiler {
DEBUG_CODE_COMMENT("Check indirect call signature");
// Load the signature from {instance->ift_sig_ids[key]}
- LOAD_INSTANCE_FIELD(table, IndirectFunctionTableSigIds, kSystemPointerSize,
- pinned);
+ if (imm.table_index == 0) {
+ LOAD_INSTANCE_FIELD(table, IndirectFunctionTableSigIds,
+ kSystemPointerSize, pinned);
+ } else {
+ __ Load(LiftoffRegister(table), indirect_function_table, no_reg,
+ wasm::ObjectAccess::ToTagged(
+ WasmIndirectFunctionTable::kSigIdsOffset),
+ kPointerLoadType, pinned);
+ }
// Shift {index} by 2 (multiply by 4) to represent kInt32Size items.
STATIC_ASSERT((1 << 2) == kInt32Size);
__ emit_i32_shli(index, index, 2);
__ Load(LiftoffRegister(scratch), table, index, 0, LoadType::kI32Load,
pinned);
+ // TODO(9495): Do not always compare signatures, same as wasm-compiler.cc.
// Compare against expected signature.
__ LoadConstant(LiftoffRegister(tmp_const), WasmValue(canonical_sig_num));
- Label* sig_mismatch_label = AddOutOfLineTrap(
- decoder->position(), WasmCode::kThrowWasmTrapFuncSigMismatch);
- __ emit_cond_jump(kUnequal, sig_mismatch_label, LiftoffAssembler::kIntPtr,
- scratch, tmp_const);
+ Label* sig_mismatch_label =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapFuncSigMismatch);
+ __ emit_cond_jump(kUnequal, sig_mismatch_label, kPointerKind, scratch,
+ tmp_const);
// At this point {index} has already been multiplied by 4.
DEBUG_CODE_COMMENT("Execute indirect call");
@@ -5080,7 +5565,14 @@ class LiftoffCompiler {
// At this point {index} has already been multiplied by kTaggedSize.
// Load the instance from {instance->ift_instances[key]}
- LOAD_TAGGED_PTR_INSTANCE_FIELD(table, IndirectFunctionTableRefs, pinned);
+ if (imm.table_index == 0) {
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(table, IndirectFunctionTableRefs, pinned);
+ } else {
+ __ LoadTaggedPointer(
+ table, indirect_function_table, no_reg,
+ wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kRefsOffset),
+ pinned);
+ }
__ LoadTaggedPointer(tmp_const, table, index,
ObjectAccess::ElementOffsetInTaggedFixedArray(0),
pinned);
@@ -5095,8 +5587,15 @@ class LiftoffCompiler {
Register* explicit_instance = &tmp_const;
// Load the target from {instance->ift_targets[key]}
- LOAD_INSTANCE_FIELD(table, IndirectFunctionTableTargets, kSystemPointerSize,
- pinned);
+ if (imm.table_index == 0) {
+ LOAD_INSTANCE_FIELD(table, IndirectFunctionTableTargets,
+ kSystemPointerSize, pinned);
+ } else {
+ __ Load(LiftoffRegister(table), indirect_function_table, no_reg,
+ wasm::ObjectAccess::ToTagged(
+ WasmIndirectFunctionTable::kTargetsOffset),
+ kPointerLoadType, pinned);
+ }
__ Load(LiftoffRegister(scratch), table, index, 0, kPointerLoadType,
pinned);
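Taken together, the call_indirect hunks above lift the old "table index != 0" bailout. Table 0 keeps reading its metadata from the flat fields on the instance, while any other table first loads its WasmIndirectFunctionTable object from the instance's IndirectFunctionTables fixed array and then reads the same four pieces of data from that object (names as they appear in the hunks):

                      table 0 (instance field)         table > 0 (WasmIndirectFunctionTable)
  table size          IndirectFunctionTableSize        kSizeOffset
  signature ids       IndirectFunctionTableSigIds      kSigIdsOffset
  instance refs       IndirectFunctionTableRefs        kRefsOffset
  call targets        IndirectFunctionTableTargets     kTargetsOffset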
@@ -5107,9 +5606,9 @@ class LiftoffCompiler {
Register target = scratch;
__ PrepareCall(sig, call_descriptor, &target, explicit_instance);
- if (call_kind == kReturnCall) {
+ if (tail_call) {
__ PrepareTailCall(
- static_cast<int>(call_descriptor->StackParameterCount()),
+ static_cast<int>(call_descriptor->ParameterSlotCount()),
static_cast<int>(
call_descriptor->GetStackParameterDelta(descriptor_)));
__ TailCallIndirect(target);
@@ -5117,16 +5616,16 @@ class LiftoffCompiler {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
__ CallIndirect(sig, call_descriptor, target);
- }
-
- DefineSafepoint();
- RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
- __ FinishCall(sig, call_descriptor);
+ DefineSafepoint();
+ RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
+ EmitLandingPad(decoder);
+ __ FinishCall(sig, call_descriptor);
+ }
}
void CallRef(FullDecoder* decoder, ValueType func_ref_type,
- const FunctionSig* type_sig, CallKind call_kind) {
+ const FunctionSig* type_sig, TailCall tail_call) {
ValueKindSig* sig = MakeKindSig(compilation_zone_, type_sig);
for (ValueKind ret : sig->returns()) {
if (!CheckSupportedType(decoder, ret, "return")) return;
@@ -5183,11 +5682,10 @@ class LiftoffCompiler {
WasmExportedFunctionData::kInstanceOffset),
pinned);
LiftoffRegister func_index = target;
- __ LoadTaggedSignedAsInt32(
- func_index, func_data.gp(),
- wasm::ObjectAccess::ToTagged(
- WasmExportedFunctionData::kFunctionIndexOffset),
- pinned);
+ __ LoadSmiAsInt32(func_index, func_data.gp(),
+ wasm::ObjectAccess::ToTagged(
+ WasmExportedFunctionData::kFunctionIndexOffset),
+ pinned);
LiftoffRegister imported_function_refs = temp;
__ LoadTaggedPointer(imported_function_refs.gp(), callee_instance.gp(),
no_reg,
@@ -5214,11 +5712,10 @@ class LiftoffCompiler {
WasmInstanceObject::kJumpTableStartOffset),
kPointerLoadType, pinned);
LiftoffRegister jump_table_offset = temp;
- __ LoadTaggedSignedAsInt32(
- jump_table_offset, func_data.gp(),
- wasm::ObjectAccess::ToTagged(
- WasmExportedFunctionData::kJumpTableOffsetOffset),
- pinned);
+ __ LoadSmiAsInt32(jump_table_offset, func_data.gp(),
+ wasm::ObjectAccess::ToTagged(
+ WasmExportedFunctionData::kJumpTableOffsetOffset),
+ pinned);
__ emit_ptrsize_add(target.gp(), jump_table_start.gp(),
jump_table_offset.gp());
__ emit_jump(&perform_call);
@@ -5279,23 +5776,16 @@ class LiftoffCompiler {
LiftoffRegList saved_regs = LiftoffRegList::ForRegs(func_data);
__ PushRegisters(saved_regs);
- WasmCode::RuntimeStubId builtin = WasmCode::kWasmAllocatePair;
- compiler::CallDescriptor* builtin_call_descriptor =
- GetBuiltinCallDescriptor<WasmAllocatePairDescriptor>(
- compilation_zone_);
- ValueKind sig_reps[] = {kOptRef, kOptRef, kOptRef};
- ValueKindSig builtin_sig(1, 2, sig_reps);
LiftoffRegister current_instance = instance;
__ FillInstanceInto(current_instance.gp());
LiftoffAssembler::VarState instance_var(kOptRef, current_instance, 0);
LiftoffAssembler::VarState callable_var(kOptRef, callable, 0);
- __ PrepareBuiltinCall(&builtin_sig, builtin_call_descriptor,
- {instance_var, callable_var});
- __ CallRuntimeStub(builtin);
- DefineSafepoint();
+ CallRuntimeStub(WasmCode::kWasmAllocatePair,
+ MakeSig::Returns(kOptRef).Params(kOptRef, kOptRef),
+ {instance_var, callable_var}, decoder->position());
if (instance.gp() != kReturnRegister0) {
- __ Move(instance.gp(), kReturnRegister0, LiftoffAssembler::kIntPtr);
+ __ Move(instance.gp(), kReturnRegister0, kPointerKind);
}
// Restore {func_data}, which we saved across the call.
@@ -5317,9 +5807,9 @@ class LiftoffCompiler {
Register target_reg = target.gp();
Register instance_reg = instance.gp();
__ PrepareCall(sig, call_descriptor, &target_reg, &instance_reg);
- if (call_kind == kReturnCall) {
+ if (tail_call) {
__ PrepareTailCall(
- static_cast<int>(call_descriptor->StackParameterCount()),
+ static_cast<int>(call_descriptor->ParameterSlotCount()),
static_cast<int>(
call_descriptor->GetStackParameterDelta(descriptor_)));
__ TailCallIndirect(target_reg);
@@ -5327,10 +5817,12 @@ class LiftoffCompiler {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
__ CallIndirect(sig, call_descriptor, target_reg);
+
+ DefineSafepoint();
+ RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
+ EmitLandingPad(decoder);
+ __ FinishCall(sig, call_descriptor);
}
- DefineSafepoint();
- RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
- __ FinishCall(sig, call_descriptor);
}
void LoadNullValue(Register null, LiftoffRegList pinned) {
@@ -5340,11 +5832,18 @@ class LiftoffCompiler {
pinned);
}
+ void LoadExceptionSymbol(Register dst, LiftoffRegList pinned,
+ RootIndex root_index) {
+ LOAD_INSTANCE_FIELD(dst, IsolateRoot, kSystemPointerSize, pinned);
+ uint32_t offset_imm = IsolateData::root_slot_offset(root_index);
+ __ LoadFullPointer(dst, dst, offset_imm);
+ }
+
void MaybeEmitNullCheck(FullDecoder* decoder, Register object,
LiftoffRegList pinned, ValueType type) {
if (!type.is_nullable()) return;
- Label* trap_label = AddOutOfLineTrap(
- decoder->position(), WasmCode::kThrowWasmTrapNullDereference);
+ Label* trap_label =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapNullDereference);
LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
LoadNullValue(null.gp(), pinned);
__ emit_cond_jump(LiftoffCondition::kEqual, trap_label, kOptRef, object,
@@ -5353,8 +5852,8 @@ class LiftoffCompiler {
void BoundsCheck(FullDecoder* decoder, LiftoffRegister array,
LiftoffRegister index, LiftoffRegList pinned) {
- Label* trap_label = AddOutOfLineTrap(
- decoder->position(), WasmCode::kThrowWasmTrapArrayOutOfBounds);
+ Label* trap_label =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapArrayOutOfBounds);
LiftoffRegister length = __ GetUnusedRegister(kGpReg, pinned);
constexpr int kLengthOffset =
wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset);
@@ -5372,7 +5871,7 @@ class LiftoffCompiler {
void LoadObjectField(LiftoffRegister dst, Register src, Register offset_reg,
int offset, ValueKind kind, bool is_signed,
LiftoffRegList pinned) {
- if (is_reference_type(kind)) {
+ if (is_reference(kind)) {
__ LoadTaggedPointer(dst.gp(), src, offset_reg, offset, pinned);
} else {
// Primitive kind.
@@ -5384,7 +5883,7 @@ class LiftoffCompiler {
void StoreObjectField(Register obj, Register offset_reg, int offset,
LiftoffRegister value, LiftoffRegList pinned,
ValueKind kind) {
- if (is_reference_type(kind)) {
+ if (is_reference(kind)) {
__ StoreTaggedPointer(obj, offset_reg, offset, value, pinned);
} else {
// Primitive kind.
@@ -5414,7 +5913,7 @@ class LiftoffCompiler {
return LoadNullValue(reg.gp(), pinned);
case kRtt:
case kRttWithDepth:
- case kStmt:
+ case kVoid:
case kBottom:
case kRef:
UNREACHABLE();
@@ -5448,6 +5947,10 @@ class LiftoffCompiler {
}
static constexpr WasmOpcode kNoOutstandingOp = kExprUnreachable;
+ static constexpr base::EnumSet<ValueKind> kUnconditionallySupported{
+ kI32, kI64, kF32, kF64};
+ static constexpr base::EnumSet<ValueKind> kExternRefSupported{
+ kRef, kOptRef, kRtt, kRttWithDepth, kI8, kI16};
LiftoffAssembler asm_;
@@ -5455,6 +5958,8 @@ class LiftoffCompiler {
// Set by the first opcode, reset by the second.
WasmOpcode outstanding_op_ = kNoOutstandingOp;
+ // {supported_types_} is updated in {MaybeBailoutForUnsupportedType}.
+ base::EnumSet<ValueKind> supported_types_ = kUnconditionallySupported;
compiler::CallDescriptor* const descriptor_;
CompilationEnv* const env_;
DebugSideTableBuilder* const debug_sidetable_builder_;
@@ -5488,6 +5993,20 @@ class LiftoffCompiler {
// at the first breakable opcode in the function (if compiling for debugging).
bool did_function_entry_break_checks_ = false;
+ // Depth of the current try block.
+ int32_t current_catch_ = -1;
+
+ struct HandlerInfo {
+ MovableLabel handler;
+ int pc_offset;
+ };
+
+ ZoneVector<HandlerInfo> handlers_;
+ int handler_table_offset_ = Assembler::kNoHandlerTable;
+
+ // Current number of exception refs on the stack.
+ int num_exceptions_ = 0;
+
bool has_outstanding_op() const {
return outstanding_op_ != kNoOutstandingOp;
}
@@ -5512,6 +6031,11 @@ class LiftoffCompiler {
__ cache_state()->DefineSafepoint(safepoint);
}
+ void DefineSafepointWithCalleeSavedRegisters() {
+ Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);
+ __ cache_state()->DefineSafepointWithCalleeSavedRegisters(safepoint);
+ }
+
Register LoadInstanceIntoRegister(LiftoffRegList pinned, Register fallback) {
Register instance = __ cache_state()->cached_instance;
if (instance == no_reg) {
@@ -5526,6 +6050,13 @@ class LiftoffCompiler {
DISALLOW_IMPLICIT_CONSTRUCTORS(LiftoffCompiler);
};
+// static
+constexpr WasmOpcode LiftoffCompiler::kNoOutstandingOp;
+// static
+constexpr base::EnumSet<ValueKind> LiftoffCompiler::kUnconditionallySupported;
+// static
+constexpr base::EnumSet<ValueKind> LiftoffCompiler::kExternRefSupported;
+
} // namespace
WasmCompilationResult ExecuteLiftoffCompilation(
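The three namespace-scope definitions just above are needed presumably because this file still builds as C++14: a static constexpr data member that is odr-used must be defined out of class, and only C++17 makes such members implicitly inline. A minimal illustration of the pattern (not V8 code):

  struct Config {
    static constexpr int kMax = 8;   // in-class declaration with initializer
  };
  constexpr int Config::kMax;        // out-of-class definition, required pre-C++17 once kMax is odr-used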
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 94ba6f783ec..ca715a8a328 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -375,7 +375,7 @@ int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
}
bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
- return kind == kS128 || is_reference_type(kind);
+ return kind == kS128 || is_reference(kind);
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
@@ -446,6 +446,12 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
static_cast<uint32_t>(offset_imm), LoadType::kI32Load, pinned);
}
+void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
+ int32_t offset_imm) {
+ MemOperand src_op = MemOperand(src_addr, offset_imm);
+ lw(dst, src_op);
+}
+
void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
@@ -1517,9 +1523,8 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
DCHECK_EQ(kind, kI32);
TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
} else {
- DCHECK(kind == kI32 ||
- (is_reference_type(kind) &&
- (liftoff_cond == kEqual || liftoff_cond == kUnequal)));
+ DCHECK(kind == kI32 || (is_reference(kind) && (liftoff_cond == kEqual ||
+ liftoff_cond == kUnequal)));
TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
}
}
@@ -2019,9 +2024,9 @@ void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
bailout(kSimd, "emit_v128_anytrue");
}
-void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_v8x16_alltrue");
+ bailout(kSimd, "emit_i8x16_alltrue");
}
void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
@@ -2095,11 +2100,6 @@ void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
bailout(kSimd, "emit_i8x16_sub_sat_u");
}
-void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_mul");
-}
-
void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2134,9 +2134,9 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
bailout(kSimd, "emit_i16x8_neg");
}
-void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_v16x8_alltrue");
+ bailout(kSimd, "emit_i16x8_alltrue");
}
void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
@@ -2254,9 +2254,9 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
bailout(kSimd, "emit_i32x4_neg");
}
-void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_v32x4_alltrue");
+ bailout(kSimd, "emit_i32x4_alltrue");
}
void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
@@ -2356,9 +2356,9 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
bailout(kSimd, "emit_i64x2_neg");
}
-void LiftoffAssembler::emit_v64x2_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_v64x2_alltrue");
+ bailout(kSimd, "emit_i64x2_alltrue");
}
void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
@@ -2948,7 +2948,7 @@ void LiftoffAssembler::CallC(const ValueKindSig* sig,
}
// Load potential output value from the buffer on the stack.
- if (out_argument_kind != kStmt) {
+ if (out_argument_kind != kVoid) {
liftoff::Load(this, *next_result_reg, sp, 0, out_argument_kind);
}
@@ -2998,23 +2998,35 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
addiu(sp, sp, size);
}
-void LiftoffStackSlots::Construct() {
+void LiftoffStackSlots::Construct(int param_slots) {
+ DCHECK_LT(0, slots_.size());
+ SortInPushOrder();
+ int last_stack_slot = param_slots;
for (auto& slot : slots_) {
+ const int stack_slot = slot.dst_slot_;
+ int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
+ DCHECK_LT(0, stack_decrement);
+ last_stack_slot = stack_slot;
const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack: {
if (src.kind() == kF64) {
+ asm_->AllocateStackSpace(stack_decrement - kDoubleSize);
DCHECK_EQ(kLowWord, slot.half_);
asm_->lw(kScratchReg,
liftoff::GetHalfStackSlot(slot.src_offset_, kHighWord));
asm_->push(kScratchReg);
+ } else {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
}
asm_->lw(kScratchReg,
liftoff::GetHalfStackSlot(slot.src_offset_, slot.half_));
asm_->push(kScratchReg);
break;
}
- case LiftoffAssembler::VarState::kRegister:
+ case LiftoffAssembler::VarState::kRegister: {
+ int pushed_bytes = SlotSizeInBytes(slot);
+ asm_->AllocateStackSpace(stack_decrement - pushed_bytes);
if (src.kind() == kI64) {
liftoff::push(
asm_, slot.half_ == kLowWord ? src.reg().low() : src.reg().high(),
@@ -3023,8 +3035,10 @@ void LiftoffStackSlots::Construct() {
liftoff::push(asm_, src.reg(), src.kind());
}
break;
+ }
case LiftoffAssembler::VarState::kIntConst: {
// The high word is the sign extension of the low word.
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
asm_->li(kScratchReg,
Operand(slot.half_ == kLowWord ? src.i32_const()
: src.i32_const() >> 31));
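LiftoffStackSlots::Construct now receives the total number of parameter stack slots and walks the sorted slots from the highest destination slot downwards, explicitly opening the gap down to each slot before pushing, so every value lands in its own slot even when the destinations are not contiguous. A small worked example, assuming kSystemPointerSize == 4 as on mips32 and pointer-sized values throughout (slot indices are illustrative):

  // param_slots = 4; slots_ (after SortInPushOrder) target dst_slot_ 3, 2 and 0.
  // last_stack_slot starts at 4.
  //   slot 3: stack_decrement = (4 - 3) * 4 = 4  -> AllocateStackSpace(0), push 4 bytes
  //   slot 2: stack_decrement = (3 - 2) * 4 = 4  -> AllocateStackSpace(0), push 4 bytes
  //   slot 0: stack_decrement = (2 - 0) * 4 = 8  -> AllocateStackSpace(4) skips slot 1,
  //                                                 then push 4 bytes into slot 0
  // Each value ends up exactly at its dst_slot_, gaps included.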
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index deb54995b16..a5a9f8ce231 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -362,7 +362,7 @@ int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
}
bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
- return kind == kS128 || is_reference_type(kind);
+ return kind == kS128 || is_reference(kind);
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
@@ -431,6 +431,12 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Ld(dst, src_op);
}
+void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
+ int32_t offset_imm) {
+ MemOperand src_op = liftoff::GetMemOp(this, src_addr, no_reg, offset_imm);
+ Ld(dst, src_op);
+}
+
void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
@@ -1372,7 +1378,7 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
} else {
DCHECK((kind == kI32 || kind == kI64) ||
- (is_reference_type(kind) &&
+ (is_reference(kind) &&
(liftoff_cond == kEqual || liftoff_cond == kUnequal)));
TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
}
@@ -1983,7 +1989,7 @@ void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
liftoff::EmitAnyTrue(this, dst, src);
}
-void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue(this, dst, src, MSA_BRANCH_B);
}
@@ -2073,11 +2079,6 @@ void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
subs_u_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- mulv_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
-}
-
void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2113,7 +2114,7 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
subv_h(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
}
-void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue(this, dst, src, MSA_BRANCH_H);
}
@@ -2237,7 +2238,7 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
subv_w(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
}
-void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue(this, dst, src, MSA_BRANCH_W);
}
@@ -2341,7 +2342,7 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
subv_d(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
}
-void LiftoffAssembler::emit_v64x2_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue(this, dst, src, MSA_BRANCH_D);
}
@@ -3115,7 +3116,7 @@ void LiftoffAssembler::CallC(const ValueKindSig* sig,
}
// Load potential output value from the buffer on the stack.
- if (out_argument_kind != kStmt) {
+ if (out_argument_kind != kVoid) {
liftoff::Load(this, *next_result_reg, MemOperand(sp, 0), out_argument_kind);
}
@@ -3165,25 +3166,38 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
Daddu(sp, sp, size);
}
-void LiftoffStackSlots::Construct() {
+void LiftoffStackSlots::Construct(int param_slots) {
+ DCHECK_LT(0, slots_.size());
+ SortInPushOrder();
+ int last_stack_slot = param_slots;
for (auto& slot : slots_) {
+ const int stack_slot = slot.dst_slot_;
+ int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
+ DCHECK_LT(0, stack_decrement);
+ last_stack_slot = stack_slot;
const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack:
if (src.kind() != kS128) {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
asm_->Ld(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
asm_->push(kScratchReg);
} else {
+ asm_->AllocateStackSpace(stack_decrement - kSimd128Size);
asm_->Ld(kScratchReg, liftoff::GetStackSlot(slot.src_offset_ - 8));
asm_->push(kScratchReg);
asm_->Ld(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
asm_->push(kScratchReg);
}
break;
- case LiftoffAssembler::VarState::kRegister:
+ case LiftoffAssembler::VarState::kRegister: {
+ int pushed_bytes = SlotSizeInBytes(slot);
+ asm_->AllocateStackSpace(stack_decrement - pushed_bytes);
liftoff::push(asm_, src.reg(), src.kind());
break;
+ }
case LiftoffAssembler::VarState::kIntConst: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
asm_->li(kScratchReg, Operand(src.i32_const()));
asm_->push(kScratchReg);
break;
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 644d392594f..4e99821a27d 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -82,7 +82,7 @@ int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
}
bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
- return (kind == kS128 || is_reference_type(kind));
+ return (kind == kS128 || is_reference(kind));
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
@@ -120,6 +120,11 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
bailout(kUnsupportedArchitecture, "LoadTaggedPointer");
}
+void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
+ int32_t offset_imm) {
+ bailout(kUnsupportedArchitecture, "LoadFullPointer");
+}
+
void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
@@ -843,9 +848,9 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i64x2neg");
}
-void LiftoffAssembler::emit_v64x2_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "v64x2_alltrue");
+ bailout(kSimd, "i64x2_alltrue");
}
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
@@ -967,9 +972,9 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i32x4neg");
}
-void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "v32x4_alltrue");
+ bailout(kSimd, "i32x4_alltrue");
}
void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
@@ -1098,9 +1103,9 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i16x8neg");
}
-void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "v16x8_alltrue");
+ bailout(kSimd, "i16x8_alltrue");
}
void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
@@ -1303,9 +1308,9 @@ void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
bailout(kSimd, "v8x16_anytrue");
}
-void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "v8x16_alltrue");
+ bailout(kSimd, "i8x16_alltrue");
}
void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
@@ -1721,11 +1726,6 @@ void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i8x16subsaturate_u");
}
-void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i8x16mul");
-}
-
void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -1801,7 +1801,7 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
bailout(kUnsupportedArchitecture, "DeallocateStackSlot");
}
-void LiftoffStackSlots::Construct() {
+void LiftoffStackSlots::Construct(int param_slots) {
asm_->bailout(kUnsupportedArchitecture, "LiftoffStackSlots::Construct");
}
diff --git a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
index 2f624f79f5c..47f8ce2125d 100644
--- a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
+++ b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
@@ -91,21 +91,21 @@ inline MemOperand GetMemOp(LiftoffAssembler* assm, Register addr,
}
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
- ValueType type) {
- switch (type.kind()) {
- case ValueType::kI32:
+ ValueKind kind) {
+ switch (kind) {
+ case kI32:
assm->Lw(dst.gp(), src);
break;
- case ValueType::kI64:
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kRtt:
+ case kI64:
+ case kRef:
+ case kOptRef:
+ case kRtt:
assm->Ld(dst.gp(), src);
break;
- case ValueType::kF32:
+ case kF32:
assm->LoadFloat(dst.fp(), src);
break;
- case ValueType::kF64:
+ case kF64:
assm->LoadDouble(dst.fp(), src);
break;
default:
@@ -114,22 +114,22 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
}
inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
- LiftoffRegister src, ValueType type) {
+ LiftoffRegister src, ValueKind kind) {
MemOperand dst(base, offset);
- switch (type.kind()) {
- case ValueType::kI32:
+ switch (kind) {
+ case kI32:
assm->Usw(src.gp(), dst);
break;
- case ValueType::kI64:
- case ValueType::kOptRef:
- case ValueType::kRef:
- case ValueType::kRtt:
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
assm->Usd(src.gp(), dst);
break;
- case ValueType::kF32:
+ case kF32:
assm->UStoreFloat(src.fp(), dst);
break;
- case ValueType::kF64:
+ case kF64:
assm->UStoreDouble(src.fp(), dst);
break;
default:
@@ -137,23 +137,23 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
}
}
-inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
- switch (type.kind()) {
- case ValueType::kI32:
+inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
+ switch (kind) {
+ case kI32:
assm->addi(sp, sp, -kSystemPointerSize);
assm->Sw(reg.gp(), MemOperand(sp, 0));
break;
- case ValueType::kI64:
- case ValueType::kOptRef:
- case ValueType::kRef:
- case ValueType::kRtt:
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
assm->push(reg.gp());
break;
- case ValueType::kF32:
+ case kF32:
assm->addi(sp, sp, -kSystemPointerSize);
assm->StoreFloat(reg.fp(), MemOperand(sp, 0));
break;
- case ValueType::kF64:
+ case kF64:
assm->addi(sp, sp, -kSystemPointerSize);
assm->StoreDouble(reg.fp(), MemOperand(sp, 0));
break;
@@ -334,18 +334,18 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
return liftoff::kInstanceOffset;
}
-int LiftoffAssembler::SlotSizeForType(ValueType type) {
- switch (type.kind()) {
- case ValueType::kS128:
- return type.element_size_bytes();
+int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
+ switch (kind) {
+ case kS128:
+ return element_size_bytes(kind);
default:
return kStackSlotSize;
}
}
-bool LiftoffAssembler::NeedsAlignment(ValueType type) {
- switch (type.kind()) {
- case ValueType::kS128:
+bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
+ switch (kind) {
+ case kS128:
return true;
default:
// No alignment because all other types are kStackSlotSize.
@@ -356,17 +356,17 @@ bool LiftoffAssembler::NeedsAlignment(ValueType type) {
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
switch (value.type().kind()) {
- case ValueType::kI32:
+ case kI32:
TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
break;
- case ValueType::kI64:
+ case kI64:
TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode));
break;
- case ValueType::kF32:
+ case kF32:
TurboAssembler::LoadFPRImmediate(reg.fp(),
value.to_f32_boxed().get_bits());
break;
- case ValueType::kF64:
+ case kF64:
TurboAssembler::LoadFPRImmediate(reg.fp(),
value.to_f64_boxed().get_bits());
break;
@@ -375,21 +375,26 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, int32_t offset,
- int size) {
- DCHECK_LE(0, offset);
+void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
Ld(dst, liftoff::GetInstanceOperand());
+}
+
+void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
+ int offset, int size) {
+ DCHECK_LE(0, offset);
DCHECK(size == 4 || size == 8);
+ MemOperand src{instance, offset};
if (size == 4) {
- Lw(dst, MemOperand(dst, offset));
+ Lw(dst, src);
} else {
- Ld(dst, MemOperand(dst, offset));
+ Ld(dst, src);
}
}
void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
- int32_t offset) {
- LoadFromInstance(dst, offset, kTaggedSize);
+ Register instance,
+ int offset) {
+ LoadFromInstance(dst, instance, offset, kTaggedSize);
}
void LiftoffAssembler::SpillInstance(Register instance) {
@@ -413,12 +418,15 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
LiftoffRegister src,
- LiftoffRegList pinned) {
+ LiftoffRegList pinned,
+ SkipWriteBarrier skip_write_barrier) {
STATIC_ASSERT(kTaggedSize == kInt64Size);
Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
Sd(src.gp(), dst_op);
+ if (skip_write_barrier) return;
+
Label write_barrier;
Label exit;
CheckPageFlag(dst_addr, scratch,
@@ -595,64 +603,64 @@ void LiftoffAssembler::AtomicFence() { sync(); }
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
MemOperand src(fp, kSystemPointerSize * (caller_slot_idx + 1));
- liftoff::Load(this, dst, src, type);
+ liftoff::Load(this, dst, src, kind);
}
void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
int32_t offset = kSystemPointerSize * (caller_slot_idx + 1);
- liftoff::Store(this, fp, offset, src, type);
+ liftoff::Store(this, fp, offset, src, kind);
}
void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
- ValueType type) {
- liftoff::Load(this, dst, MemOperand(sp, offset), type);
+ ValueKind kind) {
+ liftoff::Load(this, dst, MemOperand(sp, offset), kind);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
- ValueType type) {
+ ValueKind kind) {
DCHECK_NE(dst_offset, src_offset);
- LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
- Fill(reg, src_offset, type);
- Spill(dst_offset, reg, type);
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(kind), {});
+ Fill(reg, src_offset, kind);
+ Spill(dst_offset, reg, kind);
}
-void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
DCHECK_NE(dst, src);
// TODO(ksreten): Handle different sizes here.
TurboAssembler::Move(dst, src);
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
- ValueType type) {
+ ValueKind kind) {
DCHECK_NE(dst, src);
TurboAssembler::Move(dst, src);
}
-void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
- switch (type.kind()) {
- case ValueType::kI32:
+ switch (kind) {
+ case kI32:
Sw(reg.gp(), dst);
break;
- case ValueType::kI64:
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kRtt:
- case ValueType::kRttWithDepth:
+ case kI64:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
Sd(reg.gp(), dst);
break;
- case ValueType::kF32:
+ case kF32:
StoreFloat(reg.fp(), dst);
break;
- case ValueType::kF64:
+ case kF64:
TurboAssembler::StoreDouble(reg.fp(), dst);
break;
- case ValueType::kS128:
+ case kS128:
bailout(kSimd, "Spill S128");
break;
default:
@@ -664,15 +672,15 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
switch (value.type().kind()) {
- case ValueType::kI32: {
+ case kI32: {
LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
Sw(tmp.gp(), dst);
break;
}
- case ValueType::kI64:
- case ValueType::kRef:
- case ValueType::kOptRef: {
+ case kI64:
+ case kRef:
+ case kOptRef: {
LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
TurboAssembler::li(tmp.gp(), value.to_i64());
Sd(tmp.gp(), dst);
@@ -685,21 +693,21 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
}
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
MemOperand src = liftoff::GetStackSlot(offset);
- switch (type.kind()) {
- case ValueType::kI32:
+ switch (kind) {
+ case kI32:
Lw(reg.gp(), src);
break;
- case ValueType::kI64:
- case ValueType::kRef:
- case ValueType::kOptRef:
+ case kI64:
+ case kRef:
+ case kOptRef:
Ld(reg.gp(), src);
break;
- case ValueType::kF32:
+ case kF32:
LoadFloat(reg.fp(), src);
break;
- case ValueType::kF64:
+ case kF64:
TurboAssembler::LoadDouble(reg.fp(), src);
break;
default:
@@ -1206,15 +1214,15 @@ void LiftoffAssembler::emit_jump(Register target) {
}
void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueType type,
+ Label* label, ValueKind kind,
Register lhs, Register rhs) {
Condition cond = liftoff::ToCondition(liftoff_cond);
if (rhs == no_reg) {
- DCHECK(type == kWasmI32 || type == kWasmI64);
+ DCHECK(kind == kI32 || kind == kI64);
TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
} else {
- DCHECK((type == kWasmI32 || type == kWasmI64) ||
- (type.is_reference_type() &&
+ DCHECK((kind == kI32 || kind == kI64) ||
+ (is_reference(kind) &&
(liftoff_cond == kEqual || liftoff_cond == kUnequal)));
TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
}
@@ -1286,7 +1294,7 @@ void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
LiftoffRegister true_value,
LiftoffRegister false_value,
- ValueType type) {
+ ValueKind kind) {
return false;
}
@@ -1329,6 +1337,11 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
bailout(kSimd, "emit_i8x16_shuffle");
}
+void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_popcnt");
+}
+
void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -1360,6 +1373,21 @@ void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_i64x2_eq");
}
+void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2_ne");
+}
+
+void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2.gt_s");
+}
+
+void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2.ge_s");
+}
+
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_f32x4_splat");
@@ -1532,6 +1560,36 @@ void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_f32x4_le");
}
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f64x2.convert_low_i32x4_s");
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f64x2.convert_low_i32x4_u");
+}
+
+void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f64x2.promote_low_f32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f32x4.demote_f64x2_zero");
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.trunc_sat_f64x2_s_zero");
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.trunc_sat_f64x2_u_zero");
+}
+
void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_f64x2_eq");
@@ -1599,9 +1657,9 @@ void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
bailout(kSimd, "emit_v128_anytrue");
}
-void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_v8x16_alltrue");
+ bailout(kSimd, "emit_i8x16_alltrue");
}
void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
@@ -1675,11 +1733,6 @@ void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
bailout(kSimd, "emit_i8x16_sub_sat_u");
}
-void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_mul");
-}
-
void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -1709,9 +1762,9 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
bailout(kSimd, "emit_i16x8_neg");
}
-void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_v16x8_alltrue");
+ bailout(kSimd, "emit_i16x8_alltrue");
}
void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
@@ -1819,9 +1872,9 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
bailout(kSimd, "emit_i32x4_neg");
}
-void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_v32x4_alltrue");
+ bailout(kSimd, "emit_i32x4_alltrue");
}
void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
@@ -1911,6 +1964,11 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
bailout(kSimd, "emit_i64x2_neg");
}
+void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_alltrue");
+}
+
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i64x2_shl");
@@ -2222,6 +2280,32 @@ void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
bailout(kSimd, "emit_i16x8_abs");
}
+void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_abs");
+}
+
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.extadd_pairwise_i16x8_s");
+}
+
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.extadd_pairwise_i16x8_u");
+}
+
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i16x8.extadd_pairwise_i8x16_s");
+}
+
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i16x8.extadd_pairwise_i8x16_u");
+}
+
void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_i32x4_abs");
@@ -2403,17 +2487,17 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
}
-void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
- ValueType out_argument_type, int stack_bytes,
+ ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
Add64(sp, sp, Operand(-stack_bytes));
int arg_bytes = 0;
- for (ValueType param_type : sig->parameters()) {
- liftoff::Store(this, sp, arg_bytes, *args++, param_type);
- arg_bytes += param_type.element_size_bytes();
+ for (ValueKind param_kind : sig->parameters()) {
+ liftoff::Store(this, sp, arg_bytes, *args++, param_kind);
+ arg_bytes += element_size_bytes(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
@@ -2439,8 +2523,8 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
}
// Load potential output value from the buffer on the stack.
- if (out_argument_type != kWasmStmt) {
- liftoff::Load(this, *next_result_reg, MemOperand(sp, 0), out_argument_type);
+ if (out_argument_kind != kVoid) {
+ liftoff::Load(this, *next_result_reg, MemOperand(sp, 0), out_argument_kind);
}
Add64(sp, sp, Operand(stack_bytes));
@@ -2454,7 +2538,7 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
Jump(addr, RelocInfo::WASM_CALL);
}
-void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
if (target == no_reg) {
@@ -2489,18 +2573,39 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
Add64(sp, sp, Operand(size));
}
-void LiftoffStackSlots::Construct() {
+
+void LiftoffStackSlots::Construct(int param_slots) {
+ DCHECK_LT(0, slots_.size());
+ SortInPushOrder();
+ int last_stack_slot = param_slots;
for (auto& slot : slots_) {
+ const int stack_slot = slot.dst_slot_;
+ int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
+ DCHECK_LT(0, stack_decrement);
+ last_stack_slot = stack_slot;
const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack:
- asm_->Ld(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
- asm_->push(kScratchReg);
+ if (src.kind() != kS128) {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ asm_->Ld(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
+ asm_->push(kScratchReg);
+ } else {
+ asm_->AllocateStackSpace(stack_decrement - kSimd128Size);
+ asm_->Ld(kScratchReg, liftoff::GetStackSlot(slot.src_offset_ - 8));
+ asm_->push(kScratchReg);
+ asm_->Ld(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
+ asm_->push(kScratchReg);
+ }
break;
- case LiftoffAssembler::VarState::kRegister:
- liftoff::push(asm_, src.reg(), src.type());
+ case LiftoffAssembler::VarState::kRegister: {
+ int pushed_bytes = SlotSizeInBytes(slot);
+ asm_->AllocateStackSpace(stack_decrement - pushed_bytes);
+ liftoff::push(asm_, src.reg(), src.kind());
break;
+ }
case LiftoffAssembler::VarState::kIntConst: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
asm_->li(kScratchReg, Operand(src.i32_const()));
asm_->push(kScratchReg);
break;
@@ -2508,7 +2613,6 @@ void LiftoffStackSlots::Construct() {
}
}
}
-
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 7bb58877dc3..8560c91553f 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -99,7 +99,25 @@ int LiftoffAssembler::PrepareStackFrame() {
void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
int stack_param_delta) {
- bailout(kUnsupportedArchitecture, "PrepareTailCall");
+ Register scratch = r1;
+ // Push the return address and frame pointer to complete the stack frame.
+ lay(sp, MemOperand(sp, -2 * kSystemPointerSize));
+ LoadU64(scratch, MemOperand(fp, kSystemPointerSize));
+ StoreU64(scratch, MemOperand(sp, kSystemPointerSize));
+ LoadU64(scratch, MemOperand(fp));
+ StoreU64(scratch, MemOperand(sp));
+
+ // Shift the whole frame upwards.
+ int slot_count = num_callee_stack_params + 2;
+ for (int i = slot_count - 1; i >= 0; --i) {
+ LoadU64(scratch, MemOperand(sp, i * kSystemPointerSize));
+ StoreU64(scratch,
+ MemOperand(fp, (i - stack_param_delta) * kSystemPointerSize));
+ }
+
+ // Set the new stack and frame pointer.
+ lay(sp, MemOperand(fp, -stack_param_delta * kSystemPointerSize));
+ Pop(r14, fp);
}
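PrepareTailCall on s390 now rewrites the frame in place instead of bailing out: it copies the saved return address and frame pointer to the bottom of the current frame, shifts num_callee_stack_params + 2 pointer-sized slots from their sp-relative positions to fp-relative positions offset by stack_param_delta, then re-points sp and reloads r14/fp from the two slots copied first. Written out for slot_count = 4 and a delta of d, where x[i] abbreviates MemOperand(x, i * kSystemPointerSize) (illustration only):

  //   sp[3] -> fp[3 - d],  sp[2] -> fp[2 - d],  sp[1] -> fp[1 - d],  sp[0] -> fp[0 - d]
  //   sp = fp - d * kSystemPointerSize
  //   Pop(r14, fp)   // restores the caller's frame pointer and return address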
void LiftoffAssembler::AlignFrameSize() {}
@@ -144,7 +162,7 @@ int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
}
bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
- return (kind == kS128 || is_reference_type(kind));
+ return (kind == kS128 || is_reference(kind));
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
@@ -220,6 +238,12 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
MemOperand(src_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
}
+void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
+ int32_t offset_imm) {
+ UseScratchRegisterScope temps(this);
+ LoadU64(dst, MemOperand(src_addr, offset_imm), r1);
+}
+
void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
@@ -412,25 +436,230 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned) {
- bailout(kAtomics, "AtomicLoad");
+ Load(dst, src_addr, offset_reg, offset_imm, type, pinned, nullptr, true);
}
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned) {
- bailout(kAtomics, "AtomicStore");
+ lay(ip,
+ MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
+
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8: {
+ AtomicExchangeU8(ip, src.gp(), r1, r0);
+ break;
+ }
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16: {
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(r1, src.gp());
+ ShiftRightU32(r1, r1, Operand(16));
+#else
+ LoadU16(r1, src.gp());
+#endif
+ Push(r2);
+ AtomicExchangeU16(ip, r1, r2, r0);
+ Pop(r2);
+ break;
+ }
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32: {
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(r1, src.gp());
+#else
+ LoadU32(r1, src.gp());
+#endif
+ Label do_cs;
+ bind(&do_cs);
+ cs(r0, r1, MemOperand(ip));
+ bne(&do_cs, Label::kNear);
+ break;
+ }
+ case StoreType::kI64Store: {
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvgr(r1, src.gp());
+#else
+ mov(r1, src.gp());
+#endif
+ Label do_cs;
+ bind(&do_cs);
+ csg(r0, r1, MemOperand(ip));
+ bne(&do_cs, Label::kNear);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicAdd");
+ Register tmp1 =
+ GetUnusedRegister(
+ kGpReg, LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result))
+ .gp();
+ Register tmp2 =
+ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst_addr, offset_reg,
+ value, result, tmp1))
+ .gp();
+
+ lay(ip,
+ MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
+
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8: {
+ Label doadd;
+ bind(&doadd);
+ LoadU8(tmp1, MemOperand(ip));
+ AddS32(tmp2, tmp1, value.gp());
+ AtomicCmpExchangeU8(ip, result.gp(), tmp1, tmp2, r0, r1);
+ b(Condition(4), &doadd);
+ LoadU8(result.gp(), result.gp());
+ break;
+ }
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16: {
+ Label doadd;
+ bind(&doadd);
+ LoadU16(tmp1, MemOperand(ip));
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(tmp2, tmp1);
+ ShiftRightU32(tmp2, tmp2, Operand(16));
+ AddS32(tmp2, tmp2, value.gp());
+ lrvr(tmp2, tmp2);
+ ShiftRightU32(tmp2, tmp2, Operand(16));
+#else
+ AddS32(tmp2, tmp1, value.gp());
+#endif
+ AtomicCmpExchangeU16(ip, result.gp(), tmp1, tmp2, r0, r1);
+ b(Condition(4), &doadd);
+ LoadU16(result.gp(), result.gp());
+ break;
+ }
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32: {
+ Label doadd;
+ bind(&doadd);
+ LoadU32(tmp1, MemOperand(ip));
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(tmp2, tmp1);
+ AddS32(tmp2, tmp2, value.gp());
+ lrvr(tmp2, tmp2);
+#else
+ AddS32(tmp2, tmp1, value.gp());
+#endif
+ CmpAndSwap(tmp1, tmp2, MemOperand(ip));
+ b(Condition(4), &doadd);
+ LoadU32(result.gp(), tmp1);
+ break;
+ }
+ case StoreType::kI64Store: {
+ Label doadd;
+ bind(&doadd);
+ LoadU64(tmp1, MemOperand(ip));
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvgr(tmp2, tmp1);
+ AddS64(tmp2, tmp2, value.gp());
+ lrvgr(tmp2, tmp2);
+#else
+ AddS64(tmp2, tmp1, value.gp());
+#endif
+ CmpAndSwap64(tmp1, tmp2, MemOperand(ip));
+ b(Condition(4), &doadd);
+ mov(result.gp(), tmp1);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicSub");
+ Register tmp1 =
+ GetUnusedRegister(
+ kGpReg, LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result))
+ .gp();
+ Register tmp2 =
+ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst_addr, offset_reg,
+ value, result, tmp1))
+ .gp();
+
+ lay(ip,
+ MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
+
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8: {
+ Label do_again;
+ bind(&do_again);
+ LoadU8(tmp1, MemOperand(ip));
+ SubS32(tmp2, tmp1, value.gp());
+ AtomicCmpExchangeU8(ip, result.gp(), tmp1, tmp2, r0, r1);
+ b(Condition(4), &do_again);
+ LoadU8(result.gp(), result.gp());
+ break;
+ }
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16: {
+ Label do_again;
+ bind(&do_again);
+ LoadU16(tmp1, MemOperand(ip));
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(tmp2, tmp1);
+ ShiftRightU32(tmp2, tmp2, Operand(16));
+ SubS32(tmp2, tmp2, value.gp());
+ lrvr(tmp2, tmp2);
+ ShiftRightU32(tmp2, tmp2, Operand(16));
+#else
+ SubS32(tmp2, tmp1, value.gp());
+#endif
+ AtomicCmpExchangeU16(ip, result.gp(), tmp1, tmp2, r0, r1);
+ b(Condition(4), &do_again);
+ LoadU16(result.gp(), result.gp());
+ break;
+ }
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32: {
+ Label do_again;
+ bind(&do_again);
+ LoadU32(tmp1, MemOperand(ip));
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(tmp2, tmp1);
+ SubS32(tmp2, tmp2, value.gp());
+ lrvr(tmp2, tmp2);
+#else
+ SubS32(tmp2, tmp1, value.gp());
+#endif
+ CmpAndSwap(tmp1, tmp2, MemOperand(ip));
+ b(Condition(4), &do_again);
+ LoadU32(result.gp(), tmp1);
+ break;
+ }
+ case StoreType::kI64Store: {
+ Label do_again;
+ bind(&do_again);
+ LoadU64(tmp1, MemOperand(ip));
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvgr(tmp2, tmp1);
+ SubS64(tmp2, tmp2, value.gp());
+ lrvgr(tmp2, tmp2);
+#else
+ SubS64(tmp2, tmp1, value.gp());
+#endif
+ CmpAndSwap64(tmp1, tmp2, MemOperand(ip));
+ b(Condition(4), &do_again);
+ mov(result.gp(), tmp1);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
@@ -455,14 +684,134 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uintptr_t offset_imm,
LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicExchange");
+ lay(ip,
+ MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
+
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8: {
+ AtomicExchangeU8(ip, value.gp(), result.gp(), r0);
+ LoadU8(result.gp(), result.gp());
+ break;
+ }
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16: {
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(r1, value.gp());
+ ShiftRightU32(r1, r1, Operand(16));
+#else
+ LoadU16(r1, value.gp());
+#endif
+ AtomicExchangeU16(ip, r1, result.gp(), r0);
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(result.gp(), result.gp());
+ ShiftRightU32(result.gp(), result.gp(), Operand(16));
+#else
+ LoadU16(result.gp(), result.gp());
+#endif
+ break;
+ }
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32: {
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(r1, value.gp());
+#else
+ LoadU32(r1, value.gp());
+#endif
+ Label do_cs;
+ bind(&do_cs);
+ cs(result.gp(), r1, MemOperand(ip));
+ bne(&do_cs, Label::kNear);
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(result.gp(), result.gp());
+#endif
+ LoadU32(result.gp(), result.gp());
+ break;
+ }
+ case StoreType::kI64Store: {
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvgr(r1, value.gp());
+#else
+ mov(r1, value.gp());
+#endif
+ Label do_cs;
+ bind(&do_cs);
+ csg(result.gp(), r1, MemOperand(ip));
+ bne(&do_cs, Label::kNear);
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvgr(result.gp(), result.gp());
+#endif
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
StoreType type) {
- bailout(kAtomics, "AtomicCompareExchange");
+ lay(ip,
+ MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
+
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8: {
+ AtomicCmpExchangeU8(ip, result.gp(), expected.gp(), new_value.gp(), r0,
+ r1);
+ LoadU8(result.gp(), result.gp());
+ break;
+ }
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16: {
+ Push(r2, r3);
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(r2, expected.gp());
+ lrvr(r3, new_value.gp());
+ ShiftRightU32(r2, r2, Operand(16));
+ ShiftRightU32(r3, r3, Operand(16));
+#else
+ LoadU16(r2, expected.gp());
+ LoadU16(r3, new_value.gp());
+#endif
+ AtomicCmpExchangeU16(ip, result.gp(), r2, r3, r0, r1);
+ LoadU16(result.gp(), result.gp());
+ Pop(r2, r3);
+ break;
+ }
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32: {
+ Push(r2, r3);
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(r2, expected.gp());
+ lrvr(r3, new_value.gp());
+#else
+ LoadU32(r2, expected.gp());
+ LoadU32(r3, new_value.gp());
+#endif
+ CmpAndSwap(r2, r3, MemOperand(ip));
+ LoadU32(result.gp(), r2);
+ Pop(r2, r3);
+ break;
+ }
+ case StoreType::kI64Store: {
+ Push(r2, r3);
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvgr(r2, expected.gp());
+ lrvgr(r3, new_value.gp());
+#else
+ mov(r2, expected.gp());
+ mov(r3, new_value.gp());
+#endif
+ CmpAndSwap64(r2, r3, MemOperand(ip));
+ mov(result.gp(), r2);
+ Pop(r2, r3);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::AtomicFence() { bailout(kAtomics, "AtomicFence"); }
@@ -484,6 +833,7 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
case kRef:
case kRtt:
case kOptRef:
+ case kRttWithDepth:
case kI64: {
LoadU64(dst.gp(), MemOperand(fp, offset));
break;
@@ -562,6 +912,7 @@ void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
case kRef:
case kRtt:
case kOptRef:
+ case kRttWithDepth:
case kI64: {
LoadU64(dst.gp(), MemOperand(sp, offset));
break;
@@ -585,6 +936,12 @@ void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
}
}
+#ifdef V8_TARGET_BIG_ENDIAN
+constexpr int stack_bias = -4;
+#else
+constexpr int stack_bias = 0;
+#endif
+
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueKind kind) {
DCHECK_NE(dst_offset, src_offset);
@@ -608,6 +965,9 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
UNREACHABLE();
}
+ dst_offset += (length == 4 ? stack_bias : 0);
+ src_offset += (length == 4 ? stack_bias : 0);
+
if (is_int20(dst_offset)) {
lay(ip, liftoff::GetStackSlot(dst_offset));
} else {
@@ -645,28 +1005,28 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
DCHECK_LT(0, offset);
RecordUsedSpillOffset(offset);
- MemOperand dst = liftoff::GetStackSlot(offset);
+
switch (kind) {
case kI32:
- StoreU32(reg.gp(), dst);
+ StoreU32(reg.gp(), liftoff::GetStackSlot(offset + stack_bias));
break;
case kI64:
case kOptRef:
case kRef:
case kRtt:
case kRttWithDepth:
- StoreU64(reg.gp(), dst);
+ StoreU64(reg.gp(), liftoff::GetStackSlot(offset));
break;
case kF32:
- StoreF32(reg.fp(), dst);
+ StoreF32(reg.fp(), liftoff::GetStackSlot(offset + stack_bias));
break;
case kF64:
- StoreF64(reg.fp(), dst);
+ StoreF64(reg.fp(), liftoff::GetStackSlot(offset));
break;
case kS128: {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- StoreV128(reg.fp(), dst, scratch);
+ StoreV128(reg.fp(), liftoff::GetStackSlot(offset), scratch);
break;
}
default:
@@ -676,23 +1036,18 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
void LiftoffAssembler::Spill(int offset, WasmValue value) {
RecordUsedSpillOffset(offset);
- MemOperand dst = liftoff::GetStackSlot(offset);
UseScratchRegisterScope temps(this);
Register src = no_reg;
- if (!is_uint12(abs(dst.offset()))) {
- src = GetUnusedRegister(kGpReg, {}).gp();
- } else {
- src = temps.Acquire();
- }
+ src = ip;
switch (value.type().kind()) {
case kI32: {
mov(src, Operand(value.to_i32()));
- StoreU32(src, dst);
+ StoreU32(src, liftoff::GetStackSlot(offset + stack_bias));
break;
}
case kI64: {
mov(src, Operand(value.to_i64()));
- StoreU64(src, dst);
+ StoreU64(src, liftoff::GetStackSlot(offset));
break;
}
default:
@@ -702,27 +1057,27 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
}
void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
- MemOperand src = liftoff::GetStackSlot(offset);
switch (kind) {
case kI32:
- LoadS32(reg.gp(), src);
+ LoadS32(reg.gp(), liftoff::GetStackSlot(offset + stack_bias));
break;
case kI64:
case kRef:
case kOptRef:
case kRtt:
- LoadU64(reg.gp(), src);
+ case kRttWithDepth:
+ LoadU64(reg.gp(), liftoff::GetStackSlot(offset));
break;
case kF32:
- LoadF32(reg.fp(), src);
+ LoadF32(reg.fp(), liftoff::GetStackSlot(offset + stack_bias));
break;
case kF64:
- LoadF64(reg.fp(), src);
+ LoadF64(reg.fp(), liftoff::GetStackSlot(offset));
break;
case kS128: {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- LoadV128(reg.fp(), src, scratch);
+ LoadV128(reg.fp(), liftoff::GetStackSlot(offset), scratch);
break;
}
default:
@@ -787,114 +1142,133 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
#define LFR_TO_REG(reg) reg.gp()
-// V(name, instr, dtype, stype, dcast, scast, rcast)
-#define UNOP_LIST(V) \
- V(i32_clz, CountLeadingZerosU32, Register, Register, , , USE) \
- V(i32_ctz, CountTrailingZerosU32, Register, Register, , , USE) \
- V(i64_clz, CountLeadingZerosU64, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, USE) \
- V(i64_ctz, CountTrailingZerosU64, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, USE) \
- V(f32_abs, lpebr, DoubleRegister, DoubleRegister, , , USE) \
- V(f32_neg, lcebr, DoubleRegister, DoubleRegister, , , USE) \
- V(f32_sqrt, sqebr, DoubleRegister, DoubleRegister, , , USE) \
- V(f64_abs, lpdbr, DoubleRegister, DoubleRegister, , , USE) \
- V(f64_neg, lcdbr, DoubleRegister, DoubleRegister, , , USE) \
- V(f64_sqrt, sqdbr, DoubleRegister, DoubleRegister, , , USE)
-
-#define EMIT_UNOP_FUNCTION(name, instr, dtype, stype, dcast, scast, rcast) \
- void LiftoffAssembler::emit_##name(dtype dst, stype src) { \
+// V(name, instr, dtype, stype, dcast, scast, rcast, return_val, return_type)
+#define UNOP_LIST(V) \
+ V(i32_popcnt, Popcnt32, Register, Register, , , USE, true, bool) \
+ V(i64_popcnt, Popcnt64, LiftoffRegister, LiftoffRegister, LFR_TO_REG, \
+ LFR_TO_REG, USE, true, bool) \
+ V(u32_to_intptr, LoadU32, Register, Register, , , USE, , void) \
+ V(i32_signextend_i8, lbr, Register, Register, , , USE, , void) \
+ V(i32_signextend_i16, lhr, Register, Register, , , USE, , void) \
+ V(i64_signextend_i8, lgbr, LiftoffRegister, LiftoffRegister, LFR_TO_REG, \
+ LFR_TO_REG, USE, , void) \
+ V(i64_signextend_i16, lghr, LiftoffRegister, LiftoffRegister, LFR_TO_REG, \
+ LFR_TO_REG, USE, , void) \
+ V(i64_signextend_i32, LoadS32, LiftoffRegister, LiftoffRegister, LFR_TO_REG, \
+ LFR_TO_REG, USE, , void) \
+ V(i32_clz, CountLeadingZerosU32, Register, Register, , , USE, , void) \
+ V(i32_ctz, CountTrailingZerosU32, Register, Register, , , USE, , void) \
+ V(i64_clz, CountLeadingZerosU64, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_ctz, CountTrailingZerosU64, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(f32_abs, lpebr, DoubleRegister, DoubleRegister, , , USE, , void) \
+ V(f32_neg, lcebr, DoubleRegister, DoubleRegister, , , USE, , void) \
+ V(f32_sqrt, sqebr, DoubleRegister, DoubleRegister, , , USE, , void) \
+ V(f64_abs, lpdbr, DoubleRegister, DoubleRegister, , , USE, , void) \
+ V(f64_neg, lcdbr, DoubleRegister, DoubleRegister, , , USE, , void) \
+ V(f64_sqrt, sqdbr, DoubleRegister, DoubleRegister, , , USE, , void)
+
+#define EMIT_UNOP_FUNCTION(name, instr, dtype, stype, dcast, scast, rcast, \
+ ret, return_type) \
+ return_type LiftoffAssembler::emit_##name(dtype dst, stype src) { \
auto _dst = dcast(dst); \
auto _src = scast(src); \
instr(_dst, _src); \
rcast(_dst); \
+ return ret; \
}
UNOP_LIST(EMIT_UNOP_FUNCTION)
#undef EMIT_UNOP_FUNCTION
#undef UNOP_LIST
-// V(name, instr, dtype, stype1, stype2, dcast, scast1, scast2, rcast)
-#define BINOP_LIST(V) \
- V(f64_add, AddF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE) \
- V(f64_sub, SubF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE) \
- V(f64_mul, MulF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE) \
- V(f64_div, DivF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE) \
- V(f32_add, AddF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE) \
- V(f32_sub, SubF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE) \
- V(f32_mul, MulF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE) \
- V(f32_div, DivF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE) \
- V(i32_shli, ShiftLeftU32, Register, Register, int32_t, , , \
- INT32_AND_WITH_1F, SIGN_EXT) \
- V(i32_sari, ShiftRightS32, Register, Register, int32_t, , , \
- INT32_AND_WITH_1F, SIGN_EXT) \
- V(i32_shri, ShiftRightU32, Register, Register, int32_t, , , \
- INT32_AND_WITH_1F, SIGN_EXT) \
- V(i32_shl, ShiftLeftU32, Register, Register, Register, , , \
- REGISTER_AND_WITH_1F, SIGN_EXT) \
- V(i32_sar, ShiftRightS32, Register, Register, Register, , , \
- REGISTER_AND_WITH_1F, SIGN_EXT) \
- V(i32_shr, ShiftRightU32, Register, Register, Register, , , \
- REGISTER_AND_WITH_1F, SIGN_EXT) \
- V(i32_addi, AddS32, Register, Register, int32_t, , , Operand, SIGN_EXT) \
- V(i32_subi, SubS32, Register, Register, int32_t, , , Operand, SIGN_EXT) \
- V(i32_andi, And, Register, Register, int32_t, , , Operand, SIGN_EXT) \
- V(i32_ori, Or, Register, Register, int32_t, , , Operand, SIGN_EXT) \
- V(i32_xori, Xor, Register, Register, int32_t, , , Operand, SIGN_EXT) \
- V(i32_add, AddS32, Register, Register, Register, , , , SIGN_EXT) \
- V(i32_sub, SubS32, Register, Register, Register, , , , SIGN_EXT) \
- V(i32_and, And, Register, Register, Register, , , , SIGN_EXT) \
- V(i32_or, Or, Register, Register, Register, , , , SIGN_EXT) \
- V(i32_xor, Xor, Register, Register, Register, , , , SIGN_EXT) \
- V(i32_mul, MulS32, Register, Register, Register, , , , SIGN_EXT) \
- V(i64_add, AddS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE) \
- V(i64_sub, SubS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE) \
- V(i64_mul, MulS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE) \
- V(i64_and, AndP, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE) \
- V(i64_or, OrP, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE) \
- V(i64_xor, XorP, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE) \
- V(i64_shl, ShiftLeftU64, LiftoffRegister, LiftoffRegister, Register, \
- LFR_TO_REG, LFR_TO_REG, , USE) \
- V(i64_sar, ShiftRightS64, LiftoffRegister, LiftoffRegister, Register, \
- LFR_TO_REG, LFR_TO_REG, , USE) \
- V(i64_shr, ShiftRightU64, LiftoffRegister, LiftoffRegister, Register, \
- LFR_TO_REG, LFR_TO_REG, , USE) \
- V(i64_addi, AddS64, LiftoffRegister, LiftoffRegister, int64_t, LFR_TO_REG, \
- LFR_TO_REG, Operand, USE) \
- V(i64_andi, AndP, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
- LFR_TO_REG, Operand, USE) \
- V(i64_ori, OrP, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
- LFR_TO_REG, Operand, USE) \
- V(i64_xori, XorP, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
- LFR_TO_REG, Operand, USE) \
- V(i64_shli, ShiftLeftU64, LiftoffRegister, LiftoffRegister, int32_t, \
- LFR_TO_REG, LFR_TO_REG, Operand, USE) \
- V(i64_sari, ShiftRightS64, LiftoffRegister, LiftoffRegister, int32_t, \
- LFR_TO_REG, LFR_TO_REG, Operand, USE) \
- V(i64_shri, ShiftRightU64, LiftoffRegister, LiftoffRegister, int32_t, \
- LFR_TO_REG, LFR_TO_REG, Operand, USE)
+// V(name, instr, dtype, stype1, stype2, dcast, scast1, scast2, rcast,
+// return_val, return_type)
+#define BINOP_LIST(V) \
+ V(f64_add, AddF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_sub, SubF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_mul, MulF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_div, DivF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_add, AddF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_sub, SubF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_mul, MulF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_div, DivF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(i32_shli, ShiftLeftU32, Register, Register, int32_t, , , \
+ INT32_AND_WITH_1F, SIGN_EXT, , void) \
+ V(i32_sari, ShiftRightS32, Register, Register, int32_t, , , \
+ INT32_AND_WITH_1F, SIGN_EXT, , void) \
+ V(i32_shri, ShiftRightU32, Register, Register, int32_t, , , \
+ INT32_AND_WITH_1F, SIGN_EXT, , void) \
+ V(i32_shl, ShiftLeftU32, Register, Register, Register, , , \
+ REGISTER_AND_WITH_1F, SIGN_EXT, , void) \
+ V(i32_sar, ShiftRightS32, Register, Register, Register, , , \
+ REGISTER_AND_WITH_1F, SIGN_EXT, , void) \
+ V(i32_shr, ShiftRightU32, Register, Register, Register, , , \
+ REGISTER_AND_WITH_1F, SIGN_EXT, , void) \
+ V(i32_addi, AddS32, Register, Register, int32_t, , , Operand, SIGN_EXT, , \
+ void) \
+ V(i32_subi, SubS32, Register, Register, int32_t, , , Operand, SIGN_EXT, , \
+ void) \
+ V(i32_andi, And, Register, Register, int32_t, , , Operand, SIGN_EXT, , void) \
+ V(i32_ori, Or, Register, Register, int32_t, , , Operand, SIGN_EXT, , void) \
+ V(i32_xori, Xor, Register, Register, int32_t, , , Operand, SIGN_EXT, , void) \
+ V(i32_add, AddS32, Register, Register, Register, , , , SIGN_EXT, , void) \
+ V(i32_sub, SubS32, Register, Register, Register, , , , SIGN_EXT, , void) \
+ V(i32_and, And, Register, Register, Register, , , , SIGN_EXT, , void) \
+ V(i32_or, Or, Register, Register, Register, , , , SIGN_EXT, , void) \
+ V(i32_xor, Xor, Register, Register, Register, , , , SIGN_EXT, , void) \
+ V(i32_mul, MulS32, Register, Register, Register, , , , SIGN_EXT, , void) \
+ V(i64_add, AddS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_sub, SubS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_mul, MulS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_and, AndP, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_or, OrP, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_xor, XorP, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_shl, ShiftLeftU64, LiftoffRegister, LiftoffRegister, Register, \
+ LFR_TO_REG, LFR_TO_REG, , USE, , void) \
+ V(i64_sar, ShiftRightS64, LiftoffRegister, LiftoffRegister, Register, \
+ LFR_TO_REG, LFR_TO_REG, , USE, , void) \
+ V(i64_shr, ShiftRightU64, LiftoffRegister, LiftoffRegister, Register, \
+ LFR_TO_REG, LFR_TO_REG, , USE, , void) \
+ V(i64_addi, AddS64, LiftoffRegister, LiftoffRegister, int64_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE, , void) \
+ V(i64_andi, AndP, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE, , void) \
+ V(i64_ori, OrP, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE, , void) \
+ V(i64_xori, XorP, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE, , void) \
+ V(i64_shli, ShiftLeftU64, LiftoffRegister, LiftoffRegister, int32_t, \
+ LFR_TO_REG, LFR_TO_REG, Operand, USE, , void) \
+ V(i64_sari, ShiftRightS64, LiftoffRegister, LiftoffRegister, int32_t, \
+ LFR_TO_REG, LFR_TO_REG, Operand, USE, , void) \
+ V(i64_shri, ShiftRightU64, LiftoffRegister, LiftoffRegister, int32_t, \
+ LFR_TO_REG, LFR_TO_REG, Operand, USE, , void)
#define EMIT_BINOP_FUNCTION(name, instr, dtype, stype1, stype2, dcast, scast1, \
- scast2, rcast) \
- void LiftoffAssembler::emit_##name(dtype dst, stype1 lhs, stype2 rhs) { \
+ scast2, rcast, ret, return_type) \
+ return_type LiftoffAssembler::emit_##name(dtype dst, stype1 lhs, \
+ stype2 rhs) { \
auto _dst = dcast(dst); \
auto _lhs = scast1(lhs); \
auto _rhs = scast2(rhs); \
instr(_dst, _lhs, _rhs); \
rcast(_dst); \
+ return ret; \
}
BINOP_LIST(EMIT_BINOP_FUNCTION)
@@ -905,17 +1279,6 @@ BINOP_LIST(EMIT_BINOP_FUNCTION)
#undef REGISTER_AND_WITH_1F
#undef LFR_TO_REG
-bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
- bailout(kUnsupportedArchitecture, "i32_popcnt");
- return true;
-}
-
-bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
- LiftoffRegister src) {
- Popcnt64(dst.gp(), src.gp());
- return true;
-}
-
bool LiftoffAssembler::emit_f32_ceil(DoubleRegister dst, DoubleRegister src) {
fiebra(ROUND_TOWARD_POS_INF, dst, src);
return true;
@@ -1128,48 +1491,281 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
return true;
}
-void LiftoffAssembler::emit_u32_to_intptr(Register dst, Register src) {
-#ifdef V8_TARGET_ARCH_S390X
- bailout(kUnsupportedArchitecture, "emit_u32_to_intptr");
-#else
-// This is a nop on s390.
-#endif
+void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ constexpr uint64_t kF64SignBit = uint64_t{1} << 63;
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ MovDoubleToInt64(r0, lhs);
+ // Clear sign bit in {r0}.
+ AndP(r0, Operand(~kF64SignBit));
+
+ MovDoubleToInt64(scratch2, rhs);
+ // Isolate sign bit in {scratch2}.
+ AndP(scratch2, Operand(kF64SignBit));
+ // Combine {scratch2} into {r0}.
+ OrP(r0, r0, scratch2);
+ MovInt64ToDouble(dst, r0);
+}
+
+void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ constexpr uint64_t kF64SignBit = uint64_t{1} << 63;
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ MovDoubleToInt64(r0, lhs);
+ // Clear sign bit in {r0}.
+ AndP(r0, Operand(~kF64SignBit));
+
+ MovDoubleToInt64(scratch2, rhs);
+ // Isolate sign bit in {scratch2}.
+ AndP(scratch2, Operand(kF64SignBit));
+ // Combine {scratch2} into {r0}.
+ OrP(r0, r0, scratch2);
+ MovInt64ToDouble(dst, r0);
}
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src, Label* trap) {
- bailout(kUnsupportedArchitecture, "emit_type_conversion");
- return true;
-}
+ switch (opcode) {
+ case kExprI32ConvertI64:
+ lgfr(dst.gp(), src.gp());
+ return true;
+ case kExprI32SConvertF32: {
+ ConvertFloat32ToInt32(dst.gp(), src.fp(),
+ kRoundToZero); // f32 -> i32 round to zero.
+ b(Condition(1), trap);
+ return true;
+ }
+ case kExprI32UConvertF32: {
+ ConvertFloat32ToUnsignedInt32(dst.gp(), src.fp(), kRoundToZero);
+ b(Condition(1), trap);
+ return true;
+ }
+ case kExprI32SConvertF64: {
+ ConvertDoubleToInt32(dst.gp(), src.fp());
+ b(Condition(1), trap);
+ return true;
+ }
+ case kExprI32UConvertF64: {
+ ConvertDoubleToUnsignedInt32(dst.gp(), src.fp(), kRoundToZero);
+ b(Condition(1), trap);
+ return true;
+ }
+ case kExprI32SConvertSatF32: {
+ Label done, src_is_nan;
+ lzer(kScratchDoubleReg);
+ cebr(src.fp(), kScratchDoubleReg);
+ b(Condition(1), &src_is_nan);
-void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
- bailout(kUnsupportedArchitecture, "emit_i32_signextend_i8");
-}
+ // source is a finite number
+ ConvertFloat32ToInt32(dst.gp(), src.fp(),
+ kRoundToZero); // f32 -> i32 round to zero.
+ b(&done);
-void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
- bailout(kUnsupportedArchitecture, "emit_i32_signextend_i16");
-}
+ bind(&src_is_nan);
+ lghi(dst.gp(), Operand::Zero());
-void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i64_signextend_i8");
-}
+ bind(&done);
+ return true;
+ }
+ case kExprI32UConvertSatF32: {
+ Label done, src_is_nan;
+ lzer(kScratchDoubleReg);
+ cebr(src.fp(), kScratchDoubleReg);
+ b(Condition(1), &src_is_nan);
-void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i64_signextend_i16");
-}
+ // source is a finite number
+ ConvertFloat32ToUnsignedInt32(dst.gp(), src.fp(), kRoundToZero);
+ b(&done);
-void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i64_signextend_i32");
-}
+ bind(&src_is_nan);
+ lghi(dst.gp(), Operand::Zero());
+
+ bind(&done);
+ return true;
+ }
+ case kExprI32SConvertSatF64: {
+ Label done, src_is_nan;
+ lzdr(kScratchDoubleReg, r0);
+ cdbr(src.fp(), kScratchDoubleReg);
+ b(Condition(1), &src_is_nan);
+
+ ConvertDoubleToInt32(dst.gp(), src.fp());
+ b(&done);
+
+ bind(&src_is_nan);
+ lghi(dst.gp(), Operand::Zero());
+
+ bind(&done);
+ return true;
+ }
+ case kExprI32UConvertSatF64: {
+ Label done, src_is_nan;
+ lzdr(kScratchDoubleReg, r0);
+ cdbr(src.fp(), kScratchDoubleReg);
+ b(Condition(1), &src_is_nan);
+
+ ConvertDoubleToUnsignedInt32(dst.gp(), src.fp());
+ b(&done);
+
+ bind(&src_is_nan);
+ lghi(dst.gp(), Operand::Zero());
+
+ bind(&done);
+ return true;
+ }
+ case kExprI32ReinterpretF32:
+ lgdr(dst.gp(), src.fp());
+ srlg(dst.gp(), dst.gp(), Operand(32));
+ return true;
+ case kExprI64SConvertI32:
+ LoadS32(dst.gp(), src.gp());
+ return true;
+ case kExprI64UConvertI32:
+ llgfr(dst.gp(), src.gp());
+ return true;
+ case kExprI64ReinterpretF64:
+ lgdr(dst.gp(), src.fp());
+ return true;
+ case kExprF32SConvertI32: {
+ ConvertIntToFloat(dst.fp(), src.gp());
+ return true;
+ }
+ case kExprF32UConvertI32: {
+ ConvertUnsignedIntToFloat(dst.fp(), src.gp());
+ return true;
+ }
+ case kExprF32ConvertF64:
+ ledbr(dst.fp(), src.fp());
+ return true;
+ case kExprF32ReinterpretI32: {
+ sllg(r0, src.gp(), Operand(32));
+ ldgr(dst.fp(), r0);
+ return true;
+ }
+ case kExprF64SConvertI32: {
+ ConvertIntToDouble(dst.fp(), src.gp());
+ return true;
+ }
+ case kExprF64UConvertI32: {
+ ConvertUnsignedIntToDouble(dst.fp(), src.gp());
+ return true;
+ }
+ case kExprF64ConvertF32:
+ ldebr(dst.fp(), src.fp());
+ return true;
+ case kExprF64ReinterpretI64:
+ ldgr(dst.fp(), src.gp());
+ return true;
+ case kExprF64SConvertI64:
+ ConvertInt64ToDouble(dst.fp(), src.gp());
+ return true;
+ case kExprF64UConvertI64:
+ ConvertUnsignedInt64ToDouble(dst.fp(), src.gp());
+ return true;
+ case kExprI64SConvertF32: {
+ ConvertFloat32ToInt64(dst.gp(), src.fp()); // f32 -> i64 round to zero.
+ b(Condition(1), trap);
+ return true;
+ }
+ case kExprI64UConvertF32: {
+ ConvertFloat32ToUnsignedInt64(dst.gp(),
+ src.fp()); // f32 -> i64 round to zero.
+ b(Condition(1), trap);
+ return true;
+ }
+ case kExprF32SConvertI64:
+ ConvertInt64ToFloat(dst.fp(), src.gp());
+ return true;
+ case kExprF32UConvertI64:
+ ConvertUnsignedInt64ToFloat(dst.fp(), src.gp());
+ return true;
+ case kExprI64SConvertF64: {
+ ConvertDoubleToInt64(dst.gp(), src.fp()); // f64 -> i64 round to zero.
+ b(Condition(1), trap);
+ return true;
+ }
+ case kExprI64UConvertF64: {
+ ConvertDoubleToUnsignedInt64(dst.gp(),
+ src.fp()); // f64 -> i64 round to zero.
+ b(Condition(1), trap);
+ return true;
+ }
+ case kExprI64SConvertSatF32: {
+ Label done, src_is_nan;
+ lzer(kScratchDoubleReg);
+ cebr(src.fp(), kScratchDoubleReg);
+ b(Condition(1), &src_is_nan);
+
+ // source is a finite number
+ ConvertFloat32ToInt64(dst.gp(), src.fp()); // f32 -> i64 round to zero.
+ b(&done);
+
+ bind(&src_is_nan);
+ lghi(dst.gp(), Operand::Zero());
+
+ bind(&done);
+ return true;
+ }
+ case kExprI64UConvertSatF32: {
+ Label done, src_is_nan;
+ lzer(kScratchDoubleReg);
+ cebr(src.fp(), kScratchDoubleReg);
+ b(Condition(1), &src_is_nan);
+
+ // source is a finite number
+ ConvertFloat32ToUnsignedInt64(dst.gp(),
+ src.fp()); // f32 -> i64 round to zero.
+ b(&done);
+
+ bind(&src_is_nan);
+ lghi(dst.gp(), Operand::Zero());
+
+ bind(&done);
+ return true;
+ }
+ case kExprI64SConvertSatF64: {
+ Label done, src_is_nan;
+ lzdr(kScratchDoubleReg, r0);
+ cdbr(src.fp(), kScratchDoubleReg);
+ b(Condition(1), &src_is_nan);
+
+ ConvertDoubleToInt64(dst.gp(), src.fp()); // f64 -> i64 round to zero.
+ b(&done);
+
+ bind(&src_is_nan);
+ lghi(dst.gp(), Operand::Zero());
+
+ bind(&done);
+ return true;
+ }
+ case kExprI64UConvertSatF64: {
+ Label done, src_is_nan;
+ lzdr(kScratchDoubleReg, r0);
+ cdbr(src.fp(), kScratchDoubleReg);
+ b(Condition(1), &src_is_nan);
+
+ ConvertDoubleToUnsignedInt64(dst.gp(),
+ src.fp()); // f64 -> i64 round to zero.
+ b(&done);
-void LiftoffAssembler::emit_jump(Label* label) {
- bailout(kUnsupportedArchitecture, "emit_jump");
+ bind(&src_is_nan);
+ lghi(dst.gp(), Operand::Zero());
+
+ bind(&done);
+ return true;
+ }
+ default:
+ UNREACHABLE();
+ }
}
+void LiftoffAssembler::emit_jump(Label* label) { b(al, label); }
+
+void LiftoffAssembler::emit_jump(Register target) { Jump(target); }
+
void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
Label* label, ValueKind kind,
Register lhs, Register rhs) {
@@ -1213,11 +1809,23 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
Label* label, Register lhs,
int32_t imm) {
- bailout(kUnsupportedArchitecture, "emit_i32_cond_jumpi");
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ CmpS32(lhs, Operand(imm));
+ b(cond, label);
}
+#define EMIT_EQZ(test, src) \
+ { \
+ Label done; \
+ test(r0, src); \
+ mov(dst, Operand(1)); \
+ beq(&done); \
+ mov(dst, Operand(0)); \
+ bind(&done); \
+ }
+
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
- bailout(kUnsupportedArchitecture, "emit_i32_eqz");
+ EMIT_EQZ(ltr, src);
}
#define EMIT_SET_CONDITION(dst, cond) \
@@ -1243,7 +1851,7 @@ void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
}
void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i64_eqz");
+ EMIT_EQZ(ltgr, src.gp());
}
void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
@@ -1545,9 +2153,9 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i64x2neg");
}
-void LiftoffAssembler::emit_v64x2_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "v64x2_alltrue");
+ bailout(kSimd, "i64x2_alltrue");
}
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1669,9 +2277,9 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i32x4neg");
}
-void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "v32x4_alltrue");
+ bailout(kSimd, "i32x4_alltrue");
}
void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
@@ -1800,9 +2408,9 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i16x8neg");
}
-void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "v16x8_alltrue");
+ bailout(kSimd, "i16x8_alltrue");
}
void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
@@ -2011,9 +2619,9 @@ void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
bailout(kSimd, "v8x16_anytrue");
}
-void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "v8x16_alltrue");
+ bailout(kSimd, "i8x16_alltrue");
}
void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
@@ -2081,11 +2689,6 @@ void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i8x16subsaturate_u");
}
-void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i8x16mul");
-}
-
void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2435,34 +3038,52 @@ void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
- bailout(kUnsupportedArchitecture, "StackCheck");
+ LoadU64(limit_address, MemOperand(limit_address));
+ CmpU64(sp, limit_address);
+ b(le, ool_code);
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
- bailout(kUnsupportedArchitecture, "CallTrapCallbackForTesting");
+ PrepareCallCFunction(0, 0, no_reg);
+ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- bailout(kUnsupportedArchitecture, "AssertUnreachable");
+ // Asserts unreachable within the wasm code.
+ TurboAssembler::AssertUnreachable(reason);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
- bailout(kUnsupportedArchitecture, "PushRegisters");
+ MultiPush(regs.GetGpList());
+ MultiPushDoubles(regs.GetFpList());
}
void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
- bailout(kUnsupportedArchitecture, "PopRegisters");
+ MultiPopDoubles(regs.GetFpList());
+ MultiPop(regs.GetGpList());
}
void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
LiftoffRegList all_spills,
LiftoffRegList ref_spills,
int spill_offset) {
- bailout(kRefTypes, "RecordSpillsInSafepoint");
+ int spill_space_size = 0;
+ while (!all_spills.is_empty()) {
+ LiftoffRegister reg = all_spills.GetLastRegSet();
+ if (ref_spills.has(reg)) {
+ safepoint.DefinePointerSlot(spill_offset);
+ }
+ all_spills.clear(reg);
+ ++spill_offset;
+ spill_space_size += kSystemPointerSize;
+ }
+ // Record the number of additional spill slots.
+ RecordOolSpillSpaceSize(spill_space_size);
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- bailout(kUnsupportedArchitecture, "DropStackSlotsAndRet");
+ Drop(num_stack_slots);
+ Ret();
}
void LiftoffAssembler::CallC(const ValueKindSig* sig,
@@ -2470,51 +3091,230 @@ void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* rets,
ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
- bailout(kUnsupportedArchitecture, "CallC");
+ int total_size = RoundUp(stack_bytes, 8);
+
+ int size = total_size;
+ constexpr int kStackPageSize = 4 * KB;
+
+ // Reserve space in the stack.
+ while (size > kStackPageSize) {
+ lay(sp, MemOperand(sp, -kStackPageSize));
+ StoreU64(r0, MemOperand(sp));
+ size -= kStackPageSize;
+ }
+
+ lay(sp, MemOperand(sp, -size));
+
+ int arg_bytes = 0;
+ for (ValueKind param_kind : sig->parameters()) {
+ switch (param_kind) {
+ case kI32:
+ StoreU32(args->gp(), MemOperand(sp, arg_bytes));
+ break;
+ case kI64:
+ StoreU64(args->gp(), MemOperand(sp, arg_bytes));
+ break;
+ case kF32:
+ StoreF32(args->fp(), MemOperand(sp, arg_bytes));
+ break;
+ case kF64:
+ StoreF64(args->fp(), MemOperand(sp, arg_bytes));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ args++;
+ arg_bytes += element_size_bytes(param_kind);
+ }
+
+ DCHECK_LE(arg_bytes, stack_bytes);
+
+ // Pass a pointer to the buffer with the arguments to the C function.
+ mov(r2, sp);
+
+ // Now call the C function.
+ constexpr int kNumCCallArgs = 1;
+ PrepareCallCFunction(kNumCCallArgs, no_reg);
+ CallCFunction(ext_ref, kNumCCallArgs);
+
+ // Move return value to the right register.
+ const LiftoffRegister* result_reg = rets;
+ if (sig->return_count() > 0) {
+ DCHECK_EQ(1, sig->return_count());
+ constexpr Register kReturnReg = r2;
+ if (kReturnReg != rets->gp()) {
+ Move(*rets, LiftoffRegister(kReturnReg), sig->GetReturn(0));
+ }
+ result_reg++;
+ }
+
+ // Load potential output value from the buffer on the stack.
+ if (out_argument_kind != kVoid) {
+ switch (out_argument_kind) {
+ case kI32:
+ LoadS32(result_reg->gp(), MemOperand(sp));
+ break;
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
+ LoadU64(result_reg->gp(), MemOperand(sp));
+ break;
+ case kF32:
+ LoadF32(result_reg->fp(), MemOperand(sp));
+ break;
+ case kF64:
+ LoadF64(result_reg->fp(), MemOperand(sp));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ lay(sp, MemOperand(sp, total_size));
}
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
- bailout(kUnsupportedArchitecture, "CallNativeWasmCode");
+ Call(addr, RelocInfo::WASM_CALL);
}
void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
- bailout(kUnsupportedArchitecture, "TailCallNativeWasmCode");
+ Jump(addr, RelocInfo::WASM_CALL);
}
void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
- bailout(kUnsupportedArchitecture, "CallIndirect");
+ DCHECK(target != no_reg);
+ Call(target);
}
void LiftoffAssembler::TailCallIndirect(Register target) {
- bailout(kUnsupportedArchitecture, "TailCallIndirect");
+ DCHECK(target != no_reg);
+ Jump(target);
}
void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
- bailout(kUnsupportedArchitecture, "CallRuntimeStub");
+ Call(static_cast<Address>(sid), RelocInfo::WASM_STUB_CALL);
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- bailout(kUnsupportedArchitecture, "AllocateStackSlot");
+ lay(sp, MemOperand(sp, -size));
+ TurboAssembler::Move(addr, sp);
}
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
- bailout(kUnsupportedArchitecture, "DeallocateStackSlot");
-}
-
-void LiftoffStackSlots::Construct() {
- asm_->bailout(kUnsupportedArchitecture, "LiftoffStackSlots::Construct");
-}
-
-void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
- DoubleRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f64_copysign");
-}
-
-void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
- DoubleRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f32_copysign");
+ lay(sp, MemOperand(sp, size));
+}
+
+void LiftoffStackSlots::Construct(int param_slots) {
+ DCHECK_LT(0, slots_.size());
+ SortInPushOrder();
+ int last_stack_slot = param_slots;
+ for (auto& slot : slots_) {
+ const int stack_slot = slot.dst_slot_;
+ int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
+ DCHECK_LT(0, stack_decrement);
+ last_stack_slot = stack_slot;
+ const LiftoffAssembler::VarState& src = slot.src_;
+ switch (src.loc()) {
+ case LiftoffAssembler::VarState::kStack: {
+ switch (src.kind()) {
+ case kI32:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ case kI64: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ UseScratchRegisterScope temps(asm_);
+ Register scratch = temps.Acquire();
+ asm_->LoadU64(scratch, liftoff::GetStackSlot(slot.src_offset_));
+ asm_->Push(scratch);
+ break;
+ }
+ case kF32: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ asm_->LoadF32(kScratchDoubleReg,
+ liftoff::GetStackSlot(slot.src_offset_));
+ asm_->lay(sp, MemOperand(sp, -kSystemPointerSize));
+ asm_->StoreF32(kScratchDoubleReg, MemOperand(sp));
+ break;
+ }
+ case kF64: {
+ asm_->AllocateStackSpace(stack_decrement - kDoubleSize);
+ asm_->LoadF64(kScratchDoubleReg,
+ liftoff::GetStackSlot(slot.src_offset_));
+ asm_->push(kScratchDoubleReg);
+ break;
+ }
+ case kS128: {
+ asm_->AllocateStackSpace(stack_decrement - kSimd128Size);
+ UseScratchRegisterScope temps(asm_);
+ Register scratch = temps.Acquire();
+ asm_->LoadV128(kScratchDoubleReg,
+ liftoff::GetStackSlot(slot.src_offset_), scratch);
+ asm_->lay(sp, MemOperand(sp, -kSimd128Size));
+ asm_->StoreV128(kScratchDoubleReg, MemOperand(sp), scratch);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ case LiftoffAssembler::VarState::kRegister: {
+ int pushed_bytes = SlotSizeInBytes(slot);
+ asm_->AllocateStackSpace(stack_decrement - pushed_bytes);
+ switch (src.kind()) {
+ case kI64:
+ case kI32:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ asm_->push(src.reg().gp());
+ break;
+ case kF32:
+ asm_->lay(sp, MemOperand(sp, -kSystemPointerSize));
+ asm_->StoreF32(src.reg().fp(), MemOperand(sp));
+ break;
+ case kF64:
+ asm_->push(src.reg().fp());
+ break;
+ case kS128: {
+ UseScratchRegisterScope temps(asm_);
+ Register scratch = temps.Acquire();
+ asm_->lay(sp, MemOperand(sp, -kSimd128Size));
+ asm_->StoreV128(src.reg().fp(), MemOperand(sp), scratch);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ case LiftoffAssembler::VarState::kIntConst: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ DCHECK(src.kind() == kI32 || src.kind() == kI64);
+ UseScratchRegisterScope temps(asm_);
+ Register scratch = temps.Acquire();
+
+ switch (src.kind()) {
+ case kI32:
+ asm_->mov(scratch, Operand(src.i32_const()));
+ break;
+ case kI64:
+ asm_->mov(scratch, Operand(int64_t{slot.src_.i32_const()}));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ asm_->push(scratch);
+ break;
+ }
+ }
+ }
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index d2c757ec08e..e8a57bafca1 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -9,6 +9,7 @@
#include "src/codegen/assembler.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/machine-type.h"
+#include "src/codegen/x64/register-x64.h"
#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/wasm/simd-shuffle.h"
@@ -119,6 +120,12 @@ inline void Store(LiftoffAssembler* assm, Operand dst, LiftoffRegister src,
case kI64:
assm->movq(dst, src.gp());
break;
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
+ assm->StoreTaggedField(dst, src.gp());
+ break;
case kF32:
assm->Movss(dst, src.fp());
break;
@@ -133,24 +140,26 @@ inline void Store(LiftoffAssembler* assm, Operand dst, LiftoffRegister src,
}
}
-inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
+inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind,
+ int padding = 0) {
switch (kind) {
case kI32:
case kI64:
case kRef:
case kOptRef:
+ assm->AllocateStackSpace(padding);
assm->pushq(reg.gp());
break;
case kF32:
- assm->AllocateStackSpace(kSystemPointerSize);
+ assm->AllocateStackSpace(kSystemPointerSize + padding);
assm->Movss(Operand(rsp, 0), reg.fp());
break;
case kF64:
- assm->AllocateStackSpace(kSystemPointerSize);
+ assm->AllocateStackSpace(kSystemPointerSize + padding);
assm->Movsd(Operand(rsp, 0), reg.fp());
break;
case kS128:
- assm->AllocateStackSpace(kSystemPointerSize * 2);
+ assm->AllocateStackSpace(kSystemPointerSize * 2 + padding);
assm->Movdqu(Operand(rsp, 0), reg.fp());
break;
default:
@@ -244,12 +253,11 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
}
int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
- return is_reference_type(kind) ? kSystemPointerSize
- : element_size_bytes(kind);
+ return is_reference(kind) ? kSystemPointerSize : element_size_bytes(kind);
}
bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
- return is_reference_type(kind);
+ return is_reference(kind);
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
@@ -331,6 +339,13 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
LoadTaggedPointerField(dst, src_op);
}
+void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
+ int32_t offset_imm) {
+ Operand src_op = liftoff::GetMemOp(this, src_addr, no_reg,
+ static_cast<uint32_t>(offset_imm));
+ movq(dst, src_op);
+}
+
void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
@@ -816,7 +831,7 @@ void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
if (kind == kI32) {
movl(dst, src);
} else {
- DCHECK(kI64 == kind || is_reference_type(kind));
+ DCHECK(kI64 == kind || is_reference(kind));
movq(dst, src);
}
}
@@ -2723,21 +2738,21 @@ void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
// Different register alias requirements depending on CpuFeatures supported:
if (CpuFeatures::IsSupported(AVX)) {
// 1. AVX, no requirements.
- I64x2GtS(dst.fp(), lhs.fp(), rhs.fp());
+ I64x2GtS(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
} else if (CpuFeatures::IsSupported(SSE4_2)) {
// 2. SSE4_2, dst == lhs.
if (dst != lhs) {
- movdqa(dst.fp(), lhs.fp());
+ movaps(dst.fp(), lhs.fp());
}
- I64x2GtS(dst.fp(), dst.fp(), rhs.fp());
+ I64x2GtS(dst.fp(), dst.fp(), rhs.fp(), kScratchDoubleReg);
} else {
// 3. Else, dst != lhs && dst != rhs (lhs == rhs is ok).
if (dst == lhs || dst == rhs) {
- // macro-assembler uses kScratchDoubleReg, so don't use it.
- I64x2GtS(liftoff::kScratchDoubleReg2, lhs.fp(), rhs.fp());
+ I64x2GtS(liftoff::kScratchDoubleReg2, lhs.fp(), rhs.fp(),
+ kScratchDoubleReg);
movaps(dst.fp(), liftoff::kScratchDoubleReg2);
} else {
- I64x2GtS(dst.fp(), lhs.fp(), rhs.fp());
+ I64x2GtS(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
}
}
}
@@ -2747,24 +2762,24 @@ void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
// Different register alias requirements depending on CpuFeatures supported:
if (CpuFeatures::IsSupported(AVX)) {
// 1. AVX, no requirements.
- I64x2GeS(dst.fp(), lhs.fp(), rhs.fp());
+ I64x2GeS(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
} else if (CpuFeatures::IsSupported(SSE4_2)) {
// 2. SSE4_2, dst != lhs.
if (dst == lhs) {
- // macro-assembler uses kScratchDoubleReg, so don't use it.
- I64x2GeS(liftoff::kScratchDoubleReg2, lhs.fp(), rhs.fp());
- movdqa(dst.fp(), liftoff::kScratchDoubleReg2);
+ I64x2GeS(liftoff::kScratchDoubleReg2, lhs.fp(), rhs.fp(),
+ kScratchDoubleReg);
+ movaps(dst.fp(), liftoff::kScratchDoubleReg2);
} else {
- I64x2GeS(dst.fp(), lhs.fp(), rhs.fp());
+ I64x2GeS(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
}
} else {
// 3. Else, dst != lhs && dst != rhs (lhs == rhs is ok).
if (dst == lhs || dst == rhs) {
- // macro-assembler uses kScratchDoubleReg, so don't use it.
- I64x2GeS(liftoff::kScratchDoubleReg2, lhs.fp(), rhs.fp());
+ I64x2GeS(liftoff::kScratchDoubleReg2, lhs.fp(), rhs.fp(),
+ kScratchDoubleReg);
movaps(dst.fp(), liftoff::kScratchDoubleReg2);
} else {
- I64x2GeS(dst.fp(), lhs.fp(), rhs.fp());
+ I64x2GeS(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
}
}
}
@@ -2862,14 +2877,14 @@ void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
LiftoffRegister mask) {
- // Ensure that we don't overwrite any inputs with the movdqu below.
+ // Ensure that we don't overwrite any inputs with the movaps below.
DCHECK_NE(dst, src1);
DCHECK_NE(dst, src2);
if (!CpuFeatures::IsSupported(AVX) && dst != mask) {
- movdqu(dst.fp(), mask.fp());
- S128Select(dst.fp(), dst.fp(), src1.fp(), src2.fp());
+ movaps(dst.fp(), mask.fp());
+ S128Select(dst.fp(), dst.fp(), src1.fp(), src2.fp(), kScratchDoubleReg);
} else {
- S128Select(dst.fp(), mask.fp(), src1.fp(), src2.fp());
+ S128Select(dst.fp(), mask.fp(), src1.fp(), src2.fp(), kScratchDoubleReg);
}
}
@@ -2889,7 +2904,7 @@ void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
liftoff::EmitAnyTrue(this, dst, src);
}
-void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqb>(this, dst, src);
}
@@ -2918,7 +2933,7 @@ void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
vpand(dst.fp(), lhs.fp(), kScratchDoubleReg);
} else {
if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- pand(dst.fp(), kScratchDoubleReg);
+ andps(dst.fp(), kScratchDoubleReg);
}
subq(kScratchRegister, Immediate(8));
Movq(tmp_simd.fp(), kScratchRegister);
@@ -3027,68 +3042,6 @@ void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
rhs);
}
-void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- static constexpr RegClass tmp_rc = reg_class_for(kS128);
- LiftoffRegister tmp =
- GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs));
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- // I16x8 view of I8x16
- // left = AAaa AAaa ... AAaa AAaa
- // right= BBbb BBbb ... BBbb BBbb
- // t = 00AA 00AA ... 00AA 00AA
- // s = 00BB 00BB ... 00BB 00BB
- vpsrlw(tmp.fp(), lhs.fp(), 8);
- vpsrlw(kScratchDoubleReg, rhs.fp(), 8);
- // t = I16x8Mul(t0, t1)
-    //    => __PP __PP ...  __PP  __PP
- vpmullw(tmp.fp(), tmp.fp(), kScratchDoubleReg);
- // s = left * 256
- vpsllw(kScratchDoubleReg, lhs.fp(), 8);
- // dst = I16x8Mul(left * 256, right)
-    //    => pp__ pp__ ...  pp__  pp__
- vpmullw(dst.fp(), kScratchDoubleReg, rhs.fp());
- // dst = I16x8Shr(dst, 8)
-    //    => 00pp 00pp ...  00pp  00pp
- vpsrlw(dst.fp(), dst.fp(), 8);
- // t = I16x8Shl(t, 8)
-    //    => PP00 PP00 ...  PP00  PP00
- vpsllw(tmp.fp(), tmp.fp(), 8);
- // dst = I16x8Or(dst, t)
-    //    => PPpp PPpp ...  PPpp  PPpp
- vpor(dst.fp(), dst.fp(), tmp.fp());
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- // I16x8 view of I8x16
- // left = AAaa AAaa ... AAaa AAaa
- // right= BBbb BBbb ... BBbb BBbb
- // t = 00AA 00AA ... 00AA 00AA
- // s = 00BB 00BB ... 00BB 00BB
- movaps(tmp.fp(), dst.fp());
- movaps(kScratchDoubleReg, rhs.fp());
- psrlw(tmp.fp(), 8);
- psrlw(kScratchDoubleReg, 8);
- // dst = left * 256
- psllw(dst.fp(), 8);
- // t = I16x8Mul(t, s)
-    //    => __PP __PP ...  __PP  __PP
- pmullw(tmp.fp(), kScratchDoubleReg);
- // dst = I16x8Mul(left * 256, right)
-    //    => pp__ pp__ ...  pp__  pp__
- pmullw(dst.fp(), rhs.fp());
- // t = I16x8Shl(t, 8)
-    //    => PP00 PP00 ...  PP00  PP00
- psllw(tmp.fp(), 8);
- // dst = I16x8Shr(dst, 8)
-    //    => 00pp 00pp ...  00pp  00pp
- psrlw(dst.fp(), 8);
- // dst = I16x8Or(dst, t)
-    //    => PPpp PPpp ...  PPpp  PPpp
- por(dst.fp(), tmp.fp());
- }
-}
-
void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -3128,7 +3081,7 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
}
}
-void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqw>(this, dst, src);
}
@@ -3269,28 +3222,27 @@ void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
- I16x8ExtMul(dst.fp(), src1.fp(), src2.fp(), /*low=*/true, /*is_signed=*/true);
+ I16x8ExtMulLow(dst.fp(), src1.fp(), src2.fp(), kScratchDoubleReg,
+ /*is_signed=*/true);
}
void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_u(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
- I16x8ExtMul(dst.fp(), src1.fp(), src2.fp(), /*low=*/true,
- /*is_signed=*/false);
+ I16x8ExtMulLow(dst.fp(), src1.fp(), src2.fp(), kScratchDoubleReg,
+ /*is_signed=*/false);
}
void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
- I16x8ExtMul(dst.fp(), src1.fp(), src2.fp(), /*low=*/false,
- /*is_signed=*/true);
+ I16x8ExtMulHighS(dst.fp(), src1.fp(), src2.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
- I16x8ExtMul(dst.fp(), src1.fp(), src2.fp(), /*low=*/false,
- /*is_signed=*/false);
+ I16x8ExtMulHighU(dst.fp(), src1.fp(), src2.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
@@ -3310,7 +3262,7 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
}
}
-void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqd>(this, dst, src);
}
@@ -3431,16 +3383,16 @@ inline void I32x4ExtMulHelper(LiftoffAssembler* assm, XMMRegister dst,
bool is_signed) {
// I32x4ExtMul requires dst == src1 if AVX is not supported.
if (CpuFeatures::IsSupported(AVX) || dst == src1) {
- assm->I32x4ExtMul(dst, src1, src2, low, is_signed);
+ assm->I32x4ExtMul(dst, src1, src2, kScratchDoubleReg, low, is_signed);
} else if (dst != src2) {
// dst != src1 && dst != src2
assm->movaps(dst, src1);
- assm->I32x4ExtMul(dst, dst, src2, low, is_signed);
+ assm->I32x4ExtMul(dst, dst, src2, kScratchDoubleReg, low, is_signed);
} else {
// dst == src2
// Extended multiplication is commutative,
assm->movaps(dst, src2);
- assm->I32x4ExtMul(dst, dst, src1, low, is_signed);
+ assm->I32x4ExtMul(dst, dst, src1, kScratchDoubleReg, low, is_signed);
}
}
} // namespace liftoff
@@ -3484,11 +3436,11 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
vpsubq(dst.fp(), reg, src.fp());
} else {
psubq(reg, src.fp());
- if (dst.fp() != reg) movapd(dst.fp(), reg);
+ if (dst.fp() != reg) movaps(dst.fp(), reg);
}
}
-void LiftoffAssembler::emit_v64x2_alltrue(LiftoffRegister dst,
+void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqq>(this, dst, src, SSE4_1);
}
@@ -3567,27 +3519,28 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
- I64x2ExtMul(dst.fp(), src1.fp(), src2.fp(), /*low=*/true, /*is_signed=*/true);
+ I64x2ExtMul(dst.fp(), src1.fp(), src2.fp(), kScratchDoubleReg, /*low=*/true,
+ /*is_signed=*/true);
}
void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_u(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
- I64x2ExtMul(dst.fp(), src1.fp(), src2.fp(), /*low=*/true,
+ I64x2ExtMul(dst.fp(), src1.fp(), src2.fp(), kScratchDoubleReg, /*low=*/true,
/*is_signed=*/false);
}
void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
- I64x2ExtMul(dst.fp(), src1.fp(), src2.fp(), /*low=*/false,
+ I64x2ExtMul(dst.fp(), src1.fp(), src2.fp(), kScratchDoubleReg, /*low=*/false,
/*is_signed=*/true);
}
void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
- I64x2ExtMul(dst.fp(), src1.fp(), src2.fp(), /*low=*/false,
+ I64x2ExtMul(dst.fp(), src1.fp(), src2.fp(), kScratchDoubleReg, /*low=*/false,
/*is_signed=*/false);
}
@@ -3613,7 +3566,7 @@ void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
LiftoffRegister src) {
- I64x2UConvertI32x4High(dst.fp(), src.fp());
+ I64x2UConvertI32x4High(dst.fp(), src.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
@@ -3865,13 +3818,13 @@ void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
vminpd(dst.fp(), rhs.fp(), lhs.fp());
} else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
- movapd(kScratchDoubleReg, src);
+ movaps(kScratchDoubleReg, src);
minpd(kScratchDoubleReg, dst.fp());
minpd(dst.fp(), src);
} else {
- movapd(kScratchDoubleReg, lhs.fp());
+ movaps(kScratchDoubleReg, lhs.fp());
minpd(kScratchDoubleReg, rhs.fp());
- movapd(dst.fp(), rhs.fp());
+ movaps(dst.fp(), rhs.fp());
minpd(dst.fp(), lhs.fp());
}
// propagate -0's and NaNs, which may be non-canonical.
@@ -3893,13 +3846,13 @@ void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
vmaxpd(dst.fp(), rhs.fp(), lhs.fp());
} else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
- movapd(kScratchDoubleReg, src);
+ movaps(kScratchDoubleReg, src);
maxpd(kScratchDoubleReg, dst.fp());
maxpd(dst.fp(), src);
} else {
- movapd(kScratchDoubleReg, lhs.fp());
+ movaps(kScratchDoubleReg, lhs.fp());
maxpd(kScratchDoubleReg, rhs.fp());
- movapd(dst.fp(), rhs.fp());
+ movaps(dst.fp(), rhs.fp());
maxpd(dst.fp(), lhs.fp());
}
// Find discrepancies.
@@ -3954,7 +3907,7 @@ void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
movaps(kScratchDoubleReg, src.fp());
cmpeqps(kScratchDoubleReg, kScratchDoubleReg);
if (dst.fp() != src.fp()) movaps(dst.fp(), src.fp());
- pand(dst.fp(), kScratchDoubleReg);
+ andps(dst.fp(), kScratchDoubleReg);
}
// Set top bit if >= 0 (but not -0.0!).
Pxor(kScratchDoubleReg, dst.fp());
@@ -4080,7 +4033,7 @@ void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
LiftoffRegister src) {
- I16x8UConvertI8x16High(dst.fp(), src.fp());
+ I16x8UConvertI8x16High(dst.fp(), src.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
@@ -4100,7 +4053,7 @@ void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
LiftoffRegister src) {
- I32x4UConvertI16x8High(dst.fp(), src.fp());
+ I32x4UConvertI16x8High(dst.fp(), src.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
@@ -4151,7 +4104,7 @@ void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
LiftoffRegister src) {
- I64x2Abs(dst.fp(), src.fp());
+ I64x2Abs(dst.fp(), src.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
@@ -4415,7 +4368,7 @@ void LiftoffAssembler::CallC(const ValueKindSig* sig,
}
// Load potential output value from the buffer on the stack.
- if (out_argument_kind != kStmt) {
+ if (out_argument_kind != kVoid) {
liftoff::Load(this, *next_result_reg, Operand(rsp, 0), out_argument_kind);
}
@@ -4471,22 +4424,32 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
addq(rsp, Immediate(size));
}
-void LiftoffStackSlots::Construct() {
+void LiftoffStackSlots::Construct(int param_slots) {
+ DCHECK_LT(0, slots_.size());
+ SortInPushOrder();
+ int last_stack_slot = param_slots;
for (auto& slot : slots_) {
+ const int stack_slot = slot.dst_slot_;
+ int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
+ last_stack_slot = stack_slot;
const LiftoffAssembler::VarState& src = slot.src_;
+ DCHECK_LT(0, stack_decrement);
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack:
if (src.kind() == kI32) {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
// Load i32 values to a register first to ensure they are zero
// extended.
asm_->movl(kScratchRegister, liftoff::GetStackSlot(slot.src_offset_));
asm_->pushq(kScratchRegister);
} else if (src.kind() == kS128) {
+ asm_->AllocateStackSpace(stack_decrement - kSimd128Size);
// Since offsets are subtracted from sp, we need a smaller offset to
// push the top of a s128 value.
asm_->pushq(liftoff::GetStackSlot(slot.src_offset_ - 8));
asm_->pushq(liftoff::GetStackSlot(slot.src_offset_));
} else {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
// For all other types, just push the whole (8-byte) stack slot.
// This is also ok for f32 values (even though we copy 4 uninitialized
// bytes), because f32 and f64 values are clearly distinguished in
@@ -4494,10 +4457,13 @@ void LiftoffStackSlots::Construct() {
asm_->pushq(liftoff::GetStackSlot(slot.src_offset_));
}
break;
- case LiftoffAssembler::VarState::kRegister:
- liftoff::push(asm_, src.reg(), src.kind());
+ case LiftoffAssembler::VarState::kRegister: {
+ int pushed = src.kind() == kS128 ? kSimd128Size : kSystemPointerSize;
+ liftoff::push(asm_, src.reg(), src.kind(), stack_decrement - pushed);
break;
+ }
case LiftoffAssembler::VarState::kIntConst:
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
asm_->pushq(Immediate(src.i32_const()));
break;
}
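
Below is a minimal, self-contained sketch (toy values, not V8 code) of the stack-decrement bookkeeping introduced in Construct(param_slots) above: slots are walked in push order, and before each push the gap up to that slot's destination is allocated, minus the bytes the push itself writes.

#include <cstdio>
#include <vector>

constexpr int kSystemPointerSize = 8;

int main() {
  // Hypothetical destination slots, already sorted into push order.
  std::vector<int> dst_slots = {5, 3, 2};
  int param_slots = 6;  // assumed size of the parameter area, in slots
  int last_stack_slot = param_slots;
  for (int stack_slot : dst_slots) {
    int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
    last_stack_slot = stack_slot;
    // A pointer-sized push writes kSystemPointerSize bytes, so only the
    // remainder of the decrement is allocated explicitly.
    std::printf("slot %d: allocate %d bytes, then push %d bytes\n", stack_slot,
                stack_decrement - kSystemPointerSize, kSystemPointerSize);
  }
  return 0;
}
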
diff --git a/deps/v8/src/wasm/c-api.cc b/deps/v8/src/wasm/c-api.cc
index 7d78e34a03d..3af5afaee37 100644
--- a/deps/v8/src/wasm/c-api.cc
+++ b/deps/v8/src/wasm/c-api.cc
@@ -35,6 +35,7 @@
#include "src/wasm/module-instantiate.h"
#include "src/wasm/wasm-arguments.h"
#include "src/wasm/wasm-constants.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-result.h"
#include "src/wasm/wasm-serialization.h"
@@ -1211,7 +1212,7 @@ namespace {
class SignatureHelper : public i::AllStatic {
public:
// Use an invalid type as a marker separating params and results.
- static constexpr i::wasm::ValueType kMarker = i::wasm::kWasmStmt;
+ static constexpr i::wasm::ValueType kMarker = i::wasm::kWasmVoid;
static i::Handle<i::PodArray<i::wasm::ValueType>> Serialize(
i::Isolate* isolate, FuncType* type) {
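
The kMarker change above only swaps the marker value from kWasmStmt to kWasmVoid; the encoding itself stays the same: one flat array carries both parameter and result types, separated by an invalid "void" entry. A toy sketch of such an encoding, using plain ints and an arbitrary ordering rather than V8's ValueType and PodArray:

#include <cassert>
#include <vector>

constexpr int kVoidMarker = -1;  // stands in for the kWasmVoid marker above

int main() {
  std::vector<int> params = {1, 2}, results = {3};

  // Serialize: params, marker, results (the ordering here is illustrative).
  std::vector<int> serialized = params;
  serialized.push_back(kVoidMarker);
  serialized.insert(serialized.end(), results.begin(), results.end());

  // Deserialize by scanning for the marker.
  std::vector<int> out_params, out_results;
  bool past_marker = false;
  for (int v : serialized) {
    if (v == kVoidMarker) { past_marker = true; continue; }
    (past_marker ? out_results : out_params).push_back(v);
  }
  assert(out_params == params && out_results == results);
  return 0;
}
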
@@ -1426,7 +1427,7 @@ void PushArgs(const i::wasm::FunctionSig* sig, const Val args[],
UNIMPLEMENTED();
case i::wasm::kI8:
case i::wasm::kI16:
- case i::wasm::kStmt:
+ case i::wasm::kVoid:
case i::wasm::kBottom:
UNREACHABLE();
break;
@@ -1467,7 +1468,7 @@ void PopArgs(const i::wasm::FunctionSig* sig, Val results[],
UNIMPLEMENTED();
case i::wasm::kI8:
case i::wasm::kI16:
- case i::wasm::kStmt:
+ case i::wasm::kVoid:
case i::wasm::kBottom:
UNREACHABLE();
break;
@@ -1732,7 +1733,7 @@ auto Global::get() const -> Val {
UNIMPLEMENTED();
case i::wasm::kI8:
case i::wasm::kI16:
- case i::wasm::kStmt:
+ case i::wasm::kVoid:
case i::wasm::kBottom:
UNREACHABLE();
}
diff --git a/deps/v8/src/wasm/c-api.h b/deps/v8/src/wasm/c-api.h
index 426806f1d20..0dba237d301 100644
--- a/deps/v8/src/wasm/c-api.h
+++ b/deps/v8/src/wasm/c-api.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_C_API_H_
#define V8_WASM_C_API_H_
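
The same four-line guard is added to several wasm headers in this patch, turning an accidental include in a WebAssembly-disabled build into an immediate compile error. As a minimal illustration (assuming nothing beyond the macro name used above), a translation unit like this builds with -DV8_ENABLE_WEBASSEMBLY=1 and fails to preprocess with -DV8_ENABLE_WEBASSEMBLY=0:

#if !V8_ENABLE_WEBASSEMBLY
#error This header should only be included if WebAssembly is enabled.
#endif  // !V8_ENABLE_WEBASSEMBLY

int main() { return 0; }
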
diff --git a/deps/v8/src/wasm/code-space-access.h b/deps/v8/src/wasm/code-space-access.h
index b6d22160ca9..6fd5ad5f9f6 100644
--- a/deps/v8/src/wasm/code-space-access.h
+++ b/deps/v8/src/wasm/code-space-access.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_CODE_SPACE_ACCESS_H_
#define V8_WASM_CODE_SPACE_ACCESS_H_
diff --git a/deps/v8/src/wasm/compilation-environment.h b/deps/v8/src/wasm/compilation-environment.h
index 49ab7c8fe77..987180c83f6 100644
--- a/deps/v8/src/wasm/compilation-environment.h
+++ b/deps/v8/src/wasm/compilation-environment.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_COMPILATION_ENVIRONMENT_H_
#define V8_WASM_COMPILATION_ENVIRONMENT_H_
@@ -64,9 +68,11 @@ struct CompilationEnv {
const LowerSimd lower_simd;
- static constexpr uint32_t kMaxMemoryPagesAtRuntime =
- std::min(kV8MaxWasmMemoryPages,
- std::numeric_limits<uintptr_t>::max() / kWasmPageSize);
+ // We assume that memories of size >= half of the virtual address space
+ // cannot be allocated (see https://crbug.com/1201340).
+ static constexpr uint32_t kMaxMemoryPagesAtRuntime = std::min(
+ kV8MaxWasmMemoryPages,
+ (uintptr_t{1} << (kSystemPointerSize == 4 ? 31 : 63)) / kWasmPageSize);
constexpr CompilationEnv(const WasmModule* module,
UseTrapHandler use_trap_handler,
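
The new kMaxMemoryPagesAtRuntime bound assumes that a memory covering half or more of the virtual address space can never be allocated, so the cap is the smaller of the engine limit and (address space / 2) / page size. A standalone sketch of that arithmetic, with assumed values standing in for the V8 constants:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  constexpr uint64_t kWasmPageSize = 64 * 1024;      // 64 KiB, per spec
  constexpr uint64_t kV8MaxWasmMemoryPages = 65536;  // assumed 4 GiB engine cap
  for (int pointer_size : {4, 8}) {
    uint64_t half_address_space = uint64_t{1} << (pointer_size == 4 ? 31 : 63);
    uint64_t max_pages =
        std::min(kV8MaxWasmMemoryPages, half_address_space / kWasmPageSize);
    std::printf("%d-byte pointers: at most %llu pages\n", pointer_size,
                (unsigned long long)max_pages);
  }
  return 0;
}
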
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index fbd3be5dcf9..c181c8df874 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_DECODER_H_
#define V8_WASM_DECODER_H_
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 6c9700b1005..d37f7186818 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_FUNCTION_BODY_DECODER_IMPL_H_
#define V8_WASM_FUNCTION_BODY_DECODER_IMPL_H_
@@ -480,7 +484,7 @@ struct ImmF64Immediate {
template <Decoder::ValidateFlag validate>
struct GlobalIndexImmediate {
uint32_t index;
- ValueType type = kWasmStmt;
+ ValueType type = kWasmVoid;
const WasmGlobal* global = nullptr;
uint32_t length;
@@ -514,7 +518,7 @@ struct SelectTypeImmediate {
template <Decoder::ValidateFlag validate>
struct BlockTypeImmediate {
uint32_t length = 1;
- ValueType type = kWasmStmt;
+ ValueType type = kWasmVoid;
uint32_t sig_index = 0;
const FunctionSig* sig = nullptr;
@@ -552,7 +556,7 @@ struct BlockTypeImmediate {
return static_cast<uint32_t>(sig->parameter_count());
}
uint32_t out_arity() const {
- if (type == kWasmStmt) return 0;
+ if (type == kWasmVoid) return 0;
if (type != kWasmBottom) return 1;
return static_cast<uint32_t>(sig->return_count());
}
@@ -562,7 +566,7 @@ struct BlockTypeImmediate {
}
ValueType out_type(uint32_t index) {
if (type == kWasmBottom) return sig->GetReturn(index);
- DCHECK_NE(kWasmStmt, type);
+ DCHECK_NE(kWasmVoid, type);
DCHECK_EQ(0, index);
return type;
}
@@ -910,7 +914,7 @@ struct PcForErrors<Decoder::kFullValidation> {
// An entry on the value stack.
template <Decoder::ValidateFlag validate>
struct ValueBase : public PcForErrors<validate> {
- ValueType type = kWasmStmt;
+ ValueType type = kWasmVoid;
ValueBase(const byte* pc, ValueType type)
: PcForErrors<validate>(pc), type(type) {}
@@ -1046,7 +1050,7 @@ struct ControlBase : public PcForErrors<validate> {
F(RefFunc, uint32_t function_index, Value* result) \
F(RefAsNonNull, const Value& arg, Value* result) \
F(Drop) \
- F(DoReturn) \
+ F(DoReturn, uint32_t drop_values) \
F(LocalGet, Value* result, const LocalIndexImmediate<validate>& imm) \
F(LocalSet, const Value& value, const LocalIndexImmediate<validate>& imm) \
F(LocalTee, const Value& value, Value* result, \
@@ -1059,11 +1063,11 @@ struct ControlBase : public PcForErrors<validate> {
const TableIndexImmediate<validate>& imm) \
F(TableSet, const Value& index, const Value& value, \
const TableIndexImmediate<validate>& imm) \
- F(Unreachable) \
+ F(Trap, TrapReason reason) \
F(NopForTestingUnsupportedInLiftoff) \
F(Select, const Value& cond, const Value& fval, const Value& tval, \
Value* result) \
- F(BrOrRet, uint32_t depth) \
+ F(BrOrRet, uint32_t depth, uint32_t drop_values) \
F(BrIf, const Value& cond, uint32_t depth) \
F(BrTable, const BranchTableImmediate<validate>& imm, const Value& key) \
F(Else, Control* if_block) \
@@ -1075,8 +1079,6 @@ struct ControlBase : public PcForErrors<validate> {
F(LoadLane, LoadType type, const Value& value, const Value& index, \
const MemoryAccessImmediate<validate>& imm, const uint8_t laneidx, \
Value* result) \
- F(Prefetch, const MemoryAccessImmediate<validate>& imm, const Value& index, \
- bool temporal) \
F(StoreMem, StoreType type, const MemoryAccessImmediate<validate>& imm, \
const Value& index, const Value& value) \
F(StoreLane, StoreType type, const MemoryAccessImmediate<validate>& imm, \
@@ -1155,6 +1157,7 @@ struct ControlBase : public PcForErrors<validate> {
F(RttSub, uint32_t type_index, const Value& parent, Value* result) \
F(RefTest, const Value& obj, const Value& rtt, Value* result) \
F(RefCast, const Value& obj, const Value& rtt, Value* result) \
+ F(AssertNull, const Value& obj, Value* result) \
F(BrOnCast, const Value& obj, const Value& rtt, Value* result_on_branch, \
uint32_t depth) \
F(RefIsData, const Value& object, Value* result) \
@@ -1880,9 +1883,7 @@ class WasmDecoder : public Decoder {
return length;
FOREACH_SIMD_1_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
return length + 1;
- FOREACH_SIMD_MEM_OPCODE(DECLARE_OPCODE_CASE)
- case kExprPrefetchT:
- case kExprPrefetchNT: {
+ FOREACH_SIMD_MEM_OPCODE(DECLARE_OPCODE_CASE) {
MemoryAccessImmediate<validate> imm(decoder, pc + length,
UINT32_MAX,
kConservativelyAssumeMemory64);
@@ -1955,7 +1956,10 @@ class WasmDecoder : public Decoder {
ArrayIndexImmediate<validate> imm(decoder, pc + length);
return length + imm.length;
}
- case kExprBrOnCast: {
+ case kExprBrOnCast:
+ case kExprBrOnData:
+ case kExprBrOnFunc:
+ case kExprBrOnI31: {
BranchDepthImmediate<validate> imm(decoder, pc + length);
return length + imm.length;
}
@@ -1967,12 +1971,15 @@ class WasmDecoder : public Decoder {
case kExprI31New:
case kExprI31GetS:
case kExprI31GetU:
- return length;
+ case kExprRefAsData:
+ case kExprRefAsFunc:
+ case kExprRefAsI31:
+ case kExprRefIsData:
+ case kExprRefIsFunc:
+ case kExprRefIsI31:
case kExprRefTest:
- case kExprRefCast: {
- TypeIndexImmediate<validate> ht(decoder, pc + length);
- return length + ht.length;
- }
+ case kExprRefCast:
+ return length;
default:
// This is unreachable except for malformed modules.
if (validate) {
@@ -2176,17 +2183,16 @@ MemoryAccessImmediate<validate>::MemoryAccessImmediate(
: MemoryAccessImmediate(decoder, pc, max_alignment,
decoder->module_->is_memory64) {}
-#define CALL_INTERFACE(name, ...) interface_.name(this, ##__VA_ARGS__)
-#define CALL_INTERFACE_IF_REACHABLE(name, ...) \
+#define CALL_INTERFACE_IF_OK_AND_REACHABLE(name, ...) \
do { \
DCHECK(!control_.empty()); \
- DCHECK_EQ(current_code_reachable_, \
+ DCHECK_EQ(current_code_reachable_and_ok_, \
this->ok() && control_.back().reachable()); \
- if (current_code_reachable_) { \
+ if (current_code_reachable_and_ok_) { \
interface_.name(this, ##__VA_ARGS__); \
} \
} while (false)
-#define CALL_INTERFACE_IF_PARENT_REACHABLE(name, ...) \
+#define CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(name, ...) \
do { \
DCHECK(!control_.empty()); \
if (VALIDATE(this->ok()) && \
@@ -2199,7 +2205,8 @@ template <Decoder::ValidateFlag validate, typename Interface>
class WasmFullDecoder : public WasmDecoder<validate> {
using Value = typename Interface::Value;
using Control = typename Interface::Control;
- using ArgVector = base::SmallVector<Value, 8>;
+ using ArgVector = Vector<Value>;
+ using ReturnVector = base::SmallVector<Value, 2>;
// All Value types should be trivially copyable for performance. We push, pop,
// and store them in local variables.
@@ -2227,6 +2234,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
uint32_t params_count = static_cast<uint32_t>(this->num_locals());
uint32_t locals_length;
this->DecodeLocals(this->pc(), &locals_length, params_count);
+ if (this->failed()) return TraceFailed();
this->consume_bytes(locals_length);
for (uint32_t index = params_count; index < this->num_locals(); index++) {
if (!VALIDATE(this->local_type(index).is_defaultable())) {
@@ -2237,7 +2245,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
}
- CALL_INTERFACE(StartFunction);
+ // Cannot use CALL_INTERFACE_* macros because control is empty.
+ interface().StartFunction(this);
DecodeFunctionBody();
if (this->failed()) return TraceFailed();
@@ -2250,7 +2259,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
return TraceFailed();
}
- CALL_INTERFACE(FinishFunction);
+ // Cannot use CALL_INTERFACE_* macros because control is empty.
+ interface().FinishFunction(this);
if (this->failed()) return TraceFailed();
TRACE("wasm-decode ok\n\n");
@@ -2310,7 +2320,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Control* current = &control_.back();
if (current->reachable()) {
current->reachability = kSpecOnlyReachable;
- current_code_reachable_ = false;
+ current_code_reachable_and_ok_ = false;
}
}
@@ -2328,7 +2338,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// Controls whether code should be generated for the current block (basically
// a cache for {ok() && control_.back().reachable()}).
- bool current_code_reachable_ = true;
+ bool current_code_reachable_and_ok_ = true;
static Value UnreachableValue(const uint8_t* pc) {
return Value{pc, kWasmBottom};
@@ -2342,12 +2352,14 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return true;
}
- bool CheckSimdPostMvp(WasmOpcode opcode) {
- if (!FLAG_wasm_simd_post_mvp && WasmOpcodes::IsSimdPostMvpOpcode(opcode)) {
+ bool CheckSimdFeatureFlagOpcode(WasmOpcode opcode) {
+ if (!FLAG_experimental_wasm_relaxed_simd &&
+ WasmOpcodes::IsRelaxedSimdOpcode(opcode)) {
this->DecodeError(
- "simd opcode not available, enable with --wasm-simd-post-mvp");
+ "simd opcode not available, enable with --experimental-relaxed-simd");
return false;
}
+
return true;
}
@@ -2452,7 +2464,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->DecodeError("Invalid opcode 0x%x", opcode);
return 0;
}
- CALL_INTERFACE_IF_REACHABLE(NopForTestingUnsupportedInLiftoff);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(NopForTestingUnsupportedInLiftoff);
return 1;
}
@@ -2465,10 +2477,11 @@ class WasmFullDecoder : public WasmDecoder<validate> {
BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_ + 1,
this->module_);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
- ArgVector args = PopArgs(imm.sig);
- Control* block = PushControl(kControlBlock);
+ ArgVector args = PeekArgs(imm.sig);
+ Control* block = PushControl(kControlBlock, 0, args.length());
SetBlockType(block, imm, args.begin());
- CALL_INTERFACE_IF_REACHABLE(Block, block);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(Block, block);
+ DropArgs(imm.sig);
PushMergeValues(block, &block->start_merge);
return 1 + imm.length;
}
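
This Block hunk is the first instance of a pattern repeated throughout the decoder changes below: operands are no longer popped before the interface callback runs but peeked, so the callback still sees them on the value stack, and they are dropped only afterwards. A toy model of that protocol, with plain structs instead of V8's classes:

#include <cassert>
#include <vector>

struct Value { int payload; };

struct ToyStack {
  std::vector<Value> values;
  // Peek(depth): read an operand without removing it from the stack.
  const Value& Peek(size_t depth) const {
    return values[values.size() - 1 - depth];
  }
  void Drop(size_t count) { values.resize(values.size() - count); }
};

int main() {
  ToyStack stack;
  stack.values = {{1}, {2}, {3}};
  // The "interface callback" reads its operand in place...
  int seen = stack.Peek(0).payload;
  assert(seen == 3);
  // ...and only afterwards is the operand dropped.
  stack.Drop(1);
  assert(stack.values.size() == 2);
  return 0;
}
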
@@ -2482,7 +2495,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->error("rethrow not targeting catch or catch-all");
return 0;
}
- CALL_INTERFACE_IF_REACHABLE(Rethrow, c);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(Rethrow, c);
EndControl();
return 1 + imm.length;
}
@@ -2491,8 +2504,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CHECK_PROTOTYPE_OPCODE(eh);
ExceptionIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
- ArgVector args = PopArgs(imm.exception->ToFunctionSig());
- CALL_INTERFACE_IF_REACHABLE(Throw, imm, VectorOf(args));
+ ArgVector args = PeekArgs(imm.exception->ToFunctionSig());
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(Throw, imm, VectorOf(args));
+ DropArgs(imm.exception->ToFunctionSig());
EndControl();
return 1 + imm.length;
}
@@ -2502,10 +2516,11 @@ class WasmFullDecoder : public WasmDecoder<validate> {
BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_ + 1,
this->module_);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
- ArgVector args = PopArgs(imm.sig);
- Control* try_block = PushControl(kControlTry);
+ ArgVector args = PeekArgs(imm.sig);
+ Control* try_block = PushControl(kControlTry, 0, args.length());
SetBlockType(try_block, imm, args.begin());
- CALL_INTERFACE_IF_REACHABLE(Try, try_block);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(Try, try_block);
+ DropArgs(imm.sig);
PushMergeValues(try_block, &try_block->start_merge);
return 1 + imm.length;
}
@@ -2528,19 +2543,21 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->DecodeError("catch after unwind for try");
return 0;
}
- c->kind = kControlTryCatch;
FallThruTo(c);
+ c->kind = kControlTryCatch;
+ // TODO(jkummerow): Consider moving the stack manipulation after the
+ // INTERFACE call for consistency.
DCHECK_LE(stack_ + c->stack_depth, stack_end_);
stack_end_ = stack_ + c->stack_depth;
c->reachability = control_at(1)->innerReachability();
const WasmExceptionSig* sig = imm.exception->sig;
EnsureStackSpace(static_cast<int>(sig->parameter_count()));
for (size_t i = 0, e = sig->parameter_count(); i < e; ++i) {
- Push(sig->GetParam(i));
+ Push(CreateValue(sig->GetParam(i)));
}
Vector<Value> values(stack_ + c->stack_depth, sig->parameter_count());
- CALL_INTERFACE_IF_PARENT_REACHABLE(CatchException, imm, c, values);
- current_code_reachable_ = this->ok() && c->reachable();
+ CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(CatchException, imm, c, values);
+ current_code_reachable_and_ok_ = this->ok() && c->reachable();
return 1 + imm.length;
}
@@ -2562,13 +2579,14 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return 0;
}
if (target->is_try_catch() || target->is_try_catchall() ||
- target->is_try_catchall()) {
+ target->is_try_unwind()) {
this->DecodeError(
"cannot delegate inside the catch handler of the target");
+ return 0;
}
FallThruTo(c);
- CALL_INTERFACE_IF_PARENT_REACHABLE(Delegate, imm.depth + 1, c);
- current_code_reachable_ = this->ok() && control_.back().reachable();
+ CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(Delegate, imm.depth + 1, c);
+ current_code_reachable_and_ok_ = this->ok() && control_.back().reachable();
EndControl();
PopControl(c);
return 1 + imm.length;
@@ -2592,10 +2610,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
FallThruTo(c);
c->kind = kControlTryCatchAll;
- stack_end_ = stack_ + c->stack_depth;
c->reachability = control_at(1)->innerReachability();
- CALL_INTERFACE_IF_PARENT_REACHABLE(CatchAll, c);
- current_code_reachable_ = this->ok() && c->reachable();
+ CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(CatchAll, c);
+ stack_end_ = stack_ + c->stack_depth;
+ current_code_reachable_and_ok_ = this->ok() && c->reachable();
return 1;
}
@@ -2614,10 +2632,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
FallThruTo(c);
c->kind = kControlTryUnwind;
- stack_end_ = stack_ + c->stack_depth;
c->reachability = control_at(1)->innerReachability();
- CALL_INTERFACE_IF_PARENT_REACHABLE(CatchAll, c);
- current_code_reachable_ = this->ok() && c->reachable();
+ CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(CatchAll, c);
+ stack_end_ = stack_ + c->stack_depth;
+ current_code_reachable_and_ok_ = this->ok() && c->reachable();
return 1;
}
@@ -2625,36 +2643,35 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CHECK_PROTOTYPE_OPCODE(typed_funcref);
BranchDepthImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm, control_.size())) return 0;
- Value ref_object = Pop(0);
+ Value ref_object = Peek(0, 0);
Control* c = control_at(imm.depth);
- TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
+ TypeCheckBranchResult check_result = TypeCheckBranch(c, true, 1);
switch (ref_object.type.kind()) {
case kBottom:
- // We are in a polymorphic stack. No need to push an additional bottom
- // value.
+ // We are in a polymorphic stack. Leave the stack as it is.
DCHECK(check_result != kReachableBranch);
break;
- case kRef: {
- // Simply forward the popped argument to the result.
- Value* result = Push(ref_object.type);
- if (V8_LIKELY(check_result == kReachableBranch)) {
- CALL_INTERFACE(Forward, ref_object, result);
- }
+ case kRef:
+ // For a non-nullable value, we won't take the branch, and can leave
+ // the stack as it is.
break;
- }
case kOptRef: {
if (V8_LIKELY(check_result == kReachableBranch)) {
- CALL_INTERFACE_IF_REACHABLE(BrOnNull, ref_object, imm.depth);
- Value* result =
- Push(ValueType::Ref(ref_object.type.heap_type(), kNonNullable));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(BrOnNull, ref_object, imm.depth);
+ Value result = CreateValue(
+ ValueType::Ref(ref_object.type.heap_type(), kNonNullable));
// The result of br_on_null has the same value as the argument (but a
// non-nullable type).
- CALL_INTERFACE(Forward, ref_object, result);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(Forward, ref_object, &result);
c->br_merge()->reached = true;
+ Drop(ref_object);
+ Push(result);
} else {
// Even in non-reachable code, we need to push a value of the correct
// type to the stack.
- Push(ValueType::Ref(ref_object.type.heap_type(), kNonNullable));
+ Drop(ref_object);
+ Push(CreateValue(
+ ValueType::Ref(ref_object.type.heap_type(), kNonNullable)));
}
break;
}
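
In the br_on_null handling above, a null operand takes the branch, while the fall-through path keeps the same value retyped as non-nullable. A hedged, self-contained analogue using std::optional in place of wasm reference types:

#include <cassert>
#include <optional>

// Returns true when the branch is taken (the reference was null); otherwise
// writes the payload to *non_null_out, mirroring the re-typed fall-through.
static bool BrOnNull(const std::optional<int>& ref, int* non_null_out) {
  if (!ref.has_value()) return true;
  *non_null_out = *ref;
  return false;
}

int main() {
  int v = 0;
  assert(BrOnNull(std::nullopt, &v));
  assert(!BrOnNull(std::optional<int>(7), &v) && v == 7);
  return 0;
}
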
@@ -2679,14 +2696,18 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return 0;
}
ArgVector let_local_values =
- PopArgs(static_cast<uint32_t>(imm.in_arity()),
- VectorOf(this->local_types_.data(), new_locals_count));
- ArgVector args = PopArgs(imm.sig);
- Control* let_block = PushControl(kControlLet, new_locals_count);
+ PeekArgs(static_cast<uint32_t>(imm.in_arity()),
+ VectorOf(this->local_types_.data(), new_locals_count));
+ ArgVector args = PeekArgs(imm.sig, new_locals_count);
+ Control* let_block = PushControl(kControlLet, new_locals_count,
+ let_local_values.length() + args.length());
SetBlockType(let_block, imm, args.begin());
- CALL_INTERFACE_IF_REACHABLE(Block, let_block);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(Block, let_block);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(AllocateLocals,
+ VectorOf(let_local_values));
+ Drop(new_locals_count); // Drop {let_local_values}.
+ DropArgs(imm.sig); // Drop {args}.
PushMergeValues(let_block, &let_block->start_merge);
- CALL_INTERFACE_IF_REACHABLE(AllocateLocals, VectorOf(let_local_values));
return 1 + imm.length + locals_length;
}
@@ -2694,10 +2715,11 @@ class WasmFullDecoder : public WasmDecoder<validate> {
BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_ + 1,
this->module_);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
- ArgVector args = PopArgs(imm.sig);
- Control* block = PushControl(kControlLoop);
+ ArgVector args = PeekArgs(imm.sig);
+ Control* block = PushControl(kControlLoop, 0, args.length());
SetBlockType(&control_.back(), imm, args.begin());
- CALL_INTERFACE_IF_REACHABLE(Loop, block);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(Loop, block);
+ DropArgs(imm.sig);
PushMergeValues(block, &block->start_merge);
return 1 + imm.length;
}
@@ -2706,12 +2728,14 @@ class WasmFullDecoder : public WasmDecoder<validate> {
BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_ + 1,
this->module_);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
- Value cond = Pop(0, kWasmI32);
- ArgVector args = PopArgs(imm.sig);
+ Value cond = Peek(0, 0, kWasmI32);
+ ArgVector args = PeekArgs(imm.sig, 1);
if (!VALIDATE(this->ok())) return 0;
- Control* if_block = PushControl(kControlIf);
+ Control* if_block = PushControl(kControlIf, 0, 1 + args.length());
SetBlockType(if_block, imm, args.begin());
- CALL_INTERFACE_IF_REACHABLE(If, cond, if_block);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(If, cond, if_block);
+ Drop(cond);
+ DropArgs(imm.sig); // Drop {args}.
PushMergeValues(if_block, &if_block->start_merge);
return 1 + imm.length;
}
@@ -2729,11 +2753,11 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
if (!TypeCheckFallThru()) return 0;
c->kind = kControlIfElse;
- CALL_INTERFACE_IF_PARENT_REACHABLE(Else, c);
+ CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(Else, c);
if (c->reachable()) c->end_merge.reached = true;
PushMergeValues(c, &c->start_merge);
c->reachability = control_at(1)->innerReachability();
- current_code_reachable_ = this->ok() && c->reachable();
+ current_code_reachable_and_ok_ = this->ok() && c->reachable();
return 1;
}
@@ -2756,22 +2780,23 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// Emulate catch-all + re-throw.
FallThruTo(c);
c->reachability = control_at(1)->innerReachability();
- CALL_INTERFACE_IF_PARENT_REACHABLE(CatchAll, c);
- current_code_reachable_ = this->ok() && control_.back().reachable();
- CALL_INTERFACE_IF_REACHABLE(Rethrow, c);
+ CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(CatchAll, c);
+ current_code_reachable_and_ok_ =
+ this->ok() && control_.back().reachable();
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(Rethrow, c);
EndControl();
}
if (c->is_try_unwind()) {
// Unwind implicitly rethrows at the end.
- CALL_INTERFACE_IF_REACHABLE(Rethrow, c);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(Rethrow, c);
EndControl();
}
if (c->is_let()) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(DeallocateLocals, c->locals_count);
this->local_types_.erase(this->local_types_.begin(),
this->local_types_.begin() + c->locals_count);
this->num_locals_ -= c->locals_count;
- CALL_INTERFACE_IF_REACHABLE(DeallocateLocals, c->locals_count);
}
if (!TypeCheckFallThru()) return 0;
@@ -2793,17 +2818,19 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
DECODE(Select) {
- Value cond = Pop(2, kWasmI32);
- Value fval = Pop(1);
- Value tval = Pop(0, fval.type);
+ Value cond = Peek(0, 2, kWasmI32);
+ Value fval = Peek(1, 1);
+ Value tval = Peek(2, 0, fval.type);
ValueType type = tval.type == kWasmBottom ? fval.type : tval.type;
- if (!VALIDATE(!type.is_reference_type())) {
+ if (!VALIDATE(!type.is_reference())) {
this->DecodeError(
"select without type is only valid for value type inputs");
return 0;
}
- Value* result = Push(type);
- CALL_INTERFACE_IF_REACHABLE(Select, cond, fval, tval, result);
+ Value result = CreateValue(type);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(Select, cond, fval, tval, &result);
+ Drop(3);
+ Push(result);
return 1;
}
@@ -2812,11 +2839,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
SelectTypeImmediate<validate> imm(this->enabled_, this, this->pc_ + 1,
this->module_);
if (this->failed()) return 0;
- Value cond = Pop(2, kWasmI32);
- Value fval = Pop(1, imm.type);
- Value tval = Pop(0, imm.type);
- Value* result = Push(imm.type);
- CALL_INTERFACE_IF_REACHABLE(Select, cond, fval, tval, result);
+ Value cond = Peek(0, 2, kWasmI32);
+ Value fval = Peek(1, 1, imm.type);
+ Value tval = Peek(2, 0, imm.type);
+ Value result = CreateValue(imm.type);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(Select, cond, fval, tval, &result);
+ Drop(3);
+ Push(result);
return 1 + imm.length;
}
@@ -2824,9 +2853,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
BranchDepthImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm, control_.size())) return 0;
Control* c = control_at(imm.depth);
- TypeCheckBranchResult check_result = TypeCheckBranch(c, false);
+ TypeCheckBranchResult check_result = TypeCheckBranch(c, false, 0);
if (V8_LIKELY(check_result == kReachableBranch)) {
- CALL_INTERFACE_IF_REACHABLE(BrOrRet, imm.depth);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(BrOrRet, imm.depth, 0);
c->br_merge()->reached = true;
}
EndControl();
@@ -2836,20 +2865,21 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(BrIf) {
BranchDepthImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm, control_.size())) return 0;
- Value cond = Pop(0, kWasmI32);
+ Value cond = Peek(0, 0, kWasmI32);
Control* c = control_at(imm.depth);
- TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
+ TypeCheckBranchResult check_result = TypeCheckBranch(c, true, 1);
if (V8_LIKELY(check_result == kReachableBranch)) {
- CALL_INTERFACE_IF_REACHABLE(BrIf, cond, imm.depth);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(BrIf, cond, imm.depth);
c->br_merge()->reached = true;
}
+ Drop(cond);
return 1 + imm.length;
}
DECODE(BrTable) {
BranchTableImmediate<validate> imm(this, this->pc_ + 1);
BranchTableIterator<validate> iterator(this, imm);
- Value key = Pop(0, kWasmI32);
+ Value key = Peek(0, 0, kWasmI32);
if (this->failed()) return 0;
if (!this->Validate(this->pc_ + 1, imm, control_.size())) return 0;
@@ -2881,35 +2911,36 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
}
- if (!VALIDATE(TypeCheckBrTable(result_types))) return 0;
+ if (!VALIDATE(TypeCheckBrTable(result_types, 1))) return 0;
DCHECK(this->ok());
- if (current_code_reachable_) {
- CALL_INTERFACE(BrTable, imm, key);
+ if (current_code_reachable_and_ok_) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(BrTable, imm, key);
for (int i = 0, e = control_depth(); i < e; ++i) {
if (!br_targets[i]) continue;
control_at(i)->br_merge()->reached = true;
}
}
-
+ Drop(key);
EndControl();
return 1 + iterator.length();
}
DECODE(Return) {
- if (V8_LIKELY(current_code_reachable_)) {
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
if (!VALIDATE(TypeCheckReturn())) return 0;
DoReturn();
} else {
- // We pop all return values from the stack to check their type.
+ // We inspect all return values from the stack to check their type.
// Since we deal with unreachable code, we do not have to keep the
// values.
int num_returns = static_cast<int>(this->sig_->return_count());
- for (int i = num_returns - 1; i >= 0; --i) {
- Pop(i, this->sig_->GetReturn(i));
+ for (int i = num_returns - 1, depth = 0; i >= 0; --i, ++depth) {
+ Peek(depth, i, this->sig_->GetReturn(i));
}
+ Drop(num_returns);
}
EndControl();
@@ -2917,36 +2948,40 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
DECODE(Unreachable) {
- CALL_INTERFACE_IF_REACHABLE(Unreachable);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(Trap, TrapReason::kTrapUnreachable);
EndControl();
return 1;
}
DECODE(I32Const) {
ImmI32Immediate<validate> imm(this, this->pc_ + 1);
- Value* value = Push(kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(I32Const, value, imm.value);
+ Value value = CreateValue(kWasmI32);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(I32Const, &value, imm.value);
+ Push(value);
return 1 + imm.length;
}
DECODE(I64Const) {
ImmI64Immediate<validate> imm(this, this->pc_ + 1);
- Value* value = Push(kWasmI64);
- CALL_INTERFACE_IF_REACHABLE(I64Const, value, imm.value);
+ Value value = CreateValue(kWasmI64);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(I64Const, &value, imm.value);
+ Push(value);
return 1 + imm.length;
}
DECODE(F32Const) {
ImmF32Immediate<validate> imm(this, this->pc_ + 1);
- Value* value = Push(kWasmF32);
- CALL_INTERFACE_IF_REACHABLE(F32Const, value, imm.value);
+ Value value = CreateValue(kWasmF32);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(F32Const, &value, imm.value);
+ Push(value);
return 1 + imm.length;
}
DECODE(F64Const) {
ImmF64Immediate<validate> imm(this, this->pc_ + 1);
- Value* value = Push(kWasmF64);
- CALL_INTERFACE_IF_REACHABLE(F64Const, value, imm.value);
+ Value value = CreateValue(kWasmF64);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(F64Const, &value, imm.value);
+ Push(value);
return 1 + imm.length;
}
@@ -2956,25 +2991,31 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->module_);
if (!VALIDATE(this->ok())) return 0;
ValueType type = ValueType::Ref(imm.type, kNullable);
- Value* value = Push(type);
- CALL_INTERFACE_IF_REACHABLE(RefNull, type, value);
+ Value value = CreateValue(type);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RefNull, type, &value);
+ Push(value);
return 1 + imm.length;
}
DECODE(RefIsNull) {
CHECK_PROTOTYPE_OPCODE(reftypes);
- Value value = Pop(0);
- Value* result = Push(kWasmI32);
+ Value value = Peek(0, 0);
+ Value result = CreateValue(kWasmI32);
switch (value.type.kind()) {
case kOptRef:
- CALL_INTERFACE_IF_REACHABLE(UnOp, kExprRefIsNull, value, result);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(UnOp, kExprRefIsNull, value,
+ &result);
+ Drop(value);
+ Push(result);
return 1;
case kBottom:
// We are in unreachable code, the return value does not matter.
case kRef:
// For non-nullable references, the result is always false.
- CALL_INTERFACE_IF_REACHABLE(Drop);
- CALL_INTERFACE_IF_REACHABLE(I32Const, result, 0);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(Drop);
+ Drop(value);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(I32Const, &result, 0);
+ Push(result);
return 1;
default:
if (validate) {
@@ -2992,26 +3033,27 @@ class WasmFullDecoder : public WasmDecoder<validate> {
HeapType heap_type(this->enabled_.has_typed_funcref()
? this->module_->functions[imm.index].sig_index
: HeapType::kFunc);
- Value* value = Push(ValueType::Ref(heap_type, kNonNullable));
- CALL_INTERFACE_IF_REACHABLE(RefFunc, imm.index, value);
+ Value value = CreateValue(ValueType::Ref(heap_type, kNonNullable));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RefFunc, imm.index, &value);
+ Push(value);
return 1 + imm.length;
}
DECODE(RefAsNonNull) {
CHECK_PROTOTYPE_OPCODE(typed_funcref);
- Value value = Pop(0);
+ Value value = Peek(0, 0);
switch (value.type.kind()) {
case kBottom:
// We are in unreachable code. Forward the bottom value.
- case kRef: {
- Value* result = Push(value.type);
- CALL_INTERFACE_IF_REACHABLE(Forward, value, result);
+ case kRef:
+ // A non-nullable value can remain as-is.
return 1;
- }
case kOptRef: {
- Value* result =
- Push(ValueType::Ref(value.type.heap_type(), kNonNullable));
- CALL_INTERFACE_IF_REACHABLE(RefAsNonNull, value, result);
+ Value result =
+ CreateValue(ValueType::Ref(value.type.heap_type(), kNonNullable));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RefAsNonNull, value, &result);
+ Drop(value);
+ Push(result);
return 1;
}
default:
@@ -3025,39 +3067,46 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(LocalGet) {
LocalIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
- Value* value = Push(this->local_type(imm.index));
- CALL_INTERFACE_IF_REACHABLE(LocalGet, value, imm);
+ Value value = CreateValue(this->local_type(imm.index));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(LocalGet, &value, imm);
+ Push(value);
return 1 + imm.length;
}
DECODE(LocalSet) {
LocalIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
- Value value = Pop(0, this->local_type(imm.index));
- CALL_INTERFACE_IF_REACHABLE(LocalSet, value, imm);
+ Value value = Peek(0, 0, this->local_type(imm.index));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(LocalSet, value, imm);
+ Drop(value);
return 1 + imm.length;
}
DECODE(LocalTee) {
LocalIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
- Value value = Pop(0, this->local_type(imm.index));
- Value* result = Push(value.type);
- CALL_INTERFACE_IF_REACHABLE(LocalTee, value, result, imm);
+ ValueType local_type = this->local_type(imm.index);
+ Value value = Peek(0, 0, local_type);
+ Value result = CreateValue(local_type);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(LocalTee, value, &result, imm);
+ Drop(value);
+ Push(result);
return 1 + imm.length;
}
DECODE(Drop) {
- Pop(0);
- CALL_INTERFACE_IF_REACHABLE(Drop);
+ Peek(0, 0);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(Drop);
+ Drop(1);
return 1;
}
DECODE(GlobalGet) {
GlobalIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
- Value* result = Push(imm.type);
- CALL_INTERFACE_IF_REACHABLE(GlobalGet, result, imm);
+ Value result = CreateValue(imm.type);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(GlobalGet, &result, imm);
+ Push(result);
return 1 + imm.length;
}
@@ -3068,8 +3117,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->DecodeError("immutable global #%u cannot be assigned", imm.index);
return 0;
}
- Value value = Pop(0, imm.type);
- CALL_INTERFACE_IF_REACHABLE(GlobalSet, value, imm);
+ Value value = Peek(0, 0, imm.type);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(GlobalSet, value, imm);
+ Drop(value);
return 1 + imm.length;
}
@@ -3077,9 +3127,11 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CHECK_PROTOTYPE_OPCODE(reftypes);
TableIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
- Value index = Pop(0, kWasmI32);
- Value* result = Push(this->module_->tables[imm.index].type);
- CALL_INTERFACE_IF_REACHABLE(TableGet, index, result, imm);
+ Value index = Peek(0, 0, kWasmI32);
+ Value result = CreateValue(this->module_->tables[imm.index].type);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(TableGet, index, &result, imm);
+ Drop(index);
+ Push(result);
return 1 + imm.length;
}
@@ -3087,9 +3139,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CHECK_PROTOTYPE_OPCODE(reftypes);
TableIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
- Value value = Pop(1, this->module_->tables[imm.index].type);
- Value index = Pop(0, kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(TableSet, index, value, imm);
+ Value value = Peek(0, 1, this->module_->tables[imm.index].type);
+ Value index = Peek(1, 0, kWasmI32);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(TableSet, index, value, imm);
+ Drop(2);
return 1 + imm.length;
}
@@ -3130,13 +3183,14 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(MemoryGrow) {
if (!CheckHasMemory()) return 0;
MemoryIndexImmediate<validate> imm(this, this->pc_ + 1);
- if (!VALIDATE(this->module_->origin == kWasmOrigin)) {
- this->DecodeError("grow_memory is not supported for asmjs modules");
- return 0;
- }
- Value value = Pop(0, kWasmI32);
- Value* result = Push(kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(MemoryGrow, value, result);
+ // This opcode will not be emitted by the asm translator.
+ DCHECK_EQ(kWasmOrigin, this->module_->origin);
+ ValueType mem_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
+ Value value = Peek(0, 0, mem_type);
+ Value result = CreateValue(mem_type);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(MemoryGrow, value, &result);
+ Drop(value);
+ Push(result);
return 1 + imm.length;
}
@@ -3144,28 +3198,35 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!CheckHasMemory()) return 0;
MemoryIndexImmediate<validate> imm(this, this->pc_ + 1);
ValueType result_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
- Value* result = Push(result_type);
- CALL_INTERFACE_IF_REACHABLE(CurrentMemoryPages, result);
+ Value result = CreateValue(result_type);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(CurrentMemoryPages, &result);
+ Push(result);
return 1 + imm.length;
}
DECODE(CallFunction) {
CallFunctionImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
- ArgVector args = PopArgs(imm.sig);
- Value* returns = PushReturns(imm.sig);
- CALL_INTERFACE_IF_REACHABLE(CallDirect, imm, args.begin(), returns);
+ ArgVector args = PeekArgs(imm.sig);
+ ReturnVector returns = CreateReturnValues(imm.sig);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(CallDirect, imm, args.begin(),
+ returns.begin());
+ DropArgs(imm.sig);
+ PushReturns(returns);
return 1 + imm.length;
}
DECODE(CallIndirect) {
CallIndirectImmediate<validate> imm(this->enabled_, this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
- Value index = Pop(0, kWasmI32);
- ArgVector args = PopArgs(imm.sig);
- Value* returns = PushReturns(imm.sig);
- CALL_INTERFACE_IF_REACHABLE(CallIndirect, index, imm, args.begin(),
- returns);
+ Value index = Peek(0, 0, kWasmI32);
+ ArgVector args = PeekArgs(imm.sig, 1);
+ ReturnVector returns = CreateReturnValues(imm.sig);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(CallIndirect, index, imm, args.begin(),
+ returns.begin());
+ Drop(index);
+ DropArgs(imm.sig);
+ PushReturns(returns);
return 1 + imm.length;
}
@@ -3178,8 +3239,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
"tail call return types mismatch");
return 0;
}
- ArgVector args = PopArgs(imm.sig);
- CALL_INTERFACE_IF_REACHABLE(ReturnCall, imm, args.begin());
+ ArgVector args = PeekArgs(imm.sig);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(ReturnCall, imm, args.begin());
+ DropArgs(imm.sig);
EndControl();
return 1 + imm.length;
}
@@ -3194,54 +3256,61 @@ class WasmFullDecoder : public WasmDecoder<validate> {
"tail call return types mismatch");
return 0;
}
- Value index = Pop(0, kWasmI32);
- ArgVector args = PopArgs(imm.sig);
- CALL_INTERFACE_IF_REACHABLE(ReturnCallIndirect, index, imm, args.begin());
+ Value index = Peek(0, 0, kWasmI32);
+ ArgVector args = PeekArgs(imm.sig, 1);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(ReturnCallIndirect, index, imm,
+ args.begin());
+ Drop(index);
+ DropArgs(imm.sig);
EndControl();
return 1 + imm.length;
}
DECODE(CallRef) {
CHECK_PROTOTYPE_OPCODE(typed_funcref);
- Value func_ref = Pop(0);
+ Value func_ref = Peek(0, 0);
ValueType func_type = func_ref.type;
if (func_type == kWasmBottom) {
// We are in unreachable code, maintain the polymorphic stack.
return 1;
}
- if (!VALIDATE(func_type.is_object_reference_type() &&
- func_type.has_index() &&
+ if (!VALIDATE(func_type.is_object_reference() && func_type.has_index() &&
this->module_->has_signature(func_type.ref_index()))) {
PopTypeError(0, func_ref, "function reference");
return 0;
}
const FunctionSig* sig = this->module_->signature(func_type.ref_index());
- ArgVector args = PopArgs(sig);
- Value* returns = PushReturns(sig);
- CALL_INTERFACE_IF_REACHABLE(CallRef, func_ref, sig, func_type.ref_index(),
- args.begin(), returns);
+ ArgVector args = PeekArgs(sig, 1);
+ ReturnVector returns = CreateReturnValues(sig);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(CallRef, func_ref, sig,
+ func_type.ref_index(), args.begin(),
+ returns.begin());
+ Drop(func_ref);
+ DropArgs(sig);
+ PushReturns(returns);
return 1;
}
DECODE(ReturnCallRef) {
CHECK_PROTOTYPE_OPCODE(typed_funcref);
CHECK_PROTOTYPE_OPCODE(return_call);
- Value func_ref = Pop(0);
+ Value func_ref = Peek(0, 0);
ValueType func_type = func_ref.type;
if (func_type == kWasmBottom) {
// We are in unreachable code, maintain the polymorphic stack.
return 1;
}
- if (!VALIDATE(func_type.is_object_reference_type() &&
- func_type.has_index() &&
+ if (!VALIDATE(func_type.is_object_reference() && func_type.has_index() &&
this->module_->has_signature(func_type.ref_index()))) {
PopTypeError(0, func_ref, "function reference");
return 0;
}
const FunctionSig* sig = this->module_->signature(func_type.ref_index());
- ArgVector args = PopArgs(sig);
- CALL_INTERFACE_IF_REACHABLE(ReturnCallRef, func_ref, sig,
- func_type.ref_index(), args.begin());
+ ArgVector args = PeekArgs(sig, 1);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(ReturnCallRef, func_ref, sig,
+ func_type.ref_index(), args.begin());
+ Drop(func_ref);
+ DropArgs(sig);
EndControl();
return 1;
}
@@ -3260,7 +3329,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(Simd) {
CHECK_PROTOTYPE_OPCODE(simd);
- if (!CheckHardwareSupportsSimd()) {
+ if (!CheckHardwareSupportsSimd() && !FLAG_wasm_simd_ssse3_codegen) {
if (FLAG_correctness_fuzzer_suppressions) {
FATAL("Aborting on missing Wasm SIMD support");
}
@@ -3272,7 +3341,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->pc_, &opcode_length);
if (!VALIDATE(this->ok())) return 0;
trace_msg->AppendOpcode(full_opcode);
- if (!CheckSimdPostMvp(full_opcode)) {
+ if (!CheckSimdFeatureFlagOpcode(full_opcode)) {
return 0;
}
return DecodeSimdOpcode(full_opcode, opcode_length);
@@ -3418,7 +3487,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
[&](uint32_t i) {
return Value{this->pc_, this->sig_->GetReturn(i)};
});
- CALL_INTERFACE(StartFunctionBody, c);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(StartFunctionBody, c);
}
// Decode the function body.
@@ -3430,9 +3499,20 @@ class WasmFullDecoder : public WasmDecoder<validate> {
EnsureStackSpace(1);
uint8_t first_byte = *this->pc_;
WasmOpcode opcode = static_cast<WasmOpcode>(first_byte);
- CALL_INTERFACE_IF_REACHABLE(NextInstruction, opcode);
- OpcodeHandler handler = GetOpcodeHandler(first_byte);
- int len = (*handler)(this, opcode);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(NextInstruction, opcode);
+ int len;
+ // Allowing two of the most common decoding functions to get inlined
+ // appears to be the sweet spot.
+ // Handling _all_ opcodes via a giant switch-statement has been tried
+ // and found to be slower than calling through the handler table.
+ if (opcode == kExprLocalGet) {
+ len = WasmFullDecoder::DecodeLocalGet(this, opcode);
+ } else if (opcode == kExprI32Const) {
+ len = WasmFullDecoder::DecodeI32Const(this, opcode);
+ } else {
+ OpcodeHandler handler = GetOpcodeHandler(first_byte);
+ len = (*handler)(this, opcode);
+ }
this->pc_ += len;
}
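
The dispatch change above special-cases the two most frequent opcodes so their handlers can be inlined, falling back to the indirect handler-table call for everything else. A minimal sketch of that shape; the opcode byte values are the standard wasm encodings, everything else is invented for illustration:

#include <cstdio>

enum Opcode : unsigned char { kExprLocalGet = 0x20, kExprI32Const = 0x41 };

using Handler = int (*)(unsigned char);
static int GenericHandler(unsigned char op) { return op; }  // stands in for the table

static int Dispatch(unsigned char first_byte) {
  if (first_byte == kExprLocalGet) return 1;  // inlined fast path
  if (first_byte == kExprI32Const) return 2;  // inlined fast path
  Handler handler = &GenericHandler;          // handler-table lookup in the real code
  return handler(first_byte);
}

int main() {
  std::printf("%d %d %d\n", Dispatch(kExprLocalGet), Dispatch(kExprI32Const),
              Dispatch(0x6a));
  return 0;
}
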
@@ -3446,9 +3526,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Control* current = &control_.back();
DCHECK_LE(stack_ + current->stack_depth, stack_end_);
stack_end_ = stack_ + current->stack_depth;
- CALL_INTERFACE_IF_REACHABLE(EndControl, current);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(EndControl, current);
current->reachability = kUnreachable;
- current_code_reachable_ = false;
+ current_code_reachable_and_ok_ = false;
}
template <typename func>
@@ -3476,45 +3556,89 @@ class WasmFullDecoder : public WasmDecoder<validate> {
[args](uint32_t i) { return args[i]; });
}
- // Pops arguments as required by signature.
- V8_INLINE ArgVector PopArgs(const FunctionSig* sig) {
+ V8_INLINE void EnsureStackArguments(int count) {
+ uint32_t limit = control_.back().stack_depth;
+ if (stack_size() >= count + limit) return;
+ EnsureStackArguments_Slow(count, limit);
+ }
+
+ V8_NOINLINE void EnsureStackArguments_Slow(int count, uint32_t limit) {
+ if (!VALIDATE(control_.back().unreachable())) {
+ int index = count - stack_size() - 1;
+ NotEnoughArgumentsError(index);
+ }
+ // Silently create unreachable values out of thin air. Since we push them
+ // onto the stack, while conceptually we should be inserting them under
+ // any existing elements, we have to avoid validation failures that would
+ // be caused by finding non-unreachable values in the wrong slot, so we
+ // replace the entire current scope's values.
+ Drop(static_cast<int>(stack_size() - limit));
+ EnsureStackSpace(count + limit - stack_size());
+ while (stack_size() < count + limit) {
+ Push(UnreachableValue(this->pc_));
+ }
+ }
+
+ // Peeks arguments as required by signature.
+ V8_INLINE ArgVector PeekArgs(const FunctionSig* sig, int depth = 0) {
int count = sig ? static_cast<int>(sig->parameter_count()) : 0;
- ArgVector args(count);
- for (int i = count - 1; i >= 0; --i) {
- args[i] = Pop(i, sig->GetParam(i));
+ if (count == 0) return {};
+ EnsureStackArguments(depth + count);
+ ArgVector args(stack_value(depth + count), count);
+ for (int i = 0; i < count; i++) {
+ ValidateArgType(args, i, sig->GetParam(i));
}
return args;
}
+ V8_INLINE void DropArgs(const FunctionSig* sig) {
+ int count = sig ? static_cast<int>(sig->parameter_count()) : 0;
+ Drop(count);
+ }
- V8_INLINE ArgVector PopArgs(const StructType* type) {
+ V8_INLINE ArgVector PeekArgs(const StructType* type, int depth = 0) {
int count = static_cast<int>(type->field_count());
- ArgVector args(count);
- for (int i = count - 1; i >= 0; i--) {
- args[i] = Pop(i, type->field(i).Unpacked());
+ if (count == 0) return {};
+ EnsureStackArguments(depth + count);
+ ArgVector args(stack_value(depth + count), count);
+ for (int i = 0; i < count; i++) {
+ ValidateArgType(args, i, type->field(i).Unpacked());
}
return args;
}
+ V8_INLINE void DropArgs(const StructType* type) {
+ Drop(static_cast<int>(type->field_count()));
+ }
- V8_INLINE ArgVector PopArgs(uint32_t base_index,
- Vector<ValueType> arg_types) {
- ArgVector args(arg_types.size());
- for (int i = static_cast<int>(arg_types.size()) - 1; i >= 0; i--) {
- args[i] = Pop(base_index + i, arg_types[i]);
+ V8_INLINE ArgVector PeekArgs(uint32_t base_index,
+ Vector<ValueType> arg_types) {
+ int size = static_cast<int>(arg_types.size());
+ EnsureStackArguments(size);
+ ArgVector args(stack_value(size), arg_types.size());
+ for (int i = 0; i < size; i++) {
+ ValidateArgType(args, i, arg_types[i]);
}
return args;
}
ValueType GetReturnType(const FunctionSig* sig) {
DCHECK_GE(1, sig->return_count());
- return sig->return_count() == 0 ? kWasmStmt : sig->GetReturn();
+ return sig->return_count() == 0 ? kWasmVoid : sig->GetReturn();
}
- Control* PushControl(ControlKind kind, uint32_t locals_count = 0) {
+ // TODO(jkummerow): Consider refactoring control stack management so
+ // that {drop_values} is never needed. That would require decoupling
+ // creation of the Control object from setting of its stack depth.
+ Control* PushControl(ControlKind kind, uint32_t locals_count = 0,
+ uint32_t drop_values = 0) {
DCHECK(!control_.empty());
Reachability reachability = control_.back().innerReachability();
- control_.emplace_back(kind, locals_count, stack_size(), this->pc_,
+ // In unreachable code, we may run out of stack.
+ uint32_t stack_depth =
+ stack_size() >= drop_values ? stack_size() - drop_values : 0;
+ stack_depth = std::max(stack_depth, control_.back().stack_depth);
+ control_.emplace_back(kind, locals_count, stack_depth, this->pc_,
reachability);
- current_code_reachable_ = this->ok() && reachability == kReachable;
+ current_code_reachable_and_ok_ = this->ok() && reachability == kReachable;
return &control_.back();
}
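
EnsureStackArguments above handles the unreachable-code case by discarding the current scope's stack values and refilling up to the operand count an opcode expects with placeholder unreachable values. A toy version using ints, with -1 standing in for an unreachable placeholder:

#include <vector>

// In unreachable code, missing operands are conjured so later bookkeeping can
// proceed uniformly.
static void EnsureStackArguments(std::vector<int>* stack, size_t count,
                                 size_t limit) {
  if (stack->size() >= count + limit) return;
  // Replace the current scope's values wholesale with placeholders.
  stack->resize(limit);
  while (stack->size() < count + limit) stack->push_back(-1);
}

int main() {
  std::vector<int> stack = {7};        // one real value in this scope
  EnsureStackArguments(&stack, 3, 0);  // an opcode that expects three operands
  return stack.size() == 3 ? 0 : 1;
}
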
@@ -3523,7 +3647,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DCHECK_LT(1, control_.size());
DCHECK_EQ(c, &control_.back());
- CALL_INTERFACE_IF_PARENT_REACHABLE(PopControl, c);
+ CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(PopControl, c);
// A loop just leaves the values on the stack.
if (!c->is_loop()) PushMergeValues(c, &c->end_merge);
@@ -3534,7 +3658,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// If the parent block was reachable before, but the popped control does not
// return to here, this block becomes "spec only reachable".
if (!parent_reached) SetSucceedingCodeDynamicallyUnreachable();
- current_code_reachable_ = control_.back().reachable();
+ current_code_reachable_and_ok_ = control_.back().reachable();
}
int DecodeLoadMem(LoadType type, int prefix_len = 1) {
@@ -3542,9 +3666,11 @@ class WasmFullDecoder : public WasmDecoder<validate> {
MemoryAccessImmediate<validate> imm(this, this->pc_ + prefix_len,
type.size_log_2());
ValueType index_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
- Value index = Pop(0, index_type);
- Value* result = Push(type.value_type());
- CALL_INTERFACE_IF_REACHABLE(LoadMem, type, imm, index, result);
+ Value index = Peek(0, 0, index_type);
+ Value result = CreateValue(type.value_type());
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(LoadMem, type, imm, index, &result);
+ Drop(index);
+ Push(result);
return prefix_len + imm.length;
}
@@ -3557,10 +3683,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
MemoryAccessImmediate<validate> imm(this, this->pc_ + opcode_length,
max_alignment);
ValueType index_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
- Value index = Pop(0, index_type);
- Value* result = Push(kWasmS128);
- CALL_INTERFACE_IF_REACHABLE(LoadTransform, type, transform, imm, index,
- result);
+ Value index = Peek(0, 0, index_type);
+ Value result = CreateValue(kWasmS128);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(LoadTransform, type, transform, imm,
+ index, &result);
+ Drop(index);
+ Push(result);
return opcode_length + imm.length;
}
@@ -3571,12 +3699,14 @@ class WasmFullDecoder : public WasmDecoder<validate> {
SimdLaneImmediate<validate> lane_imm(
this, this->pc_ + opcode_length + mem_imm.length);
if (!this->Validate(this->pc_ + opcode_length, opcode, lane_imm)) return 0;
- Value v128 = Pop(1, kWasmS128);
- Value index = Pop(0, kWasmI32);
-
- Value* result = Push(kWasmS128);
- CALL_INTERFACE_IF_REACHABLE(LoadLane, type, v128, index, mem_imm,
- lane_imm.lane, result);
+ Value v128 = Peek(0, 1, kWasmS128);
+ Value index = Peek(1, 0, kWasmI32);
+
+ Value result = CreateValue(kWasmS128);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(LoadLane, type, v128, index, mem_imm,
+ lane_imm.lane, &result);
+ Drop(2);
+ Push(result);
return opcode_length + mem_imm.length + lane_imm.length;
}
@@ -3588,11 +3718,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
SimdLaneImmediate<validate> lane_imm(
this, this->pc_ + opcode_length + mem_imm.length);
if (!this->Validate(this->pc_ + opcode_length, opcode, lane_imm)) return 0;
- Value v128 = Pop(1, kWasmS128);
- Value index = Pop(0, kWasmI32);
+ Value v128 = Peek(0, 1, kWasmS128);
+ Value index = Peek(1, 0, kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(StoreLane, type, mem_imm, index, v128,
- lane_imm.lane);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(StoreLane, type, mem_imm, index, v128,
+ lane_imm.lane);
+ Drop(2);
return opcode_length + mem_imm.length + lane_imm.length;
}
@@ -3600,10 +3731,11 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!CheckHasMemory()) return 0;
MemoryAccessImmediate<validate> imm(this, this->pc_ + prefix_len,
store.size_log_2());
- Value value = Pop(1, store.value_type());
+ Value value = Peek(0, 1, store.value_type());
ValueType index_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
- Value index = Pop(0, index_type);
- CALL_INTERFACE_IF_REACHABLE(StoreMem, store, imm, index, value);
+ Value index = Peek(1, 0, index_type);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(StoreMem, store, imm, index, value);
+ Drop(2);
return prefix_len + imm.length;
}
@@ -3659,11 +3791,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return true;
}
- bool TypeCheckBrTable(const std::vector<ValueType>& result_types) {
+ bool TypeCheckBrTable(const std::vector<ValueType>& result_types,
+ uint32_t drop_values) {
int br_arity = static_cast<int>(result_types.size());
if (V8_LIKELY(!control_.back().unreachable())) {
int available =
static_cast<int>(stack_size()) - control_.back().stack_depth;
+ available -= std::min(available, static_cast<int>(drop_values));
// There have to be enough values on the stack.
if (!VALIDATE(available >= br_arity)) {
this->DecodeError(
@@ -3671,7 +3805,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
br_arity, startrel(control_.back().pc()), available);
return false;
}
- Value* stack_values = stack_end_ - br_arity;
+ Value* stack_values = stack_end_ - br_arity - drop_values;
// Type-check the topmost br_arity values on the stack.
for (int i = 0; i < br_arity; ++i) {
Value& val = stack_values[i];
@@ -3683,16 +3817,19 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
}
} else { // !control_.back().reachable()
- // Pop values from the stack, according to the expected signature.
- for (int i = 0; i < br_arity; ++i) Pop(i + 1, result_types[i]);
+ // Type-check the values on the stack.
+ for (int i = 0; i < br_arity; ++i) {
+ Peek(i + drop_values, i + 1, result_types[i]);
+ }
}
return this->ok();
}
uint32_t SimdConstOp(uint32_t opcode_length) {
Simd128Immediate<validate> imm(this, this->pc_ + opcode_length);
- auto* result = Push(kWasmS128);
- CALL_INTERFACE_IF_REACHABLE(S128Const, imm, result);
+ Value result = CreateValue(kWasmS128);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(S128Const, imm, &result);
+ Push(result);
return opcode_length + kSimd128Size;
}
@@ -3700,10 +3837,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
uint32_t opcode_length) {
SimdLaneImmediate<validate> imm(this, this->pc_ + opcode_length);
if (this->Validate(this->pc_ + opcode_length, opcode, imm)) {
- Value inputs[] = {Pop(0, kWasmS128)};
- Value* result = Push(type);
- CALL_INTERFACE_IF_REACHABLE(SimdLaneOp, opcode, imm, ArrayVector(inputs),
- result);
+ Value inputs[] = {Peek(0, 0, kWasmS128)};
+ Value result = CreateValue(type);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(SimdLaneOp, opcode, imm,
+ ArrayVector(inputs), &result);
+ Drop(1);
+ Push(result);
}
return opcode_length + imm.length;
}
@@ -3712,13 +3851,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
uint32_t opcode_length) {
SimdLaneImmediate<validate> imm(this, this->pc_ + opcode_length);
if (this->Validate(this->pc_ + opcode_length, opcode, imm)) {
- Value inputs[2] = {UnreachableValue(this->pc_),
- UnreachableValue(this->pc_)};
- inputs[1] = Pop(1, type);
- inputs[0] = Pop(0, kWasmS128);
- Value* result = Push(kWasmS128);
- CALL_INTERFACE_IF_REACHABLE(SimdLaneOp, opcode, imm, ArrayVector(inputs),
- result);
+ Value inputs[2] = {Peek(1, 0, kWasmS128), Peek(0, 1, type)};
+ Value result = CreateValue(kWasmS128);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(SimdLaneOp, opcode, imm,
+ ArrayVector(inputs), &result);
+ Drop(2);
+ Push(result);
}
return opcode_length + imm.length;
}
@@ -3726,27 +3864,17 @@ class WasmFullDecoder : public WasmDecoder<validate> {
uint32_t Simd8x16ShuffleOp(uint32_t opcode_length) {
Simd128Immediate<validate> imm(this, this->pc_ + opcode_length);
if (this->Validate(this->pc_ + opcode_length, imm)) {
- Value input1 = Pop(1, kWasmS128);
- Value input0 = Pop(0, kWasmS128);
- Value* result = Push(kWasmS128);
- CALL_INTERFACE_IF_REACHABLE(Simd8x16ShuffleOp, imm, input0, input1,
- result);
+ Value input1 = Peek(0, 1, kWasmS128);
+ Value input0 = Peek(1, 0, kWasmS128);
+ Value result = CreateValue(kWasmS128);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(Simd8x16ShuffleOp, imm, input0, input1,
+ &result);
+ Drop(2);
+ Push(result);
}
return opcode_length + 16;
}
- uint32_t SimdPrefetch(uint32_t opcode_length, bool temporal) {
- if (!CheckHasMemory()) return 0;
- // Alignment doesn't matter, set to an arbitrary value.
- uint32_t max_alignment = 4;
- MemoryAccessImmediate<validate> imm(this, this->pc_ + opcode_length,
- max_alignment);
- ValueType index_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
- Value index = Pop(0, index_type);
- CALL_INTERFACE_IF_REACHABLE(Prefetch, imm, index, temporal);
- return opcode_length + imm.length;
- }
-
uint32_t DecodeSimdOpcode(WasmOpcode opcode, uint32_t opcode_length) {
// opcode_length is the number of bytes that this SIMD-specific opcode takes
// up in the LEB128 encoded form.
@@ -3851,33 +3979,43 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kExprS128Const:
return SimdConstOp(opcode_length);
- case kExprPrefetchT: {
- return SimdPrefetch(opcode_length, /*temporal=*/true);
- }
- case kExprPrefetchNT: {
- return SimdPrefetch(opcode_length, /*temporal=*/false);
- }
default: {
const FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (!VALIDATE(sig != nullptr)) {
this->DecodeError("invalid simd opcode");
return 0;
}
- ArgVector args = PopArgs(sig);
- Value* results =
- sig->return_count() == 0 ? nullptr : Push(GetReturnType(sig));
- CALL_INTERFACE_IF_REACHABLE(SimdOp, opcode, VectorOf(args), results);
+ ArgVector args = PeekArgs(sig);
+ if (sig->return_count() == 0) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(SimdOp, opcode, VectorOf(args),
+ nullptr);
+ DropArgs(sig);
+ } else {
+ ReturnVector results = CreateReturnValues(sig);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(SimdOp, opcode, VectorOf(args),
+ results.begin());
+ DropArgs(sig);
+ PushReturns(results);
+ }
return opcode_length;
}
}
}
+ bool ObjectRelatedWithRtt(Value obj, Value rtt) {
+ return IsSubtypeOf(ValueType::Ref(rtt.type.ref_index(), kNonNullable),
+ obj.type, this->module_) ||
+ IsSubtypeOf(obj.type,
+ ValueType::Ref(rtt.type.ref_index(), kNullable),
+ this->module_);
+ }
+
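
ObjectRelatedWithRtt above checks subtyping in both directions: a cast target and the object's static type are "related" if either is a subtype of the other, and when neither holds the decoder can resolve ref.test to the constant 0 (and ref.cast to a null check or a trap, in the hunks below). The following is a toy, self-contained analogue of that check; the lattice and all names are invented and far smaller than wasm's real type hierarchy.

// Toy heap-type lattice, illustrative only: kArray <: kData <: kAny and
// kFunc <: kAny. Real wasm typing is richer; the names are invented.
#include <cstdio>

enum class HeapType { kAny, kFunc, kData, kArray };

bool IsSubtype(HeapType sub, HeapType super) {
  if (sub == super || super == HeapType::kAny) return true;
  return sub == HeapType::kArray && super == HeapType::kData;
}

// Same shape as ObjectRelatedWithRtt: the cast target and the object's static
// type must be related in one direction or the other, otherwise the cast can
// never succeed at runtime.
bool Related(HeapType obj, HeapType target) {
  return IsSubtype(target, obj) || IsSubtype(obj, target);
}

int main() {
  // data vs array: related (a data value may dynamically be an array).
  std::printf("data/array related: %d\n",
              Related(HeapType::kData, HeapType::kArray));
  // func vs data: unrelated, so a ref.test against this target is always 0.
  std::printf("func/data related:  %d\n",
              Related(HeapType::kFunc, HeapType::kData));
  return 0;
}
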
int DecodeGCOpcode(WasmOpcode opcode, uint32_t opcode_length) {
switch (opcode) {
case kExprStructNewWithRtt: {
StructIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- Value rtt = Pop(imm.struct_type->field_count());
+ Value rtt = Peek(0, imm.struct_type->field_count());
if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
PopTypeError(imm.struct_type->field_count(), rtt, "rtt");
return 0;
@@ -3891,10 +4029,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
"rtt for type " + std::to_string(imm.index));
return 0;
}
- ArgVector args = PopArgs(imm.struct_type);
- Value* value = Push(ValueType::Ref(imm.index, kNonNullable));
- CALL_INTERFACE_IF_REACHABLE(StructNewWithRtt, imm, rtt, args.begin(),
- value);
+ ArgVector args = PeekArgs(imm.struct_type, 1);
+ Value value = CreateValue(ValueType::Ref(imm.index, kNonNullable));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(StructNewWithRtt, imm, rtt,
+ args.begin(), &value);
+ Drop(rtt);
+ DropArgs(imm.struct_type);
+ Push(value);
return opcode_length + imm.length;
}
case kExprStructNewDefault: {
@@ -3912,7 +4053,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
}
}
- Value rtt = Pop(0);
+ Value rtt = Peek(0, 0);
if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
PopTypeError(0, rtt, "rtt");
return 0;
@@ -3925,8 +4066,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
PopTypeError(0, rtt, "rtt for type " + std::to_string(imm.index));
return 0;
}
- Value* value = Push(ValueType::Ref(imm.index, kNonNullable));
- CALL_INTERFACE_IF_REACHABLE(StructNewDefault, imm, rtt, value);
+ Value value = CreateValue(ValueType::Ref(imm.index, kNonNullable));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(StructNewDefault, imm, rtt, &value);
+ Drop(rtt);
+ Push(value);
return opcode_length + imm.length;
}
case kExprStructGet: {
@@ -3942,9 +4085,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return 0;
}
Value struct_obj =
- Pop(0, ValueType::Ref(field.struct_index.index, kNullable));
- Value* value = Push(field_type);
- CALL_INTERFACE_IF_REACHABLE(StructGet, struct_obj, field, true, value);
+ Peek(0, 0, ValueType::Ref(field.struct_index.index, kNullable));
+ Value value = CreateValue(field_type);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(StructGet, struct_obj, field, true,
+ &value);
+ Drop(struct_obj);
+ Push(value);
return opcode_length + field.length;
}
case kExprStructGetU:
@@ -3962,10 +4108,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return 0;
}
Value struct_obj =
- Pop(0, ValueType::Ref(field.struct_index.index, kNullable));
- Value* value = Push(field_type.Unpacked());
- CALL_INTERFACE_IF_REACHABLE(StructGet, struct_obj, field,
- opcode == kExprStructGetS, value);
+ Peek(0, 0, ValueType::Ref(field.struct_index.index, kNullable));
+ Value value = CreateValue(field_type.Unpacked());
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(StructGet, struct_obj, field,
+ opcode == kExprStructGetS, &value);
+ Drop(struct_obj);
+ Push(value);
return opcode_length + field.length;
}
case kExprStructSet: {
@@ -3977,16 +4125,19 @@ class WasmFullDecoder : public WasmDecoder<validate> {
field.index, field.struct_index.index);
return 0;
}
- Value field_value = Pop(1, struct_type->field(field.index).Unpacked());
+ Value field_value =
+ Peek(0, 1, struct_type->field(field.index).Unpacked());
Value struct_obj =
- Pop(0, ValueType::Ref(field.struct_index.index, kNullable));
- CALL_INTERFACE_IF_REACHABLE(StructSet, struct_obj, field, field_value);
+ Peek(1, 0, ValueType::Ref(field.struct_index.index, kNullable));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(StructSet, struct_obj, field,
+ field_value);
+ Drop(2);
return opcode_length + field.length;
}
case kExprArrayNewWithRtt: {
ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- Value rtt = Pop(2);
+ Value rtt = Peek(0, 2);
if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
PopTypeError(2, rtt, "rtt");
return 0;
@@ -3999,11 +4150,14 @@ class WasmFullDecoder : public WasmDecoder<validate> {
PopTypeError(2, rtt, "rtt for type " + std::to_string(imm.index));
return 0;
}
- Value length = Pop(1, kWasmI32);
- Value initial_value = Pop(0, imm.array_type->element_type().Unpacked());
- Value* value = Push(ValueType::Ref(imm.index, kNonNullable));
- CALL_INTERFACE_IF_REACHABLE(ArrayNewWithRtt, imm, length, initial_value,
- rtt, value);
+ Value length = Peek(1, 1, kWasmI32);
+ Value initial_value =
+ Peek(2, 0, imm.array_type->element_type().Unpacked());
+ Value value = CreateValue(ValueType::Ref(imm.index, kNonNullable));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(ArrayNewWithRtt, imm, length,
+ initial_value, rtt, &value);
+ Drop(3); // rtt, length, initial_value.
+ Push(value);
return opcode_length + imm.length;
}
case kExprArrayNewDefault: {
@@ -4016,7 +4170,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
imm.index, imm.array_type->element_type().name().c_str());
return 0;
}
- Value rtt = Pop(1);
+ Value rtt = Peek(0, 1);
if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
PopTypeError(1, rtt, "rtt");
return 0;
@@ -4029,9 +4183,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
PopTypeError(1, rtt, "rtt for type " + std::to_string(imm.index));
return 0;
}
- Value length = Pop(0, kWasmI32);
- Value* value = Push(ValueType::Ref(imm.index, kNonNullable));
- CALL_INTERFACE_IF_REACHABLE(ArrayNewDefault, imm, length, rtt, value);
+ Value length = Peek(1, 0, kWasmI32);
+ Value value = CreateValue(ValueType::Ref(imm.index, kNonNullable));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(ArrayNewDefault, imm, length, rtt,
+ &value);
+ Drop(2); // rtt, length
+ Push(value);
return opcode_length + imm.length;
}
case kExprArrayGetS:
@@ -4046,11 +4203,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
imm.array_type->element_type().name().c_str());
return 0;
}
- Value index = Pop(1, kWasmI32);
- Value array_obj = Pop(0, ValueType::Ref(imm.index, kNullable));
- Value* value = Push(imm.array_type->element_type().Unpacked());
- CALL_INTERFACE_IF_REACHABLE(ArrayGet, array_obj, imm, index,
- opcode == kExprArrayGetS, value);
+ Value index = Peek(0, 1, kWasmI32);
+ Value array_obj = Peek(1, 0, ValueType::Ref(imm.index, kNullable));
+ Value value = CreateValue(imm.array_type->element_type().Unpacked());
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(ArrayGet, array_obj, imm, index,
+ opcode == kExprArrayGetS, &value);
+ Drop(2); // index, array_obj
+ Push(value);
return opcode_length + imm.length;
}
case kExprArrayGet: {
@@ -4063,11 +4222,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
imm.index, imm.array_type->element_type().name().c_str());
return 0;
}
- Value index = Pop(1, kWasmI32);
- Value array_obj = Pop(0, ValueType::Ref(imm.index, kNullable));
- Value* value = Push(imm.array_type->element_type());
- CALL_INTERFACE_IF_REACHABLE(ArrayGet, array_obj, imm, index, true,
- value);
+ Value index = Peek(0, 1, kWasmI32);
+ Value array_obj = Peek(1, 0, ValueType::Ref(imm.index, kNullable));
+ Value value = CreateValue(imm.array_type->element_type());
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(ArrayGet, array_obj, imm, index,
+ true, &value);
+ Drop(2); // index, array_obj
+ Push(value);
return opcode_length + imm.length;
}
case kExprArraySet: {
@@ -4078,51 +4239,63 @@ class WasmFullDecoder : public WasmDecoder<validate> {
imm.index);
return 0;
}
- Value value = Pop(2, imm.array_type->element_type().Unpacked());
- Value index = Pop(1, kWasmI32);
- Value array_obj = Pop(0, ValueType::Ref(imm.index, kNullable));
- CALL_INTERFACE_IF_REACHABLE(ArraySet, array_obj, imm, index, value);
+ Value value = Peek(0, 2, imm.array_type->element_type().Unpacked());
+ Value index = Peek(1, 1, kWasmI32);
+ Value array_obj = Peek(2, 0, ValueType::Ref(imm.index, kNullable));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(ArraySet, array_obj, imm, index,
+ value);
+ Drop(3);
return opcode_length + imm.length;
}
case kExprArrayLen: {
ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- Value array_obj = Pop(0, ValueType::Ref(imm.index, kNullable));
- Value* value = Push(kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(ArrayLen, array_obj, value);
+ Value array_obj = Peek(0, 0, ValueType::Ref(imm.index, kNullable));
+ Value value = CreateValue(kWasmI32);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(ArrayLen, array_obj, &value);
+ Drop(array_obj);
+ Push(value);
return opcode_length + imm.length;
}
case kExprI31New: {
- Value input = Pop(0, kWasmI32);
- Value* value = Push(kWasmI31Ref);
- CALL_INTERFACE_IF_REACHABLE(I31New, input, value);
+ Value input = Peek(0, 0, kWasmI32);
+ Value value = CreateValue(kWasmI31Ref);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(I31New, input, &value);
+ Drop(input);
+ Push(value);
return opcode_length;
}
case kExprI31GetS: {
- Value i31 = Pop(0, kWasmI31Ref);
- Value* value = Push(kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(I31GetS, i31, value);
+ Value i31 = Peek(0, 0, kWasmI31Ref);
+ Value value = CreateValue(kWasmI32);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(I31GetS, i31, &value);
+ Drop(i31);
+ Push(value);
return opcode_length;
}
case kExprI31GetU: {
- Value i31 = Pop(0, kWasmI31Ref);
- Value* value = Push(kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(I31GetU, i31, value);
+ Value i31 = Peek(0, 0, kWasmI31Ref);
+ Value value = CreateValue(kWasmI32);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(I31GetU, i31, &value);
+ Drop(i31);
+ Push(value);
return opcode_length;
}
case kExprRttCanon: {
TypeIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- Value* value = Push(ValueType::Rtt(imm.index, 0));
- CALL_INTERFACE_IF_REACHABLE(RttCanon, imm.index, value);
+ Value value = CreateValue(ValueType::Rtt(imm.index, 0));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &value);
+ Push(value);
return opcode_length + imm.length;
}
case kExprRttSub: {
TypeIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- Value parent = Pop(0);
+ Value parent = Peek(0, 0);
if (parent.type.is_bottom()) {
- Push(kWasmBottom);
+ DCHECK(!current_code_reachable_and_ok_);
+ // Just leave the unreachable/bottom value on the stack.
} else {
if (!VALIDATE(parent.type.is_rtt() &&
IsHeapSubtypeOf(imm.index, parent.type.ref_index(),
@@ -4132,18 +4305,22 @@ class WasmFullDecoder : public WasmDecoder<validate> {
"rtt for a supertype of type " + std::to_string(imm.index));
return 0;
}
- Value* value =
- Push(ValueType::Rtt(imm.index, parent.type.depth() + 1));
-
- CALL_INTERFACE_IF_REACHABLE(RttSub, imm.index, parent, value);
+ Value value = parent.type.has_depth()
+ ? CreateValue(ValueType::Rtt(
+ imm.index, parent.type.depth() + 1))
+ : CreateValue(ValueType::Rtt(imm.index));
+
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttSub, imm.index, parent, &value);
+ Drop(parent);
+ Push(value);
}
return opcode_length + imm.length;
}
case kExprRefTest: {
// "Tests whether {obj}'s runtime type is a runtime subtype of {rtt}."
- Value rtt = Pop(1);
- Value obj = Pop(0);
- Value* value = Push(kWasmI32);
+ Value rtt = Peek(0, 1);
+ Value obj = Peek(1, 0);
+ Value value = CreateValue(kWasmI32);
if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
PopTypeError(1, rtt, "rtt");
return 0;
@@ -4157,21 +4334,22 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return 0;
}
if (!obj.type.is_bottom() && !rtt.type.is_bottom()) {
- if (!VALIDATE(IsSubtypeOf(
- ValueType::Ref(rtt.type.ref_index(), kNonNullable), obj.type,
- this->module_))) {
- PopTypeError(
- 0, obj,
- "supertype of type " + std::to_string(rtt.type.ref_index()));
- return 0;
+ // This logic ensures that code generation can assume that functions
+ // can only be cast to function types, and data objects to data types.
+ if (V8_LIKELY(ObjectRelatedWithRtt(obj, rtt))) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RefTest, obj, rtt, &value);
+ } else {
+ // Unrelated types. Will always fail.
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(I32Const, &value, 0);
}
- CALL_INTERFACE_IF_REACHABLE(RefTest, obj, rtt, value);
}
+ Drop(2);
+ Push(value);
return opcode_length;
}
case kExprRefCast: {
- Value rtt = Pop(1);
- Value obj = Pop(0);
+ Value rtt = Peek(0, 1);
+ Value obj = Peek(1, 0);
if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
PopTypeError(1, rtt, "rtt");
return 0;
@@ -4185,17 +4363,28 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return 0;
}
if (!obj.type.is_bottom() && !rtt.type.is_bottom()) {
- if (!VALIDATE(IsSubtypeOf(
- ValueType::Ref(rtt.type.ref_index(), kNonNullable), obj.type,
- this->module_))) {
- PopTypeError(
- 0, obj,
- "supertype of type " + std::to_string(rtt.type.ref_index()));
- return 0;
- }
- Value* value = Push(
+ Value value = CreateValue(
ValueType::Ref(rtt.type.ref_index(), obj.type.nullability()));
- CALL_INTERFACE_IF_REACHABLE(RefCast, obj, rtt, value);
+ // This logic ensures that code generation can assume that functions
+ // can only be cast to function types, and data objects to data types.
+ if (V8_LIKELY(ObjectRelatedWithRtt(obj, rtt))) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RefCast, obj, rtt, &value);
+ } else {
+ // Unrelated types. The only way this will not trap is if the object
+ // is null.
+ if (obj.type.is_nullable()) {
+ // Drop rtt from the stack, then assert that obj is null.
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(Drop);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(AssertNull, obj, &value);
+ } else {
+ // TODO(manoskouk): Change the trap label.
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(Trap,
+ TrapReason::kTrapIllegalCast);
+ EndControl();
+ }
+ }
+ Drop(2);
+ Push(value);
}
return opcode_length;
}
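
For ref.cast the comments above spell out the fallback when the target and the object's static type are unrelated: the cast can only succeed on a null reference, so a nullable input becomes an assert-null and a non-nullable input becomes an unconditional trap. A compact decision-table sketch of that reasoning follows; the enum and function names are invented, not V8's.

// Invented names; a decision table for ref.cast when the target type and the
// object's static type are unrelated.
#include <cstdio>

enum class Lowering { kRealCast, kAssertNull, kUnconditionalTrap };

Lowering LowerRefCast(bool types_related, bool obj_is_nullable) {
  if (types_related) return Lowering::kRealCast;
  // Unrelated types: the cast can only succeed on a null reference.
  return obj_is_nullable ? Lowering::kAssertNull
                         : Lowering::kUnconditionalTrap;
}

int main() {
  std::printf("related, nullable:   %d\n",
              static_cast<int>(LowerRefCast(true, true)));    // real cast
  std::printf("unrelated, nullable: %d\n",
              static_cast<int>(LowerRefCast(false, true)));   // assert null
  std::printf("unrelated, non-null: %d\n",
              static_cast<int>(LowerRefCast(false, false)));  // trap
  return 0;
}
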
@@ -4206,12 +4395,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
control_.size())) {
return 0;
}
- Value rtt = Pop(1);
+ Value rtt = Peek(0, 1);
if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
PopTypeError(1, rtt, "rtt");
return 0;
}
- Value obj = Pop(0);
+ Value obj = Peek(1, 0);
if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
IsSubtypeOf(obj.type,
ValueType::Ref(HeapType::kData, kNullable),
@@ -4220,45 +4409,52 @@ class WasmFullDecoder : public WasmDecoder<validate> {
PopTypeError(0, obj, "subtype of (ref null func) or (ref null data)");
return 0;
}
- // The static type of {obj} must be a supertype of {rtt}'s type.
- if (!VALIDATE(rtt.type.is_bottom() || obj.type.is_bottom() ||
- IsHeapSubtypeOf(rtt.type.ref_index(),
- obj.type.heap_representation(),
- this->module_))) {
- PopTypeError(1, rtt, obj.type);
- return 0;
- }
Control* c = control_at(branch_depth.depth);
if (c->br_merge()->arity == 0) {
this->DecodeError(
"br_on_cast must target a branch of arity at least 1");
return 0;
}
- // We temporarily push this value to the stack for TypeCheckBranchResult
- // and for MergeValuesInto in the interface.
- Value* result_on_branch =
- Push(rtt.type.is_bottom()
- ? kWasmBottom
- : ValueType::Ref(rtt.type.ref_index(), kNonNullable));
- TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
+ // Attention: contrary to most other instructions, we modify the
+ // stack before calling the interface function. This makes it
+ // significantly more convenient to pass around the values that
+ // will be on the stack when the branch is taken.
+ // TODO(jkummerow): Reconsider this choice.
+ Drop(2); // {obj} and {rtt}.
+ Value result_on_branch = CreateValue(
+ rtt.type.is_bottom()
+ ? kWasmBottom
+ : ValueType::Ref(rtt.type.ref_index(), kNonNullable));
+ Push(result_on_branch);
+ TypeCheckBranchResult check_result = TypeCheckBranch(c, true, 0);
if (V8_LIKELY(check_result == kReachableBranch)) {
- CALL_INTERFACE(BrOnCast, obj, rtt, result_on_branch,
- branch_depth.depth);
- c->br_merge()->reached = true;
+ // This logic ensures that code generation can assume that functions
+ // can only be cast to function types, and data objects to data types.
+ if (V8_LIKELY(ObjectRelatedWithRtt(obj, rtt))) {
+ // The {value_on_branch} parameter we pass to the interface must
+ // be pointer-identical to the object on the stack, so we can't
+ // reuse {result_on_branch} which was passed-by-value to {Push}.
+ Value* value_on_branch = stack_value(1);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(
+ BrOnCast, obj, rtt, value_on_branch, branch_depth.depth);
+ c->br_merge()->reached = true;
+ }
+ // Otherwise the types are unrelated. Do not branch.
} else if (check_result == kInvalidStack) {
return 0;
}
- Pop(0); // Drop {result_on_branch}, restore original value.
- Value* result_on_fallthrough = Push(obj.type);
- *result_on_fallthrough = obj;
+ Drop(result_on_branch);
+ Push(obj); // Restore stack state on fallthrough.
return opcode_length + branch_depth.length;
}
-#define ABSTRACT_TYPE_CHECK(heap_type) \
- case kExprRefIs##heap_type: { \
- Value arg = Pop(0, kWasmAnyRef); \
- Value* result = Push(kWasmI32); \
- CALL_INTERFACE_IF_REACHABLE(RefIs##heap_type, arg, result); \
- return opcode_length; \
+#define ABSTRACT_TYPE_CHECK(heap_type) \
+ case kExprRefIs##heap_type: { \
+ Value arg = Peek(0, 0, kWasmAnyRef); \
+ Value result = CreateValue(kWasmI32); \
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RefIs##heap_type, arg, &result); \
+ Drop(arg); \
+ Push(result); \
+ return opcode_length; \
}
ABSTRACT_TYPE_CHECK(Data)
@@ -4266,15 +4462,17 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ABSTRACT_TYPE_CHECK(I31)
#undef ABSTRACT_TYPE_CHECK
-#define ABSTRACT_TYPE_CAST(heap_type) \
- case kExprRefAs##heap_type: { \
- Value arg = Pop(0, kWasmAnyRef); \
- if (!arg.type.is_bottom()) { \
- Value* result = \
- Push(ValueType::Ref(HeapType::k##heap_type, kNonNullable)); \
- CALL_INTERFACE_IF_REACHABLE(RefAs##heap_type, arg, result); \
- } \
- return opcode_length; \
+#define ABSTRACT_TYPE_CAST(heap_type) \
+ case kExprRefAs##heap_type: { \
+ Value arg = Peek(0, 0, kWasmAnyRef); \
+ if (!arg.type.is_bottom()) { \
+ Value result = \
+ CreateValue(ValueType::Ref(HeapType::k##heap_type, kNonNullable)); \
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RefAs##heap_type, arg, &result); \
+ Drop(arg); \
+ Push(result); \
+ } \
+ return opcode_length; \
}
ABSTRACT_TYPE_CAST(Data)
@@ -4292,7 +4490,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return 0;
}
- Value obj = Pop(0, kWasmAnyRef);
+ Value obj = Peek(0, 0, kWasmAnyRef);
Control* c = control_at(branch_depth.depth);
HeapType::Representation heap_type =
opcode == kExprBrOnFunc
@@ -4303,25 +4501,37 @@ class WasmFullDecoder : public WasmDecoder<validate> {
SafeOpcodeNameAt(this->pc_));
return 0;
}
- // We temporarily push this value to the stack for TypeCheckBranchResult
- // and for MergeValuesInto in the interface.
- Value* result_on_branch = Push(ValueType::Ref(heap_type, kNonNullable));
- TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
+ // Attention: contrary to most other instructions, we modify the
+ // stack before calling the interface function. This makes it
+ // significantly more convenient to pass around the values that
+ // will be on the stack when the branch is taken.
+ // TODO(jkummerow): Reconsider this choice.
+ Drop(obj);
+ Value result_on_branch =
+ CreateValue(ValueType::Ref(heap_type, kNonNullable));
+ Push(result_on_branch);
+ TypeCheckBranchResult check_result = TypeCheckBranch(c, true, 0);
if (V8_LIKELY(check_result == kReachableBranch)) {
+ // The {value_on_branch} parameter we pass to the interface must be
+ // pointer-identical to the object on the stack, so we can't reuse
+ // {result_on_branch} which was passed-by-value to {Push}.
+ Value* value_on_branch = stack_value(1);
if (opcode == kExprBrOnFunc) {
- CALL_INTERFACE(BrOnFunc, obj, result_on_branch, branch_depth.depth);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(BrOnFunc, obj, value_on_branch,
+ branch_depth.depth);
} else if (opcode == kExprBrOnData) {
- CALL_INTERFACE(BrOnData, obj, result_on_branch, branch_depth.depth);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(BrOnData, obj, value_on_branch,
+ branch_depth.depth);
} else {
- CALL_INTERFACE(BrOnI31, obj, result_on_branch, branch_depth.depth);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(BrOnI31, obj, value_on_branch,
+ branch_depth.depth);
}
c->br_merge()->reached = true;
} else if (check_result == kInvalidStack) {
return 0;
}
- Pop(0); // Drop {result_on_branch}, restore original value.
- Value* result_on_fallthrough = Push(obj.type);
- *result_on_fallthrough = obj;
+ Drop(result_on_branch);
+ Push(obj); // Restore stack state on fallthrough.
return opcode_length + branch_depth.length;
}
default:
@@ -4342,7 +4552,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
#define CASE_ATOMIC_STORE_OP(Name, Type) \
case kExpr##Name: { \
memtype = MachineType::Type(); \
- ret_type = kWasmStmt; \
+ ret_type = kWasmVoid; \
break; /* to generic mem access code below */ \
}
ATOMIC_STORE_OP_LIST(CASE_ATOMIC_STORE_OP)
@@ -4363,7 +4573,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
"invalid atomic operand");
return 0;
}
- CALL_INTERFACE_IF_REACHABLE(AtomicFence);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(AtomicFence);
return 1 + opcode_length;
}
default:
@@ -4377,9 +4587,18 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// TODO(10949): Fix this for memory64 (index type should be kWasmI64
// then).
CHECK(!this->module_->is_memory64);
- ArgVector args = PopArgs(sig);
- Value* result = ret_type == kWasmStmt ? nullptr : Push(GetReturnType(sig));
- CALL_INTERFACE_IF_REACHABLE(AtomicOp, opcode, VectorOf(args), imm, result);
+ ArgVector args = PeekArgs(sig);
+ if (ret_type == kWasmVoid) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(AtomicOp, opcode, VectorOf(args), imm,
+ nullptr);
+ DropArgs(sig);
+ } else {
+ Value result = CreateValue(GetReturnType(sig));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(AtomicOp, opcode, VectorOf(args), imm,
+ &result);
+ DropArgs(sig);
+ Push(result);
+ }
return opcode_length + imm.length;
}
@@ -4404,79 +4623,89 @@ class WasmFullDecoder : public WasmDecoder<validate> {
case kExprMemoryInit: {
MemoryInitImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- Value size = Pop(2, sig->GetParam(2));
- Value src = Pop(1, sig->GetParam(1));
- Value dst = Pop(0, sig->GetParam(0));
- CALL_INTERFACE_IF_REACHABLE(MemoryInit, imm, dst, src, size);
+ Value size = Peek(0, 2, sig->GetParam(2));
+ Value src = Peek(1, 1, sig->GetParam(1));
+ Value dst = Peek(2, 0, sig->GetParam(0));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(MemoryInit, imm, dst, src, size);
+ Drop(3);
return opcode_length + imm.length;
}
case kExprDataDrop: {
DataDropImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- CALL_INTERFACE_IF_REACHABLE(DataDrop, imm);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(DataDrop, imm);
return opcode_length + imm.length;
}
case kExprMemoryCopy: {
MemoryCopyImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- Value size = Pop(2, sig->GetParam(2));
- Value src = Pop(1, sig->GetParam(1));
- Value dst = Pop(0, sig->GetParam(0));
- CALL_INTERFACE_IF_REACHABLE(MemoryCopy, imm, dst, src, size);
+ Value size = Peek(0, 2, sig->GetParam(2));
+ Value src = Peek(1, 1, sig->GetParam(1));
+ Value dst = Peek(2, 0, sig->GetParam(0));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(MemoryCopy, imm, dst, src, size);
+ Drop(3);
return opcode_length + imm.length;
}
case kExprMemoryFill: {
MemoryIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- Value size = Pop(2, sig->GetParam(2));
- Value value = Pop(1, sig->GetParam(1));
- Value dst = Pop(0, sig->GetParam(0));
- CALL_INTERFACE_IF_REACHABLE(MemoryFill, imm, dst, value, size);
+ Value size = Peek(0, 2, sig->GetParam(2));
+ Value value = Peek(1, 1, sig->GetParam(1));
+ Value dst = Peek(2, 0, sig->GetParam(0));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(MemoryFill, imm, dst, value, size);
+ Drop(3);
return opcode_length + imm.length;
}
case kExprTableInit: {
TableInitImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- ArgVector args = PopArgs(sig);
- CALL_INTERFACE_IF_REACHABLE(TableInit, imm, VectorOf(args));
+ ArgVector args = PeekArgs(sig);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(TableInit, imm, VectorOf(args));
+ DropArgs(sig);
return opcode_length + imm.length;
}
case kExprElemDrop: {
ElemDropImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- CALL_INTERFACE_IF_REACHABLE(ElemDrop, imm);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(ElemDrop, imm);
return opcode_length + imm.length;
}
case kExprTableCopy: {
TableCopyImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- ArgVector args = PopArgs(sig);
- CALL_INTERFACE_IF_REACHABLE(TableCopy, imm, VectorOf(args));
+ ArgVector args = PeekArgs(sig);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(TableCopy, imm, VectorOf(args));
+ DropArgs(sig);
return opcode_length + imm.length;
}
case kExprTableGrow: {
TableIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- Value delta = Pop(1, sig->GetParam(1));
- Value value = Pop(0, this->module_->tables[imm.index].type);
- Value* result = Push(kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(TableGrow, imm, value, delta, result);
+ Value delta = Peek(0, 1, sig->GetParam(1));
+ Value value = Peek(1, 0, this->module_->tables[imm.index].type);
+ Value result = CreateValue(kWasmI32);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(TableGrow, imm, value, delta,
+ &result);
+ Drop(2);
+ Push(result);
return opcode_length + imm.length;
}
case kExprTableSize: {
TableIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- Value* result = Push(kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(TableSize, imm, result);
+ Value result = CreateValue(kWasmI32);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(TableSize, imm, &result);
+ Push(result);
return opcode_length + imm.length;
}
case kExprTableFill: {
TableIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- Value count = Pop(2, sig->GetParam(2));
- Value value = Pop(1, this->module_->tables[imm.index].type);
- Value start = Pop(0, sig->GetParam(0));
- CALL_INTERFACE_IF_REACHABLE(TableFill, imm, start, value, count);
+ Value count = Peek(0, 2, sig->GetParam(2));
+ Value value = Peek(1, 1, this->module_->tables[imm.index].type);
+ Value start = Peek(2, 0, sig->GetParam(0));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(TableFill, imm, start, value, count);
+ Drop(3);
return opcode_length + imm.length;
}
default:
@@ -4487,7 +4716,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
void DoReturn() {
DCHECK_GE(stack_size(), this->sig_->return_count());
- CALL_INTERFACE_IF_REACHABLE(DoReturn);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(DoReturn, 0);
}
V8_INLINE void EnsureStackSpace(int slots_needed) {
@@ -4510,14 +4739,14 @@ class WasmFullDecoder : public WasmDecoder<validate> {
stack_capacity_end_ = new_stack + new_stack_capacity;
}
- V8_INLINE Value* Push(ValueType type) {
- DCHECK_NE(kWasmStmt, type);
+ V8_INLINE Value CreateValue(ValueType type) { return Value{this->pc_, type}; }
+ V8_INLINE void Push(Value value) {
+ DCHECK_NE(kWasmVoid, value.type);
// {EnsureStackSpace} should have been called before, either in the central
// decoding loop, or individually if more than one element is pushed.
DCHECK_GT(stack_capacity_end_, stack_end_);
- *stack_end_ = Value{this->pc_, type};
+ *stack_end_ = value;
++stack_end_;
- return stack_end_ - 1;
}
void PushMergeValues(Control* c, Merge<Value>* merge) {
@@ -4539,13 +4768,17 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DCHECK_EQ(c->stack_depth + merge->arity, stack_size());
}
- Value* PushReturns(const FunctionSig* sig) {
+ V8_INLINE ReturnVector CreateReturnValues(const FunctionSig* sig) {
size_t return_count = sig->return_count();
- EnsureStackSpace(static_cast<int>(return_count));
+ ReturnVector values(return_count);
for (size_t i = 0; i < return_count; ++i) {
- Push(sig->GetReturn(i));
+ values[i] = CreateValue(sig->GetReturn(i));
}
- return stack_end_ - return_count;
+ return values;
+ }
+ V8_INLINE void PushReturns(ReturnVector values) {
+ EnsureStackSpace(static_cast<int>(values.size()));
+ for (Value& value : values) Push(value);
}
// We do not inline these functions because doing so causes a large binary
@@ -4571,8 +4804,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
SafeOpcodeNameAt(this->pc_), index + 1);
}
- V8_INLINE Value Pop(int index, ValueType expected) {
- Value val = Pop(index);
+ V8_INLINE Value Peek(int depth, int index, ValueType expected) {
+ Value val = Peek(depth, index);
if (!VALIDATE(IsSubtypeOf(val.type, expected, this->module_) ||
val.type == kWasmBottom || expected == kWasmBottom)) {
PopTypeError(index, val, expected);
@@ -4580,34 +4813,72 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return val;
}
- V8_INLINE Value Pop(int index) {
+ V8_INLINE Value Peek(int depth, int index) {
DCHECK(!control_.empty());
uint32_t limit = control_.back().stack_depth;
- if (stack_size() <= limit) {
- // Popping past the current control start in reachable code.
+ if (V8_UNLIKELY(stack_size() <= limit + depth)) {
+ // Peeking past the current control start in reachable code.
if (!VALIDATE(control_.back().unreachable())) {
NotEnoughArgumentsError(index);
}
return UnreachableValue(this->pc_);
}
- DCHECK_LT(stack_, stack_end_);
- stack_end_--;
- return *stack_end_;
+ DCHECK_LE(stack_, stack_end_ - depth - 1);
+ return *(stack_end_ - depth - 1);
+ }
+
+ V8_INLINE void ValidateArgType(ArgVector args, int index,
+ ValueType expected) {
+ Value val = args[index];
+ if (!VALIDATE(IsSubtypeOf(val.type, expected, this->module_) ||
+ val.type == kWasmBottom || expected == kWasmBottom)) {
+ PopTypeError(index, val, expected);
+ }
}
+ V8_INLINE void Drop(int count = 1) {
+ DCHECK(!control_.empty());
+ uint32_t limit = control_.back().stack_depth;
+ // TODO(wasm): This check is often redundant.
+ if (V8_UNLIKELY(stack_size() < limit + count)) {
+ // Popping past the current control start in reachable code.
+ if (!VALIDATE(!control_.back().reachable())) {
+ NotEnoughArgumentsError(0);
+ }
+ // Pop what we can.
+ count = std::min(count, static_cast<int>(stack_size() - limit));
+ }
+ DCHECK_LE(stack_, stack_end_ - count);
+ stack_end_ -= count;
+ }
+ // For more descriptive call sites:
+ V8_INLINE void Drop(const Value& /* unused */) { Drop(1); }
+
// Pops values from the stack, as defined by {merge}. Thereby we type-check
// unreachable merges. Afterwards the values are pushed again on the stack
// according to the signature in {merge}. This is done so follow-up validation
// is possible.
- bool TypeCheckUnreachableMerge(Merge<Value>& merge, bool conditional_branch) {
+ bool TypeCheckUnreachableMerge(Merge<Value>& merge, bool conditional_branch,
+ uint32_t drop_values = 0) {
int arity = merge.arity;
// For conditional branches, stack value '0' is the condition of the branch,
// and the result values start at index '1'.
int index_offset = conditional_branch ? 1 : 0;
- for (int i = arity - 1; i >= 0; --i) Pop(index_offset + i, merge[i].type);
- // Push values of the correct type back on the stack.
- EnsureStackSpace(arity);
- for (int i = 0; i < arity; ++i) Push(merge[i].type);
+ for (int i = arity - 1, depth = drop_values; i >= 0; --i, ++depth) {
+ Peek(depth, index_offset + i, merge[i].type);
+ }
+ // Push values of the correct type onto the stack.
+ Drop(drop_values);
+ Drop(arity);
+ // {Drop} is adaptive for polymorphic stacks: it might drop fewer values
+ // than requested. So ensuring stack space here is not redundant.
+ EnsureStackSpace(arity + drop_values);
+ for (int i = 0; i < arity; i++) Push(CreateValue(merge[i].type));
+ // {drop_values} are about to be dropped anyway, so we can forget their
+ // previous types, but we do have to maintain the correct stack height.
+ for (uint32_t i = 0; i < drop_values; i++) {
+ Push(UnreachableValue(this->pc_));
+ }
return this->ok();
}
@@ -4617,18 +4888,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DCHECK_EQ(c, &control_.back());
DCHECK_NE(c->kind, kControlLoop);
if (!TypeCheckFallThru()) return;
- if (!c->reachable()) return;
- CALL_INTERFACE(FallThruTo, c);
- c->end_merge.reached = true;
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(FallThruTo, c);
+ if (c->reachable()) c->end_merge.reached = true;
}
- bool TypeCheckMergeValues(Control* c, Merge<Value>* merge) {
+ bool TypeCheckMergeValues(Control* c, uint32_t drop_values,
+ Merge<Value>* merge) {
static_assert(validate, "Call this function only within VALIDATE");
DCHECK(merge == &c->start_merge || merge == &c->end_merge);
- DCHECK_GE(stack_size(), c->stack_depth + merge->arity);
- // The computation of {stack_values} is only valid if {merge->arity} is >0.
- DCHECK_LT(0, merge->arity);
- Value* stack_values = stack_end_ - merge->arity;
+ DCHECK_GE(stack_size() - drop_values, c->stack_depth + merge->arity);
+ Value* stack_values = stack_value(merge->arity + drop_values);
// Typecheck the topmost {merge->arity} values on the stack.
for (uint32_t i = 0; i < merge->arity; ++i) {
Value& val = stack_values[i];
@@ -4676,7 +4945,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
if (expected == 0) return true; // Fast path.
- return TypeCheckMergeValues(&c, &c.end_merge);
+ return TypeCheckMergeValues(&c, 0, &c.end_merge);
}
// Type-check an unreachable fallthru. First we do an arity check, then a
@@ -4708,7 +4977,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// Otherwise, we have a polymorphic stack: check if any values that may exist
// on top of the stack are compatible with {c}, and push back to the stack
// values based on the type of {c}.
- TypeCheckBranchResult TypeCheckBranch(Control* c, bool conditional_branch) {
+ TypeCheckBranchResult TypeCheckBranch(Control* c, bool conditional_branch,
+ uint32_t drop_values) {
if (V8_LIKELY(control_.back().reachable())) {
// We only do type-checking here. This is only needed during validation.
if (!validate) return kReachableBranch;
@@ -4717,20 +4987,22 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// more.
uint32_t expected = c->br_merge()->arity;
if (expected == 0) return kReachableBranch; // Fast path.
- DCHECK_GE(stack_size(), control_.back().stack_depth);
- uint32_t actual =
- static_cast<uint32_t>(stack_size()) - control_.back().stack_depth;
- if (!VALIDATE(actual >= expected)) {
+ uint32_t limit = control_.back().stack_depth;
+ if (!VALIDATE(stack_size() >= limit + drop_values + expected)) {
+ uint32_t actual = stack_size() - limit;
+ actual -= std::min(actual, drop_values);
this->DecodeError(
"expected %u elements on the stack for br to @%d, found %u",
expected, startrel(c->pc()), actual);
return kInvalidStack;
}
- return TypeCheckMergeValues(c, c->br_merge()) ? kReachableBranch
- : kInvalidStack;
+ return TypeCheckMergeValues(c, drop_values, c->br_merge())
+ ? kReachableBranch
+ : kInvalidStack;
}
- return TypeCheckUnreachableMerge(*c->br_merge(), conditional_branch)
+ return TypeCheckUnreachableMerge(*c->br_merge(), conditional_branch,
+ drop_values)
? kUnreachableBranch
: kInvalidStack;
}
@@ -4768,9 +5040,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
void onFirstError() override {
this->end_ = this->pc_; // Terminate decoding loop.
- this->current_code_reachable_ = false;
+ this->current_code_reachable_and_ok_ = false;
TRACE(" !%s\n", this->error_.message().c_str());
- CALL_INTERFACE(OnFirstError);
+ // Cannot use CALL_INTERFACE_* macros because we emitted an error.
+ interface().OnFirstError(this);
}
int BuildSimplePrototypeOperator(WasmOpcode opcode) {
@@ -4783,7 +5056,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
int BuildSimpleOperator(WasmOpcode opcode, const FunctionSig* sig) {
DCHECK_GE(1, sig->return_count());
- ValueType ret = sig->return_count() == 0 ? kWasmStmt : sig->GetReturn(0);
+ ValueType ret = sig->return_count() == 0 ? kWasmVoid : sig->GetReturn(0);
if (sig->parameter_count() == 1) {
return BuildSimpleOperator(opcode, ret, sig->GetParam(0));
} else {
@@ -4795,18 +5068,32 @@ class WasmFullDecoder : public WasmDecoder<validate> {
int BuildSimpleOperator(WasmOpcode opcode, ValueType return_type,
ValueType arg_type) {
- Value val = Pop(0, arg_type);
- Value* ret = return_type == kWasmStmt ? nullptr : Push(return_type);
- CALL_INTERFACE_IF_REACHABLE(UnOp, opcode, val, ret);
+ Value val = Peek(0, 0, arg_type);
+ if (return_type == kWasmVoid) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(UnOp, opcode, val, nullptr);
+ Drop(val);
+ } else {
+ Value ret = CreateValue(return_type);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(UnOp, opcode, val, &ret);
+ Drop(val);
+ Push(ret);
+ }
return 1;
}
int BuildSimpleOperator(WasmOpcode opcode, ValueType return_type,
ValueType lhs_type, ValueType rhs_type) {
- Value rval = Pop(1, rhs_type);
- Value lval = Pop(0, lhs_type);
- Value* ret = return_type == kWasmStmt ? nullptr : Push(return_type);
- CALL_INTERFACE_IF_REACHABLE(BinOp, opcode, lval, rval, ret);
+ Value rval = Peek(0, 1, rhs_type);
+ Value lval = Peek(1, 0, lhs_type);
+ if (return_type == kWasmVoid) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(BinOp, opcode, lval, rval, nullptr);
+ Drop(2);
+ } else {
+ Value ret = CreateValue(return_type);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(BinOp, opcode, lval, rval, &ret);
+ Drop(2);
+ Push(ret);
+ }
return 1;
}
@@ -4818,10 +5105,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
#undef DEFINE_SIMPLE_SIG_OPERATOR
};
-#undef CALL_INTERFACE
-#undef CALL_INTERFACE_IF_REACHABLE
-#undef CALL_INTERFACE_IF_PARENT_REACHABLE
-
class EmptyInterface {
public:
static constexpr Decoder::ValidateFlag validate = Decoder::kFullValidation;
@@ -4835,6 +5118,8 @@ class EmptyInterface {
#undef DEFINE_EMPTY_CALLBACK
};
+#undef CALL_INTERFACE_IF_OK_AND_REACHABLE
+#undef CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE
#undef TRACE
#undef TRACE_INST_FORMAT
#undef VALIDATE
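
Throughout the decoder changes above, CALL_INTERFACE_IF_REACHABLE becomes CALL_INTERFACE_IF_OK_AND_REACHABLE: the interface callback runs only if no decode error has been recorded and the current code is reachable. The macro's actual definition is not part of these hunks, so the following is only a schematic, self-contained version of that guard pattern with invented names, not V8's macro.

// Schematic guard, not V8's macro: run the interface callback only while
// decoding is still ok and the current code is reachable.
#include <cstdio>

struct Interface {
  void OnAdd(int lhs, int rhs, int* result) { *result = lhs + rhs; }
};

struct Decoder {
  Interface interface_;
  bool ok_ = true;                             // no decode error recorded
  bool current_code_reachable_and_ok_ = true;  // reachable and ok
  bool ok() const { return ok_; }
};

#define CALL_IF_OK_AND_REACHABLE(decoder, name, ...)                   \
  do {                                                                 \
    if ((decoder).ok() && (decoder).current_code_reachable_and_ok_) {  \
      (decoder).interface_.name(__VA_ARGS__);                          \
    }                                                                  \
  } while (false)

int main() {
  Decoder decoder;
  int result = 0;
  CALL_IF_OK_AND_REACHABLE(decoder, OnAdd, 2, 3, &result);
  std::printf("result = %d\n", result);  // 5
  decoder.current_code_reachable_and_ok_ = false;
  CALL_IF_OK_AND_REACHABLE(decoder, OnAdd, 10, 10, &result);
  std::printf("result = %d (callback skipped)\n", result);  // still 5
  return 0;
}
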
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index c5aab2b593e..0733359055e 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -209,27 +209,26 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
os << RawOpcodeName(opcode) << ",";
if (opcode == kExprLoop || opcode == kExprIf || opcode == kExprBlock ||
- opcode == kExprTry) {
- DCHECK_EQ(2, length);
-
- // TODO(7748) Update this for gc and ref types if needed
- switch (i.pc()[1]) {
-#define CASE_LOCAL_TYPE(local_name, type_name) \
- case k##local_name##Code: \
- os << " kWasm" #type_name ","; \
- break;
-
- CASE_LOCAL_TYPE(I32, I32)
- CASE_LOCAL_TYPE(I64, I64)
- CASE_LOCAL_TYPE(F32, F32)
- CASE_LOCAL_TYPE(F64, F64)
- CASE_LOCAL_TYPE(S128, S128)
- CASE_LOCAL_TYPE(Void, Stmt)
- default:
- os << " 0x" << AsHex(i.pc()[1], 2) << ",";
- break;
+ opcode == kExprTry || opcode == kExprLet) {
+ if (i.pc()[1] & 0x80) {
+ uint32_t temp_length;
+ ValueType type =
+ value_type_reader::read_value_type<Decoder::kNoValidation>(
+ &decoder, i.pc() + 1, &temp_length, module,
+ WasmFeatures::All());
+ if (temp_length == 1) {
+ os << type.name() << ",";
+ } else {
+ // TODO(manoskouk): Improve this for rtts and (nullable) refs.
+ for (unsigned j = offset; j < length; ++j) {
+ os << " 0x" << AsHex(i.pc()[j], 2) << ",";
+ }
+ }
+ } else {
+ for (unsigned j = offset; j < length; ++j) {
+ os << " 0x" << AsHex(i.pc()[j], 2) << ",";
+ }
}
-#undef CASE_LOCAL_TYPE
} else {
for (unsigned j = offset; j < length; ++j) {
os << " 0x" << AsHex(i.pc()[j], 2) << ",";
@@ -249,7 +248,8 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
case kExprLoop:
case kExprIf:
case kExprBlock:
- case kExprTry: {
+ case kExprTry:
+ case kExprLet: {
BlockTypeImmediate<Decoder::kNoValidation> imm(WasmFeatures::All(), &i,
i.pc() + 1, module);
os << " @" << i.pc_offset();
diff --git a/deps/v8/src/wasm/function-body-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index 4bc42eda260..b1e1cebe2f3 100644
--- a/deps/v8/src/wasm/function-body-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_FUNCTION_BODY_DECODER_H_
#define V8_WASM_FUNCTION_BODY_DECODER_H_
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index 0129d4d8e71..4303344f136 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -14,6 +14,8 @@
#include "src/utils/ostreams.h"
#include "src/wasm/baseline/liftoff-compiler.h"
#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-debug.h"
+#include "src/wasm/wasm-engine.h"
namespace v8 {
namespace internal {
@@ -188,13 +190,24 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
case ExecutionTier::kLiftoff:
// The --wasm-tier-mask-for-testing flag can force functions to be
- // compiled with TurboFan, see documentation.
+ // compiled with TurboFan, and the --wasm-debug-mask-for-testing can force
+ // them to be compiled for debugging, see documentation.
if (V8_LIKELY(FLAG_wasm_tier_mask_for_testing == 0) ||
func_index_ >= 32 ||
((FLAG_wasm_tier_mask_for_testing & (1 << func_index_)) == 0)) {
- result = ExecuteLiftoffCompilation(wasm_engine->allocator(), env,
- func_body, func_index_,
- for_debugging_, counters, detected);
+ if (V8_LIKELY(func_index_ >= 32 || (FLAG_wasm_debug_mask_for_testing &
+ (1 << func_index_)) == 0)) {
+ result = ExecuteLiftoffCompilation(
+ wasm_engine->allocator(), env, func_body, func_index_,
+ for_debugging_, counters, detected);
+ } else {
+ // We don't use the debug side table, we only pass it to cover
+ // different code paths in Liftoff for testing.
+ std::unique_ptr<DebugSideTable> debug_sidetable;
+ result = ExecuteLiftoffCompilation(
+ wasm_engine->allocator(), env, func_body, func_index_,
+ kForDebugging, counters, detected, {}, &debug_sidetable);
+ }
if (result.succeeded()) break;
}
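
The Liftoff change above forces a function into the for-debugging compilation only when its index is below 32 and the corresponding bit of --wasm-debug-mask-for-testing is set; the mask is a 32-bit testing flag, so higher indices always take the normal path. A standalone sketch of that selection logic follows; the helper name and the example mask value are invented.

// Illustrative helper; the name ForcedByMask is invented. A function is forced
// into the alternative (for-debugging) compilation only if its index fits in
// the 32-bit testing mask and the corresponding bit is set.
#include <cstdint>
#include <cstdio>

bool ForcedByMask(uint32_t mask, uint32_t func_index) {
  return func_index < 32 && (mask & (uint32_t{1} << func_index)) != 0;
}

int main() {
  uint32_t mask = 0b1010;  // selects functions 1 and 3
  for (uint32_t i = 0; i < 5; ++i) {
    std::printf("func %u forced: %d\n", i, ForcedByMask(mask, i));
  }
  std::printf("func 40 forced: %d\n", ForcedByMask(mask, 40));  // out of range
  return 0;
}
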
diff --git a/deps/v8/src/wasm/function-compiler.h b/deps/v8/src/wasm/function-compiler.h
index 4894076303b..f8d1f00a4e4 100644
--- a/deps/v8/src/wasm/function-compiler.h
+++ b/deps/v8/src/wasm/function-compiler.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_FUNCTION_COMPILER_H_
#define V8_WASM_FUNCTION_COMPILER_H_
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index 3893d2841de..c856f4d9493 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -67,12 +67,6 @@ struct SsaEnv : public ZoneObject {
}
};
-#define BUILD(func, ...) \
- ([&] { \
- DCHECK(decoder->ok()); \
- return CheckForException(decoder, builder_->func(__VA_ARGS__)); \
- })()
-
constexpr uint32_t kNullCatch = static_cast<uint32_t>(-1);
class WasmGraphBuildingInterface {
@@ -88,7 +82,7 @@ class WasmGraphBuildingInterface {
explicit Value(Args&&... args) V8_NOEXCEPT
: ValueBase(std::forward<Args>(args)...) {}
};
- using StackValueVector = base::SmallVector<Value, 8>;
+ using ValueVector = base::SmallVector<Value, 8>;
using NodeVector = base::SmallVector<TFNode*, 8>;
struct TryInfo : public ZoneObject {
@@ -122,15 +116,12 @@ class WasmGraphBuildingInterface {
void StartFunction(FullDecoder* decoder) {
// The first '+ 1' is needed by TF Start node, the second '+ 1' is for the
// instance parameter.
- TFNode* start = builder_->Start(
- static_cast<int>(decoder->sig_->parameter_count() + 1 + 1));
+ builder_->Start(static_cast<int>(decoder->sig_->parameter_count() + 1 + 1));
uint32_t num_locals = decoder->num_locals();
SsaEnv* ssa_env = decoder->zone()->New<SsaEnv>(
- decoder->zone(), SsaEnv::kReached, start, start, num_locals);
+ decoder->zone(), SsaEnv::kReached, effect(), control(), num_locals);
SetEnv(ssa_env);
- // Initialize the instance parameter (index 0).
- builder_->set_instance_node(builder_->Param(kWasmInstanceParameterIndex));
// Initialize local variables. Parameters are shifted by 1 because of the
// instance parameter.
uint32_t index = 0;
@@ -147,7 +138,7 @@ class WasmGraphBuildingInterface {
}
LoadContextIntoSsa(ssa_env);
- if (FLAG_trace_wasm) BUILD(TraceFunctionEntry, decoder->position());
+ if (FLAG_trace_wasm) builder_->TraceFunctionEntry(decoder->position());
}
// Reload the instance cache entries into the Ssa Environment.
@@ -178,6 +169,26 @@ class WasmGraphBuildingInterface {
ssa_env_->state = SsaEnv::kMerged;
TFNode* loop_node = builder_->Loop(control());
+
+ if (FLAG_wasm_loop_unrolling) {
+ uint32_t nesting_depth = 0;
+ for (uint32_t depth = 1; depth < decoder->control_depth(); depth++) {
+ if (decoder->control_at(depth)->is_loop()) {
+ nesting_depth++;
+ }
+ }
+ // If this loop is nested, the parent loop's is_innermost field needs to
+ // be false. If the last loop in loop_infos_ has less depth, it has to be
+ // the parent loop. If it does not, it means another loop has been found
+ // within the parent loop, and that loop will have set the parent's
+ // is_innermost to false, so we do not need to do anything.
+ if (nesting_depth > 0 &&
+ loop_infos_.back().nesting_depth < nesting_depth) {
+ loop_infos_.back().is_innermost = false;
+ }
+ loop_infos_.emplace_back(loop_node, nesting_depth, true);
+ }
+
builder_->SetControl(loop_node);
decoder->control_at(0)->loop_node = loop_node;
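
The loop-unrolling bookkeeping above relies on loops being appended to loop_infos_ in the order they are entered, so a preceding entry with a smaller nesting depth must be the parent loop and can be marked as no longer innermost. Below is a self-contained sketch of that invariant; the struct and function names are invented.

// Illustrative only; LoopInfo and EnterLoop are invented names.
#include <cstdio>
#include <vector>

struct LoopInfo {
  int id;
  unsigned nesting_depth;
  bool is_innermost;
};

std::vector<LoopInfo> loop_infos;

// Loops are recorded in the order they are entered. If the previous entry is
// shallower than the loop being entered, it is the parent loop and therefore
// no longer innermost; a deeper or equal previous entry means the parent was
// already demoted by an earlier nested loop.
void EnterLoop(int id, unsigned nesting_depth) {
  if (nesting_depth > 0 && !loop_infos.empty() &&
      loop_infos.back().nesting_depth < nesting_depth) {
    loop_infos.back().is_innermost = false;
  }
  loop_infos.push_back({id, nesting_depth, true});
}

int main() {
  EnterLoop(0, 0);  // outer loop
  EnterLoop(1, 1);  // nested loop: loop 0 stops being innermost
  EnterLoop(2, 1);  // sibling of loop 1: loop 0 is already demoted
  for (const LoopInfo& info : loop_infos) {
    std::printf("loop %d depth %u innermost %d\n", info.id, info.nesting_depth,
                info.is_innermost ? 1 : 0);
  }
  return 0;
}
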
@@ -236,7 +247,7 @@ class WasmGraphBuildingInterface {
void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
TFNode* if_true = nullptr;
TFNode* if_false = nullptr;
- BUILD(BranchNoHint, cond.node, &if_true, &if_false);
+ builder_->BranchNoHint(cond.node, &if_true, &if_false);
SsaEnv* end_env = ssa_env_;
SsaEnv* false_env = Split(decoder->zone(), ssa_env_);
false_env->control = if_false;
@@ -290,13 +301,13 @@ class WasmGraphBuildingInterface {
void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
Value* result) {
- result->node = BUILD(Unop, opcode, value.node, decoder->position());
+ result->node = builder_->Unop(opcode, value.node, decoder->position());
}
void BinOp(FullDecoder* decoder, WasmOpcode opcode, const Value& lhs,
const Value& rhs, Value* result) {
TFNode* node =
- BUILD(Binop, opcode, lhs.node, rhs.node, decoder->position());
+ builder_->Binop(opcode, lhs.node, rhs.node, decoder->position());
if (result) result->node = node;
}
@@ -326,11 +337,11 @@ class WasmGraphBuildingInterface {
}
void RefFunc(FullDecoder* decoder, uint32_t function_index, Value* result) {
- result->node = BUILD(RefFunc, function_index);
+ result->node = builder_->RefFunc(function_index);
}
void RefAsNonNull(FullDecoder* decoder, const Value& arg, Value* result) {
- result->node = BUILD(RefAsNonNull, arg.node, decoder->position());
+ result->node = builder_->RefAsNonNull(arg.node, decoder->position());
}
void Drop(FullDecoder* decoder) {}
@@ -366,81 +377,89 @@ class WasmGraphBuildingInterface {
void GlobalGet(FullDecoder* decoder, Value* result,
const GlobalIndexImmediate<validate>& imm) {
- result->node = BUILD(GlobalGet, imm.index);
+ result->node = builder_->GlobalGet(imm.index);
}
void GlobalSet(FullDecoder* decoder, const Value& value,
const GlobalIndexImmediate<validate>& imm) {
- BUILD(GlobalSet, imm.index, value.node);
+ builder_->GlobalSet(imm.index, value.node);
}
void TableGet(FullDecoder* decoder, const Value& index, Value* result,
const TableIndexImmediate<validate>& imm) {
- result->node = BUILD(TableGet, imm.index, index.node, decoder->position());
+ result->node =
+ builder_->TableGet(imm.index, index.node, decoder->position());
}
void TableSet(FullDecoder* decoder, const Value& index, const Value& value,
const TableIndexImmediate<validate>& imm) {
- BUILD(TableSet, imm.index, index.node, value.node, decoder->position());
+ builder_->TableSet(imm.index, index.node, value.node, decoder->position());
}
- void Unreachable(FullDecoder* decoder) {
- StackValueVector values;
+ void Trap(FullDecoder* decoder, TrapReason reason) {
+ ValueVector values;
if (FLAG_wasm_loop_unrolling) {
BuildNestedLoopExits(decoder, decoder->control_depth() - 1, false,
values);
}
- BUILD(Trap, wasm::TrapReason::kTrapUnreachable, decoder->position());
+ builder_->Trap(reason, decoder->position());
+ }
+
+ void AssertNull(FullDecoder* decoder, const Value& obj, Value* result) {
+ builder_->TrapIfFalse(
+ wasm::TrapReason::kTrapIllegalCast,
+ builder_->Binop(kExprRefEq, obj.node, builder_->RefNull(),
+ decoder->position()),
+ decoder->position());
+ result->node = obj.node;
}
void NopForTestingUnsupportedInLiftoff(FullDecoder* decoder) {}
void Select(FullDecoder* decoder, const Value& cond, const Value& fval,
const Value& tval, Value* result) {
- TFNode* controls[2];
- BUILD(BranchNoHint, cond.node, &controls[0], &controls[1]);
- TFNode* merge = BUILD(Merge, 2, controls);
- TFNode* inputs[] = {tval.node, fval.node, merge};
- TFNode* phi = BUILD(Phi, tval.type, 2, inputs);
- result->node = phi;
- builder_->SetControl(merge);
- }
-
- StackValueVector CopyStackValues(FullDecoder* decoder, uint32_t count) {
- Value* stack_base = count > 0 ? decoder->stack_value(count) : nullptr;
- StackValueVector stack_values(count);
+ result->node =
+ builder_->Select(cond.node, tval.node, fval.node, result->type);
+ }
+
+ ValueVector CopyStackValues(FullDecoder* decoder, uint32_t count,
+ uint32_t drop_values) {
+ Value* stack_base =
+ count > 0 ? decoder->stack_value(count + drop_values) : nullptr;
+ ValueVector stack_values(count);
for (uint32_t i = 0; i < count; i++) {
stack_values[i] = stack_base[i];
}
return stack_values;
}
- void DoReturn(FullDecoder* decoder) {
+ void DoReturn(FullDecoder* decoder, uint32_t drop_values) {
uint32_t ret_count = static_cast<uint32_t>(decoder->sig_->return_count());
NodeVector values(ret_count);
SsaEnv* internal_env = ssa_env_;
if (FLAG_wasm_loop_unrolling) {
SsaEnv* exit_env = Split(decoder->zone(), ssa_env_);
SetEnv(exit_env);
- auto stack_values = CopyStackValues(decoder, ret_count);
+ auto stack_values = CopyStackValues(decoder, ret_count, drop_values);
BuildNestedLoopExits(decoder, decoder->control_depth() - 1, false,
stack_values);
GetNodes(values.begin(), VectorOf(stack_values));
} else {
- Value* stack_base =
- ret_count == 0 ? nullptr : decoder->stack_value(ret_count);
+ Value* stack_base = ret_count == 0
+ ? nullptr
+ : decoder->stack_value(ret_count + drop_values);
GetNodes(values.begin(), stack_base, ret_count);
}
if (FLAG_trace_wasm) {
- BUILD(TraceFunctionExit, VectorOf(values), decoder->position());
+ builder_->TraceFunctionExit(VectorOf(values), decoder->position());
}
- BUILD(Return, VectorOf(values));
+ builder_->Return(VectorOf(values));
SetEnv(internal_env);
}
- void BrOrRet(FullDecoder* decoder, uint32_t depth) {
+ void BrOrRet(FullDecoder* decoder, uint32_t depth, uint32_t drop_values) {
if (depth == decoder->control_depth() - 1) {
- DoReturn(decoder);
+ DoReturn(decoder, drop_values);
} else {
Control* target = decoder->control_at(depth);
if (FLAG_wasm_loop_unrolling) {
@@ -448,13 +467,13 @@ class WasmGraphBuildingInterface {
SsaEnv* exit_env = Split(decoder->zone(), ssa_env_);
SetEnv(exit_env);
uint32_t value_count = target->br_merge()->arity;
- auto stack_values = CopyStackValues(decoder, value_count);
+ auto stack_values = CopyStackValues(decoder, value_count, drop_values);
BuildNestedLoopExits(decoder, depth, true, stack_values);
MergeValuesInto(decoder, target, target->br_merge(),
stack_values.data());
SetEnv(internal_env);
} else {
- MergeValuesInto(decoder, target, target->br_merge());
+ MergeValuesInto(decoder, target, target->br_merge(), drop_values);
}
}
}
@@ -463,10 +482,10 @@ class WasmGraphBuildingInterface {
SsaEnv* fenv = ssa_env_;
SsaEnv* tenv = Split(decoder->zone(), fenv);
fenv->SetNotMerged();
- BUILD(BranchNoHint, cond.node, &tenv->control, &fenv->control);
+ builder_->BranchNoHint(cond.node, &tenv->control, &fenv->control);
builder_->SetControl(fenv->control);
SetEnv(tenv);
- BrOrRet(decoder, depth);
+ BrOrRet(decoder, depth, 1);
SetEnv(fenv);
}
@@ -475,13 +494,13 @@ class WasmGraphBuildingInterface {
if (imm.table_count == 0) {
// Only a default target. Do the equivalent of br.
uint32_t target = BranchTableIterator<validate>(decoder, imm).next();
- BrOrRet(decoder, target);
+ BrOrRet(decoder, target, 1);
return;
}
SsaEnv* branch_env = ssa_env_;
// Build branches to the various blocks based on the table.
- TFNode* sw = BUILD(Switch, imm.table_count + 1, key.node);
+ TFNode* sw = builder_->Switch(imm.table_count + 1, key.node);
SsaEnv* copy = Steal(decoder->zone(), branch_env);
SetEnv(copy);
@@ -490,9 +509,9 @@ class WasmGraphBuildingInterface {
uint32_t i = iterator.cur_index();
uint32_t target = iterator.next();
SetEnv(Split(decoder->zone(), copy));
- builder_->SetControl(i == imm.table_count ? BUILD(IfDefault, sw)
- : BUILD(IfValue, i, sw));
- BrOrRet(decoder, target);
+ builder_->SetControl(i == imm.table_count ? builder_->IfDefault(sw)
+ : builder_->IfValue(i, sw));
+ BrOrRet(decoder, target, 1);
}
DCHECK(decoder->ok());
SetEnv(branch_env);
@@ -506,57 +525,52 @@ class WasmGraphBuildingInterface {
SetEnv(if_block->false_env);
}
- void Prefetch(FullDecoder* decoder,
- const MemoryAccessImmediate<validate>& imm, const Value& index,
- bool temporal) {
- BUILD(Prefetch, index.node, imm.offset, imm.alignment, temporal);
- }
-
void LoadMem(FullDecoder* decoder, LoadType type,
const MemoryAccessImmediate<validate>& imm, const Value& index,
Value* result) {
result->node =
- BUILD(LoadMem, type.value_type(), type.mem_type(), index.node,
- imm.offset, imm.alignment, decoder->position());
+ builder_->LoadMem(type.value_type(), type.mem_type(), index.node,
+ imm.offset, imm.alignment, decoder->position());
}
void LoadTransform(FullDecoder* decoder, LoadType type,
LoadTransformationKind transform,
const MemoryAccessImmediate<validate>& imm,
const Value& index, Value* result) {
- result->node =
- BUILD(LoadTransform, type.value_type(), type.mem_type(), transform,
- index.node, imm.offset, imm.alignment, decoder->position());
+ result->node = builder_->LoadTransform(type.value_type(), type.mem_type(),
+ transform, index.node, imm.offset,
+ imm.alignment, decoder->position());
}
void LoadLane(FullDecoder* decoder, LoadType type, const Value& value,
const Value& index, const MemoryAccessImmediate<validate>& imm,
const uint8_t laneidx, Value* result) {
- result->node = BUILD(LoadLane, type.value_type(), type.mem_type(),
- value.node, index.node, imm.offset, imm.alignment,
- laneidx, decoder->position());
+ result->node = builder_->LoadLane(
+ type.value_type(), type.mem_type(), value.node, index.node, imm.offset,
+ imm.alignment, laneidx, decoder->position());
}
void StoreMem(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm, const Value& index,
const Value& value) {
- BUILD(StoreMem, type.mem_rep(), index.node, imm.offset, imm.alignment,
- value.node, decoder->position(), type.value_type());
+ builder_->StoreMem(type.mem_rep(), index.node, imm.offset, imm.alignment,
+ value.node, decoder->position(), type.value_type());
}
void StoreLane(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm, const Value& index,
const Value& value, const uint8_t laneidx) {
- BUILD(StoreLane, type.mem_rep(), index.node, imm.offset, imm.alignment,
- value.node, laneidx, decoder->position(), type.value_type());
+ builder_->StoreLane(type.mem_rep(), index.node, imm.offset, imm.alignment,
+ value.node, laneidx, decoder->position(),
+ type.value_type());
}
void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
- result->node = BUILD(CurrentMemoryPages);
+ result->node = builder_->CurrentMemoryPages();
}
void MemoryGrow(FullDecoder* decoder, const Value& value, Value* result) {
- result->node = BUILD(MemoryGrow, value.node);
+ result->node = builder_->MemoryGrow(value.node);
// Always reload the instance cache after growing memory.
LoadContextIntoSsa(ssa_env_);
}
@@ -574,7 +588,7 @@ class WasmGraphBuildingInterface {
const CallFunctionImmediate<validate>& imm,
const Value args[]) {
DoReturnCall(decoder, kCallDirect, 0, CheckForNull::kWithoutNullCheck,
- nullptr, imm.sig, imm.index, args);
+ Value{nullptr, kWasmBottom}, imm.sig, imm.index, args);
}
void CallIndirect(FullDecoder* decoder, const Value& index,
@@ -589,8 +603,8 @@ class WasmGraphBuildingInterface {
const CallIndirectImmediate<validate>& imm,
const Value args[]) {
DoReturnCall(decoder, kCallIndirect, imm.table_index,
- CheckForNull::kWithoutNullCheck, index.node, imm.sig,
- imm.sig_index, args);
+ CheckForNull::kWithoutNullCheck, index, imm.sig, imm.sig_index,
+ args);
}
void CallRef(FullDecoder* decoder, const Value& func_ref,
@@ -609,27 +623,27 @@ class WasmGraphBuildingInterface {
CheckForNull null_check = func_ref.type.is_nullable()
? CheckForNull::kWithNullCheck
: CheckForNull::kWithoutNullCheck;
- DoReturnCall(decoder, kCallRef, 0, null_check, func_ref.node, sig,
- sig_index, args);
+ DoReturnCall(decoder, kCallRef, 0, null_check, func_ref, sig, sig_index,
+ args);
}
void BrOnNull(FullDecoder* decoder, const Value& ref_object, uint32_t depth) {
- SsaEnv* non_null_env = ssa_env_;
- SsaEnv* null_env = Split(decoder->zone(), non_null_env);
- non_null_env->SetNotMerged();
- BUILD(BrOnNull, ref_object.node, &null_env->control,
- &non_null_env->control);
- builder_->SetControl(non_null_env->control);
- SetEnv(null_env);
- BrOrRet(decoder, depth);
- SetEnv(non_null_env);
+ SsaEnv* false_env = ssa_env_;
+ SsaEnv* true_env = Split(decoder->zone(), false_env);
+ false_env->SetNotMerged();
+ builder_->BrOnNull(ref_object.node, &true_env->control,
+ &false_env->control);
+ builder_->SetControl(false_env->control);
+ SetEnv(true_env);
+ BrOrRet(decoder, depth, 1);
+ SetEnv(false_env);
}
void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
Value* result) {
NodeVector inputs(args.size());
GetNodes(inputs.begin(), args);
- TFNode* node = BUILD(SimdOp, opcode, inputs.begin());
+ TFNode* node = builder_->SimdOp(opcode, inputs.begin());
if (result) result->node = node;
}
@@ -638,7 +652,7 @@ class WasmGraphBuildingInterface {
Value* result) {
NodeVector nodes(inputs.size());
GetNodes(nodes.begin(), inputs);
- result->node = BUILD(SimdLaneOp, opcode, imm.lane, nodes.begin());
+ result->node = builder_->SimdLaneOp(opcode, imm.lane, nodes.begin());
}
void Simd8x16ShuffleOp(FullDecoder* decoder,
@@ -646,7 +660,7 @@ class WasmGraphBuildingInterface {
const Value& input0, const Value& input1,
Value* result) {
TFNode* input_nodes[] = {input0.node, input1.node};
- result->node = BUILD(Simd8x16ShuffleOp, imm.value, input_nodes);
+ result->node = builder_->Simd8x16ShuffleOp(imm.value, input_nodes);
}
void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>& imm,
@@ -656,7 +670,9 @@ class WasmGraphBuildingInterface {
for (int i = 0; i < count; ++i) {
args[i] = value_args[i].node;
}
- BUILD(Throw, imm.index, imm.exception, VectorOf(args), decoder->position());
+ CheckForException(decoder,
+ builder_->Throw(imm.index, imm.exception, VectorOf(args),
+ decoder->position()));
TerminateThrow(decoder);
}
@@ -665,7 +681,7 @@ class WasmGraphBuildingInterface {
block->is_try_unwind());
TFNode* exception = block->try_info->exception;
DCHECK_NOT_NULL(exception);
- BUILD(Rethrow, exception);
+ CheckForException(decoder, builder_->Rethrow(exception));
TerminateThrow(decoder);
}
@@ -691,10 +707,10 @@ class WasmGraphBuildingInterface {
TFNode* if_no_catch = nullptr;
// Get the exception tag and see if it matches the expected one.
- TFNode* caught_tag = BUILD(GetExceptionTag, exception);
- TFNode* exception_tag = BUILD(LoadExceptionTagFromTable, imm.index);
- TFNode* compare = BUILD(ExceptionTagEqual, caught_tag, exception_tag);
- BUILD(BranchNoHint, compare, &if_catch, &if_no_catch);
+ TFNode* caught_tag = builder_->GetExceptionTag(exception);
+ TFNode* exception_tag = builder_->LoadExceptionTagFromTable(imm.index);
+ TFNode* compare = builder_->ExceptionTagEqual(caught_tag, exception_tag);
+ builder_->BranchNoHint(compare, &if_catch, &if_no_catch);
// If the tags don't match we continue with the next tag by setting the
// false environment as the new {TryInfo::catch_env} here.
@@ -709,7 +725,7 @@ class WasmGraphBuildingInterface {
SetEnv(if_catch_env);
NodeVector caught_values(values.size());
Vector<TFNode*> caught_vector = VectorOf(caught_values);
- BUILD(GetExceptionValues, exception, imm.exception, caught_vector);
+ builder_->GetExceptionValues(exception, imm.exception, caught_vector);
for (size_t i = 0, e = values.size(); i < e; ++i) {
values[i].node = caught_values[i];
}
@@ -723,6 +739,8 @@ class WasmGraphBuildingInterface {
// Merge the current env into the target handler's env.
SetEnv(block->try_info->catch_env);
if (depth == decoder->control_depth() - 1) {
+ // We just throw to the caller here, so no need to generate IfSuccess
+ // and IfFailure nodes.
builder_->Rethrow(block->try_info->exception);
TerminateThrow(decoder);
current_catch_ = block->previous_catch;
@@ -731,8 +749,9 @@ class WasmGraphBuildingInterface {
DCHECK(decoder->control_at(depth)->is_try());
TryInfo* target_try = decoder->control_at(depth)->try_info;
if (FLAG_wasm_loop_unrolling) {
- StackValueVector stack_values;
- BuildNestedLoopExits(decoder, depth, true, stack_values);
+ ValueVector stack_values;
+ BuildNestedLoopExits(decoder, depth, true, stack_values,
+ &block->try_info->exception);
}
Goto(decoder, target_try->catch_env);
@@ -771,65 +790,65 @@ class WasmGraphBuildingInterface {
const MemoryAccessImmediate<validate>& imm, Value* result) {
NodeVector inputs(args.size());
GetNodes(inputs.begin(), args);
- TFNode* node = BUILD(AtomicOp, opcode, inputs.begin(), imm.alignment,
- imm.offset, decoder->position());
+ TFNode* node = builder_->AtomicOp(opcode, inputs.begin(), imm.alignment,
+ imm.offset, decoder->position());
if (result) result->node = node;
}
- void AtomicFence(FullDecoder* decoder) { BUILD(AtomicFence); }
+ void AtomicFence(FullDecoder* decoder) { builder_->AtomicFence(); }
void MemoryInit(FullDecoder* decoder,
const MemoryInitImmediate<validate>& imm, const Value& dst,
const Value& src, const Value& size) {
- BUILD(MemoryInit, imm.data_segment_index, dst.node, src.node, size.node,
- decoder->position());
+ builder_->MemoryInit(imm.data_segment_index, dst.node, src.node, size.node,
+ decoder->position());
}
void DataDrop(FullDecoder* decoder, const DataDropImmediate<validate>& imm) {
- BUILD(DataDrop, imm.index, decoder->position());
+ builder_->DataDrop(imm.index, decoder->position());
}
void MemoryCopy(FullDecoder* decoder,
const MemoryCopyImmediate<validate>& imm, const Value& dst,
const Value& src, const Value& size) {
- BUILD(MemoryCopy, dst.node, src.node, size.node, decoder->position());
+ builder_->MemoryCopy(dst.node, src.node, size.node, decoder->position());
}
void MemoryFill(FullDecoder* decoder,
const MemoryIndexImmediate<validate>& imm, const Value& dst,
const Value& value, const Value& size) {
- BUILD(MemoryFill, dst.node, value.node, size.node, decoder->position());
+ builder_->MemoryFill(dst.node, value.node, size.node, decoder->position());
}
void TableInit(FullDecoder* decoder, const TableInitImmediate<validate>& imm,
Vector<Value> args) {
- BUILD(TableInit, imm.table.index, imm.elem_segment_index, args[0].node,
- args[1].node, args[2].node, decoder->position());
+ builder_->TableInit(imm.table.index, imm.elem_segment_index, args[0].node,
+ args[1].node, args[2].node, decoder->position());
}
void ElemDrop(FullDecoder* decoder, const ElemDropImmediate<validate>& imm) {
- BUILD(ElemDrop, imm.index, decoder->position());
+ builder_->ElemDrop(imm.index, decoder->position());
}
void TableCopy(FullDecoder* decoder, const TableCopyImmediate<validate>& imm,
Vector<Value> args) {
- BUILD(TableCopy, imm.table_dst.index, imm.table_src.index, args[0].node,
- args[1].node, args[2].node, decoder->position());
+ builder_->TableCopy(imm.table_dst.index, imm.table_src.index, args[0].node,
+ args[1].node, args[2].node, decoder->position());
}
void TableGrow(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
const Value& value, const Value& delta, Value* result) {
- result->node = BUILD(TableGrow, imm.index, value.node, delta.node);
+ result->node = builder_->TableGrow(imm.index, value.node, delta.node);
}
void TableSize(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
Value* result) {
- result->node = BUILD(TableSize, imm.index);
+ result->node = builder_->TableSize(imm.index);
}
void TableFill(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
const Value& start, const Value& value, const Value& count) {
- BUILD(TableFill, imm.index, start.node, value.node, count.node);
+ builder_->TableFill(imm.index, start.node, value.node, count.node);
}
void StructNewWithRtt(FullDecoder* decoder,
@@ -840,8 +859,8 @@ class WasmGraphBuildingInterface {
for (uint32_t i = 0; i < field_count; i++) {
arg_nodes[i] = args[i].node;
}
- result->node = BUILD(StructNewWithRtt, imm.index, imm.struct_type, rtt.node,
- VectorOf(arg_nodes));
+ result->node = builder_->StructNewWithRtt(imm.index, imm.struct_type,
+ rtt.node, VectorOf(arg_nodes));
}
void StructNewDefault(FullDecoder* decoder,
const StructIndexImmediate<validate>& imm,
@@ -851,8 +870,8 @@ class WasmGraphBuildingInterface {
for (uint32_t i = 0; i < field_count; i++) {
arg_nodes[i] = DefaultValue(imm.struct_type->field(i));
}
- result->node = BUILD(StructNewWithRtt, imm.index, imm.struct_type, rtt.node,
- VectorOf(arg_nodes));
+ result->node = builder_->StructNewWithRtt(imm.index, imm.struct_type,
+ rtt.node, VectorOf(arg_nodes));
}
void StructGet(FullDecoder* decoder, const Value& struct_object,
@@ -861,9 +880,9 @@ class WasmGraphBuildingInterface {
CheckForNull null_check = struct_object.type.is_nullable()
? CheckForNull::kWithNullCheck
: CheckForNull::kWithoutNullCheck;
- result->node =
- BUILD(StructGet, struct_object.node, field.struct_index.struct_type,
- field.index, null_check, is_signed, decoder->position());
+ result->node = builder_->StructGet(
+ struct_object.node, field.struct_index.struct_type, field.index,
+ null_check, is_signed, decoder->position());
}
void StructSet(FullDecoder* decoder, const Value& struct_object,
@@ -872,17 +891,18 @@ class WasmGraphBuildingInterface {
CheckForNull null_check = struct_object.type.is_nullable()
? CheckForNull::kWithNullCheck
: CheckForNull::kWithoutNullCheck;
- BUILD(StructSet, struct_object.node, field.struct_index.struct_type,
- field.index, field_value.node, null_check, decoder->position());
+ builder_->StructSet(struct_object.node, field.struct_index.struct_type,
+ field.index, field_value.node, null_check,
+ decoder->position());
}
void ArrayNewWithRtt(FullDecoder* decoder,
const ArrayIndexImmediate<validate>& imm,
const Value& length, const Value& initial_value,
const Value& rtt, Value* result) {
- result->node =
- BUILD(ArrayNewWithRtt, imm.index, imm.array_type, length.node,
- initial_value.node, rtt.node, decoder->position());
+ result->node = builder_->ArrayNewWithRtt(imm.index, imm.array_type,
+ length.node, initial_value.node,
+ rtt.node, decoder->position());
}
void ArrayNewDefault(FullDecoder* decoder,
@@ -890,8 +910,8 @@ class WasmGraphBuildingInterface {
const Value& length, const Value& rtt, Value* result) {
TFNode* initial_value = DefaultValue(imm.array_type->element_type());
result->node =
- BUILD(ArrayNewWithRtt, imm.index, imm.array_type, length.node,
- initial_value, rtt.node, decoder->position());
+ builder_->ArrayNewWithRtt(imm.index, imm.array_type, length.node,
+ initial_value, rtt.node, decoder->position());
}
void ArrayGet(FullDecoder* decoder, const Value& array_obj,
@@ -900,8 +920,9 @@ class WasmGraphBuildingInterface {
CheckForNull null_check = array_obj.type.is_nullable()
? CheckForNull::kWithNullCheck
: CheckForNull::kWithoutNullCheck;
- result->node = BUILD(ArrayGet, array_obj.node, imm.array_type, index.node,
- null_check, is_signed, decoder->position());
+ result->node =
+ builder_->ArrayGet(array_obj.node, imm.array_type, index.node,
+ null_check, is_signed, decoder->position());
}
void ArraySet(FullDecoder* decoder, const Value& array_obj,
@@ -910,8 +931,8 @@ class WasmGraphBuildingInterface {
CheckForNull null_check = array_obj.type.is_nullable()
? CheckForNull::kWithNullCheck
: CheckForNull::kWithoutNullCheck;
- BUILD(ArraySet, array_obj.node, imm.array_type, index.node, value.node,
- null_check, decoder->position());
+ builder_->ArraySet(array_obj.node, imm.array_type, index.node, value.node,
+ null_check, decoder->position());
}
void ArrayLen(FullDecoder* decoder, const Value& array_obj, Value* result) {
@@ -919,28 +940,28 @@ class WasmGraphBuildingInterface {
? CheckForNull::kWithNullCheck
: CheckForNull::kWithoutNullCheck;
result->node =
- BUILD(ArrayLen, array_obj.node, null_check, decoder->position());
+ builder_->ArrayLen(array_obj.node, null_check, decoder->position());
}
void I31New(FullDecoder* decoder, const Value& input, Value* result) {
- result->node = BUILD(I31New, input.node);
+ result->node = builder_->I31New(input.node);
}
void I31GetS(FullDecoder* decoder, const Value& input, Value* result) {
- result->node = BUILD(I31GetS, input.node);
+ result->node = builder_->I31GetS(input.node);
}
void I31GetU(FullDecoder* decoder, const Value& input, Value* result) {
- result->node = BUILD(I31GetU, input.node);
+ result->node = builder_->I31GetU(input.node);
}
void RttCanon(FullDecoder* decoder, uint32_t type_index, Value* result) {
- result->node = BUILD(RttCanon, type_index);
+ result->node = builder_->RttCanon(type_index);
}
void RttSub(FullDecoder* decoder, uint32_t type_index, const Value& parent,
Value* result) {
- result->node = BUILD(RttSub, type_index, parent.node);
+ result->node = builder_->RttSub(type_index, parent.node);
}
using StaticKnowledge = compiler::WasmGraphBuilder::ObjectReferenceKnowledge;
@@ -950,7 +971,7 @@ class WasmGraphBuildingInterface {
const WasmModule* module) {
StaticKnowledge result;
result.object_can_be_null = object_type.is_nullable();
- DCHECK(object_type.is_object_reference_type()); // Checked by validation.
+ DCHECK(object_type.is_object_reference()); // Checked by validation.
// In the bottom case, the result is irrelevant.
result.reference_kind =
rtt_type != kWasmBottom && module->has_signature(rtt_type.ref_index())
@@ -964,7 +985,7 @@ class WasmGraphBuildingInterface {
Value* result) {
StaticKnowledge config =
ComputeStaticKnowledge(object.type, rtt.type, decoder->module_);
- result->node = BUILD(RefTest, object.node, rtt.node, config);
+ result->node = builder_->RefTest(object.node, rtt.node, config);
}
void RefCast(FullDecoder* decoder, const Value& object, const Value& rtt,
@@ -972,10 +993,10 @@ class WasmGraphBuildingInterface {
StaticKnowledge config =
ComputeStaticKnowledge(object.type, rtt.type, decoder->module_);
result->node =
- BUILD(RefCast, object.node, rtt.node, config, decoder->position());
+ builder_->RefCast(object.node, rtt.node, config, decoder->position());
}
- template <TFNode* (compiler::WasmGraphBuilder::*branch_function)(
+ template <void (compiler::WasmGraphBuilder::*branch_function)(
TFNode*, TFNode*, StaticKnowledge, TFNode**, TFNode**, TFNode**,
TFNode**)>
void BrOnCastAbs(FullDecoder* decoder, const Value& object, const Value& rtt,
@@ -985,16 +1006,15 @@ class WasmGraphBuildingInterface {
SsaEnv* match_env = Split(decoder->zone(), ssa_env_);
SsaEnv* no_match_env = Steal(decoder->zone(), ssa_env_);
no_match_env->SetNotMerged();
- DCHECK(decoder->ok());
- CheckForException(
- decoder,
- (builder_->*branch_function)(
- object.node, rtt.node, config, &match_env->control,
- &match_env->effect, &no_match_env->control, &no_match_env->effect));
+ (builder_->*branch_function)(object.node, rtt.node, config,
+ &match_env->control, &match_env->effect,
+ &no_match_env->control, &no_match_env->effect);
builder_->SetControl(no_match_env->control);
SetEnv(match_env);
value_on_branch->node = object.node;
- BrOrRet(decoder, br_depth);
+ // Currently, br_on_* instructions modify the value stack before calling
+ // the interface function, so we don't need to drop any values here.
+ BrOrRet(decoder, br_depth, 0);
SetEnv(no_match_env);
}
@@ -1005,12 +1025,12 @@ class WasmGraphBuildingInterface {
}
void RefIsData(FullDecoder* decoder, const Value& object, Value* result) {
- result->node = BUILD(RefIsData, object.node, object.type.is_nullable());
+ result->node = builder_->RefIsData(object.node, object.type.is_nullable());
}
void RefAsData(FullDecoder* decoder, const Value& object, Value* result) {
- result->node = BUILD(RefAsData, object.node, object.type.is_nullable(),
- decoder->position());
+ result->node = builder_->RefAsData(object.node, object.type.is_nullable(),
+ decoder->position());
}
void BrOnData(FullDecoder* decoder, const Value& object,
@@ -1021,12 +1041,12 @@ class WasmGraphBuildingInterface {
}
void RefIsFunc(FullDecoder* decoder, const Value& object, Value* result) {
- result->node = BUILD(RefIsFunc, object.node, object.type.is_nullable());
+ result->node = builder_->RefIsFunc(object.node, object.type.is_nullable());
}
void RefAsFunc(FullDecoder* decoder, const Value& object, Value* result) {
- result->node = BUILD(RefAsFunc, object.node, object.type.is_nullable(),
- decoder->position());
+ result->node = builder_->RefAsFunc(object.node, object.type.is_nullable(),
+ decoder->position());
}
void BrOnFunc(FullDecoder* decoder, const Value& object,
@@ -1037,11 +1057,11 @@ class WasmGraphBuildingInterface {
}
void RefIsI31(FullDecoder* decoder, const Value& object, Value* result) {
- result->node = BUILD(RefIsI31, object.node);
+ result->node = builder_->RefIsI31(object.node);
}
void RefAsI31(FullDecoder* decoder, const Value& object, Value* result) {
- result->node = BUILD(RefAsI31, object.node, decoder->position());
+ result->node = builder_->RefAsI31(object.node, decoder->position());
}
void BrOnI31(FullDecoder* decoder, const Value& object,
@@ -1055,10 +1075,14 @@ class WasmGraphBuildingInterface {
to->node = from.node;
}
+ std::vector<compiler::WasmLoopInfo> loop_infos() { return loop_infos_; }
+
private:
SsaEnv* ssa_env_ = nullptr;
compiler::WasmGraphBuilder* builder_;
uint32_t current_catch_ = kNullCatch;
+ // Tracks loop data for loop unrolling.
+ std::vector<compiler::WasmLoopInfo> loop_infos_;
TFNode* effect() { return builder_->effect(); }
@@ -1119,13 +1143,17 @@ class WasmGraphBuildingInterface {
builder_->set_instance_cache(&env->instance_cache);
}
- TFNode* CheckForException(FullDecoder* decoder, TFNode* node) {
+ V8_INLINE TFNode* CheckForException(FullDecoder* decoder, TFNode* node) {
if (node == nullptr) return nullptr;
const bool inside_try_scope = current_catch_ != kNullCatch;
-
if (!inside_try_scope) return node;
+ return CheckForExceptionImpl(decoder, node);
+ }
+
+ V8_NOINLINE TFNode* CheckForExceptionImpl(FullDecoder* decoder,
+ TFNode* node) {
TFNode* if_success = nullptr;
TFNode* if_exception = nullptr;
if (!builder_->ThrowsException(node, &if_success, &if_exception)) {
@@ -1141,9 +1169,9 @@ class WasmGraphBuildingInterface {
SetEnv(exception_env);
TryInfo* try_info = current_try_info(decoder);
if (FLAG_wasm_loop_unrolling) {
- StackValueVector values;
+ ValueVector values;
BuildNestedLoopExits(decoder, control_depth_of_current_catch(decoder),
- true, values);
+ true, values, &if_exception);
}
Goto(decoder, try_info->catch_env);
if (try_info->exception == nullptr) {
@@ -1179,7 +1207,7 @@ class WasmGraphBuildingInterface {
return builder_->RefNull();
case kRtt:
case kRttWithDepth:
- case kStmt:
+ case kVoid:
case kBottom:
case kRef:
UNREACHABLE();
@@ -1209,14 +1237,16 @@ class WasmGraphBuildingInterface {
}
}
- void MergeValuesInto(FullDecoder* decoder, Control* c, Merge<Value>* merge) {
+ void MergeValuesInto(FullDecoder* decoder, Control* c, Merge<Value>* merge,
+ uint32_t drop_values = 0) {
#ifdef DEBUG
- uint32_t avail =
- decoder->stack_size() - decoder->control_at(0)->stack_depth;
+ uint32_t avail = decoder->stack_size() -
+ decoder->control_at(0)->stack_depth - drop_values;
DCHECK_GE(avail, merge->arity);
#endif
- Value* stack_values =
- merge->arity > 0 ? decoder->stack_value(merge->arity) : nullptr;
+ Value* stack_values = merge->arity > 0
+ ? decoder->stack_value(merge->arity + drop_values)
+ : nullptr;
MergeValuesInto(decoder, c, merge, stack_values);
}
@@ -1346,16 +1376,22 @@ class WasmGraphBuildingInterface {
}
switch (call_mode) {
case kCallIndirect:
- BUILD(CallIndirect, table_index, sig_index, VectorOf(arg_nodes),
- VectorOf(return_nodes), decoder->position());
+ CheckForException(decoder,
+ builder_->CallIndirect(
+ table_index, sig_index, VectorOf(arg_nodes),
+ VectorOf(return_nodes), decoder->position()));
break;
case kCallDirect:
- BUILD(CallDirect, sig_index, VectorOf(arg_nodes),
- VectorOf(return_nodes), decoder->position());
+ CheckForException(
+ decoder,
+ builder_->CallDirect(sig_index, VectorOf(arg_nodes),
+ VectorOf(return_nodes), decoder->position()));
break;
case kCallRef:
- BUILD(CallRef, sig_index, VectorOf(arg_nodes), VectorOf(return_nodes),
- null_check, decoder->position());
+ CheckForException(decoder,
+ builder_->CallRef(sig_index, VectorOf(arg_nodes),
+ VectorOf(return_nodes), null_check,
+ decoder->position()));
break;
}
for (size_t i = 0; i < return_count; ++i) {
@@ -1368,31 +1404,45 @@ class WasmGraphBuildingInterface {
void DoReturnCall(FullDecoder* decoder, CallMode call_mode,
uint32_t table_index, CheckForNull null_check,
- TFNode* index_node, const FunctionSig* sig,
+ Value index_or_caller_value, const FunctionSig* sig,
uint32_t sig_index, const Value args[]) {
size_t arg_count = sig->parameter_count();
- NodeVector arg_nodes(arg_count + 1);
- arg_nodes[0] = index_node;
- for (size_t i = 0; i < arg_count; ++i) {
- arg_nodes[i + 1] = args[i].node;
+
+ ValueVector arg_values(arg_count + 1);
+ arg_values[0] = index_or_caller_value;
+ for (uint32_t i = 0; i < arg_count; i++) {
+ arg_values[i + 1] = args[i];
+ }
+ if (FLAG_wasm_loop_unrolling) {
+ BuildNestedLoopExits(decoder, decoder->control_depth(), false,
+ arg_values);
}
+
+ NodeVector arg_nodes(arg_count + 1);
+ GetNodes(arg_nodes.data(), VectorOf(arg_values));
+
switch (call_mode) {
case kCallIndirect:
- BUILD(ReturnCallIndirect, table_index, sig_index, VectorOf(arg_nodes),
- decoder->position());
+ CheckForException(decoder,
+ builder_->ReturnCallIndirect(table_index, sig_index,
+ VectorOf(arg_nodes),
+ decoder->position()));
break;
case kCallDirect:
- BUILD(ReturnCall, sig_index, VectorOf(arg_nodes), decoder->position());
+ CheckForException(decoder,
+ builder_->ReturnCall(sig_index, VectorOf(arg_nodes),
+ decoder->position()));
break;
case kCallRef:
- BUILD(ReturnCallRef, sig_index, VectorOf(arg_nodes), null_check,
- decoder->position());
+ CheckForException(
+ decoder, builder_->ReturnCallRef(sig_index, VectorOf(arg_nodes),
+ null_check, decoder->position()));
break;
}
}
void BuildLoopExits(FullDecoder* decoder, Control* loop) {
- BUILD(LoopExit, loop->loop_node);
+ builder_->LoopExit(loop->loop_node);
ssa_env_->control = control();
ssa_env_->effect = effect();
}
@@ -1420,16 +1470,29 @@ class WasmGraphBuildingInterface {
}
void BuildNestedLoopExits(FullDecoder* decoder, uint32_t depth_limit,
- bool wrap_exit_values,
- StackValueVector& stack_values) {
+ bool wrap_exit_values, ValueVector& stack_values,
+ TFNode** exception_value = nullptr) {
DCHECK(FLAG_wasm_loop_unrolling);
+ Control* control = nullptr;
+ // We are only interested in exits from the innermost loop.
for (uint32_t i = 0; i < depth_limit; i++) {
- Control* control = decoder->control_at(i);
- if (!control->is_loop()) continue;
+ Control* c = decoder->control_at(i);
+ if (c->is_loop()) {
+ control = c;
+ break;
+ }
+ }
+ if (control != nullptr) {
BuildLoopExits(decoder, control);
for (Value& value : stack_values) {
- value.node = builder_->LoopExitValue(
- value.node, value.type.machine_representation());
+ if (value.node != nullptr) {
+ value.node = builder_->LoopExitValue(
+ value.node, value.type.machine_representation());
+ }
+ }
+ if (exception_value != nullptr) {
+ *exception_value = builder_->LoopExitValue(
+ *exception_value, MachineRepresentation::kWord32);
}
if (wrap_exit_values) {
WrapLocalsAtLoopExit(decoder, control);
@@ -1442,7 +1505,7 @@ class WasmGraphBuildingInterface {
SsaEnv* internal_env = ssa_env_;
SsaEnv* exit_env = Split(decoder->zone(), ssa_env_);
SetEnv(exit_env);
- StackValueVector stack_values;
+ ValueVector stack_values;
BuildNestedLoopExits(decoder, decoder->control_depth(), false,
stack_values);
builder_->TerminateThrow(effect(), control());
@@ -1459,6 +1522,7 @@ DecodeResult BuildTFGraph(AccountingAllocator* allocator,
const WasmFeatures& enabled, const WasmModule* module,
compiler::WasmGraphBuilder* builder,
WasmFeatures* detected, const FunctionBody& body,
+ std::vector<compiler::WasmLoopInfo>* loop_infos,
compiler::NodeOriginTable* node_origins) {
Zone zone(allocator, ZONE_NAME);
WasmFullDecoder<Decoder::kFullValidation, WasmGraphBuildingInterface> decoder(
@@ -1470,11 +1534,12 @@ DecodeResult BuildTFGraph(AccountingAllocator* allocator,
if (node_origins) {
builder->RemoveBytecodePositionDecorator();
}
+ if (FLAG_wasm_loop_unrolling) {
+ *loop_infos = decoder.interface().loop_infos();
+ }
return decoder.toResult(nullptr);
}
-#undef BUILD
-
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/graph-builder-interface.h b/deps/v8/src/wasm/graph-builder-interface.h
index e7101468274..ce125313e44 100644
--- a/deps/v8/src/wasm/graph-builder-interface.h
+++ b/deps/v8/src/wasm/graph-builder-interface.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_GRAPH_BUILDER_INTERFACE_H_
#define V8_WASM_GRAPH_BUILDER_INTERFACE_H_
@@ -15,6 +19,7 @@ namespace internal {
namespace compiler { // external declarations from compiler.
class NodeOriginTable;
class WasmGraphBuilder;
+struct WasmLoopInfo;
} // namespace compiler
namespace wasm {
@@ -27,6 +32,7 @@ V8_EXPORT_PRIVATE DecodeResult
BuildTFGraph(AccountingAllocator* allocator, const WasmFeatures& enabled,
const WasmModule* module, compiler::WasmGraphBuilder* builder,
WasmFeatures* detected, const FunctionBody& body,
+ std::vector<compiler::WasmLoopInfo>* loop_infos,
compiler::NodeOriginTable* node_origins);
} // namespace wasm
diff --git a/deps/v8/src/wasm/jump-table-assembler.h b/deps/v8/src/wasm/jump-table-assembler.h
index b14d66eafe4..3963de9824a 100644
--- a/deps/v8/src/wasm/jump-table-assembler.h
+++ b/deps/v8/src/wasm/jump-table-assembler.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_JUMP_TABLE_ASSEMBLER_H_
#define V8_WASM_JUMP_TABLE_ASSEMBLER_H_
diff --git a/deps/v8/src/wasm/leb-helper.h b/deps/v8/src/wasm/leb-helper.h
index b598ee8578f..f3737600df4 100644
--- a/deps/v8/src/wasm/leb-helper.h
+++ b/deps/v8/src/wasm/leb-helper.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_LEB_HELPER_H_
#define V8_WASM_LEB_HELPER_H_
diff --git a/deps/v8/src/wasm/local-decl-encoder.h b/deps/v8/src/wasm/local-decl-encoder.h
index 5bce05871ce..987807cf292 100644
--- a/deps/v8/src/wasm/local-decl-encoder.h
+++ b/deps/v8/src/wasm/local-decl-encoder.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_LOCAL_DECL_ENCODER_H_
#define V8_WASM_LOCAL_DECL_ENCODER_H_
diff --git a/deps/v8/src/wasm/memory-tracing.h b/deps/v8/src/wasm/memory-tracing.h
index f025f07ded1..450fc61d0d3 100644
--- a/deps/v8/src/wasm/memory-tracing.h
+++ b/deps/v8/src/wasm/memory-tracing.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_MEMORY_TRACING_H_
#define V8_WASM_MEMORY_TRACING_H_
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 00d7dab43ce..3b1d8750bac 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -1152,12 +1152,11 @@ bool CompileLazy(Isolate* isolate, Handle<WasmModuleObject> module_object,
if (WasmCode::ShouldBeLogged(isolate)) {
DisallowGarbageCollection no_gc;
- Object source_url_obj = module_object->script().source_url();
- DCHECK(source_url_obj.IsString() || source_url_obj.IsUndefined());
- std::unique_ptr<char[]> source_url =
- source_url_obj.IsString() ? String::cast(source_url_obj).ToCString()
- : nullptr;
- code->LogCode(isolate, source_url.get(), module_object->script().id());
+ Object url_obj = module_object->script().name();
+ DCHECK(url_obj.IsString() || url_obj.IsUndefined());
+ std::unique_ptr<char[]> url =
+ url_obj.IsString() ? String::cast(url_obj).ToCString() : nullptr;
+ code->LogCode(isolate, url.get(), module_object->script().id());
}
counters->wasm_lazily_compiled_functions()->Increment();
@@ -3343,6 +3342,8 @@ void CompilationStateImpl::WaitForCompilationEvent(
return done_->load(std::memory_order_relaxed);
}
+ bool IsJoiningThread() const override { return true; }
+
void NotifyConcurrencyIncrease() override { UNIMPLEMENTED(); }
uint8_t GetTaskId() override { return kMainTaskId; }
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index 9c2b7556cb3..c45ca2a03e4 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_MODULE_COMPILER_H_
#define V8_WASM_MODULE_COMPILER_H_
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 61d0f691d1a..f2c77efb230 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -647,7 +647,7 @@ class ModuleDecoderImpl : public Decoder {
// ===== Imported global =============================================
import->index = static_cast<uint32_t>(module_->globals.size());
module_->globals.push_back(
- {kWasmStmt, false, WasmInitExpr(), {0}, true, false});
+ {kWasmVoid, false, WasmInitExpr(), {0}, true, false});
WasmGlobal* global = &module_->globals.back();
global->type = consume_value_type();
global->mutability = consume_mutability();
@@ -728,6 +728,9 @@ class ModuleDecoderImpl : public Decoder {
"table elements", "elements", std::numeric_limits<uint32_t>::max(),
&table->initial_size, &table->has_maximum_size,
std::numeric_limits<uint32_t>::max(), &table->maximum_size, flags);
+ if (!table_type.is_defaultable()) {
+ table->initial_value = consume_init_expr(module_.get(), table_type, 0);
+ }
}
}
@@ -753,7 +756,7 @@ class ModuleDecoderImpl : public Decoder {
TRACE("DecodeGlobal[%d] module+%d\n", i, static_cast<int>(pc_ - start_));
// Add an uninitialized global and pass a pointer to it.
module_->globals.push_back(
- {kWasmStmt, false, WasmInitExpr(), {0}, false, false});
+ {kWasmVoid, false, WasmInitExpr(), {0}, false, false});
WasmGlobal* global = &module_->globals.back();
global->type = consume_value_type();
global->mutability = consume_mutability();
@@ -1302,7 +1305,7 @@ class ModuleDecoderImpl : public Decoder {
}
WasmInitExpr DecodeInitExprForTesting() {
- return consume_init_expr(nullptr, kWasmStmt, 0);
+ return consume_init_expr(nullptr, kWasmVoid, 0);
}
const std::shared_ptr<WasmModule>& shared_module() const { return module_; }
@@ -1337,20 +1340,16 @@ class ModuleDecoderImpl : public Decoder {
kLastKnownModuleSection,
"not enough bits");
WasmError intermediate_error_;
- // Set of type offsets discovered in field types during type section decoding.
- // Since struct types may be recursive, this is used for checking and error
- // reporting once the whole type section is parsed.
- std::unordered_map<uint32_t, int> deferred_check_type_index_;
ModuleOrigin origin_;
ValueType TypeOf(const WasmInitExpr& expr) {
switch (expr.kind()) {
case WasmInitExpr::kNone:
- return kWasmStmt;
+ return kWasmVoid;
case WasmInitExpr::kGlobalGet:
return expr.immediate().index < module_->globals.size()
? module_->globals[expr.immediate().index].type
- : kWasmStmt;
+ : kWasmVoid;
case WasmInitExpr::kI32Const:
return kWasmI32;
case WasmInitExpr::kI64Const:
@@ -1379,7 +1378,7 @@ class ModuleDecoderImpl : public Decoder {
return ValueType::Rtt(expr.immediate().heap_type,
operand_type.depth() + 1);
} else {
- return kWasmStmt;
+ return kWasmVoid;
}
}
}
@@ -1436,7 +1435,7 @@ class ModuleDecoderImpl : public Decoder {
for (WasmGlobal& global : module->globals) {
if (global.mutability && global.imported) {
global.index = num_imported_mutable_globals++;
- } else if (global.type.is_reference_type()) {
+ } else if (global.type.is_reference()) {
global.offset = tagged_offset;
// All entries in the tagged_globals_buffer have size 1.
tagged_offset++;
@@ -1713,7 +1712,7 @@ class ModuleDecoderImpl : public Decoder {
if (V8_UNLIKELY(!enabled_features_.has_reftypes() &&
!enabled_features_.has_eh())) {
errorf(pc(),
- "invalid opcode 0x%x in global initializer, enable with "
+ "invalid opcode 0x%x in initializer expression, enable with "
"--experimental-wasm-reftypes or --experimental-wasm-eh",
kExprRefNull);
return {};
@@ -1729,7 +1728,7 @@ class ModuleDecoderImpl : public Decoder {
case kExprRefFunc: {
if (V8_UNLIKELY(!enabled_features_.has_reftypes())) {
errorf(pc(),
- "invalid opcode 0x%x in global initializer, enable with "
+ "invalid opcode 0x%x in initializer expression, enable with "
"--experimental-wasm-reftypes",
kExprRefFunc);
return {};
@@ -1752,7 +1751,7 @@ class ModuleDecoderImpl : public Decoder {
// the type check or stack height check at the end.
opcode = read_prefixed_opcode<validate>(pc(), &len);
if (V8_UNLIKELY(opcode != kExprS128Const)) {
- errorf(pc(), "invalid SIMD opcode 0x%x in global initializer",
+ errorf(pc(), "invalid SIMD opcode 0x%x in initializer expression",
opcode);
return {};
}
@@ -1804,7 +1803,8 @@ class ModuleDecoderImpl : public Decoder {
break;
}
default: {
- errorf(pc(), "invalid opcode 0x%x in global initializer", opcode);
+ errorf(pc(), "invalid opcode 0x%x in initializer expression",
+ opcode);
return {};
}
}
@@ -1813,7 +1813,7 @@ class ModuleDecoderImpl : public Decoder {
case kExprEnd:
break;
default: {
- errorf(pc(), "invalid opcode 0x%x in global initializer", opcode);
+ errorf(pc(), "invalid opcode 0x%x in initializer expression", opcode);
return {};
}
}
@@ -1821,23 +1821,23 @@ class ModuleDecoderImpl : public Decoder {
}
if (V8_UNLIKELY(pc() > end())) {
- error(end(), "Global initializer extending beyond code end");
+ error(end(), "Initializer expression extending beyond code end");
return {};
}
if (V8_UNLIKELY(opcode != kExprEnd)) {
- error(pc(), "Global initializer is missing 'end'");
+ error(pc(), "Initializer expression is missing 'end'");
return {};
}
if (V8_UNLIKELY(stack.size() != 1)) {
errorf(pc(),
- "Found 'end' in global initalizer, but %s expressions were "
+ "Found 'end' in initializer expression, but %s expressions were "
"found on the stack",
stack.size() > 1 ? "more than one" : "no");
return {};
}
WasmInitExpr expr = std::move(stack.back());
- if (expected != kWasmStmt && !IsSubtypeOf(TypeOf(expr), expected, module)) {
+ if (expected != kWasmVoid && !IsSubtypeOf(TypeOf(expr), expected, module)) {
errorf(pc(), "type error in init expression, expected %s, got %s",
expected.name().c_str(), TypeOf(expr).name().c_str());
}
@@ -1892,7 +1892,7 @@ class ModuleDecoderImpl : public Decoder {
} else {
const byte* position = pc();
ValueType result = consume_value_type();
- if (!result.is_reference_type()) {
+ if (!result.is_reference()) {
error(position, "expected reference type");
}
return result;
@@ -2416,11 +2416,44 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
}
}
-LocalNames DecodeLocalNames(Vector<const uint8_t> module_bytes) {
+NameMap DecodeNameMap(Vector<const uint8_t> module_bytes,
+ uint8_t name_section_kind) {
+ Decoder decoder(module_bytes);
+ if (!FindNameSection(&decoder)) return NameMap{{}};
+
+ std::vector<NameAssoc> names;
+ while (decoder.ok() && decoder.more()) {
+ uint8_t name_type = decoder.consume_u8("name type");
+ if (name_type & 0x80) break; // no varuint7
+
+ uint32_t name_payload_len = decoder.consume_u32v("name payload length");
+ if (!decoder.checkAvailable(name_payload_len)) break;
+
+ if (name_type != name_section_kind) {
+ decoder.consume_bytes(name_payload_len, "name subsection payload");
+ continue;
+ }
+
+ uint32_t count = decoder.consume_u32v("names count");
+ for (uint32_t i = 0; i < count; i++) {
+ uint32_t index = decoder.consume_u32v("index");
+ WireBytesRef name = consume_string(&decoder, false, "name");
+ if (!decoder.ok()) break;
+ if (index > kMaxInt) continue;
+ if (!validate_utf8(&decoder, name)) continue;
+ names.emplace_back(static_cast<int>(index), name);
+ }
+ }
+ std::stable_sort(names.begin(), names.end(), NameAssoc::IndexLess{});
+ return NameMap{std::move(names)};
+}
+
+IndirectNameMap DecodeIndirectNameMap(Vector<const uint8_t> module_bytes,
+ uint8_t name_section_kind) {
Decoder decoder(module_bytes);
- if (!FindNameSection(&decoder)) return LocalNames{{}};
+ if (!FindNameSection(&decoder)) return IndirectNameMap{{}};
- std::vector<LocalNamesPerFunction> functions;
+ std::vector<IndirectNameMapEntry> entries;
while (decoder.ok() && decoder.more()) {
uint8_t name_type = decoder.consume_u8("name type");
if (name_type & 0x80) break; // no varuint7
@@ -2428,35 +2461,35 @@ LocalNames DecodeLocalNames(Vector<const uint8_t> module_bytes) {
uint32_t name_payload_len = decoder.consume_u32v("name payload length");
if (!decoder.checkAvailable(name_payload_len)) break;
- if (name_type != NameSectionKindCode::kLocal) {
+ if (name_type != name_section_kind) {
decoder.consume_bytes(name_payload_len, "name subsection payload");
continue;
}
- uint32_t local_names_count = decoder.consume_u32v("local names count");
- for (uint32_t i = 0; i < local_names_count; ++i) {
- uint32_t func_index = decoder.consume_u32v("function index");
- if (func_index > kMaxInt) continue;
- std::vector<LocalName> names;
- uint32_t num_names = decoder.consume_u32v("namings count");
- for (uint32_t k = 0; k < num_names; ++k) {
- uint32_t local_index = decoder.consume_u32v("local index");
- WireBytesRef name = consume_string(&decoder, false, "local name");
+ uint32_t outer_count = decoder.consume_u32v("outer count");
+ for (uint32_t i = 0; i < outer_count; ++i) {
+ uint32_t outer_index = decoder.consume_u32v("outer index");
+ if (outer_index > kMaxInt) continue;
+ std::vector<NameAssoc> names;
+ uint32_t inner_count = decoder.consume_u32v("inner count");
+ for (uint32_t k = 0; k < inner_count; ++k) {
+ uint32_t inner_index = decoder.consume_u32v("inner index");
+ WireBytesRef name = consume_string(&decoder, false, "name");
if (!decoder.ok()) break;
- if (local_index > kMaxInt) continue;
+ if (inner_index > kMaxInt) continue;
// Ignore non-utf8 names.
if (!validate_utf8(&decoder, name)) continue;
- names.emplace_back(static_cast<int>(local_index), name);
+ names.emplace_back(static_cast<int>(inner_index), name);
}
// Use stable sort to get deterministic names (the first one declared)
// even in the presence of duplicates.
- std::stable_sort(names.begin(), names.end(), LocalName::IndexLess{});
- functions.emplace_back(static_cast<int>(func_index), std::move(names));
+ std::stable_sort(names.begin(), names.end(), NameAssoc::IndexLess{});
+ entries.emplace_back(static_cast<int>(outer_index), std::move(names));
}
}
- std::stable_sort(functions.begin(), functions.end(),
- LocalNamesPerFunction::FunctionIndexLess{});
- return LocalNames{std::move(functions)};
+ std::stable_sort(entries.begin(), entries.end(),
+ IndirectNameMapEntry::IndexLess{});
+ return IndirectNameMap{std::move(entries)};
}
#undef TRACE
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index 767f4fd088e..2d33f51f319 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_MODULE_DECODER_H_
#define V8_WASM_MODULE_DECODER_H_
@@ -50,15 +54,18 @@ struct AsmJsOffsets {
};
using AsmJsOffsetsResult = Result<AsmJsOffsets>;
-class LocalName {
+// The class names "NameAssoc", "NameMap", and "IndirectNameMap" match
+// the terms used by the spec:
+// https://webassembly.github.io/spec/core/bikeshed/index.html#name-section%E2%91%A0
+class NameAssoc {
public:
- LocalName(int index, WireBytesRef name) : index_(index), name_(name) {}
+ NameAssoc(int index, WireBytesRef name) : index_(index), name_(name) {}
int index() const { return index_; }
WireBytesRef name() const { return name_; }
struct IndexLess {
- bool operator()(const LocalName& a, const LocalName& b) const {
+ bool operator()(const NameAssoc& a, const NameAssoc& b) const {
return a.index() < b.index();
}
};
@@ -68,62 +75,71 @@ class LocalName {
WireBytesRef name_;
};
-class LocalNamesPerFunction {
+class NameMap {
public:
- // For performance reasons, {LocalNamesPerFunction} should not be copied.
- MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(LocalNamesPerFunction);
+ // For performance reasons, {NameMap} should not be copied.
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(NameMap);
- LocalNamesPerFunction(int function_index, std::vector<LocalName> names)
- : function_index_(function_index), names_(std::move(names)) {
+ explicit NameMap(std::vector<NameAssoc> names) : names_(std::move(names)) {
DCHECK(
- std::is_sorted(names_.begin(), names_.end(), LocalName::IndexLess{}));
+ std::is_sorted(names_.begin(), names_.end(), NameAssoc::IndexLess{}));
}
- int function_index() const { return function_index_; }
-
- WireBytesRef GetName(int local_index) {
- auto it =
- std::lower_bound(names_.begin(), names_.end(),
- LocalName{local_index, {}}, LocalName::IndexLess{});
+ WireBytesRef GetName(int index) {
+ auto it = std::lower_bound(names_.begin(), names_.end(),
+ NameAssoc{index, {}}, NameAssoc::IndexLess{});
if (it == names_.end()) return {};
- if (it->index() != local_index) return {};
+ if (it->index() != index) return {};
return it->name();
}
- struct FunctionIndexLess {
- bool operator()(const LocalNamesPerFunction& a,
- const LocalNamesPerFunction& b) const {
- return a.function_index() < b.function_index();
+ private:
+ std::vector<NameAssoc> names_;
+};
+
+class IndirectNameMapEntry : public NameMap {
+ public:
+ // For performance reasons, {IndirectNameMapEntry} should not be copied.
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(IndirectNameMapEntry);
+
+ IndirectNameMapEntry(int index, std::vector<NameAssoc> names)
+ : NameMap(std::move(names)), index_(index) {}
+
+ int index() const { return index_; }
+
+ struct IndexLess {
+ bool operator()(const IndirectNameMapEntry& a,
+ const IndirectNameMapEntry& b) const {
+ return a.index() < b.index();
}
};
private:
- int function_index_;
- std::vector<LocalName> names_;
+ int index_;
};
-class LocalNames {
+class IndirectNameMap {
public:
- // For performance reasons, {LocalNames} should not be copied.
- MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(LocalNames);
+ // For performance reasons, {IndirectNameMap} should not be copied.
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(IndirectNameMap);
- explicit LocalNames(std::vector<LocalNamesPerFunction> functions)
+ explicit IndirectNameMap(std::vector<IndirectNameMapEntry> functions)
: functions_(std::move(functions)) {
DCHECK(std::is_sorted(functions_.begin(), functions_.end(),
- LocalNamesPerFunction::FunctionIndexLess{}));
+ IndirectNameMapEntry::IndexLess{}));
}
WireBytesRef GetName(int function_index, int local_index) {
auto it = std::lower_bound(functions_.begin(), functions_.end(),
- LocalNamesPerFunction{function_index, {}},
- LocalNamesPerFunction::FunctionIndexLess{});
+ IndirectNameMapEntry{function_index, {}},
+ IndirectNameMapEntry::IndexLess{});
if (it == functions_.end()) return {};
- if (it->function_index() != function_index) return {};
+ if (it->index() != function_index) return {};
return it->GetName(local_index);
}
private:
- std::vector<LocalNamesPerFunction> functions_;
+ std::vector<IndirectNameMapEntry> functions_;
};
enum class DecodingMethod {
@@ -179,11 +195,14 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
std::unordered_map<uint32_t, WireBytesRef>* names,
const Vector<const WasmExport> export_table);
-// Decode the local names assignment from the name section.
+// Decode the requested subsection of the name section.
// The result will be empty if no name section is present. On encountering an
// error in the name section, returns all information decoded up to the first
// error.
-LocalNames DecodeLocalNames(Vector<const uint8_t> module_bytes);
+NameMap DecodeNameMap(Vector<const uint8_t> module_bytes,
+ uint8_t name_section_kind);
+IndirectNameMap DecodeIndirectNameMap(Vector<const uint8_t> module_bytes,
+ uint8_t name_section_kind);
class ModuleDecoderImpl;
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index 5e7637de540..f64a657eb8e 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -923,7 +923,7 @@ void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global,
tagged_globals_->set(global.offset, *value->GetRef());
break;
}
- case kStmt:
+ case kVoid:
case kS128:
case kBottom:
case kI8:
@@ -1268,7 +1268,7 @@ bool InstanceBuilder::ProcessImportedWasmGlobalObject(
DCHECK_LT(global.index, module_->num_imported_mutable_globals);
Handle<Object> buffer;
Address address_or_offset;
- if (global.type.is_reference_type()) {
+ if (global.type.is_reference()) {
static_assert(sizeof(global_object->offset()) <= sizeof(Address),
"The offset into the globals buffer does not fit into "
"the imported_mutable_globals array");
@@ -1347,7 +1347,7 @@ bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
return false;
}
- if (global.type.is_reference_type()) {
+ if (global.type.is_reference()) {
const char* error_message;
if (!wasm::TypecheckJSObject(isolate_, module_, value, global.type,
&error_message)) {
@@ -1605,7 +1605,7 @@ void InstanceBuilder::InitGlobals(Handle<WasmInstanceObject> instance) {
uint32_t old_offset =
module_->globals[global.init.immediate().index].offset;
TRACE("init [globals+%u] = [globals+%d]\n", global.offset, old_offset);
- if (global.type.is_reference_type()) {
+ if (global.type.is_reference()) {
DCHECK(enabled_.has_reftypes());
tagged_globals_->set(new_offset, tagged_globals_->get(old_offset));
} else {
@@ -1686,18 +1686,12 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
Handle<JSObject> exports_object;
MaybeHandle<String> single_function_name;
bool is_asm_js = is_asmjs_module(module_);
- // TODO(clemensb): Remove this #if once this compilation unit is fully
- // excluded from non-wasm builds.
if (is_asm_js) {
-#if V8_ENABLE_WEBASSEMBLY
Handle<JSFunction> object_function = Handle<JSFunction>(
isolate_->native_context()->object_function(), isolate_);
exports_object = isolate_->factory()->NewJSObject(object_function);
single_function_name =
isolate_->factory()->InternalizeUtf8String(AsmJs::kSingleFunctionName);
-#else
- UNREACHABLE();
-#endif
} else {
exports_object = isolate_->factory()->NewJSObjectWithNullProto();
}
@@ -1758,7 +1752,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
if (global.mutability && global.imported) {
Handle<FixedArray> buffers_array(
instance->imported_mutable_globals_buffers(), isolate_);
- if (global.type.is_reference_type()) {
+ if (global.type.is_reference()) {
tagged_buffer = handle(
FixedArray::cast(buffers_array->get(global.index)), isolate_);
// For externref globals we store the relative offset in the
@@ -1782,7 +1776,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
offset = static_cast<uint32_t>(global_addr - backing_store);
}
} else {
- if (global.type.is_reference_type()) {
+ if (global.type.is_reference()) {
tagged_buffer = handle(instance->tagged_globals_buffer(), isolate_);
} else {
untagged_buffer =
@@ -1841,12 +1835,50 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
void InstanceBuilder::InitializeIndirectFunctionTables(
Handle<WasmInstanceObject> instance) {
- for (int i = 0; i < static_cast<int>(module_->tables.size()); ++i) {
- const WasmTable& table = module_->tables[i];
+ for (int table_index = 0;
+ table_index < static_cast<int>(module_->tables.size()); ++table_index) {
+ const WasmTable& table = module_->tables[table_index];
if (IsSubtypeOf(table.type, kWasmFuncRef, module_)) {
WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
- instance, i, table.initial_size);
+ instance, table_index, table.initial_size);
+ }
+
+ if (!table.type.is_defaultable()) {
+ // Function constant is currently the only viable initializer.
+ DCHECK(table.initial_value.kind() == WasmInitExpr::kRefFuncConst);
+ uint32_t func_index = table.initial_value.immediate().index;
+
+ uint32_t sig_id =
+ module_->canonicalized_type_ids[module_->functions[func_index]
+ .sig_index];
+ MaybeHandle<WasmExternalFunction> wasm_external_function =
+ WasmInstanceObject::GetWasmExternalFunction(isolate_, instance,
+ func_index);
+ auto table_object = handle(
+ WasmTableObject::cast(instance->tables().get(table_index)), isolate_);
+ for (uint32_t entry_index = 0; entry_index < table.initial_size;
+ entry_index++) {
+ // Update the local dispatch table first.
+ IndirectFunctionTableEntry(instance, table_index, entry_index)
+ .Set(sig_id, instance, func_index);
+
+ // Update the table object's other dispatch tables.
+ if (wasm_external_function.is_null()) {
+ // No JSFunction entry yet exists for this function. Create a {Tuple2}
+ // holding the information to lazily allocate one.
+ WasmTableObject::SetFunctionTablePlaceholder(
+ isolate_, table_object, entry_index, instance, func_index);
+ } else {
+ table_object->entries().set(
+ entry_index, *wasm_external_function.ToHandleChecked());
+ }
+ // UpdateDispatchTables() updates all other dispatch tables, since
+ // we have not yet added the dispatch table we are currently building.
+ WasmTableObject::UpdateDispatchTables(
+ isolate_, table_object, entry_index,
+ module_->functions[func_index].sig, instance, func_index);
+ }
}
}
}
diff --git a/deps/v8/src/wasm/module-instantiate.h b/deps/v8/src/wasm/module-instantiate.h
index ea245a646e1..baa064f20d7 100644
--- a/deps/v8/src/wasm/module-instantiate.h
+++ b/deps/v8/src/wasm/module-instantiate.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_MODULE_INSTANTIATE_H_
#define V8_WASM_MODULE_INSTANTIATE_H_
diff --git a/deps/v8/src/wasm/object-access.h b/deps/v8/src/wasm/object-access.h
index 9fea179ecf0..ac30f1f1d4a 100644
--- a/deps/v8/src/wasm/object-access.h
+++ b/deps/v8/src/wasm/object-access.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_OBJECT_ACCESS_H_
#define V8_WASM_OBJECT_ACCESS_H_
diff --git a/deps/v8/src/wasm/signature-map.h b/deps/v8/src/wasm/signature-map.h
index d947dcd26d0..31a536cf1b1 100644
--- a/deps/v8/src/wasm/signature-map.h
+++ b/deps/v8/src/wasm/signature-map.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_SIGNATURE_MAP_H_
#define V8_WASM_SIGNATURE_MAP_H_
diff --git a/deps/v8/src/wasm/simd-shuffle.cc b/deps/v8/src/wasm/simd-shuffle.cc
index 1d3a2775b87..dac73a90c4f 100644
--- a/deps/v8/src/wasm/simd-shuffle.cc
+++ b/deps/v8/src/wasm/simd-shuffle.cc
@@ -4,6 +4,8 @@
#include "src/wasm/simd-shuffle.h"
+#include <algorithm>
+
#include "src/common/globals.h"
namespace v8 {
@@ -161,6 +163,12 @@ void SimdShuffle::Pack16Lanes(uint32_t* dst, const uint8_t* shuffle) {
}
}
+bool SimdSwizzle::AllInRangeOrTopBitSet(
+ std::array<uint8_t, kSimd128Size> shuffle) {
+ return std::all_of(shuffle.begin(), shuffle.end(),
+ [](auto i) { return (i < kSimd128Size) || (i & 0x80); });
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
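
The new SimdSwizzle::AllInRangeOrTopBitSet helper validates swizzle immediates with a single std::all_of pass. A minimal standalone equivalent, assuming kSimd128Size is 16:

// Standalone sketch, not V8 code: each lane index must either select a valid
// byte lane (< 16) or have its top bit set (out-of-range lanes yield zero in
// the wasm i8x16.swizzle semantics).
#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>

constexpr std::size_t kSimd128Size = 16;  // assumed value for this sketch

bool AllInRangeOrTopBitSet(const std::array<uint8_t, kSimd128Size>& shuffle) {
  return std::all_of(shuffle.begin(), shuffle.end(),
                     [](uint8_t i) { return i < kSimd128Size || (i & 0x80); });
}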
diff --git a/deps/v8/src/wasm/simd-shuffle.h b/deps/v8/src/wasm/simd-shuffle.h
index 48630fbb801..15728fad125 100644
--- a/deps/v8/src/wasm/simd-shuffle.h
+++ b/deps/v8/src/wasm/simd-shuffle.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_SIMD_SHUFFLE_H_
#define V8_WASM_SIMD_SHUFFLE_H_
@@ -91,6 +95,14 @@ class V8_EXPORT_PRIVATE SimdShuffle {
// Packs 16 bytes of shuffle into an array of 4 uint32_t.
static void Pack16Lanes(uint32_t* dst, const uint8_t* shuffle);
};
+
+class V8_EXPORT_PRIVATE SimdSwizzle {
+ public:
+ // Checks that every immediate is either in range (< kSimd128Size) or has its
+ // top bit set.
+ static bool AllInRangeOrTopBitSet(std::array<uint8_t, kSimd128Size> shuffle);
+};
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/streaming-decoder.h b/deps/v8/src/wasm/streaming-decoder.h
index 4f46a63455a..0dfbf1bf78d 100644
--- a/deps/v8/src/wasm/streaming-decoder.h
+++ b/deps/v8/src/wasm/streaming-decoder.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_STREAMING_DECODER_H_
#define V8_WASM_STREAMING_DECODER_H_
diff --git a/deps/v8/src/wasm/struct-types.h b/deps/v8/src/wasm/struct-types.h
index 33af7de6bd6..1bc0a1666fd 100644
--- a/deps/v8/src/wasm/struct-types.h
+++ b/deps/v8/src/wasm/struct-types.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_STRUCT_TYPES_H_
#define V8_WASM_STRUCT_TYPES_H_
@@ -70,6 +74,7 @@ class StructType : public ZoneObject {
uint32_t offset = field(0).element_size_bytes();
for (uint32_t i = 1; i < field_count(); i++) {
uint32_t field_size = field(i).element_size_bytes();
+ // TODO(jkummerow): Don't round up to more than kTaggedSize-alignment.
offset = RoundUp(offset, field_size);
field_offsets_[i - 1] = offset;
offset += field_size;
diff --git a/deps/v8/src/wasm/value-type.h b/deps/v8/src/wasm/value-type.h
index 2dbd337b0aa..983e2090b64 100644
--- a/deps/v8/src/wasm/value-type.h
+++ b/deps/v8/src/wasm/value-type.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_VALUE_TYPE_H_
#define V8_WASM_VALUE_TYPE_H_
@@ -9,6 +13,7 @@
#include "src/base/optional.h"
#include "src/codegen/machine-type.h"
#include "src/wasm/wasm-constants.h"
+#include "src/wasm/wasm-limits.h"
namespace v8 {
namespace internal {
@@ -37,7 +42,7 @@ class Simd128;
V(I16, 1, I16, Int16, 'h', "i16")
#define FOREACH_VALUE_TYPE(V) \
- V(Stmt, -1, Void, None, 'v', "<stmt>") \
+ V(Void, -1, Void, None, 'v', "<void>") \
FOREACH_NUMERIC_VALUE_TYPE(V) \
V(Rtt, kTaggedSizeLog2, Rtt, TaggedPointer, 't', "rtt") \
V(RttWithDepth, kTaggedSizeLog2, RttWithDepth, TaggedPointer, 'k', "rtt") \
@@ -179,12 +184,12 @@ enum ValueKind : uint8_t {
#undef DEF_ENUM
};
-constexpr bool is_reference_type(ValueKind kind) {
+constexpr bool is_reference(ValueKind kind) {
return kind == kRef || kind == kOptRef || kind == kRtt ||
kind == kRttWithDepth;
}
-constexpr bool is_object_reference_type(ValueKind kind) {
+constexpr bool is_object_reference(ValueKind kind) {
return kind == kRef || kind == kOptRef;
}
@@ -257,7 +262,7 @@ constexpr bool is_rtt(ValueKind kind) {
}
constexpr bool is_defaultable(ValueKind kind) {
- CONSTEXPR_DCHECK(kind != kBottom && kind != kStmt);
+ CONSTEXPR_DCHECK(kind != kBottom && kind != kVoid);
return kind != kRef && !is_rtt(kind);
}
@@ -270,7 +275,7 @@ constexpr bool is_defaultable(ValueKind kind) {
class ValueType {
public:
/******************************* Constructors *******************************/
- constexpr ValueType() : bit_field_(KindField::encode(kStmt)) {}
+ constexpr ValueType() : bit_field_(KindField::encode(kVoid)) {}
static constexpr ValueType Primitive(ValueKind kind) {
CONSTEXPR_DCHECK(kind == kBottom || kind <= kI16);
return ValueType(KindField::encode(kind));
@@ -305,12 +310,10 @@ class ValueType {
}
/******************************** Type checks *******************************/
- constexpr bool is_reference_type() const {
- return wasm::is_reference_type(kind());
- }
+ constexpr bool is_reference() const { return wasm::is_reference(kind()); }
- constexpr bool is_object_reference_type() const {
- return wasm::is_object_reference_type(kind());
+ constexpr bool is_object_reference() const {
+ return wasm::is_object_reference(kind());
}
constexpr bool is_nullable() const { return kind() == kOptRef; }
@@ -324,7 +327,7 @@ class ValueType {
constexpr bool has_depth() const { return kind() == kRttWithDepth; }
constexpr bool has_index() const {
- return is_rtt() || (is_object_reference_type() && heap_type().is_index());
+ return is_rtt() || (is_object_reference() && heap_type().is_index());
}
constexpr bool is_defaultable() const { return wasm::is_defaultable(kind()); }
@@ -340,12 +343,12 @@ class ValueType {
/***************************** Field Accessors ******************************/
constexpr ValueKind kind() const { return KindField::decode(bit_field_); }
constexpr HeapType::Representation heap_representation() const {
- CONSTEXPR_DCHECK(is_object_reference_type());
+ CONSTEXPR_DCHECK(is_object_reference());
return static_cast<HeapType::Representation>(
HeapTypeField::decode(bit_field_));
}
constexpr HeapType heap_type() const {
- CONSTEXPR_DCHECK(is_object_reference_type());
+ CONSTEXPR_DCHECK(is_object_reference());
return HeapType(heap_representation());
}
constexpr uint8_t depth() const {
@@ -357,7 +360,7 @@ class ValueType {
return HeapTypeField::decode(bit_field_);
}
constexpr Nullability nullability() const {
- CONSTEXPR_DCHECK(is_object_reference_type());
+ CONSTEXPR_DCHECK(is_object_reference());
return kind() == kOptRef ? kNullable : kNonNullable;
}
@@ -447,7 +450,7 @@ class ValueType {
default:
return kRefCode;
}
- case kStmt:
+ case kVoid:
return kVoidCode;
case kRtt:
return kRttCode;
@@ -532,6 +535,8 @@ class ValueType {
static_assert(sizeof(ValueType) <= kUInt32Size,
"ValueType is small and can be passed by value");
+static_assert(ValueType::kLastUsedBit < 8 * sizeof(ValueType) - kSmiTagSize,
+ "ValueType has space to be encoded in a Smi");
inline size_t hash_value(ValueType type) {
return static_cast<size_t>(type.kind());
@@ -550,7 +555,7 @@ constexpr ValueType kWasmF64 = ValueType::Primitive(kF64);
constexpr ValueType kWasmS128 = ValueType::Primitive(kS128);
constexpr ValueType kWasmI8 = ValueType::Primitive(kI8);
constexpr ValueType kWasmI16 = ValueType::Primitive(kI16);
-constexpr ValueType kWasmStmt = ValueType::Primitive(kStmt);
+constexpr ValueType kWasmVoid = ValueType::Primitive(kVoid);
constexpr ValueType kWasmBottom = ValueType::Primitive(kBottom);
// Established reference-type proposal shorthands.
constexpr ValueType kWasmFuncRef = ValueType::Ref(HeapType::kFunc, kNullable);
@@ -562,6 +567,10 @@ constexpr ValueType kWasmDataRef =
ValueType::Ref(HeapType::kData, kNonNullable);
constexpr ValueType kWasmAnyRef = ValueType::Ref(HeapType::kAny, kNullable);
+// This is used in wasm.tq.
+constexpr ValueType kWasmExternNonNullableRef =
+ ValueType::Ref(HeapType::kExtern, kNonNullable);
+
#define FOREACH_WASMVALUE_CTYPES(V) \
V(kI32, int32_t) \
V(kI64, int64_t) \
diff --git a/deps/v8/src/wasm/wasm-arguments.h b/deps/v8/src/wasm/wasm-arguments.h
index e6f212b2d90..4e59a232641 100644
--- a/deps/v8/src/wasm/wasm-arguments.h
+++ b/deps/v8/src/wasm/wasm-arguments.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_WASM_ARGUMENTS_H_
#define V8_WASM_WASM_ARGUMENTS_H_
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index 86726dcaf0e..d9225103bbd 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -28,6 +28,7 @@
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-debug.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-module-sourcemap.h"
#include "src/wasm/wasm-module.h"
@@ -267,9 +268,13 @@ void WasmCode::LogCode(Isolate* isolate, const char* source_url,
"wasm-function[%d]", index()));
name = VectorOf(name_buffer);
}
+ // TODO(clemensb): Remove this #if once this compilation unit is excluded in
+ // no-wasm builds.
+#if V8_ENABLE_WEBASSEMBLY
int code_offset = module->functions[index_].code.offset();
PROFILE(isolate, CodeCreateEvent(CodeEventListener::FUNCTION_TAG, this, name,
source_url, code_offset, script_id));
+#endif // V8_ENABLE_WEBASSEMBLY
if (!source_positions().empty()) {
LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent(instruction_start(),
@@ -416,8 +421,13 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
if (entry.trampoline_pc() != SafepointEntry::kNoTrampolinePC) {
os << " trampoline: " << std::hex << entry.trampoline_pc() << std::dec;
}
- if (entry.has_deoptimization_index()) {
- os << " deopt: " << std::setw(6) << entry.deoptimization_index();
+ if (entry.has_register_bits()) {
+ os << " registers: ";
+ uint32_t register_bits = entry.register_bits();
+ int bits = 32 - base::bits::CountLeadingZeros32(register_bits);
+ for (int i = bits - 1; i >= 0; --i) {
+ os << ((register_bits >> i) & 1);
+ }
}
os << "\n";
}
@@ -862,11 +872,10 @@ void NativeModule::LogWasmCodes(Isolate* isolate, Script script) {
TRACE_EVENT1("v8.wasm", "wasm.LogWasmCodes", "functions",
module_->num_declared_functions);
- Object source_url_obj = script.source_url();
- DCHECK(source_url_obj.IsString() || source_url_obj.IsUndefined());
+ Object url_obj = script.name();
+ DCHECK(url_obj.IsString() || url_obj.IsUndefined());
std::unique_ptr<char[]> source_url =
- source_url_obj.IsString() ? String::cast(source_url_obj).ToCString()
- : nullptr;
+ url_obj.IsString() ? String::cast(url_obj).ToCString() : nullptr;
// Log all owned code, not just the current entries in the code table. This
// will also include import wrappers.
@@ -892,7 +901,7 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
reloc_info = OwnedVector<byte>::Of(
Vector<byte>{code->relocation_start(), relocation_size});
}
- Handle<ByteArray> source_pos_table(code->SourcePositionTable(),
+ Handle<ByteArray> source_pos_table(code->source_position_table(),
code->GetIsolate());
OwnedVector<byte> source_pos =
OwnedVector<byte>::NewForOverwrite(source_pos_table->length());
@@ -1127,68 +1136,98 @@ WasmCode::Kind GetCodeKind(const WasmCompilationResult& result) {
}
}
-WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
+WasmCode* NativeModule::PublishCodeLocked(
+ std::unique_ptr<WasmCode> owned_code) {
// The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
DCHECK(!allocation_mutex_.TryLock());
+ WasmCode* code = owned_code.get();
+ new_owned_code_.emplace_back(std::move(owned_code));
+
// Add the code to the surrounding code ref scope, so the returned pointer is
// guaranteed to be valid.
- WasmCodeRefScope::AddRef(code.get());
-
- if (!code->IsAnonymous() &&
- code->index() >= module_->num_imported_functions) {
- DCHECK_LT(code->index(), num_functions());
-
- code->RegisterTrapHandlerData();
-
- // Assume an order of execution tiers that represents the quality of their
- // generated code.
- static_assert(ExecutionTier::kNone < ExecutionTier::kLiftoff &&
- ExecutionTier::kLiftoff < ExecutionTier::kTurbofan,
- "Assume an order on execution tiers");
-
- uint32_t slot_idx = declared_function_index(module(), code->index());
- WasmCode* prior_code = code_table_[slot_idx];
- // If we are tiered down, install all debugging code (except for stepping
- // code, which is only used for a single frame and never installed in the
- // code table of jump table). Otherwise, install code if it was compiled
- // with a higher tier.
- static_assert(
- kForDebugging > kNoDebugging && kWithBreakpoints > kForDebugging,
- "for_debugging is ordered");
- const bool update_code_table =
- // Never install stepping code.
- code->for_debugging() != kForStepping &&
- (!prior_code ||
- (tiering_state_ == kTieredDown
- // Tiered down: Install breakpoints over normal debug code.
- ? prior_code->for_debugging() <= code->for_debugging()
- // Tiered up: Install if the tier is higher than before.
- : prior_code->tier() < code->tier()));
- if (update_code_table) {
- code_table_[slot_idx] = code.get();
- if (prior_code) {
- WasmCodeRefScope::AddRef(prior_code);
- // The code is added to the current {WasmCodeRefScope}, hence the ref
- // count cannot drop to zero here.
- prior_code->DecRefOnLiveCode();
- }
-
- PatchJumpTablesLocked(slot_idx, code->instruction_start());
- } else {
- // The code tables does not hold a reference to the code, hence decrement
- // the initial ref count of 1. The code was added to the
- // {WasmCodeRefScope} though, so it cannot die here.
- code->DecRefOnLiveCode();
- }
- if (!code->for_debugging() && tiering_state_ == kTieredDown &&
- code->tier() == ExecutionTier::kTurbofan) {
- liftoff_bailout_count_.fetch_add(1);
+ WasmCodeRefScope::AddRef(code);
+
+ if (code->IsAnonymous() || code->index() < module_->num_imported_functions) {
+ return code;
+ }
+
+ DCHECK_LT(code->index(), num_functions());
+
+ code->RegisterTrapHandlerData();
+
+ // Put the code in the debugging cache, if needed.
+ if (V8_UNLIKELY(cached_code_)) InsertToCodeCache(code);
+
+ // Assume an order of execution tiers that represents the quality of their
+ // generated code.
+ static_assert(ExecutionTier::kNone < ExecutionTier::kLiftoff &&
+ ExecutionTier::kLiftoff < ExecutionTier::kTurbofan,
+ "Assume an order on execution tiers");
+
+ uint32_t slot_idx = declared_function_index(module(), code->index());
+ WasmCode* prior_code = code_table_[slot_idx];
+ // If we are tiered down, install all debugging code (except for stepping
+ // code, which is only used for a single frame and never installed in the
+ // code table or jump table). Otherwise, install code if it was compiled
+ // with a higher tier.
+ static_assert(
+ kForDebugging > kNoDebugging && kWithBreakpoints > kForDebugging,
+ "for_debugging is ordered");
+ const bool update_code_table =
+ // Never install stepping code.
+ code->for_debugging() != kForStepping &&
+ (!prior_code ||
+ (tiering_state_ == kTieredDown
+ // Tiered down: Install breakpoints over normal debug code.
+ ? prior_code->for_debugging() <= code->for_debugging()
+ // Tiered up: Install if the tier is higher than before.
+ : prior_code->tier() < code->tier()));
+ if (update_code_table) {
+ code_table_[slot_idx] = code;
+ if (prior_code) {
+ WasmCodeRefScope::AddRef(prior_code);
+ // The code is added to the current {WasmCodeRefScope}, hence the ref
+ // count cannot drop to zero here.
+ prior_code->DecRefOnLiveCode();
}
+
+ PatchJumpTablesLocked(slot_idx, code->instruction_start());
+ } else {
+ // The code table does not hold a reference to the code, hence decrement
+ // the initial ref count of 1. The code was added to the
+ // {WasmCodeRefScope} though, so it cannot die here.
+ code->DecRefOnLiveCode();
}
- WasmCode* result = code.get();
- new_owned_code_.emplace_back(std::move(code));
- return result;
+ if (!code->for_debugging() && tiering_state_ == kTieredDown &&
+ code->tier() == ExecutionTier::kTurbofan) {
+ liftoff_bailout_count_.fetch_add(1);
+ }
+
+ return code;
+}
+
+void NativeModule::ReinstallDebugCode(WasmCode* code) {
+ base::MutexGuard lock(&allocation_mutex_);
+
+ DCHECK_EQ(this, code->native_module());
+ DCHECK_EQ(kWithBreakpoints, code->for_debugging());
+ DCHECK(!code->IsAnonymous());
+ DCHECK_LE(module_->num_imported_functions, code->index());
+ DCHECK_LT(code->index(), num_functions());
+ DCHECK_EQ(kTieredDown, tiering_state_);
+
+ uint32_t slot_idx = declared_function_index(module(), code->index());
+ if (WasmCode* prior_code = code_table_[slot_idx]) {
+ WasmCodeRefScope::AddRef(prior_code);
+ // The code is added to the current {WasmCodeRefScope}, hence the ref
+ // count cannot drop to zero here.
+ prior_code->DecRefOnLiveCode();
+ }
+ code_table_[slot_idx] = code;
+ code->IncRef();
+
+ PatchJumpTablesLocked(slot_idx, code->instruction_start());
}
Vector<uint8_t> NativeModule::AllocateForDeserializedCode(
@@ -1484,6 +1523,23 @@ void NativeModule::TransferNewOwnedCodeLocked() const {
new_owned_code_.clear();
}
+void NativeModule::InsertToCodeCache(WasmCode* code) {
+ // The caller holds {allocation_mutex_}.
+ DCHECK(!allocation_mutex_.TryLock());
+ DCHECK_NOT_NULL(cached_code_);
+ if (code->IsAnonymous()) return;
+ // Only cache Liftoff debugging code or TurboFan code (no breakpoints or
+ // stepping).
+ if (code->tier() == ExecutionTier::kLiftoff &&
+ code->for_debugging() != kForDebugging) {
+ return;
+ }
+ auto key = std::make_pair(code->tier(), code->index());
+ if (cached_code_->insert(std::make_pair(key, code)).second) {
+ code->IncRef();
+ }
+}
+
WasmCode* NativeModule::Lookup(Address pc) const {
base::MutexGuard lock(&allocation_mutex_);
if (!new_owned_code_.empty()) TransferNewOwnedCodeLocked();
@@ -2000,23 +2056,55 @@ void NativeModule::RecompileForTiering() {
{
base::MutexGuard lock(&allocation_mutex_);
current_state = tiering_state_;
+
+ // Initialize {cached_code_} to signal that this cache should get filled
+ // from now on.
+ if (!cached_code_) {
+ cached_code_ = std::make_unique<
+ std::map<std::pair<ExecutionTier, int>, WasmCode*>>();
+ // Fill with existing code.
+ for (auto& code_entry : owned_code_) {
+ InsertToCodeCache(code_entry.second.get());
+ }
+ }
}
RecompileNativeModule(this, current_state);
}
std::vector<int> NativeModule::FindFunctionsToRecompile(
TieringState new_tiering_state) {
+ WasmCodeRefScope code_ref_scope;
base::MutexGuard guard(&allocation_mutex_);
std::vector<int> function_indexes;
int imported = module()->num_imported_functions;
int declared = module()->num_declared_functions;
+ const bool tier_down = new_tiering_state == kTieredDown;
for (int slot_index = 0; slot_index < declared; ++slot_index) {
int function_index = imported + slot_index;
- WasmCode* code = code_table_[slot_index];
- bool code_is_good = new_tiering_state == kTieredDown
- ? code && code->for_debugging()
- : code && code->tier() == ExecutionTier::kTurbofan;
- if (!code_is_good) function_indexes.push_back(function_index);
+ WasmCode* old_code = code_table_[slot_index];
+ bool code_is_good =
+ tier_down ? old_code && old_code->for_debugging()
+ : old_code && old_code->tier() == ExecutionTier::kTurbofan;
+ if (code_is_good) continue;
+ DCHECK_NOT_NULL(cached_code_);
+ auto cache_it = cached_code_->find(std::make_pair(
+ tier_down ? ExecutionTier::kLiftoff : ExecutionTier::kTurbofan,
+ function_index));
+ if (cache_it != cached_code_->end()) {
+ WasmCode* cached_code = cache_it->second;
+ if (old_code) {
+ WasmCodeRefScope::AddRef(old_code);
+ // The code is added to the current {WasmCodeRefScope}, hence the ref
+ // count cannot drop to zero here.
+ old_code->DecRefOnLiveCode();
+ }
+ code_table_[slot_index] = cached_code;
+ PatchJumpTablesLocked(slot_index, cached_code->instruction_start());
+ cached_code->IncRef();
+ continue;
+ }
+ // Otherwise add the function to the set of functions to recompile.
+ function_indexes.push_back(function_index);
}
return function_indexes;
}
@@ -2151,6 +2239,19 @@ void WasmCodeRefScope::AddRef(WasmCode* code) {
code->IncRef();
}
+Builtins::Name RuntimeStubIdToBuiltinName(WasmCode::RuntimeStubId stub_id) {
+#define RUNTIME_STUB_NAME(Name) Builtins::k##Name,
+#define RUNTIME_STUB_NAME_TRAP(Name) Builtins::kThrowWasm##Name,
+ constexpr Builtins::Name builtin_names[] = {
+ WASM_RUNTIME_STUB_LIST(RUNTIME_STUB_NAME, RUNTIME_STUB_NAME_TRAP)};
+#undef RUNTIME_STUB_NAME
+#undef RUNTIME_STUB_NAME_TRAP
+ STATIC_ASSERT(arraysize(builtin_names) == WasmCode::kRuntimeStubCount);
+
+ DCHECK_GT(arraysize(builtin_names), stub_id);
+ return builtin_names[stub_id];
+}
+
const char* GetRuntimeStubName(WasmCode::RuntimeStubId stub_id) {
#define RUNTIME_STUB_NAME(Name) #Name,
#define RUNTIME_STUB_NAME_TRAP(Name) "ThrowWasm" #Name,
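
The RuntimeStubIdToBuiltinName addition above builds its lookup table with the WASM_RUNTIME_STUB_LIST X-macro, so the table stays in sync with the enum by construction. A self-contained sketch of the same pattern with made-up names:

// Standalone sketch, not V8 code: generate a constexpr mapping table from a
// list macro so the table cannot get out of sync with the enum.
#define MY_STUB_LIST(V) V(Alpha) V(Beta) V(Gamma)

enum StubId { kAlpha, kBeta, kGamma, kStubCount };
enum BuiltinId { kBuiltinAlpha, kBuiltinBeta, kBuiltinGamma };

BuiltinId StubToBuiltin(StubId id) {
#define STUB_NAME(Name) kBuiltin##Name,
  constexpr BuiltinId table[] = {MY_STUB_LIST(STUB_NAME)};
#undef STUB_NAME
  static_assert(sizeof(table) / sizeof(table[0]) == kStubCount,
                "one table entry per stub");
  return table[id];
}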
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 0924fd17b5a..2996a6e2c67 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_WASM_CODE_MANAGER_H_
#define V8_WASM_WASM_CODE_MANAGER_H_
@@ -16,7 +20,7 @@
#include "src/base/bit-field.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
-#include "src/builtins/builtins-definitions.h"
+#include "src/builtins/builtins.h"
#include "src/handles/handles.h"
#include "src/tasks/operations-barrier.h"
#include "src/trap-handler/trap-handler.h"
@@ -63,10 +67,13 @@ struct WasmModule;
V(WasmI32AtomicWait64) \
V(WasmI64AtomicWait32) \
V(WasmI64AtomicWait64) \
+ V(WasmGetOwnProperty) \
V(WasmRefFunc) \
V(WasmMemoryGrow) \
V(WasmTableInit) \
V(WasmTableCopy) \
+ V(WasmTableFill) \
+ V(WasmTableGrow) \
V(WasmTableGet) \
V(WasmTableSet) \
V(WasmStackGuard) \
@@ -509,6 +516,13 @@ class V8_EXPORT_PRIVATE NativeModule final {
WasmCode* PublishCode(std::unique_ptr<WasmCode>);
std::vector<WasmCode*> PublishCode(Vector<std::unique_ptr<WasmCode>>);
+ // ReinstallDebugCode does a subset of PublishCode: It installs the code in
+ // the code table and patches the jump table. The given code must be debug
+ // code (with breakpoints) and must be owned by this {NativeModule} already.
+ // This method is used to re-instantiate code that was removed from the code
+ // table and jump table via another {PublishCode}.
+ void ReinstallDebugCode(WasmCode*);
+
Vector<uint8_t> AllocateForDeserializedCode(size_t total_code_size);
std::unique_ptr<WasmCode> AddDeserializedCode(
@@ -735,6 +749,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
// Transfer owned code from {new_owned_code_} to {owned_code_}.
void TransferNewOwnedCodeLocked() const;
+ // Add code to the code cache, if it meets criteria for being cached and we do
+ // not have code in the cache yet.
+ void InsertToCodeCache(WasmCode* code);
+
// -- Fields of {NativeModule} start here.
WasmEngine* const engine_;
@@ -820,6 +838,12 @@ class V8_EXPORT_PRIVATE NativeModule final {
TieringState tiering_state_ = kTieredUp;
+ // Cache both baseline and top-tier code if we are debugging, to speed up
+ // repeated enabling/disabling of the debugger or profiler.
+ // Maps <tier, function_index> to WasmCode.
+ std::unique_ptr<std::map<std::pair<ExecutionTier, int>, WasmCode*>>
+ cached_code_;
+
// End of fields protected by {allocation_mutex_}.
//////////////////////////////////////////////////////////////////////////////
@@ -972,6 +996,7 @@ class GlobalWasmCodeRef {
const std::shared_ptr<NativeModule> native_module_;
};
+Builtins::Name RuntimeStubIdToBuiltinName(WasmCode::RuntimeStubId);
const char* GetRuntimeStubName(WasmCode::RuntimeStubId);
} // namespace wasm
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index 79356a203d6..a02ea78e850 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_WASM_CONSTANTS_H_
#define V8_WASM_WASM_CONSTANTS_H_
@@ -109,7 +113,21 @@ constexpr uint8_t kDefaultCompilationHint = 0x0;
constexpr uint8_t kNoCompilationHint = kMaxUInt8;
// Binary encoding of name section kinds.
-enum NameSectionKindCode : uint8_t { kModule = 0, kFunction = 1, kLocal = 2 };
+enum NameSectionKindCode : uint8_t {
+ kModule = 0,
+ kFunction = 1,
+ kLocal = 2,
+ // https://github.com/WebAssembly/extended-name-section/
+ kLabel = 3,
+ kType = 4,
+ kTable = 5,
+ kMemory = 6,
+ kGlobal = 7,
+ kElementSegment = 8,
+ kDataSegment = 9,
+ // https://github.com/WebAssembly/gc/issues/193
+ kField = 10
+};
constexpr size_t kWasmPageSize = 0x10000;
constexpr uint32_t kWasmPageSizeLog2 = 16;
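
The extended NameSectionKindCode values added above follow the extended-name-section and GC proposals. A small standalone helper showing how a decoder might turn those kind codes into labels for logging (values taken from the enum in this hunk):

// Standalone sketch, not V8 code: map name-section kind codes to labels.
#include <cstdint>

const char* NameSectionKindToString(uint8_t kind) {
  switch (kind) {
    case 0:  return "module";
    case 1:  return "function";
    case 2:  return "local";
    case 3:  return "label";
    case 4:  return "type";
    case 5:  return "table";
    case 6:  return "memory";
    case 7:  return "global";
    case 8:  return "element segment";
    case 9:  return "data segment";
    case 10: return "field";
    default: return "<unknown>";
  }
}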
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 6b67f6029d2..22872f5d884 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -20,6 +20,7 @@
#include "src/wasm/module-decoder.h"
#include "src/wasm/value-type.h"
#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -95,7 +96,7 @@ void DebugSideTable::Entry::Print(std::ostream& os) const {
os << std::setw(6) << std::hex << pc_offset_ << std::dec << " stack height "
<< stack_height_ << " [";
for (auto& value : changed_values_) {
- os << " " << name(value.kind) << ":";
+ os << " " << value.type.name() << ":";
switch (value.storage) {
case kConstant:
os << "const#" << value.i32_const;
@@ -126,10 +127,10 @@ class DebugInfoImpl {
}
WasmValue GetLocalValue(int local, Address pc, Address fp,
- Address debug_break_fp) {
+ Address debug_break_fp, Isolate* isolate) {
FrameInspectionScope scope(this, pc);
return GetValue(scope.debug_side_table, scope.debug_side_table_entry, local,
- fp, debug_break_fp);
+ fp, debug_break_fp, isolate);
}
int GetStackDepth(Address pc) {
@@ -141,13 +142,13 @@ class DebugInfoImpl {
}
WasmValue GetStackValue(int index, Address pc, Address fp,
- Address debug_break_fp) {
+ Address debug_break_fp, Isolate* isolate) {
FrameInspectionScope scope(this, pc);
int num_locals = scope.debug_side_table->num_locals();
int value_count = scope.debug_side_table_entry->stack_height();
if (num_locals + index >= value_count) return {};
return GetValue(scope.debug_side_table, scope.debug_side_table_entry,
- num_locals + index, fp, debug_break_fp);
+ num_locals + index, fp, debug_break_fp, isolate);
}
const WasmFunction& GetFunctionAtAddress(Address pc) {
@@ -189,25 +190,40 @@ class DebugInfoImpl {
return {};
}
+ WireBytesRef GetTypeName(int type_index) {
+ base::MutexGuard guard(&mutex_);
+ if (!type_names_) {
+ type_names_ = std::make_unique<NameMap>(DecodeNameMap(
+ native_module_->wire_bytes(), NameSectionKindCode::kType));
+ }
+ return type_names_->GetName(type_index);
+ }
+
WireBytesRef GetLocalName(int func_index, int local_index) {
base::MutexGuard guard(&mutex_);
if (!local_names_) {
- local_names_ = std::make_unique<LocalNames>(
- DecodeLocalNames(native_module_->wire_bytes()));
+ local_names_ = std::make_unique<IndirectNameMap>(DecodeIndirectNameMap(
+ native_module_->wire_bytes(), NameSectionKindCode::kLocal));
}
return local_names_->GetName(func_index, local_index);
}
- // If the top frame is a Wasm frame and its position is not in the list of
- // breakpoints, return that position. Return 0 otherwise.
+ WireBytesRef GetFieldName(int struct_index, int field_index) {
+ base::MutexGuard guard(&mutex_);
+ if (!field_names_) {
+ field_names_ = std::make_unique<IndirectNameMap>(DecodeIndirectNameMap(
+ native_module_->wire_bytes(), NameSectionKindCode::kField));
+ }
+ return field_names_->GetName(struct_index, field_index);
+ }
+
+ // If the frame position is not in the list of breakpoints, return that
+ // position. Return 0 otherwise.
// This is used to generate a "dead breakpoint" in Liftoff, which is necessary
// for OSR to find the correct return address.
- int DeadBreakpoint(int func_index, std::vector<int>& breakpoints,
- Isolate* isolate) {
- StackTraceFrameIterator it(isolate);
- if (it.done() || !it.is_wasm()) return 0;
- WasmFrame* frame = WasmFrame::cast(it.frame());
- const auto& function = native_module_->module()->functions[func_index];
+ int DeadBreakpoint(WasmFrame* frame, Vector<const int> breakpoints) {
+ const auto& function =
+ native_module_->module()->functions[frame->function_index()];
int offset = frame->position() - function.code.offset();
if (std::binary_search(breakpoints.begin(), breakpoints.end(), offset)) {
return 0;
@@ -215,10 +231,43 @@ class DebugInfoImpl {
return offset;
}
+ // Find the dead breakpoint (see above) for the top wasm frame, if that frame
+ // is in the function of the given index.
+ int DeadBreakpoint(int func_index, Vector<const int> breakpoints,
+ Isolate* isolate) {
+ StackTraceFrameIterator it(isolate);
+ if (it.done() || !it.is_wasm()) return 0;
+ auto* wasm_frame = WasmFrame::cast(it.frame());
+ if (static_cast<int>(wasm_frame->function_index()) != func_index) return 0;
+ return DeadBreakpoint(wasm_frame, breakpoints);
+ }
+
WasmCode* RecompileLiftoffWithBreakpoints(int func_index,
Vector<const int> offsets,
int dead_breakpoint) {
DCHECK(!mutex_.TryLock()); // Mutex is held externally.
+
+ ForDebugging for_debugging = offsets.size() == 1 && offsets[0] == 0
+ ? kForStepping
+ : kWithBreakpoints;
+
+ // Check the cache first.
+ for (auto begin = cached_debugging_code_.begin(), it = begin,
+ end = cached_debugging_code_.end();
+ it != end; ++it) {
+ if (it->func_index == func_index &&
+ it->breakpoint_offsets.as_vector() == offsets &&
+ it->dead_breakpoint == dead_breakpoint) {
+ // Rotate the cache entry to the front (for LRU).
+ for (; it != begin; --it) std::iter_swap(it, it - 1);
+ if (for_debugging == kWithBreakpoints) {
+ // Re-install the code, in case it was replaced in the meantime.
+ native_module_->ReinstallDebugCode(it->code);
+ }
+ return it->code;
+ }
+ }
+
// Recompile the function with Liftoff, setting the new breakpoints.
// Not thread-safe. The caller is responsible for locking {mutex_}.
CompilationEnv env = native_module_->CreateCompilationEnv();
@@ -229,9 +278,6 @@ class DebugInfoImpl {
wire_bytes.begin() + function->code.end_offset()};
std::unique_ptr<DebugSideTable> debug_sidetable;
- ForDebugging for_debugging = offsets.size() == 1 && offsets[0] == 0
- ? kForStepping
- : kWithBreakpoints;
// Debug side tables for stepping are generated lazily.
bool generate_debug_sidetable = for_debugging == kWithBreakpoints;
Counters* counters = nullptr;
@@ -255,6 +301,23 @@ class DebugInfoImpl {
debug_side_tables_.emplace(new_code, std::move(debug_sidetable));
}
+ // Insert new code into the cache. Insert before existing elements for LRU.
+ cached_debugging_code_.insert(
+ cached_debugging_code_.begin(),
+ CachedDebuggingCode{func_index, OwnedVector<int>::Of(offsets),
+ dead_breakpoint, new_code});
+ // Increase the ref count (for the cache entry).
+ new_code->IncRef();
+ // Evict the least recently used entry if the cache has grown too large.
+ if (cached_debugging_code_.size() > kMaxCachedDebuggingCode) {
+ // Put the code in the surrounding CodeRefScope to delay deletion until
+ // after the mutex is released.
+ WasmCodeRefScope::AddRef(cached_debugging_code_.back().code);
+ cached_debugging_code_.back().code->DecRefOnLiveCode();
+ cached_debugging_code_.pop_back();
+ }
+ DCHECK_GE(kMaxCachedDebuggingCode, cached_debugging_code_.size());
+
return new_code;
}
@@ -300,7 +363,7 @@ class DebugInfoImpl {
} else {
all_breakpoints.insert(insertion_point, offset);
int dead_breakpoint =
- DeadBreakpoint(func_index, all_breakpoints, isolate);
+ DeadBreakpoint(func_index, VectorOf(all_breakpoints), isolate);
new_code = RecompileLiftoffWithBreakpoints(
func_index, VectorOf(all_breakpoints), dead_breakpoint);
}
@@ -356,6 +419,19 @@ class DebugInfoImpl {
FloodWithBreakpoints(frame, kAfterWasmCall);
}
+ void ClearStepping(WasmFrame* frame) {
+ WasmCodeRefScope wasm_code_ref_scope;
+ base::MutexGuard guard(&mutex_);
+ auto* code = frame->wasm_code();
+ if (code->for_debugging() != kForStepping) return;
+ int func_index = code->index();
+ std::vector<int> breakpoints = FindAllBreakpoints(func_index);
+ int dead_breakpoint = DeadBreakpoint(frame, VectorOf(breakpoints));
+ WasmCode* new_code = RecompileLiftoffWithBreakpoints(
+ func_index, VectorOf(breakpoints), dead_breakpoint);
+ UpdateReturnAddress(frame, new_code, kAfterBreakpoint);
+ }
+
void ClearStepping(Isolate* isolate) {
base::MutexGuard guard(&mutex_);
auto it = per_isolate_data_.find(isolate);
@@ -397,7 +473,8 @@ class DebugInfoImpl {
// If the breakpoint is still set in another isolate, don't remove it.
DCHECK(std::is_sorted(remaining.begin(), remaining.end()));
if (std::binary_search(remaining.begin(), remaining.end(), offset)) return;
- int dead_breakpoint = DeadBreakpoint(func_index, remaining, isolate);
+ int dead_breakpoint =
+ DeadBreakpoint(func_index, VectorOf(remaining), isolate);
UpdateBreakpoints(func_index, VectorOf(remaining), isolate,
isolate_data.stepping_frame, dead_breakpoint);
}
@@ -506,13 +583,13 @@ class DebugInfoImpl {
WasmValue GetValue(const DebugSideTable* debug_side_table,
const DebugSideTable::Entry* debug_side_table_entry,
int index, Address stack_frame_base,
- Address debug_break_fp) const {
+ Address debug_break_fp, Isolate* isolate) const {
const auto* value =
debug_side_table->FindValue(debug_side_table_entry, index);
if (value->is_constant()) {
- DCHECK(value->kind == kI32 || value->kind == kI64);
- return value->kind == kI32 ? WasmValue(value->i32_const)
- : WasmValue(int64_t{value->i32_const});
+ DCHECK(value->type == kWasmI32 || value->type == kWasmI64);
+ return value->type == kWasmI32 ? WasmValue(value->i32_const)
+ : WasmValue(int64_t{value->i32_const});
}
if (value->is_register()) {
@@ -523,16 +600,24 @@ class DebugInfoImpl {
reg.code());
};
if (reg.is_gp_pair()) {
- DCHECK_EQ(kI64, value->kind);
+ DCHECK_EQ(kWasmI64, value->type);
uint32_t low_word = ReadUnalignedValue<uint32_t>(gp_addr(reg.low_gp()));
uint32_t high_word =
ReadUnalignedValue<uint32_t>(gp_addr(reg.high_gp()));
return WasmValue((uint64_t{high_word} << 32) | low_word);
}
if (reg.is_gp()) {
- return value->kind == kI32
- ? WasmValue(ReadUnalignedValue<uint32_t>(gp_addr(reg.gp())))
- : WasmValue(ReadUnalignedValue<uint64_t>(gp_addr(reg.gp())));
+ if (value->type == kWasmI32) {
+ return WasmValue(ReadUnalignedValue<uint32_t>(gp_addr(reg.gp())));
+ } else if (value->type == kWasmI64) {
+ return WasmValue(ReadUnalignedValue<uint64_t>(gp_addr(reg.gp())));
+ } else if (value->type.is_reference()) {
+ Handle<Object> obj(
+ Object(ReadUnalignedValue<Address>(gp_addr(reg.gp()))), isolate);
+ return WasmValue(obj, value->type);
+ } else {
+ UNREACHABLE();
+ }
}
DCHECK(reg.is_fp() || reg.is_fp_pair());
// ifdef here to workaround unreachable code for is_fp_pair.
@@ -544,11 +629,11 @@ class DebugInfoImpl {
Address spilled_addr =
debug_break_fp +
WasmDebugBreakFrameConstants::GetPushedFpRegisterOffset(code);
- if (value->kind == kF32) {
+ if (value->type == kWasmF32) {
return WasmValue(ReadUnalignedValue<float>(spilled_addr));
- } else if (value->kind == kF64) {
+ } else if (value->type == kWasmF64) {
return WasmValue(ReadUnalignedValue<double>(spilled_addr));
- } else if (value->kind == kS128) {
+ } else if (value->type == kWasmS128) {
return WasmValue(Simd128(ReadUnalignedValue<int16>(spilled_addr)));
} else {
// All other cases should have been handled above.
@@ -558,7 +643,7 @@ class DebugInfoImpl {
// Otherwise load the value from the stack.
Address stack_address = stack_frame_base - value->stack_offset;
- switch (value->kind) {
+ switch (value->type.kind()) {
case kI32:
return WasmValue(ReadUnalignedValue<int32_t>(stack_address));
case kI64:
@@ -567,11 +652,21 @@ class DebugInfoImpl {
return WasmValue(ReadUnalignedValue<float>(stack_address));
case kF64:
return WasmValue(ReadUnalignedValue<double>(stack_address));
- case kS128: {
+ case kS128:
return WasmValue(Simd128(ReadUnalignedValue<int16>(stack_address)));
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth: {
+ Handle<Object> obj(Object(ReadUnalignedValue<Address>(stack_address)),
+ isolate);
+ return WasmValue(obj, value->type);
}
- default:
- UNIMPLEMENTED();
+ case kI8:
+ case kI16:
+ case kVoid:
+ case kBottom:
+ UNREACHABLE();
}
}
@@ -649,6 +744,19 @@ class DebugInfoImpl {
// {mutex_} protects all fields below.
mutable base::Mutex mutex_;
+ // Cache a fixed number of WasmCode objects that were generated for debugging.
+ // This is useful especially in stepping, because stepping code is cleared on
+ // every pause and re-installed on the next step.
+ // This is an LRU cache (most recently used entries first).
+ static constexpr size_t kMaxCachedDebuggingCode = 3;
+ struct CachedDebuggingCode {
+ int func_index;
+ OwnedVector<const int> breakpoint_offsets;
+ int dead_breakpoint;
+ WasmCode* code;
+ };
+ std::vector<CachedDebuggingCode> cached_debugging_code_;
+
// Names of exports, lazily derived from the exports table.
std::unique_ptr<std::map<ImportExportKey, wasm::WireBytesRef>> export_names_;
@@ -657,8 +765,12 @@ class DebugInfoImpl {
std::pair<wasm::WireBytesRef, wasm::WireBytesRef>>>
import_names_;
+ // Names of types, lazily decoded from the wire bytes.
+ std::unique_ptr<NameMap> type_names_;
// Names of locals, lazily decoded from the wire bytes.
- std::unique_ptr<LocalNames> local_names_;
+ std::unique_ptr<IndirectNameMap> local_names_;
+ // Names of struct fields, lazily decoded from the wire bytes.
+ std::unique_ptr<IndirectNameMap> field_names_;
// Isolate-specific data.
std::unordered_map<Isolate*, PerIsolateDebugData> per_isolate_data_;
@@ -672,15 +784,15 @@ DebugInfo::~DebugInfo() = default;
int DebugInfo::GetNumLocals(Address pc) { return impl_->GetNumLocals(pc); }
WasmValue DebugInfo::GetLocalValue(int local, Address pc, Address fp,
- Address debug_break_fp) {
- return impl_->GetLocalValue(local, pc, fp, debug_break_fp);
+ Address debug_break_fp, Isolate* isolate) {
+ return impl_->GetLocalValue(local, pc, fp, debug_break_fp, isolate);
}
int DebugInfo::GetStackDepth(Address pc) { return impl_->GetStackDepth(pc); }
WasmValue DebugInfo::GetStackValue(int index, Address pc, Address fp,
- Address debug_break_fp) {
- return impl_->GetStackValue(index, pc, fp, debug_break_fp);
+ Address debug_break_fp, Isolate* isolate) {
+ return impl_->GetStackValue(index, pc, fp, debug_break_fp, isolate);
}
const wasm::WasmFunction& DebugInfo::GetFunctionAtAddress(Address pc) {
@@ -697,10 +809,18 @@ std::pair<WireBytesRef, WireBytesRef> DebugInfo::GetImportName(
return impl_->GetImportName(code, index);
}
+WireBytesRef DebugInfo::GetTypeName(int type_index) {
+ return impl_->GetTypeName(type_index);
+}
+
WireBytesRef DebugInfo::GetLocalName(int func_index, int local_index) {
return impl_->GetLocalName(func_index, local_index);
}
+WireBytesRef DebugInfo::GetFieldName(int struct_index, int field_index) {
+ return impl_->GetFieldName(struct_index, field_index);
+}
+
void DebugInfo::SetBreakpoint(int func_index, int offset,
Isolate* current_isolate) {
impl_->SetBreakpoint(func_index, offset, current_isolate);
@@ -718,6 +838,8 @@ void DebugInfo::ClearStepping(Isolate* isolate) {
impl_->ClearStepping(isolate);
}
+void DebugInfo::ClearStepping(WasmFrame* frame) { impl_->ClearStepping(frame); }
+
bool DebugInfo::IsStepping(WasmFrame* frame) {
return impl_->IsStepping(frame);
}
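
The new cached_debugging_code_ lookup in RecompileLiftoffWithBreakpoints keeps a tiny LRU: on a hit the entry is rotated to the front with std::iter_swap, and on insertion the back entry is evicted once the size exceeds kMaxCachedDebuggingCode. A standalone sketch of that move-to-front scheme:

// Standalone sketch, not V8 code: a small vector-backed LRU where a hit is
// bubbled to the front and eviction drops the last (least recently used) entry.
#include <algorithm>
#include <cstddef>
#include <vector>

template <typename T, typename Pred>
T* LookupAndTouch(std::vector<T>& cache, Pred matches) {
  for (auto begin = cache.begin(), it = begin, end = cache.end(); it != end;
       ++it) {
    if (!matches(*it)) continue;
    for (; it != begin; --it) std::iter_swap(it, it - 1);  // rotate to front
    return &cache.front();
  }
  return nullptr;  // miss: caller inserts at the front
}

template <typename T>
void InsertFrontAndTrim(std::vector<T>& cache, T entry, std::size_t max_size) {
  cache.insert(cache.begin(), std::move(entry));
  if (cache.size() > max_size) cache.pop_back();  // evict the LRU entry
}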
diff --git a/deps/v8/src/wasm/wasm-debug.h b/deps/v8/src/wasm/wasm-debug.h
index 837692644c1..1babd0650fe 100644
--- a/deps/v8/src/wasm/wasm-debug.h
+++ b/deps/v8/src/wasm/wasm-debug.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_WASM_DEBUG_H_
#define V8_WASM_WASM_DEBUG_H_
@@ -26,7 +30,7 @@ class WasmFrame;
namespace wasm {
class DebugInfoImpl;
-class LocalNames;
+class IndirectNameMap;
class NativeModule;
class WasmCode;
class WireBytesRef;
@@ -43,7 +47,7 @@ class DebugSideTable {
enum Storage : int8_t { kConstant, kRegister, kStack };
struct Value {
int index;
- ValueKind kind;
+ ValueType type;
Storage storage;
union {
int32_t i32_const; // if kind == kConstant
@@ -53,7 +57,7 @@ class DebugSideTable {
bool operator==(const Value& other) const {
if (index != other.index) return false;
- if (kind != other.kind) return false;
+ if (type != other.type) return false;
if (storage != other.storage) return false;
switch (storage) {
case kConstant:
@@ -171,13 +175,13 @@ class V8_EXPORT_PRIVATE DebugInfo {
// the {WasmDebugBreak} frame (if any).
int GetNumLocals(Address pc);
WasmValue GetLocalValue(int local, Address pc, Address fp,
- Address debug_break_fp);
+ Address debug_break_fp, Isolate* isolate);
int GetStackDepth(Address pc);
const wasm::WasmFunction& GetFunctionAtAddress(Address pc);
WasmValue GetStackValue(int index, Address pc, Address fp,
- Address debug_break_fp);
+ Address debug_break_fp, Isolate* isolate);
// Returns the name of the entity (with the given |index| and |kind|) derived
// from the exports table. If the entity is not exported, an empty reference
@@ -190,7 +194,9 @@ class V8_EXPORT_PRIVATE DebugInfo {
std::pair<WireBytesRef, WireBytesRef> GetImportName(ImportExportKindCode kind,
uint32_t index);
+ WireBytesRef GetTypeName(int type_index);
WireBytesRef GetLocalName(int func_index, int local_index);
+ WireBytesRef GetFieldName(int struct_index, int field_index);
void SetBreakpoint(int func_index, int offset, Isolate* current_isolate);
@@ -202,6 +208,11 @@ class V8_EXPORT_PRIVATE DebugInfo {
void ClearStepping(Isolate*);
+ // Remove stepping code from a single frame; this is a performance
+ // optimization only, since hitting debug breaks while not stepping and not
+ // at a set breakpoint would be unobservable otherwise.
+ void ClearStepping(WasmFrame*);
+
bool IsStepping(WasmFrame*);
void RemoveBreakpoint(int func_index, int offset, Isolate* current_isolate);
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index 339e6c97758..ed19f89a5e8 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -130,11 +130,10 @@ class WasmGCForegroundTask : public CancelableTask {
class WeakScriptHandle {
public:
explicit WeakScriptHandle(Handle<Script> script) : script_id_(script->id()) {
- DCHECK(script->source_url().IsString() ||
- script->source_url().IsUndefined());
- if (script->source_url().IsString()) {
+ DCHECK(script->name().IsString() || script->name().IsUndefined());
+ if (script->name().IsString()) {
std::unique_ptr<char[]> source_url =
- String::cast(script->source_url()).ToCString();
+ String::cast(script->name()).ToCString();
// Convert from {unique_ptr} to {shared_ptr}.
source_url_ = {source_url.release(), source_url.get_deleter()};
}
@@ -802,7 +801,7 @@ Handle<Script> CreateWasmScript(Isolate* isolate,
.ToHandleChecked();
}
}
- script->set_source_url(*url_str);
+ script->set_name(*url_str);
const WasmDebugSymbols& debug_symbols = module->debug_symbols;
if (debug_symbols.type == WasmDebugSymbols::Type::SourceMap &&
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index d04578e5574..5a26bd44577 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_WASM_ENGINE_H_
#define V8_WASM_WASM_ENGINE_H_
diff --git a/deps/v8/src/wasm/wasm-external-refs.h b/deps/v8/src/wasm/wasm-external-refs.h
index 3bb3b0cb890..e8363d59367 100644
--- a/deps/v8/src/wasm/wasm-external-refs.h
+++ b/deps/v8/src/wasm/wasm-external-refs.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_WASM_EXTERNAL_REFS_H_
#define V8_WASM_WASM_EXTERNAL_REFS_H_
diff --git a/deps/v8/src/wasm/wasm-feature-flags.h b/deps/v8/src/wasm/wasm-feature-flags.h
index 37e985ddff3..9c790d7c67e 100644
--- a/deps/v8/src/wasm/wasm-feature-flags.h
+++ b/deps/v8/src/wasm/wasm-feature-flags.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_WASM_FEATURE_FLAGS_H_
#define V8_WASM_WASM_FEATURE_FLAGS_H_
@@ -28,7 +32,12 @@
/* Memory64 proposal. */ \
/* https://github.com/WebAssembly/memory64 */ \
/* V8 side owner: clemensb */ \
- V(memory64, "memory64", false)
+ V(memory64, "memory64", false) \
+ \
+ /* Relaxed SIMD proposal. */ \
+ /* https://github.com/WebAssembly/relaxed-simd */ \
+ /* V8 side owner: zhin */ \
+ V(relaxed_simd, "relaxed simd", false)
// #############################################################################
// Staged features (disabled by default, but enabled via --wasm-staging (also
@@ -57,20 +66,6 @@
/* Staged in v8.7 * */ \
V(return_call, "return call opcodes", false) \
\
- /* Fixed-width SIMD operations. */ \
- /* https://github.com/webassembly/simd */ \
- /* V8 side owner: gdeepti, zhin */ \
- /* Staged in v8.7 * */ \
- V(simd, "SIMD opcodes", false) \
- \
- /* Threads proposal. */ \
- /* https://github.com/webassembly/threads */ \
- /* NOTE: This is enabled via chromium flag on desktop systems since v7.4 */ \
- /* (see https://crrev.com/c/1487808). ITS: https://groups.google.com/a/ */ \
- /* chromium.org/d/msg/blink-dev/tD6np-OG2PU/rcNGROOMFQAJ */ \
- /* V8 side owner: gdeepti */ \
- V(threads, "thread opcodes", false) \
- \
/* Type reflection proposal. */ \
/* https://github.com/webassembly/js-types */ \
/* V8 side owner: ahaas */ \
@@ -86,8 +81,26 @@
/* V8 side owner: thibaudm */ \
/* Shipped in v8.6. */ \
/* ITS: https://groups.google.com/g/v8-users/c/pv2E4yFWeF0 */ \
- V(mv, "multi-value support", true)
-
+ V(mv, "multi-value support", true) \
+ \
+ /* Fixed-width SIMD operations. */ \
+ /* https://github.com/webassembly/simd */ \
+ /* V8 side owner: gdeepti, zhin */ \
+ /* Staged in v8.7 * */ \
+ /* Shipped in v9.1 * */ \
+ V(simd, "SIMD opcodes", true) \
+ \
+ /* Threads proposal. */ \
+ /* https://github.com/webassembly/threads */ \
+ /* NOTE: This is enabled via chromium flag on desktop systems since v7.4, */ \
+ /* and on android from 9.1. Threads are only available when */ \
+ /* SharedArrayBuffers are enabled as well, and are gated by COOP/COEP */ \
+ /* headers, more fine grained control is in the chromium codebase */ \
+ /* ITS: https://groups.google.com/a/chromium.org/d/msg/blink-dev/ */ \
+ /* tD6np-OG2PU/rcNGROOMFQAJ */ \
+ /* V8 side owner: gdeepti */ \
+ V(threads, "thread opcodes", true) \
+ \
// Combination of all available wasm feature flags.
#define FOREACH_WASM_FEATURE_FLAG(V) \
FOREACH_WASM_EXPERIMENTAL_FEATURE_FLAG(V) \
diff --git a/deps/v8/src/wasm/wasm-features.cc b/deps/v8/src/wasm/wasm-features.cc
index c236df670c1..ebc04766fcb 100644
--- a/deps/v8/src/wasm/wasm-features.cc
+++ b/deps/v8/src/wasm/wasm-features.cc
@@ -23,11 +23,17 @@ WasmFeatures WasmFeatures::FromFlags() {
// static
WasmFeatures WasmFeatures::FromIsolate(Isolate* isolate) {
+ return FromContext(isolate, handle(isolate->context(), isolate));
+}
+
+// static
+WasmFeatures WasmFeatures::FromContext(Isolate* isolate,
+ Handle<Context> context) {
WasmFeatures features = WasmFeatures::FromFlags();
- if (isolate->IsWasmSimdEnabled(handle(isolate->context(), isolate))) {
+ if (isolate->IsWasmSimdEnabled(context)) {
features.Add(kFeature_simd);
}
- if (isolate->AreWasmExceptionsEnabled(handle(isolate->context(), isolate))) {
+ if (isolate->AreWasmExceptionsEnabled(context)) {
features.Add(kFeature_eh);
}
return features;
diff --git a/deps/v8/src/wasm/wasm-features.h b/deps/v8/src/wasm/wasm-features.h
index 92dbc4a543c..8a72c593edb 100644
--- a/deps/v8/src/wasm/wasm-features.h
+++ b/deps/v8/src/wasm/wasm-features.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_WASM_FEATURES_H_
#define V8_WASM_WASM_FEATURES_H_
@@ -16,6 +20,9 @@
namespace v8 {
namespace internal {
+class Context;
+template <typename T>
+class Handle;
class Isolate;
namespace wasm {
@@ -53,6 +60,8 @@ class WasmFeatures : public base::EnumSet<WasmFeature> {
static inline constexpr WasmFeatures ForAsmjs();
static WasmFeatures FromFlags();
static V8_EXPORT_PRIVATE WasmFeatures FromIsolate(Isolate*);
+ static V8_EXPORT_PRIVATE WasmFeatures FromContext(Isolate*,
+ Handle<Context> context);
};
// static
diff --git a/deps/v8/src/wasm/wasm-import-wrapper-cache.h b/deps/v8/src/wasm/wasm-import-wrapper-cache.h
index 9d159006421..abf0cf7d687 100644
--- a/deps/v8/src/wasm/wasm-import-wrapper-cache.h
+++ b/deps/v8/src/wasm/wasm-import-wrapper-cache.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_WASM_IMPORT_WRAPPER_CACHE_H_
#define V8_WASM_WASM_IMPORT_WRAPPER_CACHE_H_
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 163b89bc73e..bc9c5557eb2 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -1199,7 +1199,7 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Determines the type encoded in a value type property (e.g. type reflection).
// Returns false if there was an exception, true upon success. On success the
-// outgoing {type} is set accordingly, or set to {wasm::kWasmStmt} in case the
+// outgoing {type} is set accordingly, or set to {wasm::kWasmVoid} in case the
// type could not be properly recognized.
bool GetValueType(Isolate* isolate, MaybeLocal<Value> maybe,
Local<Context> context, i::wasm::ValueType* type,
@@ -1228,7 +1228,7 @@ bool GetValueType(Isolate* isolate, MaybeLocal<Value> maybe,
*type = i::wasm::kWasmEqRef;
} else {
// Unrecognized type.
- *type = i::wasm::kWasmStmt;
+ *type = i::wasm::kWasmVoid;
}
return true;
}
@@ -1273,7 +1273,7 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::MaybeLocal<v8::Value> maybe =
descriptor->Get(context, v8_str(isolate, "value"));
if (!GetValueType(isolate, maybe, context, &type, enabled_features)) return;
- if (type == i::wasm::kWasmStmt) {
+ if (type == i::wasm::kWasmVoid) {
thrower.TypeError(
"Descriptor property 'value' must be a WebAssembly type");
return;
@@ -1386,7 +1386,7 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
UNIMPLEMENTED();
case i::wasm::kI8:
case i::wasm::kI16:
- case i::wasm::kStmt:
+ case i::wasm::kVoid:
case i::wasm::kS128:
case i::wasm::kBottom:
UNREACHABLE();
@@ -1489,7 +1489,7 @@ void WebAssemblyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::wasm::ValueType type;
MaybeLocal<Value> maybe = parameters->Get(context, i);
if (!GetValueType(isolate, maybe, context, &type, enabled_features)) return;
- if (type == i::wasm::kWasmStmt) {
+ if (type == i::wasm::kWasmVoid) {
thrower.TypeError(
"Argument 0 parameter type at index #%u must be a value type", i);
return;
@@ -1500,7 +1500,7 @@ void WebAssemblyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::wasm::ValueType type;
MaybeLocal<Value> maybe = results->Get(context, i);
if (!GetValueType(isolate, maybe, context, &type, enabled_features)) return;
- if (type == i::wasm::kWasmStmt) {
+ if (type == i::wasm::kWasmVoid) {
thrower.TypeError(
"Argument 0 result type at index #%u must be a value type", i);
return;
@@ -1863,7 +1863,7 @@ void WebAssemblyGlobalGetValueCommon(
case i::wasm::kI8:
case i::wasm::kI16:
case i::wasm::kBottom:
- case i::wasm::kStmt:
+ case i::wasm::kVoid:
UNREACHABLE();
}
}
@@ -1960,7 +1960,7 @@ void WebAssemblyGlobalSetValue(
case i::wasm::kI8:
case i::wasm::kI16:
case i::wasm::kBottom:
- case i::wasm::kStmt:
+ case i::wasm::kVoid:
UNREACHABLE();
}
}
@@ -2145,7 +2145,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSObject::cast(module_constructor->instance_prototype()), isolate);
Handle<Map> module_map = isolate->factory()->NewMap(
i::WASM_MODULE_OBJECT_TYPE, WasmModuleObject::kHeaderSize);
- JSFunction::SetInitialMap(module_constructor, module_map, module_proto);
+ JSFunction::SetInitialMap(isolate, module_constructor, module_map,
+ module_proto);
InstallFunc(isolate, module_constructor, "imports", WebAssemblyModuleImports,
1);
InstallFunc(isolate, module_constructor, "exports", WebAssemblyModuleExports,
@@ -2165,7 +2166,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSObject::cast(instance_constructor->instance_prototype()), isolate);
Handle<Map> instance_map = isolate->factory()->NewMap(
i::WASM_INSTANCE_OBJECT_TYPE, WasmInstanceObject::kHeaderSize);
- JSFunction::SetInitialMap(instance_constructor, instance_map, instance_proto);
+ JSFunction::SetInitialMap(isolate, instance_constructor, instance_map,
+ instance_proto);
InstallGetter(isolate, instance_proto, "exports",
WebAssemblyInstanceGetExports);
JSObject::AddProperty(isolate, instance_proto,
@@ -2187,7 +2189,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSObject::cast(table_constructor->instance_prototype()), isolate);
Handle<Map> table_map = isolate->factory()->NewMap(
i::WASM_TABLE_OBJECT_TYPE, WasmTableObject::kHeaderSize);
- JSFunction::SetInitialMap(table_constructor, table_map, table_proto);
+ JSFunction::SetInitialMap(isolate, table_constructor, table_map, table_proto);
InstallGetter(isolate, table_proto, "length", WebAssemblyTableGetLength);
InstallFunc(isolate, table_proto, "grow", WebAssemblyTableGrow, 1);
InstallFunc(isolate, table_proto, "get", WebAssemblyTableGet, 1);
@@ -2208,7 +2210,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSObject::cast(memory_constructor->instance_prototype()), isolate);
Handle<Map> memory_map = isolate->factory()->NewMap(
i::WASM_MEMORY_OBJECT_TYPE, WasmMemoryObject::kHeaderSize);
- JSFunction::SetInitialMap(memory_constructor, memory_map, memory_proto);
+ JSFunction::SetInitialMap(isolate, memory_constructor, memory_map,
+ memory_proto);
InstallFunc(isolate, memory_proto, "grow", WebAssemblyMemoryGrow, 1);
InstallGetter(isolate, memory_proto, "buffer", WebAssemblyMemoryGetBuffer);
if (enabled_features.has_type_reflection()) {
@@ -2227,7 +2230,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSObject::cast(global_constructor->instance_prototype()), isolate);
Handle<Map> global_map = isolate->factory()->NewMap(
i::WASM_GLOBAL_OBJECT_TYPE, WasmGlobalObject::kHeaderSize);
- JSFunction::SetInitialMap(global_constructor, global_map, global_proto);
+ JSFunction::SetInitialMap(isolate, global_constructor, global_map,
+ global_proto);
InstallFunc(isolate, global_proto, "valueOf", WebAssemblyGlobalValueOf, 0);
InstallGetterSetter(isolate, global_proto, "value", WebAssemblyGlobalGetValue,
WebAssemblyGlobalSetValue);
@@ -2238,26 +2242,26 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
v8_str(isolate, "WebAssembly.Global"), ro_attributes);
// Setup Exception
- Handle<String> exception_name = v8_str(isolate, "Exception");
- Handle<JSFunction> exception_constructor =
- CreateFunc(isolate, exception_name, WebAssemblyException, true,
- SideEffectType::kHasSideEffect);
- exception_constructor->shared().set_length(1);
if (enabled_features.has_eh()) {
+ Handle<String> exception_name = v8_str(isolate, "Exception");
+ Handle<JSFunction> exception_constructor =
+ CreateFunc(isolate, exception_name, WebAssemblyException, true,
+ SideEffectType::kHasSideEffect);
+ exception_constructor->shared().set_length(1);
JSObject::AddProperty(isolate, webassembly, exception_name,
exception_constructor, DONT_ENUM);
+ // Install the constructor on the context unconditionally so that it is also
+ // available when the feature is enabled via the origin trial.
+ context->set_wasm_exception_constructor(*exception_constructor);
+ SetDummyInstanceTemplate(isolate, exception_constructor);
+ JSFunction::EnsureHasInitialMap(exception_constructor);
+ Handle<JSObject> exception_proto(
+ JSObject::cast(exception_constructor->instance_prototype()), isolate);
+ Handle<Map> exception_map = isolate->factory()->NewMap(
+ i::WASM_EXCEPTION_OBJECT_TYPE, WasmExceptionObject::kHeaderSize);
+ JSFunction::SetInitialMap(isolate, exception_constructor, exception_map,
+ exception_proto);
}
- // Install the constructor on the context unconditionally so that it is also
- // available when the feature is enabled via the origin trial.
- context->set_wasm_exception_constructor(*exception_constructor);
- SetDummyInstanceTemplate(isolate, exception_constructor);
- JSFunction::EnsureHasInitialMap(exception_constructor);
- Handle<JSObject> exception_proto(
- JSObject::cast(exception_constructor->instance_prototype()), isolate);
- Handle<Map> exception_map = isolate->factory()->NewMap(
- i::WASM_EXCEPTION_OBJECT_TYPE, WasmExceptionObject::kHeaderSize);
- JSFunction::SetInitialMap(exception_constructor, exception_map,
- exception_proto);
// Setup Function
if (enabled_features.has_type_reflection()) {
@@ -2274,7 +2278,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
handle(context->function_function().prototype(), isolate), false,
kDontThrow)
.FromJust());
- JSFunction::SetInitialMap(function_constructor, function_map,
+ JSFunction::SetInitialMap(isolate, function_constructor, function_map,
function_proto);
InstallFunc(isolate, function_constructor, "type", WebAssemblyFunctionType,
1);
@@ -2304,6 +2308,40 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
runtime_error, DONT_ENUM);
}
+// static
+void WasmJs::InstallConditionalFeatures(Isolate* isolate,
+ Handle<Context> context) {
+ // Exception handling may have been enabled by an origin trial. If so, make
+ // sure that the {WebAssembly.Exception} constructor is set up.
+ auto enabled_features = i::wasm::WasmFeatures::FromContext(isolate, context);
+ if (enabled_features.has_eh()) {
+ Handle<JSGlobalObject> global = handle(context->global_object(), isolate);
+ MaybeHandle<Object> maybe_webassembly =
+ JSObject::GetProperty(isolate, global, "WebAssembly");
+ Handle<JSObject> webassembly =
+ Handle<JSObject>::cast(maybe_webassembly.ToHandleChecked());
+ // Setup Exception
+ Handle<String> exception_name = v8_str(isolate, "Exception");
+ if (!JSObject::HasProperty(webassembly, exception_name).FromMaybe(true)) {
+ Handle<JSFunction> exception_constructor =
+ CreateFunc(isolate, exception_name, WebAssemblyException, true,
+ SideEffectType::kHasSideEffect);
+ exception_constructor->shared().set_length(1);
+ JSObject::AddProperty(isolate, webassembly, exception_name,
+ exception_constructor, DONT_ENUM);
+ // Install the constructor on the context.
+ context->set_wasm_exception_constructor(*exception_constructor);
+ SetDummyInstanceTemplate(isolate, exception_constructor);
+ JSFunction::EnsureHasInitialMap(exception_constructor);
+ Handle<JSObject> exception_proto(
+ JSObject::cast(exception_constructor->instance_prototype()), isolate);
+ Handle<Map> exception_map = isolate->factory()->NewMap(
+ i::WASM_EXCEPTION_OBJECT_TYPE, WasmExceptionObject::kHeaderSize);
+ JSFunction::SetInitialMap(isolate, exception_constructor, exception_map,
+ exception_proto);
+ }
+ }
+}
#undef ASSIGN
#undef EXTRACT_THIS
diff --git a/deps/v8/src/wasm/wasm-js.h b/deps/v8/src/wasm/wasm-js.h
index e5948d12bb9..ce50b2822a6 100644
--- a/deps/v8/src/wasm/wasm-js.h
+++ b/deps/v8/src/wasm/wasm-js.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_WASM_JS_H_
#define V8_WASM_WASM_JS_H_
@@ -9,7 +13,9 @@
namespace v8 {
namespace internal {
-class WasmFrame;
+class Context;
+template <typename T>
+class Handle;
namespace wasm {
class StreamingDecoder;
@@ -20,6 +26,9 @@ class WasmJs {
public:
V8_EXPORT_PRIVATE static void Install(Isolate* isolate,
bool exposed_on_global_object);
+
+ V8_EXPORT_PRIVATE static void InstallConditionalFeatures(
+ Isolate* isolate, Handle<Context> context);
};
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-limits.h b/deps/v8/src/wasm/wasm-limits.h
index 3e4fc6fe3b7..78a8f0afd43 100644
--- a/deps/v8/src/wasm/wasm-limits.h
+++ b/deps/v8/src/wasm/wasm-limits.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_WASM_LIMITS_H_
#define V8_WASM_WASM_LIMITS_H_
diff --git a/deps/v8/src/wasm/wasm-linkage.h b/deps/v8/src/wasm/wasm-linkage.h
index fd27d7108d3..2d980555192 100644
--- a/deps/v8/src/wasm/wasm-linkage.h
+++ b/deps/v8/src/wasm/wasm-linkage.h
@@ -2,9 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_WASM_LINKAGE_H_
#define V8_WASM_WASM_LINKAGE_H_
+#include "src/codegen/aligned-slot-allocator.h"
#include "src/codegen/assembler-arch.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/signature.h"
@@ -44,7 +49,7 @@ constexpr DoubleRegister kFpReturnRegisters[] = {xmm1, xmm2};
// ===========================================================================
constexpr Register kGpParamRegisters[] = {r3, r0, r2, r6};
constexpr Register kGpReturnRegisters[] = {r0, r1};
-// ARM d-registers must be in ascending order for correct allocation.
+// ARM d-registers must be in even/odd D-register pairs for correct allocation.
constexpr DoubleRegister kFpParamRegisters[] = {d0, d1, d2, d3, d4, d5, d6, d7};
constexpr DoubleRegister kFpReturnRegisters[] = {d0, d1};
@@ -145,18 +150,27 @@ class LinkageAllocator {
bool CanAllocateFP(MachineRepresentation rep) const {
#if V8_TARGET_ARCH_ARM
switch (rep) {
- case MachineRepresentation::kFloat32:
- return fp_offset_ < fp_count_ && fp_regs_[fp_offset_].code() < 16;
- case MachineRepresentation::kFloat64:
- return extra_double_reg_ >= 0 || fp_offset_ < fp_count_;
- case MachineRepresentation::kSimd128:
- return ((fp_offset_ + 1) & ~1) + 1 < fp_count_;
+ case MachineRepresentation::kFloat32: {
+ // Get the next D-register (Liftoff only uses the even S-registers).
+ int next = fp_allocator_.NextSlot(2) / 2;
+ // Only the lower 16 D-registers alias S-registers.
+ return next < fp_count_ && fp_regs_[next].code() < 16;
+ }
+ case MachineRepresentation::kFloat64: {
+ int next = fp_allocator_.NextSlot(2) / 2;
+ return next < fp_count_;
+ }
+ case MachineRepresentation::kSimd128: {
+ int next = fp_allocator_.NextSlot(4) / 2;
+ return next < fp_count_ - 1; // 2 D-registers are required.
+ }
default:
UNREACHABLE();
return false;
}
-#endif
+#else
return fp_offset_ < fp_count_;
+#endif
}
int NextGpReg() {
@@ -165,80 +179,58 @@ class LinkageAllocator {
}
int NextFpReg(MachineRepresentation rep) {
+ DCHECK(CanAllocateFP(rep));
#if V8_TARGET_ARCH_ARM
switch (rep) {
case MachineRepresentation::kFloat32: {
- // Liftoff uses only even-numbered f32 registers, and encodes them using
- // the code of the corresponding f64 register. This limits the calling
- // interface to only using the even-numbered f32 registers.
+ // Liftoff uses only even-numbered S-registers, and encodes them using
+ // the code of the corresponding D-register. This limits the calling
+ // interface to only using the even-numbered S-registers.
int d_reg_code = NextFpReg(MachineRepresentation::kFloat64);
- DCHECK_GT(16, d_reg_code); // D-registers 16 - 31 can't split.
+ DCHECK_GT(16, d_reg_code); // D16 - D31 don't alias S-registers.
return d_reg_code * 2;
}
case MachineRepresentation::kFloat64: {
- // Use the extra D-register if there is one.
- if (extra_double_reg_ >= 0) {
- int reg_code = extra_double_reg_;
- extra_double_reg_ = -1;
- return reg_code;
- }
- DCHECK_LT(fp_offset_, fp_count_);
- return fp_regs_[fp_offset_++].code();
+ int next = fp_allocator_.Allocate(2) / 2;
+ return fp_regs_[next].code();
}
case MachineRepresentation::kSimd128: {
- // Q-register must be an even-odd pair, so we must try to allocate at
- // the end, not using extra_double_reg_. If we are at an odd D-register,
- // skip past it (saving it to extra_double_reg_).
- DCHECK_LT(((fp_offset_ + 1) & ~1) + 1, fp_count_);
- int d_reg1_code = fp_regs_[fp_offset_++].code();
- if (d_reg1_code % 2 != 0) {
- // If we're misaligned then extra_double_reg_ must have been consumed.
- DCHECK_EQ(-1, extra_double_reg_);
- int odd_double_reg = d_reg1_code;
- d_reg1_code = fp_regs_[fp_offset_++].code();
- extra_double_reg_ = odd_double_reg;
- }
- // Combine the current D-register with the next to form a Q-register.
- int d_reg2_code = fp_regs_[fp_offset_++].code();
- DCHECK_EQ(0, d_reg1_code % 2);
- DCHECK_EQ(d_reg1_code + 1, d_reg2_code);
- USE(d_reg2_code);
- return d_reg1_code / 2;
+ int next = fp_allocator_.Allocate(4) / 2;
+ int d_reg_code = fp_regs_[next].code();
+ // Check that the result and the next D-register form an even/odd pair.
+ DCHECK_EQ(0, d_reg_code % 2);
+ DCHECK_EQ(d_reg_code + 1, fp_regs_[next + 1].code());
+ return d_reg_code / 2;
}
default:
UNREACHABLE();
}
#else
- DCHECK_LT(fp_offset_, fp_count_);
return fp_regs_[fp_offset_++].code();
#endif
}
// Stackslots are counted upwards starting from 0 (or the offset set by
- // {SetStackOffset}.
- int NumStackSlots(MachineRepresentation type) {
- return std::max(1, ElementSizeInBytes(type) / kSystemPointerSize);
- }
-
- // Stackslots are counted upwards starting from 0 (or the offset set by
- // {SetStackOffset}. If {type} needs more than
- // one stack slot, the lowest used stack slot is returned.
+ // {SetStackOffset}). If {type} needs more than one stack slot, the lowest
+ // used stack slot is returned.
int NextStackSlot(MachineRepresentation type) {
- int num_stack_slots = NumStackSlots(type);
- int offset = stack_offset_;
- stack_offset_ += num_stack_slots;
- return offset;
+ int num_slots =
+ AlignedSlotAllocator::NumSlotsForWidth(ElementSizeInBytes(type));
+ int slot = slot_allocator_.Allocate(num_slots);
+ return slot;
}
// Set an offset for the stack slots returned by {NextStackSlot} and
// {NumStackSlots}. Can only be called before any call to {NextStackSlot}.
- void SetStackOffset(int num) {
- DCHECK_LE(0, num);
- DCHECK_EQ(0, stack_offset_);
- stack_offset_ = num;
+ void SetStackOffset(int offset) {
+ DCHECK_LE(0, offset);
+ DCHECK_EQ(0, slot_allocator_.Size());
+ slot_allocator_.AllocateUnaligned(offset);
}
- int NumStackSlots() const { return stack_offset_; }
+ int NumStackSlots() const { return slot_allocator_.Size(); }
+
+ void EndSlotArea() { slot_allocator_.AllocateUnaligned(0); }
private:
const int gp_count_;
@@ -246,16 +238,16 @@ class LinkageAllocator {
const Register* const gp_regs_;
const int fp_count_;
- int fp_offset_ = 0;
- const DoubleRegister* const fp_regs_;
-
#if V8_TARGET_ARCH_ARM
- // Track fragments of registers below fp_offset_ here. There can only be one
- // extra double register.
- int extra_double_reg_ = -1;
+ // Use an aligned slot allocator to model ARM FP register aliasing. The slots
+ // are 32 bits, so 2 slots are required for a D-register, 4 for a Q-register.
+ AlignedSlotAllocator fp_allocator_;
+#else
+ int fp_offset_ = 0;
#endif
+ const DoubleRegister* const fp_regs_;
- int stack_offset_ = 0;
+ AlignedSlotAllocator slot_allocator_;
};
} // namespace wasm
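
A minimal sketch of the slot model introduced above, assuming a simplified allocator without fragment reuse (illustrative only; not V8's real AlignedSlotAllocator, and the register codes in main() are assumptions): ARM FP registers are modelled as 32-bit slots, so an S-register is one slot, a D-register two, and a Q-register four, and aligned allocation keeps D- and Q-registers on even slot boundaries.

// Toy model of the 32-bit-slot scheme used for ARM FP registers above.
#include <cassert>
#include <cstdio>

class ToyAlignedSlotAllocator {
 public:
  // Allocates n slots aligned to n (n is a power of two); returns the index
  // of the first slot. Unlike V8's allocator, alignment gaps are not reused.
  int Allocate(int n) {
    assert(n == 1 || n == 2 || n == 4);
    int start = (size_ + n - 1) & ~(n - 1);
    size_ = start + n;
    return start;
  }
  int Size() const { return size_; }

 private:
  int size_ = 0;
};

int main() {
  ToyAlignedSlotAllocator fp;
  // f32 parameter: one D-register (2 slots); Liftoff uses its even S-register.
  int s_code = (fp.Allocate(2) / 2) * 2;  // -> s0
  // f64 parameter: one D-register (2 slots).
  int d_code = fp.Allocate(2) / 2;        // -> d1
  // SIMD128 parameter: an even/odd D-register pair (4 slots), i.e. a Q-register.
  int q_code = fp.Allocate(4) / 4;        // -> q1 (d2/d3)
  std::printf("s%d d%d q%d\n", s_code, d_code, q_code);
  return 0;
}
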
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index a3bd33c1d6d..67f826f2fd9 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -315,7 +315,7 @@ uint32_t WasmModuleBuilder::AllocateIndirectFunctions(uint32_t count) {
if (tables_.empty()) {
// This cannot use {AddTable} because that would flip the
// {allocating_indirect_functions_allowed_} flag.
- tables_.push_back({kWasmFuncRef, new_size, max, true});
+ tables_.push_back({kWasmFuncRef, new_size, max, true, {}});
} else {
// There can only be the indirect function table so far, otherwise the
// {allocating_indirect_functions_allowed_} flag would have been false.
@@ -347,7 +347,7 @@ uint32_t WasmModuleBuilder::AddTable(ValueType type, uint32_t min_size) {
#if DEBUG
allocating_indirect_functions_allowed_ = false;
#endif
- tables_.push_back({type, min_size, 0, false});
+ tables_.push_back({type, min_size, 0, false, {}});
return static_cast<uint32_t>(tables_.size() - 1);
}
@@ -356,7 +356,16 @@ uint32_t WasmModuleBuilder::AddTable(ValueType type, uint32_t min_size,
#if DEBUG
allocating_indirect_functions_allowed_ = false;
#endif
- tables_.push_back({type, min_size, max_size, true});
+ tables_.push_back({type, min_size, max_size, true, {}});
+ return static_cast<uint32_t>(tables_.size() - 1);
+}
+
+uint32_t WasmModuleBuilder::AddTable(ValueType type, uint32_t min_size,
+ uint32_t max_size, WasmInitExpr init) {
+#if DEBUG
+ allocating_indirect_functions_allowed_ = false;
+#endif
+ tables_.push_back({type, min_size, max_size, true, std::move(init)});
return static_cast<uint32_t>(tables_.size() - 1);
}
@@ -432,8 +441,8 @@ void WriteValueType(ZoneBuffer* buffer, const ValueType& type) {
}
}
-void WriteGlobalInitializer(ZoneBuffer* buffer, const WasmInitExpr& init,
- ValueType type) {
+void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
+ ValueType type) {
switch (init.kind()) {
case WasmInitExpr::kI32Const:
buffer->write_u8(kExprI32Const);
@@ -494,7 +503,7 @@ void WriteGlobalInitializer(ZoneBuffer* buffer, const WasmInitExpr& init,
break;
case kI8:
case kI16:
- case kStmt:
+ case kVoid:
case kS128:
case kBottom:
case kRef:
@@ -512,7 +521,7 @@ void WriteGlobalInitializer(ZoneBuffer* buffer, const WasmInitExpr& init,
break;
case WasmInitExpr::kRttSub:
// The operand to rtt.sub must be emitted first.
- WriteGlobalInitializer(buffer, *init.operand(), kWasmBottom);
+ WriteInitializerExpression(buffer, *init.operand(), kWasmBottom);
STATIC_ASSERT((kExprRttSub >> 8) == kGCPrefix);
buffer->write_u8(kGCPrefix);
buffer->write_u8(static_cast<uint8_t>(kExprRttSub));
@@ -611,6 +620,9 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
buffer->write_u8(table.has_maximum ? kWithMaximum : kNoMaximum);
buffer->write_size(table.min_size);
if (table.has_maximum) buffer->write_size(table.max_size);
+ if (table.init.kind() != WasmInitExpr::kNone) {
+ WriteInitializerExpression(buffer, table.init, table.type);
+ }
}
FixupSection(buffer, start);
}
@@ -651,7 +663,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
for (const WasmGlobal& global : globals_) {
WriteValueType(buffer, global.type);
buffer->write_u8(global.mutability ? 1 : 0);
- WriteGlobalInitializer(buffer, global.init, global.type);
+ WriteInitializerExpression(buffer, global.init, global.type);
buffer->write_u8(kExprEnd);
}
FixupSection(buffer, start);
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index f93b981d7c9..f7b5ff1b768 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_WASM_MODULE_BUILDER_H_
#define V8_WASM_WASM_MODULE_BUILDER_H_
@@ -261,6 +265,8 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
void SetMaxTableSize(uint32_t max);
uint32_t AddTable(ValueType type, uint32_t min_size);
uint32_t AddTable(ValueType type, uint32_t min_size, uint32_t max_size);
+ uint32_t AddTable(ValueType type, uint32_t min_size, uint32_t max_size,
+ WasmInitExpr init);
void MarkStartFunction(WasmFunctionBuilder* builder);
void AddExport(Vector<const char> name, ImportExportKindCode kind,
uint32_t index);
@@ -340,6 +346,7 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
uint32_t min_size;
uint32_t max_size;
bool has_maximum;
+ WasmInitExpr init;
};
struct WasmDataSegment {
diff --git a/deps/v8/src/wasm/wasm-module-sourcemap.h b/deps/v8/src/wasm/wasm-module-sourcemap.h
index 7d3116f3b3e..fd8c1117fa7 100644
--- a/deps/v8/src/wasm/wasm-module-sourcemap.h
+++ b/deps/v8/src/wasm/wasm-module-sourcemap.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_WASM_MODULE_SOURCEMAP_H_
#define V8_WASM_WASM_MODULE_SOURCEMAP_H_
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 439be1d2c7e..bbfcf9623b5 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_WASM_MODULE_H_
#define V8_WASM_WASM_MODULE_H_
@@ -354,19 +358,20 @@ struct WasmTable {
// TODO(9495): Update this function as more table types are supported, or
// remove it completely when all reference types are allowed.
static bool IsValidTableType(ValueType type, const WasmModule* module) {
- if (!type.is_nullable()) return false;
+ if (!type.is_object_reference()) return false;
HeapType heap_type = type.heap_type();
return heap_type == HeapType::kFunc || heap_type == HeapType::kExtern ||
(module != nullptr && heap_type.is_index() &&
module->has_signature(heap_type.ref_index()));
}
- ValueType type = kWasmStmt; // table type.
+ ValueType type = kWasmVoid; // table type.
uint32_t initial_size = 0; // initial table size.
uint32_t maximum_size = 0; // maximum table size.
bool has_maximum_size = false; // true if there is a maximum size.
bool imported = false; // true if imported.
bool exported = false; // true if exported.
+ WasmInitExpr initial_value;
};
inline bool is_asmjs_module(const WasmModule* module) {
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index 2c76a4ec186..3da7e1650a1 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_WASM_OBJECTS_INL_H_
#define V8_WASM_WASM_OBJECTS_INL_H_
@@ -55,13 +59,13 @@ CAST_ACCESSOR(WasmTypeInfo)
CAST_ACCESSOR(WasmStruct)
CAST_ACCESSOR(WasmArray)
-#define OPTIONAL_ACCESSORS(holder, name, type, offset) \
- DEF_GETTER(holder, has_##name, bool) { \
- Object value = TaggedField<Object, offset>::load(isolate, *this); \
- return !value.IsUndefined(GetReadOnlyRoots(isolate)); \
- } \
- ACCESSORS_CHECKED2(holder, name, type, offset, \
- !value.IsUndefined(GetReadOnlyRoots(isolate)), true)
+#define OPTIONAL_ACCESSORS(holder, name, type, offset) \
+ DEF_GETTER(holder, has_##name, bool) { \
+ Object value = TaggedField<Object, offset>::load(cage_base, *this); \
+ return !value.IsUndefined(GetReadOnlyRoots(cage_base)); \
+ } \
+ ACCESSORS_CHECKED2(holder, name, type, offset, \
+ !value.IsUndefined(GetReadOnlyRoots(cage_base)), true)
#define PRIMITIVE_ACCESSORS(holder, name, type, offset) \
type holder::name() const { \
@@ -167,7 +171,7 @@ double WasmGlobalObject::GetF64() {
Handle<Object> WasmGlobalObject::GetRef() {
// We use this getter for externref and funcref.
- DCHECK(type().is_reference_type());
+ DCHECK(type().is_reference());
return handle(tagged_buffer().get(offset()), GetIsolate());
}
@@ -393,8 +397,7 @@ ACCESSORS(WasmIndirectFunctionTable, refs, FixedArray, kRefsOffset)
#undef PRIMITIVE_ACCESSORS
wasm::ValueType WasmTableObject::type() {
- // TODO(7748): Support other table types? Wait for spec to clear up.
- return wasm::ValueType::Ref(raw_type(), wasm::kNullable);
+ return wasm::ValueType::FromRawBitField(raw_type());
}
bool WasmMemoryObject::has_maximum_pages() { return maximum_pages() >= 0; }
@@ -457,6 +460,12 @@ int WasmArray::GcSafeSizeFor(Map map, int length) {
void WasmTypeInfo::clear_foreign_address(Isolate* isolate) {
#ifdef V8_HEAP_SANDBOX
+
+ // TODO(syg): V8_HEAP_SANDBOX doesn't work with the shared pointer compression cage
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#error "V8_HEAP_SANDBOX requires per-Isolate pointer compression cage"
+#endif
+
// Due to the type-specific pointer tags for external pointers, we need to
// allocate an entry in the table here even though it will just store nullptr.
AllocateExternalPointerEntries(isolate);
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index ce74e732077..d4e7cb65a13 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -291,7 +291,7 @@ Handle<WasmTableObject> WasmTableObject::New(
table_obj->set_entries(*backing_store);
table_obj->set_current_length(initial);
table_obj->set_maximum_length(*max);
- table_obj->set_raw_type(static_cast<int>(type.heap_representation()));
+ table_obj->set_raw_type(static_cast<int>(type.raw_bit_field()));
table_obj->set_dispatch_tables(ReadOnlyRoots(isolate).empty_fixed_array());
if (entries != nullptr) {
@@ -612,7 +612,7 @@ void WasmTableObject::UpdateDispatchTables(
int total_count = serialized_sig.length() - 1;
std::unique_ptr<wasm::ValueType[]> reps(new wasm::ValueType[total_count]);
int result_count;
- static const wasm::ValueType kMarker = wasm::kWasmStmt;
+ static const wasm::ValueType kMarker = wasm::kWasmVoid;
for (int i = 0, j = 0; i <= total_count; i++) {
if (serialized_sig.get(i) == kMarker) {
result_count = i;
@@ -1030,7 +1030,7 @@ MaybeHandle<WasmGlobalObject> WasmGlobalObject::New(
global_obj->set_is_mutable(is_mutable);
}
- if (type.is_reference_type()) {
+ if (type.is_reference()) {
DCHECK(maybe_untagged_buffer.is_null());
Handle<FixedArray> tagged_buffer;
if (!maybe_tagged_buffer.ToHandle(&tagged_buffer)) {
@@ -1566,7 +1566,7 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
// static
uint8_t* WasmInstanceObject::GetGlobalStorage(
Handle<WasmInstanceObject> instance, const wasm::WasmGlobal& global) {
- DCHECK(!global.type.is_reference_type());
+ DCHECK(!global.type.is_reference());
if (global.mutability && global.imported) {
return reinterpret_cast<byte*>(
instance->imported_mutable_globals()[global.index]);
@@ -1579,7 +1579,7 @@ uint8_t* WasmInstanceObject::GetGlobalStorage(
std::pair<Handle<FixedArray>, uint32_t>
WasmInstanceObject::GetGlobalBufferAndIndex(Handle<WasmInstanceObject> instance,
const wasm::WasmGlobal& global) {
- DCHECK(global.type.is_reference_type());
+ DCHECK(global.type.is_reference());
Isolate* isolate = instance->GetIsolate();
if (global.mutability && global.imported) {
Handle<FixedArray> buffer(
@@ -1597,12 +1597,13 @@ WasmInstanceObject::GetGlobalBufferAndIndex(Handle<WasmInstanceObject> instance,
wasm::WasmValue WasmInstanceObject::GetGlobalValue(
Handle<WasmInstanceObject> instance, const wasm::WasmGlobal& global) {
Isolate* isolate = instance->GetIsolate();
- if (global.type.is_reference_type()) {
+ if (global.type.is_reference()) {
Handle<FixedArray> global_buffer; // The buffer of the global.
uint32_t global_index = 0; // The index into the buffer.
std::tie(global_buffer, global_index) =
GetGlobalBufferAndIndex(instance, global);
- return wasm::WasmValue(handle(global_buffer->get(global_index), isolate));
+ return wasm::WasmValue(handle(global_buffer->get(global_index), isolate),
+ global.type);
}
Address ptr = reinterpret_cast<Address>(GetGlobalStorage(instance, global));
using wasm::Simd128;
@@ -1617,6 +1618,65 @@ wasm::WasmValue WasmInstanceObject::GetGlobalValue(
}
}
+wasm::WasmValue WasmStruct::GetFieldValue(uint32_t index) {
+ wasm::ValueType field_type = type()->field(index);
+ int field_offset = WasmStruct::kHeaderSize + type()->field_offset(index);
+ Address field_address = GetFieldAddress(field_offset);
+ using wasm::Simd128;
+ switch (field_type.kind()) {
+#define CASE_TYPE(valuetype, ctype) \
+ case wasm::valuetype: \
+ return wasm::WasmValue(base::ReadLittleEndianValue<ctype>(field_address));
+ CASE_TYPE(kI8, int8_t)
+ CASE_TYPE(kI16, int16_t)
+ FOREACH_WASMVALUE_CTYPES(CASE_TYPE)
+#undef CASE_TYPE
+ case wasm::kRef:
+ case wasm::kOptRef: {
+ Handle<Object> ref(TaggedField<Object>::load(*this, field_offset),
+ GetIsolateFromWritableObject(*this));
+ return wasm::WasmValue(ref, field_type);
+ }
+ case wasm::kRtt:
+ case wasm::kRttWithDepth:
+ // TODO(7748): Expose RTTs to DevTools.
+ UNIMPLEMENTED();
+ case wasm::kVoid:
+ case wasm::kBottom:
+ UNREACHABLE();
+ }
+}
+
+wasm::WasmValue WasmArray::GetElement(uint32_t index) {
+ wasm::ValueType element_type = type()->element_type();
+ int element_offset =
+ WasmArray::kHeaderSize + index * element_type.element_size_bytes();
+ Address element_address = GetFieldAddress(element_offset);
+ using wasm::Simd128;
+ switch (element_type.kind()) {
+#define CASE_TYPE(value_type, ctype) \
+ case wasm::value_type: \
+ return wasm::WasmValue(base::ReadLittleEndianValue<ctype>(element_address));
+ CASE_TYPE(kI8, int8_t)
+ CASE_TYPE(kI16, int16_t)
+ FOREACH_WASMVALUE_CTYPES(CASE_TYPE)
+#undef CASE_TYPE
+ case wasm::kRef:
+ case wasm::kOptRef: {
+ Handle<Object> ref(TaggedField<Object>::load(*this, element_offset),
+ GetIsolateFromWritableObject(*this));
+ return wasm::WasmValue(ref, element_type);
+ }
+ case wasm::kRtt:
+ case wasm::kRttWithDepth:
+ // TODO(7748): Expose RTTs to DevTools.
+ UNIMPLEMENTED();
+ case wasm::kVoid:
+ case wasm::kBottom:
+ UNREACHABLE();
+ }
+}
+
// static
Handle<WasmExceptionObject> WasmExceptionObject::New(
Isolate* isolate, const wasm::FunctionSig* sig,
@@ -1673,7 +1733,7 @@ bool WasmCapiFunction::MatchesSignature(const wasm::FunctionSig* sig) const {
return false;
}
}
- if (serialized_sig.get(serialized_index) != wasm::kWasmStmt) return false;
+ if (serialized_sig.get(serialized_index) != wasm::kWasmVoid) return false;
serialized_index++;
for (int i = 0; i < param_count; i++, serialized_index++) {
if (sig->GetParam(i) != serialized_sig.get(serialized_index)) return false;
@@ -1770,7 +1830,7 @@ uint32_t WasmExceptionPackage::GetEncodedSize(
break;
case wasm::kRtt:
case wasm::kRttWithDepth:
- case wasm::kStmt:
+ case wasm::kVoid:
case wasm::kBottom:
case wasm::kI8:
case wasm::kI16:
@@ -2115,7 +2175,7 @@ namespace wasm {
bool TypecheckJSObject(Isolate* isolate, const WasmModule* module,
Handle<Object> value, ValueType expected,
const char** error_message) {
- DCHECK(expected.is_reference_type());
+ DCHECK(expected.is_reference());
switch (expected.kind()) {
case kOptRef:
if (value->IsNull(isolate)) return true;
@@ -2135,27 +2195,46 @@ bool TypecheckJSObject(Isolate* isolate, const WasmModule* module,
case HeapType::kExtern:
case HeapType::kAny:
return true;
- case HeapType::kData: {
+ case HeapType::kData:
+ case HeapType::kEq:
+ case HeapType::kI31: {
// TODO(7748): Change this when we have a decision on the JS API for
// structs/arrays.
Handle<Name> key = isolate->factory()->wasm_wrapped_object_symbol();
LookupIterator it(isolate, value, key,
LookupIterator::OWN_SKIP_INTERCEPTOR);
- if (it.state() == LookupIterator::DATA) return true;
- *error_message =
- "dataref object must be null (if nullable) or wrapped with the "
- "wasm object wrapper";
- return false;
+ if (it.state() != LookupIterator::DATA) {
+ *error_message =
+ "eqref/dataref/i31ref object must be null (if nullable) or "
+ "wrapped with the wasm object wrapper";
+ return false;
+ }
+
+ if (expected.is_reference_to(HeapType::kEq)) return true;
+ Handle<Object> value = it.GetDataValue();
+
+ if (expected.is_reference_to(HeapType::kData)) {
+ if (value->IsSmi()) {
+ *error_message = "dataref-typed object must be a heap object";
+ return false;
+ }
+ return true;
+ } else {
+ DCHECK(expected.is_reference_to(HeapType::kI31));
+ if (!value->IsSmi()) {
+ *error_message = "i31ref-typed object cannot be a heap object";
+ return false;
+ }
+ return true;
+ }
}
- case HeapType::kEq:
- case HeapType::kI31:
- // TODO(7748): Implement when the JS API for i31ref is decided on.
- *error_message =
- "Assigning JS objects to eqref/i31ref not supported yet.";
- return false;
default:
- // Tables defined outside a module can't refer to user-defined types.
- if (module == nullptr) return false;
+ if (module == nullptr) {
+ *error_message =
+ "an object defined in JavaScript cannot be compatible with a "
+ "type defined in a Webassembly module";
+ return false;
+ }
DCHECK(module->has_type(expected.ref_index()));
if (module->has_signature(expected.ref_index())) {
if (WasmExportedFunction::IsWasmExportedFunction(*value)) {
@@ -2211,14 +2290,26 @@ bool TypecheckJSObject(Isolate* isolate, const WasmModule* module,
// TODO(7748): Implement when the JS API for structs/arrays is decided
// on.
*error_message =
- "Assigning to struct/array globals not supported yet.";
+ "passing struct/array-typed objects between Webassembly and "
+ "Javascript is not supported yet.";
return false;
}
case kRtt:
+ case kRttWithDepth:
// TODO(7748): Implement when the JS API for rtts is decided on.
- *error_message = "Assigning to rtt globals not supported yet.";
+ *error_message =
+ "passing rtts between Webassembly and Javascript is not supported "
+ "yet.";
return false;
- default:
+ case kI8:
+ case kI16:
+ case kI32:
+ case kI64:
+ case kF32:
+ case kF64:
+ case kS128:
+ case kVoid:
+ case kBottom:
UNREACHABLE();
}
}
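
The rewritten eqref/dataref/i31ref branch above reduces to a three-way distinction: eqref accepts any wrapped value, dataref requires a heap object (not a Smi), and i31ref requires a Smi. A minimal sketch of that decision, using a toy tagged value as a stand-in for V8's Smi/HeapObject machinery:

// Toy model of the eqref/dataref/i31ref check above; `is_smi` stands in for
// V8's small-integer tag.
#include <cstdio>

enum class Expected { kEq, kData, kI31 };
struct ToyValue { bool is_smi; };

bool Typecheck(Expected expected, ToyValue v) {
  switch (expected) {
    case Expected::kEq:   return true;       // eqref accepts both kinds.
    case Expected::kData: return !v.is_smi;  // dataref needs a heap object.
    case Expected::kI31:  return v.is_smi;   // i31ref needs a small integer.
  }
  return false;
}

int main() {
  std::printf("%d %d %d\n",
              Typecheck(Expected::kEq, {true}),    // 1
              Typecheck(Expected::kData, {true}),  // 0
              Typecheck(Expected::kI31, {true}));  // 1
  return 0;
}
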
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index 21156adab90..47bef60ac71 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -2,6 +2,10 @@
// this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_WASM_OBJECTS_H_
#define V8_WASM_WASM_OBJECTS_H_
@@ -914,6 +918,8 @@ class WasmStruct : public TorqueGeneratedWasmStruct<WasmStruct, HeapObject> {
inline ObjectSlot RawField(int raw_offset);
+ wasm::WasmValue GetFieldValue(uint32_t field_index);
+
DECL_CAST(WasmStruct)
DECL_PRINTER(WasmStruct)
@@ -928,6 +934,8 @@ class WasmArray : public TorqueGeneratedWasmArray<WasmArray, HeapObject> {
inline wasm::ArrayType* type() const;
static inline wasm::ArrayType* GcSafeType(Map map);
+ wasm::WasmValue GetElement(uint32_t index);
+
static inline int SizeFor(Map map, int length);
static inline int GcSafeSizeFor(Map map, int length);
diff --git a/deps/v8/src/wasm/wasm-opcodes-inl.h b/deps/v8/src/wasm/wasm-opcodes-inl.h
index 0d2d7748957..6b124b2dbc3 100644
--- a/deps/v8/src/wasm/wasm-opcodes-inl.h
+++ b/deps/v8/src/wasm/wasm-opcodes-inl.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_WASM_OPCODES_INL_H_
#define V8_WASM_WASM_OPCODES_INL_H_
@@ -54,9 +58,6 @@ namespace wasm {
CASE_I8x16_OP(name, str)
#define CASE_SIMDI_NO64X2_OP(name, str) \
CASE_I32x4_OP(name, str) CASE_I16x8_OP(name, str) CASE_I8x16_OP(name, str)
-#define CASE_SIMDV_OP(name, str) \
- CASE_V64x2_OP(name, str) CASE_V32x4_OP(name, str) CASE_V16x8_OP(name, str) \
- CASE_V8x16_OP(name, str)
#define CASE_SIGN_OP(TYPE, name, str) \
CASE_##TYPE##_OP(name##S, str "_s") CASE_##TYPE##_OP(name##U, str "_u")
#define CASE_UNSIGNED_OP(TYPE, name, str) CASE_##TYPE##_OP(name##U, str "_u")
@@ -246,14 +247,16 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_SIMD_OP(Ne, "ne")
CASE_SIMD_OP(Add, "add")
CASE_SIMD_OP(Sub, "sub")
- CASE_SIMD_OP(Mul, "mul")
+ CASE_I16x8_OP(Mul, "mul")
+ CASE_I32x4_OP(Mul, "mul")
+ CASE_I64x2_OP(Mul, "mul")
+ CASE_SIMDF_OP(Mul, "mul")
CASE_SIMDF_OP(Div, "div")
CASE_SIMDF_OP(Lt, "lt")
CASE_SIMDF_OP(Le, "le")
CASE_SIMDF_OP(Gt, "gt")
CASE_SIMDF_OP(Ge, "ge")
CASE_SIMDF_OP(Abs, "abs")
- CASE_F32x4_OP(AddHoriz, "add_horizontal")
CASE_F32x4_OP(RecipApprox, "recip_approx")
CASE_F32x4_OP(RecipSqrtApprox, "recip_sqrt_approx")
CASE_SIMDF_OP(Min, "min")
@@ -287,8 +290,6 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_CONVERT_OP(Convert, I64x2, I32x4High, "i32x4_high", "convert")
CASE_SIGN_OP(SIMDI, Shr, "shr")
CASE_SIMDI_OP(Shl, "shl")
- CASE_I32x4_OP(AddHoriz, "add_horizontal")
- CASE_I16x8_OP(AddHoriz, "add_horizontal")
CASE_SIGN_OP(I16x8, AddSat, "add_sat")
CASE_SIGN_OP(I8x16, AddSat, "add_sat")
CASE_SIGN_OP(I16x8, SubSat, "sub_sat")
@@ -302,7 +303,7 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_I8x16_OP(Swizzle, "swizzle")
CASE_I8x16_OP(Shuffle, "shuffle")
CASE_V128_OP(AnyTrue, "any_true")
- CASE_SIMDV_OP(AllTrue, "all_true")
+ CASE_SIMDI_OP(AllTrue, "all_true")
CASE_SIMDF_OP(Qfma, "qfma")
CASE_SIMDF_OP(Qfms, "qfms")
@@ -352,15 +353,10 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_SIGN_OP(I32x4, ExtMulHighI16x8, "extmul_high_i16x8")
CASE_SIGN_OP(I64x2, ExtMulLowI32x4, "extmul_low_i32x4")
CASE_SIGN_OP(I64x2, ExtMulHighI32x4, "extmul_high_i32x4")
- CASE_SIMDI_NO64X2_OP(SignSelect, "signselect")
- CASE_I64x2_OP(SignSelect, "signselect")
CASE_SIGN_OP(I32x4, ExtAddPairwiseI16x8, "extadd_pairwise_i16x8")
CASE_SIGN_OP(I16x8, ExtAddPairwiseI8x16, "extadd_pairwise_i8x16")
- CASE_OP(PrefetchT, "prefetch_t")
- CASE_OP(PrefetchNT, "prefetch_nt")
-
CASE_F64x2_OP(ConvertLowI32x4S, "convert_low_i32x4_s")
CASE_F64x2_OP(ConvertLowI32x4U, "convert_low_i32x4_u")
CASE_I32x4_OP(TruncSatF64x2SZero, "trunc_sat_f64x2_s_zero")
@@ -543,11 +539,10 @@ constexpr bool WasmOpcodes::IsThrowingOpcode(WasmOpcode opcode) {
}
// static
-constexpr bool WasmOpcodes::IsSimdPostMvpOpcode(WasmOpcode opcode) {
+constexpr bool WasmOpcodes::IsRelaxedSimdOpcode(WasmOpcode opcode) {
switch (opcode) {
#define CHECK_OPCODE(name, opcode, _) case kExpr##name:
- FOREACH_SIMD_POST_MVP_OPCODE(CHECK_OPCODE)
- FOREACH_SIMD_POST_MVP_MEM_OPCODE(CHECK_OPCODE)
+ FOREACH_RELAXED_SIMD_OPCODE(CHECK_OPCODE)
#undef CHECK_OPCODE
return true;
default:
@@ -565,7 +560,7 @@ enum WasmOpcodeSig : byte {
#undef DECLARE_SIG_ENUM
#define DECLARE_SIG(name, ...) \
constexpr ValueType kTypes_##name[] = {__VA_ARGS__}; \
- constexpr int kReturnsCount_##name = kTypes_##name[0] == kWasmStmt ? 0 : 1; \
+ constexpr int kReturnsCount_##name = kTypes_##name[0] == kWasmVoid ? 0 : 1; \
constexpr FunctionSig kSig_##name( \
kReturnsCount_##name, static_cast<int>(arraysize(kTypes_##name)) - 1, \
kTypes_##name + (1 - kReturnsCount_##name));
@@ -593,8 +588,7 @@ constexpr WasmOpcodeSig GetAsmJsOpcodeSigIndex(byte opcode) {
constexpr WasmOpcodeSig GetSimdOpcodeSigIndex(byte opcode) {
#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
return FOREACH_SIMD_0_OPERAND_OPCODE(CASE) FOREACH_SIMD_MEM_OPCODE(CASE)
- FOREACH_SIMD_MEM_1_OPERAND_OPCODE(CASE)
- FOREACH_SIMD_POST_MVP_MEM_OPCODE(CASE) kSigEnum_None;
+ FOREACH_SIMD_MEM_1_OPERAND_OPCODE(CASE) kSigEnum_None;
#undef CASE
}
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index 5d1c89342e5..94618077519 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -39,12 +39,10 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
return false;
}
for (auto type : sig->all()) {
- // TODO(7748): Allow structs, arrays, rtts and i31s when their
- // JS-interaction is decided on.
- if (type == kWasmS128 || type.is_reference_to(HeapType::kEq) ||
- type.is_reference_to(HeapType::kI31) ||
- (type.has_index() && !module->has_signature(type.ref_index())) ||
- type.is_rtt()) {
+ // TODO(7748): Allow structs, arrays, and rtts when their JS-interaction is
+ // decided on.
+ if (type == kWasmS128 || type.is_rtt() ||
+ (type.has_index() && !module->has_signature(type.ref_index()))) {
return false;
}
}
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index d033fb08d8b..909cacadd25 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_WASM_OPCODES_H_
#define V8_WASM_WASM_OPCODES_H_
@@ -293,18 +297,18 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(S128Load32Splat, 0xfd09, s_i) \
V(S128Load64Splat, 0xfd0a, s_i) \
V(S128StoreMem, 0xfd0b, v_is) \
- V(S128Load32Zero, 0xfdfc, s_i) \
- V(S128Load64Zero, 0xfdfd, s_i)
+ V(S128Load32Zero, 0xfd5c, s_i) \
+ V(S128Load64Zero, 0xfd5d, s_i)
#define FOREACH_SIMD_MEM_1_OPERAND_OPCODE(V) \
- V(S128Load8Lane, 0xfd58, s_is) \
- V(S128Load16Lane, 0xfd59, s_is) \
- V(S128Load32Lane, 0xfd5a, s_is) \
- V(S128Load64Lane, 0xfd5b, s_is) \
- V(S128Store8Lane, 0xfd5c, v_is) \
- V(S128Store16Lane, 0xfd5d, v_is) \
- V(S128Store32Lane, 0xfd5e, v_is) \
- V(S128Store64Lane, 0xfd5f, v_is)
+ V(S128Load8Lane, 0xfd54, s_is) \
+ V(S128Load16Lane, 0xfd55, s_is) \
+ V(S128Load32Lane, 0xfd56, s_is) \
+ V(S128Load64Lane, 0xfd57, s_is) \
+ V(S128Store8Lane, 0xfd58, v_is) \
+ V(S128Store16Lane, 0xfd59, v_is) \
+ V(S128Store32Lane, 0xfd5a, v_is) \
+ V(S128Store64Lane, 0xfd5b, v_is)
#define FOREACH_SIMD_CONST_OPCODE(V) V(S128Const, 0xfd0c, _)
@@ -348,12 +352,6 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(I32x4LeU, 0xfd3e, s_ss) \
V(I32x4GeS, 0xfd3f, s_ss) \
V(I32x4GeU, 0xfd40, s_ss) \
- V(I64x2Eq, 0xfdc0, s_ss) \
- V(I64x2LtS, 0xfd74, s_ss) \
- V(I64x2GtS, 0xfd7a, s_ss) \
- V(I64x2LeS, 0xfdee, s_ss) \
- V(I64x2GeS, 0xfde2, s_ss) \
- V(I64x2Ne, 0xfdd0, s_ss) \
V(F32x4Eq, 0xfd41, s_ss) \
V(F32x4Ne, 0xfd42, s_ss) \
V(F32x4Lt, 0xfd43, s_ss) \
@@ -372,13 +370,20 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(S128Or, 0xfd50, s_ss) \
V(S128Xor, 0xfd51, s_ss) \
V(S128Select, 0xfd52, s_sss) \
+ V(V128AnyTrue, 0xfd53, i_s) \
+ V(F32x4DemoteF64x2Zero, 0xfd5e, s_s) \
+ V(F64x2PromoteLowF32x4, 0xfd5f, s_s) \
V(I8x16Abs, 0xfd60, s_s) \
V(I8x16Neg, 0xfd61, s_s) \
- V(V128AnyTrue, 0xfd62, i_s) \
- V(V8x16AllTrue, 0xfd63, i_s) \
+ V(I8x16Popcnt, 0xfd62, s_s) \
+ V(I8x16AllTrue, 0xfd63, i_s) \
V(I8x16BitMask, 0xfd64, i_s) \
V(I8x16SConvertI16x8, 0xfd65, s_ss) \
V(I8x16UConvertI16x8, 0xfd66, s_ss) \
+ V(F32x4Ceil, 0xfd67, s_s) \
+ V(F32x4Floor, 0xfd68, s_s) \
+ V(F32x4Trunc, 0xfd69, s_s) \
+ V(F32x4NearestInt, 0xfd6a, s_s) \
V(I8x16Shl, 0xfd6b, s_si) \
V(I8x16ShrS, 0xfd6c, s_si) \
V(I8x16ShrU, 0xfd6d, s_si) \
@@ -388,15 +393,22 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(I8x16Sub, 0xfd71, s_ss) \
V(I8x16SubSatS, 0xfd72, s_ss) \
V(I8x16SubSatU, 0xfd73, s_ss) \
+ V(F64x2Ceil, 0xfd74, s_s) \
+ V(F64x2Floor, 0xfd75, s_s) \
V(I8x16MinS, 0xfd76, s_ss) \
V(I8x16MinU, 0xfd77, s_ss) \
V(I8x16MaxS, 0xfd78, s_ss) \
V(I8x16MaxU, 0xfd79, s_ss) \
+ V(F64x2Trunc, 0xfd7a, s_s) \
V(I8x16RoundingAverageU, 0xfd7b, s_ss) \
- V(I8x16Popcnt, 0xfd7c, s_s) \
+ V(I16x8ExtAddPairwiseI8x16S, 0xfd7c, s_s) \
+ V(I16x8ExtAddPairwiseI8x16U, 0xfd7d, s_s) \
+ V(I32x4ExtAddPairwiseI16x8S, 0xfd7e, s_s) \
+ V(I32x4ExtAddPairwiseI16x8U, 0xfd7f, s_s) \
V(I16x8Abs, 0xfd80, s_s) \
V(I16x8Neg, 0xfd81, s_s) \
- V(V16x8AllTrue, 0xfd83, i_s) \
+ V(I16x8Q15MulRSatS, 0xfd82, s_ss) \
+ V(I16x8AllTrue, 0xfd83, i_s) \
V(I16x8BitMask, 0xfd84, i_s) \
V(I16x8SConvertI32x4, 0xfd85, s_ss) \
V(I16x8UConvertI32x4, 0xfd86, s_ss) \
@@ -413,22 +425,20 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(I16x8Sub, 0xfd91, s_ss) \
V(I16x8SubSatS, 0xfd92, s_ss) \
V(I16x8SubSatU, 0xfd93, s_ss) \
+ V(F64x2NearestInt, 0xfd94, s_s) \
V(I16x8Mul, 0xfd95, s_ss) \
V(I16x8MinS, 0xfd96, s_ss) \
V(I16x8MinU, 0xfd97, s_ss) \
V(I16x8MaxS, 0xfd98, s_ss) \
V(I16x8MaxU, 0xfd99, s_ss) \
V(I16x8RoundingAverageU, 0xfd9b, s_ss) \
- V(I16x8ExtMulLowI8x16S, 0xfd9a, s_ss) \
+ V(I16x8ExtMulLowI8x16S, 0xfd9c, s_ss) \
V(I16x8ExtMulHighI8x16S, 0xfd9d, s_ss) \
V(I16x8ExtMulLowI8x16U, 0xfd9e, s_ss) \
V(I16x8ExtMulHighI8x16U, 0xfd9f, s_ss) \
- V(I16x8Q15MulRSatS, 0xfd9c, s_ss) \
- V(I16x8ExtAddPairwiseI8x16S, 0xfdc2, s_s) \
- V(I16x8ExtAddPairwiseI8x16U, 0xfdc3, s_s) \
V(I32x4Abs, 0xfda0, s_s) \
V(I32x4Neg, 0xfda1, s_s) \
- V(V32x4AllTrue, 0xfda3, i_s) \
+ V(I32x4AllTrue, 0xfda3, i_s) \
V(I32x4BitMask, 0xfda4, i_s) \
V(I32x4SConvertI16x8Low, 0xfda7, s_s) \
V(I32x4SConvertI16x8High, 0xfda8, s_s) \
@@ -445,32 +455,34 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(I32x4MaxS, 0xfdb8, s_ss) \
V(I32x4MaxU, 0xfdb9, s_ss) \
V(I32x4DotI16x8S, 0xfdba, s_ss) \
- V(I32x4ExtMulLowI16x8S, 0xfdbb, s_ss) \
+ V(I32x4ExtMulLowI16x8S, 0xfdbc, s_ss) \
V(I32x4ExtMulHighI16x8S, 0xfdbd, s_ss) \
V(I32x4ExtMulLowI16x8U, 0xfdbe, s_ss) \
V(I32x4ExtMulHighI16x8U, 0xfdbf, s_ss) \
- V(I32x4TruncSatF64x2SZero, 0xfd55, s_s) \
- V(I32x4TruncSatF64x2UZero, 0xfd56, s_s) \
- V(I32x4ExtAddPairwiseI16x8S, 0xfda5, s_s) \
- V(I32x4ExtAddPairwiseI16x8U, 0xfda6, s_s) \
- V(I64x2Abs, 0xfda2, s_s) \
+ V(I64x2Abs, 0xfdc0, s_s) \
V(I64x2Neg, 0xfdc1, s_s) \
- V(V64x2AllTrue, 0xfdcf, i_s) \
+ V(I64x2AllTrue, 0xfdc3, i_s) \
V(I64x2BitMask, 0xfdc4, i_s) \
+ V(I64x2SConvertI32x4Low, 0xfdc7, s_s) \
+ V(I64x2SConvertI32x4High, 0xfdc8, s_s) \
+ V(I64x2UConvertI32x4Low, 0xfdc9, s_s) \
+ V(I64x2UConvertI32x4High, 0xfdca, s_s) \
V(I64x2Shl, 0xfdcb, s_si) \
V(I64x2ShrS, 0xfdcc, s_si) \
V(I64x2ShrU, 0xfdcd, s_si) \
V(I64x2Add, 0xfdce, s_ss) \
V(I64x2Sub, 0xfdd1, s_ss) \
V(I64x2Mul, 0xfdd5, s_ss) \
- V(I64x2ExtMulLowI32x4S, 0xfdd2, s_ss) \
- V(I64x2ExtMulHighI32x4S, 0xfdd3, s_ss) \
- V(I64x2ExtMulLowI32x4U, 0xfdd6, s_ss) \
- V(I64x2ExtMulHighI32x4U, 0xfdd7, s_ss) \
- V(I64x2SConvertI32x4Low, 0xfdc7, s_s) \
- V(I64x2SConvertI32x4High, 0xfdc8, s_s) \
- V(I64x2UConvertI32x4Low, 0xfdc9, s_s) \
- V(I64x2UConvertI32x4High, 0xfdca, s_s) \
+ V(I64x2Eq, 0xfdd6, s_ss) \
+ V(I64x2Ne, 0xfdd7, s_ss) \
+ V(I64x2LtS, 0xfdd8, s_ss) \
+ V(I64x2GtS, 0xfdd9, s_ss) \
+ V(I64x2LeS, 0xfdda, s_ss) \
+ V(I64x2GeS, 0xfddb, s_ss) \
+ V(I64x2ExtMulLowI32x4S, 0xfddc, s_ss) \
+ V(I64x2ExtMulHighI32x4S, 0xfddd, s_ss) \
+ V(I64x2ExtMulLowI32x4U, 0xfdde, s_ss) \
+ V(I64x2ExtMulHighI32x4U, 0xfddf, s_ss) \
V(F32x4Abs, 0xfde0, s_s) \
V(F32x4Neg, 0xfde1, s_s) \
V(F32x4Sqrt, 0xfde3, s_s) \
@@ -482,7 +494,6 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(F32x4Max, 0xfde9, s_ss) \
V(F32x4Pmin, 0xfdea, s_ss) \
V(F32x4Pmax, 0xfdeb, s_ss) \
- V(F32x4DemoteF64x2Zero, 0xfd57, s_s) \
V(F64x2Abs, 0xfdec, s_s) \
V(F64x2Neg, 0xfded, s_s) \
V(F64x2Sqrt, 0xfdef, s_s) \
@@ -498,37 +509,18 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(I32x4UConvertF32x4, 0xfdf9, s_s) \
V(F32x4SConvertI32x4, 0xfdfa, s_s) \
V(F32x4UConvertI32x4, 0xfdfb, s_s) \
- V(F32x4Ceil, 0xfdd8, s_s) \
- V(F32x4Floor, 0xfdd9, s_s) \
- V(F32x4Trunc, 0xfdda, s_s) \
- V(F32x4NearestInt, 0xfddb, s_s) \
- V(F64x2Ceil, 0xfddc, s_s) \
- V(F64x2Floor, 0xfddd, s_s) \
- V(F64x2Trunc, 0xfdde, s_s) \
- V(F64x2NearestInt, 0xfddf, s_s) \
- V(F64x2ConvertLowI32x4S, 0xfd53, s_s) \
- V(F64x2ConvertLowI32x4U, 0xfd54, s_s) \
- V(F64x2PromoteLowF32x4, 0xfd69, s_s)
-
-#define FOREACH_SIMD_POST_MVP_MEM_OPCODE(V) \
- V(PrefetchT, 0xfdc5, v_i) \
- V(PrefetchNT, 0xfdc6, v_i)
-
-#define FOREACH_SIMD_POST_MVP_OPCODE(V) \
- V(I8x16Mul, 0xfd75, s_ss) \
- V(I8x16SignSelect, 0xfd7d, s_sss) \
- V(I16x8SignSelect, 0xfd7e, s_sss) \
- V(I32x4SignSelect, 0xfd7f, s_sss) \
- V(I64x2SignSelect, 0xfd94, s_sss) \
- V(F32x4Qfma, 0xfdb4, s_sss) \
- V(F32x4Qfms, 0xfdd4, s_sss) \
- V(F64x2Qfma, 0xfdfe, s_sss) \
- V(F64x2Qfms, 0xfdff, s_sss) \
- V(I16x8AddHoriz, 0xfdaf, s_ss) \
- V(I32x4AddHoriz, 0xfdb0, s_ss) \
- V(F32x4AddHoriz, 0xfdb2, s_ss) \
- V(F32x4RecipApprox, 0xfdb3, s_s) \
- V(F32x4RecipSqrtApprox, 0xfdbc, s_s)
+ V(I32x4TruncSatF64x2SZero, 0xfdfc, s_s) \
+ V(I32x4TruncSatF64x2UZero, 0xfdfd, s_s) \
+ V(F64x2ConvertLowI32x4S, 0xfdfe, s_s) \
+ V(F64x2ConvertLowI32x4U, 0xfdff, s_s)
+
+#define FOREACH_RELAXED_SIMD_OPCODE(V) \
+ V(F32x4Qfma, 0xfdaf, s_sss) \
+ V(F32x4Qfms, 0xfdb0, s_sss) \
+ V(F64x2Qfma, 0xfdcf, s_sss) \
+ V(F64x2Qfms, 0xfdd0, s_sss) \
+ V(F32x4RecipApprox, 0xfdd2, s_s) \
+ V(F32x4RecipSqrtApprox, 0xfdd3, s_s)
#define FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
V(I8x16ExtractLaneS, 0xfd15, _) \
@@ -550,7 +542,7 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
#define FOREACH_SIMD_0_OPERAND_OPCODE(V) \
FOREACH_SIMD_MVP_0_OPERAND_OPCODE(V) \
- FOREACH_SIMD_POST_MVP_OPCODE(V)
+ FOREACH_RELAXED_SIMD_OPCODE(V)
#define FOREACH_SIMD_1_OPERAND_OPCODE(V) \
FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
@@ -562,7 +554,6 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
FOREACH_SIMD_MASK_OPERAND_OPCODE(V) \
FOREACH_SIMD_MEM_OPCODE(V) \
FOREACH_SIMD_MEM_1_OPERAND_OPCODE(V) \
- FOREACH_SIMD_POST_MVP_MEM_OPCODE(V) \
FOREACH_SIMD_CONST_OPCODE(V)
#define FOREACH_NUMERIC_OPCODE(V) \
@@ -712,7 +703,7 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
// All signatures.
#define FOREACH_SIGNATURE(V) \
FOREACH_SIMD_SIGNATURE(V) \
- V(v_v, kWasmStmt) \
+ V(v_v, kWasmVoid) \
V(i_ii, kWasmI32, kWasmI32, kWasmI32) \
V(i_i, kWasmI32, kWasmI32) \
V(i_v, kWasmI32) \
@@ -737,15 +728,15 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(d_f, kWasmF64, kWasmF32) \
V(d_i, kWasmF64, kWasmI32) \
V(d_l, kWasmF64, kWasmI64) \
- V(v_i, kWasmStmt, kWasmI32) \
- V(v_ii, kWasmStmt, kWasmI32, kWasmI32) \
- V(v_id, kWasmStmt, kWasmI32, kWasmF64) \
+ V(v_i, kWasmVoid, kWasmI32) \
+ V(v_ii, kWasmVoid, kWasmI32, kWasmI32) \
+ V(v_id, kWasmVoid, kWasmI32, kWasmF64) \
V(d_id, kWasmF64, kWasmI32, kWasmF64) \
- V(v_if, kWasmStmt, kWasmI32, kWasmF32) \
+ V(v_if, kWasmVoid, kWasmI32, kWasmF32) \
V(f_if, kWasmF32, kWasmI32, kWasmF32) \
- V(v_il, kWasmStmt, kWasmI32, kWasmI64) \
+ V(v_il, kWasmVoid, kWasmI32, kWasmI64) \
V(l_il, kWasmI64, kWasmI32, kWasmI64) \
- V(v_iii, kWasmStmt, kWasmI32, kWasmI32, kWasmI32) \
+ V(v_iii, kWasmVoid, kWasmI32, kWasmI32, kWasmI32) \
V(i_iii, kWasmI32, kWasmI32, kWasmI32, kWasmI32) \
V(l_ill, kWasmI64, kWasmI32, kWasmI64, kWasmI64) \
V(i_iil, kWasmI32, kWasmI32, kWasmI32, kWasmI64) \
@@ -763,7 +754,7 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(s_l, kWasmS128, kWasmI64) \
V(s_si, kWasmS128, kWasmS128, kWasmI32) \
V(i_s, kWasmI32, kWasmS128) \
- V(v_is, kWasmStmt, kWasmI32, kWasmS128) \
+ V(v_is, kWasmVoid, kWasmI32, kWasmS128) \
V(s_sss, kWasmS128, kWasmS128, kWasmS128, kWasmS128) \
V(s_is, kWasmS128, kWasmI32, kWasmS128)
@@ -800,7 +791,7 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
static constexpr bool IsControlOpcode(WasmOpcode);
static constexpr bool IsExternRefOpcode(WasmOpcode);
static constexpr bool IsThrowingOpcode(WasmOpcode);
- static constexpr bool IsSimdPostMvpOpcode(WasmOpcode);
+ static constexpr bool IsRelaxedSimdOpcode(WasmOpcode);
// Check whether the given opcode always jumps, i.e. all instructions after
// this one in the current block are dead. Returns false for |end|.
static constexpr bool IsUnconditionalJump(WasmOpcode);
diff --git a/deps/v8/src/wasm/wasm-result.h b/deps/v8/src/wasm/wasm-result.h
index 8f0d5427aaa..17b6970cc6e 100644
--- a/deps/v8/src/wasm/wasm-result.h
+++ b/deps/v8/src/wasm/wasm-result.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_WASM_RESULT_H_
#define V8_WASM_WASM_RESULT_H_
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index 447e8140404..b2e6f0c4d8a 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -19,6 +19,7 @@
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-objects.h"
diff --git a/deps/v8/src/wasm/wasm-serialization.h b/deps/v8/src/wasm/wasm-serialization.h
index 9e303ce65c5..a8aff9a6b88 100644
--- a/deps/v8/src/wasm/wasm-serialization.h
+++ b/deps/v8/src/wasm/wasm-serialization.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_WASM_SERIALIZATION_H_
#define V8_WASM_WASM_SERIALIZATION_H_
diff --git a/deps/v8/src/wasm/wasm-subtyping.cc b/deps/v8/src/wasm/wasm-subtyping.cc
index 614d95f3a45..b0e8105a605 100644
--- a/deps/v8/src/wasm/wasm-subtyping.cc
+++ b/deps/v8/src/wasm/wasm-subtyping.cc
@@ -281,7 +281,7 @@ V8_NOINLINE V8_EXPORT_PRIVATE bool IsSubtypeOfImpl(
case kS128:
case kI8:
case kI16:
- case kStmt:
+ case kVoid:
case kBottom:
return subtype == supertype;
case kRtt:
@@ -303,14 +303,14 @@ V8_NOINLINE V8_EXPORT_PRIVATE bool IsSubtypeOfImpl(
break;
}
- DCHECK(subtype.is_object_reference_type());
+ DCHECK(subtype.is_object_reference());
bool compatible_references = subtype.is_nullable()
? supertype.is_nullable()
- : supertype.is_object_reference_type();
+ : supertype.is_object_reference();
if (!compatible_references) return false;
- DCHECK(supertype.is_object_reference_type());
+ DCHECK(supertype.is_object_reference());
// Now check that sub_heap and super_heap are subtype-related.
diff --git a/deps/v8/src/wasm/wasm-subtyping.h b/deps/v8/src/wasm/wasm-subtyping.h
index 7386baf10f8..0c35f7c4708 100644
--- a/deps/v8/src/wasm/wasm-subtyping.h
+++ b/deps/v8/src/wasm/wasm-subtyping.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_WASM_SUBTYPING_H_
#define V8_WASM_WASM_SUBTYPING_H_
diff --git a/deps/v8/src/wasm/wasm-tier.h b/deps/v8/src/wasm/wasm-tier.h
index 70b8543746f..51c0adedde3 100644
--- a/deps/v8/src/wasm/wasm-tier.h
+++ b/deps/v8/src/wasm/wasm-tier.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_WASM_TIER_H_
#define V8_WASM_WASM_TIER_H_
diff --git a/deps/v8/src/wasm/wasm-value.h b/deps/v8/src/wasm/wasm-value.h
index 81dbd3e9cb0..0a1d2b69e25 100644
--- a/deps/v8/src/wasm/wasm-value.h
+++ b/deps/v8/src/wasm/wasm-value.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
#ifndef V8_WASM_WASM_VALUE_H_
#define V8_WASM_WASM_VALUE_H_
@@ -66,43 +70,56 @@ FOREACH_SIMD_TYPE(DECLARE_CAST)
// - name (for to_<name>() method)
// - wasm type
// - c type
-#define FOREACH_WASMVAL_TYPE(V) \
- V(i32, kWasmI32, int32_t) \
- V(u32, kWasmI32, uint32_t) \
- V(i64, kWasmI64, int64_t) \
- V(u64, kWasmI64, uint64_t) \
- V(f32, kWasmF32, float) \
- V(f32_boxed, kWasmF32, Float32) \
- V(f64, kWasmF64, double) \
- V(f64_boxed, kWasmF64, Float64) \
- V(s128, kWasmS128, Simd128) \
- V(externref, kWasmExternRef, Handle<Object>)
+#define FOREACH_PRIMITIVE_WASMVAL_TYPE(V) \
+ V(i8, kWasmI8, int8_t) \
+ V(i16, kWasmI16, int16_t) \
+ V(i32, kWasmI32, int32_t) \
+ V(u32, kWasmI32, uint32_t) \
+ V(i64, kWasmI64, int64_t) \
+ V(u64, kWasmI64, uint64_t) \
+ V(f32, kWasmF32, float) \
+ V(f32_boxed, kWasmF32, Float32) \
+ V(f64, kWasmF64, double) \
+ V(f64_boxed, kWasmF64, Float64) \
+ V(s128, kWasmS128, Simd128)
ASSERT_TRIVIALLY_COPYABLE(Handle<Object>);
// A wasm value with type information.
class WasmValue {
public:
- WasmValue() : type_(kWasmStmt), bit_pattern_{} {}
-
-#define DEFINE_TYPE_SPECIFIC_METHODS(name, localtype, ctype) \
- explicit WasmValue(ctype v) : type_(localtype), bit_pattern_{} { \
- static_assert(sizeof(ctype) <= sizeof(bit_pattern_), \
- "size too big for WasmValue"); \
- base::WriteUnalignedValue<ctype>(reinterpret_cast<Address>(bit_pattern_), \
- v); \
- } \
- ctype to_##name() const { \
- DCHECK_EQ(localtype, type_); \
- return to_##name##_unchecked(); \
- } \
- ctype to_##name##_unchecked() const { \
- return base::ReadUnalignedValue<ctype>( \
- reinterpret_cast<Address>(bit_pattern_)); \
+ WasmValue() : type_(kWasmVoid), bit_pattern_{} {}
+
+#define DEFINE_TYPE_SPECIFIC_METHODS(name, localtype, ctype) \
+ explicit WasmValue(ctype v) : type_(localtype), bit_pattern_{} { \
+ static_assert(sizeof(ctype) <= sizeof(bit_pattern_), \
+ "size too big for WasmValue"); \
+ base::WriteLittleEndianValue<ctype>( \
+ reinterpret_cast<Address>(bit_pattern_), v); \
+ } \
+ ctype to_##name() const { \
+ DCHECK_EQ(localtype, type_); \
+ return to_##name##_unchecked(); \
+ } \
+ ctype to_##name##_unchecked() const { \
+ return base::ReadLittleEndianValue<ctype>( \
+ reinterpret_cast<Address>(bit_pattern_)); \
}
- FOREACH_WASMVAL_TYPE(DEFINE_TYPE_SPECIFIC_METHODS)
+ FOREACH_PRIMITIVE_WASMVAL_TYPE(DEFINE_TYPE_SPECIFIC_METHODS)
#undef DEFINE_TYPE_SPECIFIC_METHODS
+ WasmValue(Handle<Object> ref, ValueType type) : type_(type), bit_pattern_{} {
+ static_assert(sizeof(Handle<Object>) <= sizeof(bit_pattern_),
+ "bit_pattern_ must be large enough to fit a Handle");
+ base::WriteUnalignedValue<Handle<Object>>(
+ reinterpret_cast<Address>(bit_pattern_), ref);
+ }
+ Handle<Object> to_ref() const {
+ DCHECK(type_.is_reference());
+ return base::ReadUnalignedValue<Handle<Object>>(
+ reinterpret_cast<Address>(bit_pattern_));
+ }
+
ValueType type() const { return type_; }
// Checks equality of type and bit pattern (also for float and double values).
@@ -137,7 +154,7 @@ class WasmValue {
inline ctype WasmValue::to() const { \
return to_##name(); \
}
-FOREACH_WASMVAL_TYPE(DECLARE_CAST)
+FOREACH_PRIMITIVE_WASMVAL_TYPE(DECLARE_CAST)
#undef DECLARE_CAST
} // namespace wasm
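
The WasmValue changes above keep the same underlying pattern: a type tag plus an untyped bit_pattern_ buffer, with per-type accessors generated by macros and a new ref variant that stores a Handle together with its ValueType. A minimal stand-alone sketch of that pattern, assuming a toy value class (not V8's WasmValue):

// Toy version of the tag-plus-bit-pattern pattern used by WasmValue above.
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstring>

enum class Kind { kVoid, kI32, kF64 };

class ToyValue {
 public:
  ToyValue() : kind_(Kind::kVoid), bits_{} {}
  explicit ToyValue(int32_t v) : kind_(Kind::kI32), bits_{} {
    std::memcpy(bits_, &v, sizeof v);
  }
  explicit ToyValue(double v) : kind_(Kind::kF64), bits_{} {
    std::memcpy(bits_, &v, sizeof v);
  }
  int32_t to_i32() const {
    assert(kind_ == Kind::kI32);  // Accessors check the tag, like to_i32().
    int32_t v;
    std::memcpy(&v, bits_, sizeof v);
    return v;
  }
  double to_f64() const {
    assert(kind_ == Kind::kF64);
    double v;
    std::memcpy(&v, bits_, sizeof v);
    return v;
  }
  Kind kind() const { return kind_; }

 private:
  Kind kind_;
  uint8_t bits_[16];  // Wide enough for the widest payload (s128 in V8).
};

int main() {
  ToyValue a(int32_t{42});
  ToyValue b(3.5);
  std::printf("%d %f\n", a.to_i32(), b.to_f64());
  return 0;
}
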
diff --git a/deps/v8/src/web-snapshot/OWNERS b/deps/v8/src/web-snapshot/OWNERS
new file mode 100644
index 00000000000..972edfd4566
--- /dev/null
+++ b/deps/v8/src/web-snapshot/OWNERS
@@ -0,0 +1,4 @@
+marja@chromium.org
+leszeks@chromium.org
+syg@chromium.org
+verwaest@chromium.org
diff --git a/deps/v8/src/web-snapshot/web-snapshot.cc b/deps/v8/src/web-snapshot/web-snapshot.cc
new file mode 100644
index 00000000000..16bbc9af3e4
--- /dev/null
+++ b/deps/v8/src/web-snapshot/web-snapshot.cc
@@ -0,0 +1,845 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/web-snapshot/web-snapshot.h"
+
+#include <limits>
+
+#include "include/v8.h"
+#include "src/api/api-inl.h"
+#include "src/base/platform/wrappers.h"
+#include "src/handles/handles.h"
+#include "src/objects/contexts.h"
+#include "src/objects/script.h"
+
+namespace v8 {
+namespace internal {
+
+// When encountering an error during deserialization, we note down the error
+// but don't bail out of processing the snapshot. This speeds up
+// deserialization: the error case becomes slower since we don't bail out, but
+// the non-error case is faster, since we don't repeatedly check for errors.
+// (Invariant: we might fill our internal data structures with arbitrary data,
+// but it shouldn't have an observable effect.)
+
+// This doesn't make it any harder to process the data in a robust and secure
+// way: we cannot trust the data anyway, so every upcoming byte can have an
+// arbitrary value, regardless of whether we've encountered an error before.
+void WebSnapshotSerializerDeserializer::Throw(const char* message) {
+ if (error_message_ != nullptr) {
+ return;
+ }
+ error_message_ = message;
+ if (!isolate_->has_pending_exception()) {
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+ v8_isolate->ThrowException(v8::Exception::Error(
+ v8::String::NewFromUtf8(v8_isolate, message).ToLocalChecked()));
+ }
+}
+
+WebSnapshotSerializer::WebSnapshotSerializer(v8::Isolate* isolate)
+ : WebSnapshotSerializerDeserializer(
+ reinterpret_cast<v8::internal::Isolate*>(isolate)),
+ string_serializer_(isolate_, nullptr),
+ map_serializer_(isolate_, nullptr),
+ context_serializer_(isolate_, nullptr),
+ function_serializer_(isolate_, nullptr),
+ object_serializer_(isolate_, nullptr),
+ export_serializer_(isolate_, nullptr),
+ string_ids_(isolate_->heap()),
+ map_ids_(isolate_->heap()),
+ context_ids_(isolate_->heap()),
+ function_ids_(isolate_->heap()),
+ object_ids_(isolate_->heap()) {}
+
+WebSnapshotSerializer::~WebSnapshotSerializer() {}
+
+bool WebSnapshotSerializer::TakeSnapshot(
+ v8::Local<v8::Context> context, const std::vector<std::string>& exports,
+ WebSnapshotData& data_out) {
+ if (string_ids_.size() > 0) {
+ Throw("Web snapshot: Can't reuse WebSnapshotSerializer");
+ return false;
+ }
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+ for (const std::string& export_name : exports) {
+ v8::ScriptCompiler::Source source(
+ v8::String::NewFromUtf8(v8_isolate, export_name.c_str(),
+ NewStringType::kNormal,
+ static_cast<int>(export_name.length()))
+ .ToLocalChecked());
+ auto script = ScriptCompiler::Compile(context, &source).ToLocalChecked();
+ v8::MaybeLocal<v8::Value> script_result = script->Run(context);
+ v8::Local<v8::Object> v8_object;
+ if (script_result.IsEmpty() ||
+ !script_result.ToLocalChecked()->ToObject(context).ToLocal(
+ &v8_object)) {
+ Throw("Web snapshot: Exported object not found");
+ return false;
+ }
+
+ auto object = Handle<JSObject>::cast(Utils::OpenHandle(*v8_object));
+ SerializeExport(object, export_name);
+ }
+ WriteSnapshot(data_out.buffer, data_out.buffer_size);
+ return !has_error();
+}
+
+// Format (full snapshot):
+// - String count
+// - For each string:
+// - Serialized string
+// - Shape count
+// - For each shape:
+// - Serialized shape
+// - Context count
+// - For each context:
+// - Serialized context
+// - Function count
+// - For each function:
+// - Serialized function
+// - Object count
+// - For each object:
+// - Serialized object
+// - Export count
+// - For each export:
+// - Serialized export
+void WebSnapshotSerializer::WriteSnapshot(uint8_t*& buffer,
+ size_t& buffer_size) {
+ while (!pending_objects_.empty()) {
+ const Handle<JSObject>& object = pending_objects_.front();
+ SerializePendingObject(object);
+ pending_objects_.pop();
+ }
+
+ ValueSerializer total_serializer(isolate_, nullptr);
+ size_t needed_size =
+ string_serializer_.buffer_size_ + map_serializer_.buffer_size_ +
+ context_serializer_.buffer_size_ + function_serializer_.buffer_size_ +
+ object_serializer_.buffer_size_ + export_serializer_.buffer_size_ +
+ 6 * sizeof(uint32_t);
+ if (total_serializer.ExpandBuffer(needed_size).IsNothing()) {
+ Throw("Web snapshot: Out of memory");
+ return;
+ }
+
+ total_serializer.WriteUint32(static_cast<uint32_t>(string_count()));
+ total_serializer.WriteRawBytes(string_serializer_.buffer_,
+ string_serializer_.buffer_size_);
+ total_serializer.WriteUint32(static_cast<uint32_t>(map_count()));
+ total_serializer.WriteRawBytes(map_serializer_.buffer_,
+ map_serializer_.buffer_size_);
+ total_serializer.WriteUint32(static_cast<uint32_t>(context_count()));
+ total_serializer.WriteRawBytes(context_serializer_.buffer_,
+ context_serializer_.buffer_size_);
+ total_serializer.WriteUint32(static_cast<uint32_t>(function_count()));
+ total_serializer.WriteRawBytes(function_serializer_.buffer_,
+ function_serializer_.buffer_size_);
+ total_serializer.WriteUint32(static_cast<uint32_t>(object_count()));
+ total_serializer.WriteRawBytes(object_serializer_.buffer_,
+ object_serializer_.buffer_size_);
+ total_serializer.WriteUint32(export_count_);
+ total_serializer.WriteRawBytes(export_serializer_.buffer_,
+ export_serializer_.buffer_size_);
+
+ if (has_error()) {
+ return;
+ }
+
+ auto result = total_serializer.Release();
+ buffer = result.first;
+ buffer_size = result.second;
+}
+
+bool WebSnapshotSerializer::InsertIntoIndexMap(ObjectCacheIndexMap& map,
+ Handle<HeapObject> object,
+ uint32_t& id) {
+ if (static_cast<uint32_t>(map.size()) >=
+ std::numeric_limits<uint32_t>::max()) {
+ Throw("Web snapshot: Too many objects");
+ return true;
+ }
+ int index_out;
+ bool found = map.LookupOrInsert(object, &index_out);
+ id = static_cast<uint32_t>(index_out);
+ return found;
+}
+
+// Format:
+// - Length
+// - Raw bytes (data)
+void WebSnapshotSerializer::SerializeString(Handle<String> string,
+ uint32_t& id) {
+ if (InsertIntoIndexMap(string_ids_, string, id)) {
+ return;
+ }
+
+ // TODO(v8:11525): Always write strings as UTF-8.
+ string = String::Flatten(isolate_, string);
+ DisallowGarbageCollection no_gc;
+ String::FlatContent flat = string->GetFlatContent(no_gc);
+ DCHECK(flat.IsFlat());
+ if (flat.IsOneByte()) {
+ Vector<const uint8_t> chars = flat.ToOneByteVector();
+ string_serializer_.WriteUint32(chars.length());
+ string_serializer_.WriteRawBytes(chars.begin(),
+ chars.length() * sizeof(uint8_t));
+ } else if (flat.IsTwoByte()) {
+ // TODO(v8:11525): Support two-byte strings.
+ UNREACHABLE();
+ } else {
+ UNREACHABLE();
+ }
+}
+
+// Format (serialized shape):
+// - Property count
+// - For each property
+// - String id (name)
+void WebSnapshotSerializer::SerializeMap(Handle<Map> map, uint32_t& id) {
+ if (InsertIntoIndexMap(map_ids_, map, id)) {
+ return;
+ }
+
+ std::vector<uint32_t> string_ids;
+ for (InternalIndex i : map->IterateOwnDescriptors()) {
+ Handle<Name> key(map->instance_descriptors(kRelaxedLoad).GetKey(i),
+ isolate_);
+ if (!key->IsString()) {
+ Throw("Web snapshot: Key is not a string");
+ return;
+ }
+
+ PropertyDetails details =
+ map->instance_descriptors(kRelaxedLoad).GetDetails(i);
+ if (details.IsDontEnum()) {
+ Throw("Web snapshot: Non-enumerable properties not supported");
+ return;
+ }
+
+ if (details.location() != kField) {
+ Throw("Web snapshot: Properties which are not fields not supported");
+ return;
+ }
+
+ uint32_t string_id = 0;
+ SerializeString(Handle<String>::cast(key), string_id);
+ string_ids.push_back(string_id);
+
+ // TODO(v8:11525): Support property attributes.
+ }
+ map_serializer_.WriteUint32(static_cast<uint32_t>(string_ids.size()));
+ for (auto i : string_ids) {
+ map_serializer_.WriteUint32(i);
+ }
+}
+
+// Format (serialized function):
+// - 0 if there's no context, 1 + context id otherwise
+// - String id (source string)
+void WebSnapshotSerializer::SerializeFunction(Handle<JSFunction> function,
+ uint32_t& id) {
+ if (InsertIntoIndexMap(function_ids_, function, id)) {
+ return;
+ }
+
+ if (!function->shared().HasSourceCode()) {
+ Throw("Web snapshot: Function without source code");
+ return;
+ }
+
+ Handle<Context> context(function->context(), isolate_);
+ if (context->IsNativeContext()) {
+ function_serializer_.WriteUint32(0);
+ } else {
+ DCHECK(context->IsFunctionContext());
+ uint32_t context_id = 0;
+ SerializeContext(context, context_id);
+ function_serializer_.WriteUint32(context_id + 1);
+ }
+
+ // TODO(v8:11525): For inner functions which occur inside a serialized
+ // function, create a "substring" type, so that we don't need to serialize the
+ // same content twice.
+ Handle<String> full_source(
+ String::cast(Script::cast(function->shared().script()).source()),
+ isolate_);
+ int start = function->shared().StartPosition();
+ int end = function->shared().EndPosition();
+ Handle<String> source =
+ isolate_->factory()->NewSubString(full_source, start, end);
+ uint32_t source_id = 0;
+ SerializeString(source, source_id);
+ function_serializer_.WriteUint32(source_id);
+
+ // TODO(v8:11525): Serialize .prototype.
+ // TODO(v8:11525): Support properties in functions.
+}
+
+// Format (serialized context):
+// - 0 if there's no parent context, 1 + parent context id otherwise
+// - Variable count
+// - For each variable:
+// - String id (name)
+// - Serialized value
+void WebSnapshotSerializer::SerializeContext(Handle<Context> context,
+ uint32_t& id) {
+ // Invariant: parent context is serialized first.
+
+ // Can't use InsertIntoIndexMap here, because it might reserve a lower id
+ // for the context than its parent.
+ int index_out = 0;
+ if (context_ids_.Lookup(context, &index_out)) {
+ id = static_cast<uint32_t>(index_out);
+ return;
+ }
+
+ uint32_t parent_context_id = 0;
+ if (!context->previous().IsNativeContext()) {
+ SerializeContext(handle(context->previous(), isolate_), parent_context_id);
+ ++parent_context_id;
+ }
+
+ InsertIntoIndexMap(context_ids_, context, id);
+
+ context_serializer_.WriteUint32(parent_context_id);
+
+ Handle<ScopeInfo> scope_info(context->scope_info(), isolate_);
+ int count = scope_info->ContextLocalCount();
+ context_serializer_.WriteUint32(count);
+
+ for (int i = 0; i < count; ++i) {
+ // TODO(v8:11525): support parameters
+ // TODO(v8:11525): distinguish variable modes
+ Handle<String> name(scope_info->context_local_names(i), isolate_);
+ uint32_t string_id = 0;
+ SerializeString(name, string_id);
+ context_serializer_.WriteUint32(string_id);
+ Handle<Object> value(context->get(scope_info->ContextHeaderLength() + i),
+ isolate_);
+ WriteValue(value, context_serializer_);
+ }
+}
+
+void WebSnapshotSerializer::SerializeObject(Handle<JSObject> object,
+ uint32_t& id) {
+ DCHECK(!object->IsJSFunction());
+ if (InsertIntoIndexMap(object_ids_, object, id)) {
+ return;
+ }
+ pending_objects_.push(object);
+}
+
+// Format (serialized object):
+// - Shape id
+// - For each property:
+// - Serialized value
+void WebSnapshotSerializer::SerializePendingObject(Handle<JSObject> object) {
+ Handle<Map> map(object->map(), isolate_);
+ uint32_t map_id = 0;
+ SerializeMap(map, map_id);
+
+ if (*map != object->map()) {
+ Throw("Web snapshot: Map changed");
+ return;
+ }
+
+ object_serializer_.WriteUint32(map_id);
+
+ for (InternalIndex i : map->IterateOwnDescriptors()) {
+ PropertyDetails details =
+ map->instance_descriptors(kRelaxedLoad).GetDetails(i);
+ FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
+ Handle<Object> value =
+ JSObject::FastPropertyAt(object, details.representation(), field_index);
+ WriteValue(value, object_serializer_);
+ }
+}
+
+// Format (serialized export):
+// - String id (export name)
+// - Object id (exported object)
+void WebSnapshotSerializer::SerializeExport(Handle<JSObject> object,
+ const std::string& export_name) {
+ // TODO(v8:11525): Support exporting functions.
+ ++export_count_;
+ // TODO(v8:11525): How to avoid creating the String but still de-dupe?
+ Handle<String> export_name_string =
+ isolate_->factory()
+ ->NewStringFromOneByte(Vector<const uint8_t>(
+ reinterpret_cast<const uint8_t*>(export_name.c_str()),
+ static_cast<int>(export_name.length())))
+ .ToHandleChecked();
+ uint32_t string_id = 0;
+ SerializeString(export_name_string, string_id);
+ uint32_t object_id = 0;
+ SerializeObject(object, object_id);
+ export_serializer_.WriteUint32(string_id);
+ export_serializer_.WriteUint32(object_id);
+}
+
+// Format (serialized value):
+// - Type id (ValueType enum)
+// - Value or id (interpretation depends on the type)
+void WebSnapshotSerializer::WriteValue(Handle<Object> object,
+ ValueSerializer& serializer) {
+ uint32_t id = 0;
+ if (object->IsSmi()) {
+ // TODO(v8:11525): Implement.
+ UNREACHABLE();
+ }
+
+ DCHECK(object->IsHeapObject());
+ switch (HeapObject::cast(*object).map().instance_type()) {
+ case ODDBALL_TYPE:
+ // TODO(v8:11525): Implement.
+ UNREACHABLE();
+ case HEAP_NUMBER_TYPE:
+ // TODO(v8:11525): Implement.
+ UNREACHABLE();
+ case JS_FUNCTION_TYPE:
+ SerializeFunction(Handle<JSFunction>::cast(object), id);
+ serializer.WriteUint32(ValueType::FUNCTION_ID);
+ serializer.WriteUint32(id);
+ break;
+ case JS_OBJECT_TYPE:
+ SerializeObject(Handle<JSObject>::cast(object), id);
+ serializer.WriteUint32(ValueType::OBJECT_ID);
+ serializer.WriteUint32(id);
+ break;
+ default:
+ if (object->IsString()) {
+ SerializeString(Handle<String>::cast(object), id);
+ serializer.WriteUint32(ValueType::STRING_ID);
+ serializer.WriteUint32(id);
+ } else {
+ Throw("Web snapshot: Unsupported object");
+ }
+ }
+ // TODO(v8:11525): Support more types.
+}
+
+WebSnapshotDeserializer::WebSnapshotDeserializer(v8::Isolate* isolate)
+ : WebSnapshotSerializerDeserializer(
+ reinterpret_cast<v8::internal::Isolate*>(isolate)) {}
+
+WebSnapshotDeserializer::~WebSnapshotDeserializer() {}
+
+void WebSnapshotDeserializer::Throw(const char* message) {
+ string_count_ = 0;
+ map_count_ = 0;
+ context_count_ = 0;
+ function_count_ = 0;
+ object_count_ = 0;
+ // Make sure we don't read any more data
+ deserializer_->position_ = deserializer_->end_;
+
+ WebSnapshotSerializerDeserializer::Throw(message);
+}
+
+bool WebSnapshotDeserializer::UseWebSnapshot(const uint8_t* data,
+ size_t buffer_size) {
+ if (deserialized_) {
+ Throw("Web snapshot: Can't reuse WebSnapshotDeserializer");
+ return false;
+ }
+ deserialized_ = true;
+
+ // TODO(v8:11525): Add RuntimeCallStats.
+ base::ElapsedTimer timer;
+ if (FLAG_trace_web_snapshot) {
+ timer.Start();
+ }
+
+ deserializer_.reset(new ValueDeserializer(isolate_, data, buffer_size));
+ DeserializeStrings();
+ DeserializeMaps();
+ DeserializeContexts();
+ DeserializeFunctions();
+ DeserializeObjects();
+ DeserializeExports();
+ if (deserializer_->position_ != deserializer_->end_) {
+ Throw("Web snapshot: Snapshot length mismatch");
+ return false;
+ }
+
+ if (FLAG_trace_web_snapshot) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ PrintF("[Deserializing snapshot (%zu bytes) took %0.3f ms]\n", buffer_size,
+ ms);
+ }
+
+ // TODO(v8:11525): Add verification mode; verify the objects we just produced.
+ return !has_error();
+}
+
+void WebSnapshotDeserializer::DeserializeStrings() {
+ if (!deserializer_->ReadUint32(&string_count_) ||
+ string_count_ > kMaxItemCount) {
+ Throw("Web snapshot: Malformed string table");
+ return;
+ }
+ STATIC_ASSERT(kMaxItemCount <= FixedArray::kMaxLength);
+ strings_ = isolate_->factory()->NewFixedArray(string_count_);
+ for (uint32_t i = 0; i < string_count_; ++i) {
+ // TODO(v8:11525): Read strings as UTF-8.
+ MaybeHandle<String> maybe_string = deserializer_->ReadOneByteString();
+ Handle<String> string;
+ if (!maybe_string.ToHandle(&string)) {
+ Throw("Web snapshot: Malformed string");
+ return;
+ }
+ strings_->set(i, *string);
+ }
+}
+
+Handle<String> WebSnapshotDeserializer::ReadString(bool internalize) {
+ DCHECK(!strings_->is_null());
+ uint32_t string_id;
+ if (!deserializer_->ReadUint32(&string_id) || string_id >= string_count_) {
+ Throw("Web snapshot: malformed string id\n");
+ return isolate_->factory()->empty_string();
+ }
+ Handle<String> string =
+ handle(String::cast(strings_->get(string_id)), isolate_);
+ if (internalize && !string->IsInternalizedString()) {
+ string = isolate_->factory()->InternalizeString(string);
+ strings_->set(string_id, *string);
+ }
+ return string;
+}
+
+void WebSnapshotDeserializer::DeserializeMaps() {
+ if (!deserializer_->ReadUint32(&map_count_) || map_count_ > kMaxItemCount) {
+ Throw("Web snapshot: Malformed shape table");
+ return;
+ }
+ STATIC_ASSERT(kMaxItemCount <= FixedArray::kMaxLength);
+ maps_ = isolate_->factory()->NewFixedArray(map_count_);
+ for (uint32_t i = 0; i < map_count_; ++i) {
+ uint32_t property_count;
+ if (!deserializer_->ReadUint32(&property_count)) {
+ Throw("Web snapshot: Malformed shape");
+ return;
+ }
+ // TODO(v8:11525): Consider passing the upper bound as a param and
+ // systematically enforcing it on the ValueSerializer side.
+ if (property_count > kMaxNumberOfDescriptors) {
+ Throw("Web snapshot: Malformed shape: too many properties");
+ return;
+ }
+
+ Handle<DescriptorArray> descriptors =
+ isolate_->factory()->NewDescriptorArray(0, property_count);
+ for (uint32_t p = 0; p < property_count; ++p) {
+ Handle<String> key = ReadString(true);
+
+ // Use the "none" representation until we see the first object having this
+ // map. At that point, modify the representation.
+ Descriptor desc = Descriptor::DataField(
+ isolate_, key, static_cast<int>(p), PropertyAttributes::NONE,
+ Representation::None());
+ descriptors->Append(&desc);
+ }
+
+ Handle<Map> map = isolate_->factory()->NewMap(
+ JS_OBJECT_TYPE, JSObject::kHeaderSize * kTaggedSize, HOLEY_ELEMENTS, 0);
+ map->InitializeDescriptors(isolate_, *descriptors);
+
+ maps_->set(i, *map);
+ }
+}
+
+void WebSnapshotDeserializer::DeserializeContexts() {
+ if (!deserializer_->ReadUint32(&context_count_) ||
+ context_count_ > kMaxItemCount) {
+ Throw("Web snapshot: Malformed context table");
+ return;
+ }
+ STATIC_ASSERT(kMaxItemCount <= FixedArray::kMaxLength);
+ contexts_ = isolate_->factory()->NewFixedArray(context_count_);
+ for (uint32_t i = 0; i < context_count_; ++i) {
+ uint32_t parent_context_id;
+ // Parent context is serialized before child context. Note: not >= on
+ // purpose, we're going to subtract 1 later.
+ if (!deserializer_->ReadUint32(&parent_context_id) ||
+ parent_context_id > i) {
+ Throw("Web snapshot: Malformed context");
+ return;
+ }
+
+ uint32_t variable_count;
+ if (!deserializer_->ReadUint32(&variable_count)) {
+ Throw("Web snapshot: Malformed context");
+ return;
+ }
+ // TODO(v8:11525): Enforce upper limit for variable count.
+ Handle<ScopeInfo> scope_info =
+ CreateScopeInfo(variable_count, parent_context_id > 0);
+
+ Handle<Context> parent_context;
+ if (parent_context_id > 0) {
+ parent_context = handle(
+ Context::cast(contexts_->get(parent_context_id - 1)), isolate_);
+ scope_info->set_outer_scope_info(parent_context->scope_info());
+ } else {
+ parent_context = handle(isolate_->context(), isolate_);
+ }
+
+ Handle<Context> context =
+ isolate_->factory()->NewFunctionContext(parent_context, scope_info);
+ contexts_->set(i, *context);
+
+ const int context_local_base = ScopeInfo::kVariablePartIndex;
+ const int context_local_info_base = context_local_base + variable_count;
+ for (int variable_index = 0;
+ variable_index < static_cast<int>(variable_count); ++variable_index) {
+ Handle<String> name = ReadString(true);
+ scope_info->set(context_local_base + variable_index, *name);
+
+ // TODO(v8:11525): Support variable modes etc.
+ uint32_t info =
+ ScopeInfo::VariableModeBits::encode(VariableMode::kLet) |
+ ScopeInfo::InitFlagBit::encode(
+ InitializationFlag::kNeedsInitialization) |
+ ScopeInfo::MaybeAssignedFlagBit::encode(
+ MaybeAssignedFlag::kMaybeAssigned) |
+ ScopeInfo::ParameterNumberBits::encode(
+ ScopeInfo::ParameterNumberBits::kMax) |
+ ScopeInfo::IsStaticFlagBit::encode(IsStaticFlag::kNotStatic);
+ scope_info->set(context_local_info_base + variable_index,
+ Smi::FromInt(info));
+
+ Handle<Object> value;
+ Representation representation;
+ ReadValue(value, representation);
+ context->set(scope_info->ContextHeaderLength() + variable_index, *value);
+ }
+ }
+}
+
+Handle<ScopeInfo> WebSnapshotDeserializer::CreateScopeInfo(
+ uint32_t variable_count, bool has_parent) {
+ // TODO(v8:11525): Decide how to handle language modes. (The code below sets
+ // the language mode as strict.)
+ // TODO(v8:11525): Support (context-allocating) receiver.
+ // TODO(v8:11525): Support function variable & function name.
+ // TODO(v8:11525): Support classes.
+ const int length = ScopeInfo::kVariablePartIndex +
+ ScopeInfo::kPositionInfoEntries + (has_parent ? 1 : 0) +
+ 2 * variable_count;
+
+ Handle<ScopeInfo> scope_info = isolate_->factory()->NewScopeInfo(length);
+ int flags =
+ ScopeInfo::ScopeTypeBits::encode(ScopeType::FUNCTION_SCOPE) |
+ ScopeInfo::SloppyEvalCanExtendVarsBit::encode(false) |
+ ScopeInfo::LanguageModeBit::encode(LanguageMode::kStrict) |
+ ScopeInfo::DeclarationScopeBit::encode(true) |
+ ScopeInfo::ReceiverVariableBits::encode(VariableAllocationInfo::NONE) |
+ ScopeInfo::HasClassBrandBit::encode(false) |
+ ScopeInfo::HasSavedClassVariableIndexBit::encode(false) |
+ ScopeInfo::HasNewTargetBit::encode(false) |
+ ScopeInfo::FunctionVariableBits::encode(VariableAllocationInfo::NONE) |
+ ScopeInfo::HasInferredFunctionNameBit::encode(false) |
+ ScopeInfo::IsAsmModuleBit::encode(false) |
+ ScopeInfo::HasSimpleParametersBit::encode(true) |
+ ScopeInfo::FunctionKindBits::encode(FunctionKind::kNormalFunction) |
+ ScopeInfo::HasOuterScopeInfoBit::encode(has_parent) |
+ ScopeInfo::IsDebugEvaluateScopeBit::encode(false) |
+ ScopeInfo::ForceContextAllocationBit::encode(false) |
+ ScopeInfo::PrivateNameLookupSkipsOuterClassBit::encode(false) |
+ ScopeInfo::HasContextExtensionSlotBit::encode(false) |
+ ScopeInfo::IsReplModeScopeBit::encode(false) |
+ ScopeInfo::HasLocalsBlockListBit::encode(false);
+ scope_info->set_flags(flags);
+ DCHECK(!scope_info->IsEmpty());
+
+ scope_info->set_context_local_count(variable_count);
+ // TODO(v8:11525): Support parameters.
+ scope_info->set_parameter_count(0);
+ scope_info->SetPositionInfo(0, 0);
+ return scope_info;
+}
+
+void WebSnapshotDeserializer::DeserializeFunctions() {
+ if (!deserializer_->ReadUint32(&function_count_) ||
+ function_count_ > kMaxItemCount) {
+ Throw("Web snapshot: Malformed function table");
+ return;
+ }
+ STATIC_ASSERT(kMaxItemCount <= FixedArray::kMaxLength);
+ functions_ = isolate_->factory()->NewFixedArray(function_count_);
+ for (uint32_t i = 0; i < function_count_; ++i) {
+ uint32_t context_id;
+ // Note: > (not >= on purpose, we will subtract 1).
+ if (!deserializer_->ReadUint32(&context_id) ||
+ context_id > context_count_) {
+ Throw("Web snapshot: Malformed function");
+ return;
+ }
+
+ Handle<String> source = ReadString(false);
+
+ // TODO(v8:11525): Support other function kinds.
+ // TODO(v8:11525): Support (exported) top level functions.
+ Handle<Script> script = isolate_->factory()->NewScript(source);
+ // TODO(v8:11525): Deduplicate the SFIs for inner functions the user creates
+ // post-deserialization (by calling the outer function, if it's also in the
+ // snapshot) against the ones we create here.
+ Handle<SharedFunctionInfo> shared =
+ isolate_->factory()->NewSharedFunctionInfo(
+ isolate_->factory()->empty_string(), MaybeHandle<Code>(),
+ Builtins::kCompileLazy, FunctionKind::kNormalFunction);
+ shared->set_function_literal_id(1);
+ // TODO(v8:11525): Decide how to handle language modes.
+ shared->set_language_mode(LanguageMode::kStrict);
+ shared->set_uncompiled_data(
+ *isolate_->factory()->NewUncompiledDataWithoutPreparseData(
+ ReadOnlyRoots(isolate_).empty_string_handle(), 0,
+ source->length()));
+ shared->set_script(*script);
+ Handle<WeakFixedArray> infos(
+ isolate_->factory()->NewWeakFixedArray(3, AllocationType::kOld));
+ infos->Set(1, HeapObjectReference::Weak(*shared));
+ script->set_shared_function_infos(*infos);
+
+ Handle<JSFunction> function =
+ Factory::JSFunctionBuilder(isolate_, shared, isolate_->native_context())
+ .Build();
+ if (context_id > 0) {
+ DCHECK_LT(context_id - 1, context_count_);
+ // Guards raw pointer "context" below.
+ DisallowHeapAllocation no_heap_access;
+ Context context = Context::cast(contexts_->get(context_id - 1));
+ function->set_context(context);
+ shared->set_outer_scope_info(context.scope_info());
+ }
+ functions_->set(i, *function);
+ }
+}
+
+void WebSnapshotDeserializer::DeserializeObjects() {
+ if (!deserializer_->ReadUint32(&object_count_) ||
+ object_count_ > kMaxItemCount) {
+ Throw("Web snapshot: Malformed objects table");
+ return;
+ }
+ STATIC_ASSERT(kMaxItemCount <= FixedArray::kMaxLength);
+ objects_ = isolate_->factory()->NewFixedArray(object_count_);
+ for (size_t object_ix = 0; object_ix < object_count_; ++object_ix) {
+ uint32_t map_id;
+ if (!deserializer_->ReadUint32(&map_id) || map_id >= map_count_) {
+ Throw("Web snapshot: Malformed object");
+ return;
+ }
+ Handle<Map> map = handle(Map::cast(maps_->get(map_id)), isolate_);
+ Handle<DescriptorArray> descriptors =
+ handle(map->instance_descriptors(kRelaxedLoad), isolate_);
+ int no_properties = map->NumberOfOwnDescriptors();
+ Handle<PropertyArray> property_array =
+ isolate_->factory()->NewPropertyArray(no_properties);
+ for (int i = 0; i < no_properties; ++i) {
+ Handle<Object> value;
+ Representation wanted_representation = Representation::None();
+ ReadValue(value, wanted_representation);
+ // Read the representation from the map.
+ PropertyDetails details = descriptors->GetDetails(InternalIndex(i));
+ CHECK_EQ(details.location(), kField);
+ CHECK_EQ(kData, details.kind());
+ Representation r = details.representation();
+ if (r.IsNone()) {
+ // Switch over to wanted_representation.
+ details = details.CopyWithRepresentation(wanted_representation);
+ descriptors->SetDetails(InternalIndex(i), details);
+ } else if (!r.Equals(wanted_representation)) {
+ // TODO(v8:11525): Support this case too.
+ UNREACHABLE();
+ }
+ property_array->set(i, *value);
+ }
+ Handle<JSObject> object = isolate_->factory()->NewJSObjectFromMap(map);
+ object->set_raw_properties_or_hash(*property_array);
+ objects_->set(static_cast<int>(object_ix), *object);
+ }
+}
+
+void WebSnapshotDeserializer::DeserializeExports() {
+ uint32_t count;
+ if (!deserializer_->ReadUint32(&count) || count > kMaxItemCount) {
+ Throw("Web snapshot: Malformed export table");
+ return;
+ }
+ for (uint32_t i = 0; i < count; ++i) {
+ Handle<String> export_name = ReadString(true);
+ uint32_t object_id = 0;
+ if (!deserializer_->ReadUint32(&object_id) || object_id >= object_count_) {
+ Throw("Web snapshot: Malformed export");
+ return;
+ }
+ Handle<Object> exported_object = handle(objects_->get(object_id), isolate_);
+
+ // Check for the correctness of the snapshot (thus far) before producing
+ // something observable. TODO(v8:11525): Strictly speaking, we should
+ // produce observable effects only when we know that the whole snapshot is
+ // correct.
+ if (has_error()) {
+ return;
+ }
+
+ auto result = Object::SetProperty(isolate_, isolate_->global_object(),
+ export_name, exported_object);
+ if (result.is_null()) {
+ Throw("Web snapshot: Setting global property failed");
+ return;
+ }
+ }
+}
+
+void WebSnapshotDeserializer::ReadValue(Handle<Object>& value,
+ Representation& representation) {
+ uint32_t value_type;
+ // TODO(v8:11525): Consider adding a ReadByte.
+ if (!deserializer_->ReadUint32(&value_type)) {
+ Throw("Web snapshot: Malformed variable");
+ return;
+ }
+ switch (value_type) {
+ case ValueType::STRING_ID: {
+ value = ReadString(false);
+ representation = Representation::Tagged();
+ break;
+ }
+ case ValueType::OBJECT_ID:
+ uint32_t object_id;
+ if (!deserializer_->ReadUint32(&object_id) ||
+ object_id >= object_count_) {
+ // TODO(v8:11525): Handle circular references + contexts referencing
+ // objects.
+ Throw("Web snapshot: Malformed variable");
+ return;
+ }
+ value = handle(objects_->get(object_id), isolate_);
+ representation = Representation::Tagged();
+ break;
+ case ValueType::FUNCTION_ID:
+ // TODO(v8:11525): Handle contexts referencing functions.
+ uint32_t function_id;
+ if (!deserializer_->ReadUint32(&function_id) ||
+ function_id >= function_count_) {
+ Throw("Web snapshot: Malformed object property");
+ return;
+ }
+ value = handle(functions_->get(function_id), isolate_);
+ representation = Representation::Tagged();
+ break;
+ default:
+ // TODO(v8:11525): Handle other value types.
+ Throw("Web snapshot: Unsupported value type");
+ return;
+ }
+}
+
+} // namespace internal
+} // namespace v8
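The "Format (full snapshot)" comment near the top of this file describes the
buffer WriteSnapshot produces: six sections (strings, shapes, contexts,
functions, objects, exports), each preceded by a uint32 item count. The
standalone sketch below only illustrates that outer layout; Section,
WriteUint32 and ConcatenateSnapshot are invented names, and the fixed-width
little-endian count encoding is a simplification of what ValueSerializer
actually emits.

#include <cstdint>
#include <vector>

// One serialized section: an item count plus the raw bytes of its items.
struct Section {
  uint32_t count = 0;
  std::vector<uint8_t> bytes;
};

// Append a uint32 as four little-endian bytes (simplified stand-in for the
// patch's ValueSerializer::WriteUint32).
void WriteUint32(std::vector<uint8_t>& out, uint32_t value) {
  for (int shift = 0; shift < 32; shift += 8) {
    out.push_back(static_cast<uint8_t>(value >> shift));
  }
}

// Mirrors the overall shape of WriteSnapshot: counts and section payloads in
// a fixed order (strings, shapes, contexts, functions, objects, exports).
std::vector<uint8_t> ConcatenateSnapshot(const Section (&sections)[6]) {
  std::vector<uint8_t> buffer;
  for (const Section& section : sections) {
    WriteUint32(buffer, section.count);
    buffer.insert(buffer.end(), section.bytes.begin(), section.bytes.end());
  }
  return buffer;
}

int main() {
  Section sections[6];
  sections[0].count = 1;                   // one string
  sections[0].bytes = {3, 'f', 'o', 'o'};  // placeholder payload bytes
  std::vector<uint8_t> snapshot = ConcatenateSnapshot(sections);
  return snapshot.empty() ? 1 : 0;
}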
diff --git a/deps/v8/src/web-snapshot/web-snapshot.h b/deps/v8/src/web-snapshot/web-snapshot.h
new file mode 100644
index 00000000000..31461c8dbc5
--- /dev/null
+++ b/deps/v8/src/web-snapshot/web-snapshot.h
@@ -0,0 +1,181 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WEB_SNAPSHOT_WEB_SNAPSHOT_H_
+#define V8_WEB_SNAPSHOT_WEB_SNAPSHOT_H_
+
+#include <queue>
+#include <vector>
+
+#include "src/handles/handles.h"
+#include "src/objects/value-serializer.h"
+#include "src/snapshot/serializer.h" // For ObjectCacheIndexMap
+
+namespace v8 {
+
+class Context;
+class Isolate;
+
+template <typename T>
+class Local;
+
+namespace internal {
+
+class Context;
+class Map;
+class Object;
+class String;
+
+struct WebSnapshotData {
+ uint8_t* buffer = nullptr;
+ size_t buffer_size = 0;
+ ~WebSnapshotData() { free(buffer); }
+};
+
+class WebSnapshotSerializerDeserializer {
+ public:
+ inline bool has_error() const { return error_message_ != nullptr; }
+ const char* error_message() const { return error_message_; }
+
+ enum ValueType : uint8_t { STRING_ID, OBJECT_ID, FUNCTION_ID };
+
+ // The maximum count of items for each value type (strings, objects etc.)
+ static constexpr uint32_t kMaxItemCount =
+ static_cast<uint32_t>(FixedArray::kMaxLength);
+ // This ensures indices and lengths can be converted between uint32_t and int
+ // without problems:
+ STATIC_ASSERT(kMaxItemCount < std::numeric_limits<int32_t>::max());
+
+ protected:
+ explicit WebSnapshotSerializerDeserializer(Isolate* isolate)
+ : isolate_(isolate) {}
+ // Not virtual, on purpose (because it doesn't need to be).
+ void Throw(const char* message);
+ Isolate* isolate_;
+ const char* error_message_ = nullptr;
+
+ private:
+ WebSnapshotSerializerDeserializer(const WebSnapshotSerializerDeserializer&) =
+ delete;
+ WebSnapshotSerializerDeserializer& operator=(
+ const WebSnapshotSerializerDeserializer&) = delete;
+};
+
+class V8_EXPORT WebSnapshotSerializer
+ : public WebSnapshotSerializerDeserializer {
+ public:
+ explicit WebSnapshotSerializer(v8::Isolate* isolate);
+ ~WebSnapshotSerializer();
+
+ bool TakeSnapshot(v8::Local<v8::Context> context,
+ const std::vector<std::string>& exports,
+ WebSnapshotData& data_out);
+
+ // For inspecting the state after taking a snapshot.
+ uint32_t string_count() const {
+ return static_cast<uint32_t>(string_ids_.size());
+ }
+
+ uint32_t map_count() const { return static_cast<uint32_t>(map_ids_.size()); }
+
+ uint32_t context_count() const {
+ return static_cast<uint32_t>(context_ids_.size());
+ }
+
+ uint32_t function_count() const {
+ return static_cast<uint32_t>(function_ids_.size());
+ }
+
+ uint32_t object_count() const {
+ return static_cast<uint32_t>(object_ids_.size());
+ }
+
+ private:
+ WebSnapshotSerializer(const WebSnapshotSerializer&) = delete;
+ WebSnapshotSerializer& operator=(const WebSnapshotSerializer&) = delete;
+
+ void WriteSnapshot(uint8_t*& buffer, size_t& buffer_size);
+
+ // Returns true if the object was already in the map, false if it was added.
+ bool InsertIntoIndexMap(ObjectCacheIndexMap& map, Handle<HeapObject> object,
+ uint32_t& id);
+
+ void SerializeString(Handle<String> string, uint32_t& id);
+ void SerializeMap(Handle<Map> map, uint32_t& id);
+ void SerializeFunction(Handle<JSFunction> function, uint32_t& id);
+ void SerializeContext(Handle<Context> context, uint32_t& id);
+ void SerializeObject(Handle<JSObject> object, uint32_t& id);
+ void SerializePendingObject(Handle<JSObject> object);
+ void SerializeExport(Handle<JSObject> object, const std::string& export_name);
+ void WriteValue(Handle<Object> object, ValueSerializer& serializer);
+
+ ValueSerializer string_serializer_;
+ ValueSerializer map_serializer_;
+ ValueSerializer context_serializer_;
+ ValueSerializer function_serializer_;
+ ValueSerializer object_serializer_;
+ ValueSerializer export_serializer_;
+
+ ObjectCacheIndexMap string_ids_;
+ ObjectCacheIndexMap map_ids_;
+ ObjectCacheIndexMap context_ids_;
+ ObjectCacheIndexMap function_ids_;
+ ObjectCacheIndexMap object_ids_;
+ uint32_t export_count_ = 0;
+
+ std::queue<Handle<JSObject>> pending_objects_;
+};
+
+class V8_EXPORT WebSnapshotDeserializer
+ : public WebSnapshotSerializerDeserializer {
+ public:
+ explicit WebSnapshotDeserializer(v8::Isolate* v8_isolate);
+ ~WebSnapshotDeserializer();
+ bool UseWebSnapshot(const uint8_t* data, size_t buffer_size);
+
+ // For inspecting the state after deserializing a snapshot.
+ uint32_t string_count() const { return string_count_; }
+ uint32_t map_count() const { return map_count_; }
+ uint32_t context_count() const { return context_count_; }
+ uint32_t function_count() const { return function_count_; }
+ uint32_t object_count() const { return object_count_; }
+
+ private:
+ WebSnapshotDeserializer(const WebSnapshotDeserializer&) = delete;
+ WebSnapshotDeserializer& operator=(const WebSnapshotDeserializer&) = delete;
+
+ void DeserializeStrings();
+ Handle<String> ReadString(bool internalize = false);
+ void DeserializeMaps();
+ void DeserializeContexts();
+ Handle<ScopeInfo> CreateScopeInfo(uint32_t variable_count, bool has_parent);
+ void DeserializeFunctions();
+ void DeserializeObjects();
+ void DeserializeExports();
+ void ReadValue(Handle<Object>& value, Representation& representation);
+
+ // Not virtual, on purpose (because it doesn't need to be).
+ void Throw(const char* message);
+
+ Handle<FixedArray> strings_;
+ Handle<FixedArray> maps_;
+ Handle<FixedArray> contexts_;
+ Handle<FixedArray> functions_;
+ Handle<FixedArray> objects_;
+
+ uint32_t string_count_ = 0;
+ uint32_t map_count_ = 0;
+ uint32_t context_count_ = 0;
+ uint32_t function_count_ = 0;
+ uint32_t object_count_ = 0;
+
+ std::unique_ptr<ValueDeserializer> deserializer_;
+
+ bool deserialized_ = false;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WEB_SNAPSHOT_WEB_SNAPSHOT_H_
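For orientation only: a sketch of how code inside the V8 tree might drive the
two classes declared above, using just the signatures from this header
(TakeSnapshot, UseWebSnapshot, WebSnapshotData). The RoundTripSnapshot helper
is invented for illustration and is not part of the patch; it assumes an
internal build where these headers and a live isolate and context are
available.

// Sketch only; assumes an internal V8 build with these headers available.
#include <string>
#include <vector>

#include "src/web-snapshot/web-snapshot.h"

namespace v8 {
namespace internal {

// Serialize the objects bound to the given export names, then immediately
// deserialize the resulting buffer again (e.g. for a round-trip test).
bool RoundTripSnapshot(v8::Isolate* isolate, v8::Local<v8::Context> context,
                       const std::vector<std::string>& exports) {
  WebSnapshotData data;  // owns the buffer; freed in its destructor
  WebSnapshotSerializer serializer(isolate);
  if (!serializer.TakeSnapshot(context, exports, data)) return false;

  WebSnapshotDeserializer deserializer(isolate);
  // Installs the deserialized exports as global properties.
  return deserializer.UseWebSnapshot(data.buffer, data.buffer_size);
}

}  // namespace internal
}  // namespace v8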
diff --git a/deps/v8/src/zone/OWNERS b/deps/v8/src/zone/OWNERS
index 04bfcc5ec5a..1c725106166 100644
--- a/deps/v8/src/zone/OWNERS
+++ b/deps/v8/src/zone/OWNERS
@@ -1,4 +1,3 @@
clemensb@chromium.org
ishell@chromium.org
-sigurds@chromium.org
verwaest@chromium.org
diff --git a/deps/v8/test/BUILD.gn b/deps/v8/test/BUILD.gn
index fb872ad39ff..d90a4c670fd 100644
--- a/deps/v8/test/BUILD.gn
+++ b/deps/v8/test/BUILD.gn
@@ -15,17 +15,22 @@ group("gn_all") {
"mjsunit:v8_mjsunit",
"mozilla:v8_mozilla",
"test262:v8_test262",
- "wasm-js:v8_wasm_js",
- "wasm-spec-tests:v8_wasm_spec_tests",
"webkit:v8_webkit",
]
deps = [
"inspector:inspector-test",
"mkgrokdump:mkgrokdump",
- "wasm-api-tests:wasm_api_tests",
]
+ if (v8_enable_webassembly) {
+ data_deps += [
+ "wasm-js:v8_wasm_js",
+ "wasm-spec-tests:v8_wasm_spec_tests",
+ ]
+ deps += [ "wasm-api-tests:wasm_api_tests" ]
+ }
+
if (v8_fuzzilli) {
deps += [ "fuzzilli:v8_fuzzilli_test" ]
}
@@ -79,11 +84,16 @@ group("v8_bot_default") {
"mjsunit:v8_mjsunit",
"mkgrokdump:mkgrokdump",
"unittests:unittests",
- "wasm-api-tests:wasm_api_tests",
- "wasm-js:v8_wasm_js",
- "wasm-spec-tests:v8_wasm_spec_tests",
"webkit:v8_webkit",
]
+
+ if (v8_enable_webassembly) {
+ data_deps += [
+ "wasm-api-tests:wasm_api_tests",
+ "wasm-js:v8_wasm_js",
+ "wasm-spec-tests:v8_wasm_spec_tests",
+ ]
+ }
}
group("v8_default") {
@@ -99,10 +109,15 @@ group("v8_default") {
"mjsunit:v8_mjsunit",
"mkgrokdump:mkgrokdump",
"unittests:unittests",
- "wasm-api-tests:wasm_api_tests",
- "wasm-js:v8_wasm_js",
- "wasm-spec-tests:v8_wasm_spec_tests",
]
+
+ if (v8_enable_webassembly) {
+ data_deps += [
+ "wasm-api-tests:wasm_api_tests",
+ "wasm-js:v8_wasm_js",
+ "wasm-spec-tests:v8_wasm_spec_tests",
+ ]
+ }
}
group("v8_optimize_for_size") {
@@ -146,7 +161,7 @@ v8_header_set("common_test_headers") {
configs = []
public_deps = [
- "../:v8_for_testing",
+ "../:v8_internal_headers",
"../:v8_libbase",
]
@@ -154,8 +169,13 @@ v8_header_set("common_test_headers") {
"common/assembler-tester.h",
"common/flag-utils.h",
"common/types-fuzz.h",
- "common/wasm/flag-utils.h",
- "common/wasm/test-signatures.h",
- "common/wasm/wasm-macro-gen.h",
]
+
+ if (v8_enable_webassembly) {
+ sources += [
+ "common/wasm/flag-utils.h",
+ "common/wasm/test-signatures.h",
+ "common/wasm/wasm-macro-gen.h",
+ ]
+ }
}
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
index e63fe5ed357..ffa4e3a136a 100644
--- a/deps/v8/test/cctest/BUILD.gn
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -16,10 +16,7 @@ v8_executable("cctest") {
sources = [ "cctest.cc" ]
- deps = [
- ":cctest_sources",
- "../..:v8_wrappers",
- ]
+ deps = [ ":cctest_sources" ]
data_deps = [ "../../tools:v8_testrunner" ]
@@ -60,7 +57,11 @@ v8_header_set("cctest_headers") {
"../..:internal_config_base",
]
- deps = [ "../..:v8_config_headers" ]
+ deps = [
+ "../..:v8_internal_headers",
+ "../..:v8_libbase",
+ "../..:v8_libplatform",
+ ]
sources = [ "cctest.h" ]
}
@@ -78,9 +79,6 @@ v8_source_set("cctest_sources") {
### gcmole(all) ###
"../common/assembler-tester.h",
"../common/flag-utils.h",
- "../common/wasm/flag-utils.h",
- "../common/wasm/test-signatures.h",
- "../common/wasm/wasm-macro-gen.h",
"cctest-utils.h",
"collector.h",
"compiler/c-signature.h",
@@ -109,7 +107,6 @@ v8_source_set("cctest_sources") {
"compiler/test-linkage.cc",
"compiler/test-loop-analysis.cc",
"compiler/test-machine-operator-reducer.cc",
- "compiler/test-multiple-return.cc",
"compiler/test-node.cc",
"compiler/test-operator.cc",
"compiler/test-representation-change.cc",
@@ -124,7 +121,6 @@ v8_source_set("cctest_sources") {
"compiler/test-run-jsops.cc",
"compiler/test-run-load-store.cc",
"compiler/test-run-machops.cc",
- "compiler/test-run-native-calls.cc",
"compiler/test-run-retpoline.cc",
"compiler/test-run-stackcheck.cc",
"compiler/test-run-tail-calls.cc",
@@ -198,12 +194,10 @@ v8_source_set("cctest_sources") {
"test-api-interceptors.cc",
"test-api-stack-traces.cc",
"test-api-typed-array.cc",
- "test-api-wasm.cc",
"test-api.cc",
"test-api.h",
"test-array-list.cc",
"test-atomicops.cc",
- "test-backing-store.cc",
"test-bignum-dtoa.cc",
"test-bignum.cc",
"test-bit-vector.cc",
@@ -251,7 +245,6 @@ v8_source_set("cctest_sources") {
"test-inobject-slack-tracking.cc",
"test-inspector.cc",
"test-intl.cc",
- "test-js-to-wasm.cc",
"test-js-weak-refs.cc",
"test-liveedit.cc",
"test-local-handles.cc",
@@ -276,6 +269,8 @@ v8_source_set("cctest_sources") {
"test-smi-lexicographic-compare.cc",
"test-strings.cc",
"test-strtod.cc",
+ "test-swiss-name-dictionary-csa.cc",
+ "test-swiss-name-dictionary-infra.cc",
"test-swiss-name-dictionary.cc",
"test-symbols.cc",
"test-thread-termination.cc",
@@ -294,45 +289,12 @@ v8_source_set("cctest_sources") {
"test-version.cc",
"test-weakmaps.cc",
"test-weaksets.cc",
+ "test-web-snapshots.cc",
"torque/test-torque.cc",
"trace-extension.cc",
"trace-extension.h",
"unicode-helpers.cc",
"unicode-helpers.h",
- "wasm/test-c-wasm-entry.cc",
- "wasm/test-compilation-cache.cc",
- "wasm/test-gc.cc",
- "wasm/test-grow-memory.cc",
- "wasm/test-jump-table-assembler.cc",
- "wasm/test-liftoff-inspection.cc",
- "wasm/test-run-wasm-64.cc",
- "wasm/test-run-wasm-asmjs.cc",
- "wasm/test-run-wasm-atomics.cc",
- "wasm/test-run-wasm-atomics64.cc",
- "wasm/test-run-wasm-bulk-memory.cc",
- "wasm/test-run-wasm-exceptions.cc",
- "wasm/test-run-wasm-interpreter.cc",
- "wasm/test-run-wasm-js.cc",
- "wasm/test-run-wasm-memory64.cc",
- "wasm/test-run-wasm-module.cc",
- "wasm/test-run-wasm-sign-extension.cc",
- "wasm/test-run-wasm-simd-liftoff.cc",
- "wasm/test-run-wasm-simd-scalar-lowering.cc",
- "wasm/test-run-wasm-simd.cc",
- "wasm/test-run-wasm-wrappers.cc",
- "wasm/test-run-wasm.cc",
- "wasm/test-streaming-compilation.cc",
- "wasm/test-wasm-breakpoints.cc",
- "wasm/test-wasm-codegen.cc",
- "wasm/test-wasm-import-wrapper-cache.cc",
- "wasm/test-wasm-metrics.cc",
- "wasm/test-wasm-serialization.cc",
- "wasm/test-wasm-shared-engine.cc",
- "wasm/test-wasm-stack.cc",
- "wasm/test-wasm-trap-position.cc",
- "wasm/wasm-atomics-utils.h",
- "wasm/wasm-run-utils.cc",
- "wasm/wasm-run-utils.h",
]
if (v8_current_cpu == "arm") {
@@ -441,12 +403,63 @@ v8_source_set("cctest_sources") {
"../..:v8_for_testing",
"../..:v8_libbase",
"../..:v8_libplatform",
- "../..:v8_wrappers",
- "../..:wasm_test_common",
"../../tools/debug_helper:v8_debug_helper",
"//build/win:default_exe_manifest",
]
+ if (v8_enable_webassembly) {
+ sources += [
+ "../common/wasm/flag-utils.h",
+ "../common/wasm/test-signatures.h",
+ "../common/wasm/wasm-macro-gen.h",
+ "compiler/test-multiple-return.cc",
+
+ # test-run-native-calls uses wasm's LinkageAllocator.
+ "compiler/test-run-native-calls.cc",
+ "test-api-wasm.cc",
+ "test-js-to-wasm.cc",
+ "wasm/test-backing-store.cc",
+ "wasm/test-c-wasm-entry.cc",
+ "wasm/test-compilation-cache.cc",
+ "wasm/test-gc.cc",
+ "wasm/test-grow-memory.cc",
+ "wasm/test-jump-table-assembler.cc",
+ "wasm/test-liftoff-inspection.cc",
+ "wasm/test-run-wasm-64.cc",
+ "wasm/test-run-wasm-asmjs.cc",
+ "wasm/test-run-wasm-atomics.cc",
+ "wasm/test-run-wasm-atomics64.cc",
+ "wasm/test-run-wasm-bulk-memory.cc",
+ "wasm/test-run-wasm-exceptions.cc",
+ "wasm/test-run-wasm-interpreter.cc",
+ "wasm/test-run-wasm-js.cc",
+ "wasm/test-run-wasm-memory64.cc",
+ "wasm/test-run-wasm-module.cc",
+ "wasm/test-run-wasm-relaxed-simd.cc",
+ "wasm/test-run-wasm-sign-extension.cc",
+ "wasm/test-run-wasm-simd-liftoff.cc",
+ "wasm/test-run-wasm-simd-scalar-lowering.cc",
+ "wasm/test-run-wasm-simd.cc",
+ "wasm/test-run-wasm-wrappers.cc",
+ "wasm/test-run-wasm.cc",
+ "wasm/test-streaming-compilation.cc",
+ "wasm/test-wasm-breakpoints.cc",
+ "wasm/test-wasm-codegen.cc",
+ "wasm/test-wasm-import-wrapper-cache.cc",
+ "wasm/test-wasm-metrics.cc",
+ "wasm/test-wasm-serialization.cc",
+ "wasm/test-wasm-shared-engine.cc",
+ "wasm/test-wasm-stack.cc",
+ "wasm/test-wasm-trap-position.cc",
+ "wasm/wasm-atomics-utils.h",
+ "wasm/wasm-run-utils.cc",
+ "wasm/wasm-run-utils.h",
+ "wasm/wasm-simd-utils.cc",
+ "wasm/wasm-simd-utils.h",
+ ]
+ public_deps += [ "../..:wasm_test_common" ]
+ }
+
defines = []
deps = [
"../..:run_torque",
@@ -516,7 +529,6 @@ v8_executable("generate-bytecode-expectations") {
"../..:v8",
"../..:v8_libbase",
"../..:v8_libplatform",
- "../..:v8_wrappers",
"//build/win:default_exe_manifest",
]
diff --git a/deps/v8/test/cctest/OWNERS b/deps/v8/test/cctest/OWNERS
index d2464494fa3..827fb0a2ef0 100644
--- a/deps/v8/test/cctest/OWNERS
+++ b/deps/v8/test/cctest/OWNERS
@@ -1,3 +1,3 @@
-per-file *profile*=alph@chromium.org
+per-file *profile*=file:../../src/inspector/OWNERS
per-file test-debug-helper.cc=seth.brenith@microsoft.com
per-file test-v8windbg.cc=seth.brenith@microsoft.com
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 49969a0508c..7d5115697af 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -341,10 +341,12 @@ int main(int argc, char* argv[]) {
v8::V8::Initialize();
v8::V8::InitializeExternalStartupData(argv[0]);
+#if V8_ENABLE_WEBASSEMBLY
if (V8_TRAP_HANDLER_SUPPORTED && i::FLAG_wasm_trap_handler) {
constexpr bool use_default_signal_handler = true;
CHECK(v8::V8::EnableWebAssemblyTrapHandler(use_default_signal_handler));
}
+#endif // V8_ENABLE_WEBASSEMBLY
CcTest::set_array_buffer_allocator(
v8::ArrayBuffer::Allocator::NewDefaultAllocator());
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index e2fe217fd12..862f347aa4e 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -89,6 +89,19 @@ class JSHeapBroker;
static void Test##Name()
#endif
+// Similar to TEST, but used when test definitions appear as members of a
+// (probably parameterized) class. This allows re-using the given tests multiple
+// times. For this to work, the following conditions must hold:
+// 1. The class has a template parameter named kTestFileName of type char
+// const*, which is instantiated with __FILE__ at the *use site*, in order
+// to correctly associate the tests with the test suite using them.
+// 2. To actually execute the tests, create an instance of the class
+// containing the MEMBER_TESTs.
+#define MEMBER_TEST(Name) \
+ CcTest register_test_##Name = \
+ CcTest(Test##Name, kTestFileName, #Name, true, true); \
+ static void Test##Name()
+
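A sketch of what a class satisfying the two conditions above might look like;
the class name, test name and kThisFile constant are invented, and the cctest
harness (CcTest, CHECK_EQ) is assumed to be available.

// Illustrative only; not part of the patch.
template <char const* kTestFileName>
class SharedDictionaryTests {
 public:
  MEMBER_TEST(InsertAndLookup) {
    // Test body; runs when the registered test "InsertAndLookup" executes.
    CHECK_EQ(1 + 1, 2);
  }
};

// Condition 1 (use site): pass __FILE__ via a namespace-scope constant so the
// tests are attributed to the file that instantiates them.
constexpr char kThisFile[] = __FILE__;

// Condition 2: creating an instance registers the MEMBER_TESTs.
SharedDictionaryTests<kThisFile> g_shared_dictionary_tests;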
#define EXTENSION_LIST(V) \
V(GC_EXTENSION, "v8/gc") \
V(PRINT_EXTENSION, "v8/print") \
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 03898b82c56..7b1bf8caa5e 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -39,7 +39,6 @@
# These tests are expected to hit a CHECK (i.e. a FAIL result actually means
# the test passed).
- 'test-api/RegExpInterruptAndReenterIrregexp': [FAIL, CRASH],
'test-verifiers/Fail*': [FAIL, CRASH],
# This test always fails. It tests that LiveEdit causes abort when turned off.
@@ -171,7 +170,6 @@
'test-api/ExternalArrays': [PASS, SLOW],
'test-api/Threading*': [SKIP],
'test-cpu-profiler/MultipleIsolates': [PASS, ['not pointer_compression', SLOW]],
- 'test-debug/DebugBreakStackTrace': [PASS, SLOW],
}], # 'arch == arm64 and simulator_run'
['arch == arm64 and system == macos and not simulator_run', {
@@ -278,7 +276,6 @@
['arch == arm and simulator_run', {
# Pass but take too long with the simulator.
'test-api/Threading*': [SKIP],
- 'test-cpu-profiler/MultipleIsolates': [PASS, SLOW],
}], # 'arch == arm and simulator_run'
##############################################################################
@@ -387,6 +384,8 @@
# SIMD not fully implemented yet
'test-run-wasm-simd-liftoff/*': [SKIP],
+ 'test-run-wasm-simd-scalar-lowering/*':[SKIP],
+ 'test-run-wasm-simd/*':[SKIP],
# Some wasm functionality is not implemented yet
'test-run-wasm-atomics64/*': [SKIP],
@@ -598,6 +597,7 @@
'test-run-variables/*': [SKIP],
'test-serialize/*': [SKIP],
'test-sloppy-equality/*' : [SKIP],
+ 'test-swiss-name-dictionary-csa/*': [SKIP],
'test-torque/*': [SKIP],
'test-unwinder-code-pages/PCIsInV8_LargeCodeObject_CodePagesAPI': [SKIP],
@@ -702,4 +702,10 @@
'test-run-wasm-simd-scalar-lowering/*': [SKIP],
}], # no_simd_sse == True
+################################################################################
+['variant == stress_concurrent_inlining', {
+ # BUG(11524): Crashing flakily.
+ 'test-cpu-profiler/TracingCpuProfiler': [PASS, FAIL],
+}], # variant == stress_concurrent_inlining
+
]
diff --git a/deps/v8/test/cctest/compiler/node-observer-tester.h b/deps/v8/test/cctest/compiler/node-observer-tester.h
index 253eba230ec..202a9250505 100644
--- a/deps/v8/test/cctest/compiler/node-observer-tester.h
+++ b/deps/v8/test/cctest/compiler/node-observer-tester.h
@@ -9,7 +9,6 @@
#include "src/compiler/simplified-operator.h"
#include "src/objects/type-hints.h"
#include "test/cctest/cctest.h"
-#include "test/common/wasm/flag-utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/compiler/test-code-generator.cc b/deps/v8/test/cctest/compiler/test-code-generator.cc
index 5bbfb1492b0..d52515ec5d9 100644
--- a/deps/v8/test/cctest/compiler/test-code-generator.cc
+++ b/deps/v8/test/cctest/compiler/test-code-generator.cc
@@ -10,7 +10,6 @@
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/linkage.h"
-#include "src/compiler/wasm-compiler.h"
#include "src/execution/isolate.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/objects-inl.h"
@@ -20,6 +19,11 @@
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/function-tester.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/compiler/wasm-compiler.h"
+#include "src/wasm/wasm-engine.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace internal {
namespace compiler {
@@ -229,12 +233,12 @@ Handle<Code> BuildTeardownFunction(Isolate* isolate,
TNode<FixedArray> vector =
__ Cast(__ LoadFixedArrayElement(result_array, i));
for (int lane = 0; lane < 4; lane++) {
- TNode<Smi> lane_value =
- __ SmiFromInt32(tester.raw_assembler_for_testing()->AddNode(
+ TNode<Smi> lane_value = __ SmiFromInt32(__ UncheckedCast<Int32T>(
+ tester.raw_assembler_for_testing()->AddNode(
tester.raw_assembler_for_testing()
->machine()
->I32x4ExtractLane(lane),
- param));
+ param)));
__ StoreFixedArrayElement(vector, lane, lane_value,
UNSAFE_SKIP_WRITE_BARRIER);
}
@@ -1058,9 +1062,9 @@ class CodeGeneratorTester {
AllocatedOperand(LocationOperand::REGISTER,
MachineRepresentation::kTagged,
kReturnRegister0.code()),
- ImmediateOperand(ImmediateOperand::INLINE, -1), // poison index.
- ImmediateOperand(ImmediateOperand::INLINE, optional_padding_slot),
- ImmediateOperand(ImmediateOperand::INLINE, stack_slot_delta)};
+ ImmediateOperand(ImmediateOperand::INLINE_INT32, -1), // poison index.
+ ImmediateOperand(ImmediateOperand::INLINE_INT32, optional_padding_slot),
+ ImmediateOperand(ImmediateOperand::INLINE_INT32, stack_slot_delta)};
Instruction* tail_call =
Instruction::New(zone_, kArchTailCallCodeObject, 0, nullptr,
arraysize(callee), callee, 0, nullptr);
@@ -1147,9 +1151,10 @@ class CodeGeneratorTester {
AllocatedOperand(LocationOperand::REGISTER,
MachineRepresentation::kTagged,
kReturnRegister0.code()),
- ImmediateOperand(ImmediateOperand::INLINE, -1), // poison index.
- ImmediateOperand(ImmediateOperand::INLINE, optional_padding_slot),
- ImmediateOperand(ImmediateOperand::INLINE, first_unused_stack_slot)};
+ ImmediateOperand(ImmediateOperand::INLINE_INT32, -1), // poison index.
+ ImmediateOperand(ImmediateOperand::INLINE_INT32, optional_padding_slot),
+ ImmediateOperand(ImmediateOperand::INLINE_INT32,
+ first_unused_stack_slot)};
Instruction* tail_call =
Instruction::New(zone_, kArchTailCallCodeObject, 0, nullptr,
arraysize(callee), callee, 0, nullptr);
@@ -1432,6 +1437,7 @@ TEST(AssembleTailCallGap) {
}
}
+#if V8_ENABLE_WEBASSEMBLY
namespace {
std::shared_ptr<wasm::NativeModule> AllocateNativeModule(Isolate* isolate,
@@ -1531,6 +1537,7 @@ TEST(Regress_1171759) {
CHECK_EQ(0, mt.Call());
}
+#endif // V8_ENABLE_WEBASSEMBLY
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc b/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc
index 7a9460a6883..163477d6fcd 100644
--- a/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc
+++ b/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc
@@ -8,6 +8,7 @@
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/pipeline.h"
+#include "src/debug/debug.h"
#include "src/handles/handles.h"
#include "src/logging/counters.h"
#include "src/objects/js-function.h"
@@ -33,11 +34,11 @@ void ExpectSharedFunctionInfoState(SharedFunctionInfo sfi,
HeapObject script_or_debug_info = sfi.script_or_debug_info(kAcquireLoad);
switch (expectedState) {
case SfiState::Compiled:
- CHECK(function_data.IsBytecodeArray());
+ CHECK(function_data.IsBytecodeArray() || function_data.IsBaselineData());
CHECK(script_or_debug_info.IsScript());
break;
case SfiState::DebugInfo:
- CHECK(function_data.IsBytecodeArray());
+ CHECK(function_data.IsBytecodeArray() || function_data.IsBaselineData());
CHECK(script_or_debug_info.IsDebugInfo());
{
DebugInfo debug_info = DebugInfo::cast(script_or_debug_info);
diff --git a/deps/v8/test/cctest/compiler/test-jump-threading.cc b/deps/v8/test/cctest/compiler/test-jump-threading.cc
index 37e12d9ffc4..a66bfb207f5 100644
--- a/deps/v8/test/cctest/compiler/test-jump-threading.cc
+++ b/deps/v8/test/cctest/compiler/test-jump-threading.cc
@@ -14,12 +14,14 @@ namespace compiler {
class TestCode : public HandleAndZoneScope {
public:
- TestCode()
+ explicit TestCode(size_t block_count)
: HandleAndZoneScope(),
blocks_(main_zone()),
sequence_(main_isolate(), main_zone(), &blocks_),
rpo_number_(RpoNumber::FromInt(0)),
- current_(nullptr) {}
+ current_(nullptr) {
+ sequence_.IncreaseRpoForTesting(block_count);
+ }
ZoneVector<InstructionBlock*> blocks_;
InstructionSequence sequence_;
@@ -138,7 +140,8 @@ void VerifyForwarding(TestCode* code, int count, int* expected) {
}
TEST(FwEmpty1) {
- TestCode code;
+ constexpr size_t kBlockCount = 3;
+ TestCode code(kBlockCount);
// B0
code.Jump(1);
@@ -148,13 +151,14 @@ TEST(FwEmpty1) {
code.End();
static int expected[] = {2, 2, 2};
- VerifyForwarding(&code, 3, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwEmptyN) {
+ constexpr size_t kBlockCount = 3;
for (int i = 0; i < 9; i++) {
- TestCode code;
+ TestCode code(kBlockCount);
// B0
code.Jump(1);
@@ -165,36 +169,39 @@ TEST(FwEmptyN) {
code.End();
static int expected[] = {2, 2, 2};
- VerifyForwarding(&code, 3, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
}
TEST(FwNone1) {
- TestCode code;
+ constexpr size_t kBlockCount = 1;
+ TestCode code(kBlockCount);
// B0
code.End();
static int expected[] = {0};
- VerifyForwarding(&code, 1, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwMoves1) {
- TestCode code;
+ constexpr size_t kBlockCount = 1;
+ TestCode code(kBlockCount);
// B0
code.RedundantMoves();
code.End();
static int expected[] = {0};
- VerifyForwarding(&code, 1, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwMoves2) {
- TestCode code;
+ constexpr size_t kBlockCount = 2;
+ TestCode code(kBlockCount);
// B0
code.RedundantMoves();
@@ -203,12 +210,13 @@ TEST(FwMoves2) {
code.End();
static int expected[] = {1, 1};
- VerifyForwarding(&code, 2, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwMoves2b) {
- TestCode code;
+ constexpr size_t kBlockCount = 2;
+ TestCode code(kBlockCount);
// B0
code.NonRedundantMoves();
@@ -217,12 +225,13 @@ TEST(FwMoves2b) {
code.End();
static int expected[] = {0, 1};
- VerifyForwarding(&code, 2, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwOther2) {
- TestCode code;
+ constexpr size_t kBlockCount = 2;
+ TestCode code(kBlockCount);
// B0
code.Other();
@@ -231,12 +240,13 @@ TEST(FwOther2) {
code.End();
static int expected[] = {0, 1};
- VerifyForwarding(&code, 2, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwNone2a) {
- TestCode code;
+ constexpr size_t kBlockCount = 2;
+ TestCode code(kBlockCount);
// B0
code.Fallthru();
@@ -244,12 +254,13 @@ TEST(FwNone2a) {
code.End();
static int expected[] = {1, 1};
- VerifyForwarding(&code, 2, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwNone2b) {
- TestCode code;
+ constexpr size_t kBlockCount = 2;
+ TestCode code(kBlockCount);
// B0
code.Jump(1);
@@ -257,23 +268,25 @@ TEST(FwNone2b) {
code.End();
static int expected[] = {1, 1};
- VerifyForwarding(&code, 2, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwLoop1) {
- TestCode code;
+ constexpr size_t kBlockCount = 1;
+ TestCode code(kBlockCount);
// B0
code.Jump(0);
static int expected[] = {0};
- VerifyForwarding(&code, 1, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwLoop2) {
- TestCode code;
+ constexpr size_t kBlockCount = 2;
+ TestCode code(kBlockCount);
// B0
code.Fallthru();
@@ -281,12 +294,13 @@ TEST(FwLoop2) {
code.Jump(0);
static int expected[] = {0, 0};
- VerifyForwarding(&code, 2, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwLoop3) {
- TestCode code;
+ constexpr size_t kBlockCount = 3;
+ TestCode code(kBlockCount);
// B0
code.Fallthru();
@@ -296,12 +310,13 @@ TEST(FwLoop3) {
code.Jump(0);
static int expected[] = {0, 0, 0};
- VerifyForwarding(&code, 3, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwLoop1b) {
- TestCode code;
+ constexpr size_t kBlockCount = 2;
+ TestCode code(kBlockCount);
// B0
code.Fallthru();
@@ -314,7 +329,8 @@ TEST(FwLoop1b) {
TEST(FwLoop2b) {
- TestCode code;
+ constexpr size_t kBlockCount = 3;
+ TestCode code(kBlockCount);
// B0
code.Fallthru();
@@ -324,12 +340,13 @@ TEST(FwLoop2b) {
code.Jump(1);
static int expected[] = {1, 1, 1};
- VerifyForwarding(&code, 3, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwLoop3b) {
- TestCode code;
+ constexpr size_t kBlockCount = 4;
+ TestCode code(kBlockCount);
// B0
code.Fallthru();
@@ -341,12 +358,13 @@ TEST(FwLoop3b) {
code.Jump(1);
static int expected[] = {1, 1, 1, 1};
- VerifyForwarding(&code, 4, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwLoop2_1a) {
- TestCode code;
+ constexpr size_t kBlockCount = 5;
+ TestCode code(kBlockCount);
// B0
code.Fallthru();
@@ -360,12 +378,13 @@ TEST(FwLoop2_1a) {
code.Jump(2);
static int expected[] = {1, 1, 1, 1, 1};
- VerifyForwarding(&code, 5, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwLoop2_1b) {
- TestCode code;
+ constexpr size_t kBlockCount = 5;
+ TestCode code(kBlockCount);
// B0
code.Fallthru();
@@ -379,12 +398,13 @@ TEST(FwLoop2_1b) {
code.Jump(2);
static int expected[] = {2, 2, 2, 2, 2};
- VerifyForwarding(&code, 5, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwLoop2_1c) {
- TestCode code;
+ constexpr size_t kBlockCount = 5;
+ TestCode code(kBlockCount);
// B0
code.Fallthru();
@@ -398,12 +418,13 @@ TEST(FwLoop2_1c) {
code.Jump(1);
static int expected[] = {1, 1, 1, 1, 1};
- VerifyForwarding(&code, 5, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwLoop2_1d) {
- TestCode code;
+ constexpr size_t kBlockCount = 5;
+ TestCode code(kBlockCount);
// B0
code.Fallthru();
@@ -417,12 +438,13 @@ TEST(FwLoop2_1d) {
code.Jump(1);
static int expected[] = {1, 1, 1, 1, 1};
- VerifyForwarding(&code, 5, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwLoop3_1a) {
- TestCode code;
+ constexpr size_t kBlockCount = 6;
+ TestCode code(kBlockCount);
// B0
code.Fallthru();
@@ -438,14 +460,16 @@ TEST(FwLoop3_1a) {
code.Jump(0);
static int expected[] = {2, 2, 2, 2, 2, 2};
- VerifyForwarding(&code, 6, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwDiamonds) {
+ constexpr size_t kBlockCount = 4;
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 2; j++) {
- TestCode code;
+ TestCode code(kBlockCount);
+
// B0
code.Branch(1, 2);
// B1
@@ -458,17 +482,18 @@ TEST(FwDiamonds) {
code.End();
int expected[] = {0, i ? 1 : 3, j ? 2 : 3, 3};
- VerifyForwarding(&code, 4, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
}
}
TEST(FwDiamonds2) {
+ constexpr size_t kBlockCount = 5;
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 2; j++) {
for (int k = 0; k < 2; k++) {
- TestCode code;
+ TestCode code(kBlockCount);
// B0
code.Branch(1, 2);
// B1
@@ -485,7 +510,7 @@ TEST(FwDiamonds2) {
int merge = k ? 3 : 4;
int expected[] = {0, i ? 1 : merge, j ? 2 : merge, merge, 4};
- VerifyForwarding(&code, 5, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
}
}
@@ -493,11 +518,12 @@ TEST(FwDiamonds2) {
TEST(FwDoubleDiamonds) {
+ constexpr size_t kBlockCount = 7;
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 2; j++) {
for (int x = 0; x < 2; x++) {
for (int y = 0; y < 2; y++) {
- TestCode code;
+ TestCode code(kBlockCount);
// B0
code.Branch(1, 2);
// B1
@@ -519,7 +545,7 @@ TEST(FwDoubleDiamonds) {
int expected[] = {0, i ? 1 : 3, j ? 2 : 3, 3,
x ? 4 : 6, y ? 5 : 6, 6};
- VerifyForwarding(&code, 7, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
}
}
@@ -572,7 +598,8 @@ int find(int x, int* permutation, int size) {
void RunPermutedChain(int* permutation, int size) {
- TestCode code;
+ const int kBlockCount = size + 2;
+ TestCode code(kBlockCount);
int cur = -1;
for (int i = 0; i < size; i++) {
code.Jump(find(cur + 1, permutation, size) + 1);
@@ -583,7 +610,7 @@ void RunPermutedChain(int* permutation, int size) {
int expected[] = {size + 1, size + 1, size + 1, size + 1,
size + 1, size + 1, size + 1};
- VerifyForwarding(&code, size + 2, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
@@ -595,7 +622,8 @@ TEST(FwPermuted_chain) {
void RunPermutedDiamond(int* permutation, int size) {
- TestCode code;
+ constexpr size_t kBlockCount = 6;
+ TestCode code(kBlockCount);
int br = 1 + find(0, permutation, size);
code.Jump(br);
for (int i = 0; i < size; i++) {
@@ -619,7 +647,7 @@ void RunPermutedDiamond(int* permutation, int size) {
int expected[] = {br, 5, 5, 5, 5, 5};
expected[br] = br;
- VerifyForwarding(&code, 6, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
@@ -676,7 +704,8 @@ void CheckAssemblyOrder(TestCode* code, int size, int* expected) {
}
TEST(Rewire1) {
- TestCode code;
+ constexpr size_t kBlockCount = 3;
+ TestCode code(kBlockCount);
// B0
int j1 = code.Jump(1);
@@ -686,17 +715,18 @@ TEST(Rewire1) {
code.End();
static int forward[] = {2, 2, 2};
- ApplyForwarding(&code, 3, forward);
+ ApplyForwarding(&code, kBlockCount, forward);
CheckJump(&code, j1, 2);
CheckNop(&code, j2);
static int assembly[] = {0, 1, 1};
- CheckAssemblyOrder(&code, 3, assembly);
+ CheckAssemblyOrder(&code, kBlockCount, assembly);
}
TEST(Rewire1_deferred) {
- TestCode code;
+ constexpr size_t kBlockCount = 4;
+ TestCode code(kBlockCount);
// B0
int j1 = code.Jump(1);
@@ -709,18 +739,19 @@ TEST(Rewire1_deferred) {
code.End();
static int forward[] = {3, 3, 3, 3};
- ApplyForwarding(&code, 4, forward);
+ ApplyForwarding(&code, kBlockCount, forward);
CheckJump(&code, j1, 3);
CheckNop(&code, j2);
CheckNop(&code, j3);
static int assembly[] = {0, 1, 2, 1};
- CheckAssemblyOrder(&code, 4, assembly);
+ CheckAssemblyOrder(&code, kBlockCount, assembly);
}
TEST(Rewire2_deferred) {
- TestCode code;
+ constexpr size_t kBlockCount = 4;
+ TestCode code(kBlockCount);
// B0
code.Other();
@@ -735,19 +766,20 @@ TEST(Rewire2_deferred) {
code.End();
static int forward[] = {0, 1, 2, 3};
- ApplyForwarding(&code, 4, forward);
+ ApplyForwarding(&code, kBlockCount, forward);
CheckJump(&code, j1, 1);
CheckJump(&code, j2, 3);
static int assembly[] = {0, 2, 3, 1};
- CheckAssemblyOrder(&code, 4, assembly);
+ CheckAssemblyOrder(&code, kBlockCount, assembly);
}
TEST(Rewire_diamond) {
+ constexpr size_t kBlockCount = 5;
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 2; j++) {
- TestCode code;
+ TestCode code(kBlockCount);
// B0
int j1 = code.Jump(1);
// B1
@@ -760,7 +792,7 @@ TEST(Rewire_diamond) {
code.End();
int forward[] = {0, 1, i ? 4 : 2, j ? 4 : 3, 4};
- ApplyForwarding(&code, 5, forward);
+ ApplyForwarding(&code, kBlockCount, forward);
CheckJump(&code, j1, 1);
CheckBranch(&code, b1, i ? 4 : 2, j ? 4 : 3);
if (i) {
@@ -781,13 +813,14 @@ TEST(Rewire_diamond) {
if (j) {
for (int k = 4; k < 5; k++) assembly[k]--;
}
- CheckAssemblyOrder(&code, 5, assembly);
+ CheckAssemblyOrder(&code, kBlockCount, assembly);
}
}
}
TEST(RewireRet) {
- TestCode code;
+ constexpr size_t kBlockCount = 4;
+ TestCode code(kBlockCount);
// B0
code.Branch(1, 2);
@@ -807,7 +840,8 @@ TEST(RewireRet) {
}
TEST(RewireRet1) {
- TestCode code;
+ constexpr size_t kBlockCount = 4;
+ TestCode code(kBlockCount);
// B0
code.Branch(1, 2);
@@ -819,15 +853,16 @@ TEST(RewireRet1) {
code.End();
int forward[] = {0, 1, 2, 3};
- VerifyForwarding(&code, 4, forward);
- ApplyForwarding(&code, 4, forward);
+ VerifyForwarding(&code, kBlockCount, forward);
+ ApplyForwarding(&code, kBlockCount, forward);
CheckRet(&code, j1);
CheckRet(&code, j2);
}
TEST(RewireRet2) {
- TestCode code;
+ constexpr size_t kBlockCount = 4;
+ TestCode code(kBlockCount);
// B0
code.Branch(1, 2);
@@ -839,15 +874,16 @@ TEST(RewireRet2) {
code.End();
int forward[] = {0, 1, 1, 3};
- VerifyForwarding(&code, 4, forward);
- ApplyForwarding(&code, 4, forward);
+ VerifyForwarding(&code, kBlockCount, forward);
+ ApplyForwarding(&code, kBlockCount, forward);
CheckRet(&code, j1);
CheckNop(&code, j2);
}
TEST(DifferentSizeRet) {
- TestCode code;
+ constexpr size_t kBlockCount = 4;
+ TestCode code(kBlockCount);
// B0
code.Branch(1, 2);
@@ -859,8 +895,8 @@ TEST(DifferentSizeRet) {
code.End();
int forward[] = {0, 1, 2, 3};
- VerifyForwarding(&code, 4, forward);
- ApplyForwarding(&code, 4, forward);
+ VerifyForwarding(&code, kBlockCount, forward);
+ ApplyForwarding(&code, kBlockCount, forward);
CheckRet(&code, j1);
CheckRet(&code, j2);
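
The expected arrays in the jump-threading tests above record, for each basic block, the block it should ultimately be forwarded to once empty jump-only blocks are threaded away. As a rough standalone illustration of that idea (plain C++, not V8's actual JumpThreading pass or its TestCode helper, and deliberately ignoring the fallthrough and empty-loop cases that the FwLoop* tests exercise), chains of jump-only blocks can be collapsed like this:

#include <cstddef>
#include <vector>

// Simplified block model: a block either does real work (kOther) or consists
// of nothing but an unconditional jump to another block (kJumpOnly).
enum class BlockKind { kOther, kJumpOnly };

struct Block {
  BlockKind kind;
  int target;  // Jump target; ignored for kOther blocks.
};

// For each block, compute the block it ultimately forwards to: chains of
// jump-only blocks collapse onto their final destination, everything else
// forwards to itself.
std::vector<int> ComputeForwarding(const std::vector<Block>& blocks) {
  std::vector<int> forward(blocks.size());
  for (size_t i = 0; i < blocks.size(); ++i) {
    int f = static_cast<int>(i);
    // Follow jump-only blocks; bound the walk so a cycle of jumps (a loop of
    // empty blocks) cannot spin forever.
    for (size_t steps = 0; steps < blocks.size(); ++steps) {
      const Block& b = blocks[static_cast<size_t>(f)];
      if (b.kind != BlockKind::kJumpOnly || b.target == f) break;
      f = b.target;
    }
    forward[i] = f;
  }
  return forward;
}

For example, a three-block graph in which B0 jumps to B1 and B1 jumps to B2 yields {2, 2, 2}, the same shape as the forwarding vectors the tests hand to VerifyForwarding and ApplyForwarding above.
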
diff --git a/deps/v8/test/cctest/compiler/test-linkage.cc b/deps/v8/test/cctest/compiler/test-linkage.cc
index 73aa8064790..bd6d0a5f0f0 100644
--- a/deps/v8/test/cctest/compiler/test-linkage.cc
+++ b/deps/v8/test/cctest/compiler/test-linkage.cc
@@ -114,7 +114,7 @@ TEST(TestLinkageStubCall) {
&zone, callable.descriptor(), 0, CallDescriptor::kNoFlags,
Operator::kNoProperties);
CHECK(call_descriptor);
- CHECK_EQ(0, static_cast<int>(call_descriptor->StackParameterCount()));
+ CHECK_EQ(0, static_cast<int>(call_descriptor->ParameterSlotCount()));
CHECK_EQ(1, static_cast<int>(call_descriptor->ReturnCount()));
CHECK_EQ(Operator::kNoProperties, call_descriptor->properties());
CHECK_EQ(false, call_descriptor->IsJSFunctionCall());
@@ -124,6 +124,7 @@ TEST(TestLinkageStubCall) {
// TODO(titzer): test linkage creation for outgoing stub calls.
}
+#if V8_ENABLE_WEBASSEMBLY
TEST(TestFPLinkageStubCall) {
Isolate* isolate = CcTest::InitIsolateOnce();
Zone zone(isolate->allocator(), ZONE_NAME);
@@ -135,7 +136,7 @@ TEST(TestFPLinkageStubCall) {
&zone, callable.descriptor(), 0, CallDescriptor::kNoFlags,
Operator::kNoProperties);
CHECK(call_descriptor);
- CHECK_EQ(0, static_cast<int>(call_descriptor->StackParameterCount()));
+ CHECK_EQ(0, static_cast<int>(call_descriptor->ParameterSlotCount()));
CHECK_EQ(1, static_cast<int>(call_descriptor->ParameterCount()));
CHECK_EQ(1, static_cast<int>(call_descriptor->ReturnCount()));
CHECK_EQ(Operator::kNoProperties, call_descriptor->properties());
@@ -148,6 +149,7 @@ TEST(TestFPLinkageStubCall) {
CHECK_EQ(call_descriptor->GetReturnLocation(0).GetLocation(),
kReturnRegister0.code());
}
+#endif // V8_ENABLE_WEBASSEMBLY
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc b/deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc
index 7a768390810..d62a187a1c6 100644
--- a/deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc
+++ b/deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc
@@ -4,11 +4,14 @@
#include "src/codegen/external-reference.h"
#include "src/objects/objects-inl.h"
-#include "src/wasm/wasm-external-refs.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/value-helper.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-external-refs.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace internal {
namespace compiler {
@@ -199,6 +202,7 @@ void TestExternalReference_BinOpWithReturn(
}
}
+#if V8_ENABLE_WEBASSEMBLY
TEST(RunCallF32Trunc) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_f32_trunc();
@@ -373,6 +377,7 @@ TEST(RunCallFloat64Pow) {
TestExternalReference_BinOp<double>(&m, ref, wasm::float64_pow_wrapper,
ValueHelper::float64_vector());
}
+#endif // V8_ENABLE_WEBASSEMBLY
#ifdef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index 5f7b6eed884..3160848b680 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -400,6 +400,117 @@ TEST(RunWord64Popcnt) {
#endif // V8_TARGET_ARCH_64_BIT
+TEST(RunFloat32SelectRegFloatCompare) {
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Float32(),
+ MachineType::Float32());
+ if (!m.machine()->Float32Select().IsSupported()) {
+ return;
+ }
+
+ Node* cmp = m.Float32Equal(m.Parameter(0), m.Parameter(1));
+ m.Return(m.Float32Select(cmp, m.Parameter(0), m.Parameter(1)));
+
+ FOR_FLOAT32_INPUTS(pl) {
+ FOR_FLOAT32_INPUTS(pr) {
+ float expected_result = pl == pr ? pl : pr;
+ CHECK_FLOAT_EQ(expected_result, m.Call(pl, pr));
+ }
+ }
+}
+
+TEST(RunFloat64SelectRegFloatCompare) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64(),
+ MachineType::Float64());
+ if (!m.machine()->Float64Select().IsSupported()) {
+ return;
+ }
+
+ Node* cmp = m.Float64LessThan(m.Parameter(0), m.Parameter(1));
+ m.Return(m.Float64Select(cmp, m.Parameter(0), m.Parameter(1)));
+
+ FOR_FLOAT64_INPUTS(pl) {
+ FOR_FLOAT64_INPUTS(pr) {
+ double expected_result = pl < pr ? pl : pr;
+ CHECK_DOUBLE_EQ(expected_result, m.Call(pl, pr));
+ }
+ }
+}
+
+TEST(RunFloat32SelectImmediateOnLeftFloatCompare) {
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Float32());
+ if (!m.machine()->Float32Select().IsSupported()) {
+ return;
+ }
+
+ const float pl = -5.0;
+ Node* a = m.Float32Constant(pl);
+ Node* cmp = m.Float32LessThan(a, m.Parameter(0));
+ m.Return(m.Float32Select(cmp, a, m.Parameter(0)));
+
+ FOR_FLOAT32_INPUTS(pr) {
+ float expected_result = pl < pr ? pl : pr;
+ CHECK_FLOAT_EQ(expected_result, m.Call(pr));
+ }
+}
+
+TEST(RunFloat64SelectImmediateOnRightFloatCompare) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ if (!m.machine()->Float64Select().IsSupported()) {
+ return;
+ }
+
+ double pr = 5.0;
+ Node* b = m.Float64Constant(pr);
+ Node* cmp = m.Float64LessThanOrEqual(m.Parameter(0), b);
+ m.Return(m.Float64Select(cmp, m.Parameter(0), b));
+
+ FOR_FLOAT64_INPUTS(pl) {
+ double expected_result = pl <= pr ? pl : pr;
+ CHECK_DOUBLE_EQ(expected_result, m.Call(pl));
+ }
+}
+
+TEST(RunFloat32SelectImmediateIntCompare) {
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Int32(),
+ MachineType::Int32());
+ if (!m.machine()->Float32Select().IsSupported()) {
+ return;
+ }
+
+ float tval = -1.0;
+ float fval = 1.0;
+ Node* cmp = m.Int32LessThanOrEqual(m.Parameter(0), m.Parameter(1));
+ m.Return(m.Float64Select(cmp, m.Float32Constant(tval),
+ m.Float32Constant(fval)));
+
+ FOR_INT32_INPUTS(pl) {
+ FOR_INT32_INPUTS(pr) {
+ float expected_result = pl <= pr ? tval : fval;
+ CHECK_FLOAT_EQ(expected_result, m.Call(pl, pr));
+ }
+ }
+}
+
+TEST(RunFloat64SelectImmediateIntCompare) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Int64(),
+ MachineType::Int64());
+ if (!m.machine()->Float64Select().IsSupported()) {
+ return;
+ }
+
+ double tval = -1.0;
+ double fval = 1.0;
+ Node* cmp = m.Int64LessThan(m.Parameter(0), m.Parameter(1));
+ m.Return(m.Float64Select(cmp, m.Float64Constant(tval),
+ m.Float64Constant(fval)));
+
+ FOR_INT64_INPUTS(pl) {
+ FOR_INT64_INPUTS(pr) {
+ double expected_result = pl < pr ? tval : fval;
+ CHECK_DOUBLE_EQ(expected_result, m.Call(pl, pr));
+ }
+ }
+}
static Node* Int32Input(RawMachineAssemblerTester<int32_t>* m, int index) {
switch (index) {
diff --git a/deps/v8/test/cctest/compiler/test-run-retpoline.cc b/deps/v8/test/cctest/compiler/test-run-retpoline.cc
index c0a8324286a..090351bc389 100644
--- a/deps/v8/test/cctest/compiler/test-run-retpoline.cc
+++ b/deps/v8/test/cctest/compiler/test-run-retpoline.cc
@@ -24,9 +24,9 @@ namespace {
Handle<Code> BuildCallee(Isolate* isolate, CallDescriptor* call_descriptor) {
CodeAssemblerTester tester(isolate, call_descriptor, "callee");
CodeStubAssembler assembler(tester.state());
- int param_count = static_cast<int>(call_descriptor->StackParameterCount());
+ int param_slots = static_cast<int>(call_descriptor->ParameterSlotCount());
TNode<IntPtrT> sum = __ IntPtrConstant(0);
- for (int i = 0; i < param_count; ++i) {
+ for (int i = 0; i < param_slots; ++i) {
TNode<IntPtrT> product = __ Signed(__ IntPtrMul(
__ UncheckedParameter<IntPtrT>(i), __ IntPtrConstant(i + 1)));
sum = __ IntPtrAdd(sum, product);
@@ -58,17 +58,17 @@ Handle<Code> BuildCaller(Isolate* isolate, CallDescriptor* call_descriptor,
__ BIND(&end);
params.push_back(target_var.value());
- int param_count = static_cast<int>(callee_descriptor->StackParameterCount());
- for (int i = 0; i < param_count; ++i) {
+ int param_slots = static_cast<int>(callee_descriptor->ParameterSlotCount());
+ for (int i = 0; i < param_slots; ++i) {
params.push_back(__ IntPtrConstant(i));
}
- DCHECK_EQ(param_count + 1, params.size());
+ DCHECK_EQ(param_slots + 1, params.size());
if (tail) {
tester.raw_assembler_for_testing()->TailCallN(
- callee_descriptor, param_count + 1, params.data());
+ callee_descriptor, param_slots + 1, params.data());
} else {
Node* result = tester.raw_assembler_for_testing()->CallN(
- callee_descriptor, param_count + 1, params.data());
+ callee_descriptor, param_slots + 1, params.data());
__ Return(__ UncheckedCast<IntPtrT>(result));
}
return tester.GenerateCodeCloseAndEscape();
@@ -85,31 +85,30 @@ Handle<Code> BuildSetupFunction(Isolate* isolate,
params.push_back(__ HeapConstant(
BuildCaller(isolate, caller_descriptor, callee_descriptor, tail)));
// Set up arguments for "Caller".
- int param_count = static_cast<int>(caller_descriptor->StackParameterCount());
- for (int i = 0; i < param_count; ++i) {
+ int param_slots = static_cast<int>(caller_descriptor->ParameterSlotCount());
+ for (int i = 0; i < param_slots; ++i) {
// Use values that are different from the ones we will pass to this
// function's callee later.
params.push_back(__ IntPtrConstant(i + 42));
}
- DCHECK_EQ(param_count + 1, params.size());
+ DCHECK_EQ(param_slots + 1, params.size());
TNode<IntPtrT> intptr_result =
__ UncheckedCast<IntPtrT>(tester.raw_assembler_for_testing()->CallN(
- caller_descriptor, param_count + 1, params.data()));
+ caller_descriptor, param_slots + 1, params.data()));
__ Return(__ SmiTag(intptr_result));
return tester.GenerateCodeCloseAndEscape();
}
-CallDescriptor* CreateDescriptorForStackArguments(Zone* zone,
- int stack_param_count) {
+CallDescriptor* CreateDescriptorForStackArguments(Zone* zone, int param_slots) {
LocationSignature::Builder locations(zone, 1,
- static_cast<size_t>(stack_param_count));
+ static_cast<size_t>(param_slots));
locations.AddReturn(LinkageLocation::ForRegister(kReturnRegister0.code(),
MachineType::IntPtr()));
- for (int i = 0; i < stack_param_count; ++i) {
+ for (int i = 0; i < param_slots; ++i) {
locations.AddParam(LinkageLocation::ForCallerFrameSlot(
- i - stack_param_count, MachineType::IntPtr()));
+ i - param_slots, MachineType::IntPtr()));
}
return zone->New<CallDescriptor>(
@@ -118,7 +117,7 @@ CallDescriptor* CreateDescriptorForStackArguments(Zone* zone,
LinkageLocation::ForAnyRegister(
MachineType::AnyTagged()), // target location
locations.Build(), // location_sig
- stack_param_count, // stack_parameter_count
+ param_slots, // stack parameter slots
Operator::kNoProperties, // properties
kNoCalleeSaved, // callee-saved registers
kNoCalleeSaved, // callee-saved fp
diff --git a/deps/v8/test/cctest/compiler/test-run-tail-calls.cc b/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
index 0601c161c1c..06b2529ad69 100644
--- a/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
@@ -25,9 +25,9 @@ namespace {
Handle<Code> BuildCallee(Isolate* isolate, CallDescriptor* call_descriptor) {
CodeAssemblerTester tester(isolate, call_descriptor, "callee");
CodeStubAssembler assembler(tester.state());
- int param_count = static_cast<int>(call_descriptor->StackParameterCount());
+ int param_slots = static_cast<int>(call_descriptor->ParameterSlotCount());
TNode<IntPtrT> sum = __ IntPtrConstant(0);
- for (int i = 0; i < param_count; ++i) {
+ for (int i = 0; i < param_slots; ++i) {
TNode<WordT> product = __ IntPtrMul(__ UncheckedParameter<IntPtrT>(i),
__ IntPtrConstant(i + 1));
sum = __ Signed(__ IntPtrAdd(sum, product));
@@ -45,13 +45,13 @@ Handle<Code> BuildCaller(Isolate* isolate, CallDescriptor* call_descriptor,
std::vector<Node*> params;
// The first parameter is always the callee.
params.push_back(__ HeapConstant(BuildCallee(isolate, callee_descriptor)));
- int param_count = static_cast<int>(callee_descriptor->StackParameterCount());
- for (int i = 0; i < param_count; ++i) {
+ int param_slots = static_cast<int>(callee_descriptor->ParameterSlotCount());
+ for (int i = 0; i < param_slots; ++i) {
params.push_back(__ IntPtrConstant(i));
}
- DCHECK_EQ(param_count + 1, params.size());
+ DCHECK_EQ(param_slots + 1, params.size());
tester.raw_assembler_for_testing()->TailCallN(callee_descriptor,
- param_count + 1, params.data());
+ param_slots + 1, params.data());
return tester.GenerateCodeCloseAndEscape();
}
@@ -66,31 +66,30 @@ Handle<Code> BuildSetupFunction(Isolate* isolate,
params.push_back(__ HeapConstant(
BuildCaller(isolate, caller_descriptor, callee_descriptor)));
// Set up arguments for "Caller".
- int param_count = static_cast<int>(caller_descriptor->StackParameterCount());
- for (int i = 0; i < param_count; ++i) {
+ int param_slots = static_cast<int>(caller_descriptor->ParameterSlotCount());
+ for (int i = 0; i < param_slots; ++i) {
// Use values that are different from the ones we will pass to this
// function's callee later.
params.push_back(__ IntPtrConstant(i + 42));
}
- DCHECK_EQ(param_count + 1, params.size());
+ DCHECK_EQ(param_slots + 1, params.size());
TNode<IntPtrT> intptr_result =
__ UncheckedCast<IntPtrT>(tester.raw_assembler_for_testing()->CallN(
- caller_descriptor, param_count + 1, params.data()));
+ caller_descriptor, param_slots + 1, params.data()));
__ Return(__ SmiTag(intptr_result));
return tester.GenerateCodeCloseAndEscape();
}
-CallDescriptor* CreateDescriptorForStackArguments(Zone* zone,
- int stack_param_count) {
+CallDescriptor* CreateDescriptorForStackArguments(Zone* zone, int param_slots) {
LocationSignature::Builder locations(zone, 1,
- static_cast<size_t>(stack_param_count));
+ static_cast<size_t>(param_slots));
locations.AddReturn(LinkageLocation::ForRegister(kReturnRegister0.code(),
MachineType::IntPtr()));
- for (int i = 0; i < stack_param_count; ++i) {
+ for (int i = 0; i < param_slots; ++i) {
locations.AddParam(LinkageLocation::ForCallerFrameSlot(
- i - stack_param_count, MachineType::IntPtr()));
+ i - param_slots, MachineType::IntPtr()));
}
return zone->New<CallDescriptor>(
@@ -99,7 +98,7 @@ CallDescriptor* CreateDescriptorForStackArguments(Zone* zone,
LinkageLocation::ForAnyRegister(
MachineType::AnyTagged()), // target location
locations.Build(), // location_sig
- stack_param_count, // stack_parameter_count
+ param_slots, // stack parameter slots
Operator::kNoProperties, // properties
kNoCalleeSaved, // callee-saved registers
kNoCalleeSaved, // callee-saved fp
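
The renames above (StackParameterCount to ParameterSlotCount, param_count to param_slots, the "stack_parameter_count" comment to "stack parameter slots") all push the same distinction: what is being counted is stack slots, not parameters, and the two can differ, for example when a wide value needs more than one slot on a 32-bit target or when padding slots are inserted. A hedged, self-contained sketch of that distinction in plain C++ (hypothetical Rep and SlotsFor names, not V8's CallDescriptor or LinkageLocation API):

#include <vector>

// Hypothetical machine representations for stack-passed parameters.
enum class Rep { kWord32, kWord64, kFloat64 };

// Number of stack slots one parameter occupies on a target whose slots are
// slot_size_bytes wide (typically 4 on 32-bit targets, 8 on 64-bit ones).
int SlotsFor(Rep rep, int slot_size_bytes) {
  int bytes = (rep == Rep::kWord32) ? 4 : 8;
  return (bytes + slot_size_bytes - 1) / slot_size_bytes;  // round up
}

// Parameter count vs. parameter *slot* count: with 4-byte slots, passing
// {kWord32, kFloat64} on the stack is 2 parameters but 3 slots.
int ParameterSlotCount(const std::vector<Rep>& stack_params,
                       int slot_size_bytes) {
  int slots = 0;
  for (Rep rep : stack_params) slots += SlotsFor(rep, slot_size_bytes);
  return slots;
}
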
diff --git a/deps/v8/test/cctest/compiler/test-sloppy-equality.cc b/deps/v8/test/cctest/compiler/test-sloppy-equality.cc
index 82450abb7c9..7533000afbd 100644
--- a/deps/v8/test/cctest/compiler/test-sloppy-equality.cc
+++ b/deps/v8/test/cctest/compiler/test-sloppy-equality.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "test/cctest/compiler/node-observer-tester.h"
+#include "test/common/flag-utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/heap/test-alloc.cc b/deps/v8/test/cctest/heap/test-alloc.cc
index c8fffb987dc..67040e4d60e 100644
--- a/deps/v8/test/cctest/heap/test-alloc.cc
+++ b/deps/v8/test/cctest/heap/test-alloc.cc
@@ -151,7 +151,7 @@ TEST(StressJS) {
// Patch the map to have an accessor for "get".
Handle<Map> map(function->initial_map(), isolate);
Handle<DescriptorArray> instance_descriptors(
- map->instance_descriptors(kRelaxedLoad), isolate);
+ map->instance_descriptors(isolate), isolate);
CHECK_EQ(0, instance_descriptors->number_of_descriptors());
PropertyAttributes attrs = NONE;
diff --git a/deps/v8/test/cctest/heap/test-compaction.cc b/deps/v8/test/cctest/heap/test-compaction.cc
index 24ddbb4cfc5..cad0d900ae6 100644
--- a/deps/v8/test/cctest/heap/test-compaction.cc
+++ b/deps/v8/test/cctest/heap/test-compaction.cc
@@ -43,7 +43,7 @@ void CheckAllObjectsOnPage(const std::vector<Handle<FixedArray>>& handles,
} // namespace
HEAP_TEST(CompactionFullAbortedPage) {
- if (FLAG_never_compact) return;
+ if (FLAG_never_compact || FLAG_crash_on_aborted_evacuation) return;
// Test the scenario where we reach OOM during compaction and the whole page
// is aborted.
@@ -106,7 +106,7 @@ int GetObjectSize(int objects_per_page) {
} // namespace
HEAP_TEST(CompactionPartiallyAbortedPage) {
- if (FLAG_never_compact) return;
+ if (FLAG_never_compact || FLAG_crash_on_aborted_evacuation) return;
// Test the scenario where we reach OOM during compaction and parts of the
// page have already been migrated to a new one.
@@ -186,7 +186,7 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
}
HEAP_TEST(CompactionPartiallyAbortedPageWithInvalidatedSlots) {
- if (FLAG_never_compact) return;
+ if (FLAG_never_compact || FLAG_crash_on_aborted_evacuation) return;
// Test evacuating a page partially when it contains recorded
// slots and invalidated objects.
@@ -269,7 +269,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithInvalidatedSlots) {
}
HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
- if (FLAG_never_compact) return;
+ if (FLAG_never_compact || FLAG_crash_on_aborted_evacuation) return;
// Test the scenario where we reach OOM during compaction and parts of the
// page have already been migrated to a new one. Objects on the aborted page
// are linked together. This test makes sure that intra-aborted page pointers
diff --git a/deps/v8/test/cctest/heap/test-concurrent-allocation.cc b/deps/v8/test/cctest/heap/test-concurrent-allocation.cc
index 5450e0358df..1a664b95626 100644
--- a/deps/v8/test/cctest/heap/test-concurrent-allocation.cc
+++ b/deps/v8/test/cctest/heap/test-concurrent-allocation.cc
@@ -65,7 +65,8 @@ void AllocateSomeObjects(LocalHeap* local_heap) {
class ConcurrentAllocationThread final : public v8::base::Thread {
public:
- explicit ConcurrentAllocationThread(Heap* heap, std::atomic<int>* pending)
+ explicit ConcurrentAllocationThread(Heap* heap,
+ std::atomic<int>* pending = nullptr)
: v8::base::Thread(base::Thread::Options("ThreadWithLocalHeap")),
heap_(heap),
pending_(pending) {}
@@ -74,7 +75,7 @@ class ConcurrentAllocationThread final : public v8::base::Thread {
LocalHeap local_heap(heap_, ThreadKind::kBackground);
UnparkedScope unparked_scope(&local_heap);
AllocateSomeObjects(&local_heap);
- pending_->fetch_sub(1);
+ if (pending_) pending_->fetch_sub(1);
}
Heap* heap_;
@@ -128,6 +129,108 @@ UNINITIALIZED_TEST(ConcurrentAllocationInOldSpaceFromMainThread) {
isolate->Dispose();
}
+UNINITIALIZED_TEST(ConcurrentAllocationWhileMainThreadIsParked) {
+ FLAG_max_old_space_size = 4;
+ FLAG_stress_concurrent_allocation = false;
+
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+
+ std::vector<std::unique_ptr<ConcurrentAllocationThread>> threads;
+ const int kThreads = 4;
+
+ {
+ ParkedScope scope(i_isolate->main_thread_local_isolate());
+
+ for (int i = 0; i < kThreads; i++) {
+ auto thread =
+ std::make_unique<ConcurrentAllocationThread>(i_isolate->heap());
+ CHECK(thread->Start());
+ threads.push_back(std::move(thread));
+ }
+
+ for (auto& thread : threads) {
+ thread->Join();
+ }
+ }
+
+ isolate->Dispose();
+}
+
+UNINITIALIZED_TEST(ConcurrentAllocationWhileMainThreadParksAndUnparks) {
+ FLAG_max_old_space_size = 4;
+ FLAG_stress_concurrent_allocation = false;
+ FLAG_incremental_marking = false;
+
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+
+ std::vector<std::unique_ptr<ConcurrentAllocationThread>> threads;
+ const int kThreads = 4;
+
+ for (int i = 0; i < kThreads; i++) {
+ auto thread =
+ std::make_unique<ConcurrentAllocationThread>(i_isolate->heap());
+ CHECK(thread->Start());
+ threads.push_back(std::move(thread));
+ }
+
+ for (int i = 0; i < 300'000; i++) {
+ ParkedScope scope(i_isolate->main_thread_local_isolate());
+ }
+
+ {
+ ParkedScope scope(i_isolate->main_thread_local_isolate());
+
+ for (auto& thread : threads) {
+ thread->Join();
+ }
+ }
+
+ isolate->Dispose();
+}
+
+UNINITIALIZED_TEST(ConcurrentAllocationWhileMainThreadRunsWithSafepoints) {
+ FLAG_max_old_space_size = 4;
+ FLAG_stress_concurrent_allocation = false;
+ FLAG_incremental_marking = false;
+
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+
+ std::vector<std::unique_ptr<ConcurrentAllocationThread>> threads;
+ const int kThreads = 4;
+
+ for (int i = 0; i < kThreads; i++) {
+ auto thread =
+ std::make_unique<ConcurrentAllocationThread>(i_isolate->heap());
+ CHECK(thread->Start());
+ threads.push_back(std::move(thread));
+ }
+
+ // Some of the following Safepoint() invocations are supposed to perform a GC.
+ for (int i = 0; i < 1'000'000; i++) {
+ i_isolate->main_thread_local_heap()->Safepoint();
+ }
+
+ {
+ ParkedScope scope(i_isolate->main_thread_local_isolate());
+
+ for (auto& thread : threads) {
+ thread->Join();
+ }
+ }
+
+ i_isolate->main_thread_local_heap()->Safepoint();
+ isolate->Dispose();
+}
+
class LargeObjectConcurrentAllocationThread final : public v8::base::Thread {
public:
explicit LargeObjectConcurrentAllocationThread(Heap* heap,
@@ -146,7 +249,7 @@ class LargeObjectConcurrentAllocationThread final : public v8::base::Thread {
kLargeObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
AllocationAlignment::kWordAligned);
if (result.IsRetry()) {
- local_heap.PerformCollection();
+ local_heap.TryPerformCollection();
} else {
Address address = result.ToAddress();
CreateFixedArray(heap_, address, kLargeObjectSize);
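
In the ConcurrentAllocationThread change above, the pending counter becomes optional (it may be nullptr) because the newly added tests (ConcurrentAllocationWhileMainThreadIsParked and friends) synchronize by joining the threads instead of decrementing a counter. A minimal sketch of that null-guarded optional-counter pattern using only the standard library (std::thread and std::atomic, not V8's LocalHeap or ParkedScope machinery):

#include <atomic>
#include <thread>
#include <vector>

// A worker that optionally reports completion through a shared counter; when
// the creator plans to join() the threads, the counter can simply be omitted.
void RunWorker(std::atomic<int>* pending) {
  // ... do the actual work here ...
  if (pending) pending->fetch_sub(1);
}

int main() {
  constexpr int kThreads = 4;
  std::vector<std::thread> threads;
  threads.reserve(kThreads);
  for (int i = 0; i < kThreads; ++i) {
    threads.emplace_back(RunWorker, nullptr);  // No counter: rely on join().
  }
  for (auto& t : threads) t.join();
  return 0;
}
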
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index 96cb22827a4..d8ad6e65541 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -1095,6 +1095,9 @@ TEST(TestBytecodeFlushing) {
FLAG_always_opt = false;
i::FLAG_optimize_for_size = false;
#endif // V8_LITE_MODE
+#if ENABLE_SPARKPLUG
+ FLAG_always_sparkplug = false;
+#endif // ENABLE_SPARKPLUG
i::FLAG_flush_bytecode = true;
i::FLAG_allow_natives_syntax = true;
@@ -1156,6 +1159,9 @@ HEAP_TEST(Regress10560) {
// Disable flags that allocate a feedback vector eagerly.
i::FLAG_opt = false;
i::FLAG_always_opt = false;
+#if ENABLE_SPARKPLUG
+ FLAG_always_sparkplug = false;
+#endif // ENABLE_SPARKPLUG
i::FLAG_lazy_feedback_allocation = true;
ManualGCScope manual_gc_scope;
@@ -1320,6 +1326,9 @@ TEST(Regress10774) {
TEST(TestOptimizeAfterBytecodeFlushingCandidate) {
FLAG_opt = true;
FLAG_always_opt = false;
+#if ENABLE_SPARKPLUG
+ FLAG_always_sparkplug = false;
+#endif // ENABLE_SPARKPLUG
i::FLAG_optimize_for_size = false;
i::FLAG_incremental_marking = true;
i::FLAG_flush_bytecode = true;
@@ -1464,7 +1473,6 @@ TEST(CompilationCacheCachingBehavior) {
"};"
"foo();";
Handle<String> source = factory->InternalizeUtf8String(raw_source);
- Handle<Context> native_context = isolate->native_context();
{
v8::HandleScope scope(CcTest::isolate());
@@ -1477,7 +1485,7 @@ TEST(CompilationCacheCachingBehavior) {
MaybeHandle<SharedFunctionInfo> cached_script =
compilation_cache->LookupScript(source, Handle<Object>(), 0, 0,
v8::ScriptOriginOptions(true, false),
- native_context, language_mode);
+ language_mode);
CHECK(!cached_script.is_null());
}
@@ -1488,7 +1496,7 @@ TEST(CompilationCacheCachingBehavior) {
MaybeHandle<SharedFunctionInfo> cached_script =
compilation_cache->LookupScript(source, Handle<Object>(), 0, 0,
v8::ScriptOriginOptions(true, false),
- native_context, language_mode);
+ language_mode);
CHECK(!cached_script.is_null());
// Progress code age until it's old and ready for GC.
@@ -1508,7 +1516,7 @@ TEST(CompilationCacheCachingBehavior) {
MaybeHandle<SharedFunctionInfo> cached_script =
compilation_cache->LookupScript(source, Handle<Object>(), 0, 0,
v8::ScriptOriginOptions(true, false),
- native_context, language_mode);
+ language_mode);
CHECK(cached_script.is_null());
}
}
diff --git a/deps/v8/test/cctest/heap/test-write-barrier.cc b/deps/v8/test/cctest/heap/test-write-barrier.cc
index f12e3fe8f0a..9ed3eb668e8 100644
--- a/deps/v8/test/cctest/heap/test-write-barrier.cc
+++ b/deps/v8/test/cctest/heap/test-write-barrier.cc
@@ -5,6 +5,7 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/spaces.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
index a127da4b7f8..30baf4afad2 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
@@ -75,9 +75,9 @@ bytecodes: [
B(Mov), R(0), R(4),
B(Mov), R(2), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorResolve), R(4), U8(3),
- /* 22 S> */ B(Return),
+ B(Return),
B(Ldar), R(2),
- /* 22 S> */ B(Return),
+ B(Return),
B(LdaUndefined),
/* 22 S> */ B(Return),
]
@@ -180,9 +180,9 @@ bytecodes: [
B(Mov), R(0), R(4),
B(Mov), R(2), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorResolve), R(4), U8(3),
- /* 31 S> */ B(Return),
+ B(Return),
B(Ldar), R(2),
- /* 31 S> */ B(Return),
+ B(Return),
B(LdaUndefined),
/* 31 S> */ B(Return),
]
@@ -352,9 +352,9 @@ bytecodes: [
B(Mov), R(0), R(7),
B(Mov), R(5), R(8),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorResolve), R(7), U8(3),
- /* 50 S> */ B(Return),
+ B(Return),
B(Ldar), R(5),
- /* 50 S> */ B(Return),
+ B(Return),
B(LdaUndefined),
/* 50 S> */ B(Return),
]
@@ -569,9 +569,9 @@ bytecodes: [
B(Mov), R(0), R(4),
B(Mov), R(2), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorResolve), R(4), U8(3),
- /* 60 S> */ B(Return),
+ B(Return),
B(Ldar), R(2),
- /* 60 S> */ B(Return),
+ B(Return),
B(LdaUndefined),
/* 60 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden
index a1cfdc3bc41..71e54f421ea 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden
@@ -29,7 +29,7 @@ bytecodes: [
B(Ldar), R(2),
/* 0 E> */ B(Throw),
B(Ldar), R(2),
- /* 10 S> */ B(Return),
+ B(Return),
B(Mov), R(2), R(1),
B(Ldar), R(1),
B(Mov), R(context), R(2),
@@ -67,7 +67,7 @@ bytecodes: [
B(Star6),
B(Mov), R(0), R(4),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(4), U8(3),
- /* 10 S> */ B(Return),
+ B(Return),
]
constant pool: [
Smi [20],
@@ -101,7 +101,7 @@ bytecodes: [
B(Ldar), R(2),
/* 0 E> */ B(Throw),
B(Ldar), R(2),
- /* 21 S> */ B(Return),
+ B(Return),
B(Mov), R(2), R(1),
B(Ldar), R(1),
B(Mov), R(context), R(2),
@@ -142,7 +142,7 @@ bytecodes: [
B(Star6),
B(Mov), R(0), R(4),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(4), U8(3),
- /* 21 S> */ B(Return),
+ B(Return),
]
constant pool: [
Smi [20],
@@ -184,7 +184,7 @@ bytecodes: [
B(Ldar), R(3),
/* 0 E> */ B(Throw),
B(Ldar), R(3),
- /* 54 S> */ B(Return),
+ B(Return),
B(Mov), R(3), R(2),
B(Ldar), R(2),
B(Mov), R(context), R(3),
@@ -223,7 +223,7 @@ bytecodes: [
B(Star7),
B(Mov), R(0), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(5), U8(3),
- /* 54 S> */ B(Return),
+ B(Return),
]
constant pool: [
Smi [27],
@@ -264,7 +264,7 @@ bytecodes: [
B(Ldar), R(3),
/* 0 E> */ B(Throw),
B(Ldar), R(3),
- /* 49 S> */ B(Return),
+ B(Return),
B(Mov), R(3), R(2),
B(Ldar), R(2),
B(Mov), R(context), R(3),
@@ -305,7 +305,7 @@ bytecodes: [
B(Star7),
B(Mov), R(0), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(5), U8(3),
- /* 49 S> */ B(Return),
+ B(Return),
]
constant pool: [
Smi [30],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
index 12a5b13aa0d..a11a4aa4050 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
@@ -141,7 +141,7 @@ bytecodes: [
B(Star8),
B(Mov), R(0), R(6),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
- /* 57 S> */ B(Return),
+ B(Return),
]
constant pool: [
Smi [85],
@@ -282,7 +282,7 @@ bytecodes: [
B(Mov), R(0), R(14),
B(Mov), R(9), R(15),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(14), U8(3),
- /* 68 S> */ B(Return),
+ B(Return),
B(LdaUndefined),
B(Star6),
B(LdaTrue),
@@ -303,7 +303,7 @@ bytecodes: [
B(Star8),
B(Mov), R(0), R(6),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
- /* 68 S> */ B(Return),
+ B(Return),
]
constant pool: [
Smi [85],
@@ -469,7 +469,7 @@ bytecodes: [
B(Star8),
B(Mov), R(0), R(6),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
- /* 114 S> */ B(Return),
+ B(Return),
]
constant pool: [
Smi [85],
@@ -578,7 +578,7 @@ bytecodes: [
B(Mov), R(0), R(11),
B(Mov), R(7), R(12),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(11), U8(3),
- /* 96 S> */ B(Return),
+ B(Return),
B(LdaUndefined),
B(Star4),
B(LdaFalse),
@@ -599,7 +599,7 @@ bytecodes: [
B(Star6),
B(Mov), R(0), R(4),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(4), U8(3),
- /* 96 S> */ B(Return),
+ B(Return),
]
constant pool: [
OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
index 8794a290874..0a954c76b80 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
@@ -162,7 +162,7 @@ bytecodes: [
B(Ldar), R(7),
B(ReThrow),
B(Ldar), R(7),
- /* 85 S> */ B(Return),
+ B(Return),
B(LdaUndefined),
/* 85 S> */ B(Return),
]
@@ -351,7 +351,7 @@ bytecodes: [
B(Ldar), R(5),
B(ReThrow),
B(Ldar), R(5),
- /* 105 S> */ B(Return),
+ B(Return),
B(LdaUndefined),
/* 105 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
index 7ccbd17f623..d456c0c931e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
@@ -429,7 +429,7 @@ bytecodes: [
B(Ldar), R(5),
/* 11 E> */ B(Throw),
B(Ldar), R(5),
- /* 55 S> */ B(Return),
+ B(Return),
/* 35 S> */ B(GetIterator), R(arg0), U8(0), U8(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
@@ -531,7 +531,7 @@ bytecodes: [
B(Ldar), R(4),
/* 11 E> */ B(Throw),
B(Ldar), R(4),
- /* 49 S> */ B(Return),
+ B(Return),
/* 35 S> */ B(GetIterator), R(arg0), U8(0), U8(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
@@ -607,7 +607,7 @@ bytecodes: [
B(Ldar), R(8),
B(ReThrow),
B(Ldar), R(8),
- /* 49 S> */ B(Return),
+ B(Return),
B(LdaUndefined),
/* 49 S> */ B(Return),
]
@@ -726,7 +726,7 @@ bytecodes: [
B(Star9),
B(Mov), R(0), R(7),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(7), U8(3),
- /* 60 S> */ B(Return),
+ B(Return),
]
constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
@@ -850,7 +850,7 @@ bytecodes: [
B(Star8),
B(Mov), R(0), R(6),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
- /* 54 S> */ B(Return),
+ B(Return),
]
constant pool: [
Smi [88],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
index f28a4e70e00..e5f1c46c668 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
@@ -28,7 +28,7 @@ bytecodes: [
B(Ldar), R(1),
/* 11 E> */ B(Throw),
B(Ldar), R(1),
- /* 16 S> */ B(Return),
+ B(Return),
B(LdaUndefined),
/* 16 S> */ B(Return),
]
@@ -62,7 +62,7 @@ bytecodes: [
B(Ldar), R(1),
/* 11 E> */ B(Throw),
B(Ldar), R(1),
- /* 25 S> */ B(Return),
+ B(Return),
/* 16 S> */ B(LdaSmi), I8(42),
B(Star1),
B(LdaFalse),
@@ -76,7 +76,7 @@ bytecodes: [
B(Ldar), R(1),
/* 16 E> */ B(Throw),
B(Ldar), R(1),
- /* 25 S> */ B(Return),
+ B(Return),
B(LdaUndefined),
/* 25 S> */ B(Return),
]
@@ -113,7 +113,7 @@ bytecodes: [
B(Ldar), R(4),
/* 11 E> */ B(Throw),
B(Ldar), R(4),
- /* 44 S> */ B(Return),
+ B(Return),
/* 30 S> */ B(CreateArrayLiteral), U8(4), U8(0), U8(37),
B(Star6),
B(GetIterator), R(6), U8(1), U8(3),
@@ -191,7 +191,7 @@ bytecodes: [
B(Ldar), R(8),
B(ReThrow),
B(Ldar), R(8),
- /* 44 S> */ B(Return),
+ B(Return),
B(LdaUndefined),
/* 44 S> */ B(Return),
]
@@ -238,7 +238,7 @@ bytecodes: [
B(Ldar), R(1),
/* 38 E> */ B(Throw),
B(Ldar), R(1),
- /* 54 S> */ B(Return),
+ B(Return),
/* 43 S> */ B(LdaGlobal), U8(4), U8(0),
B(Star5),
/* 50 E> */ B(CallUndefinedReceiver0), R(5), U8(2),
@@ -263,7 +263,7 @@ bytecodes: [
B(CallProperty1), R(6), R(3), R(4), U8(14),
B(Jump), U8(45),
B(Ldar), R(4),
- /* 54 S> */ B(Return),
+ B(Return),
B(LdaNamedProperty), R(3), U8(9), U8(16),
B(JumpIfUndefinedOrNull), U8(10),
B(Star6),
@@ -296,7 +296,7 @@ bytecodes: [
B(TestReferenceEqual), R(2),
B(JumpIfFalse), U8(5),
B(Ldar), R(3),
- /* 54 S> */ B(Return),
+ B(Return),
B(LdaUndefined),
/* 54 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
index 149140d4a79..670b9c4e7b5 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
@@ -28,7 +28,7 @@ bytecodes: [
B(Ldar), R(2),
/* 0 E> */ B(Throw),
B(Ldar), R(2),
- /* 14 S> */ B(Return),
+ B(Return),
B(Mov), R(2), R(1),
B(Ldar), R(1),
/* 14 S> */ B(Return),
@@ -62,7 +62,7 @@ bytecodes: [
B(Ldar), R(2),
/* 0 E> */ B(Throw),
B(Ldar), R(2),
- /* 25 S> */ B(Return),
+ B(Return),
B(Mov), R(2), R(1),
B(Ldar), R(1),
/* 25 S> */ B(Return),
@@ -98,7 +98,7 @@ bytecodes: [
B(Ldar), R(3),
/* 0 E> */ B(Throw),
B(Ldar), R(3),
- /* 65 S> */ B(Return),
+ B(Return),
/* 32 S> */ B(LdaModuleVariable), I8(-1), U8(0),
B(ThrowReferenceErrorIfHole), U8(3),
B(Star3),
@@ -148,7 +148,7 @@ bytecodes: [
B(Ldar), R(3),
/* 0 E> */ B(Throw),
B(Ldar), R(3),
- /* 50 S> */ B(Return),
+ B(Return),
/* 17 S> */ B(LdaSmi), I8(42),
/* 17 E> */ B(StaModuleVariable), I8(1), U8(0),
/* 21 S> */ B(LdaModuleVariable), I8(1), U8(0),
@@ -201,7 +201,7 @@ bytecodes: [
B(Ldar), R(3),
/* 0 E> */ B(Throw),
B(Ldar), R(3),
- /* 50 S> */ B(Return),
+ B(Return),
/* 17 S> */ B(LdaSmi), I8(42),
/* 17 E> */ B(StaModuleVariable), I8(1), U8(0),
/* 21 S> */ B(LdaModuleVariable), I8(1), U8(0),
@@ -255,7 +255,7 @@ bytecodes: [
B(Ldar), R(3),
/* 0 E> */ B(Throw),
B(Ldar), R(3),
- /* 52 S> */ B(Return),
+ B(Return),
/* 19 S> */ B(LdaSmi), I8(42),
/* 19 E> */ B(StaModuleVariable), I8(1), U8(0),
/* 23 S> */ B(LdaModuleVariable), I8(1), U8(0),
@@ -307,7 +307,7 @@ bytecodes: [
B(Ldar), R(2),
/* 0 E> */ B(Throw),
B(Ldar), R(2),
- /* 33 S> */ B(Return),
+ B(Return),
B(Mov), R(2), R(1),
B(CreateClosure), U8(4), U8(0), U8(0),
B(StaModuleVariable), I8(1), U8(0),
@@ -350,7 +350,7 @@ bytecodes: [
B(Ldar), R(2),
/* 0 E> */ B(Throw),
B(Ldar), R(2),
- /* 27 S> */ B(Return),
+ B(Return),
B(Mov), R(2), R(1),
B(LdaTheHole),
B(Star5),
@@ -398,7 +398,7 @@ bytecodes: [
B(Ldar), R(2),
/* 0 E> */ B(Throw),
B(Ldar), R(2),
- /* 31 S> */ B(Return),
+ B(Return),
B(Mov), R(2), R(1),
B(Ldar), R(1),
/* 31 S> */ B(Return),
@@ -432,7 +432,7 @@ bytecodes: [
B(Ldar), R(2),
/* 0 E> */ B(Throw),
B(Ldar), R(2),
- /* 20 S> */ B(Return),
+ B(Return),
B(Mov), R(2), R(1),
B(Ldar), R(1),
/* 20 S> */ B(Return),
@@ -472,7 +472,7 @@ bytecodes: [
B(Ldar), R(3),
/* 0 E> */ B(Throw),
B(Ldar), R(3),
- /* 46 S> */ B(Return),
+ B(Return),
/* 31 S> */ B(LdaNamedProperty), R(1), U8(3), U8(0),
B(Star3),
/* 42 E> */ B(LdaNamedProperty), R(1), U8(4), U8(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
index 07a3cffaa0e..cc75b76a47a 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
@@ -90,7 +90,7 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 110
+bytecode array length: 117
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
@@ -105,35 +105,40 @@ bytecodes: [
B(Star3),
B(PopContext), R(1),
B(Mov), R(4), R(0),
- /* 89 S> */ B(CreateArrayLiteral), U8(3), U8(0), U8(37),
+ /* 89 S> */ B(CreateEmptyArrayLiteral), U8(0),
B(Star3),
- B(LdaSmi), I8(1),
+ B(LdaZero),
+ B(Star2),
+ B(LdaZero),
+ B(StaInArrayLiteral), R(3), R(2), U8(1),
+ B(Ldar), R(2),
+ B(Inc), U8(3),
B(Star2),
- /* 101 S> */ B(CreateArrayLiteral), U8(4), U8(1), U8(37),
+ /* 101 S> */ B(CreateArrayLiteral), U8(3), U8(4), U8(37),
B(Star6),
- /* 101 E> */ B(GetIterator), R(6), U8(2), U8(4),
+ /* 101 E> */ B(GetIterator), R(6), U8(5), U8(7),
B(Mov), R(4), R(1),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star5),
- B(LdaNamedProperty), R(5), U8(5), U8(6),
+ B(LdaNamedProperty), R(5), U8(4), U8(9),
B(Star4),
B(CallProperty0), R(4), R(5), U8(15),
B(Star6),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
- B(LdaNamedProperty), R(6), U8(6), U8(17),
+ B(LdaNamedProperty), R(6), U8(5), U8(17),
B(JumpIfToBooleanTrue), U8(18),
- B(LdaNamedProperty), R(6), U8(7), U8(8),
- B(StaInArrayLiteral), R(3), R(2), U8(13),
+ B(LdaNamedProperty), R(6), U8(6), U8(11),
+ B(StaInArrayLiteral), R(3), R(2), U8(1),
B(Ldar), R(2),
- B(Inc), U8(12),
+ B(Inc), U8(3),
B(Star2),
B(JumpLoop), U8(31), I8(0),
B(LdaSmi), I8(4),
- B(StaInArrayLiteral), R(3), R(2), U8(13),
+ B(StaInArrayLiteral), R(3), R(2), U8(1),
B(Mov), R(3), R(2),
- B(CallJSRuntime), U8(%reflect_construct), R(1), U8(2),
+ /* 89 E> */ B(CallJSRuntime), U8(%reflect_construct), R(1), U8(2),
B(LdaUndefined),
/* 116 S> */ B(Return),
]
@@ -142,7 +147,6 @@ constant pool: [
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
- ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
index 8906df45360..c62a6489e7f 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
@@ -265,7 +265,7 @@ bytecodes: [
B(Ldar), R(3),
/* 11 E> */ B(Throw),
B(Ldar), R(3),
- /* 62 S> */ B(Return),
+ B(Return),
/* 31 S> */ B(LdaZero),
B(Star1),
/* 36 S> */ B(LdaSmi), I8(10),
@@ -311,7 +311,7 @@ bytecodes: [
B(Ldar), R(2),
/* 11 E> */ B(Throw),
B(Ldar), R(2),
- /* 56 S> */ B(Return),
+ B(Return),
/* 31 S> */ B(LdaZero),
B(Star1),
/* 36 S> */ B(LdaSmi), I8(10),
@@ -329,7 +329,7 @@ bytecodes: [
B(Ldar), R(2),
/* 47 E> */ B(Throw),
B(Ldar), R(2),
- /* 56 S> */ B(Return),
+ B(Return),
/* 44 S> */ B(Ldar), R(1),
B(Inc), U8(1),
B(Star1),
@@ -394,7 +394,7 @@ bytecodes: [
B(Star7),
B(Mov), R(0), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(5), U8(3),
- /* 67 S> */ B(Return),
+ B(Return),
]
constant pool: [
SCOPE_INFO_TYPE,
@@ -462,7 +462,7 @@ bytecodes: [
B(Star6),
B(Mov), R(0), R(4),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(4), U8(3),
- /* 61 S> */ B(Return),
+ B(Return),
]
constant pool: [
Smi [42],
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index ccb87108655..450c45fb240 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -5,7 +5,6 @@
#include <fstream>
#include "src/init/v8.h"
-
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/interpreter.h"
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter.cc b/deps/v8/test/cctest/interpreter/test-interpreter.cc
index 5e0f7d5d984..3596b03c25d 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter.cc
@@ -4,14 +4,13 @@
#include <tuple>
-#include "src/init/v8.h"
-
#include "src/api/api-inl.h"
#include "src/base/overflowing-math.h"
#include "src/codegen/compiler.h"
#include "src/execution/execution.h"
#include "src/handles/handles.h"
#include "src/heap/heap-inl.h"
+#include "src/init/v8.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-flags.h"
@@ -5054,6 +5053,9 @@ TEST(InterpreterGenerators) {
#ifndef V8_TARGET_ARCH_ARM
TEST(InterpreterWithNativeStack) {
+ // "Always sparkplug" messes with this test.
+ if (FLAG_always_sparkplug) return;
+
i::FLAG_interpreted_frames_native_stack = true;
HandleAndZoneScope handles;
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index d30be379236..79c773b22ad 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -527,13 +527,14 @@ THREADED_TEST(Gc) {
static void StackCheck(Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
- i::StackFrameIterator iter(reinterpret_cast<i::Isolate*>(info.GetIsolate()));
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ i::StackFrameIterator iter(isolate);
for (int i = 0; !iter.done(); i++) {
i::StackFrame* frame = iter.frame();
CHECK(i != 0 || (frame->type() == i::StackFrame::EXIT));
i::Code code = frame->LookupCode();
CHECK(code.IsCode());
- CHECK(code.contains(frame->pc()));
+ CHECK(code.contains(isolate, frame->pc()));
iter.Advance();
}
}
@@ -902,3 +903,36 @@ TEST(ObjectSetLazyDataPropertyForIndex) {
CHECK_EQ(1, getter_call_count);
}
}
+
+TEST(ObjectTemplateSetLazyPropertySurvivesIC) {
+ i::FLAG_allow_natives_syntax = true;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+
+ v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
+ static int getter_call_count = 0;
+ templ->SetLazyDataProperty(
+ v8_str("foo"), [](v8::Local<v8::Name> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ getter_call_count++;
+ info.GetReturnValue().Set(getter_call_count);
+ });
+
+ v8::Local<v8::Function> f = CompileRun(
+ "function f(obj) {"
+ " obj.foo;"
+ " obj.foo;"
+ "};"
+ "%PrepareFunctionForOptimization(f);"
+ "f")
+ .As<v8::Function>();
+ v8::Local<v8::Value> obj = templ->NewInstance(context).ToLocalChecked();
+ f->Call(context, context->Global(), 1, &obj).ToLocalChecked();
+ CHECK_EQ(getter_call_count, 1);
+
+ obj = templ->NewInstance(context).ToLocalChecked();
+ f->Call(context, context->Global(), 1, &obj).ToLocalChecked();
+ CHECK_EQ(getter_call_count, 2);
+}
diff --git a/deps/v8/test/cctest/test-api-array-buffer.cc b/deps/v8/test/cctest/test-api-array-buffer.cc
index a55644a2e3e..9875098d1fb 100644
--- a/deps/v8/test/cctest/test-api-array-buffer.cc
+++ b/deps/v8/test/cctest/test-api-array-buffer.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/api/api-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "test/cctest/test-api.h"
using ::v8::Array;
@@ -46,31 +47,11 @@ Local<TypedArray> CreateAndCheck(Local<v8::ArrayBuffer> ab, int byteOffset,
std::shared_ptr<v8::BackingStore> Externalize(Local<v8::ArrayBuffer> ab) {
std::shared_ptr<v8::BackingStore> backing_store = ab->GetBackingStore();
- // Keep the tests until the deprecated functions are removed.
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated"
-#endif
- ab->Externalize(backing_store);
- CHECK(ab->IsExternal());
-#if __clang__
-#pragma clang diagnostic pop
-#endif
return backing_store;
}
std::shared_ptr<v8::BackingStore> Externalize(Local<v8::SharedArrayBuffer> ab) {
std::shared_ptr<v8::BackingStore> backing_store = ab->GetBackingStore();
- // Keep the tests until the deprecated functions are removed.
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated"
-#endif
- ab->Externalize(backing_store);
- CHECK(ab->IsExternal());
-#if __clang__
-#pragma clang diagnostic pop
-#endif
return backing_store;
}
@@ -149,46 +130,6 @@ THREADED_TEST(ArrayBuffer_JSInternalToExternal) {
CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
}
-THREADED_TEST(ArrayBuffer_External) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- i::ScopedVector<uint8_t> my_data(100);
- memset(my_data.begin(), 0, 100);
- // Keep the tests until the deprecated functions are removed.
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated"
-#endif
- Local<v8::ArrayBuffer> ab3 =
- v8::ArrayBuffer::New(isolate, my_data.begin(), 100);
- CheckInternalFieldsAreZero(ab3);
- CHECK_EQ(100, ab3->ByteLength());
- CHECK(ab3->IsExternal());
-#if __clang__
-#pragma clang diagnostic pop
-#endif
-
- CHECK(env->Global()->Set(env.local(), v8_str("ab3"), ab3).FromJust());
-
- v8::Local<v8::Value> result = CompileRun("ab3.byteLength");
- CHECK_EQ(100, result->Int32Value(env.local()).FromJust());
-
- result = CompileRun(
- "var u8_b = new Uint8Array(ab3);"
- "u8_b[0] = 0xBB;"
- "u8_b[1] = 0xCC;"
- "u8_b.length");
- CHECK_EQ(100, result->Int32Value(env.local()).FromJust());
- CHECK_EQ(0xBB, my_data[0]);
- CHECK_EQ(0xCC, my_data[1]);
- my_data[0] = 0xCC;
- my_data[1] = 0x11;
- result = CompileRun("u8_b[0] + u8_b[1]");
- CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
-}
-
THREADED_TEST(ArrayBuffer_DisableDetach) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -292,37 +233,6 @@ THREADED_TEST(ArrayBuffer_DetachingScript) {
CheckDataViewIsDetached(dv);
}
-// TODO(v8:9380) the Contents data structure should be deprecated.
-THREADED_TEST(ArrayBuffer_AllocationInformation) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- const size_t ab_size = 1024;
- Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, ab_size);
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated"
-#endif
- v8::ArrayBuffer::Contents contents(ab->GetContents());
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated"
-#endif
-
- // Array buffers should have normal allocation mode.
- CHECK_EQ(contents.AllocationMode(),
- v8::ArrayBuffer::Allocator::AllocationMode::kNormal);
- // The allocation must contain the buffer (normally they will be equal, but
- // this is not required by the contract).
- CHECK_NOT_NULL(contents.AllocationBase());
- const uintptr_t alloc =
- reinterpret_cast<uintptr_t>(contents.AllocationBase());
- const uintptr_t data = reinterpret_cast<uintptr_t>(contents.Data());
- CHECK_LE(alloc, data);
- CHECK_LE(data + contents.ByteLength(), alloc + contents.AllocationLength());
-}
-
THREADED_TEST(ArrayBuffer_ExternalizeEmpty) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -341,7 +251,6 @@ THREADED_TEST(ArrayBuffer_ExternalizeEmpty) {
// marked as is_external or not.
USE(u8a->Buffer());
- CHECK(ab->IsExternal());
CHECK_EQ(2, backing_store->ByteLength());
}
@@ -380,35 +289,6 @@ THREADED_TEST(SharedArrayBuffer_ApiInternalToExternal) {
CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
}
-THREADED_TEST(ArrayBuffer_ExternalReused) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- i::ScopedVector<uint8_t> data(100);
- Local<v8::ArrayBuffer> ab1 = v8::ArrayBuffer::New(isolate, data.begin(), 100);
- std::shared_ptr<v8::BackingStore> bs1 = ab1->GetBackingStore();
- ab1->Detach();
- Local<v8::ArrayBuffer> ab2 = v8::ArrayBuffer::New(isolate, data.begin(), 100);
- std::shared_ptr<v8::BackingStore> bs2 = ab2->GetBackingStore();
- CHECK_EQ(bs1->Data(), bs2->Data());
-}
-
-THREADED_TEST(SharedArrayBuffer_ExternalReused) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- i::ScopedVector<uint8_t> data(100);
- Local<v8::SharedArrayBuffer> ab1 =
- v8::SharedArrayBuffer::New(isolate, data.begin(), 100);
- std::shared_ptr<v8::BackingStore> bs1 = ab1->GetBackingStore();
- Local<v8::SharedArrayBuffer> ab2 =
- v8::SharedArrayBuffer::New(isolate, data.begin(), 100);
- std::shared_ptr<v8::BackingStore> bs2 = ab2->GetBackingStore();
- CHECK_EQ(bs1->Data(), bs2->Data());
-}
-
THREADED_TEST(SharedArrayBuffer_JSInternalToExternal) {
i::FLAG_harmony_sharedarraybuffer = true;
LocalContext env;
@@ -450,64 +330,6 @@ THREADED_TEST(SharedArrayBuffer_JSInternalToExternal) {
CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
}
-THREADED_TEST(SharedArrayBuffer_External) {
- i::FLAG_harmony_sharedarraybuffer = true;
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- i::ScopedVector<uint8_t> my_data(100);
- memset(my_data.begin(), 0, 100);
- Local<v8::SharedArrayBuffer> ab3 =
- v8::SharedArrayBuffer::New(isolate, my_data.begin(), 100);
- CheckInternalFieldsAreZero(ab3);
- CHECK_EQ(100, static_cast<int>(ab3->ByteLength()));
- CHECK(ab3->IsExternal());
-
- CHECK(env->Global()->Set(env.local(), v8_str("ab3"), ab3).FromJust());
-
- v8::Local<v8::Value> result = CompileRun("ab3.byteLength");
- CHECK_EQ(100, result->Int32Value(env.local()).FromJust());
-
- result = CompileRun(
- "var u8_b = new Uint8Array(ab3);"
- "u8_b[0] = 0xBB;"
- "u8_b[1] = 0xCC;"
- "u8_b.length");
- CHECK_EQ(100, result->Int32Value(env.local()).FromJust());
- CHECK_EQ(0xBB, my_data[0]);
- CHECK_EQ(0xCC, my_data[1]);
- my_data[0] = 0xCC;
- my_data[1] = 0x11;
- result = CompileRun("u8_b[0] + u8_b[1]");
- CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
-}
-
-// TODO(v8:9380) the Contents data structure should be deprecated.
-THREADED_TEST(SharedArrayBuffer_AllocationInformation) {
- i::FLAG_harmony_sharedarraybuffer = true;
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- const size_t ab_size = 1024;
- Local<v8::SharedArrayBuffer> ab =
- v8::SharedArrayBuffer::New(isolate, ab_size);
- v8::SharedArrayBuffer::Contents contents(ab->GetContents());
-
- // Array buffers should have normal allocation mode.
- CHECK_EQ(contents.AllocationMode(),
- v8::ArrayBuffer::Allocator::AllocationMode::kNormal);
- // The allocation must contain the buffer (normally they will be equal, but
- // this is not required by the contract).
- CHECK_NOT_NULL(contents.AllocationBase());
- const uintptr_t alloc =
- reinterpret_cast<uintptr_t>(contents.AllocationBase());
- const uintptr_t data = reinterpret_cast<uintptr_t>(contents.Data());
- CHECK_LE(alloc, data);
- CHECK_LE(data + contents.ByteLength(), alloc + contents.AllocationLength());
-}
-
THREADED_TEST(SkipArrayBufferBackingStoreDuringGC) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -515,9 +337,12 @@ THREADED_TEST(SkipArrayBufferBackingStoreDuringGC) {
// Make sure the pointer looks like a heap object
uint8_t* store_ptr = reinterpret_cast<uint8_t*>(i::kHeapObjectTag);
+ auto backing_store = v8::ArrayBuffer::NewBackingStore(
+ store_ptr, 8, [](void*, size_t, void*) {}, nullptr);
// Create ArrayBuffer with pointer-that-cannot-be-visited in the backing store
- Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, store_ptr, 8);
+ Local<v8::ArrayBuffer> ab =
+ v8::ArrayBuffer::New(isolate, std::move(backing_store));
// Should not crash
CcTest::CollectGarbage(i::NEW_SPACE); // in survivor space now
@@ -538,12 +363,15 @@ THREADED_TEST(SkipArrayBufferDuringScavenge) {
Local<v8::Object> tmp = v8::Object::New(isolate);
uint8_t* store_ptr =
reinterpret_cast<uint8_t*>(*reinterpret_cast<uintptr_t*>(*tmp));
+ auto backing_store = v8::ArrayBuffer::NewBackingStore(
+ store_ptr, 8, [](void*, size_t, void*) {}, nullptr);
// Make `store_ptr` point to from space
CcTest::CollectGarbage(i::NEW_SPACE);
// Create ArrayBuffer with pointer-that-cannot-be-visited in the backing store
- Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, store_ptr, 8);
+ Local<v8::ArrayBuffer> ab =
+ v8::ArrayBuffer::New(isolate, std::move(backing_store));
// Should not crash,
// i.e. backing store pointer should not be treated as a heap object pointer
diff --git a/deps/v8/test/cctest/test-api-interceptors.cc b/deps/v8/test/cctest/test-api-interceptors.cc
index af5858eaefc..305840f29b7 100644
--- a/deps/v8/test/cctest/test-api-interceptors.cc
+++ b/deps/v8/test/cctest/test-api-interceptors.cc
@@ -862,9 +862,11 @@ THREADED_TEST(InterceptorHasOwnPropertyCausingGC) {
CHECK(!value->BooleanValue(isolate));
}
-static void CheckInterceptorIC(v8::GenericNamedPropertyGetterCallback getter,
- v8::GenericNamedPropertyQueryCallback query,
- const char* source, int expected) {
+namespace {
+
+void CheckInterceptorIC(v8::GenericNamedPropertyGetterCallback getter,
+ v8::GenericNamedPropertyQueryCallback query,
+ const char* source, int expected) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
@@ -879,14 +881,13 @@ static void CheckInterceptorIC(v8::GenericNamedPropertyGetterCallback getter,
CHECK_EQ(expected, value->Int32Value(context.local()).FromJust());
}
-static void CheckInterceptorLoadIC(
- v8::GenericNamedPropertyGetterCallback getter, const char* source,
- int expected) {
+void CheckInterceptorLoadIC(v8::GenericNamedPropertyGetterCallback getter,
+ const char* source, int expected) {
CheckInterceptorIC(getter, nullptr, source, expected);
}
-static void InterceptorLoadICGetter(
- Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+void InterceptorLoadICGetter(Local<Name> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
v8::Isolate* isolate = CcTest::isolate();
CHECK_EQ(isolate, info.GetIsolate());
@@ -896,6 +897,7 @@ static void InterceptorLoadICGetter(
info.GetReturnValue().Set(v8::Integer::New(isolate, 42));
}
+} // namespace
// This test should hit the load IC for the interceptor case.
THREADED_TEST(InterceptorLoadIC) {
@@ -912,9 +914,23 @@ THREADED_TEST(InterceptorLoadIC) {
// configurations of interceptor and explicit fields works fine
// (those cases are special cased to get better performance).
-static void InterceptorLoadXICGetter(
+namespace {
+
+void InterceptorLoadXICGetter(Local<Name> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ info.GetReturnValue().Set(
+ v8_str("x")
+ ->Equals(info.GetIsolate()->GetCurrentContext(), name)
+ .FromJust()
+ ? v8::Local<v8::Value>(v8::Integer::New(info.GetIsolate(), 42))
+ : v8::Local<v8::Value>());
+}
+
+void InterceptorLoadXICGetterWithSideEffects(
Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
+ CompileRun("interceptor_getter_side_effect()");
info.GetReturnValue().Set(
v8_str("x")
->Equals(info.GetIsolate()->GetCurrentContext(), name)
@@ -923,6 +939,7 @@ static void InterceptorLoadXICGetter(
: v8::Local<v8::Value>());
}
+} // namespace
THREADED_TEST(InterceptorLoadICWithFieldOnHolder) {
CheckInterceptorLoadIC(InterceptorLoadXICGetter,
@@ -1447,6 +1464,18 @@ void HasICQueryToggle(TKey name,
isolate, toggle ? v8::internal::ABSENT : v8::internal::NONE));
}
+template <typename TKey, v8::internal::PropertyAttributes attribute>
+void HasICQuerySideEffect(TKey name,
+ const v8::PropertyCallbackInfo<v8::Integer>& info) {
+ ApiTestFuzzer::Fuzz();
+ v8::Isolate* isolate = CcTest::isolate();
+ CHECK_EQ(isolate, info.GetIsolate());
+ CompileRun("interceptor_query_side_effect()");
+ if (attribute != v8::internal::ABSENT) {
+ info.GetReturnValue().Set(v8::Integer::New(isolate, attribute));
+ }
+}
+
int named_query_counter = 0;
void NamedQueryCallback(Local<Name> name,
const v8::PropertyCallbackInfo<v8::Integer>& info) {
@@ -1512,6 +1541,42 @@ THREADED_TEST(InterceptorHasICQueryToggle) {
500);
}
+THREADED_TEST(InterceptorStoreICWithSideEffectfulCallbacks) {
+ CheckInterceptorIC(EmptyInterceptorGetter,
+ HasICQuerySideEffect<Local<Name>, v8::internal::ABSENT>,
+ "let r;"
+ "let inside_side_effect = false;"
+ "let interceptor_query_side_effect = function() {"
+ " if (!inside_side_effect) {"
+ " inside_side_effect = true;"
+ " r.x = 153;"
+ " inside_side_effect = false;"
+ " }"
+ "};"
+ "for (var i = 0; i < 20; i++) {"
+ " r = { __proto__: o };"
+ " r.x = i;"
+ "}",
+ 19);
+
+ CheckInterceptorIC(InterceptorLoadXICGetterWithSideEffects,
+ nullptr, // query callback is not provided
+ "let r;"
+ "let inside_side_effect = false;"
+ "let interceptor_getter_side_effect = function() {"
+ " if (!inside_side_effect) {"
+ " inside_side_effect = true;"
+ " r.y = 153;"
+ " inside_side_effect = false;"
+ " }"
+ "};"
+ "for (var i = 0; i < 20; i++) {"
+ " r = { __proto__: o };"
+ " r.y = i;"
+ "}",
+ 19);
+}
+
static void InterceptorStoreICSetter(
Local<Name> key, Local<Value> value,
const v8::PropertyCallbackInfo<v8::Value>& info) {
@@ -1561,6 +1626,52 @@ THREADED_TEST(InterceptorStoreICWithNoSetter) {
CHECK_EQ(239 + 42, value->Int32Value(context.local()).FromJust());
}
+THREADED_TEST(EmptyInterceptorDoesNotShadowReadOnlyProperty) {
+ // Interceptor should not shadow readonly property 'x' on the prototype, and
+ // attempt to store to 'x' must throw.
+ CheckInterceptorIC(EmptyInterceptorGetter,
+ HasICQuery<Local<Name>, v8::internal::ABSENT>,
+ "'use strict';"
+ "let p = {};"
+ "Object.defineProperty(p, 'x', "
+ " {value: 153, writable: false});"
+ "o.__proto__ = p;"
+ "let result = 0;"
+ "let r;"
+ "for (var i = 0; i < 20; i++) {"
+ " r = { __proto__: o };"
+ " try {"
+ " r.x = i;"
+ " } catch (e) {"
+ " result++;"
+ " }"
+ "}"
+ "result",
+ 20);
+}
+
+THREADED_TEST(InterceptorShadowsReadOnlyProperty) {
+ // Interceptor claims that it has a writable property 'x', so the existence
+ // of the readonly property 'x' on the prototype should not cause exceptions.
+ CheckInterceptorIC(InterceptorLoadXICGetter,
+ nullptr, // query callback
+ "'use strict';"
+ "let p = {};"
+ "Object.defineProperty(p, 'x', "
+ " {value: 153, writable: false});"
+ "o.__proto__ = p;"
+ "let result = 0;"
+ "let r;"
+ "for (var i = 0; i < 20; i++) {"
+ " r = { __proto__: o };"
+ " try {"
+ " r.x = i;"
+ " result++;"
+ " } catch (e) {}"
+ "}"
+ "result",
+ 20);
+}
THREADED_TEST(EmptyInterceptorDoesNotShadowAccessors) {
v8::HandleScope scope(CcTest::isolate());
diff --git a/deps/v8/test/cctest/test-api-stack-traces.cc b/deps/v8/test/cctest/test-api-stack-traces.cc
index edfaa98dd66..4a55b47e2bc 100644
--- a/deps/v8/test/cctest/test-api-stack-traces.cc
+++ b/deps/v8/test/cctest/test-api-stack-traces.cc
@@ -179,6 +179,8 @@ TEST(StackTrace) {
// Checks that a StackFrame has certain expected values.
static void checkStackFrame(const char* expected_script_name,
+ const char* expected_script_source,
+ const char* expected_script_source_mapping_url,
const char* expected_func_name,
int expected_line_number, int expected_column,
bool is_eval, bool is_constructor,
@@ -186,12 +188,24 @@ static void checkStackFrame(const char* expected_script_name,
v8::HandleScope scope(CcTest::isolate());
v8::String::Utf8Value func_name(CcTest::isolate(), frame->GetFunctionName());
v8::String::Utf8Value script_name(CcTest::isolate(), frame->GetScriptName());
+ v8::String::Utf8Value script_source(CcTest::isolate(),
+ frame->GetScriptSource());
+ v8::String::Utf8Value script_source_mapping_url(
+ CcTest::isolate(), frame->GetScriptSourceMappingURL());
if (*script_name == nullptr) {
// The situation where there is no associated script, like for evals.
CHECK_NULL(expected_script_name);
} else {
CHECK_NOT_NULL(strstr(*script_name, expected_script_name));
}
+ CHECK_NOT_NULL(strstr(*script_source, expected_script_source));
+ if (*script_source_mapping_url == nullptr) {
+ CHECK_NULL(expected_script_source_mapping_url);
+ } else {
+ CHECK_NOT_NULL(expected_script_source_mapping_url);
+ CHECK_NOT_NULL(
+ strstr(*script_source_mapping_url, expected_script_source_mapping_url));
+ }
if (!frame->GetFunctionName().IsEmpty()) {
CHECK_NOT_NULL(strstr(*func_name, expected_func_name));
}
@@ -202,6 +216,67 @@ static void checkStackFrame(const char* expected_script_name,
CHECK(frame->IsUserJavaScript());
}
+// Tests the C++ StackTrace API.
+
+// Test getting OVERVIEW information. Should ignore information that is not
+// script name, function name, line number, and column offset.
+const char* overview_source_eval = "new foo();";
+const char* overview_source =
+ "function bar() {\n"
+ " var y; AnalyzeStackInNativeCode(1);\n"
+ "}\n"
+ "function foo() {\n"
+ "\n"
+ " bar();\n"
+ "}\n"
+ "//# sourceMappingURL=http://foobar.com/overview.ts\n"
+ "var x;eval('new foo();');";
+
+// Test getting DETAILED information.
+const char* detailed_source =
+ "function bat() {AnalyzeStackInNativeCode(2);\n"
+ "}\n"
+ "\n"
+ "function baz() {\n"
+ " bat();\n"
+ "}\n"
+ "eval('new baz();');";
+
+// Test using function.name and function.displayName in stack trace
+const char function_name_source[] =
+ "function bar(function_name, display_name, testGroup) {\n"
+ " var f = new Function(`AnalyzeStackInNativeCode(${testGroup});`);\n"
+ " if (function_name) {\n"
+ " Object.defineProperty(f, 'name', { value: function_name });\n"
+ " }\n"
+ " if (display_name) {\n"
+ " f.displayName = display_name;"
+ " }\n"
+ " f()\n"
+ "}\n"
+ "bar('function.name', undefined, 3);\n"
+ "bar('function.name', 'function.displayName', 4);\n"
+ "bar(239, undefined, 5);\n";
+
+// Maybe it's a bit pathological to depend on the exact format of the wrapper
+// the Function constructor puts around its input string. If this becomes a
+// hassle, maybe come up with some regex matching approach?
+const char function_name_source_anon3[] =
+ "(function anonymous(\n"
+ ") {\n"
+ "AnalyzeStackInNativeCode(3);\n"
+ "})";
+const char function_name_source_anon4[] =
+ "(function anonymous(\n"
+ ") {\n"
+ "AnalyzeStackInNativeCode(4);\n"
+ "})";
+const char function_name_source_anon5[] =
+ "(function anonymous(\n"
+ ") {\n"
+ "AnalyzeStackInNativeCode(5);\n"
+ "})";
+
static void AnalyzeStackInNativeCode(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::HandleScope scope(args.GetIsolate());
@@ -221,53 +296,55 @@ static void AnalyzeStackInNativeCode(
v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
args.GetIsolate(), 10, v8::StackTrace::kOverview);
CHECK_EQ(4, stackTrace->GetFrameCount());
- checkStackFrame(origin, "bar", 2, 10, false, false,
+ checkStackFrame(origin, overview_source, "//foobar.com/overview.ts", "bar",
+ 2, 10, false, false,
stackTrace->GetFrame(args.GetIsolate(), 0));
- checkStackFrame(origin, "foo", 6, 3, false, true,
- stackTrace->GetFrame(isolate, 1));
+ checkStackFrame(origin, overview_source, "//foobar.com/overview.ts", "foo",
+ 6, 3, false, true, stackTrace->GetFrame(isolate, 1));
// This is the source string inside the eval which has the call to foo.
- checkStackFrame(nullptr, "", 1, 1, true, false,
+ checkStackFrame(nullptr, "new foo();", nullptr, "", 1, 1, true, false,
stackTrace->GetFrame(isolate, 2));
// The last frame is an anonymous function which has the initial eval call.
- checkStackFrame(origin, "", 8, 7, false, false,
- stackTrace->GetFrame(isolate, 3));
+ checkStackFrame(origin, overview_source, "//foobar.com/overview.ts", "", 9,
+ 7, false, false, stackTrace->GetFrame(isolate, 3));
} else if (testGroup == kDetailedTest) {
v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
args.GetIsolate(), 10, v8::StackTrace::kDetailed);
CHECK_EQ(4, stackTrace->GetFrameCount());
- checkStackFrame(origin, "bat", 4, 22, false, false,
- stackTrace->GetFrame(isolate, 0));
- checkStackFrame(origin, "baz", 8, 3, false, true,
+ checkStackFrame(origin, detailed_source, nullptr, "bat", 4, 22, false,
+ false, stackTrace->GetFrame(isolate, 0));
+ checkStackFrame(origin, detailed_source, nullptr, "baz", 8, 3, false, true,
stackTrace->GetFrame(isolate, 1));
bool is_eval = true;
// This is the source string inside the eval which has the call to baz.
- checkStackFrame(nullptr, "", 1, 1, is_eval, false,
+ checkStackFrame(nullptr, "new baz();", nullptr, "", 1, 1, is_eval, false,
stackTrace->GetFrame(isolate, 2));
// The last frame is an anonymous function which has the initial eval call.
- checkStackFrame(origin, "", 10, 1, false, false,
+ checkStackFrame(origin, detailed_source, nullptr, "", 10, 1, false, false,
stackTrace->GetFrame(isolate, 3));
} else if (testGroup == kFunctionName) {
v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
args.GetIsolate(), 5, v8::StackTrace::kOverview);
CHECK_EQ(3, stackTrace->GetFrameCount());
- checkStackFrame(nullptr, "function.name", 3, 1, true, false,
+ checkStackFrame(nullptr, function_name_source_anon3, nullptr,
+ "function.name", 3, 1, true, false,
stackTrace->GetFrame(isolate, 0));
} else if (testGroup == kFunctionNameAndDisplayName) {
v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
args.GetIsolate(), 5, v8::StackTrace::kOverview);
CHECK_EQ(3, stackTrace->GetFrameCount());
- checkStackFrame(nullptr, "function.name", 3, 1, true, false,
+ checkStackFrame(nullptr, function_name_source_anon4, nullptr,
+ "function.name", 3, 1, true, false,
stackTrace->GetFrame(isolate, 0));
} else if (testGroup == kFunctionNameIsNotString) {
v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
args.GetIsolate(), 5, v8::StackTrace::kOverview);
CHECK_EQ(3, stackTrace->GetFrameCount());
- checkStackFrame(nullptr, "", 3, 1, true, false,
- stackTrace->GetFrame(isolate, 0));
+ checkStackFrame(nullptr, function_name_source_anon5, nullptr, "", 3, 1,
+ true, false, stackTrace->GetFrame(isolate, 0));
}
}
-// Tests the C++ StackTrace API.
// TODO(3074796): Reenable this as a THREADED_TEST once it passes.
// THREADED_TEST(CaptureStackTrace) {
TEST(CaptureStackTrace) {
@@ -279,17 +356,6 @@ TEST(CaptureStackTrace) {
v8::FunctionTemplate::New(isolate, AnalyzeStackInNativeCode));
LocalContext context(nullptr, templ);
- // Test getting OVERVIEW information. Should ignore information that is not
- // script name, function name, line number, and column offset.
- const char* overview_source =
- "function bar() {\n"
- " var y; AnalyzeStackInNativeCode(1);\n"
- "}\n"
- "function foo() {\n"
- "\n"
- " bar();\n"
- "}\n"
- "var x;eval('new foo();');";
v8::Local<v8::String> overview_src = v8_str(overview_source);
v8::ScriptCompiler::Source script_source(overview_src,
v8::ScriptOrigin(isolate, origin));
@@ -302,15 +368,6 @@ TEST(CaptureStackTrace) {
CHECK(!overview_result.IsEmpty());
CHECK(overview_result->IsObject());
- // Test getting DETAILED information.
- const char* detailed_source =
- "function bat() {AnalyzeStackInNativeCode(2);\n"
- "}\n"
- "\n"
- "function baz() {\n"
- " bat();\n"
- "}\n"
- "eval('new baz();');";
v8::Local<v8::String> detailed_src = v8_str(detailed_source);
// Make the script using a non-zero line and column offset.
v8::ScriptOrigin detailed_origin(isolate, origin, 3, 5);
@@ -324,21 +381,6 @@ TEST(CaptureStackTrace) {
CHECK(!detailed_result.IsEmpty());
CHECK(detailed_result->IsObject());
- // Test using function.name and function.displayName in stack trace
- const char function_name_source[] =
- "function bar(function_name, display_name, testGroup) {\n"
- " var f = new Function(`AnalyzeStackInNativeCode(${testGroup});`);\n"
- " if (function_name) {\n"
- " Object.defineProperty(f, 'name', { value: function_name });\n"
- " }\n"
- " if (display_name) {\n"
- " f.displayName = display_name;"
- " }\n"
- " f()\n"
- "}\n"
- "bar('function.name', undefined, 3);\n"
- "bar('function.name', 'function.displayName', 4);\n"
- "bar(239, undefined, 5);\n";
v8::Local<v8::String> function_name_src =
v8::String::NewFromUtf8Literal(isolate, function_name_source);
v8::ScriptCompiler::Source script_source3(function_name_src,
@@ -353,33 +395,37 @@ TEST(CaptureStackTrace) {
}
static int report_count = 0;
+
+// Test uncaught exception
+const char uncaught_exception_source[] =
+ "function foo() {\n"
+ " throw 1;\n"
+ "};\n"
+ "function bar() {\n"
+ " foo();\n"
+ "};";
+
static void StackTraceForUncaughtExceptionListener(
v8::Local<v8::Message> message, v8::Local<Value>) {
report_count++;
v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
CHECK_EQ(2, stack_trace->GetFrameCount());
- checkStackFrame("origin", "foo", 2, 3, false, false,
+ checkStackFrame("origin", uncaught_exception_source, nullptr, "foo", 2, 3,
+ false, false,
stack_trace->GetFrame(message->GetIsolate(), 0));
- checkStackFrame("origin", "bar", 5, 3, false, false,
+ checkStackFrame("origin", uncaught_exception_source, nullptr, "bar", 5, 3,
+ false, false,
stack_trace->GetFrame(message->GetIsolate(), 1));
}
TEST(CaptureStackTraceForUncaughtException) {
- report_count = 0;
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
isolate->AddMessageListener(StackTraceForUncaughtExceptionListener);
isolate->SetCaptureStackTraceForUncaughtExceptions(true);
- CompileRunWithOrigin(
- "function foo() {\n"
- " throw 1;\n"
- "};\n"
- "function bar() {\n"
- " foo();\n"
- "};",
- "origin");
+ CompileRunWithOrigin(uncaught_exception_source, "origin");
v8::Local<v8::Object> global = env->Global();
Local<Value> trouble =
global->Get(env.local(), v8_str("bar")).ToLocalChecked();
@@ -392,40 +438,100 @@ TEST(CaptureStackTraceForUncaughtException) {
CHECK_EQ(1, report_count);
}
+// Test uncaught exception in a setter
+const char uncaught_setter_exception_source[] =
+ "var setters = ['column', 'lineNumber', 'scriptName',\n"
+ " 'scriptNameOrSourceURL', 'functionName', 'isEval',\n"
+ " 'isConstructor'];\n"
+ "for (let i = 0; i < setters.length; i++) {\n"
+ " let prop = setters[i];\n"
+ " Object.prototype.__defineSetter__(prop, function() { throw prop; });\n"
+ "}\n";
+
+static void StackTraceForUncaughtExceptionAndSettersListener(
+ v8::Local<v8::Message> message, v8::Local<Value> value) {
+ CHECK(value->IsObject());
+ v8::Isolate* isolate = message->GetIsolate();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ report_count++;
+ v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
+ CHECK_EQ(1, stack_trace->GetFrameCount());
+ checkStackFrame(nullptr, "throw 'exception';", nullptr, nullptr, 1, 1, false,
+ false, stack_trace->GetFrame(isolate, 0));
+ v8::Local<v8::StackFrame> stack_frame = stack_trace->GetFrame(isolate, 0);
+ v8::Local<v8::Object> object = v8::Local<v8::Object>::Cast(value);
+ CHECK(object
+ ->Set(context,
+ v8::String::NewFromUtf8Literal(isolate, "lineNumber"),
+ v8::Integer::New(isolate, stack_frame->GetLineNumber()))
+ .IsNothing());
+}
+
TEST(CaptureStackTraceForUncaughtExceptionAndSetters) {
+ report_count = 0;
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
+ v8::Local<v8::Object> object = v8::Object::New(isolate);
+ isolate->AddMessageListener(StackTraceForUncaughtExceptionAndSettersListener,
+ object);
isolate->SetCaptureStackTraceForUncaughtExceptions(true, 1024,
v8::StackTrace::kDetailed);
- CompileRun(
- "var setters = ['column', 'lineNumber', 'scriptName',\n"
- " 'scriptNameOrSourceURL', 'functionName', 'isEval',\n"
- " 'isConstructor'];\n"
- "for (var i = 0; i < setters.length; i++) {\n"
- " var prop = setters[i];\n"
- " Object.prototype.__defineSetter__(prop, function() { throw prop; });\n"
- "}\n");
+ CompileRun(uncaught_setter_exception_source);
CompileRun("throw 'exception';");
isolate->SetCaptureStackTraceForUncaughtExceptions(false);
-}
+ isolate->RemoveMessageListeners(
+ StackTraceForUncaughtExceptionAndSettersListener);
+ CHECK(object
+ ->Get(isolate->GetCurrentContext(),
+ v8::String::NewFromUtf8Literal(isolate, "lineNumber"))
+ .ToLocalChecked()
+ ->IsUndefined());
+ CHECK_EQ(report_count, 1);
+}
+
+const char functions_with_function_name[] =
+ "function gen(name, counter) {\n"
+ " var f = function foo() {\n"
+ " if (counter === 0)\n"
+ " throw 1;\n"
+ " gen(name, counter - 1)();\n"
+ " };\n"
+ " if (counter == 3) {\n"
+ " Object.defineProperty(f, 'name', {get: function(){ throw 239; }});\n"
+ " } else {\n"
+ " Object.defineProperty(f, 'name', {writable:true});\n"
+ " if (counter == 2)\n"
+ " f.name = 42;\n"
+ " else\n"
+ " f.name = name + ':' + counter;\n"
+ " }\n"
+ " return f;\n"
+ "};"
+ "//# sourceMappingURL=local/functional.sc";
+
+const char functions_with_function_name_caller[] = "gen('foo', 3)();";
static void StackTraceFunctionNameListener(v8::Local<v8::Message> message,
v8::Local<Value>) {
v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
v8::Isolate* isolate = message->GetIsolate();
CHECK_EQ(5, stack_trace->GetFrameCount());
- checkStackFrame("origin", "foo:0", 4, 7, false, false,
+ checkStackFrame("origin", functions_with_function_name, "local/functional.sc",
+ "foo:0", 4, 7, false, false,
stack_trace->GetFrame(isolate, 0));
- checkStackFrame("origin", "foo:1", 5, 27, false, false,
+ checkStackFrame("origin", functions_with_function_name, "local/functional.sc",
+ "foo:1", 5, 27, false, false,
stack_trace->GetFrame(isolate, 1));
- checkStackFrame("origin", "foo", 5, 27, false, false,
+ checkStackFrame("origin", functions_with_function_name, "local/functional.sc",
+ "foo", 5, 27, false, false,
stack_trace->GetFrame(isolate, 2));
- checkStackFrame("origin", "foo", 5, 27, false, false,
+ checkStackFrame("origin", functions_with_function_name, "local/functional.sc",
+ "foo", 5, 27, false, false,
stack_trace->GetFrame(isolate, 3));
- checkStackFrame("origin", "", 1, 14, false, false,
- stack_trace->GetFrame(isolate, 4));
+ checkStackFrame("origin", functions_with_function_name_caller, nullptr, "", 1,
+ 14, false, false, stack_trace->GetFrame(isolate, 4));
}
TEST(GetStackTraceContainsFunctionsWithFunctionName) {
@@ -433,29 +539,11 @@ TEST(GetStackTraceContainsFunctionsWithFunctionName) {
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- CompileRunWithOrigin(
- "function gen(name, counter) {\n"
- " var f = function foo() {\n"
- " if (counter === 0)\n"
- " throw 1;\n"
- " gen(name, counter - 1)();\n"
- " };\n"
- " if (counter == 3) {\n"
- " Object.defineProperty(f, 'name', {get: function(){ throw 239; }});\n"
- " } else {\n"
- " Object.defineProperty(f, 'name', {writable:true});\n"
- " if (counter == 2)\n"
- " f.name = 42;\n"
- " else\n"
- " f.name = name + ':' + counter;\n"
- " }\n"
- " return f;\n"
- "};",
- "origin");
+ CompileRunWithOrigin(functions_with_function_name, "origin");
isolate->AddMessageListener(StackTraceFunctionNameListener);
isolate->SetCaptureStackTraceForUncaughtExceptions(true);
- CompileRunWithOrigin("gen('foo', 3)();", "origin");
+ CompileRunWithOrigin(functions_with_function_name_caller, "origin");
isolate->SetCaptureStackTraceForUncaughtExceptions(false);
isolate->RemoveMessageListeners(StackTraceFunctionNameListener);
}
diff --git a/deps/v8/test/cctest/test-api-typed-array.cc b/deps/v8/test/cctest/test-api-typed-array.cc
index 59c7137525d..a35aad8e37f 100644
--- a/deps/v8/test/cctest/test-api-typed-array.cc
+++ b/deps/v8/test/cctest/test-api-typed-array.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "test/cctest/test-api.h"
-
#include "src/api/api-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
+#include "test/cctest/test-api.h"
using ::v8::Array;
using ::v8::Context;
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index c01c1ea791c..5eafa420bc2 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -71,9 +71,12 @@
#include "src/utils/utils.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"
-#include "test/cctest/wasm/wasm-run-utils.h"
#include "test/common/flag-utils.h"
+
+#if V8_ENABLE_WEBASSEMBLY
+#include "test/cctest/wasm/wasm-run-utils.h"
#include "test/common/wasm/wasm-macro-gen.h"
+#endif // V8_ENABLE_WEBASSEMBLY
static const bool kLogThreading = false;
@@ -378,6 +381,45 @@ THREADED_TEST(ReceiverSignature) {
}
}
+static void DoNothingCallback(const v8::FunctionCallbackInfo<v8::Value>&) {}
+
+// Regression test for issue chromium:1188563.
+THREADED_TEST(Regress1188563) {
+ i::FLAG_allow_natives_syntax = true;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ // Set up some data for CallHandlerInfo.
+ v8::Local<v8::FunctionTemplate> data_constructor_templ =
+ v8::FunctionTemplate::New(isolate);
+ v8::Local<Function> data_constructor =
+ data_constructor_templ->GetFunction(env.local()).ToLocalChecked();
+ v8::Local<v8::Object> data =
+ data_constructor->NewInstance(env.local()).ToLocalChecked();
+
+ // Set up templates and instance with accessor property.
+ v8::Local<v8::FunctionTemplate> fun = v8::FunctionTemplate::New(isolate);
+ v8::Local<v8::FunctionTemplate> callback =
+ v8::FunctionTemplate::New(isolate, DoNothingCallback, data);
+ v8::Local<v8::ObjectTemplate> instance_templ = fun->InstanceTemplate();
+ instance_templ->SetAccessorProperty(v8_str("accessor"), callback, callback);
+ Local<Value> test_object =
+ instance_templ->NewInstance(env.local()).ToLocalChecked();
+ // Set up global variables.
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("test_object"), test_object)
+ .FromJust());
+ CompileRun(
+ "function test() {"
+ " test_object.accessor;"
+ "};"
+ "%PrepareFunctionForOptimization(test);"
+ "try { test() } catch(e) {}"
+ "try { test() } catch(e) {}"
+ "%OptimizeFunctionOnNextCall(test);"
+ "test()");
+}
THREADED_TEST(HulIgennem) {
LocalContext env;
@@ -514,6 +556,7 @@ THREADED_TEST(ScriptUsingStringResource) {
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value(env.local()).FromJust());
CHECK(source->IsExternalTwoByte());
+ CHECK(source->IsExternal());
CHECK_EQ(resource,
static_cast<TestResource*>(source->GetExternalStringResource()));
String::Encoding encoding = String::UNKNOWN_ENCODING;
@@ -541,6 +584,8 @@ THREADED_TEST(ScriptUsingOneByteStringResource) {
String::NewExternalOneByte(env->GetIsolate(), resource)
.ToLocalChecked();
CHECK(source->IsExternalOneByte());
+ CHECK(source->IsExternal());
+ CHECK(!source->IsExternalTwoByte());
CHECK_EQ(static_cast<const String::ExternalStringResourceBase*>(resource),
source->GetExternalOneByteStringResource());
String::Encoding encoding = String::UNKNOWN_ENCODING;
@@ -574,6 +619,7 @@ THREADED_TEST(ScriptMakingExternalString) {
CcTest::CollectGarbage(i::NEW_SPACE); // in old gen now
CHECK(!source->IsExternalTwoByte());
CHECK(!source->IsExternalOneByte());
+ CHECK(!source->IsExternal());
String::Encoding encoding = String::UNKNOWN_ENCODING;
CHECK(!source->GetExternalStringResourceBase(&encoding));
CHECK_EQ(String::ONE_BYTE_ENCODING, encoding);
@@ -2655,20 +2701,17 @@ static void ThrowingSymbolAccessorGetter(
THREADED_TEST(AccessorIsPreservedOnAttributeChange) {
v8::Isolate* isolate = CcTest::isolate();
+ i::Isolate* i_isolate = CcTest::i_isolate();
v8::HandleScope scope(isolate);
LocalContext env;
v8::Local<v8::Value> res = CompileRun("var a = []; a;");
i::Handle<i::JSReceiver> a(v8::Utils::OpenHandle(v8::Object::Cast(*res)));
- CHECK_EQ(
- 1,
- a->map().instance_descriptors(v8::kRelaxedLoad).number_of_descriptors());
+ CHECK_EQ(1, a->map().instance_descriptors(i_isolate).number_of_descriptors());
CompileRun("Object.defineProperty(a, 'length', { writable: false });");
- CHECK_EQ(
- 0,
- a->map().instance_descriptors(v8::kRelaxedLoad).number_of_descriptors());
+ CHECK_EQ(0, a->map().instance_descriptors(i_isolate).number_of_descriptors());
// But we should still have an AccessorInfo.
- i::Handle<i::String> name = CcTest::i_isolate()->factory()->length_string();
- i::LookupIterator it(CcTest::i_isolate(), a, name,
+ i::Handle<i::String> name = i_isolate->factory()->length_string();
+ i::LookupIterator it(i_isolate, a, name,
i::LookupIterator::OWN_SKIP_INTERCEPTOR);
CHECK_EQ(i::LookupIterator::ACCESSOR, it.state());
CHECK(it.GetAccessors()->IsAccessorInfo());
@@ -4728,6 +4771,13 @@ namespace {
// some particular way by calling the supplied |tester| function. The tests that
// use this purposely test only a single getter as the getter updates the cached
// state of the object which could affect the results of other functions.
+const char message_attributes_script[] =
+ R"javascript(
+ (function() {
+ throw new Error();
+ })();
+ )javascript";
+
void CheckMessageAttributes(std::function<void(v8::Local<v8::Context> context,
v8::Local<v8::Message> message)>
tester) {
@@ -4735,12 +4785,7 @@ void CheckMessageAttributes(std::function<void(v8::Local<v8::Context> context,
v8::HandleScope scope(context->GetIsolate());
TryCatch try_catch(context->GetIsolate());
- CompileRun(
- R"javascript(
- (function() {
- throw new Error();
- })();
- )javascript");
+ CompileRun(message_attributes_script);
CHECK(try_catch.HasCaught());
v8::Local<v8::Value> error = try_catch.Exception();
@@ -4763,38 +4808,47 @@ TEST(MessageGetLineNumber) {
TEST(MessageGetStartColumn) {
CheckMessageAttributes(
[](v8::Local<v8::Context> context, v8::Local<v8::Message> message) {
- CHECK_EQ(14, message->GetStartColumn(context).FromJust());
+ CHECK_EQ(12, message->GetStartColumn(context).FromJust());
});
}
TEST(MessageGetEndColumn) {
CheckMessageAttributes(
[](v8::Local<v8::Context> context, v8::Local<v8::Message> message) {
- CHECK_EQ(15, message->GetEndColumn(context).FromJust());
+ CHECK_EQ(13, message->GetEndColumn(context).FromJust());
});
}
TEST(MessageGetStartPosition) {
CheckMessageAttributes(
[](v8::Local<v8::Context> context, v8::Local<v8::Message> message) {
- CHECK_EQ(35, message->GetStartPosition());
+ CHECK_EQ(31, message->GetStartPosition());
});
}
TEST(MessageGetEndPosition) {
CheckMessageAttributes(
[](v8::Local<v8::Context> context, v8::Local<v8::Message> message) {
- CHECK_EQ(36, message->GetEndPosition());
+ CHECK_EQ(32, message->GetEndPosition());
});
}
+TEST(MessageGetSource) {
+ CheckMessageAttributes([](v8::Local<v8::Context> context,
+ v8::Local<v8::Message> message) {
+ std::string result(*v8::String::Utf8Value(
+ context->GetIsolate(), message->GetSource(context).ToLocalChecked()));
+ CHECK_EQ(message_attributes_script, result);
+ });
+}
+
TEST(MessageGetSourceLine) {
CheckMessageAttributes(
[](v8::Local<v8::Context> context, v8::Local<v8::Message> message) {
std::string result(*v8::String::Utf8Value(
context->GetIsolate(),
message->GetSourceLine(context).ToLocalChecked()));
- CHECK_EQ(" throw new Error();", result);
+ CHECK_EQ(" throw new Error();", result);
});
}
@@ -15463,7 +15517,7 @@ THREADED_TEST(ScriptContextDependence) {
101);
}
-
+#if V8_ENABLE_WEBASSEMBLY
static int asm_warning_triggered = 0;
static void AsmJsWarningListener(v8::Local<v8::Message> message,
@@ -15490,14 +15544,11 @@ TEST(AsmJsWarning) {
" return {};\n"
"}\n"
"module();");
-#if V8_ENABLE_WEBASSEMBLY
int kExpectedWarnings = 1;
-#else
- int kExpectedWarnings = 0;
-#endif
CHECK_EQ(kExpectedWarnings, asm_warning_triggered);
isolate->RemoveMessageListeners(AsmJsWarningListener);
}
+#endif // V8_ENABLE_WEBASSEMBLY
static int error_level_message_count = 0;
static int expected_error_level = 0;
@@ -16803,6 +16854,7 @@ class VisitorImpl : public v8::ExternalResourceVisitor {
}
~VisitorImpl() override = default;
void VisitExternalString(v8::Local<v8::String> string) override {
+ CHECK(string->IsExternal());
if (string->IsExternalOneByte()) {
CHECK(!string->IsExternalTwoByte());
return;
@@ -16847,6 +16899,7 @@ TEST(ExternalizeOldSpaceTwoByteCons) {
cons->MakeExternal(resource);
CHECK(cons->IsExternalTwoByte());
+ CHECK(cons->IsExternal());
CHECK_EQ(resource, cons->GetExternalStringResource());
String::Encoding encoding;
CHECK_EQ(resource, cons->GetExternalStringResourceBase(&encoding));
@@ -16884,7 +16937,7 @@ TEST(VisitExternalStrings) {
v8::HandleScope scope(isolate);
const char string[] = "Some string";
uint16_t* two_byte_string = AsciiToTwoByteString(string);
- TestResource* resource[4];
+ TestResource* resource[5];
resource[0] = new TestResource(two_byte_string);
v8::Local<v8::String> string0 =
v8::String::NewExternalTwoByte(env->GetIsolate(), resource[0])
@@ -16912,11 +16965,29 @@ TEST(VisitExternalStrings) {
string3_i).is_null());
CHECK(string3_i->IsInternalizedString());
+ // Externalized one-byte string.
+ auto one_byte_resource =
+ new TestOneByteResource(i::StrDup(string), nullptr, 0);
+ v8::Local<v8::String> string4 =
+ String::NewExternalOneByte(env->GetIsolate(), one_byte_resource)
+ .ToLocalChecked();
+
// We need to add usages for string* to avoid warnings in GCC 4.7
CHECK(string0->IsExternalTwoByte());
CHECK(string1->IsExternalTwoByte());
CHECK(string2->IsExternalTwoByte());
CHECK(string3->IsExternalTwoByte());
+ CHECK(!string4->IsExternalTwoByte());
+ CHECK(string0->IsExternal());
+ CHECK(string1->IsExternal());
+ CHECK(string2->IsExternal());
+ CHECK(string3->IsExternal());
+ CHECK(string4->IsExternal());
+ CHECK(!string0->IsExternalOneByte());
+ CHECK(!string1->IsExternalOneByte());
+ CHECK(!string2->IsExternalOneByte());
+ CHECK(!string3->IsExternalOneByte());
+ CHECK(string4->IsExternalOneByte());
VisitorImpl visitor(resource);
isolate->VisitExternalResources(&visitor);
@@ -21391,13 +21462,6 @@ class RegExpInterruptTest {
string->MakeExternal(&two_byte_string_resource);
}
- static void ReenterIrregexp(v8::Isolate* isolate, void* data) {
- v8::HandleScope scope(isolate);
- v8::TryCatch try_catch(isolate);
- // Irregexp is not reentrant. This should crash.
- CompileRun("/((a*)*)*b/.exec('aaaaab')");
- }
-
private:
static void SignalSemaphore(v8::Isolate* isolate, void* data) {
reinterpret_cast<RegExpInterruptTest*>(data)->sem_.Signal();
@@ -21524,21 +21588,6 @@ TEST(RegExpInterruptAndMakeSubjectTwoByteExternal) {
test.RunTest(RegExpInterruptTest::MakeSubjectTwoByteExternal);
}
-TEST(RegExpInterruptAndReenterIrregexp) {
- // We only check in the runtime entry to irregexp, so make sure we don't hit
- // an interpreter.
- i::FLAG_regexp_tier_up_ticks = 0;
- i::FLAG_regexp_interpret_all = false;
- i::FLAG_enable_experimental_regexp_engine = false;
- // We want to be stuck in regexp execution, so no fallback to linear-time
- // engine.
- // TODO(mbid,v8:10765): Find a way to test interrupt support of the
- // experimental engine.
- i::FLAG_enable_experimental_regexp_engine_on_excessive_backtracks = false;
- RegExpInterruptTest test;
- test.RunTest(RegExpInterruptTest::ReenterIrregexp);
-}
-
class RequestInterruptTestBase {
public:
RequestInterruptTestBase()
@@ -21800,7 +21849,6 @@ class RequestInterruptTestWithMathAbs
}
};
-
TEST(RequestInterruptTestWithFunctionCall) {
RequestInterruptTestWithFunctionCall().RunTest();
}
@@ -21830,7 +21878,6 @@ TEST(RequestInterruptTestWithMathAbs) {
RequestInterruptTestWithMathAbs().RunTest();
}
-
class RequestMultipleInterrupts : public RequestInterruptTestBase {
public:
RequestMultipleInterrupts() : i_thread(this), counter_(0) {}
@@ -23571,7 +23618,14 @@ void RunStreamingTest(const char** chunks, v8::ScriptType type,
if (i::FLAG_harmony_top_level_await) {
v8::Local<v8::Promise> promise = result.As<v8::Promise>();
CHECK_EQ(promise->State(), v8::Promise::kFulfilled);
- CHECK_EQ(13, promise->Result()->Int32Value(env.local()).FromJust());
+ CHECK(promise->Result()->IsUndefined());
+ // Fulfilled top-level await promises always resolve to undefined. Check
+ // the test result via a global variable.
+ CHECK_EQ(13, env->Global()
+ ->Get(env.local(), v8_str("Result"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
} else {
CHECK(!result.IsEmpty());
CHECK_EQ(13, result->Int32Value(env.local()).FromJust());
@@ -23599,17 +23653,20 @@ void RunStreamingTest(const char** chunks,
TEST(StreamingSimpleScript) {
// This script is unrealistically small, since no one chunk is enough to fill
// the backing buffer of Scanner, let alone overflow it.
- const char* chunks[] = {"function foo() { ret", "urn 13; } f", "oo(); ",
- nullptr};
+ const char* chunks[] = {"function foo() { ret",
+ "urn 13; } globalThis.Result = f", "oo(); ", nullptr};
RunStreamingTest(chunks);
}
TEST(StreamingScriptConstantArray) {
// When run with Ignition, tests that the streaming parser canonicalizes
// handles so that they are only added to the constant pool array once.
- const char* chunks[] = {
- "var a = {};", "var b = {};", "var c = 'testing';",
- "var d = 'testing';", "13;", nullptr};
+ const char* chunks[] = {"var a = {};",
+ "var b = {};",
+ "var c = 'testing';",
+ "var d = 'testing';",
+ "globalThis.Result = 13;",
+ nullptr};
RunStreamingTest(chunks);
}
@@ -23624,7 +23681,7 @@ TEST(StreamingScriptEvalShadowing) {
" function g() {\n"
" return y\n"
" }\n"
- " return g();\n"
+ " return (globalThis.Result = g());\n"
" })()\n"
"})()\n";
const char* chunks[] = {chunk1, nullptr};
@@ -23648,7 +23705,7 @@ TEST(StreamingBiggerScript) {
" for (i = 0; i < 13; ++i) { result = result + 1; }\n"
" return result;\n"
"}\n";
- const char* chunks[] = {chunk1, "foo(); ", nullptr};
+ const char* chunks[] = {chunk1, "globalThis.Result = foo(); ", nullptr};
RunStreamingTest(chunks);
}
@@ -23660,7 +23717,8 @@ TEST(StreamingScriptWithParseError) {
" // This will result in a parse error.\n"
" var if else then foo";
char chunk2[] = " 13\n";
- const char* chunks[] = {chunk1, chunk2, "foo();", nullptr};
+ const char* chunks[] = {chunk1, chunk2, "globalThis.Result = foo();",
+ nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::ONE_BYTE,
false);
@@ -23671,7 +23729,8 @@ TEST(StreamingScriptWithParseError) {
" // This will be parsed successfully.\n"
" function foo() { return ";
char chunk2[] = " 13; }\n";
- const char* chunks[] = {chunk1, chunk2, "foo();", nullptr};
+ const char* chunks[] = {chunk1, chunk2, "globalThis.Result = foo();",
+ nullptr};
RunStreamingTest(chunks);
}
@@ -23688,7 +23747,7 @@ TEST(StreamingUtf8Script) {
" var foob\xec\x92\x81r = 13;\n"
" return foob\xec\x92\x81r;\n"
"}\n";
- const char* chunks[] = {chunk1, "foo(); ", nullptr};
+ const char* chunks[] = {chunk1, "globalThis.Result = foo(); ", nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
@@ -23712,7 +23771,8 @@ TEST(StreamingUtf8ScriptWithSplitCharactersSanityCheck) {
for (int i = 0; i < 3; ++i) {
chunk2[i] = reference[i];
}
- const char* chunks[] = {chunk1, chunk2, "foo();", nullptr};
+ const char* chunks[] = {chunk1, chunk2, "globalThis.Result = foo();",
+ nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
@@ -23733,7 +23793,8 @@ TEST(StreamingUtf8ScriptWithSplitCharacters) {
chunk1[strlen(chunk1) - 1] = reference[0];
chunk2[0] = reference[1];
chunk2[1] = reference[2];
- const char* chunks[] = {chunk1, chunk2, "foo();", nullptr};
+ const char* chunks[] = {chunk1, chunk2, "globalThis.Result = foo();",
+ nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
@@ -23760,7 +23821,8 @@ TEST(StreamingUtf8ScriptWithSplitCharactersValidEdgeCases) {
chunk2[0] = reference[0];
chunk2[1] = reference[1];
chunk3[0] = reference[2];
- const char* chunks[] = {chunk1, chunk2, chunk3, "foo();", nullptr};
+ const char* chunks[] = {chunk1, chunk2, chunk3,
+ "globalThis.Result = foo();", nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
// The small chunk is at the end of a character
@@ -23778,7 +23840,8 @@ TEST(StreamingUtf8ScriptWithSplitCharactersValidEdgeCases) {
chunk1[strlen(chunk1) - 1] = reference[0];
chunk2[0] = reference[1];
chunk2[1] = reference[2];
- const char* chunks[] = {chunk1, chunk2, chunk3, "foo();", nullptr};
+ const char* chunks[] = {chunk1, chunk2, chunk3,
+ "globalThis.Result = foo();", nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
// Case 2: the script ends with a multi-byte character. Make sure that it's
@@ -23786,7 +23849,7 @@ TEST(StreamingUtf8ScriptWithSplitCharactersValidEdgeCases) {
{
char chunk1[] =
"var foob\xec\x92\x81 = 13;\n"
- "foob\xec\x92\x81";
+ "globalThis.Result = foob\xec\x92\x81";
const char* chunks[] = {chunk1, nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
@@ -23811,7 +23874,8 @@ TEST(StreamingUtf8ScriptWithSplitCharactersInvalidEdgeCases) {
chunk1[strlen(chunk1) - 1] = reference[0];
chunk2[0] = reference[1];
chunk3[0] = reference[2];
- const char* chunks[] = {chunk1, chunk2, chunk3, "foo();", nullptr};
+ const char* chunks[] = {chunk1, chunk2, chunk3, "globalThis.Result = foo();",
+ nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
@@ -23882,7 +23946,8 @@ TEST(StreamingScriptWithInvalidUtf8) {
"}\n";
for (int i = 0; i < 5; ++i) chunk1[strlen(chunk1) - 5 + i] = reference[i];
- const char* chunks[] = {chunk1, chunk2, "foo();", nullptr};
+ const char* chunks[] = {chunk1, chunk2, "globalThis.Result = foo();",
+ nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8, false);
}
@@ -23903,7 +23968,8 @@ TEST(StreamingUtf8ScriptWithMultipleMultibyteCharactersSomeSplit) {
chunk1[strlen(chunk1) - 1] = reference[0];
chunk2[0] = reference[1];
chunk2[1] = reference[2];
- const char* chunks[] = {chunk1, chunk2, "foo();", nullptr};
+ const char* chunks[] = {chunk1, chunk2, "globalThis.Result = foo();",
+ nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
@@ -23924,7 +23990,8 @@ TEST(StreamingUtf8ScriptWithMultipleMultibyteCharactersSomeSplit2) {
chunk1[strlen(chunk1) - 1] = reference[0];
chunk2[0] = reference[1];
chunk2[1] = reference[2];
- const char* chunks[] = {chunk1, chunk2, "foo();", nullptr};
+ const char* chunks[] = {chunk1, chunk2, "globalThis.Result = foo();",
+ nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
@@ -24784,7 +24851,8 @@ TEST(ClassPrototypeCreationContext) {
TEST(SimpleStreamingScriptWithSourceURL) {
- const char* chunks[] = {"function foo() { ret", "urn 13; } f", "oo();\n",
+ const char* chunks[] = {"function foo() { ret",
+ "urn 13; } globalThis.Result = f", "oo();\n",
"//# sourceURL=bar2.js\n", nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8, true,
"bar2.js");
@@ -24792,7 +24860,8 @@ TEST(SimpleStreamingScriptWithSourceURL) {
TEST(StreamingScriptWithSplitSourceURL) {
- const char* chunks[] = {"function foo() { ret", "urn 13; } f",
+ const char* chunks[] = {"function foo() { ret",
+ "urn 13; } globalThis.Result = f",
"oo();\n//# sourceURL=b", "ar2.js\n", nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8, true,
"bar2.js");
@@ -24801,7 +24870,8 @@ TEST(StreamingScriptWithSplitSourceURL) {
TEST(StreamingScriptWithSourceMappingURLInTheMiddle) {
const char* chunks[] = {"function foo() { ret", "urn 13; }\n//#",
- " sourceMappingURL=bar2.js\n", "foo();", nullptr};
+ " sourceMappingURL=bar2.js\n",
+ "globalThis.Result = foo();", nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8, true,
nullptr, "bar2.js");
}
@@ -25127,10 +25197,10 @@ THREADED_TEST(ReceiverConversionForAccessors) {
CHECK(CompileRun("acc.call(undefined) == 42")->BooleanValue(isolate));
}
-class FutexInterruptionThread : public v8::base::Thread {
+class TerminateExecutionThread : public v8::base::Thread {
public:
- explicit FutexInterruptionThread(v8::Isolate* isolate)
- : Thread(Options("FutexInterruptionThread")), isolate_(isolate) {}
+ explicit TerminateExecutionThread(v8::Isolate* isolate)
+ : Thread(Options("TerminateExecutionThread")), isolate_(isolate) {}
void Run() override {
// Wait a bit before terminating.
@@ -25142,14 +25212,13 @@ class FutexInterruptionThread : public v8::base::Thread {
v8::Isolate* isolate_;
};
-
TEST(FutexInterruption) {
i::FLAG_harmony_sharedarraybuffer = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
LocalContext env;
- FutexInterruptionThread timeout_thread(isolate);
+ TerminateExecutionThread timeout_thread(isolate);
v8::TryCatch try_catch(CcTest::isolate());
CHECK(timeout_thread.Start());
@@ -25162,6 +25231,28 @@ TEST(FutexInterruption) {
timeout_thread.Join();
}
+TEST(StackCheckTermination) {
+ v8::Isolate* isolate = CcTest::isolate();
+ i::Isolate* i_isolate = CcTest::i_isolate();
+ v8::HandleScope scope(isolate);
+ LocalContext env;
+
+ TerminateExecutionThread timeout_thread(isolate);
+
+ v8::TryCatch try_catch(isolate);
+ CHECK(timeout_thread.Start());
+ auto should_continue = [i_isolate]() {
+ using StackLimitCheck = i::StackLimitCheck;
+ STACK_CHECK(i_isolate, false);
+ return true;
+ };
+ while (should_continue()) {
+ }
+ if (i_isolate->has_pending_exception()) i_isolate->ReportPendingMessages();
+ CHECK(try_catch.HasTerminated());
+ timeout_thread.Join();
+}
+
static int nb_uncaught_exception_callback_calls = 0;
@@ -26705,6 +26796,7 @@ TEST(AtomicsWaitCallback) {
AtomicsWaitCallbackCommon(isolate, CompileRun(init), 4, 4);
}
+#if V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
namespace wasm {
@@ -26783,6 +26875,7 @@ TEST(WasmI64AtomicWaitCallback) {
} // namespace wasm
} // namespace internal
} // namespace v8
+#endif // V8_ENABLE_WEBASSEMBLY
TEST(BigIntAPI) {
LocalContext env;
@@ -27937,37 +28030,28 @@ void CallWithUnexpectedObjectType(v8::Local<v8::Value> receiver) {
}
class TestCFunctionInfo : public v8::CFunctionInfo {
- const v8::CTypeInfo& ReturnInfo() const override {
- static v8::CTypeInfo return_info =
- v8::CTypeInfo(v8::CTypeInfo::Type::kVoid);
- return return_info;
- }
-
- unsigned int ArgumentCount() const override { return 2; }
-
- const v8::CTypeInfo& ArgumentInfo(unsigned int index) const override {
- static v8::CTypeInfo type_info0 =
- v8::CTypeInfo(v8::CTypeInfo::Type::kV8Value);
- static v8::CTypeInfo type_info1 = v8::CTypeInfo(v8::CTypeInfo::Type::kBool);
- switch (index) {
- case 0:
- return type_info0;
- case 1:
- return type_info1;
- default:
- UNREACHABLE();
- }
- }
+ static constexpr unsigned int kArgCount = 2u;
- bool HasOptions() const override { return false; }
+ public:
+ TestCFunctionInfo()
+ : v8::CFunctionInfo(v8::CTypeInfo(v8::CTypeInfo::Type::kVoid), kArgCount,
+ arg_info_storage_),
+ arg_info_storage_{
+ v8::CTypeInfo(v8::CTypeInfo::Type::kV8Value),
+ v8::CTypeInfo(v8::CTypeInfo::Type::kBool),
+ } {}
+
+ private:
+ const v8::CTypeInfo arg_info_storage_[kArgCount];
};
void CheckDynamicTypeInfo() {
LocalContext env;
static TestCFunctionInfo type_info;
- v8::CFunction c_func =
- v8::CFunction::Make(ApiNumberChecker<bool>::FastCallback, &type_info);
+ v8::CFunction c_func = v8::CFunction(
+ reinterpret_cast<const void*>(ApiNumberChecker<bool>::FastCallback),
+ &type_info);
CHECK_EQ(c_func.ArgumentCount(), 2);
CHECK_EQ(c_func.ArgumentInfo(0).GetType(), v8::CTypeInfo::Type::kV8Value);
CHECK_EQ(c_func.ArgumentInfo(1).GetType(), v8::CTypeInfo::Type::kBool);
@@ -27979,7 +28063,6 @@ void CheckDynamicTypeInfo() {
TEST(FastApiStackSlot) {
#ifndef V8_LITE_MODE
if (i::FLAG_jitless) return;
- if (i::FLAG_turboprop) return;
FLAG_SCOPE_EXTERNAL(opt);
FLAG_SCOPE_EXTERNAL(turbo_fast_api_calls);
@@ -28031,7 +28114,6 @@ TEST(FastApiStackSlot) {
TEST(FastApiCalls) {
#ifndef V8_LITE_MODE
if (i::FLAG_jitless) return;
- if (i::FLAG_turboprop) return;
FLAG_SCOPE_EXTERNAL(opt);
FLAG_SCOPE_EXTERNAL(turbo_fast_api_calls);
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index a1d8cdfb7d4..2784bfe16b4 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -6832,63 +6832,6 @@ TEST(ldr_literal_range_max_dist_no_emission_2) {
#endif
-static const PrefetchOperation kPrfmOperations[] = {
- PLDL1KEEP, PLDL1STRM, PLDL2KEEP, PLDL2STRM, PLDL3KEEP, PLDL3STRM,
-
- PLIL1KEEP, PLIL1STRM, PLIL2KEEP, PLIL2STRM, PLIL3KEEP, PLIL3STRM,
-
- PSTL1KEEP, PSTL1STRM, PSTL2KEEP, PSTL2STRM, PSTL3KEEP, PSTL3STRM};
-
-TEST(prfm_regoffset_assem) {
- INIT_V8();
- SETUP();
-
- START();
- // The address used in prfm doesn't have to be valid.
- __ Mov(x0, 0x0123456789abcdef);
-
- CPURegList inputs(CPURegister::kRegister, kXRegSizeInBits, 10, 18);
- __ Mov(x10, 0);
- __ Mov(x11, 1);
- __ Mov(x12, 8);
- __ Mov(x13, 255);
- __ Mov(x14, -0);
- __ Mov(x15, -1);
- __ Mov(x16, -8);
- __ Mov(x17, -255);
- __ Mov(x18, 0xfedcba9876543210);
-
- for (int op = 0; op < (1 << ImmPrefetchOperation_width); op++) {
- // Unallocated prefetch operations are ignored, so test all of them.
- // We have to use the Assembler directly for this.
- CPURegList loop = inputs;
- while (!loop.IsEmpty()) {
- __ prfm(op, MemOperand(x0, Register::Create(loop.PopLowestIndex().code(),
- kXRegSizeInBits)));
- }
- }
-
- for (PrefetchOperation op : kPrfmOperations) {
- // Also test named operations.
- CPURegList loop = inputs;
- while (!loop.IsEmpty()) {
- Register input =
- Register::Create(loop.PopLowestIndex().code(), kXRegSizeInBits);
- __ prfm(op, MemOperand(x0, input, UXTW));
- __ prfm(op, MemOperand(x0, input, UXTW, 3));
- __ prfm(op, MemOperand(x0, input, LSL));
- __ prfm(op, MemOperand(x0, input, LSL, 3));
- __ prfm(op, MemOperand(x0, input, SXTW));
- __ prfm(op, MemOperand(x0, input, SXTW, 3));
- __ prfm(op, MemOperand(x0, input, SXTX));
- __ prfm(op, MemOperand(x0, input, SXTX, 3));
- }
- }
-
- END();
- RUN();
-}
-
TEST(add_sub_imm) {
INIT_V8();
SETUP();
diff --git a/deps/v8/test/cctest/test-code-pages.cc b/deps/v8/test/cctest/test-code-pages.cc
index d0ed8334a55..7d335f21742 100644
--- a/deps/v8/test/cctest/test-code-pages.cc
+++ b/deps/v8/test/cctest/test-code-pages.cc
@@ -70,18 +70,23 @@ bool PagesHasExactPage(std::vector<MemoryRange>* pages, Address search_page,
return it != pages->end();
}
-bool PagesContainsAddress(std::vector<MemoryRange>* pages,
- Address search_address) {
+bool PagesContainsRange(std::vector<MemoryRange>* pages, Address search_address,
+ size_t size) {
byte* addr = reinterpret_cast<byte*>(search_address);
auto it =
- std::find_if(pages->begin(), pages->end(), [addr](const MemoryRange& r) {
+ std::find_if(pages->begin(), pages->end(), [=](const MemoryRange& r) {
const byte* page_start = reinterpret_cast<const byte*>(r.start);
const byte* page_end = page_start + r.length_in_bytes;
- return addr >= page_start && addr < page_end;
+ return addr >= page_start && (addr + size) <= page_end;
});
return it != pages->end();
}
+bool PagesContainsAddress(std::vector<MemoryRange>* pages,
+ Address search_address) {
+ return PagesContainsRange(pages, search_address, 0);
+}
+
} // namespace
TEST(CodeRangeCorrectContents) {
@@ -99,8 +104,18 @@ TEST(CodeRangeCorrectContents) {
CHECK_EQ(2, pages->size());
CHECK(PagesHasExactPage(pages, code_range.begin(), code_range.size()));
CHECK(PagesHasExactPage(
- pages, reinterpret_cast<Address>(i_isolate->embedded_blob_code()),
- i_isolate->embedded_blob_code_size()));
+ pages, reinterpret_cast<Address>(i_isolate->CurrentEmbeddedBlobCode()),
+ i_isolate->CurrentEmbeddedBlobCodeSize()));
+ if (i_isolate->is_short_builtin_calls_enabled()) {
+ // In this case embedded blob code must be included via code_range.
+ CHECK(PagesContainsRange(
+ pages, reinterpret_cast<Address>(i_isolate->embedded_blob_code()),
+ i_isolate->embedded_blob_code_size()));
+ } else {
+ CHECK(PagesHasExactPage(
+ pages, reinterpret_cast<Address>(i_isolate->embedded_blob_code()),
+ i_isolate->embedded_blob_code_size()));
+ }
}
TEST(CodePagesCorrectContents) {
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
index 1ba26a81b86..67aafa8709d 100644
--- a/deps/v8/test/cctest/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -1890,13 +1890,21 @@ TEST(AllocateJSObjectFromMap) {
"object")));
JSObject::NormalizeProperties(isolate, object, KEEP_INOBJECT_PROPERTIES, 0,
"Normalize");
+ Handle<HeapObject> properties =
+ V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL
+ ? Handle<HeapObject>(object->property_dictionary_swiss(), isolate)
+ : handle(object->property_dictionary(), isolate);
Handle<JSObject> result = Handle<JSObject>::cast(
- ft.Call(handle(object->map(), isolate),
- handle(object->property_dictionary(), isolate),
+ ft.Call(handle(object->map(), isolate), properties,
handle(object->elements(), isolate))
.ToHandleChecked());
CHECK_EQ(result->map(), object->map());
- CHECK_EQ(result->property_dictionary(), object->property_dictionary());
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ CHECK_EQ(result->property_dictionary_swiss(),
+ object->property_dictionary_swiss());
+ } else {
+ CHECK_EQ(result->property_dictionary(), object->property_dictionary());
+ }
CHECK(!result->HasFastProperties());
#ifdef VERIFY_HEAP
isolate->heap()->Verify();
@@ -2445,6 +2453,90 @@ TEST(IsDebugActive) {
*debug_is_active = false;
}
+// Ensure that the kShortBuiltinCallsOldSpaceSizeThreshold constant can be used
+// for detecting whether the machine has >= 4GB of physical memory by checking
+// the max old space size.
+TEST(ShortBuiltinCallsThreshold) {
+ if (!V8_SHORT_BUILTIN_CALLS_BOOL) return;
+
+ const uint64_t kPhysicalMemoryThreshold = size_t{4} * GB;
+
+ size_t heap_size, old, young;
+
+ // If the physical memory is < kPhysicalMemoryThreshold then the old space
+ // size must be below the kShortBuiltinCallsOldSpaceSizeThreshold.
+ heap_size = Heap::HeapSizeFromPhysicalMemory(kPhysicalMemoryThreshold - MB);
+ i::Heap::GenerationSizesFromHeapSize(heap_size, &young, &old);
+ CHECK_LT(old, kShortBuiltinCallsOldSpaceSizeThreshold);
+
+ // If the physical memory is >= kPhysicalMemoryThreshold then the old space
+ // size must be at or above the kShortBuiltinCallsOldSpaceSizeThreshold.
+ heap_size = Heap::HeapSizeFromPhysicalMemory(kPhysicalMemoryThreshold);
+ i::Heap::GenerationSizesFromHeapSize(heap_size, &young, &old);
+ CHECK_GE(old, kShortBuiltinCallsOldSpaceSizeThreshold);
+
+ heap_size = Heap::HeapSizeFromPhysicalMemory(kPhysicalMemoryThreshold + MB);
+ i::Heap::GenerationSizesFromHeapSize(heap_size, &young, &old);
+ CHECK_GE(old, kShortBuiltinCallsOldSpaceSizeThreshold);
+}
+
+TEST(CallBuiltin) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ const int kNumParams = 2;
+ CodeAssemblerTester asm_tester(isolate,
+ kNumParams + 1); // Include receiver.
+ PromiseBuiltinsAssembler m(asm_tester.state());
+
+ {
+ auto receiver = m.Parameter<Object>(1);
+ auto name = m.Parameter<Name>(2);
+ auto context = m.Parameter<Context>(kNumParams + 3);
+
+ auto value = m.CallBuiltin(Builtins::kGetProperty, context, receiver, name);
+ m.Return(value);
+ }
+
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+
+ Factory* factory = isolate->factory();
+ Handle<Name> name = factory->InternalizeUtf8String("a");
+ Handle<Object> value(Smi::FromInt(153), isolate);
+ Handle<JSObject> object = factory->NewJSObjectWithNullProto();
+ JSObject::AddProperty(isolate, object, name, value, NONE);
+
+ Handle<Object> result = ft.Call(object, name).ToHandleChecked();
+ CHECK_EQ(*value, *result);
+}
+
+TEST(TailCallBuiltin) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ const int kNumParams = 2;
+ CodeAssemblerTester asm_tester(isolate,
+ kNumParams + 1); // Include receiver.
+ PromiseBuiltinsAssembler m(asm_tester.state());
+
+ {
+ auto receiver = m.Parameter<Object>(1);
+ auto name = m.Parameter<Name>(2);
+ auto context = m.Parameter<Context>(kNumParams + 3);
+
+ m.TailCallBuiltin(Builtins::kGetProperty, context, receiver, name);
+ }
+
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+
+ Factory* factory = isolate->factory();
+ Handle<Name> name = factory->InternalizeUtf8String("a");
+ Handle<Object> value(Smi::FromInt(153), isolate);
+ Handle<JSObject> object = factory->NewJSObjectWithNullProto();
+ JSObject::AddProperty(isolate, object, name, value, NONE);
+
+ Handle<Object> result = ft.Call(object, name).ToHandleChecked();
+ CHECK_EQ(*value, *result);
+}
+
class AppendJSArrayCodeStubAssembler : public CodeStubAssembler {
public:
AppendJSArrayCodeStubAssembler(compiler::CodeAssemblerState* state,
@@ -2595,8 +2687,7 @@ TEST(IsPromiseHookEnabled) {
CodeStubAssembler m(asm_tester.state());
m.Return(
- m.SelectBooleanConstant(
- m.IsIsolatePromiseHookEnabledOrHasAsyncEventDelegate()));
+ m.SelectBooleanConstant(m.IsPromiseHookEnabledOrHasAsyncEventDelegate()));
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
Handle<Object> result =
@@ -3136,7 +3227,7 @@ TEST(DirectMemoryTest16BitWord32) {
for (size_t i = 0; i < element_count; ++i) {
for (size_t j = 0; j < element_count; ++j) {
- Node* loaded = m.LoadBufferData<Uint16T>(
+ TNode<Uint16T> loaded = m.LoadBufferData<Uint16T>(
buffer_node1, static_cast<int>(i * sizeof(int16_t)));
TNode<Word32T> masked = m.Word32And(loaded, constants[j]);
if ((buffer[j] & buffer[i]) != 0) {
@@ -3881,6 +3972,7 @@ TEST(InstructionSchedulingCallerSavedRegisters) {
FLAG_turbo_instruction_scheduling = old_turbo_instruction_scheduling;
}
+#if V8_ENABLE_WEBASSEMBLY
TEST(WasmInt32ToHeapNumber) {
Isolate* isolate(CcTest::InitIsolateOnce());
@@ -4107,6 +4199,7 @@ TEST(WasmTaggedToFloat64) {
}
}
}
+#endif // V8_ENABLE_WEBASSEMBLY
TEST(SmiUntagLeftShiftOptimization) {
Isolate* isolate(CcTest::InitIsolateOnce());
@@ -4155,6 +4248,85 @@ TEST(SmiUntagComparisonOptimization) {
FunctionTester ft(asm_tester.GenerateCode(options), kNumParams);
}
+TEST(PopCount) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ CodeAssemblerTester asm_tester(isolate);
+ CodeStubAssembler m(asm_tester.state());
+
+ const std::vector<std::pair<uint32_t, int>> test_cases = {
+ {0, 0},
+ {1, 1},
+ {(1 << 31), 1},
+ {0b01010101010101010101010101010101, 16},
+ {0b10101010101010101010101010101010, 16},
+ {0b11100011100000011100011111000111, 17} // arbitrarily chosen
+ };
+
+ for (std::pair<uint32_t, int> test_case : test_cases) {
+ uint32_t value32 = test_case.first;
+ uint64_t value64 = (static_cast<uint64_t>(value32) << 32) | value32;
+ int expected_pop32 = test_case.second;
+ int expected_pop64 = 2 * expected_pop32;
+
+ TNode<Int32T> pop32 = m.PopulationCount32(m.Uint32Constant(value32));
+ CSA_CHECK(&m, m.Word32Equal(pop32, m.Int32Constant(expected_pop32)));
+
+ if (m.Is64()) {
+ // TODO(emrich): enable once 64-bit operations are supported on 32-bit
+ // architectures.
+
+ TNode<Int64T> pop64 = m.PopulationCount64(m.Uint64Constant(value64));
+ CSA_CHECK(&m, m.Word64Equal(pop64, m.Int64Constant(expected_pop64)));
+ }
+ }
+ m.Return(m.UndefinedConstant());
+
+ FunctionTester ft(asm_tester.GenerateCode());
+ ft.Call();
+}
+
+TEST(CountTrailingZeros) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ CodeAssemblerTester asm_tester(isolate);
+ CodeStubAssembler m(asm_tester.state());
+
+ const std::vector<std::pair<uint32_t, int>> test_cases = {
+ {1, 0},
+ {2, 1},
+ {(0b0101010'0000'0000), 9},
+ {(1 << 31), 31},
+ {std::numeric_limits<uint32_t>::max(), 0},
+ };
+
+ for (std::pair<uint32_t, int> test_case : test_cases) {
+ uint32_t value32 = test_case.first;
+ uint64_t value64 = static_cast<uint64_t>(value32) << 32;
+ int expected_ctz32 = test_case.second;
+ int expected_ctz64 = expected_ctz32 + 32;
+
+ TNode<Int32T> pop32 = m.CountTrailingZeros32(m.Uint32Constant(value32));
+ CSA_CHECK(&m, m.Word32Equal(pop32, m.Int32Constant(expected_ctz32)));
+
+ if (m.Is64()) {
+ // TODO(emrich): enable once 64-bit operations are supported on 32-bit
+ // architectures.
+
+ TNode<Int64T> pop64_ext =
+ m.CountTrailingZeros64(m.Uint64Constant(value32));
+ TNode<Int64T> pop64 = m.CountTrailingZeros64(m.Uint64Constant(value64));
+
+ CSA_CHECK(&m, m.Word64Equal(pop64_ext, m.Int64Constant(expected_ctz32)));
+ CSA_CHECK(&m, m.Word64Equal(pop64, m.Int64Constant(expected_ctz64)));
+ }
+ }
+ m.Return(m.UndefinedConstant());
+
+ FunctionTester ft(asm_tester.GenerateCode());
+ ft.Call();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index a74f3e6bd73..3b895d7420b 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -947,9 +947,10 @@ static int AllocationSitesCount(Heap* heap) {
TEST(DecideToPretenureDuringCompilation) {
// The test makes use of optimization and relies on deterministic
// compilation.
- if (!i::FLAG_opt || i::FLAG_always_opt || i::FLAG_minor_mc ||
- i::FLAG_stress_incremental_marking || i::FLAG_optimize_for_size ||
- i::FLAG_turbo_nci || i::FLAG_stress_concurrent_allocation) {
+ if (!i::FLAG_opt || i::FLAG_always_opt || i::FLAG_always_sparkplug ||
+ i::FLAG_minor_mc || i::FLAG_stress_incremental_marking ||
+ i::FLAG_optimize_for_size || i::FLAG_turbo_nci ||
+ i::FLAG_stress_concurrent_allocation) {
return;
}
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 61ceae728f5..cd1fa753ba5 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -35,6 +35,7 @@
#include "include/v8-profiler.h"
#include "src/api/api-inl.h"
#include "src/base/platform/platform.h"
+#include "src/codegen/compilation-cache.h"
#include "src/codegen/source-position-table.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/heap/spaces.h"
@@ -178,7 +179,8 @@ TEST(CodeEvents) {
v8::base::TimeDelta::FromMicroseconds(100), true);
CHECK(processor->Start());
ProfilerListener profiler_listener(isolate, processor,
- *code_observer.strings());
+ *code_observer.strings(),
+ *code_observer.weak_code_registry());
isolate->logger()->AddCodeEventListener(&profiler_listener);
// Enqueue code creation events.
@@ -243,7 +245,8 @@ TEST(TickEvents) {
profiles->StartProfiling("");
CHECK(processor->Start());
ProfilerListener profiler_listener(isolate, processor,
- *code_observer->strings());
+ *code_observer->strings(),
+ *code_observer->weak_code_registry());
isolate->logger()->AddCodeEventListener(&profiler_listener);
profiler_listener.CodeCreateEvent(i::Logger::BUILTIN_TAG, frame1_code, "bbb");
@@ -404,7 +407,8 @@ TEST(Issue1398) {
profiles->StartProfiling("");
CHECK(processor->Start());
ProfilerListener profiler_listener(isolate, processor,
- *code_observer->strings());
+ *code_observer->strings(),
+ *code_observer->weak_code_registry());
profiler_listener.CodeCreateEvent(i::Logger::BUILTIN_TAG, code, "bbb");
@@ -558,7 +562,7 @@ v8::CpuProfile* ProfilerHelper::Run(v8::Local<v8::Function> function,
ProfilingMode mode, unsigned max_samples) {
v8::Local<v8::String> profile_name = v8_str("my_profile");
- profiler_->SetSamplingInterval(100);
+ profiler_->SetSamplingInterval(50);
profiler_->StartProfiling(profile_name, {mode, max_samples, 0});
v8::internal::CpuProfiler* iprofiler =
@@ -1272,7 +1276,8 @@ static void TickLines(bool optimize) {
isolate->logger()->LogCompiledFunctions();
CHECK(processor->Start());
ProfilerListener profiler_listener(isolate, processor,
- *code_observer->strings());
+ *code_observer->strings(),
+ *code_observer->weak_code_registry());
// Enqueue code creation events.
i::Handle<i::String> str = factory->NewStringFromAsciiChecked(func_name);
@@ -2212,16 +2217,22 @@ TEST(FunctionDetails) {
const v8::CpuProfile* profile = i::ProfilerExtension::last_profile;
reinterpret_cast<const i::CpuProfile*>(profile)->Print();
// The tree should look like this:
- // 0 (root) 0 #1
- // 0 "" 19 #2 no reason script_b:1
- // 0 baz 19 #3 TryCatchStatement script_b:3
- // 0 foo 18 #4 TryCatchStatement script_a:2
- // 1 bar 18 #5 no reason script_a:3
+ // 0 (root):0 3 0 #1
+ // 0 :0 0 5 #2 script_b:0
+ // 0 baz:3 0 5 #3 script_b:3
+ // bailed out due to 'Optimization is always disabled'
+ // 0 foo:4 0 4 #4 script_a:4
+ // bailed out due to 'Optimization is always disabled'
+ // 0 bar:5 0 4 #5 script_a:5
+ // bailed out due to 'Optimization is always disabled'
+ // 0 startProfiling:0 2 0 #6
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
CHECK_EQ(root->GetParent(), nullptr);
const v8::CpuProfileNode* script = GetChild(env, root, "");
CheckFunctionDetails(env->GetIsolate(), script, "", "script_b", true,
- script_b->GetUnboundScript()->GetId(), 1, 1, root);
+ script_b->GetUnboundScript()->GetId(),
+ v8::CpuProfileNode::kNoLineNumberInfo,
+ CpuProfileNode::kNoColumnNumberInfo, root);
const v8::CpuProfileNode* baz = GetChild(env, script, "baz");
CheckFunctionDetails(env->GetIsolate(), baz, "baz", "script_b", true,
script_b->GetUnboundScript()->GetId(), 3, 16, script);
@@ -2290,7 +2301,7 @@ TEST(FunctionDetailsInlining) {
// The tree should look like this:
// 0 (root) 0 #1
// 5 (program) 0 #6
- // 2 14 #2 script_a:1
+ // 2 14 #2 script_a:0
// ;;; deopted at script_id: 14 position: 299 with reason 'Insufficient
// type feedback for call'.
// 1 alpha 14 #4 script_a:1
@@ -2301,7 +2312,9 @@ TEST(FunctionDetailsInlining) {
CHECK_EQ(root->GetParent(), nullptr);
const v8::CpuProfileNode* script = GetChild(env, root, "");
CheckFunctionDetails(env->GetIsolate(), script, "", "script_a", false,
- script_a->GetUnboundScript()->GetId(), 1, 1, root);
+ script_a->GetUnboundScript()->GetId(),
+ v8::CpuProfileNode::kNoLineNumberInfo,
+ v8::CpuProfileNode::kNoColumnNumberInfo, root);
const v8::CpuProfileNode* alpha = FindChild(env, script, "alpha");
// Return early if profiling didn't sample alpha.
if (!alpha) return;
@@ -4082,6 +4095,9 @@ TEST(BytecodeFlushEventsEagerLogging) {
FLAG_always_opt = false;
i::FLAG_optimize_for_size = false;
#endif // V8_LITE_MODE
+#if ENABLE_SPARKPLUG
+ FLAG_always_sparkplug = false;
+#endif // ENABLE_SPARKPLUG
i::FLAG_flush_bytecode = true;
i::FLAG_allow_natives_syntax = true;
@@ -4147,6 +4163,40 @@ TEST(BytecodeFlushEventsEagerLogging) {
}
}
+// Ensure that unused code entries are removed after GC with eager logging.
+TEST(ClearUnusedWithEagerLogging) {
+ ManualGCScope manual_gc;
+ TestSetup test_setup;
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::HandleScope scope(isolate);
+
+ CpuProfiler profiler(isolate, kDebugNaming, kEagerLogging);
+
+ CodeMap* code_map = profiler.code_map_for_test();
+ size_t initial_size = code_map->size();
+
+ {
+ // Create and run a new script and function, generating 2 code objects.
+ // Do this in a new context, so that some_func isn't retained by the
+ // context's global object past this scope.
+ i::HandleScope inner_scope(isolate);
+ LocalContext env;
+ CompileRun(
+ "function some_func() {}"
+ "some_func();");
+ CHECK_GT(code_map->size(), initial_size);
+ }
+
+ // Clear the compilation cache so that there are no more references to the
+ // given two functions.
+ isolate->compilation_cache()->Clear();
+
+ CcTest::CollectAllGarbage();
+
+  // Verify that the GC removed the unused code entries and the CodeMap is
+  // back to its initial size.
+ CHECK_EQ(code_map->size(), initial_size);
+}
+
} // namespace test_cpu_profiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-debug-helper.cc b/deps/v8/test/cctest/test-debug-helper.cc
index f1c2d400274..6e3033d993c 100644
--- a/deps/v8/test/cctest/test-debug-helper.cc
+++ b/deps/v8/test/cctest/test-debug-helper.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/api/api-inl.h"
+#include "src/debug/debug.h"
#include "src/execution/frames-inl.h"
#include "src/flags/flags.h"
#include "src/heap/read-only-spaces.h"
@@ -421,9 +422,29 @@ static void FrameIterationCheck(
d::StackFrameResultPtr props = d::GetStackFrame(frame->fp(), &ReadMemory);
if (frame->is_java_script()) {
JavaScriptFrame* js_frame = JavaScriptFrame::cast(frame);
- CHECK_EQ(props->num_properties, 1);
+ CHECK_EQ(props->num_properties, 5);
+ auto js_function = js_frame->function();
CheckProp(*props->properties[0], "v8::internal::JSFunction",
- "currently_executing_jsfunction", js_frame->function().ptr());
+ "currently_executing_jsfunction", js_function.ptr());
+ auto shared_function_info = js_function.shared();
+ auto script = i::Script::cast(shared_function_info.script());
+ CheckProp(*props->properties[1], "v8::internal::Object", "script_name",
+ static_cast<i::Tagged_t>(script.name().ptr()));
+ CheckProp(*props->properties[2], "v8::internal::Object", "script_source",
+ static_cast<i::Tagged_t>(script.source().ptr()));
+
+ auto scope_info = shared_function_info.scope_info();
+ CheckProp(*props->properties[3], "v8::internal::Object", "function_name",
+ static_cast<i::Tagged_t>(scope_info.FunctionName().ptr()));
+
+ CheckProp(*props->properties[4], "", "function_character_offset");
+ const d::ObjectProperty& function_character_offset =
+ *props->properties[4];
+ CHECK_EQ(function_character_offset.num_struct_fields, 2);
+ CheckStructProp(*function_character_offset.struct_fields[0],
+ "v8::internal::Object", "start", 0);
+ CheckStructProp(*function_character_offset.struct_fields[1],
+ "v8::internal::Object", "end", 4);
} else {
CHECK_EQ(props->num_properties, 0);
}
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 9ffc69b682a..2723f5fa14c 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -1354,6 +1354,49 @@ TEST(BreakPointApiAccessor) {
CheckDebuggerUnloaded();
}
+TEST(Regress1163547) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+
+ DebugEventCounter delegate;
+ v8::debug::SetDebugDelegate(env->GetIsolate(), &delegate);
+
+ i::Handle<i::BreakPoint> bp;
+
+ auto constructor_tmpl = v8::FunctionTemplate::New(env->GetIsolate());
+ auto prototype_tmpl = constructor_tmpl->PrototypeTemplate();
+ auto accessor_tmpl =
+ v8::FunctionTemplate::New(env->GetIsolate(), NoOpFunctionCallback);
+ prototype_tmpl->SetAccessorProperty(v8_str("f"), accessor_tmpl);
+
+ auto constructor =
+ constructor_tmpl->GetFunction(env.local()).ToLocalChecked();
+ env->Global()->Set(env.local(), v8_str("C"), constructor).ToChecked();
+
+ CompileRun("o = new C();");
+ v8::Local<v8::Function> function =
+ CompileRun("Object.getOwnPropertyDescriptor(C.prototype, 'f').get")
+ .As<v8::Function>();
+
+ // === Test API accessor ===
+ break_point_hit_count = 0;
+
+ // At this point, the C.prototype - which holds the "f" accessor - is in
+ // dictionary mode.
+ auto constructor_fun =
+ Handle<i::JSFunction>::cast(v8::Utils::OpenHandle(*constructor));
+ CHECK(!i::JSObject::cast(constructor_fun->prototype()).HasFastProperties());
+
+ // Run with breakpoint.
+ bp = SetBreakPoint(function, 0);
+
+ CompileRun("o.f");
+ CHECK_EQ(1, break_point_hit_count);
+
+ v8::debug::SetDebugDelegate(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded();
+}
+
TEST(BreakPointInlineApiFunction) {
i::FLAG_allow_natives_syntax = true;
LocalContext env;
@@ -4545,7 +4588,9 @@ UNINITIALIZED_TEST(LoadedAtStartupScripts) {
CHECK_EQ(count_by_type[i::Script::TYPE_NATIVE], 0);
CHECK_EQ(count_by_type[i::Script::TYPE_EXTENSION], 1);
CHECK_EQ(count_by_type[i::Script::TYPE_NORMAL], 1);
+#if V8_ENABLE_WEBASSEMBLY
CHECK_EQ(count_by_type[i::Script::TYPE_WASM], 0);
+#endif // V8_ENABLE_WEBASSEMBLY
CHECK_EQ(count_by_type[i::Script::TYPE_INSPECTOR], 0);
i::Handle<i::Script> gc_script =
@@ -5498,39 +5543,44 @@ TEST(TerminateOnResumeFromMicrotask) {
class FutexInterruptionThread : public v8::base::Thread {
public:
- FutexInterruptionThread(v8::Isolate* isolate, v8::base::Semaphore* sem)
+ FutexInterruptionThread(v8::Isolate* isolate, v8::base::Semaphore* enter,
+ v8::base::Semaphore* exit)
: Thread(Options("FutexInterruptionThread")),
isolate_(isolate),
- sem_(sem) {}
+ enter_(enter),
+ exit_(exit) {}
void Run() override {
- // Wait a bit before terminating.
- v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(100));
- sem_->Wait();
+ enter_->Wait();
v8::debug::SetTerminateOnResume(isolate_);
+ exit_->Signal();
}
private:
v8::Isolate* isolate_;
- v8::base::Semaphore* sem_;
+ v8::base::Semaphore* enter_;
+ v8::base::Semaphore* exit_;
};
namespace {
class SemaphoreTriggerOnBreak : public v8::debug::DebugDelegate {
public:
- SemaphoreTriggerOnBreak() : sem_(0) {}
+ SemaphoreTriggerOnBreak() : enter_(0), exit_(0) {}
void BreakProgramRequested(v8::Local<v8::Context> paused_context,
const std::vector<v8::debug::BreakpointId>&
inspector_break_points_hit) override {
break_count_++;
- sem_.Signal();
+ enter_.Signal();
+ exit_.Wait();
}
- v8::base::Semaphore* semaphore() { return &sem_; }
+ v8::base::Semaphore* enter() { return &enter_; }
+ v8::base::Semaphore* exit() { return &exit_; }
int break_count() const { return break_count_; }
private:
- v8::base::Semaphore sem_;
+ v8::base::Semaphore enter_;
+ v8::base::Semaphore exit_;
int break_count_ = 0;
};
} // anonymous namespace
@@ -5543,8 +5593,8 @@ TEST(TerminateOnResumeFromOtherThread) {
SemaphoreTriggerOnBreak delegate;
v8::debug::SetDebugDelegate(env->GetIsolate(), &delegate);
- FutexInterruptionThread timeout_thread(env->GetIsolate(),
- delegate.semaphore());
+ FutexInterruptionThread timeout_thread(env->GetIsolate(), delegate.enter(),
+ delegate.exit());
CHECK(timeout_thread.Start());
v8::Local<v8::Context> context = env.local();
@@ -5575,7 +5625,7 @@ namespace {
class InterruptionBreakRightNow : public v8::base::Thread {
public:
explicit InterruptionBreakRightNow(v8::Isolate* isolate)
- : Thread(Options("FutexInterruptionThread")), isolate_(isolate) {}
+ : Thread(Options("InterruptionBreakRightNow")), isolate_(isolate) {}
void Run() override {
// Wait a bit before terminating.
diff --git a/deps/v8/test/cctest/test-descriptor-array.cc b/deps/v8/test/cctest/test-descriptor-array.cc
index a8b9e959526..1d933d2190c 100644
--- a/deps/v8/test/cctest/test-descriptor-array.cc
+++ b/deps/v8/test/cctest/test-descriptor-array.cc
@@ -56,7 +56,7 @@ void CheckDescriptorArrayLookups(Isolate* isolate, Handle<Map> map,
// Test C++ implementation.
{
DisallowGarbageCollection no_gc;
- DescriptorArray descriptors = map->instance_descriptors(kRelaxedLoad);
+ DescriptorArray descriptors = map->instance_descriptors(isolate);
DCHECK(descriptors.IsSortedNoDuplicates());
int nof_descriptors = descriptors.number_of_descriptors();
@@ -91,8 +91,8 @@ void CheckTransitionArrayLookups(Isolate* isolate,
for (size_t i = 0; i < maps.size(); ++i) {
Map expected_map = *maps[i];
- Name name = expected_map.instance_descriptors(kRelaxedLoad)
- .GetKey(expected_map.LastAdded());
+ Name name = expected_map.instance_descriptors(isolate).GetKey(
+ expected_map.LastAdded());
Map map = transitions->SearchAndGetTargetForTesting(PropertyKind::kData,
name, NONE);
@@ -105,8 +105,8 @@ void CheckTransitionArrayLookups(Isolate* isolate,
if (!FLAG_jitless) {
for (size_t i = 0; i < maps.size(); ++i) {
Handle<Map> expected_map = maps[i];
- Handle<Name> name(expected_map->instance_descriptors(kRelaxedLoad)
- .GetKey(expected_map->LastAdded()),
+ Handle<Name> name(expected_map->instance_descriptors(isolate).GetKey(
+ expected_map->LastAdded()),
isolate);
Handle<Object> transition_map =
@@ -260,7 +260,7 @@ TEST(DescriptorArrayHashCollisionMassive) {
CheckDescriptorArrayLookups(isolate, map, names, csa_lookup);
// Sort descriptor array and check it again.
- map->instance_descriptors(kRelaxedLoad).Sort();
+ map->instance_descriptors(isolate).Sort();
CheckDescriptorArrayLookups(isolate, map, names, csa_lookup);
}
@@ -309,7 +309,7 @@ TEST(DescriptorArrayHashCollision) {
CheckDescriptorArrayLookups(isolate, map, names, csa_lookup);
// Sort descriptor array and check it again.
- map->instance_descriptors(kRelaxedLoad).Sort();
+ map->instance_descriptors(isolate).Sort();
CheckDescriptorArrayLookups(isolate, map, names, csa_lookup);
}
diff --git a/deps/v8/test/cctest/test-disasm-arm64.cc b/deps/v8/test/cctest/test-disasm-arm64.cc
index 441ae53f320..551488ab212 100644
--- a/deps/v8/test/cctest/test-disasm-arm64.cc
+++ b/deps/v8/test/cctest/test-disasm-arm64.cc
@@ -1518,24 +1518,6 @@ TEST_(load_literal) {
CLEANUP();
}
-TEST(prfm_regoffset) {
- SET_UP_ASM();
-
- COMPARE(prfm(PLIL1KEEP, MemOperand(x1, x2)), "prfm plil1keep, [x1, x2]");
- COMPARE(prfm(PLIL1STRM, MemOperand(x3, w4, SXTW)),
- "prfm plil1strm, [x3, w4, sxtw]");
- COMPARE(prfm(PLIL2KEEP, MemOperand(x5, x6, LSL, 3)),
- "prfm plil2keep, [x5, x6, lsl #3]");
-
- COMPARE(prfm(PLIL2STRM, MemOperand(sp, xzr)), "prfm plil2strm, [sp, xzr]");
- COMPARE(prfm(PLIL3KEEP, MemOperand(sp, wzr, SXTW)),
- "prfm plil3keep, [sp, wzr, sxtw]");
- COMPARE(prfm(PLIL3STRM, MemOperand(sp, xzr, LSL, 3)),
- "prfm plil3strm, [sp, xzr, lsl #3]");
-
- CLEANUP();
-}
-
TEST_(cond_select) {
SET_UP_ASM();
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index b9a6d1a7c5d..f387fd29535 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -14,7 +14,10 @@
#include "src/init/v8.h"
#include "src/objects/field-type.h"
#include "src/objects/heap-number-inl.h"
+#include "src/objects/internal-index.h"
+#include "src/objects/map-updater.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/property-details.h"
#include "src/objects/property.h"
#include "src/objects/struct-inl.h"
#include "src/objects/transitions.h"
@@ -275,7 +278,7 @@ class Expectations {
CHECK_EQ(expected_nof, map.NumberOfOwnDescriptors());
CHECK(!map.is_dictionary_map());
- DescriptorArray descriptors = map.instance_descriptors(kRelaxedLoad);
+ DescriptorArray descriptors = map.instance_descriptors();
CHECK(expected_nof <= number_of_properties_);
for (InternalIndex i : InternalIndex::Range(expected_nof)) {
if (!Check(descriptors, i)) {
@@ -444,8 +447,9 @@ class Expectations {
Handle<Object> getter(pair->getter(), isolate);
Handle<Object> setter(pair->setter(), isolate);
- InternalIndex descriptor = map->instance_descriptors(kRelaxedLoad)
- .SearchWithCache(isolate, *name, *map);
+ InternalIndex descriptor =
+ map->instance_descriptors(isolate).SearchWithCache(isolate, *name,
+ *map);
map = Map::TransitionToAccessorProperty(isolate, map, name, descriptor,
getter, setter, attributes);
CHECK(!map->is_deprecated());
@@ -460,6 +464,23 @@ class Expectations {
// branch.
//
+namespace {
+
+Handle<Map> ReconfigureProperty(Isolate* isolate, Handle<Map> map,
+ InternalIndex modify_index,
+ PropertyKind new_kind,
+ PropertyAttributes new_attributes,
+ Representation new_representation,
+ Handle<FieldType> new_field_type) {
+ DCHECK_EQ(kData, new_kind); // Only kData case is supported.
+ MapUpdater mu(isolate, map);
+ return mu.ReconfigureToDataField(modify_index, new_attributes,
+ PropertyConstness::kConst,
+ new_representation, new_field_type);
+}
+
+} // namespace
+
TEST(ReconfigureAccessorToNonExistingDataField) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -481,8 +502,8 @@ TEST(ReconfigureAccessorToNonExistingDataField) {
CHECK(expectations.Check(*map));
InternalIndex first(0);
- Handle<Map> new_map = Map::ReconfigureProperty(
- isolate, map, first, kData, NONE, Representation::None(), none_type);
+ Handle<Map> new_map = ReconfigureProperty(isolate, map, first, kData, NONE,
+ Representation::None(), none_type);
// |map| did not change except marked unstable.
CHECK(!map->is_deprecated());
CHECK(!map->is_stable());
@@ -496,8 +517,8 @@ TEST(ReconfigureAccessorToNonExistingDataField) {
CHECK(new_map->is_stable());
CHECK(expectations.Check(*new_map));
- Handle<Map> new_map2 = Map::ReconfigureProperty(
- isolate, map, first, kData, NONE, Representation::None(), none_type);
+ Handle<Map> new_map2 = ReconfigureProperty(isolate, map, first, kData, NONE,
+ Representation::None(), none_type);
CHECK_EQ(*new_map, *new_map2);
Handle<Object> value(Smi::zero(), isolate);
@@ -553,7 +574,7 @@ TEST(ReconfigureAccessorToNonExistingDataFieldHeavy) {
CHECK_EQ(1, obj->map().NumberOfOwnDescriptors());
InternalIndex first(0);
CHECK(obj->map()
- .instance_descriptors(kRelaxedLoad)
+ .instance_descriptors(isolate)
.GetStrongValue(first)
.IsAccessorPair());
@@ -669,7 +690,7 @@ void TestGeneralizeField(int detach_property_at_index, int property_index,
CHECK(expectations.Check(*map));
if (is_detached_map) {
- detach_point_map = Map::ReconfigureProperty(
+ detach_point_map = ReconfigureProperty(
isolate, detach_point_map, InternalIndex(detach_property_at_index),
kData, NONE, Representation::Double(), any_type);
expectations.SetDataField(detach_property_at_index,
@@ -702,8 +723,8 @@ void TestGeneralizeField(int detach_property_at_index, int property_index,
// Create new maps by generalizing representation of propX field.
Handle<Map> new_map =
- Map::ReconfigureProperty(isolate, map, InternalIndex(property_index),
- kData, NONE, to.representation, to.type);
+ ReconfigureProperty(isolate, map, InternalIndex(property_index), kData,
+ NONE, to.representation, to.type);
expectations.SetDataField(property_index, expected.constness,
expected.representation, expected.type);
@@ -977,8 +998,8 @@ TEST(GeneralizeFieldWithAccessorProperties) {
continue;
}
Handle<Map> new_map =
- Map::ReconfigureProperty(isolate, map, InternalIndex(i), kData, NONE,
- Representation::Double(), any_type);
+ ReconfigureProperty(isolate, map, InternalIndex(i), kData, NONE,
+ Representation::Double(), any_type);
maps[i] = new_map;
expectations.SetDataField(i, PropertyConstness::kMutable,
@@ -1103,16 +1124,7 @@ void TestReconfigureDataFieldAttribute_GeneralizeField(
CHECK(!map2->is_stable());
CHECK(!map2->is_deprecated());
CHECK_NE(*map2, *new_map);
- // If the "source" property was const then update constness expectations for
- // "source" map and ensure the deoptimization dependency was triggered.
- if (to.constness == PropertyConstness::kConst) {
- expectations2.SetDataField(kSplitProp, READ_ONLY,
- PropertyConstness::kMutable, to.representation,
- to.type);
- CHECK(code_src_field_const->marked_for_deoptimization());
- } else {
- CHECK(!code_src_field_const->marked_for_deoptimization());
- }
+ CHECK(!code_src_field_const->marked_for_deoptimization());
CHECK(expectations2.Check(*map2));
for (int i = kSplitProp; i < kPropCount; i++) {
@@ -1797,7 +1809,7 @@ static void TestReconfigureElementsKind_GeneralizeFieldInPlace(
// Reconfigure elements kinds of |map2|, which should generalize
// representations in |map|.
Handle<Map> new_map =
- Map::ReconfigureElementsKind(isolate, map2, PACKED_ELEMENTS);
+ MapUpdater{isolate, map2}.ReconfigureElementsKind(PACKED_ELEMENTS);
// |map2| should be left unchanged but marked unstable.
CHECK(!map2->is_stable());
@@ -2063,9 +2075,8 @@ TEST(ReconfigurePropertySplitMapTransitionsOverflow) {
map2 = handle(target, isolate);
}
- map2 = Map::ReconfigureProperty(isolate, map2, InternalIndex(kSplitProp),
- kData, NONE, Representation::Double(),
- any_type);
+ map2 = ReconfigureProperty(isolate, map2, InternalIndex(kSplitProp), kData,
+ NONE, Representation::Double(), any_type);
expectations.SetDataField(kSplitProp, PropertyConstness::kMutable,
Representation::Double(), any_type);
@@ -2162,8 +2173,8 @@ static void TestGeneralizeFieldWithSpecialTransition(
Handle<Map> maps[kPropCount];
for (int i = 0; i < kPropCount; i++) {
Handle<Map> new_map =
- Map::ReconfigureProperty(isolate, map, InternalIndex(i), kData, NONE,
- to.representation, to.type);
+ ReconfigureProperty(isolate, map, InternalIndex(i), kData, NONE,
+ to.representation, to.type);
maps[i] = new_map;
expectations.SetDataField(i, expected.constness, expected.representation,
@@ -2829,13 +2840,12 @@ void TestStoreToConstantField(const char* store_func_source,
CHECK(!map->is_deprecated());
CHECK_EQ(1, map->NumberOfOwnDescriptors());
InternalIndex first(0);
- CHECK(map->instance_descriptors(kRelaxedLoad)
+ CHECK(map->instance_descriptors(isolate)
.GetDetails(first)
.representation()
.Equals(expected_rep));
- CHECK_EQ(
- PropertyConstness::kConst,
- map->instance_descriptors(kRelaxedLoad).GetDetails(first).constness());
+ CHECK_EQ(PropertyConstness::kConst,
+ map->instance_descriptors(isolate).GetDetails(first).constness());
// Store value2 to obj2 and check that it got same map and property details
// did not change.
@@ -2847,13 +2857,12 @@ void TestStoreToConstantField(const char* store_func_source,
CHECK(!map->is_deprecated());
CHECK_EQ(1, map->NumberOfOwnDescriptors());
- CHECK(map->instance_descriptors(kRelaxedLoad)
+ CHECK(map->instance_descriptors(isolate)
.GetDetails(first)
.representation()
.Equals(expected_rep));
- CHECK_EQ(
- PropertyConstness::kConst,
- map->instance_descriptors(kRelaxedLoad).GetDetails(first).constness());
+ CHECK_EQ(PropertyConstness::kConst,
+ map->instance_descriptors(isolate).GetDetails(first).constness());
// Store value2 to obj1 and check that property became mutable.
Call(isolate, store_func, obj1, value2).Check();
@@ -2863,13 +2872,12 @@ void TestStoreToConstantField(const char* store_func_source,
CHECK(!map->is_deprecated());
CHECK_EQ(1, map->NumberOfOwnDescriptors());
- CHECK(map->instance_descriptors(kRelaxedLoad)
+ CHECK(map->instance_descriptors(isolate)
.GetDetails(first)
.representation()
.Equals(expected_rep));
- CHECK_EQ(
- expected_constness,
- map->instance_descriptors(kRelaxedLoad).GetDetails(first).constness());
+ CHECK_EQ(expected_constness,
+ map->instance_descriptors(isolate).GetDetails(first).constness());
}
void TestStoreToConstantField_PlusMinusZero(const char* store_func_source,
@@ -3030,6 +3038,122 @@ TEST(RepresentationPredicatesAreInSync) {
}
}
+TEST(DeletePropertyGeneralizesConstness) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Handle<FieldType> any_type = FieldType::Any(isolate);
+
+ // Create a map with some properties.
+ Handle<Map> initial_map = Map::Create(isolate, kPropCount + 3);
+ Handle<Map> map = initial_map;
+ for (int i = 0; i < kPropCount; i++) {
+ Handle<String> name = CcTest::MakeName("prop", i);
+ map = Map::CopyWithField(isolate, map, name, any_type, NONE,
+ PropertyConstness::kConst, Representation::Smi(),
+ INSERT_TRANSITION)
+ .ToHandleChecked();
+ }
+ Handle<Map> parent_map = map;
+ CHECK(!map->is_deprecated());
+
+ Handle<String> name_x = CcTest::MakeString("x");
+ Handle<String> name_y = CcTest::MakeString("y");
+
+ map = Map::CopyWithField(isolate, parent_map, name_x, any_type, NONE,
+ PropertyConstness::kConst, Representation::Smi(),
+ INSERT_TRANSITION)
+ .ToHandleChecked();
+
+ // Create an object, initialize its properties and add a couple of clones.
+ Handle<JSObject> object1 = isolate->factory()->NewJSObjectFromMap(map);
+ for (int i = 0; i < kPropCount; i++) {
+ FieldIndex index = FieldIndex::ForDescriptor(*map, InternalIndex(i));
+ object1->FastPropertyAtPut(index, Smi::FromInt(i));
+ }
+ Handle<JSObject> object2 = isolate->factory()->CopyJSObject(object1);
+
+ CHECK(!map->is_deprecated());
+ CHECK(!parent_map->is_deprecated());
+
+  // Transition to Double can't be done in-place, so it must deprecate |map|.
+ CHECK(!Representation::Smi().CanBeInPlaceChangedTo(Representation::Double()));
+
+ // Reconfigure one of the first properties to make the whole transition tree
+ // deprecated (including |parent_map| and |map|).
+ Handle<Map> new_map =
+ ReconfigureProperty(isolate, map, InternalIndex(0), PropertyKind::kData,
+ NONE, Representation::Double(), any_type);
+ CHECK(map->is_deprecated());
+ CHECK(parent_map->is_deprecated());
+ CHECK(!new_map->is_deprecated());
+ // The "x" property is still kConst.
+ CHECK_EQ(new_map->GetLastDescriptorDetails(isolate).constness(),
+ PropertyConstness::kConst);
+
+ Handle<Map> new_parent_map = Map::Update(isolate, parent_map);
+ CHECK(!new_parent_map->is_deprecated());
+
+ // |new_parent_map| must have exactly one outgoing transition to |new_map|.
+ {
+ TransitionsAccessor ta(isolate, new_parent_map);
+ CHECK_EQ(ta.NumberOfTransitions(), 1);
+ CHECK_EQ(ta.GetTarget(0), *new_map);
+ }
+
+ // Deletion of the property from |object1| must migrate it to |new_parent_map|
+ // which is an up-to-date version of the |parent_map|. The |new_map|'s "x"
+ // property should be marked as mutable.
+ CHECK_EQ(object1->map(isolate), *map);
+ CHECK(Runtime::DeleteObjectProperty(isolate, object1, name_x,
+ LanguageMode::kSloppy)
+ .ToChecked());
+ CHECK_EQ(object1->map(isolate), *new_parent_map);
+ CHECK_EQ(new_map->GetLastDescriptorDetails(isolate).constness(),
+ PropertyConstness::kMutable);
+
+ // Now add transitions to "x" and "y" properties from |new_parent_map|.
+ std::vector<Handle<Map>> transitions;
+ Handle<Object> value = handle(Smi::FromInt(0), isolate);
+ for (int i = 0; i < kPropertyAttributesCombinationsCount; i++) {
+ PropertyAttributes attributes = static_cast<PropertyAttributes>(i);
+
+ Handle<Map> tmp;
+ // Add some transitions to "x" and "y".
+ tmp = Map::TransitionToDataProperty(isolate, new_parent_map, name_x, value,
+ attributes, PropertyConstness::kConst,
+ StoreOrigin::kNamed);
+ CHECK(!tmp->map(isolate).is_dictionary_map());
+ transitions.push_back(tmp);
+
+ tmp = Map::TransitionToDataProperty(isolate, new_parent_map, name_y, value,
+ attributes, PropertyConstness::kConst,
+ StoreOrigin::kNamed);
+ CHECK(!tmp->map(isolate).is_dictionary_map());
+ transitions.push_back(tmp);
+ }
+
+ // Deletion of the property from |object2| must migrate it to |new_parent_map|
+ // which is an up-to-date version of the |parent_map|.
+  // All outgoing transitions from |new_parent_map| that add "x" must be
+  // marked as mutable, transitions to other properties must remain const.
+ CHECK_EQ(object2->map(isolate), *map);
+ CHECK(Runtime::DeleteObjectProperty(isolate, object2, name_x,
+ LanguageMode::kSloppy)
+ .ToChecked());
+ CHECK_EQ(object2->map(isolate), *new_parent_map);
+ for (Handle<Map> m : transitions) {
+ if (m->GetLastDescriptorName(isolate) == *name_x) {
+ CHECK_EQ(m->GetLastDescriptorDetails(isolate).constness(),
+ PropertyConstness::kMutable);
+
+ } else {
+ CHECK_EQ(m->GetLastDescriptorDetails(isolate).constness(),
+ PropertyConstness::kConst);
+ }
+ }
+}
+
} // namespace test_field_type_tracking
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/test/cctest/test-flags.cc b/deps/v8/test/cctest/test-flags.cc
index 9112dc7a571..fe04cc1085c 100644
--- a/deps/v8/test/cctest/test-flags.cc
+++ b/deps/v8/test/cctest/test-flags.cc
@@ -202,9 +202,11 @@ TEST(FlagsJitlessImplications) {
// fairly primitive and can break easily depending on the implication
// definition order in flag-definitions.h.
CHECK(!FLAG_opt);
+#if V8_ENABLE_WEBASSEMBLY
CHECK(!FLAG_validate_asm);
CHECK(!FLAG_asm_wasm_lazy_compilation);
CHECK(!FLAG_wasm_lazy_compilation);
+#endif // V8_ENABLE_WEBASSEMBLY
}
}
diff --git a/deps/v8/test/cctest/test-func-name-inference.cc b/deps/v8/test/cctest/test-func-name-inference.cc
index 7440ea88230..2331c61bc6c 100644
--- a/deps/v8/test/cctest/test-func-name-inference.cc
+++ b/deps/v8/test/cctest/test-func-name-inference.cc
@@ -80,7 +80,8 @@ static void CheckFunctionName(v8::Local<v8::Script> script,
// Obtain SharedFunctionInfo for the function.
Handle<SharedFunctionInfo> shared_func_info =
Handle<SharedFunctionInfo>::cast(
- isolate->debug()->FindSharedFunctionInfoInScript(i_script, func_pos));
+ isolate->debug()->FindInnermostContainingFunctionInfo(i_script,
+ func_pos));
// Verify inferred function name.
std::unique_ptr<char[]> inferred_name =
diff --git a/deps/v8/test/cctest/test-hashcode.cc b/deps/v8/test/cctest/test-hashcode.cc
index cf9d477ff96..c138ba04a28 100644
--- a/deps/v8/test/cctest/test-hashcode.cc
+++ b/deps/v8/test/cctest/test-hashcode.cc
@@ -29,24 +29,24 @@ int AddToSetAndGetHash(Isolate* isolate, Handle<JSObject> obj,
}
int GetPropertyDictionaryHash(Handle<JSObject> obj) {
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- return obj->property_dictionary_ordered().Hash();
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ return obj->property_dictionary_swiss().Hash();
} else {
return obj->property_dictionary().Hash();
}
}
int GetPropertyDictionaryLength(Handle<JSObject> obj) {
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- return obj->property_dictionary_ordered().length();
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ return obj->property_dictionary_swiss().Capacity();
} else {
return obj->property_dictionary().length();
}
}
void CheckIsDictionaryModeObject(Handle<JSObject> obj) {
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- CHECK(obj->raw_properties_or_hash().IsOrderedNameDictionary());
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ CHECK(obj->raw_properties_or_hash().IsSwissNameDictionary());
} else {
CHECK(obj->raw_properties_or_hash().IsNameDictionary());
}
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index ed02cd1e37c..d84e817051d 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -459,12 +459,13 @@ TEST(HeapSnapshotCodeObjects) {
// Verify that non-compiled function doesn't contain references to "x"
// literal, while compiled function does. The scope info is stored in
- // FixedArray objects attached to the SharedFunctionInfo.
+ // ScopeInfo objects attached to the SharedFunctionInfo.
bool compiled_references_x = false, lazy_references_x = false;
for (int i = 0, count = compiled_sfi->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = compiled_sfi->GetChild(i);
const v8::HeapGraphNode* node = prop->GetToNode();
- if (node->GetType() == v8::HeapGraphNode::kArray) {
+ if (node->GetType() == v8::HeapGraphNode::kHidden &&
+ !strcmp("system / ScopeInfo", GetName(node))) {
if (HasString(env->GetIsolate(), node, "x")) {
compiled_references_x = true;
break;
@@ -474,7 +475,8 @@ TEST(HeapSnapshotCodeObjects) {
for (int i = 0, count = lazy_sfi->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = lazy_sfi->GetChild(i);
const v8::HeapGraphNode* node = prop->GetToNode();
- if (node->GetType() == v8::HeapGraphNode::kArray) {
+ if (node->GetType() == v8::HeapGraphNode::kHidden &&
+ !strcmp("system / ScopeInfo", GetName(node))) {
if (HasString(env->GetIsolate(), node, "x")) {
lazy_references_x = true;
break;
diff --git a/deps/v8/test/cctest/test-icache.cc b/deps/v8/test/cctest/test-icache.cc
index 13c94f3afcb..529701c2277 100644
--- a/deps/v8/test/cctest/test-icache.cc
+++ b/deps/v8/test/cctest/test-icache.cc
@@ -6,10 +6,13 @@
#include "src/codegen/macro-assembler-inl.h"
#include "src/execution/simulator.h"
#include "src/handles/handles-inl.h"
-#include "src/wasm/code-space-access.h"
#include "test/cctest/cctest.h"
#include "test/common/assembler-tester.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/code-space-access.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace internal {
namespace test_icache {
@@ -170,6 +173,7 @@ CONDITIONAL_TEST(TestFlushICacheOfExecutable) {
#undef CONDITIONAL_TEST
+#if V8_ENABLE_WEBASSEMBLY
// Order of operation for this test case:
// perm(RWX) -> exec -> patch -> flush -> exec
TEST(TestFlushICacheOfWritableAndExecutable) {
@@ -197,6 +201,7 @@ TEST(TestFlushICacheOfWritableAndExecutable) {
CHECK_EQ(23, f.Call(23)); // Call into generated code.
}
}
+#endif // V8_ENABLE_WEBASSEMBLY
#undef __
diff --git a/deps/v8/test/cctest/test-js-to-wasm.cc b/deps/v8/test/cctest/test-js-to-wasm.cc
index 4d61e944dc1..d0f5122a323 100644
--- a/deps/v8/test/cctest/test-js-to-wasm.cc
+++ b/deps/v8/test/cctest/test-js-to-wasm.cc
@@ -68,6 +68,9 @@ struct ExportedFunction {
DECLARE_EXPORTED_FUNCTION(nop, sigs.v_v(), WASM_CODE({WASM_NOP}))
+DECLARE_EXPORTED_FUNCTION(unreachable, sigs.v_v(),
+ WASM_CODE({WASM_UNREACHABLE}))
+
DECLARE_EXPORTED_FUNCTION(i32_square, sigs.i_i(),
WASM_CODE({WASM_LOCAL_GET(0), WASM_LOCAL_GET(0),
kExprI32Mul}))
@@ -457,9 +460,9 @@ class FastJSWasmCallTester {
";"
"function test() {"
" try {"
- " return " +
+ " return %ObserveNode(" +
exported_function_name +
- "(arg);"
+ "(arg));"
" } catch (e) {"
" return 0;"
" }"
@@ -485,13 +488,19 @@ class FastJSWasmCallTester {
// Executes a test function with a try/catch calling a Wasm function returning
// void.
- void CallAndCheckWithTryCatch_void(const std::string& exported_function_name,
- const v8::Local<v8::Value> arg0,
- const v8::Local<v8::Value> arg1) {
+ void CallAndCheckWithTryCatch_void(
+ const std::string& exported_function_name,
+ const std::vector<v8::Local<v8::Value>>& args) {
LocalContext env;
- CHECK((*env)->Global()->Set(env.local(), v8_str("arg0"), arg0).FromJust());
- CHECK((*env)->Global()->Set(env.local(), v8_str("arg1"), arg1).FromJust());
+ for (size_t i = 0; i < args.size(); i++) {
+ CHECK((*env)
+ ->Global()
+ ->Set(env.local(), v8_str(("arg" + std::to_string(i)).c_str()),
+ args[i])
+ .FromJust());
+ }
+ std::string js_args = ArgsToString(args.size());
std::string js_code =
"const importObj = {"
" env: {"
@@ -509,9 +518,9 @@ class FastJSWasmCallTester {
";"
"function test() {"
" try {"
- " " +
- exported_function_name +
- "(arg0, arg1);"
+ " %ObserveNode(" +
+ exported_function_name + "(" + js_args +
+ "));"
" return 1;"
" } catch (e) {"
" return 0;"
@@ -928,6 +937,13 @@ TEST(TestFastJSWasmCall_EagerDeopt) {
// Exception handling tests
+TEST(TestFastJSWasmCall_Unreachable) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.AddExportedFunction(k_unreachable);
+ tester.CallAndCheckWithTryCatch_void("unreachable", {});
+}
+
TEST(TestFastJSWasmCall_Trap_i32) {
v8::HandleScope scope(CcTest::isolate());
FastJSWasmCallTester tester;
@@ -960,8 +976,8 @@ TEST(TestFastJSWasmCall_Trap_void) {
v8::HandleScope scope(CcTest::isolate());
FastJSWasmCallTester tester;
tester.AddExportedFunction(k_store_i32);
- tester.CallAndCheckWithTryCatch_void("store_i32", v8_int(0x7fffffff),
- v8_int(42));
+ tester.CallAndCheckWithTryCatch_void("store_i32",
+ {v8_int(0x7fffffff), v8_int(42)});
}
// BigInt
diff --git a/deps/v8/test/cctest/test-js-weak-refs.cc b/deps/v8/test/cctest/test-js-weak-refs.cc
index 7838e8ae25d..1291283515d 100644
--- a/deps/v8/test/cctest/test-js-weak-refs.cc
+++ b/deps/v8/test/cctest/test-js-weak-refs.cc
@@ -209,6 +209,7 @@ Handle<JSWeakRef> MakeWeakRefAndKeepDuringJob(Isolate* isolate) {
} // namespace
TEST(TestRegister) {
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -246,6 +247,7 @@ TEST(TestRegister) {
}
TEST(TestRegisterWithKey) {
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -298,6 +300,7 @@ TEST(TestRegisterWithKey) {
}
TEST(TestWeakCellNullify1) {
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -332,6 +335,7 @@ TEST(TestWeakCellNullify1) {
}
TEST(TestWeakCellNullify2) {
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -365,6 +369,7 @@ TEST(TestWeakCellNullify2) {
}
TEST(TestJSFinalizationRegistryPopClearedCellHoldings1) {
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -420,6 +425,7 @@ TEST(TestJSFinalizationRegistryPopClearedCellHoldings1) {
TEST(TestJSFinalizationRegistryPopClearedCellHoldings2) {
// Test that when all WeakCells for a key are popped, the key is removed from
// the key map.
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -470,6 +476,7 @@ TEST(TestJSFinalizationRegistryPopClearedCellHoldings2) {
}
TEST(TestUnregisterActiveCells) {
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -522,6 +529,7 @@ TEST(TestUnregisterActiveCells) {
}
TEST(TestUnregisterActiveAndClearedCells) {
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -577,6 +585,7 @@ TEST(TestUnregisterActiveAndClearedCells) {
}
TEST(TestWeakCellUnregisterTwice) {
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -624,6 +633,7 @@ TEST(TestWeakCellUnregisterTwice) {
}
TEST(TestWeakCellUnregisterPopped) {
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -664,6 +674,7 @@ TEST(TestWeakCellUnregisterPopped) {
}
TEST(TestWeakCellUnregisterNonexistentKey) {
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -676,6 +687,7 @@ TEST(TestWeakCellUnregisterNonexistentKey) {
}
TEST(TestJSWeakRef) {
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
@@ -704,6 +716,7 @@ TEST(TestJSWeakRef) {
}
TEST(TestJSWeakRefIncrementalMarking) {
+ FLAG_harmony_weak_refs = true;
if (!FLAG_incremental_marking) {
return;
}
@@ -739,6 +752,7 @@ TEST(TestJSWeakRefIncrementalMarking) {
}
TEST(TestJSWeakRefKeepDuringJob) {
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
@@ -776,6 +790,7 @@ TEST(TestJSWeakRefKeepDuringJob) {
}
TEST(TestJSWeakRefKeepDuringJobIncrementalMarking) {
+ FLAG_harmony_weak_refs = true;
if (!FLAG_incremental_marking) {
return;
}
@@ -804,6 +819,7 @@ TEST(TestJSWeakRefKeepDuringJobIncrementalMarking) {
}
TEST(TestRemoveUnregisterToken) {
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -867,6 +883,7 @@ TEST(TestRemoveUnregisterToken) {
}
TEST(JSWeakRefScavengedInWorklist) {
+ FLAG_harmony_weak_refs = true;
if (!FLAG_incremental_marking || FLAG_single_generation) {
return;
}
@@ -911,6 +928,7 @@ TEST(JSWeakRefScavengedInWorklist) {
}
TEST(JSWeakRefTenuredInWorklist) {
+ FLAG_harmony_weak_refs = true;
if (!FLAG_incremental_marking || FLAG_single_generation) {
return;
}
@@ -958,6 +976,7 @@ TEST(JSWeakRefTenuredInWorklist) {
}
TEST(UnregisterTokenHeapVerifier) {
+ FLAG_harmony_weak_refs = true;
if (!FLAG_incremental_marking) return;
ManualGCScope manual_gc_scope;
#ifdef VERIFY_HEAP
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index fd1f91a8eb7..d1f88877cd0 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -457,8 +457,10 @@ UNINITIALIZED_TEST(Issue539892) {
void LogRecordedBuffer(i::Handle<i::AbstractCode> code,
i::MaybeHandle<i::SharedFunctionInfo> maybe_shared,
const char* name, int length) override {}
+#if V8_ENABLE_WEBASSEMBLY
void LogRecordedBuffer(const i::wasm::WasmCode* code, const char* name,
int length) override {}
+#endif // V8_ENABLE_WEBASSEMBLY
};
SETUP_FLAGS();
@@ -577,8 +579,9 @@ UNINITIALIZED_TEST(LogInterpretedFramesNativeStack) {
logger.StopLogging();
- CHECK(logger.ContainsLine(
- {"InterpretedFunction", "testLogInterpretedFramesNativeStack"}));
+ CHECK(logger.ContainsLinesInOrder(
+ {{"LazyCompile", "testLogInterpretedFramesNativeStack"},
+ {"LazyCompile", "testLogInterpretedFramesNativeStack"}}));
}
isolate->Dispose();
}
@@ -629,7 +632,11 @@ UNINITIALIZED_TEST(LogInterpretedFramesNativeStackWithSerialization) {
.ToLocalChecked();
if (has_cache) {
logger.StopLogging();
- CHECK(logger.ContainsLine({"InterpretedFunction", "eyecatcher"}));
+ logger.PrintLog();
+ // Function is logged twice: once as interpreted, and once as the
+ // interpreter entry trampoline builtin.
+ CHECK(logger.ContainsLinesInOrder(
+ {{"Function", "eyecatcher"}, {"Function", "eyecatcher"}}));
}
v8::Local<v8::Value> arg = v8_num(3);
v8::Local<v8::Value> result =
@@ -667,13 +674,16 @@ UNINITIALIZED_TEST(ExternalCodeEventListener) {
"testCodeEventListenerBeforeStart('1', 1);";
CompileRun(source_text_before_start);
+ CHECK_EQ(code_event_handler.CountLines("Function",
+ "testCodeEventListenerBeforeStart"),
+ 0);
CHECK_EQ(code_event_handler.CountLines("LazyCompile",
"testCodeEventListenerBeforeStart"),
0);
code_event_handler.Enable();
- CHECK_GE(code_event_handler.CountLines("LazyCompile",
+ CHECK_GE(code_event_handler.CountLines("Function",
"testCodeEventListenerBeforeStart"),
1);
@@ -715,10 +725,12 @@ UNINITIALIZED_TEST(ExternalCodeEventListenerInnerFunctions) {
v8::Local<v8::UnboundScript> script =
v8::ScriptCompiler::CompileUnboundScript(isolate1, &source)
.ToLocalChecked();
- CHECK_EQ(code_event_handler.CountLines("Script", "f1"),
- i::FLAG_stress_background_compile ? 2 : 1);
- CHECK_EQ(code_event_handler.CountLines("Script", "f2"),
- i::FLAG_stress_background_compile ? 2 : 1);
+ CHECK_EQ(code_event_handler.CountLines("Function", "f1"),
+ 1 + (i::FLAG_stress_background_compile ? 1 : 0) +
+ (i::FLAG_always_sparkplug ? 1 : 0));
+ CHECK_EQ(code_event_handler.CountLines("Function", "f2"),
+ 1 + (i::FLAG_stress_background_compile ? 1 : 0) +
+ (i::FLAG_always_sparkplug ? 1 : 0));
cache = v8::ScriptCompiler::CreateCodeCache(script);
}
isolate1->Dispose();
@@ -743,8 +755,8 @@ UNINITIALIZED_TEST(ExternalCodeEventListenerInnerFunctions) {
isolate2, &source, v8::ScriptCompiler::kConsumeCodeCache)
.ToLocalChecked();
}
- CHECK_EQ(code_event_handler.CountLines("Script", "f1"), 1);
- CHECK_EQ(code_event_handler.CountLines("Script", "f2"), 1);
+ CHECK_EQ(code_event_handler.CountLines("Function", "f1"), 1);
+ CHECK_EQ(code_event_handler.CountLines("Function", "f2"), 1);
}
isolate2->Dispose();
}
@@ -772,24 +784,24 @@ UNINITIALIZED_TEST(ExternalCodeEventListenerWithInterpretedFramesNativeStack) {
"testCodeEventListenerBeforeStart('1', 1);";
CompileRun(source_text_before_start);
- CHECK_EQ(code_event_handler.CountLines("InterpretedFunction",
+ CHECK_EQ(code_event_handler.CountLines("Function",
"testCodeEventListenerBeforeStart"),
0);
code_event_handler.Enable();
- CHECK_GE(code_event_handler.CountLines("InterpretedFunction",
+ CHECK_GE(code_event_handler.CountLines("Function",
"testCodeEventListenerBeforeStart"),
- 1);
+ 2);
const char* source_text_after_start =
"function testCodeEventListenerAfterStart(a,b) { return a + b };"
"testCodeEventListenerAfterStart('1', 1);";
CompileRun(source_text_after_start);
- CHECK_GE(code_event_handler.CountLines("InterpretedFunction",
+ CHECK_GE(code_event_handler.CountLines("LazyCompile",
"testCodeEventListenerAfterStart"),
- 1);
+ 2);
CHECK_EQ(
code_event_handler.CountLines("Builtin", "InterpreterEntryTrampoline"),
@@ -1192,101 +1204,3 @@ UNINITIALIZED_TEST(BuiltinsNotLoggedAsLazyCompile) {
}
isolate->Dispose();
}
-
-TEST(BytecodeFlushEvents) {
- SETUP_FLAGS();
-
-#ifndef V8_LITE_MODE
- i::FLAG_opt = false;
- i::FLAG_always_opt = false;
- i::FLAG_optimize_for_size = false;
-#endif // V8_LITE_MODE
- i::FLAG_flush_bytecode = true;
- i::FLAG_allow_natives_syntax = true;
-
- ManualGCScope manual_gc_scope;
-
- v8::Isolate* isolate = CcTest::isolate();
- i::Isolate* i_isolate = CcTest::i_isolate();
- i::Factory* factory = i_isolate->factory();
-
- struct FakeCodeEventLogger : public i::CodeEventLogger {
- explicit FakeCodeEventLogger(i::Isolate* isolate)
- : CodeEventLogger(isolate) {}
-
- void CodeMoveEvent(i::AbstractCode from, i::AbstractCode to) override {}
- void CodeDisableOptEvent(i::Handle<i::AbstractCode> code,
- i::Handle<i::SharedFunctionInfo> shared) override {
- }
-
- void BytecodeFlushEvent(Address compiled_data_start) override {
- // We only expect a single flush.
- CHECK_EQ(flushed_compiled_data_start, i::kNullAddress);
- flushed_compiled_data_start = compiled_data_start;
- }
-
- void LogRecordedBuffer(i::Handle<i::AbstractCode> code,
- i::MaybeHandle<i::SharedFunctionInfo> maybe_shared,
- const char* name, int length) override {}
- void LogRecordedBuffer(const i::wasm::WasmCode* code, const char* name,
- int length) override {}
-
- i::Address flushed_compiled_data_start = i::kNullAddress;
- };
-
- FakeCodeEventLogger code_event_logger(i_isolate);
-
- {
- ScopedLoggerInitializer logger(isolate);
- logger.logger()->AddCodeEventListener(&code_event_logger);
-
- const char* source =
- "function foo() {"
- " var x = 42;"
- " var y = 42;"
- " var z = x + y;"
- "};"
- "foo()";
- i::Handle<i::String> foo_name = factory->InternalizeUtf8String("foo");
-
- // This compile will add the code to the compilation cache.
- {
- v8::HandleScope scope(isolate);
- CompileRun(source);
- }
-
- // Check function is compiled.
- i::Handle<i::Object> func_value =
- i::Object::GetProperty(i_isolate, i_isolate->global_object(), foo_name)
- .ToHandleChecked();
- CHECK(func_value->IsJSFunction());
- i::Handle<i::JSFunction> function =
- i::Handle<i::JSFunction>::cast(func_value);
- CHECK(function->shared().is_compiled());
-
- // The code will survive at least two GCs.
- CcTest::CollectAllGarbage();
- CcTest::CollectAllGarbage();
- CHECK(function->shared().is_compiled());
- CHECK_EQ(code_event_logger.flushed_compiled_data_start, i::kNullAddress);
-
- // Get the start address of the compiled data before flushing.
- i::HeapObject compiled_data =
- function->shared().GetBytecodeArray(i_isolate);
- i::Address compiled_data_start = compiled_data.address();
-
- // Simulate several GCs that use full marking.
- const int kAgingThreshold = 6;
- for (int i = 0; i < kAgingThreshold; i++) {
- CcTest::CollectAllGarbage();
- }
-
- // foo should no longer be in the compilation cache
- CHECK(!function->shared().is_compiled());
- CHECK(!function->is_compiled());
-
- // Verify that foo() was in fact flushed.
- CHECK_EQ(code_event_logger.flushed_compiled_data_start,
- compiled_data_start);
- }
-}
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index 8f348c4584c..b0df3843760 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -60,10 +60,18 @@ using F0 = int();
static void EntryCode(MacroAssembler* masm) {
// Smi constant register is callee save.
__ pushq(kRootRegister);
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ __ pushq(kPointerCageBaseRegister);
+#endif
__ InitializeRootRegister();
}
-static void ExitCode(MacroAssembler* masm) { __ popq(kRootRegister); }
+static void ExitCode(MacroAssembler* masm) {
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ __ popq(kPointerCageBaseRegister);
+#endif
+ __ popq(kRootRegister);
+}
TEST(Smi) {
// Check that C++ Smi operations work as expected.
diff --git a/deps/v8/test/cctest/test-object.cc b/deps/v8/test/cctest/test-object.cc
index 1b4d90628df..33acd05c15a 100644
--- a/deps/v8/test/cctest/test-object.cc
+++ b/deps/v8/test/cctest/test-object.cc
@@ -77,6 +77,11 @@ TEST(NoSideEffectsToString) {
"Error: fisk hest");
CheckObject(isolate, factory->NewJSObject(isolate->object_function()),
"#<Object>");
+ CheckObject(
+ isolate,
+ factory->NewJSProxy(factory->NewJSObject(isolate->object_function()),
+ factory->NewJSObject(isolate->object_function())),
+ "#<Object>");
}
TEST(EnumCache) {
@@ -115,14 +120,10 @@ TEST(EnumCache) {
*env->Global()->Get(env.local(), v8_str("cc")).ToLocalChecked()));
// Check the transition tree.
- CHECK_EQ(a->map().instance_descriptors(kRelaxedLoad),
- b->map().instance_descriptors(kRelaxedLoad));
- CHECK_EQ(b->map().instance_descriptors(kRelaxedLoad),
- c->map().instance_descriptors(kRelaxedLoad));
- CHECK_NE(c->map().instance_descriptors(kRelaxedLoad),
- cc->map().instance_descriptors(kRelaxedLoad));
- CHECK_NE(b->map().instance_descriptors(kRelaxedLoad),
- cc->map().instance_descriptors(kRelaxedLoad));
+ CHECK_EQ(a->map().instance_descriptors(), b->map().instance_descriptors());
+ CHECK_EQ(b->map().instance_descriptors(), c->map().instance_descriptors());
+ CHECK_NE(c->map().instance_descriptors(), cc->map().instance_descriptors());
+ CHECK_NE(b->map().instance_descriptors(), cc->map().instance_descriptors());
// Check that the EnumLength is unset.
CHECK_EQ(a->map().EnumLength(), kInvalidEnumCacheSentinel);
@@ -131,13 +132,13 @@ TEST(EnumCache) {
CHECK_EQ(cc->map().EnumLength(), kInvalidEnumCacheSentinel);
// Check that the EnumCache is empty.
- CHECK_EQ(a->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ CHECK_EQ(a->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- CHECK_EQ(b->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ CHECK_EQ(b->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- CHECK_EQ(c->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ CHECK_EQ(c->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- CHECK_EQ(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ CHECK_EQ(cc->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
// The EnumCache is shared on the DescriptorArray, creating it on {cc} has no
@@ -149,15 +150,14 @@ TEST(EnumCache) {
CHECK_EQ(c->map().EnumLength(), kInvalidEnumCacheSentinel);
CHECK_EQ(cc->map().EnumLength(), 3);
- CHECK_EQ(a->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ CHECK_EQ(a->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- CHECK_EQ(b->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ CHECK_EQ(b->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- CHECK_EQ(c->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ CHECK_EQ(c->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- EnumCache enum_cache =
- cc->map().instance_descriptors(kRelaxedLoad).enum_cache();
+ EnumCache enum_cache = cc->map().instance_descriptors().enum_cache();
CHECK_NE(enum_cache, *factory->empty_enum_cache());
CHECK_EQ(enum_cache.keys().length(), 3);
CHECK_EQ(enum_cache.indices().length(), 3);
@@ -174,19 +174,14 @@ TEST(EnumCache) {
// The enum cache is shared on the descriptor array of maps {a}, {b} and
// {c} only.
- EnumCache enum_cache =
- a->map().instance_descriptors(kRelaxedLoad).enum_cache();
+ EnumCache enum_cache = a->map().instance_descriptors().enum_cache();
CHECK_NE(enum_cache, *factory->empty_enum_cache());
- CHECK_NE(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ CHECK_NE(cc->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- CHECK_NE(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- enum_cache);
- CHECK_EQ(a->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- enum_cache);
- CHECK_EQ(b->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- enum_cache);
- CHECK_EQ(c->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- enum_cache);
+ CHECK_NE(cc->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_EQ(a->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_EQ(b->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_EQ(c->map().instance_descriptors().enum_cache(), enum_cache);
CHECK_EQ(enum_cache.keys().length(), 1);
CHECK_EQ(enum_cache.indices().length(), 1);
@@ -195,8 +190,7 @@ TEST(EnumCache) {
// Creating the EnumCache for {c} will create a new EnumCache on the shared
// DescriptorArray.
Handle<EnumCache> previous_enum_cache(
- a->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- a->GetIsolate());
+ a->map().instance_descriptors().enum_cache(), a->GetIsolate());
Handle<FixedArray> previous_keys(previous_enum_cache->keys(),
a->GetIsolate());
Handle<FixedArray> previous_indices(previous_enum_cache->indices(),
@@ -208,8 +202,7 @@ TEST(EnumCache) {
CHECK_EQ(c->map().EnumLength(), 3);
CHECK_EQ(cc->map().EnumLength(), 3);
- EnumCache enum_cache =
- c->map().instance_descriptors(kRelaxedLoad).enum_cache();
+ EnumCache enum_cache = c->map().instance_descriptors().enum_cache();
CHECK_NE(enum_cache, *factory->empty_enum_cache());
// The keys and indices caches are updated.
CHECK_EQ(enum_cache, *previous_enum_cache);
@@ -222,25 +215,20 @@ TEST(EnumCache) {
// The enum cache is shared on the descriptor array of maps {a}, {b} and
// {c} only.
- CHECK_NE(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ CHECK_NE(cc->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- CHECK_NE(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- enum_cache);
- CHECK_NE(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ CHECK_NE(cc->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_NE(cc->map().instance_descriptors().enum_cache(),
*previous_enum_cache);
- CHECK_EQ(a->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- enum_cache);
- CHECK_EQ(b->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- enum_cache);
- CHECK_EQ(c->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- enum_cache);
+ CHECK_EQ(a->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_EQ(b->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_EQ(c->map().instance_descriptors().enum_cache(), enum_cache);
}
// {b} can reuse the existing EnumCache, hence we only need to set the correct
// EnumLength on the map without modifying the cache itself.
previous_enum_cache =
- handle(a->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- a->GetIsolate());
+ handle(a->map().instance_descriptors().enum_cache(), a->GetIsolate());
previous_keys = handle(previous_enum_cache->keys(), a->GetIsolate());
previous_indices = handle(previous_enum_cache->indices(), a->GetIsolate());
CompileRun("var s = 0; for (let key in b) { s += b[key] };");
@@ -250,8 +238,7 @@ TEST(EnumCache) {
CHECK_EQ(c->map().EnumLength(), 3);
CHECK_EQ(cc->map().EnumLength(), 3);
- EnumCache enum_cache =
- c->map().instance_descriptors(kRelaxedLoad).enum_cache();
+ EnumCache enum_cache = c->map().instance_descriptors().enum_cache();
CHECK_NE(enum_cache, *factory->empty_enum_cache());
// The keys and indices caches are not updated.
CHECK_EQ(enum_cache, *previous_enum_cache);
@@ -262,18 +249,14 @@ TEST(EnumCache) {
// The enum cache is shared on the descriptor array of maps {a}, {b} and
// {c} only.
- CHECK_NE(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ CHECK_NE(cc->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- CHECK_NE(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- enum_cache);
- CHECK_NE(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ CHECK_NE(cc->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_NE(cc->map().instance_descriptors().enum_cache(),
*previous_enum_cache);
- CHECK_EQ(a->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- enum_cache);
- CHECK_EQ(b->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- enum_cache);
- CHECK_EQ(c->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- enum_cache);
+ CHECK_EQ(a->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_EQ(b->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_EQ(c->map().instance_descriptors().enum_cache(), enum_cache);
}
}
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 19a6d3779f5..1c55995efa0 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -1619,7 +1619,6 @@ const char* ReadString(unsigned* start) {
enum ParserFlag {
kAllowLazy,
kAllowNatives,
- kAllowHarmonyLogicalAssignment,
};
enum ParserSyncTestResult {
@@ -1630,15 +1629,11 @@ enum ParserSyncTestResult {
void SetGlobalFlags(base::EnumSet<ParserFlag> flags) {
i::FLAG_allow_natives_syntax = flags.contains(kAllowNatives);
- i::FLAG_harmony_logical_assignment =
- flags.contains(kAllowHarmonyLogicalAssignment);
}
void SetParserFlags(i::UnoptimizedCompileFlags* compile_flags,
base::EnumSet<ParserFlag> flags) {
compile_flags->set_allow_natives_syntax(flags.contains(kAllowNatives));
- compile_flags->set_allow_harmony_logical_assignment(
- flags.contains(kAllowHarmonyLogicalAssignment));
}
void TestParserSyncWithFlags(i::Handle<i::String> source,
@@ -4328,6 +4323,7 @@ TEST(MaybeAssignedTopLevel) {
}
}
+#if V8_ENABLE_WEBASSEMBLY
namespace {
i::Scope* DeserializeFunctionScope(i::Isolate* isolate, i::Zone* zone,
@@ -4370,7 +4366,6 @@ TEST(AsmModuleFlag) {
CHECK(s->IsAsmModule() && s->AsDeclarationScope()->is_asm_module());
}
-
TEST(UseAsmUseCount) {
i::Isolate* isolate = CcTest::i_isolate();
i::HandleScope scope(isolate);
@@ -4383,7 +4378,7 @@ TEST(UseAsmUseCount) {
"function bar() { \"use asm\"; var baz = 1; }");
CHECK_LT(0, use_counts[v8::Isolate::kUseAsm]);
}
-
+#endif // V8_ENABLE_WEBASSEMBLY
TEST(StrictModeUseCount) {
i::Isolate* isolate = CcTest::i_isolate();
@@ -12401,9 +12396,7 @@ TEST(LogicalAssignmentDestructuringErrors) {
};
// clang-format on
- static const ParserFlag flags[] = {kAllowHarmonyLogicalAssignment};
- RunParserSyncTest(context_data, error_data, kError, nullptr, 0, flags,
- arraysize(flags));
+ RunParserSyncTest(context_data, error_data, kError);
}
} // namespace test_parsing
diff --git a/deps/v8/test/cctest/test-poison-disasm-arm64.cc b/deps/v8/test/cctest/test-poison-disasm-arm64.cc
index 7256a5876a0..48b72a004fd 100644
--- a/deps/v8/test/cctest/test-poison-disasm-arm64.cc
+++ b/deps/v8/test/cctest/test-poison-disasm-arm64.cc
@@ -121,7 +121,7 @@ TEST(DisasmPoisonPolymorphicLoad) {
"csdb", // spec. barrier
"ldur w<<BSt:[0-9]+>>, \\[<<Obj>>, #[0-9]+\\]", // load backing store
// branchful decompress
- "add x<<BSt>>, x26, x<<BSt>>", // Add root to ref
+ "add x<<BSt>>, x2[68], x<<BSt>>", // Add root to ref
"and x<<BSt>>, x<<BSt>>, " + kPReg, // apply the poison
"ldur w<<Prop:[0-9]+>>, \\[x<<BSt>>, #[0-9]+\\]", // load the property
"and x<<Prop>>, x<<Prop>>, " + kPReg, // apply the poison
@@ -194,7 +194,7 @@ TEST(DisasmPoisonMonomorphicLoadFloat64) {
"csel " + kPReg + ", xzr, " + kPReg + ", ne", // update the poison
"csdb", // spec. barrier
"ldur w<<F1:[0-9]+>>, \\[<<Obj>>, #11\\]", // load heap number
- "add x<<F1>>, x26, x<<F1>>", // Decompress ref
+ "add x<<F1>>, x2[68], x<<F1>>", // Decompress ref
"and x<<F1>>, x<<F1>>, " + kPReg, // apply the poison
"add <<Addr:x[0-9]+>>, x<<F1>>, #0x[0-9a-f]+", // addr. calculation
"and <<Addr>>, <<Addr>>, " + kPReg, // apply the poison
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index 7460e9df8fa..ab56d6e7ccf 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -352,10 +352,6 @@ TEST(CodeMapMoveAndDeleteCode) {
code_map.MoveCode(ToAddress(0x1500), ToAddress(0x1700)); // Deprecate bbb.
CHECK(!code_map.FindEntry(ToAddress(0x1500)));
CHECK_EQ(entry1, code_map.FindEntry(ToAddress(0x1700)));
- CodeEntry* entry3 = new CodeEntry(i::CodeEventListener::FUNCTION_TAG, "ccc");
- code_map.AddCode(ToAddress(0x1750), entry3, 0x100);
- CHECK(!code_map.FindEntry(ToAddress(0x1700)));
- CHECK_EQ(entry3, code_map.FindEntry(ToAddress(0x1750)));
}
TEST(CodeMapClear) {
@@ -962,6 +958,63 @@ TEST(NodeSourceTypes) {
CHECK_EQ(unresolved_node->source_type(), v8::CpuProfileNode::kUnresolved);
}
+TEST(CodeMapRemoveCode) {
+ StringsStorage strings;
+ CodeMap code_map(strings);
+
+ CodeEntry* entry = new CodeEntry(i::CodeEventListener::FUNCTION_TAG, "aaa");
+ code_map.AddCode(ToAddress(0x1000), entry, 0x100);
+ CHECK(code_map.RemoveCode(entry));
+ CHECK(!code_map.FindEntry(ToAddress(0x1000)));
+
+ // Test that when two entries share the same address, we remove only the
+ // entry that we desired to.
+ CodeEntry* colliding_entry1 =
+ new CodeEntry(i::CodeEventListener::FUNCTION_TAG, "aaa");
+ CodeEntry* colliding_entry2 =
+ new CodeEntry(i::CodeEventListener::FUNCTION_TAG, "aaa");
+ code_map.AddCode(ToAddress(0x1000), colliding_entry1, 0x100);
+ code_map.AddCode(ToAddress(0x1000), colliding_entry2, 0x100);
+
+ CHECK(code_map.RemoveCode(colliding_entry1));
+ CHECK_EQ(code_map.FindEntry(ToAddress(0x1000)), colliding_entry2);
+
+ CHECK(code_map.RemoveCode(colliding_entry2));
+ CHECK(!code_map.FindEntry(ToAddress(0x1000)));
+}
+
+TEST(CodeMapMoveOverlappingCode) {
+ StringsStorage strings;
+ CodeMap code_map(strings);
+ CodeEntry* colliding_entry1 =
+ new CodeEntry(i::CodeEventListener::FUNCTION_TAG, "aaa");
+ CodeEntry* colliding_entry2 =
+ new CodeEntry(i::CodeEventListener::FUNCTION_TAG, "bbb");
+ CodeEntry* after_entry =
+ new CodeEntry(i::CodeEventListener::FUNCTION_TAG, "ccc");
+
+ code_map.AddCode(ToAddress(0x1400), colliding_entry1, 0x200);
+ code_map.AddCode(ToAddress(0x1400), colliding_entry2, 0x200);
+ code_map.AddCode(ToAddress(0x1800), after_entry, 0x200);
+
+ CHECK_EQ(colliding_entry1->instruction_start(), ToAddress(0x1400));
+ CHECK_EQ(colliding_entry2->instruction_start(), ToAddress(0x1400));
+ CHECK_EQ(after_entry->instruction_start(), ToAddress(0x1800));
+
+ CHECK(code_map.FindEntry(ToAddress(0x1400)));
+ CHECK_EQ(code_map.FindEntry(ToAddress(0x1800)), after_entry);
+
+ code_map.MoveCode(ToAddress(0x1400), ToAddress(0x1600));
+
+ CHECK(!code_map.FindEntry(ToAddress(0x1400)));
+ CHECK(code_map.FindEntry(ToAddress(0x1600)));
+ CHECK_EQ(code_map.FindEntry(ToAddress(0x1800)), after_entry);
+
+ CHECK_EQ(colliding_entry1->instruction_start(), ToAddress(0x1600));
+ CHECK_EQ(colliding_entry2->instruction_start(), ToAddress(0x1600));
+ CHECK_EQ(after_entry->instruction_start(), ToAddress(0x1800));
+}
+
} // namespace test_profile_generator
} // namespace internal
} // namespace v8
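
The new CodeMapRemoveCode and CodeMapMoveOverlappingCode tests above exercise a CodeMap in which several CodeEntry objects may share a start address: RemoveCode must delete only the requested entry, and MoveCode re-keys every entry registered at the old address. The following is a self-contained toy sketch of those semantics, using a std::multimap as a hypothetical stand-in for CodeMap (all names here are illustrative, not V8's):

#include <cassert>
#include <cstdint>
#include <map>
#include <string>

// Toy "code map": several entries may share a start address; removal must
// target one specific entry, and moving re-keys every colliding entry.
using ToyCodeMap = std::multimap<uintptr_t, std::string>;

bool RemoveCode(ToyCodeMap& map, uintptr_t addr, const std::string& entry) {
  auto range = map.equal_range(addr);
  for (auto it = range.first; it != range.second; ++it) {
    if (it->second == entry) {
      map.erase(it);  // Erase only the requested entry.
      return true;
    }
  }
  return false;
}

void MoveCode(ToyCodeMap& map, uintptr_t from, uintptr_t to) {
  ToyCodeMap moved(map.equal_range(from).first, map.equal_range(from).second);
  map.erase(from);  // Drop all entries at the old address...
  for (const auto& kv : moved) map.emplace(to, kv.second);  // ...re-key them.
}

int main() {
  ToyCodeMap map;
  map.emplace(0x1400, "aaa");
  map.emplace(0x1400, "bbb");
  assert(RemoveCode(map, 0x1400, "aaa"));
  assert(map.count(0x1400) == 1);  // "bbb" survives the targeted removal.
  MoveCode(map, 0x1400, 0x1600);
  assert(map.count(0x1600) == 1 && map.count(0x1400) == 0);
  return 0;
}
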
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index cf66f54f4f1..2884dfd136a 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -1730,8 +1730,7 @@ TEST(CodeSerializerPromotedToCompilationCache) {
MaybeHandle<SharedFunctionInfo> shared =
isolate->compilation_cache()->LookupScript(
- src, src, 0, 0, v8::ScriptOriginOptions(), isolate->native_context(),
- LanguageMode::kSloppy);
+ src, src, 0, 0, v8::ScriptOriginOptions(), LanguageMode::kSloppy);
CHECK(*shared.ToHandleChecked() == *copy);
@@ -3348,6 +3347,13 @@ UNINITIALIZED_TEST(SnapshotCreatorTemplates) {
FreeCurrentEmbeddedBlob();
}
+MaybeLocal<v8::Module> ResolveCallback(Local<v8::Context> context,
+ Local<v8::String> specifier,
+ Local<v8::FixedArray> import_assertions,
+ Local<v8::Module> referrer) {
+ return {};
+}
+
UNINITIALIZED_TEST(SnapshotCreatorAddData) {
DisableAlwaysOpt();
DisableEmbeddedBlobRefcounting();
@@ -3397,11 +3403,23 @@ UNINITIALIZED_TEST(SnapshotCreatorAddData) {
v8::AccessorSignature::New(isolate,
v8::FunctionTemplate::New(isolate));
+ v8::ScriptOrigin origin(isolate, v8_str(""), {}, {}, {}, {}, {}, {}, {},
+ true);
+ v8::ScriptCompiler::Source source(
+ v8::String::NewFromUtf8Literal(
+ isolate, "export let a = 42; globalThis.a = {};"),
+ origin);
+ v8::Local<v8::Module> module =
+ v8::ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ module->InstantiateModule(context, ResolveCallback).ToChecked();
+ module->Evaluate(context).ToLocalChecked();
+
CHECK_EQ(0u, creator.AddData(context, object));
CHECK_EQ(1u, creator.AddData(context, v8_str("context-dependent")));
CHECK_EQ(2u, creator.AddData(context, persistent_number_1.Get(isolate)));
CHECK_EQ(3u, creator.AddData(context, object_template));
CHECK_EQ(4u, creator.AddData(context, persistent_context.Get(isolate)));
+ CHECK_EQ(5u, creator.AddData(context, module));
creator.AddContext(context);
CHECK_EQ(0u, creator.AddData(v8_str("context-independent")));
@@ -3460,7 +3478,19 @@ UNINITIALIZED_TEST(SnapshotCreatorAddData) {
CHECK_EQ(*v8::Utils::OpenHandle(*serialized_context),
*v8::Utils::OpenHandle(*context));
- CHECK(context->GetDataFromSnapshotOnce<v8::Value>(5).IsEmpty());
+ v8::Local<v8::Module> serialized_module =
+ context->GetDataFromSnapshotOnce<v8::Module>(5).ToLocalChecked();
+ CHECK(context->GetDataFromSnapshotOnce<v8::Context>(5).IsEmpty());
+ {
+ v8::Context::Scope context_scope(context);
+ v8::Local<v8::Object> mod_ns =
+ serialized_module->GetModuleNamespace().As<v8::Object>();
+ CHECK(mod_ns->Get(context, v8_str("a"))
+ .ToLocalChecked()
+ ->StrictEquals(v8_num(42.0)));
+ }
+
+ CHECK(context->GetDataFromSnapshotOnce<v8::Value>(6).IsEmpty());
// Check serialized data on the isolate.
string = isolate->GetDataFromSnapshotOnce<v8::String>(0).ToLocalChecked();
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index 41aa7072314..735040a4c5d 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -2096,6 +2096,84 @@ TEST(InternalizeExternalStringUncachedWithCopyTwoByte) {
CHECK(internal->IsInternalizedString());
}
+// Show that we cache the data pointer for internalized, external, and uncached
+// strings with cacheable resources through MakeExternal. One byte version.
+TEST(CheckCachedDataInternalExternalUncachedString) {
+ CcTest::InitializeVM();
+ Factory* factory = CcTest::i_isolate()->factory();
+ v8::HandleScope scope(CcTest::isolate());
+
+ // Due to different size restrictions the string needs to be small but not too
+  // small. One of these restrictions depends on whether pointer compression is enabled.
+#ifdef V8_COMPRESS_POINTERS
+ const char* raw_small = "small string";
+#elif V8_TARGET_ARCH_32_BIT
+ const char* raw_small = "smol";
+#else
+ const char* raw_small = "smalls";
+#endif // V8_COMPRESS_POINTERS
+
+ Handle<String> string =
+ factory->InternalizeString(factory->NewStringFromAsciiChecked(raw_small));
+ OneByteResource* resource =
+ new OneByteResource(i::StrDup(raw_small), strlen(raw_small));
+
+ // Check it is external, internalized, and uncached with a cacheable resource.
+ string->MakeExternal(resource);
+ CHECK(string->IsOneByteRepresentation());
+ CHECK(string->IsExternalString());
+ CHECK(string->IsInternalizedString());
+
+ // Check that the external string is uncached, its resource is cacheable, and
+ // that we indeed cached it.
+ Handle<ExternalOneByteString> external_string =
+ Handle<ExternalOneByteString>::cast(string);
+ CHECK(external_string->is_uncached());
+ CHECK(external_string->resource()->IsCacheable());
+ CHECK_NOT_NULL(external_string->resource()->cached_data());
+ CHECK_EQ(external_string->resource()->cached_data(),
+ external_string->resource()->data());
+}
+
+// Show that we cache the data pointer for internalized, external, and uncached
+// strings with cacheable resources through MakeExternal. Two byte version.
+TEST(CheckCachedDataInternalExternalUncachedStringTwoByte) {
+ CcTest::InitializeVM();
+ Factory* factory = CcTest::i_isolate()->factory();
+ v8::HandleScope scope(CcTest::isolate());
+
+ // Due to different size restrictions the string needs to be small but not too
+  // small. One of these restrictions depends on whether pointer compression is enabled.
+#ifdef V8_COMPRESS_POINTERS
+ const char* raw_small = "small string";
+#elif V8_TARGET_ARCH_32_BIT
+ const char* raw_small = "smol";
+#else
+ const char* raw_small = "smalls";
+#endif // V8_COMPRESS_POINTERS
+
+ Handle<String> string =
+ factory->InternalizeString(factory->NewStringFromAsciiChecked(raw_small));
+ Resource* resource =
+ new Resource(AsciiToTwoByteString(raw_small), strlen(raw_small));
+
+ // Check it is external, internalized, and uncached with a cacheable resource.
+ string->MakeExternal(resource);
+ CHECK(string->IsTwoByteRepresentation());
+ CHECK(string->IsExternalString());
+ CHECK(string->IsInternalizedString());
+
+ // Check that the external string is uncached, its resource is cacheable, and
+ // that we indeed cached it.
+ Handle<ExternalTwoByteString> external_string =
+ Handle<ExternalTwoByteString>::cast(string);
+ CHECK(external_string->is_uncached());
+ CHECK(external_string->resource()->IsCacheable());
+ CHECK_NOT_NULL(external_string->resource()->cached_data());
+ CHECK_EQ(external_string->resource()->cached_data(),
+ external_string->resource()->data());
+}
+
} // namespace test_strings
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-swiss-name-dictionary-csa.cc b/deps/v8/test/cctest/test-swiss-name-dictionary-csa.cc
new file mode 100644
index 00000000000..5c730883e86
--- /dev/null
+++ b/deps/v8/test/cctest/test-swiss-name-dictionary-csa.cc
@@ -0,0 +1,466 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/code-stub-assembler.h"
+#include "src/codegen/cpu-features.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/swiss-name-dictionary-inl.h"
+#include "test/cctest/compiler/code-assembler-tester.h"
+#include "test/cctest/compiler/function-tester.h"
+#include "test/cctest/test-swiss-name-dictionary-infra.h"
+#include "test/cctest/test-swiss-name-dictionary-shared-tests.h"
+
+namespace v8 {
+namespace internal {
+namespace test_swiss_hash_table {
+
+// The non-SIMD SwissNameDictionary implementation requires 64 bit integer
+// operations, which CSA/Torque don't offer on 32 bit platforms. Therefore, we
+// cannot run the CSA version of the tests on 32 bit platforms. The only
+// exception is IA32, where we can use SSE and don't need 64 bit integers.
+// TODO(v8:11330) The Torque SIMD implementation is not specific to SSE (like
+// the C++ one), but works on other platforms. It should be possible to create a
+// workaround where on 32 bit, non-IA32 platforms we use the "portable", non-SSE
+// implementation on the C++ side (which uses a group size of 8) and create a
+// special version of the SIMD Torque implementation that works for group size 8
+// instead of 16.
+#if V8_TARGET_ARCH_64_BIT || V8_TARGET_ARCH_IA32
+
+// Executes tests by executing CSA/Torque versions of dictionary operations.
+// See RuntimeTestRunner for description of public functions.
+class CSATestRunner {
+ public:
+ CSATestRunner(Isolate* isolate, int initial_capacity, KeyCache& keys);
+
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ static bool IsEnabled() {
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
+ CpuFeatures::SupportedFeatures();
+ return CpuFeatures::IsSupported(CpuFeature::AVX) ||
+ CpuFeatures::IsSupported(CpuFeature::SSSE3);
+#else
+ // Other 64-bit architectures always support the required operations.
+ return true;
+#endif
+ }
+
+ void Add(Handle<Name> key, Handle<Object> value, PropertyDetails details);
+ InternalIndex FindEntry(Handle<Name> key);
+ void Put(InternalIndex entry, Handle<Object> new_value,
+ PropertyDetails new_details);
+ void Delete(InternalIndex entry);
+ void RehashInplace();
+ void Shrink();
+
+ Handle<FixedArray> GetData(InternalIndex entry);
+ void CheckCounts(base::Optional<int> capacity, base::Optional<int> elements,
+ base::Optional<int> deleted);
+ void CheckEnumerationOrder(const std::vector<std::string>& expected_keys);
+ void CheckCopy();
+ void VerifyHeap();
+
+ void PrintTable();
+
+ Handle<SwissNameDictionary> table;
+
+ private:
+ using Label = compiler::CodeAssemblerLabel;
+ template <class T>
+ using TVariable = compiler::TypedCodeAssemblerVariable<T>;
+
+ void CheckAgainstReference();
+
+ void Allocate(Handle<Smi> capacity);
+
+ Isolate* isolate_;
+
+  // Used to mirror all operations with their C++ versions, yielding a
+  // reference to compare against.
+ Handle<SwissNameDictionary> reference_;
+
+ // CSA functions execute the corresponding dictionary operation.
+ compiler::FunctionTester find_entry_ft_;
+ compiler::FunctionTester get_data_ft_;
+ compiler::FunctionTester put_ft_;
+ compiler::FunctionTester delete_ft_;
+ compiler::FunctionTester add_ft_;
+ compiler::FunctionTester allocate_ft_;
+ compiler::FunctionTester get_counts_ft_;
+ compiler::FunctionTester copy_ft_;
+
+ // Used to create the FunctionTesters above.
+ static Handle<Code> create_get_data(Isolate* isolate);
+ static Handle<Code> create_find_entry(Isolate* isolate);
+ static Handle<Code> create_put(Isolate* isolate);
+ static Handle<Code> create_delete(Isolate* isolate);
+ static Handle<Code> create_add(Isolate* isolate);
+ static Handle<Code> create_allocate(Isolate* isolate);
+ static Handle<Code> create_get_counts(Isolate* isolate);
+ static Handle<Code> create_copy(Isolate* isolate);
+
+ // Number of parameters of each of the tester functions above.
+ static constexpr int kFindEntryParams = 2; // (table, key)
+ static constexpr int kGetDataParams = 2; // (table, entry)
+ static constexpr int kPutParams = 4; // (table, entry, value, details)
+ static constexpr int kDeleteParams = 2; // (table, entry)
+ static constexpr int kAddParams = 4; // (table, key, value, details)
+ static constexpr int kAllocateParams = 1; // (capacity)
+ static constexpr int kGetCountsParams = 1; // (table)
+ static constexpr int kCopyParams = 1; // (table)
+};
+
+CSATestRunner::CSATestRunner(Isolate* isolate, int initial_capacity,
+ KeyCache& keys)
+ : isolate_{isolate},
+ reference_{isolate_->factory()->NewSwissNameDictionaryWithCapacity(
+ initial_capacity, AllocationType::kYoung)},
+ find_entry_ft_(create_find_entry(isolate), kFindEntryParams),
+ get_data_ft_(create_get_data(isolate), kGetDataParams),
+ put_ft_{create_put(isolate), kPutParams},
+ delete_ft_{create_delete(isolate), kDeleteParams},
+ add_ft_{create_add(isolate), kAddParams},
+ allocate_ft_{create_allocate(isolate), kAllocateParams},
+ get_counts_ft_{create_get_counts(isolate), kGetCountsParams},
+ copy_ft_{create_copy(isolate), kCopyParams} {
+ Allocate(handle(Smi::FromInt(initial_capacity), isolate));
+}
+
+void CSATestRunner::Add(Handle<Name> key, Handle<Object> value,
+ PropertyDetails details) {
+ ReadOnlyRoots roots(isolate_);
+ reference_ =
+ SwissNameDictionary::Add(isolate_, reference_, key, value, details);
+
+ Handle<Smi> details_smi = handle(details.AsSmi(), isolate_);
+ Handle<Oddball> success =
+ add_ft_.CallChecked<Oddball>(table, key, value, details_smi);
+
+ if (*success == roots.false_value()) {
+ // |add_ft_| does not resize and indicates the need to do so by returning
+ // false.
+ int capacity = table->Capacity();
+ int used_capacity = table->UsedCapacity();
+ CHECK_GT(used_capacity + 1,
+ SwissNameDictionary::MaxUsableCapacity(capacity));
+
+ table = SwissNameDictionary::Add(isolate_, table, key, value, details);
+ }
+
+ CheckAgainstReference();
+}
+
+void CSATestRunner::Allocate(Handle<Smi> capacity) {
+ // We must handle |capacity| == 0 specially, because
+ // AllocateSwissNameDictionary (just like AllocateNameDictionary) always
+ // returns a non-zero sized table.
+ if (capacity->value() == 0) {
+ table = ReadOnlyRoots(isolate_).empty_swiss_property_dictionary_handle();
+ } else {
+ table = allocate_ft_.CallChecked<SwissNameDictionary>(capacity);
+ }
+
+ CheckAgainstReference();
+}
+
+InternalIndex CSATestRunner::FindEntry(Handle<Name> key) {
+ Handle<Smi> index = find_entry_ft_.CallChecked<Smi>(table, key);
+ if (index->value() == SwissNameDictionary::kNotFoundSentinel) {
+ return InternalIndex::NotFound();
+ } else {
+ return InternalIndex(index->value());
+ }
+}
+
+Handle<FixedArray> CSATestRunner::GetData(InternalIndex entry) {
+ DCHECK(entry.is_found());
+
+ return get_data_ft_.CallChecked<FixedArray>(
+ table, handle(Smi::FromInt(entry.as_int()), isolate_));
+}
+
+void CSATestRunner::CheckCounts(base::Optional<int> capacity,
+ base::Optional<int> elements,
+ base::Optional<int> deleted) {
+ Handle<FixedArray> counts = get_counts_ft_.CallChecked<FixedArray>(table);
+
+ if (capacity.has_value()) {
+ CHECK_EQ(Smi::FromInt(capacity.value()), counts->get(0));
+ }
+
+ if (elements.has_value()) {
+ CHECK_EQ(Smi::FromInt(elements.value()), counts->get(1));
+ }
+
+ if (deleted.has_value()) {
+ CHECK_EQ(Smi::FromInt(deleted.value()), counts->get(2));
+ }
+
+ CheckAgainstReference();
+}
+
+void CSATestRunner::CheckEnumerationOrder(
+ const std::vector<std::string>& expected_keys) {
+ // Not implemented in CSA. Making this a no-op (rather than forbidding
+ // executing CSA tests with this operation) because CheckEnumerationOrder is
+ // also used by some tests whose main goal is not to test the enumeration
+ // order.
+}
+
+void CSATestRunner::Put(InternalIndex entry, Handle<Object> new_value,
+ PropertyDetails new_details) {
+ DCHECK(entry.is_found());
+ reference_->ValueAtPut(entry, *new_value);
+ reference_->DetailsAtPut(entry, new_details);
+
+ Handle<Smi> entry_smi = handle(Smi::FromInt(entry.as_int()), isolate_);
+ Handle<Smi> details_smi = handle(new_details.AsSmi(), isolate_);
+
+ put_ft_.Call(table, entry_smi, new_value, details_smi);
+
+ CheckAgainstReference();
+}
+
+void CSATestRunner::Delete(InternalIndex entry) {
+ DCHECK(entry.is_found());
+ reference_ = SwissNameDictionary::DeleteEntry(isolate_, reference_, entry);
+
+ Handle<Smi> entry_smi = handle(Smi::FromInt(entry.as_int()), isolate_);
+ table = delete_ft_.CallChecked<SwissNameDictionary>(table, entry_smi);
+
+ CheckAgainstReference();
+}
+
+void CSATestRunner::RehashInplace() {
+ // There's no CSA version of this. Use IsRuntimeTest to ensure that we only
+ // run a test using this if it's a runtime test.
+ UNREACHABLE();
+}
+
+void CSATestRunner::Shrink() {
+ // There's no CSA version of this. Use IsRuntimeTest to ensure that we only
+ // run a test using this if it's a runtime test.
+ UNREACHABLE();
+}
+
+void CSATestRunner::CheckCopy() {
+ Handle<SwissNameDictionary> copy =
+ copy_ft_.CallChecked<SwissNameDictionary>(table);
+ CHECK(table->EqualsForTesting(*copy));
+}
+
+void CSATestRunner::VerifyHeap() {
+#if VERIFY_HEAP
+ table->SwissNameDictionaryVerify(isolate_, true);
+#endif
+}
+
+void CSATestRunner::PrintTable() {
+#ifdef OBJECT_PRINT
+ table->SwissNameDictionaryPrint(std::cout);
+#endif
+}
+
+Handle<Code> CSATestRunner::create_find_entry(Isolate* isolate) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!IsEnabled()) {
+ return isolate->builtins()->builtin_handle(Builtins::kIllegal);
+ }
+ STATIC_ASSERT(kFindEntryParams == 2); // (table, key)
+ compiler::CodeAssemblerTester asm_tester(isolate, kFindEntryParams + 1);
+ CodeStubAssembler m(asm_tester.state());
+ {
+ TNode<SwissNameDictionary> table = m.Parameter<SwissNameDictionary>(1);
+ TNode<Name> key = m.Parameter<Name>(2);
+
+ Label done(&m);
+ TVariable<IntPtrT> entry_var(
+ m.IntPtrConstant(SwissNameDictionary::kNotFoundSentinel), &m);
+
+ // |entry_var| defaults to |kNotFoundSentinel| meaning that one label
+ // suffices.
+ m.SwissNameDictionaryFindEntry(table, key, &done, &entry_var, &done);
+
+ m.Bind(&done);
+ m.Return(m.SmiFromIntPtr(entry_var.value()));
+ }
+
+ return asm_tester.GenerateCodeCloseAndEscape();
+}
+
+Handle<Code> CSATestRunner::create_get_data(Isolate* isolate) {
+ STATIC_ASSERT(kGetDataParams == 2); // (table, entry)
+ compiler::CodeAssemblerTester asm_tester(isolate, kGetDataParams + 1);
+ CodeStubAssembler m(asm_tester.state());
+ {
+ TNode<SwissNameDictionary> table = m.Parameter<SwissNameDictionary>(1);
+ TNode<IntPtrT> entry = m.SmiToIntPtr(m.Parameter<Smi>(2));
+
+ TNode<FixedArray> data = m.AllocateZeroedFixedArray(m.IntPtrConstant(3));
+
+ TNode<Object> key = m.LoadSwissNameDictionaryKey(table, entry);
+ TNode<Object> value = m.LoadValueByKeyIndex(table, entry);
+ TNode<Smi> details = m.SmiFromUint32(m.LoadDetailsByKeyIndex(table, entry));
+
+ m.StoreFixedArrayElement(data, 0, key);
+ m.StoreFixedArrayElement(data, 1, value);
+ m.StoreFixedArrayElement(data, 2, details);
+
+ m.Return(data);
+ }
+ return asm_tester.GenerateCodeCloseAndEscape();
+}
+
+Handle<Code> CSATestRunner::create_put(Isolate* isolate) {
+ STATIC_ASSERT(kPutParams == 4); // (table, entry, value, details)
+ compiler::CodeAssemblerTester asm_tester(isolate, kPutParams + 1);
+ CodeStubAssembler m(asm_tester.state());
+ {
+ TNode<SwissNameDictionary> table = m.Parameter<SwissNameDictionary>(1);
+ TNode<Smi> entry = m.Parameter<Smi>(2);
+ TNode<Object> value = m.Parameter<Object>(3);
+ TNode<Smi> details = m.Parameter<Smi>(4);
+
+ TNode<IntPtrT> entry_intptr = m.SmiToIntPtr(entry);
+
+ m.StoreValueByKeyIndex(table, entry_intptr, value,
+ WriteBarrierMode::UPDATE_WRITE_BARRIER);
+ m.StoreDetailsByKeyIndex(table, entry_intptr, details);
+
+ m.Return(m.UndefinedConstant());
+ }
+ return asm_tester.GenerateCodeCloseAndEscape();
+}
+
+Handle<Code> CSATestRunner::create_delete(Isolate* isolate) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!IsEnabled()) {
+ return isolate->builtins()->builtin_handle(Builtins::kIllegal);
+ }
+ STATIC_ASSERT(kDeleteParams == 2); // (table, entry)
+ compiler::CodeAssemblerTester asm_tester(isolate, kDeleteParams + 1);
+ CodeStubAssembler m(asm_tester.state());
+ {
+ TNode<SwissNameDictionary> table = m.Parameter<SwissNameDictionary>(1);
+ TNode<IntPtrT> entry = m.SmiToIntPtr(m.Parameter<Smi>(2));
+
+ TVariable<SwissNameDictionary> shrunk_table_var(table, &m);
+ Label done(&m);
+
+ m.SwissNameDictionaryDelete(table, entry, &done, &shrunk_table_var);
+ m.Goto(&done);
+
+ m.Bind(&done);
+ m.Return(shrunk_table_var.value());
+ }
+ return asm_tester.GenerateCodeCloseAndEscape();
+}
+
+Handle<Code> CSATestRunner::create_add(Isolate* isolate) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!IsEnabled()) {
+ return isolate->builtins()->builtin_handle(Builtins::kIllegal);
+ }
+ STATIC_ASSERT(kAddParams == 4); // (table, key, value, details)
+ compiler::CodeAssemblerTester asm_tester(isolate, kAddParams + 1);
+ CodeStubAssembler m(asm_tester.state());
+ {
+ TNode<SwissNameDictionary> table = m.Parameter<SwissNameDictionary>(1);
+ TNode<Name> key = m.Parameter<Name>(2);
+ TNode<Object> value = m.Parameter<Object>(3);
+ TNode<Smi> details = m.Parameter<Smi>(4);
+
+ Label needs_resize(&m);
+
+ TNode<Int32T> d32 = m.SmiToInt32(details);
+ TNode<Uint8T> d = m.UncheckedCast<Uint8T>(d32);
+
+ m.SwissNameDictionaryAdd(table, key, value, d, &needs_resize);
+ m.Return(m.TrueConstant());
+
+ m.Bind(&needs_resize);
+ m.Return(m.FalseConstant());
+ }
+ return asm_tester.GenerateCodeCloseAndEscape();
+}
+
+Handle<Code> CSATestRunner::create_allocate(Isolate* isolate) {
+ STATIC_ASSERT(kAllocateParams == 1); // (capacity)
+ compiler::CodeAssemblerTester asm_tester(isolate, kAllocateParams + 1);
+ CodeStubAssembler m(asm_tester.state());
+ {
+ TNode<IntPtrT> capacity = m.SmiToIntPtr(m.Parameter<Smi>(1));
+
+ TNode<SwissNameDictionary> table =
+ m.AllocateSwissNameDictionaryWithCapacity(capacity);
+
+ m.Return(table);
+ }
+ return asm_tester.GenerateCodeCloseAndEscape();
+}
+
+Handle<Code> CSATestRunner::create_get_counts(Isolate* isolate) {
+ STATIC_ASSERT(kGetCountsParams == 1); // (table)
+ compiler::CodeAssemblerTester asm_tester(isolate, kGetCountsParams + 1);
+ CodeStubAssembler m(asm_tester.state());
+ {
+ TNode<SwissNameDictionary> table = m.Parameter<SwissNameDictionary>(1);
+
+ TNode<IntPtrT> capacity =
+ m.ChangeInt32ToIntPtr(m.LoadSwissNameDictionaryCapacity(table));
+ TNode<IntPtrT> elements =
+ m.LoadSwissNameDictionaryNumberOfElements(table, capacity);
+ TNode<IntPtrT> deleted =
+ m.LoadSwissNameDictionaryNumberOfDeletedElements(table, capacity);
+
+ TNode<FixedArray> results = m.AllocateZeroedFixedArray(m.IntPtrConstant(3));
+
+ auto check_and_add = [&](TNode<IntPtrT> value, int array_index) {
+ CSA_ASSERT(&m, m.UintPtrGreaterThanOrEqual(value, m.IntPtrConstant(0)));
+ CSA_ASSERT(&m, m.UintPtrLessThanOrEqual(
+ value, m.IntPtrConstant(Smi::kMaxValue)));
+ TNode<Smi> smi = m.SmiFromIntPtr(value);
+ m.StoreFixedArrayElement(results, array_index, smi);
+ };
+
+ check_and_add(capacity, 0);
+ check_and_add(elements, 1);
+ check_and_add(deleted, 2);
+
+ m.Return(results);
+ }
+ return asm_tester.GenerateCodeCloseAndEscape();
+}
+
+Handle<Code> CSATestRunner::create_copy(Isolate* isolate) {
+ STATIC_ASSERT(kCopyParams == 1); // (table)
+ compiler::CodeAssemblerTester asm_tester(isolate, kCopyParams + 1);
+ CodeStubAssembler m(asm_tester.state());
+ {
+ TNode<SwissNameDictionary> table = m.Parameter<SwissNameDictionary>(1);
+
+ m.Return(m.CopySwissNameDictionary(table));
+ }
+ return asm_tester.GenerateCodeCloseAndEscape();
+}
+
+void CSATestRunner::CheckAgainstReference() {
+ CHECK(table->EqualsForTesting(*reference_));
+}
+
+// Executes the tests defined in test-swiss-name-dictionary-shared-tests.h as if
+// they were defined in this file, using the CSATestRunner. See comments in
+// test-swiss-name-dictionary-shared-tests.h and in
+// test-swiss-name-dictionary-infra.h for details.
+const char kCSATestFileName[] = __FILE__;
+SharedSwissTableTests<CSATestRunner, kCSATestFileName> execute_shared_tests_csa;
+
+#endif
+
+} // namespace test_swiss_hash_table
+} // namespace internal
+} // namespace v8
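
The CSATestRunner above mirrors every dictionary operation on a plain C++ SwissNameDictionary (reference_) and then compares the table produced by the CSA-generated code against it via CheckAgainstReference(). Below is a minimal, self-contained sketch of that differential-testing pattern, with standard containers as hypothetical stand-ins for the CSA table and the C++ reference (MirroredRunner and its members are illustrative names, not V8 code):

#include <cassert>
#include <map>
#include <string>
#include <unordered_map>

// Stand-in "runner": every mutation is mirrored on a reference container and
// the two containers are compared after each step, mirroring the structure of
// CSATestRunner::Add/Delete followed by CheckAgainstReference.
class MirroredRunner {
 public:
  void Add(const std::string& key, int value) {
    reference_[key] = value;  // Reference implementation (the "C++ side").
    table_[key] = value;      // Implementation under test (the "CSA side").
    CheckAgainstReference();
  }

  void Delete(const std::string& key) {
    reference_.erase(key);
    table_.erase(key);
    CheckAgainstReference();
  }

 private:
  void CheckAgainstReference() {
    assert(table_.size() == reference_.size());
    for (const auto& kv : reference_) {
      auto it = table_.find(kv.first);
      assert(it != table_.end() && it->second == kv.second);
    }
  }

  std::map<std::string, int> reference_;
  std::unordered_map<std::string, int> table_;
};

int main() {
  MirroredRunner runner;
  runner.Add("foo", 1);
  runner.Add("bar", 2);
  runner.Delete("foo");
  return 0;
}

Keeping the comparison inside the runner is what lets the shared tests exercise both the runtime and the CSA implementation without duplicating any test logic.
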
diff --git a/deps/v8/test/cctest/test-swiss-name-dictionary-infra.cc b/deps/v8/test/cctest/test-swiss-name-dictionary-infra.cc
new file mode 100644
index 00000000000..539d71c823f
--- /dev/null
+++ b/deps/v8/test/cctest/test-swiss-name-dictionary-infra.cc
@@ -0,0 +1,139 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/test-swiss-name-dictionary-infra.h"
+
+namespace v8 {
+namespace internal {
+namespace test_swiss_hash_table {
+
+namespace {
+std::vector<PropertyDetails> MakeDistinctDetails() {
+ std::vector<PropertyDetails> result(32, PropertyDetails::Empty());
+
+ int i = 0;
+  for (PropertyKind kind : {PropertyKind::kData, PropertyKind::kAccessor}) {
+ for (PropertyConstness constness :
+ {PropertyConstness::kConst, PropertyConstness::kMutable}) {
+ for (bool writeable : {true, false}) {
+ for (bool enumerable : {true, false}) {
+ for (bool configurable : {true, false}) {
+ uint8_t attrs = static_cast<uint8_t>(PropertyAttributes::NONE);
+ if (!writeable) attrs |= PropertyAttributes::READ_ONLY;
+ if (!enumerable) {
+ attrs |= PropertyAttributes::DONT_ENUM;
+ }
+ if (!configurable) {
+ attrs |= PropertyAttributes::DONT_DELETE;
+ }
+ PropertyAttributes attributes =
+ static_cast<PropertyAttributes>(attrs);
+ PropertyDetails details(kind, attributes,
+ PropertyCellType::kNoCell);
+ details = details.CopyWithConstness(constness);
+ result[i++] = details;
+ }
+ }
+ }
+ }
+ }
+ return result;
+}
+
+} // namespace
+
+// To enable more specific testing, we allow overriding the H1 and H2 hashes for
+// a key before adding it to the SwissNameDictionary. The necessary overriding
+// of the stored hash happens here. Because Symbols are compared by identity, we
+// cache the Symbol associated with each std::string key. This means that using
+// "my_key" twice in the same TestSequence will return the same Symbol
+// associated with "my_key" both times. It also means that within a given
+// TestSequence, we cannot use the same (std::string) key with different faked
+// hashes.
+Handle<Name> CreateKeyWithHash(Isolate* isolate, KeyCache& keys,
+ const Key& key) {
+ Handle<Symbol> key_symbol;
+ auto iter = keys.find(key.str);
+
+ if (iter == keys.end()) {
+    // We haven't seen the given string as a key in the current TestSequence.
+    // Create it, fake its hash if requested, and cache it.
+
+ key_symbol = isolate->factory()->NewSymbol();
+
+ // We use the description field to store the original string key for
+ // debugging.
+ Handle<String> description =
+ isolate->factory()->NewStringFromAsciiChecked(key.str.c_str());
+ key_symbol->set_description(*description);
+
+ CachedKey new_info = {key_symbol, key.h1_override, key.h2_override};
+ keys[key.str] = new_info;
+
+ if (key.h1_override || key.h2_override) {
+ uint32_t actual_hash = key_symbol->hash();
+ int fake_hash = actual_hash;
+ if (key.h1_override) {
+ uint32_t override_with = key.h1_override.value().value;
+
+ // We cannot override h1 with 0 unless we also override h2 with a
+ // non-zero value. Otherwise, the overall hash may become 0 (which is
+        // forbidden) based on the (nondeterministic) choice of h2.
+ CHECK_IMPLIES(override_with == 0,
+ key.h2_override && key.h2_override.value().value != 0);
+
+ fake_hash = (override_with << swiss_table::kH2Bits) |
+ swiss_table::H2(actual_hash);
+ }
+ if (key.h2_override) {
+ // Unset 7 bits belonging to H2:
+ fake_hash &= ~((1 << swiss_table::kH2Bits) - 1);
+
+ uint8_t override_with = key.h2_override.value().value;
+
+ // Same as above, but for h2: Prevent accidentally creating 0 fake hash.
+ CHECK_IMPLIES(override_with == 0,
+ key.h1_override && key.h1_override.value().value != 0);
+
+ CHECK_LT(key.h2_override.value().value, 1 << swiss_table::kH2Bits);
+ fake_hash |= swiss_table::H2(override_with);
+ }
+
+ // Ensure that just doing a shift below is correct.
+ static_assert(Name::kNofHashBitFields == 2, "This test needs updating");
+ static_assert(Name::kHashNotComputedMask == 1,
+ "This test needs updating");
+ static_assert(Name::kIsNotIntegerIndexMask == 2,
+ "This test needs updating");
+
+ // Prepare what to put into the hash field.
+ uint32_t hash_field = fake_hash << Name::kHashShift;
+ CHECK_NE(hash_field, 0);
+
+ key_symbol->set_raw_hash_field(hash_field);
+ CHECK_EQ(fake_hash, key_symbol->hash());
+ }
+
+ return key_symbol;
+ } else {
+ // We've seen this key before. Return the cached version.
+ CachedKey& cached_info = iter->second;
+
+ // Internal consistency check: Make sure that we didn't request something
+ // else w.r.t. hash faking when using this key before. If so, the test case
+ // would make inconsistent assumptions about how the hashes should be faked
+ // and be broken.
+ CHECK_EQ(cached_info.h1_override, key.h1_override);
+ CHECK_EQ(cached_info.h2_override, key.h2_override);
+
+ return cached_info.key_symbol;
+ }
+}
+
+const std::vector<PropertyDetails> distinct_property_details =
+ MakeDistinctDetails();
+
+} // namespace test_swiss_hash_table
+} // namespace internal
+} // namespace v8
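
CreateKeyWithHash above keeps the key's real H2 bits while substituting the faked H1, and can then splice a faked H2 on top. A standalone sketch of that bit manipulation follows, assuming kH2Bits == 7 (as implied by the "Unset 7 bits belonging to H2" comment) and that H2 occupies the low bits of the hash; the helper names are illustrative, not V8's:

#include <cassert>
#include <cstdint>

// Assumed layout: H2 lives in the low kH2Bits bits, H1 in the remaining high
// bits (hypothetical constants for illustration only).
constexpr uint32_t kH2Bits = 7;
constexpr uint32_t H2(uint32_t hash) { return hash & ((1u << kH2Bits) - 1); }

// Replace H1 but keep the real H2 of |actual_hash|.
uint32_t OverrideH1(uint32_t actual_hash, uint32_t h1) {
  return (h1 << kH2Bits) | H2(actual_hash);
}

// Clear the low kH2Bits bits and splice in the faked H2.
uint32_t OverrideH2(uint32_t fake_hash, uint8_t h2) {
  return (fake_hash & ~((1u << kH2Bits) - 1)) | H2(h2);
}

int main() {
  uint32_t faked = OverrideH2(OverrideH1(0x12345678u, /*h1=*/3), /*h2=*/5);
  assert(H2(faked) == 5);           // H2 is fully controlled.
  assert((faked >> kH2Bits) == 3);  // H1 is fully controlled.
  return 0;
}

This also makes the consistency checks in CreateKeyWithHash easy to see: an overall hash of 0 can only arise if both the faked H1 and the faked H2 are zero, which is exactly what the CHECK_IMPLIES conditions rule out.
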
diff --git a/deps/v8/test/cctest/test-swiss-name-dictionary-infra.h b/deps/v8/test/cctest/test-swiss-name-dictionary-infra.h
new file mode 100644
index 00000000000..60ac78477a4
--- /dev/null
+++ b/deps/v8/test/cctest/test-swiss-name-dictionary-infra.h
@@ -0,0 +1,321 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TEST_CCTEST_TEST_SWISS_NAME_DICTIONARY_INFRA_H_
+#define V8_TEST_CCTEST_TEST_SWISS_NAME_DICTIONARY_INFRA_H_
+
+#include <memory>
+#include <utility>
+
+#include "src/codegen/code-stub-assembler.h"
+#include "src/init/v8.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/swiss-name-dictionary-inl.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/code-assembler-tester.h"
+#include "test/cctest/compiler/function-tester.h"
+
+namespace v8 {
+namespace internal {
+namespace test_swiss_hash_table {
+
+using Value = std::string;
+using ValueOpt = base::Optional<Value>;
+using PropertyDetailsOpt = base::Optional<PropertyDetails>;
+using IndexOpt = base::Optional<InternalIndex>;
+
+static const ValueOpt kNoValue;
+static const PropertyDetailsOpt kNoDetails;
+static const base::Optional<int> kNoInt;
+static const IndexOpt kIndexUnknown;
+
+static const std::vector<int> interesting_initial_capacities = {
+ 4,
+ 8,
+ 16,
+ 128,
+ 1 << (sizeof(uint16_t) * 8),
+ 1 << (sizeof(uint16_t) * 8 + 1)};
+
+// Capacities for tests that may timeout on larger capacities when
+// sanitizers/CFI are enabled.
+// TODO(v8:11330) Revisit this once the actual CSA/Torque versions are run by
+// the test suite, which will speed things up.
+#if defined(THREAD_SANITIZER) || defined(V8_ENABLE_CONTROL_FLOW_INTEGRITY)
+static const std::vector<int> capacities_for_slow_sanitizer_tests = {4, 8, 16,
+ 128, 1024};
+#else
+static const std::vector<int> capacities_for_slow_sanitizer_tests =
+ interesting_initial_capacities;
+#endif
+
+// Capacities for tests that are generally slow, so that they don't use the
+// maximum capacities in debug mode.
+// TODO(v8:11330) Revisit this once the actual CSA/Torque versions are run by
+// the test suite, which will speed things up.
+#if DEBUG
+static const std::vector<int> capacities_for_slow_debug_tests = {4, 8, 16, 128,
+ 1024};
+#else
+static const std::vector<int> capacities_for_slow_debug_tests =
+ interesting_initial_capacities;
+#endif
+
+extern const std::vector<PropertyDetails> distinct_property_details;
+
+// Wrapping this in a struct makes the tests a bit more readable.
+struct FakeH1 {
+ uint32_t value;
+
+ explicit FakeH1(int value) : value{static_cast<uint32_t>(value)} {}
+
+ bool operator==(const FakeH1& other) const { return value == other.value; }
+};
+
+// Wrapping this in a struct makes the tests a bit more readable.
+struct FakeH2 {
+ uint8_t value;
+
+ bool operator==(const FakeH2& other) const { return value == other.value; }
+};
+
+using FakeH1Opt = base::Optional<FakeH1>;
+using FakeH2Opt = base::Optional<FakeH2>;
+
+// Representation of keys used when writing test cases.
+struct Key {
+ std::string str;
+
+ // If present, contains the value we faked the key's H1 hash with.
+ FakeH1Opt h1_override = FakeH1Opt();
+
+ // If present, contains the value we faked the key's H2 hash with.
+ FakeH2Opt h2_override = FakeH2Opt();
+};
+
+// Internal representation of keys. See |CreateKeyWithHash| for details.
+struct CachedKey {
+ Handle<Symbol> key_symbol;
+
+ // If present, contains the value we faked the key's H1 hash with.
+ FakeH1Opt h1_override;
+
+ // If present, contains the value we faked the key's H2 hash with.
+ FakeH2Opt h2_override;
+};
+
+using KeyCache = std::unordered_map<std::string, CachedKey>;
+
+Handle<Name> CreateKeyWithHash(Isolate* isolate, KeyCache& keys,
+ const Key& key);
+
+class RuntimeTestRunner;
+class CSATestRunner;
+
+// Abstraction over executing a sequence of operations on a single hash table.
+// Actually performing those operations is done by the TestRunner.
+template <typename TestRunner>
+class TestSequence {
+ public:
+ explicit TestSequence(Isolate* isolate, int initial_capacity)
+ : isolate{isolate},
+ initial_capacity{initial_capacity},
+ keys_{},
+ runner_{isolate, initial_capacity, keys_} {}
+
+ // Determines whether or not to run VerifyHeap after each operation. Can make
+ // debugging easier.
+ static constexpr bool kVerifyAfterEachStep = false;
+
+ void Add(Handle<Name> key, Handle<Object> value, PropertyDetails details) {
+ runner_.Add(key, value, details);
+
+ if (kVerifyAfterEachStep) {
+ runner_.VerifyHeap();
+ }
+ }
+
+ void Add(const Key& key, ValueOpt value = kNoValue,
+ PropertyDetailsOpt details = kNoDetails) {
+ if (!value) {
+ value = "dummy_value";
+ }
+
+ if (!details) {
+ details = PropertyDetails::Empty();
+ }
+
+ Handle<Name> key_handle = CreateKeyWithHash(isolate, keys_, key);
+ Handle<Object> value_handle = isolate->factory()->NewStringFromAsciiChecked(
+ value.value().c_str(), AllocationType::kYoung);
+
+ Add(key_handle, value_handle, details.value());
+ }
+
+ void UpdateByKey(Handle<Name> key, Handle<Object> new_value,
+ PropertyDetails new_details) {
+ InternalIndex entry = runner_.FindEntry(key);
+ CHECK(entry.is_found());
+ runner_.Put(entry, new_value, new_details);
+
+ if (kVerifyAfterEachStep) {
+ runner_.VerifyHeap();
+ }
+ }
+
+ void UpdateByKey(const Key& existing_key, Value new_value,
+ PropertyDetails new_details) {
+ Handle<Name> key_handle = CreateKeyWithHash(isolate, keys_, existing_key);
+ Handle<Object> value_handle = isolate->factory()->NewStringFromAsciiChecked(
+ new_value.c_str(), AllocationType::kYoung);
+
+ UpdateByKey(key_handle, value_handle, new_details);
+ }
+
+ void DeleteByKey(Handle<Name> key) {
+ InternalIndex entry = runner_.FindEntry(key);
+ CHECK(entry.is_found());
+ runner_.Delete(entry);
+
+ if (kVerifyAfterEachStep) {
+ runner_.VerifyHeap();
+ }
+ }
+
+ void DeleteByKey(const Key& existing_key) {
+ Handle<Name> key_handle = CreateKeyWithHash(isolate, keys_, existing_key);
+
+ DeleteByKey(key_handle);
+ }
+
+ void CheckDataAtKey(Handle<Name> key, IndexOpt expected_index_opt,
+ base::Optional<Handle<Object>> expected_value_opt,
+ PropertyDetailsOpt expected_details_opt) {
+ InternalIndex actual_index = runner_.FindEntry(key);
+
+ if (expected_index_opt) {
+ CHECK_EQ(expected_index_opt.value(), actual_index);
+ }
+
+ if (actual_index.is_found()) {
+ Handle<FixedArray> data = runner_.GetData(actual_index);
+ CHECK_EQ(*key, data->get(0));
+
+ if (expected_value_opt) {
+ CHECK(expected_value_opt.value()->StrictEquals(data->get(1)));
+ }
+
+ if (expected_details_opt) {
+ CHECK_EQ(expected_details_opt.value().AsSmi(), data->get(2));
+ }
+ }
+ }
+
+ void CheckDataAtKey(const Key& expected_key, IndexOpt expected_index,
+ ValueOpt expected_value = kNoValue,
+ PropertyDetailsOpt expected_details = kNoDetails) {
+ Handle<Name> key_handle = CreateKeyWithHash(isolate, keys_, expected_key);
+ base::Optional<Handle<Object>> value_handle_opt;
+ if (expected_value) {
+ value_handle_opt = isolate->factory()->NewStringFromAsciiChecked(
+ expected_value.value().c_str(), AllocationType::kYoung);
+ }
+
+ CheckDataAtKey(key_handle, expected_index, value_handle_opt,
+ expected_details);
+ }
+
+ void CheckKeyAbsent(Handle<Name> key) {
+ CHECK(runner_.FindEntry(key).is_not_found());
+ }
+
+ void CheckKeyAbsent(const Key& expected_key) {
+ Handle<Name> key_handle = CreateKeyWithHash(isolate, keys_, expected_key);
+ CheckKeyAbsent(key_handle);
+ }
+
+ void CheckHasKey(const Key& expected_key) {
+ Handle<Name> key_handle = CreateKeyWithHash(isolate, keys_, expected_key);
+
+ CHECK(runner_.FindEntry(key_handle).is_found());
+ }
+
+ void CheckCounts(base::Optional<int> capacity,
+ base::Optional<int> elements = base::Optional<int>(),
+ base::Optional<int> deleted = base::Optional<int>()) {
+ runner_.CheckCounts(capacity, elements, deleted);
+ }
+
+ void CheckEnumerationOrder(const std::vector<std::string>& keys) {
+ runner_.CheckEnumerationOrder(keys);
+ }
+
+ void RehashInplace() { runner_.RehashInplace(); }
+
+ void Shrink() { runner_.Shrink(); }
+
+ void CheckCopy() { runner_.CheckCopy(); }
+
+ static constexpr bool IsRuntimeTest() {
+ return std::is_same<TestRunner, RuntimeTestRunner>::value;
+ }
+
+ void VerifyHeap() { runner_.VerifyHeap(); }
+
+ // Just for debugging
+ void Print() { runner_.PrintTable(); }
+
+ static std::vector<int> boundary_indices(int capacity) {
+ if (capacity == 4 && SwissNameDictionary::MaxUsableCapacity(4) < 4) {
+ // If we cannot put 4 entries in a capacity 4 table without resizing, just
+ // work with 3 boundary indices.
+ return {0, capacity - 2, capacity - 1};
+ }
+ return {0, 1, capacity - 2, capacity - 1};
+ }
+
+ // Contains all possible PropertyDetails suitable for storing in a
+ // SwissNameDictionary (i.e., PropertyDetails for dictionary mode objects
+ // without storing an enumeration index). Used to ensure that we can correctly
+  // store and retrieve all possible such PropertyDetails.
+ static const std::vector<PropertyDetails> distinct_property_details;
+
+ static void WithAllInterestingInitialCapacities(
+ std::function<void(TestSequence&)> manipulate_sequence) {
+ WithInitialCapacities(interesting_initial_capacities, manipulate_sequence);
+ }
+
+ static void WithInitialCapacity(
+ int capacity, std::function<void(TestSequence&)> manipulate_sequence) {
+ WithInitialCapacities({capacity}, manipulate_sequence);
+ }
+
+ // For each capacity in |capacities|, create a TestSequence and run the given
+ // function on it.
+ static void WithInitialCapacities(
+ const std::vector<int>& capacities,
+ std::function<void(TestSequence&)> manipulate_sequence) {
+ for (int capacity : capacities) {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope{isolate};
+ TestSequence<TestRunner> s(isolate, capacity);
+ manipulate_sequence(s);
+ }
+ }
+
+ Isolate* const isolate;
+ const int initial_capacity;
+
+ private:
+  // Caches keys used in this TestSequence. See |CreateKeyWithHash| for
+ // details.
+ KeyCache keys_;
+ TestRunner runner_;
+};
+
+} // namespace test_swiss_hash_table
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TEST_CCTEST_TEST_SWISS_NAME_DICTIONARY_INFRA_H_
diff --git a/deps/v8/test/cctest/test-swiss-name-dictionary-shared-tests.h b/deps/v8/test/cctest/test-swiss-name-dictionary-shared-tests.h
new file mode 100644
index 00000000000..96ad222b627
--- /dev/null
+++ b/deps/v8/test/cctest/test-swiss-name-dictionary-shared-tests.h
@@ -0,0 +1,942 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TEST_CCTEST_TEST_SWISS_HASH_TABLE_SHARED_TESTS_H_
+#define V8_TEST_CCTEST_TEST_SWISS_HASH_TABLE_SHARED_TESTS_H_
+
+#include <algorithm>
+#include <string>
+
+#include "test/cctest/test-swiss-name-dictionary-infra.h"
+
+namespace v8 {
+namespace internal {
+namespace test_swiss_hash_table {
+
+// The name of the test-*.cc file that executes the tests below with the
+// RuntimeTestRunner.
+extern const char kRuntimeTestFileName[];
+
+// The name of the test-*.cc file that executes the tests below with the
+// CSATestRunner.
+extern const char kCSATestFileName[];
+
+// This class contains test cases for SwissNameDictionary that can be executed
+// by different "test runners", which are supplied as a template parameter. The
+// TestRunner determines how the operations on dictionaries are actually
+// executed. Currently there are two TestRunners: RuntimeTestRunner calls C++
+// functions, whereas CSATestRunner executes dictionary operations by executing
+// CSA-generated code.
+// To execute the tests, just create an instance of the class below with an
+// appropriate TestRunner.
+// Whenever creating an instance of this class in a file bar.cc, the template
+// parameter |kTestFileName| should be set to the name of the file that
+// *instantiates the class* (i.e., "bar.cc"). This ensures that the tests
+// defined below are then registered within the overall cctest machinery as if
+// they were directly written within bar.cc.
+template <typename TestRunner, char const* kTestFileName>
+struct SharedSwissTableTests {
+ STATIC_ASSERT((std::is_same<TestRunner, RuntimeTestRunner>::value) ||
+ (std::is_same<TestRunner, CSATestRunner>::value));
+
+ SharedSwissTableTests() {
+ CHECK(kTestFileName == kRuntimeTestFileName ||
+ kTestFileName == kCSATestFileName);
+ }
+
+ using TS = TestSequence<TestRunner>;
+
+ //
+ // Helpers
+ //
+
+ // We add this value when we want to create fake H1 values to prevent us from
+  // accidentally creating an overall hash of 0, which is forbidden. Since all
+ // H1 values are used modulo the capacity of the table, this has no further
+ // effects. Note that using just this value itself as an H1 value means that a
+ // key will (try to) occupy bucket 0.
+ static const int kBigModulus = (1 << 22);
+ STATIC_ASSERT(SwissNameDictionary::IsValidCapacity(kBigModulus));
+
+  // Returns elements from TS::distinct_property_details in a deterministic
+ // order. Subsequent calls with increasing |index| (and the same |offset|)
+ // will return pairwise different values until |index| has risen by more than
+ // {TS::distinct_property_details.size()}.
+ static PropertyDetails distinct_details(int index, int offset = 0) {
+ int size = static_cast<int>(distinct_property_details.size());
+ return distinct_property_details[(index + offset) % size];
+ }
+
+ // Adds elements at the boundaries of the table, e.g. to buckets 0, 1,
+ // Capacity() - 2, and Capacity() - 1. (But only three of those if the table
+ // can't hold 4 elements without resizing).
+ static void AddAtBoundaries(TS& s) {
+ int capacity = s.initial_capacity;
+ std::vector<int> interesting_indices = s.boundary_indices(capacity);
+
+ s.CheckCounts(capacity, 0, 0);
+
+ int count = 0;
+ for (int index : interesting_indices) {
+ std::string key = "k" + std::to_string(index);
+ std::string value = "v" + std::to_string(index);
+ PropertyDetails details = distinct_details(count++);
+ s.Add(Key{key, FakeH1{index + kBigModulus}}, value, details);
+ }
+
+ // We didn't want to cause a resize:
+ s.CheckCounts(capacity);
+ }
+
+ // Adds |count| entries to the table, using their unmodified hashes, of the
+ // form key_i -> (value_i, details_i), where key_i and value_i are build from
+ // appending the actual index (e.g., 0, ...., counts - 1) to |key_prefix| and
+ // |value_prefix|, respectively. The property details are taken from
+ // |distinct_property_details|.
+ static void AddMultiple(TS& s, int count, std::string key_prefix = "key",
+ std::string value_prefix = "value",
+ int details_offset = 0) {
+ for (int i = 0; i < count; ++i) {
+ std::string key = key_prefix + std::to_string(i);
+ std::string value = value_prefix + std::to_string(i);
+ PropertyDetails d = distinct_details(i);
+ s.Add(Key{key}, value, d);
+ }
+ }
+
+ // Checks that |count| entries exist, as they would have been added by a call
+ // to AddMultiple with the same arguments.
+ static void CheckMultiple(TS& s, int count, std::string key_prefix = "key",
+ std::string value_prefix = "value",
+ int details_offset = 0) {
+ DCHECK_LE(count,
+ SwissNameDictionary::MaxUsableCapacity(s.initial_capacity));
+
+ std::vector<std::string> expected_keys;
+ for (int i = 0; i < count; ++i) {
+ std::string key = key_prefix + std::to_string(i);
+ expected_keys.push_back(key);
+ std::string value = value_prefix + std::to_string(i);
+ int details_index =
+ (details_offset + i) % distinct_property_details.size();
+ PropertyDetails d = distinct_property_details[details_index];
+ s.CheckDataAtKey(Key{key}, kIndexUnknown, value, d);
+ }
+ s.CheckEnumerationOrder(expected_keys);
+ }
+
+ //
+ // Start of actual tests.
+ //
+
+ MEMBER_TEST(Allocation) {
+ TS::WithAllInterestingInitialCapacities([](TS& s) {
+ // The test runner does the allocation automatically.
+ s.CheckCounts(s.initial_capacity, 0, 0);
+ s.VerifyHeap();
+ });
+ }
+
+ // Simple test for adding entries. Also uses non-Symbol keys and non-String
+ // values, which is not supported by the higher-level testing infrastructure.
+ MEMBER_TEST(SimpleAdd) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ TS::WithInitialCapacity(4, [](TS& s) {
+ Handle<String> key1 = s.isolate->factory()->InternalizeUtf8String("foo");
+ Handle<String> value1 =
+ s.isolate->factory()->InternalizeUtf8String("bar");
+ PropertyDetails details1 =
+ PropertyDetails(PropertyKind::kData, PropertyAttributes::DONT_DELETE,
+ PropertyCellType::kNoCell);
+
+ s.CheckCounts(4, 0, 0);
+ s.CheckKeyAbsent(key1);
+
+ s.Add(key1, value1, details1);
+ s.CheckDataAtKey(key1, kIndexUnknown, value1, details1);
+ s.CheckCounts(4, 1, 0);
+
+ Handle<Symbol> key2 = s.isolate->factory()->NewSymbol();
+ Handle<Smi> value2 = handle(Smi::FromInt(123), s.isolate);
+ PropertyDetails details2 =
+ PropertyDetails(PropertyKind::kData, PropertyAttributes::DONT_DELETE,
+ PropertyCellType::kNoCell);
+
+ s.CheckKeyAbsent(key2);
+ s.Add(key2, value2, details2);
+ s.CheckDataAtKey(key2, kIndexUnknown, value2, details2);
+ s.CheckCounts(4, 2, 0);
+ });
+ }
+
+ // Simple test for updating existing entries. Also uses non-Symbol keys and
+ // non-String values, which is not supported by the higher-level testing
+ // infrastructure.
+ MEMBER_TEST(SimpleUpdate) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ TS::WithInitialCapacity(4, [](TS& s) {
+ Handle<String> key1 = s.isolate->factory()->InternalizeUtf8String("foo");
+ Handle<String> value1 =
+ s.isolate->factory()->InternalizeUtf8String("bar");
+ PropertyDetails details1 =
+ PropertyDetails(PropertyKind::kData, PropertyAttributes::DONT_DELETE,
+ PropertyCellType::kNoCell);
+
+ s.Add(key1, value1, details1);
+
+ Handle<Symbol> key2 = s.isolate->factory()->NewSymbol();
+ Handle<Smi> value2 = handle(Smi::FromInt(123), s.isolate);
+ PropertyDetails details2 =
+ PropertyDetails(PropertyKind::kData, PropertyAttributes::DONT_DELETE,
+ PropertyCellType::kNoCell);
+
+ s.Add(key2, value2, details2);
+
+ // Up to here, these are the same operations as in the "SimpleAdd" test.
+
+ Handle<Smi> value1_updated = handle(Smi::FromInt(456), s.isolate);
+ Handle<String> value2_updated =
+ s.isolate->factory()->InternalizeUtf8String("updated");
+ PropertyDetails details1_updated = details2;
+ PropertyDetails details2_updated = details1;
+
+ s.UpdateByKey(key1, value1_updated, details1_updated);
+ s.CheckDataAtKey(key1, kIndexUnknown, value1_updated, details1_updated);
+ s.CheckDataAtKey(key2, kIndexUnknown, value2, details2);
+
+ s.UpdateByKey(key2, value2_updated, details2_updated);
+ s.CheckDataAtKey(key1, kIndexUnknown, value1_updated, details1_updated);
+ s.CheckDataAtKey(key2, kIndexUnknown, value2_updated, details2_updated);
+ s.CheckCounts(4, 2, 0);
+ });
+ }
+
+ // Simple test for deleting existing entries. Also uses non-Symbol keys and
+ // non-String values, which is not supported by the higher-level testing
+ // infrastructure.
+ MEMBER_TEST(SimpleDelete) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ TS::WithInitialCapacity(4, [](TS& s) {
+ Handle<String> key1 = s.isolate->factory()->InternalizeUtf8String("foo");
+ Handle<String> value1 =
+ s.isolate->factory()->InternalizeUtf8String("bar");
+ PropertyDetails details1 =
+ PropertyDetails(PropertyKind::kData, PropertyAttributes::DONT_DELETE,
+ PropertyCellType::kNoCell);
+
+ s.Add(key1, value1, details1);
+
+ Handle<Symbol> key2 = s.isolate->factory()->NewSymbol();
+ Handle<Smi> value2 = handle(Smi::FromInt(123), s.isolate);
+ PropertyDetails details2 =
+ PropertyDetails(PropertyKind::kData, PropertyAttributes::DONT_DELETE,
+ PropertyCellType::kNoCell);
+
+ s.Add(key2, value2, details2);
+
+ // Up to here, these are the same operations as in the "SimpleAdd" test.
+
+ s.DeleteByKey(key1);
+ s.CheckKeyAbsent(key1);
+ s.CheckDataAtKey(key2, kIndexUnknown, value2, details2);
+ s.CheckCounts(4, 1, 1);
+
+ s.DeleteByKey(key2);
+ s.CheckKeyAbsent(key1);
+ s.CheckKeyAbsent(key2);
+ s.CheckCounts(4, 0, 0);
+ });
+ }
+
+ // Adds entries that occupy the boundaries (first and last
+ // buckets) of the hash table.
+ MEMBER_TEST(AddAtBoundaries) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ TS::WithAllInterestingInitialCapacities([](TS& s) {
+ AddAtBoundaries(s);
+
+ int capacity = s.initial_capacity;
+
+ std::vector<int> boundary_indices = s.boundary_indices(capacity);
+ int size = static_cast<int>(boundary_indices.size());
+
+ int count = 0;
+ for (int index : boundary_indices) {
+ std::string key = "k" + std::to_string(index);
+ std::string value = "v" + std::to_string(index);
+ PropertyDetails details = distinct_details(count++);
+
+ s.CheckDataAtKey(Key{key, FakeH1{index + kBigModulus}},
+ InternalIndex(index), value, details);
+ }
+ s.CheckCounts(capacity, size, 0);
+ });
+ }
+
+ // Adds entries that occupy the boundaries of the hash table, then updates
+ // their values and property details.
+ MEMBER_TEST(UpdateAtBoundaries) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ TS::WithAllInterestingInitialCapacities([](TS& s) {
+ AddAtBoundaries(s);
+
+ int capacity = s.initial_capacity;
+
+ std::vector<int> boundary_indices = s.boundary_indices(capacity);
+ int size = static_cast<int>(boundary_indices.size());
+
+ int count = 0;
+ for (int index : boundary_indices) {
+ std::string key = "k" + std::to_string(index);
+ std::string value = "newv" + std::to_string(index);
+ // Setting an offset means we get different PropertyDetails than before.
+ PropertyDetails details = distinct_details(count++, size);
+
+ s.UpdateByKey(Key{key, FakeH1{index + kBigModulus}}, value, details);
+ }
+
+ count = 0;
+ for (int index : boundary_indices) {
+ std::string key = "k" + std::to_string(index);
+ std::string value = "newv" + std::to_string(index);
+ PropertyDetails details = distinct_details(count++, size);
+
+ s.CheckDataAtKey(Key{key, FakeH1{index + kBigModulus}},
+ InternalIndex(index), value, details);
+ }
+ });
+ }
+
+ // Adds entries that occupy the boundaries of the hash table, then deletes
+ // them again.
+ MEMBER_TEST(DeleteAtBoundaries) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ // The maximum value of {TS::boundary_indices(capacity).size()} for any
+ // |capacity|.
+ int count = 4;
+
+ // Due to shrink-on-delete, we create a new dictionary prior to each
+ // deletion, so that we don't re-hash (which would defeat the purpose of
+ // this test).
+ for (int i = 0; i < count; ++i) {
+ // In this iteration, we delete the i-th element of |boundary_indices|.
+
+ TS::WithAllInterestingInitialCapacities([&](TS& s) {
+ std::vector<int> boundary_indices =
+ TS::boundary_indices(s.initial_capacity);
+ int number_of_entries = static_cast<int>(boundary_indices.size());
+ DCHECK_GE(count, number_of_entries);
+
+ if (i >= static_cast<int>(boundary_indices.size())) {
+ // Nothing to do.
+ return;
+ }
+
+ AddAtBoundaries(s);
+
+ int entry_to_delete = boundary_indices[i];
+ int h1 = entry_to_delete + kBigModulus;
+
+ // We know that the key in question was added at bucket
+ // |entry_to_delete| by AddAtBoundaries.
+ Key key = Key{"k" + std::to_string(entry_to_delete), FakeH1{h1}};
+ s.DeleteByKey(key);
+ s.CheckKeyAbsent(key);
+
+ // Account for the fact that a shrink-on-delete may have happened.
+ int expected_capacity = number_of_entries - 1 < s.initial_capacity / 4
+ ? s.initial_capacity / 2
+ : s.initial_capacity;
+ s.CheckCounts(expected_capacity, number_of_entries - 1);
+ });
+ }
+ }
+
+ // Adds entries that occupy the boundaries of the hash table, then adds
+ // further entries targeting the same buckets.
+ MEMBER_TEST(OverwritePresentAtBoundaries) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ TS::WithAllInterestingInitialCapacities([](TS& s) {
+ AddAtBoundaries(s);
+
+ int capacity = s.initial_capacity;
+
+ std::vector<int> boundary_indices = s.boundary_indices(capacity);
+
+ std::vector<std::string> keys, values;
+ std::vector<PropertyDetails> details;
+
+ int count = 0;
+ for (int index : boundary_indices) {
+ std::string key = "additional_k" + std::to_string(index);
+ std::string value = "additional_v" + std::to_string(index);
+
+ PropertyDetails d = distinct_details(count++);
+ keys.push_back(key);
+ values.push_back(value);
+ details.push_back(d);
+ s.Add(Key{key, FakeH1{index + kBigModulus}}, value, d);
+ }
+
+ count = 0;
+ for (int entry : boundary_indices) {
+ std::string key = keys[count];
+ std::string value = values[count];
+ PropertyDetails d = details[count];
+
+ // We don't know the indices where the new entries will land.
+ s.CheckDataAtKey(Key{key, FakeH1{entry + kBigModulus}},
+ base::Optional<InternalIndex>(), value, d);
+ count++;
+ }
+
+ // The entries added by AddAtBoundaries must also still be there, at their
+ // original indices.
+ count = 0;
+ for (int index : boundary_indices) {
+ std::string key = "k" + std::to_string(index);
+ std::string value = "v" + std::to_string(index);
+ PropertyDetails details = distinct_property_details.at(count++);
+ s.CheckDataAtKey(Key{key, FakeH1{index + kBigModulus}},
+ InternalIndex(index), value, details);
+ }
+ });
+ }
+
+ MEMBER_TEST(Empty) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ TS::WithInitialCapacities({0}, [](TS& s) {
+ // FindEntry on empty table succeeds.
+ s.CheckKeyAbsent(Key{"some non-existing key"});
+ });
+
+ TS::WithInitialCapacities({0}, [](TS& s) {
+ PropertyDetails d = PropertyDetails::Empty();
+
+ // Adding to empty table causes resize.
+ s.Add(Key{"some key"}, "some value", d);
+ s.CheckDataAtKey(Key{"some key"}, kIndexUnknown, "some value", d);
+
+ s.CheckCounts(SwissNameDictionary::kInitialCapacity, 1, 0);
+ });
+
+ TS::WithInitialCapacity(0, [](TS& s) { s.CheckEnumerationOrder({}); });
+
+ // Inplace rehashing and shrinking don't have CSA versions.
+ if (TS::IsRuntimeTest()) {
+ TS::WithInitialCapacity(0, [](TS& s) {
+ s.RehashInplace();
+ s.CheckCounts(0, 0, 0);
+ s.VerifyHeap();
+ });
+
+ TS::WithInitialCapacity(0, [](TS& s) {
+ s.Shrink();
+ s.CheckCounts(0, 0, 0);
+ s.VerifyHeap();
+ });
+ }
+ }
+
+ // We test that hash tables get resized/rehashed correctly by repeatedly
+ // adding and deleting elements.
+ MEMBER_TEST(Resize1) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ TS::WithInitialCapacity(0, [](TS& s) {
+ // Should be at least 8 so that we capture the transition from 8 bit to 16
+ // bit meta table entries:
+ const int max_exponent = 9;
+
+ // For all |exponent| between 0 and |max_exponent|, we add 2^|exponent|
+ // entries, and then delete every second one of those. Note that we do
+ // this all on a single table, meaning that the entries from the previous
+ // value of |exponent| are still present.
+ int added = 0;
+ int deleted = 0;
+ int offset = 0;
+ for (int exponent = 0; exponent <= max_exponent; ++exponent) {
+ int count = 1 << exponent;
+ for (int i = 0; i < count; ++i) {
+ std::string key = "key" + std::to_string(offset + i);
+ std::string value = "value" + std::to_string(offset + i);
+
+ s.Add(Key{key}, value, distinct_details(i, offset));
+ ++added;
+ }
+ for (int i = 0; i < count; i += 2) {
+ if (offset + i == 0) {
+ continue;
+ }
+ std::string key = "key" + std::to_string(offset + i);
+ s.DeleteByKey(Key{key});
+ ++deleted;
+ }
+
+ s.CheckCounts(kNoInt, added - deleted, kNoInt);
+ offset += count;
+ }
+
+ // Some internal consistency checks on the test itself:
+ DCHECK_EQ((1 << (max_exponent + 1)) - 1, offset);
+ DCHECK_EQ(offset, added);
+ DCHECK_EQ(offset / 2, deleted);
+
+ // Check that those entries that we expect are indeed present.
+ for (int i = 0; i < offset; i += 2) {
+ std::string key = "key" + std::to_string(i);
+ std::string value = "value" + std::to_string(i);
+
+ s.CheckDataAtKey(Key{key}, kIndexUnknown, value, distinct_details(i));
+ }
+ s.VerifyHeap();
+ });
+ }
+
+ // Check that we resize exactly when expected.
+ MEMBER_TEST(Resize2) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ TS::WithInitialCapacities({4, 8, 16, 128}, [](TS& s) {
+ int count = SwissNameDictionary::MaxUsableCapacity(s.initial_capacity);
+
+ AddMultiple(s, count, "resize2");
+
+ // No resize:
+ s.CheckCounts(s.initial_capacity, count, 0);
+
+ s.Add(Key{"key causing resize"});
+ s.CheckCounts(2 * s.initial_capacity, count + 1, 0);
+ });
+ }
+
+ // There are certain capacities where we can fill every single bucket of the
+ // table before resizing (i.e., the max load factor is 100% for those
+ // particular configurations). Test that this works as intended.
+ MEMBER_TEST(AtFullCapacity) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ // Determine those capacities, allowing 100% max load factor. We trust
+ // MaxUsableCapacity to tell us which capacities those are (e.g., 4 and 8),
+ // because we tested that function separately elsewhere.
+ std::vector<int> capacities_allowing_full_utilization;
+ for (int c = SwissNameDictionary::kInitialCapacity;
+ c <= static_cast<int>(SwissNameDictionary::kGroupWidth); c *= 2) {
+ if (SwissNameDictionary::MaxUsableCapacity(c) == c) {
+ capacities_allowing_full_utilization.push_back(c);
+ }
+ }
+
+ DCHECK_IMPLIES(SwissNameDictionary::kGroupWidth == 16,
+ capacities_allowing_full_utilization.size() > 0);
+
+ TS::WithInitialCapacities(capacities_allowing_full_utilization, [](TS& s) {
+ AddMultiple(s, s.initial_capacity, "k_full_capacity", "v_full_capacity");
+
+ // No resize must have happened.
+ s.CheckCounts(s.initial_capacity, s.initial_capacity, 0);
+
+ CheckMultiple(s, s.initial_capacity, "k_full_capacity",
+ "v_full_capacity");
+
+ // Must make sure that the first |SwissNameDictionary::kGroupWidth|
+ // entries of the ctrl table contain a kEmpty, so that an unsuccessful
+ // search stops instead of going into an infinite loop. Therefore, search
+ // for a fake key whose H1 is 0, making us start from ctrl table bucket 0.
+ s.CheckKeyAbsent(Key{"non_existing_key", FakeH1{0}, FakeH2{1}});
+ });
+ }
+
+ MEMBER_TEST(EnumerationOrder) {
+ // TODO(v8:11330) Disabling this for now until the real CSA testing has
+ // landed.
+ if (true) return;
+
+ // This test times out on sanitizer builds in CSA mode when testing the
+ // larger capacities.
+ // TODO(v8:11330) Revisit this once the actual CSA/Torque versions are run
+ // by the test suite, which will speed things up.
+ std::vector<int> capacities_to_test =
+ TS::IsRuntimeTest() ? interesting_initial_capacities
+ : capacities_for_slow_sanitizer_tests;
+
+ TS::WithInitialCapacities(capacities_to_test, [](TS& s) {
+ std::vector<std::string> expected_keys;
+ int count = std::min(
+ SwissNameDictionary::MaxUsableCapacity(s.initial_capacity), 1000);
+
+ for (int i = 0; i < count; ++i) {
+ std::string key = "enumkey" + std::to_string(i);
+ expected_keys.push_back(key);
+ s.Add(Key{key});
+ }
+ s.CheckEnumerationOrder(expected_keys);
+
+ // Delete some entries.
+
+ std::string last_key = "enumkey" + std::to_string(count - 1);
+ s.DeleteByKey(Key{"enumkey0"});
+ s.DeleteByKey(Key{"enumkey1"});
+ s.DeleteByKey(Key{last_key});
+
+ auto should_be_deleted = [&](const std::string& k) -> bool {
+ return k == "enumkey0" || k == "enumkey1" || k == last_key;
+ };
+ expected_keys.erase(
+ std::remove_if(expected_keys.begin(), expected_keys.end(),
+ should_be_deleted),
+ expected_keys.end());
+ DCHECK_EQ(expected_keys.size(), count - 3);
+
+ s.CheckEnumerationOrder(expected_keys);
+
+ if (s.initial_capacity <= 1024) {
+ // Now cause a resize. Doing + 4 on top of the maximum usable capacity
+ // rather than just + 1 because in the case where the initial capacity
+ // is 4 and the group size is 8, the three deletes above caused a
+ // shrink, which in this case was just a rehash. So we need to add 4
+ // elements to cause a resize.
+ int resize_at =
+ SwissNameDictionary::MaxUsableCapacity(s.initial_capacity) + 4;
+
+ for (int i = count; i < resize_at; ++i) {
+ std::string key = "enumkey" + std::to_string(i);
+ expected_keys.push_back(key);
+ s.Add(Key{key});
+ }
+ s.CheckCounts(2 * s.initial_capacity);
+ s.CheckEnumerationOrder(expected_keys);
+ }
+ });
+ }
+
+ // Make sure that keys with colliding H1 and same H2 don't get mixed up.
+ MEMBER_TEST(SameH2) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ int i = 0;
+ TS::WithAllInterestingInitialCapacities([&](TS& s) {
+ // Let's try a few different values for h1, starting at kBigModulus.
+ int first_h1 = i * 13 + kBigModulus;
+ int second_h1 = first_h1 + s.initial_capacity;
+
+ int first_entry = first_h1 % s.initial_capacity;
+ int second_entry = (first_h1 + 1) % s.initial_capacity;
+
+ // Add two keys with same H1 modulo capacity and same H2.
+ Key k1{"first_key", FakeH1{first_h1}, FakeH2{42}};
+ Key k2{"second_key", FakeH1{second_h1}, FakeH2{42}};
+
+ s.Add(k1, "v1");
+ s.Add(k2, "v2");
+
+ s.CheckDataAtKey(k1, InternalIndex(first_entry), "v1");
+ s.CheckDataAtKey(k2, InternalIndex(second_entry), "v2");
+
+ // Deletion works, too.
+ s.DeleteByKey(k2);
+ s.CheckHasKey(k1);
+ s.CheckKeyAbsent(k2);
+
+ ++i;
+ });
+ }
+
+ // Check that we can delete a key and add it again.
+ MEMBER_TEST(ReAddSameKey) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ TS::WithInitialCapacity(4, [](TS& s) {
+ s.Add(Key{"some_key"}, "some_value", distinct_details(0));
+ s.DeleteByKey(Key{"some_key"});
+ s.Add(Key{"some_key"}, "new_value", distinct_details(1));
+ s.CheckDataAtKey(Key{"some_key"}, kIndexUnknown, "new_value",
+ distinct_details(1));
+ s.CheckEnumerationOrder({"some_key"});
+ });
+ }
+
+ // Make sure that we continue probing if there is no match in the first
+ // group and that the quadratic probing for choosing subsequent groups to
+ // probe works as intended.
+ MEMBER_TEST(BeyondInitialGroup) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ TS::WithInitialCapacity(128, [](TS& s) {
+ int h1 = 33; // Arbitrarily chosen.
+ int count = 37; // Will lead to more than 2 groups being filled.
+
+ for (int i = 0; i < count; ++i) {
+ std::string key = "key" + std::to_string(i);
+ std::string value = "value" + std::to_string(i);
+
+ s.Add(Key{key, FakeH1{h1}}, value);
+ }
+
+ s.CheckDataAtKey(Key{"key36", FakeH1{h1}}, kIndexUnknown, "value36");
+
+ // Deleting something shouldn't disturb further additions.
+ s.DeleteByKey(Key{"key14", FakeH1{h1}});
+ s.DeleteByKey(Key{"key15", FakeH1{h1}});
+ s.DeleteByKey(Key{"key16", FakeH1{h1}});
+ s.DeleteByKey(Key{"key17", FakeH1{h1}});
+
+ s.Add(Key{"key37", FakeH1{h1}}, "value37");
+ s.CheckDataAtKey(Key{"key37", FakeH1{h1}}, kIndexUnknown, "value37");
+ });
+ }
+
+ // Check that we correctly "wrap around" when probing the control table. This
+ // means that when we probe a group starting at a bucket such that there are
+ // fewer than kGroupWidth buckets before the end of the control table, we
+ // (logically) continue at bucket 0. Note that in practice, we use the copy of
+ // the first group stored at the end of the control table.
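+ // For illustration: with capacity 16 and a group width of 8, a probe starting
+ // at bucket 14 (logically) covers buckets 14, 15, 0, 1, ..., 5.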
+ MEMBER_TEST(WrapAround) {
+ // TODO(v8:11330) Disabling this for now until the real CSA testing has
+ // landed.
+ if (true) {
+ return;
+ }
+
+ // This test times out in CSA mode when testing the larger capacities.
+ // TODO(v8:11330) Revisit this once the actual CSA/Torque versions are run
+ // by the test suite, which will speed things up.
+ std::vector<int> capacities_to_test = TS::IsRuntimeTest()
+ ? interesting_initial_capacities
+ : capacities_for_slow_debug_tests;
+
+ int width = SwissNameDictionary::kGroupWidth;
+ for (int offset_from_end = 0; offset_from_end < width; ++offset_from_end) {
+ TS::WithInitialCapacities(capacities_to_test, [&](TS& s) {
+ int capacity = s.initial_capacity;
+ int first_bucket = capacity - offset_from_end;
+
+ // How many entries to add (carefully chosen not to cause a resize).
+ int filler_entries =
+ std::min(width, SwissNameDictionary::MaxUsableCapacity(capacity)) -
+ 1;
+
+ if (first_bucket < 0 ||
+ // No wraparound in this case:
+ first_bucket + filler_entries < capacity) {
+ return;
+ }
+
+ // Starting at bucket |first_bucket|, add a sequence of |kGroupWidth| - 1
+ // entries (or fewer, if the table can't take that many; see the calculation
+ // of |filler_entries| above) in a single collision chain.
+ for (int f = 0; f < filler_entries; ++f) {
+ std::string key = "filler" + std::to_string(f);
+ s.Add(Key{key, FakeH1{first_bucket}});
+ }
+
+ // ... then add a final key which (unless the table is too small) will end up
+ // in the last bucket belonging to the group started at |first_bucket|.
+ // Check that we can indeed find it.
+ s.Add(Key{"final_key", FakeH1{first_bucket}});
+ s.CheckDataAtKey(Key{"final_key", FakeH1{first_bucket}},
+ InternalIndex(filler_entries - offset_from_end));
+
+ // + 1 due to the final key.
+ s.CheckCounts(s.initial_capacity, filler_entries + 1, 0);
+
+ // Now delete the entries in between and make sure that this
+ // doesn't break anything.
+ for (int f = 0; f < filler_entries; ++f) {
+ std::string key = "filler" + std::to_string(f);
+ s.DeleteByKey(Key{key, FakeH1{first_bucket}});
+ }
+
+ s.CheckHasKey(Key{"final_key", FakeH1{first_bucket}});
+ });
+ }
+ }
+
+ MEMBER_TEST(RehashInplace) {
+ // This test may fully fill the table and hardly depends on the underlying
+ // shape (e.g., the meta table structure), so we don't test overly large
+ // capacities.
+ std::vector<int> capacities_to_test = {4, 8, 16, 128, 1024};
+ if (TS::IsRuntimeTest()) {
+ TS::WithInitialCapacities(capacities_to_test, [](TS& s) {
+ if (s.initial_capacity <= 8) {
+ // Add 3 elements, which will not cause a resize. Then delete the
+ // first key before rehashing.
+
+ AddMultiple(s, 3);
+ s.DeleteByKey(Key{"key0"});
+
+ // We shouldn't have done a resize on deletion or addition:
+ s.CheckCounts(s.initial_capacity, 2, 1);
+
+ s.RehashInplace();
+
+ s.CheckDataAtKey(Key{"key1"}, kIndexUnknown, "value1");
+ s.CheckDataAtKey(Key{"key2"}, kIndexUnknown, "value2");
+ s.CheckEnumerationOrder({"key1", "key2"});
+ } else {
+ int count =
+ SwissNameDictionary::MaxUsableCapacity(s.initial_capacity) - 5;
+ AddMultiple(s, count);
+
+ s.DeleteByKey(Key{"key1"});
+ s.DeleteByKey(Key{"key2"});
+ s.DeleteByKey(Key{"key" + std::to_string(count - 1)});
+
+ // We shouldn't have done a resize on deletion or addition:
+ s.CheckCounts(s.initial_capacity, count - 3, 3);
+
+ s.RehashInplace();
+
+ std::vector<std::string> expected_enum_order;
+ for (int i = 0; i < count; ++i) {
+ if (i == 1 || i == 2 || i == count - 1) {
+ // These are the keys we deleted.
+ continue;
+ }
+
+ std::string key = "key" + std::to_string(i);
+ PropertyDetails d =
+ distinct_property_details[i % distinct_property_details.size()];
+ s.CheckDataAtKey(Key{key}, kIndexUnknown,
+ "value" + std::to_string(i), d);
+
+ expected_enum_order.push_back(key);
+ }
+
+ s.CheckEnumerationOrder(expected_enum_order);
+ }
+ });
+ }
+ }
+
+ MEMBER_TEST(Shrink) {
+ if (TS::IsRuntimeTest()) {
+ TS::WithInitialCapacity(32, [&](TS& s) {
+ // Filling less than a fourth of the table:
+ int count = 4;
+
+ AddMultiple(s, count);
+
+ s.Shrink();
+
+ CheckMultiple(s, count, "key", "value", 0);
+
+ // Shrink doesn't shrink to fit, but only halves the capacity.
+ int expected_capacity = s.initial_capacity / 2;
+ s.CheckCounts(expected_capacity, 4, 0);
+
+ s.CheckEnumerationOrder({"key0", "key1", "key2", "key3"});
+ s.VerifyHeap();
+ });
+ }
+ }
+
+ MEMBER_TEST(ShrinkToInitial) {
+ // When shrinking, we never go below SwissNameDictionary::kInitialCapacity.
+ if (TS::IsRuntimeTest()) {
+ TS::WithInitialCapacity(8, [&](TS& s) {
+ s.Shrink();
+
+ s.CheckCounts(SwissNameDictionary::kInitialCapacity, 0, 0);
+ });
+ }
+ }
+
+ MEMBER_TEST(ShrinkOnDelete) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ TS::WithInitialCapacity(32, [](TS& s) {
+ // Adds key0 ... key9:
+ AddMultiple(s, 10);
+
+ // We remove some entries. Each time less than a fourth of the table is
+ // used by present entries, it's shrunk to half.
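+ // E.g., with initial capacity 32: after the third deletion below only 7
+ // entries remain, 7 < 32 / 4, so the table is shrunk to capacity 16.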
+
+ s.DeleteByKey(Key{"key9"});
+ s.DeleteByKey(Key{"key8"});
+
+ s.CheckCounts(32, 8, 2);
+
+ s.DeleteByKey(Key{"key7"});
+
+ // Deleted count is 0 after rehash.
+ s.CheckCounts(16, 7, 0);
+ });
+ }
+
+ MEMBER_TEST(Copy) {
+ // TODO(v8:11330) Disabling this for now until the real CSA testing has
+ // landed.
+ if (true) return;
+
+ // This test times out on sanitizer builds in CSA mode when testing the
+ // larger capacities.
+ // TODO(v8:11330) Revisit this once the actual CSA/Torque versions are run
+ // by the test suite, which will speed things up.
+ std::vector<int> capacities_to_test =
+ TS::IsRuntimeTest() ? interesting_initial_capacities
+ : capacities_for_slow_sanitizer_tests;
+ TS::WithInitialCapacities(capacities_to_test, [](TS& s) {
+ int fill = std::min(
+ 1000,
+ // -2 due to the two manually added keys below.
+ SwissNameDictionary::MaxUsableCapacity(s.initial_capacity) - 2);
+ AddMultiple(s, fill);
+
+ // Occupy the first and last bucket (another key may occupy these already,
+ // but let's not bother with that):
+ s.Add(Key{"first_bucket_key", FakeH1{kBigModulus}});
+ s.Add(Key{"last_bucket_key", FakeH1{s.initial_capacity - 1}});
+
+ // We shouldn't have caused a resize.
+ s.CheckCounts(s.initial_capacity);
+
+ // Creates a copy and compares it against the original. In order to check
+ // copying of a large dictionary, we need to do this before the deletions
+ // below, due to shrink-on-delete kicking in.
+ s.CheckCopy();
+
+ // Let's delete a few entries, most notably the first and last two in enum
+ // order and the keys (potentially) occupying the first and last bucket.
+ s.DeleteByKey(Key{"key0"});
+ if (fill > 1) {
+ s.DeleteByKey(Key{"key1"});
+ }
+ s.DeleteByKey(Key{"first_bucket_key", FakeH1{kBigModulus}});
+ s.DeleteByKey(Key{"last_bucket_key", FakeH1{s.initial_capacity - 1}});
+
+ s.CheckCopy();
+ });
+ }
+};
+
+} // namespace test_swiss_hash_table
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TEST_CCTEST_TEST_SWISS_HASH_TABLE_SHARED_TESTS_H_
diff --git a/deps/v8/test/cctest/test-swiss-name-dictionary.cc b/deps/v8/test/cctest/test-swiss-name-dictionary.cc
index e274eed358c..0aabd5981df 100644
--- a/deps/v8/test/cctest/test-swiss-name-dictionary.cc
+++ b/deps/v8/test/cctest/test-swiss-name-dictionary.cc
@@ -4,11 +4,153 @@
#include "src/objects/swiss-name-dictionary-inl.h"
#include "test/cctest/cctest.h"
+#include "test/cctest/test-swiss-name-dictionary-infra.h"
+#include "test/cctest/test-swiss-name-dictionary-shared-tests.h"
namespace v8 {
namespace internal {
namespace test_swiss_hash_table {
+// Executes tests by executing C++ versions of dictionary operations.
+class RuntimeTestRunner {
+ public:
+ RuntimeTestRunner(Isolate* isolate, int initial_capacity, KeyCache& keys)
+ : isolate_{isolate}, keys_{keys} {
+ table = isolate->factory()->NewSwissNameDictionaryWithCapacity(
+ initial_capacity, AllocationType::kYoung);
+ }
+
+ // The runtime implementation does not depend on CPU features and therefore
+ // always works.
+ static bool IsEnabled() { return true; }
+
+ void Add(Handle<Name> key, Handle<Object> value, PropertyDetails details);
+ InternalIndex FindEntry(Handle<Name> key);
+ // Updates the value and property details of the given entry.
+ void Put(InternalIndex entry, Handle<Object> new_value,
+ PropertyDetails new_details);
+ void Delete(InternalIndex entry);
+ void RehashInplace();
+ void Shrink();
+
+ // Retrieves data associated with |entry|, which must be an index pointing to
+ // an existing entry. The returned array contains the key, value, and
+ // property details, in that order.
+ Handle<FixedArray> GetData(InternalIndex entry);
+
+ // Checks that the current table has the given capacity, number of elements,
+ // and number of deleted elements; only those values that are actually
+ // present (i.e., non-empty optionals) are checked.
+ void CheckCounts(base::Optional<int> capacity, base::Optional<int> elements,
+ base::Optional<int> deleted);
+ // Checks that |expected_keys| contains exactly the keys in the current table,
+ // in the given order.
+ void CheckEnumerationOrder(const std::vector<std::string>& expected_keys);
+ void CheckCopy();
+ void VerifyHeap();
+
+ // Just for debugging.
+ void PrintTable();
+
+ Handle<SwissNameDictionary> table;
+
+ private:
+ Isolate* isolate_;
+ KeyCache& keys_;
+};
+
+void RuntimeTestRunner::Add(Handle<Name> key, Handle<Object> value,
+ PropertyDetails details) {
+ Handle<SwissNameDictionary> updated_table =
+ SwissNameDictionary::Add(isolate_, this->table, key, value, details);
+ this->table = updated_table;
+}
+
+InternalIndex RuntimeTestRunner::FindEntry(Handle<Name> key) {
+ return table->FindEntry(isolate_, key);
+}
+
+Handle<FixedArray> RuntimeTestRunner::GetData(InternalIndex entry) {
+ if (entry.is_found()) {
+ Handle<FixedArray> data = isolate_->factory()->NewFixedArray(3);
+ data->set(0, table->KeyAt(entry));
+ data->set(1, table->ValueAt(entry));
+ data->set(2, table->DetailsAt(entry).AsSmi());
+ return data;
+ } else {
+ return handle(ReadOnlyRoots(isolate_).empty_fixed_array(), isolate_);
+ }
+}
+
+void RuntimeTestRunner::Put(InternalIndex entry, Handle<Object> new_value,
+ PropertyDetails new_details) {
+ CHECK(entry.is_found());
+
+ table->ValueAtPut(entry, *new_value);
+ table->DetailsAtPut(entry, new_details);
+}
+
+void RuntimeTestRunner::Delete(InternalIndex entry) {
+ CHECK(entry.is_found());
+ table = table->DeleteEntry(isolate_, table, entry);
+}
+
+void RuntimeTestRunner::CheckCounts(base::Optional<int> capacity,
+ base::Optional<int> elements,
+ base::Optional<int> deleted) {
+ if (capacity.has_value()) {
+ CHECK_EQ(capacity.value(), table->Capacity());
+ }
+ if (elements.has_value()) {
+ CHECK_EQ(elements.value(), table->NumberOfElements());
+ }
+ if (deleted.has_value()) {
+ CHECK_EQ(deleted.value(), table->NumberOfDeletedElements());
+ }
+}
+
+void RuntimeTestRunner::CheckEnumerationOrder(
+ const std::vector<std::string>& expected_keys) {
+ ReadOnlyRoots roots(isolate_);
+ int i = 0;
+ for (InternalIndex index : table->IterateEntriesOrdered()) {
+ Object key;
+ if (table->ToKey(roots, index, &key)) {
+ CHECK_LT(i, expected_keys.size());
+ Handle<Name> expected_key =
+ CreateKeyWithHash(isolate_, this->keys_, Key{expected_keys[i]});
+
+ CHECK_EQ(key, *expected_key);
+ ++i;
+ }
+ }
+ CHECK_EQ(i, expected_keys.size());
+}
+
+void RuntimeTestRunner::RehashInplace() { table->Rehash(isolate_); }
+
+void RuntimeTestRunner::Shrink() {
+ table = SwissNameDictionary::Shrink(isolate_, table);
+}
+
+void RuntimeTestRunner::CheckCopy() {
+ Handle<SwissNameDictionary> copy =
+ SwissNameDictionary::ShallowCopy(isolate_, table);
+
+ CHECK(table->EqualsForTesting(*copy));
+}
+
+void RuntimeTestRunner::VerifyHeap() {
+#if VERIFY_HEAP
+ table->SwissNameDictionaryVerify(isolate_, true);
+#endif
+}
+
+void RuntimeTestRunner::PrintTable() {
+#ifdef OBJECT_PRINT
+ table->SwissNameDictionaryPrint(std::cout);
+#endif
+}
+
TEST(CapacityFor) {
for (int elements = 0; elements <= 32; elements++) {
int capacity = SwissNameDictionary::CapacityFor(elements);
@@ -76,6 +218,14 @@ TEST(SizeFor) {
CHECK_EQ(SwissNameDictionary::SizeFor(8), size_8);
}
+// Executes the tests defined in test-swiss-name-dictionary-shared-tests.h as if
+// they were defined in this file, using the RuntimeTestRunner. See comments in
+// test-swiss-name-dictionary-shared-tests.h and in
+// swiss-name-dictionary-infra.h for details.
+const char kRuntimeTestFileName[] = __FILE__;
+SharedSwissTableTests<RuntimeTestRunner, kRuntimeTestFileName>
+ execute_shared_tests_runtime;
+
} // namespace test_swiss_hash_table
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-typedarrays.cc b/deps/v8/test/cctest/test-typedarrays.cc
index 867d0f90b95..0134befedd2 100644
--- a/deps/v8/test/cctest/test-typedarrays.cc
+++ b/deps/v8/test/cctest/test-typedarrays.cc
@@ -71,30 +71,6 @@ TEST(CopyContentsView) {
TestArrayBufferViewContents(&env, true);
}
-
-TEST(AllocateNotExternal) {
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- void* memory = reinterpret_cast<Isolate*>(env->GetIsolate())
- ->array_buffer_allocator()
- ->Allocate(1024);
-
-// Keep the test until the functions are removed.
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated"
-#endif
- v8::Local<v8::ArrayBuffer> buffer =
- v8::ArrayBuffer::New(env->GetIsolate(), memory, 1024,
- v8::ArrayBufferCreationMode::kInternalized);
- CHECK(!buffer->IsExternal());
-#if __clang__
-#pragma clang diagnostic pop
-#endif
-
- CHECK_EQ(memory, buffer->GetBackingStore()->Data());
-}
-
void TestSpeciesProtector(char* code,
bool invalidates_species_protector = true) {
v8::Isolate::CreateParams create_params;
diff --git a/deps/v8/test/cctest/test-verifiers.cc b/deps/v8/test/cctest/test-verifiers.cc
index 80e2517cd2b..8e393ae1636 100644
--- a/deps/v8/test/cctest/test-verifiers.cc
+++ b/deps/v8/test/cctest/test-verifiers.cc
@@ -70,7 +70,7 @@ TEST_PAIR(TestWrongStrongTypeInIndexedStructField) {
v8::Local<v8::Value> v = CompileRun("({a: 3, b: 4})");
Handle<Object> o = v8::Utils::OpenHandle(*v);
Handle<Map> map(Handle<HeapObject>::cast(o)->map(), i_isolate);
- Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(i_isolate),
i_isolate);
int offset = DescriptorArray::OffsetOfDescriptorAt(1) +
DescriptorArray::kEntryKeyOffset;
@@ -102,7 +102,7 @@ TEST_PAIR(TestWrongWeakTypeInIndexedStructField) {
v8::Local<v8::Value> v = CompileRun("({a: 3, b: 4})");
Handle<Object> o = v8::Utils::OpenHandle(*v);
Handle<Map> map(Handle<HeapObject>::cast(o)->map(), i_isolate);
- Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(i_isolate),
i_isolate);
int offset = DescriptorArray::OffsetOfDescriptorAt(0) +
DescriptorArray::kEntryValueOffset;
diff --git a/deps/v8/test/cctest/test-web-snapshots.cc b/deps/v8/test/cctest/test-web-snapshots.cc
new file mode 100644
index 00000000000..b7f314318a5
--- /dev/null
+++ b/deps/v8/test/cctest/test-web-snapshots.cc
@@ -0,0 +1,131 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/web-snapshot/web-snapshot.h"
+#include "test/cctest/cctest-utils.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+void TestWebSnapshot(const char* snapshot_source, const char* test_source,
+ const char* expected_result, uint32_t string_count,
+ uint32_t map_count, uint32_t context_count,
+ uint32_t function_count, uint32_t object_count) {
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+
+ CompileRun(snapshot_source);
+ WebSnapshotData snapshot_data;
+ {
+ std::vector<std::string> exports;
+ exports.push_back("foo");
+ WebSnapshotSerializer serializer(isolate);
+ CHECK(serializer.TakeSnapshot(context, exports, snapshot_data));
+ CHECK(!serializer.has_error());
+ CHECK_NOT_NULL(snapshot_data.buffer);
+ CHECK_EQ(string_count, serializer.string_count());
+ CHECK_EQ(map_count, serializer.map_count());
+ CHECK_EQ(context_count, serializer.context_count());
+ CHECK_EQ(function_count, serializer.function_count());
+ CHECK_EQ(object_count, serializer.object_count());
+ }
+
+ {
+ v8::Local<v8::Context> new_context = CcTest::NewContext();
+ v8::Context::Scope context_scope(new_context);
+ WebSnapshotDeserializer deserializer(isolate);
+ CHECK(deserializer.UseWebSnapshot(snapshot_data.buffer,
+ snapshot_data.buffer_size));
+ CHECK(!deserializer.has_error());
+ v8::Local<v8::String> result = CompileRun(test_source).As<v8::String>();
+ CHECK(result->Equals(new_context, v8_str(expected_result)).FromJust());
+ CHECK_EQ(string_count, deserializer.string_count());
+ CHECK_EQ(map_count, deserializer.map_count());
+ CHECK_EQ(context_count, deserializer.context_count());
+ CHECK_EQ(function_count, deserializer.function_count());
+ CHECK_EQ(object_count, deserializer.object_count());
+ }
+}
+
+} // namespace
+
+TEST(Minimal) {
+ const char* snapshot_source = "var foo = {'key': 'lol'};";
+ const char* test_source = "foo.key";
+ const char* expected_result = "lol";
+ uint32_t kStringCount = 3; // 'foo', 'key', 'lol'
+ uint32_t kMapCount = 1;
+ uint32_t kContextCount = 0;
+ uint32_t kFunctionCount = 0;
+ uint32_t kObjectCount = 1;
+ TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
+ kMapCount, kContextCount, kFunctionCount, kObjectCount);
+}
+
+TEST(Function) {
+ const char* snapshot_source =
+ "var foo = {'key': function() { return '11525'; }};";
+ const char* test_source = "foo.key()";
+ const char* expected_result = "11525";
+ uint32_t kStringCount = 3; // 'foo', 'key', function source code
+ uint32_t kMapCount = 1;
+ uint32_t kContextCount = 0;
+ uint32_t kFunctionCount = 1;
+ uint32_t kObjectCount = 1;
+ TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
+ kMapCount, kContextCount, kFunctionCount, kObjectCount);
+}
+
+TEST(InnerFunctionWithContext) {
+ const char* snapshot_source =
+ "var foo = {'key': (function() {\n"
+ " let result = '11525';\n"
+ " function inner() { return result; }\n"
+ " return inner;\n"
+ " })()};";
+ const char* test_source = "foo.key()";
+ const char* expected_result = "11525";
+ // Strings: 'foo', 'key', function source code (inner), 'result', '11525'
+ uint32_t kStringCount = 5;
+ uint32_t kMapCount = 1;
+ uint32_t kContextCount = 1;
+ uint32_t kFunctionCount = 1;
+ uint32_t kObjectCount = 1;
+ TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
+ kMapCount, kContextCount, kFunctionCount, kObjectCount);
+}
+
+TEST(InnerFunctionWithContextAndParentContext) {
+ const char* snapshot_source =
+ "var foo = {'key': (function() {\n"
+ " let part1 = '11';\n"
+ " function inner() {\n"
+ " let part2 = '525';\n"
+ " function innerinner() {\n"
+ " return part1 + part2;\n"
+ " }\n"
+ " return innerinner;\n"
+ " }\n"
+ " return inner();\n"
+ " })()};";
+ const char* test_source = "foo.key()";
+ const char* expected_result = "11525";
+ // Strings: 'foo', 'key', function source code (innerinner), 'part1', 'part2',
+ // '11', '525'
+ uint32_t kStringCount = 7;
+ uint32_t kMapCount = 1;
+ uint32_t kContextCount = 2;
+ uint32_t kFunctionCount = 1;
+ uint32_t kObjectCount = 1;
+ TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
+ kMapCount, kContextCount, kFunctionCount, kObjectCount);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-backing-store.cc b/deps/v8/test/cctest/wasm/test-backing-store.cc
index f8010d30319..2dc2fc89a75 100644
--- a/deps/v8/test/cctest/test-backing-store.cc
+++ b/deps/v8/test/cctest/wasm/test-backing-store.cc
@@ -5,12 +5,12 @@
#include "src/api/api-inl.h"
#include "src/objects/backing-store.h"
#include "src/wasm/wasm-objects.h"
-
#include "test/cctest/cctest.h"
#include "test/cctest/manually-externalized-buffer.h"
namespace v8 {
namespace internal {
+namespace wasm {
using testing::ManuallyExternalizedBuffer;
@@ -22,7 +22,7 @@ TEST(Run_WasmModule_Buffer_Externalized_Detach) {
HandleScope scope(isolate);
MaybeHandle<JSArrayBuffer> result =
isolate->factory()->NewJSArrayBufferAndBackingStore(
- wasm::kWasmPageSize, InitializedFlag::kZeroInitialized);
+ kWasmPageSize, InitializedFlag::kZeroInitialized);
Handle<JSArrayBuffer> buffer = result.ToHandleChecked();
// Embedder requests contents.
@@ -81,5 +81,6 @@ TEST(BackingStore_Reclaim) {
}
#endif
+} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-gc.cc b/deps/v8/test/cctest/wasm/test-gc.cc
index dd1dfea0f1e..07084b22650 100644
--- a/deps/v8/test/cctest/wasm/test-gc.cc
+++ b/deps/v8/test/cctest/wasm/test-gc.cc
@@ -609,6 +609,7 @@ WASM_COMPILED_EXEC_TEST(WasmBasicArray) {
WasmGCTester tester(execution_tier);
const byte type_index = tester.DefineArray(wasm::kWasmI32, true);
+ const byte fp_type_index = tester.DefineArray(wasm::kWasmF64, true);
ValueType kRefTypes[] = {ref(type_index)};
FunctionSig sig_q_v(1, 0, kRefTypes);
ValueType kOptRefType = optref(type_index);
@@ -655,6 +656,20 @@ WASM_COMPILED_EXEC_TEST(WasmBasicArray) {
WASM_RTT_CANON(type_index)),
kExprEnd});
+ // Tests that fp arrays work properly.
+ // f: a = [10.0, 10.0, 10.0]; a[1] = 42.42; return static_cast<int64>(a[1]);
+ double result_value = 42.42;
+ const byte kTestFpArray = tester.DefineFunction(
+ tester.sigs.i_v(), {optref(fp_type_index)},
+ {WASM_LOCAL_SET(0, WASM_ARRAY_NEW_WITH_RTT(
+ fp_type_index, WASM_F64(10.0), WASM_I32V(3),
+ WASM_RTT_CANON(fp_type_index))),
+ WASM_ARRAY_SET(fp_type_index, WASM_LOCAL_GET(0), WASM_I32V(1),
+ WASM_F64(result_value)),
+ WASM_I32_SCONVERT_F64(
+ WASM_ARRAY_GET(fp_type_index, WASM_LOCAL_GET(0), WASM_I32V(1))),
+ kExprEnd});
+
tester.CompileModule();
tester.CheckResult(kGetElem, 12, 0);
@@ -663,6 +678,7 @@ WASM_COMPILED_EXEC_TEST(WasmBasicArray) {
tester.CheckHasThrown(kGetElem, 3);
tester.CheckHasThrown(kGetElem, -1);
tester.CheckResult(kGetLength, 42);
+ tester.CheckResult(kTestFpArray, static_cast<int32_t>(result_value));
MaybeHandle<Object> h_result = tester.GetResultObject(kAllocate);
CHECK(h_result.ToHandleChecked()->IsWasmArray());
@@ -863,6 +879,105 @@ WASM_COMPILED_EXEC_TEST(BasicRtt) {
tester.CheckResult(kRefCast, 43);
}
+WASM_COMPILED_EXEC_TEST(RefTrivialCasts) {
+ WasmGCTester tester(execution_tier);
+ byte type_index = tester.DefineStruct({F(wasm::kWasmI32, true)});
+ byte subtype_index =
+ tester.DefineStruct({F(wasm::kWasmI32, true), F(wasm::kWasmS128, false)});
+ ValueType sig_types[] = {kWasmS128, kWasmI32, kWasmF64};
+ FunctionSig sig(1, 2, sig_types);
+ byte sig_index = tester.DefineSignature(&sig);
+
+ const byte kRefTestNull = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_TEST(WASM_REF_NULL(type_index), WASM_RTT_CANON(subtype_index)),
+ kExprEnd});
+ const byte kRefTestUpcast = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_TEST(
+ WASM_STRUCT_NEW_DEFAULT(
+ subtype_index,
+ WASM_RTT_SUB(subtype_index, WASM_RTT_CANON(type_index))),
+ WASM_RTT_CANON(type_index)),
+ kExprEnd});
+ const byte kRefTestUpcastNull = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_TEST(WASM_REF_NULL(subtype_index), WASM_RTT_CANON(type_index)),
+ kExprEnd});
+ const byte kRefTestUnrelated = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_TEST(
+ WASM_STRUCT_NEW_DEFAULT(
+ subtype_index,
+ WASM_RTT_SUB(subtype_index, WASM_RTT_CANON(type_index))),
+ WASM_RTT_CANON(sig_index)),
+ kExprEnd});
+ const byte kRefTestUnrelatedNull = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_TEST(WASM_REF_NULL(subtype_index), WASM_RTT_CANON(sig_index)),
+ kExprEnd});
+ const byte kRefTestUnrelatedNonNullable = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_TEST(
+ WASM_STRUCT_NEW_DEFAULT(type_index, WASM_RTT_CANON(type_index)),
+ WASM_RTT_CANON(sig_index)),
+ kExprEnd});
+
+ const byte kRefCastNull = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_NULL(WASM_REF_CAST(WASM_REF_NULL(type_index),
+ WASM_RTT_CANON(subtype_index))),
+ kExprEnd});
+ const byte kRefCastUpcast = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_NULL(WASM_REF_CAST(
+ WASM_STRUCT_NEW_DEFAULT(
+ subtype_index,
+ WASM_RTT_SUB(subtype_index, WASM_RTT_CANON(type_index))),
+ WASM_RTT_CANON(type_index))),
+ kExprEnd});
+ const byte kRefCastUpcastNull = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_NULL(WASM_REF_CAST(WASM_REF_NULL(subtype_index),
+ WASM_RTT_CANON(type_index))),
+ kExprEnd});
+ const byte kRefCastUnrelated = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_NULL(WASM_REF_CAST(
+ WASM_STRUCT_NEW_DEFAULT(
+ subtype_index,
+ WASM_RTT_SUB(subtype_index, WASM_RTT_CANON(type_index))),
+ WASM_RTT_CANON(sig_index))),
+ kExprEnd});
+ const byte kRefCastUnrelatedNull = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_NULL(WASM_REF_CAST(WASM_REF_NULL(subtype_index),
+ WASM_RTT_CANON(sig_index))),
+ kExprEnd});
+ const byte kRefCastUnrelatedNonNullable = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_NULL(WASM_REF_CAST(
+ WASM_STRUCT_NEW_DEFAULT(type_index, WASM_RTT_CANON(type_index)),
+ WASM_RTT_CANON(sig_index))),
+ kExprEnd});
+
+ tester.CompileModule();
+
+ tester.CheckResult(kRefTestNull, 0);
+ tester.CheckResult(kRefTestUpcast, 1);
+ tester.CheckResult(kRefTestUpcastNull, 0);
+ tester.CheckResult(kRefTestUnrelated, 0);
+ tester.CheckResult(kRefTestUnrelatedNull, 0);
+ tester.CheckResult(kRefTestUnrelatedNonNullable, 0);
+
+ tester.CheckResult(kRefCastNull, 1);
+ tester.CheckResult(kRefCastUpcast, 0);
+ tester.CheckResult(kRefCastUpcastNull, 1);
+ tester.CheckHasThrown(kRefCastUnrelated);
+ tester.CheckResult(kRefCastUnrelatedNull, 1);
+ tester.CheckHasThrown(kRefCastUnrelatedNonNullable);
+}
+
WASM_EXEC_TEST(NoDepthRtt) {
WasmGCTester tester(execution_tier);
@@ -871,14 +986,19 @@ WASM_EXEC_TEST(NoDepthRtt) {
tester.DefineStruct({F(wasm::kWasmI32, true), F(wasm::kWasmI32, true)});
const byte empty_struct_index = tester.DefineStruct({});
+ ValueType kRttTypeNoDepth = ValueType::Rtt(type_index);
+ FunctionSig sig_t1_v_nd(1, 0, &kRttTypeNoDepth);
ValueType kRttSubtypeNoDepth = ValueType::Rtt(subtype_index);
FunctionSig sig_t2_v_nd(1, 0, &kRttSubtypeNoDepth);
+ const byte kRttTypeCanon = tester.DefineFunction(
+ &sig_t1_v_nd, {}, {WASM_RTT_CANON(type_index), kExprEnd});
const byte kRttSubtypeCanon = tester.DefineFunction(
&sig_t2_v_nd, {}, {WASM_RTT_CANON(subtype_index), kExprEnd});
const byte kRttSubtypeSub = tester.DefineFunction(
&sig_t2_v_nd, {},
- {WASM_RTT_SUB(subtype_index, WASM_RTT_CANON(type_index)), kExprEnd});
+ {WASM_RTT_SUB(subtype_index, WASM_CALL_FUNCTION0(kRttTypeCanon)),
+ kExprEnd});
const byte kTestCanon = tester.DefineFunction(
tester.sigs.i_v(), {optref(type_index)},
@@ -1059,23 +1179,46 @@ WASM_COMPILED_EXEC_TEST(CallRef) {
tester.CheckResult(caller, 47, 5);
}
-WASM_COMPILED_EXEC_TEST(RefTestCastNull) {
+WASM_COMPILED_EXEC_TEST(CallReftypeParameters) {
WasmGCTester tester(execution_tier);
byte type_index = tester.DefineStruct({F(wasm::kWasmI32, true)});
-
- const byte kRefTestNull = tester.DefineFunction(
- tester.sigs.i_v(), {},
- {WASM_REF_TEST(WASM_REF_NULL(type_index), WASM_RTT_CANON(type_index)),
+ ValueType kRefType{optref(type_index)};
+ ValueType sig_types[] = {kWasmI32, kRefType, kRefType, kRefType, kRefType,
+ kWasmI32, kWasmI32, kWasmI32, kWasmI32};
+ FunctionSig sig(1, 8, sig_types);
+ byte adder = tester.DefineFunction(
+ &sig, {},
+ {WASM_I32_ADD(
+ WASM_STRUCT_GET(type_index, 0, WASM_LOCAL_GET(0)),
+ WASM_I32_ADD(
+ WASM_STRUCT_GET(type_index, 0, WASM_LOCAL_GET(1)),
+ WASM_I32_ADD(
+ WASM_STRUCT_GET(type_index, 0, WASM_LOCAL_GET(2)),
+ WASM_I32_ADD(
+ WASM_STRUCT_GET(type_index, 0, WASM_LOCAL_GET(3)),
+ WASM_I32_ADD(
+ WASM_LOCAL_GET(4),
+ WASM_I32_ADD(WASM_LOCAL_GET(5),
+ WASM_I32_ADD(WASM_LOCAL_GET(6),
+ WASM_LOCAL_GET(7)))))))),
kExprEnd});
-
- const byte kRefCastNull = tester.DefineFunction(
+ byte caller = tester.DefineFunction(
tester.sigs.i_v(), {},
- {WASM_REF_IS_NULL(WASM_REF_CAST(WASM_REF_NULL(type_index),
- WASM_RTT_CANON(type_index))),
+ {WASM_CALL_FUNCTION(adder,
+ WASM_STRUCT_NEW_WITH_RTT(type_index, WASM_I32V(2),
+ WASM_RTT_CANON(type_index)),
+ WASM_STRUCT_NEW_WITH_RTT(type_index, WASM_I32V(4),
+ WASM_RTT_CANON(type_index)),
+ WASM_STRUCT_NEW_WITH_RTT(type_index, WASM_I32V(8),
+ WASM_RTT_CANON(type_index)),
+ WASM_STRUCT_NEW_WITH_RTT(type_index, WASM_I32V(16),
+ WASM_RTT_CANON(type_index)),
+ WASM_I32V(32), WASM_I32V(64), WASM_I32V(128),
+ WASM_I32V(256)),
kExprEnd});
+
tester.CompileModule();
- tester.CheckResult(kRefTestNull, 0);
- tester.CheckResult(kRefCastNull, 1);
+ tester.CheckResult(caller, 510);
}
WASM_COMPILED_EXEC_TEST(AbstractTypeChecks) {
diff --git a/deps/v8/test/cctest/wasm/test-grow-memory.cc b/deps/v8/test/cctest/wasm/test-grow-memory.cc
index 662c037a58e..d3ad66aa4b8 100644
--- a/deps/v8/test/cctest/wasm/test-grow-memory.cc
+++ b/deps/v8/test/cctest/wasm/test-grow-memory.cc
@@ -83,7 +83,7 @@ TEST(Run_WasmModule_Buffer_Externalized_GrowMem) {
WasmModuleBuilder* builder = zone.New<WasmModuleBuilder>(&zone);
WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
ExportAsMain(f);
- byte code[] = {WASM_GROW_MEMORY(WASM_I32V_1(6)), WASM_DROP,
+ byte code[] = {WASM_MEMORY_GROW(WASM_I32V_1(6)), WASM_DROP,
WASM_MEMORY_SIZE};
EMIT_CODE_WITH_END(f, code);
diff --git a/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc b/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc
index f5847d1fb1f..551d8f214b2 100644
--- a/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc
+++ b/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc
@@ -6,6 +6,7 @@
#include "src/wasm/wasm-debug.h"
#include "test/cctest/cctest.h"
#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-macro-gen.h"
namespace v8 {
@@ -89,6 +90,8 @@ class LiftoffCompileEnvironment {
return debug_side_table_via_compilation;
}
+ TestingModuleBuilder* builder() { return &wasm_runner_.builder(); }
+
private:
static void CheckTableEquals(const DebugSideTable& a,
const DebugSideTable& b) {
@@ -177,7 +180,7 @@ struct DebugSideTableEntry {
// Check for equality, but ignore exact register and stack offset.
static bool CheckValueEquals(const DebugSideTable::Entry::Value& a,
const DebugSideTable::Entry::Value& b) {
- return a.index == b.index && a.kind == b.kind && a.kind == b.kind &&
+ return a.index == b.index && a.type == b.type && a.storage == b.storage &&
(a.storage != DebugSideTable::Entry::kConstant ||
a.i32_const == b.i32_const);
}
@@ -189,7 +192,7 @@ std::ostream& operator<<(std::ostream& out, const DebugSideTableEntry& entry) {
out << "stack height " << entry.stack_height << ", changed: {";
const char* comma = "";
for (auto& v : entry.changed_values) {
- out << comma << v.index << ":" << name(v.kind) << " ";
+ out << comma << v.index << ":" << v.type.name() << " ";
switch (v.storage) {
case DebugSideTable::Entry::kConstant:
out << "const:" << v.i32_const;
@@ -213,26 +216,26 @@ std::ostream& operator<<(std::ostream& out,
#endif // DEBUG
// Named constructors to make the tests more readable.
-DebugSideTable::Entry::Value Constant(int index, ValueKind kind,
+DebugSideTable::Entry::Value Constant(int index, ValueType type,
int32_t constant) {
DebugSideTable::Entry::Value value;
value.index = index;
- value.kind = kind;
+ value.type = type;
value.storage = DebugSideTable::Entry::kConstant;
value.i32_const = constant;
return value;
}
-DebugSideTable::Entry::Value Register(int index, ValueKind kind) {
+DebugSideTable::Entry::Value Register(int index, ValueType type) {
DebugSideTable::Entry::Value value;
value.index = index;
- value.kind = kind;
+ value.type = type;
value.storage = DebugSideTable::Entry::kRegister;
return value;
}
-DebugSideTable::Entry::Value Stack(int index, ValueKind kind) {
+DebugSideTable::Entry::Value Stack(int index, ValueType type) {
DebugSideTable::Entry::Value value;
value.index = index;
- value.kind = kind;
+ value.type = type;
value.storage = DebugSideTable::Entry::kStack;
return value;
}
@@ -296,9 +299,9 @@ TEST(Liftoff_debug_side_table_simple) {
CheckDebugSideTable(
{
// function entry, locals in registers.
- {2, {Register(0, kI32), Register(1, kI32)}},
+ {2, {Register(0, kWasmI32), Register(1, kWasmI32)}},
// OOL stack check, locals spilled, stack still empty.
- {2, {Stack(0, kI32), Stack(1, kI32)}},
+ {2, {Stack(0, kWasmI32), Stack(1, kWasmI32)}},
},
debug_side_table.get());
}
@@ -312,9 +315,9 @@ TEST(Liftoff_debug_side_table_call) {
CheckDebugSideTable(
{
// function entry, local in register.
- {1, {Register(0, kI32)}},
+ {1, {Register(0, kWasmI32)}},
// call, local spilled, stack empty.
- {1, {Stack(0, kI32)}},
+ {1, {Stack(0, kWasmI32)}},
// OOL stack check, local spilled as before, stack empty.
{1, {}},
},
@@ -332,11 +335,11 @@ TEST(Liftoff_debug_side_table_call_const) {
CheckDebugSideTable(
{
// function entry, local in register.
- {1, {Register(0, kI32)}},
+ {1, {Register(0, kWasmI32)}},
// call, local is kConst.
- {1, {Constant(0, kI32, kConst)}},
+ {1, {Constant(0, kWasmI32, kConst)}},
// OOL stack check, local spilled.
- {1, {Stack(0, kI32)}},
+ {1, {Stack(0, kWasmI32)}},
},
debug_side_table.get());
}
@@ -351,13 +354,13 @@ TEST(Liftoff_debug_side_table_indirect_call) {
CheckDebugSideTable(
{
// function entry, local in register.
- {1, {Register(0, kI32)}},
+ {1, {Register(0, kWasmI32)}},
// indirect call, local spilled, stack empty.
- {1, {Stack(0, kI32)}},
+ {1, {Stack(0, kWasmI32)}},
// OOL stack check, local still spilled.
{1, {}},
// OOL trap (invalid index), local still spilled, stack has {kConst}.
- {2, {Constant(1, kI32, kConst)}},
+ {2, {Constant(1, kWasmI32, kConst)}},
// OOL trap (sig mismatch), stack unmodified.
{2, {}},
},
@@ -373,11 +376,11 @@ TEST(Liftoff_debug_side_table_loop) {
CheckDebugSideTable(
{
// function entry, local in register.
- {1, {Register(0, kI32)}},
+ {1, {Register(0, kWasmI32)}},
// OOL stack check, local spilled, stack empty.
- {1, {Stack(0, kI32)}},
+ {1, {Stack(0, kWasmI32)}},
// OOL loop stack check, local still spilled, stack has {kConst}.
- {2, {Constant(1, kI32, kConst)}},
+ {2, {Constant(1, kWasmI32, kConst)}},
},
debug_side_table.get());
}
@@ -390,9 +393,9 @@ TEST(Liftoff_debug_side_table_trap) {
CheckDebugSideTable(
{
// function entry, locals in registers.
- {2, {Register(0, kI32), Register(1, kI32)}},
+ {2, {Register(0, kWasmI32), Register(1, kWasmI32)}},
// OOL stack check, local spilled, stack empty.
- {2, {Stack(0, kI32), Stack(1, kI32)}},
+ {2, {Stack(0, kWasmI32), Stack(1, kWasmI32)}},
// OOL trap (div by zero), stack as before.
{2, {}},
// OOL trap (unrepresentable), stack as before.
@@ -414,11 +417,38 @@ TEST(Liftoff_breakpoint_simple) {
CheckDebugSideTable(
{
// First break point, locals in registers.
- {2, {Register(0, kI32), Register(1, kI32)}},
+ {2, {Register(0, kWasmI32), Register(1, kWasmI32)}},
// Second break point, locals unchanged, two register stack values.
- {4, {Register(2, kI32), Register(3, kI32)}},
+ {4, {Register(2, kWasmI32), Register(3, kWasmI32)}},
// OOL stack check, locals spilled, stack empty.
- {2, {Stack(0, kI32), Stack(1, kI32)}},
+ {2, {Stack(0, kWasmI32), Stack(1, kWasmI32)}},
+ },
+ debug_side_table.get());
+}
+
+TEST(Liftoff_debug_side_table_catch_all) {
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ LiftoffCompileEnvironment env;
+ TestSignatures sigs;
+ int ex = env.builder()->AddException(sigs.v_v());
+ ValueType exception_type = ValueType::Ref(HeapType::kExtern, kNonNullable);
+ auto debug_side_table = env.GenerateDebugSideTable(
+ {}, {kWasmI32},
+ {WASM_TRY_CATCH_ALL_T(kWasmI32, WASM_STMTS(WASM_I32V(0), WASM_THROW(ex)),
+ WASM_I32V(1)),
+ WASM_DROP},
+ {
+ 18 // Break at the end of the try block.
+ });
+ CheckDebugSideTable(
+ {
+ // function entry.
+ {1, {Register(0, kWasmI32)}},
+ // breakpoint.
+ {3,
+ {Stack(0, kWasmI32), Register(1, exception_type),
+ Constant(2, kWasmI32, 1)}},
+ {1, {}},
},
debug_side_table.get());
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
index 37163c0a8c7..760a7cc7ea6 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
@@ -1491,7 +1491,7 @@ static void CompileCallIndirectMany(TestExecutionTier tier, ValueType param) {
TestSignatures sigs;
for (byte num_params = 0; num_params < 40; num_params++) {
WasmRunner<void> r(tier);
- FunctionSig* sig = sigs.many(r.zone(), kWasmStmt, param, num_params);
+ FunctionSig* sig = sigs.many(r.zone(), kWasmVoid, param, num_params);
r.builder().AddSignature(sig);
r.builder().AddSignature(sig);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc b/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
index e55547911b2..bb61f93ac39 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
@@ -99,6 +99,30 @@ WASM_EXEC_TEST(TryCatchAllThrow) {
TestSignatures sigs;
EXPERIMENTAL_FLAG_SCOPE(eh);
WasmRunner<uint32_t, uint32_t> r(execution_tier);
+ uint32_t except = r.builder().AddException(sigs.v_v());
+ constexpr uint32_t kResult0 = 23;
+ constexpr uint32_t kResult1 = 42;
+
+ // Build the main test function.
+ BUILD(r, kExprTry, static_cast<byte>((kWasmI32).value_type_code()),
+ WASM_STMTS(WASM_I32V(kResult1), WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
+ WASM_THROW(except))),
+ kExprCatchAll, WASM_I32V(kResult0), kExprEnd);
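+  // The try body yields kResult1 (42) and throws when the argument is zero;
+  // the catch_all handler replaces the result with kResult0 (23), which is
+  // what the checks below expect for inputs 0 and 1.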
+
+ if (execution_tier != TestExecutionTier::kInterpreter) {
+ // Need to call through JS to allow for creation of stack traces.
+ r.CheckCallViaJS(kResult0, 0);
+ r.CheckCallViaJS(kResult1, 1);
+ } else {
+ CHECK_EQ(kResult0, r.CallInterpreter(0));
+ CHECK_EQ(kResult1, r.CallInterpreter(1));
+ }
+}
+
+WASM_EXEC_TEST(TryCatchCatchAllThrow) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier);
uint32_t except1 = r.builder().AddException(sigs.v_v());
uint32_t except2 = r.builder().AddException(sigs.v_v());
constexpr uint32_t kResult0 = 23;
@@ -112,8 +136,8 @@ WASM_EXEC_TEST(TryCatchAllThrow) {
WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)), WASM_THROW(except1)),
WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(0), WASM_I32V(1)),
WASM_THROW(except2))),
- kExprCatch, except1, WASM_STMTS(WASM_I32V(kResult0)), kExprCatchAll,
- WASM_STMTS(WASM_I32V(kResult1)), kExprEnd);
+ kExprCatch, except1, WASM_I32V(kResult0), kExprCatchAll,
+ WASM_I32V(kResult1), kExprEnd);
if (execution_tier != TestExecutionTier::kInterpreter) {
// Need to call through JS to allow for creation of stack traces.
@@ -318,6 +342,39 @@ WASM_EXEC_TEST(TryCatchCallDirect) {
}
}
+WASM_EXEC_TEST(TryCatchAllCallDirect) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier);
+ uint32_t except = r.builder().AddException(sigs.v_v());
+ constexpr uint32_t kResult0 = 23;
+ constexpr uint32_t kResult1 = 42;
+
+ // Build a throwing helper function.
+ WasmFunctionCompiler& throw_func = r.NewFunction(sigs.i_ii());
+ BUILD(throw_func, WASM_THROW(except));
+
+ // Build the main test function.
+ BUILD(r, WASM_TRY_CATCH_ALL_T(
+ kWasmI32,
+ WASM_STMTS(WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
+ WASM_STMTS(WASM_CALL_FUNCTION(
+ throw_func.function_index(),
+ WASM_I32V(7), WASM_I32V(9)),
+ WASM_DROP))),
+ WASM_STMTS(WASM_I32V(kResult0))));
+
+ if (execution_tier != TestExecutionTier::kInterpreter) {
+ // Need to call through JS to allow for creation of stack traces.
+ r.CheckCallViaJS(kResult0, 0);
+ r.CheckCallViaJS(kResult1, 1);
+ } else {
+ CHECK_EQ(kResult0, r.CallInterpreter(0));
+ CHECK_EQ(kResult1, r.CallInterpreter(1));
+ }
+}
+
WASM_EXEC_TEST(TryCatchCallIndirect) {
TestSignatures sigs;
EXPERIMENTAL_FLAG_SCOPE(eh);
@@ -348,7 +405,49 @@ WASM_EXEC_TEST(TryCatchCallIndirect) {
sig_index, WASM_I32V(7),
WASM_I32V(9), WASM_LOCAL_GET(0)),
WASM_DROP))),
- WASM_STMTS(WASM_I32V(kResult0)), except));
+ WASM_I32V(kResult0), except));
+
+ if (execution_tier != TestExecutionTier::kInterpreter) {
+ // Need to call through JS to allow for creation of stack traces.
+ r.CheckCallViaJS(kResult0, 0);
+ r.CheckCallViaJS(kResult1, 1);
+ } else {
+ CHECK_EQ(kResult0, r.CallInterpreter(0));
+ CHECK_EQ(kResult1, r.CallInterpreter(1));
+ }
+}
+
+WASM_EXEC_TEST(TryCatchAllCallIndirect) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier);
+ uint32_t except = r.builder().AddException(sigs.v_v());
+ constexpr uint32_t kResult0 = 23;
+ constexpr uint32_t kResult1 = 42;
+
+ // Build a throwing helper function.
+ WasmFunctionCompiler& throw_func = r.NewFunction(sigs.i_ii());
+ BUILD(throw_func, WASM_THROW(except));
+ byte sig_index = r.builder().AddSignature(sigs.i_ii());
+ throw_func.SetSigIndex(0);
+
+ // Add an indirect function table.
+ uint16_t indirect_function_table[] = {
+ static_cast<uint16_t>(throw_func.function_index())};
+ r.builder().AddIndirectFunctionTable(indirect_function_table,
+ arraysize(indirect_function_table));
+
+ // Build the main test function.
+ BUILD(r,
+ WASM_TRY_CATCH_ALL_T(
+ kWasmI32,
+ WASM_STMTS(WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
+ WASM_STMTS(WASM_CALL_INDIRECT(
+ sig_index, WASM_I32V(7),
+ WASM_I32V(9), WASM_LOCAL_GET(0)),
+ WASM_DROP))),
+ WASM_I32V(kResult0)));
if (execution_tier != TestExecutionTier::kInterpreter) {
// Need to call through JS to allow for creation of stack traces.
@@ -383,7 +482,37 @@ WASM_COMPILED_EXEC_TEST(TryCatchCallExternal) {
WASM_STMTS(WASM_CALL_FUNCTION(kJSFunc, WASM_I32V(7),
WASM_I32V(9)),
WASM_DROP))),
- WASM_STMTS(WASM_I32V(kResult0))));
+ WASM_I32V(kResult0)));
+
+ // Need to call through JS to allow for creation of stack traces.
+ r.CheckCallViaJS(kResult0, 0);
+ r.CheckCallViaJS(kResult1, 1);
+}
+
+WASM_COMPILED_EXEC_TEST(TryCatchAllCallExternal) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ HandleScope scope(CcTest::InitIsolateOnce());
+ const char* source = "(function() { throw 'ball'; })";
+ Handle<JSFunction> js_function =
+ Handle<JSFunction>::cast(v8::Utils::OpenHandle(
+ *v8::Local<v8::Function>::Cast(CompileRun(source))));
+ ManuallyImportedJSFunction import = {sigs.i_ii(), js_function};
+ WasmRunner<uint32_t, uint32_t> r(execution_tier, &import);
+ constexpr uint32_t kResult0 = 23;
+ constexpr uint32_t kResult1 = 42;
+ constexpr uint32_t kJSFunc = 0;
+
+ // Build the main test function.
+ BUILD(r, WASM_TRY_CATCH_ALL_T(
+ kWasmI32,
+ WASM_STMTS(
+ WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
+ WASM_STMTS(WASM_CALL_FUNCTION(kJSFunc, WASM_I32V(7),
+ WASM_I32V(9)),
+ WASM_DROP))),
+ WASM_I32V(kResult0)));
// Need to call through JS to allow for creation of stack traces.
r.CheckCallViaJS(kResult0, 0);
@@ -460,6 +589,46 @@ TEST(Regress1180457) {
CHECK_EQ(kResult0, r.CallInterpreter());
}
+TEST(Regress1187896) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ WasmRunner<uint32_t> r(TestExecutionTier::kInterpreter);
+ byte try_sig = r.builder().AddSignature(sigs.v_i());
+ constexpr uint32_t kResult = 23;
+ BUILD(r, kExprI32Const, 0, kExprTry, try_sig, kExprDrop, kExprCatchAll,
+ kExprNop, kExprEnd, kExprI32Const, kResult);
+ CHECK_EQ(kResult, r.CallInterpreter());
+}
+
+TEST(Regress1190291) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ WasmRunner<uint32_t> r(TestExecutionTier::kInterpreter);
+ byte try_sig = r.builder().AddSignature(sigs.v_i());
+ BUILD(r, kExprUnreachable, kExprTry, try_sig, kExprCatchAll, kExprEnd,
+ kExprI32Const, 0);
+ r.CallInterpreter();
+}
+
+TEST(Regress1186795) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ WasmRunner<uint32_t> r(TestExecutionTier::kInterpreter);
+ uint32_t except = r.builder().AddException(sigs.v_i());
+ BUILD(r, WASM_TRY_CATCH_T(
+ kWasmI32,
+ WASM_STMTS(
+ WASM_I32V(0), WASM_I32V(0), WASM_I32V(0), WASM_I32V(0),
+ WASM_I32V(0), WASM_I32V(0), WASM_I32V(0),
+ WASM_TRY_UNWIND_T(
+ kWasmI32, WASM_STMTS(WASM_I32V(0), WASM_THROW(except)),
+ WASM_I32V(0)),
+ WASM_DROP, WASM_DROP, WASM_DROP, WASM_DROP, WASM_DROP,
+ WASM_DROP, WASM_DROP),
+ WASM_NOP, except));
+ CHECK_EQ(0, r.CallInterpreter());
+}
+
} // namespace test_run_wasm_exceptions
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
index 7199d34e7d1..0d039843e6c 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
@@ -312,14 +312,14 @@ TEST(MemoryGrow) {
WasmRunner<int32_t, uint32_t> r(TestExecutionTier::kInterpreter);
r.builder().AddMemory(kWasmPageSize);
r.builder().SetMaxMemPages(10);
- BUILD(r, WASM_GROW_MEMORY(WASM_LOCAL_GET(0)));
+ BUILD(r, WASM_MEMORY_GROW(WASM_LOCAL_GET(0)));
CHECK_EQ(1, r.Call(1));
}
{
WasmRunner<int32_t, uint32_t> r(TestExecutionTier::kInterpreter);
r.builder().AddMemory(kWasmPageSize);
r.builder().SetMaxMemPages(10);
- BUILD(r, WASM_GROW_MEMORY(WASM_LOCAL_GET(0)));
+ BUILD(r, WASM_MEMORY_GROW(WASM_LOCAL_GET(0)));
CHECK_EQ(-1, r.Call(11));
}
}
@@ -332,7 +332,7 @@ TEST(MemoryGrowPreservesData) {
BUILD(
r,
WASM_STORE_MEM(MachineType::Int32(), WASM_I32V(index), WASM_I32V(value)),
- WASM_GROW_MEMORY(WASM_LOCAL_GET(0)), WASM_DROP,
+ WASM_MEMORY_GROW(WASM_LOCAL_GET(0)), WASM_DROP,
WASM_LOAD_MEM(MachineType::Int32(), WASM_I32V(index)));
CHECK_EQ(value, r.Call(1));
}
@@ -341,7 +341,7 @@ TEST(MemoryGrowInvalidSize) {
// Grow memory by an invalid amount without initial memory.
WasmRunner<int32_t, uint32_t> r(TestExecutionTier::kInterpreter);
r.builder().AddMemory(kWasmPageSize);
- BUILD(r, WASM_GROW_MEMORY(WASM_LOCAL_GET(0)));
+ BUILD(r, WASM_MEMORY_GROW(WASM_LOCAL_GET(0)));
CHECK_EQ(-1, r.Call(1048575));
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-memory64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-memory64.cc
index 71bb77f6ade..2679000dd6c 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-memory64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-memory64.cc
@@ -98,6 +98,26 @@ WASM_EXEC_TEST(MemorySize) {
CHECK_EQ(kNumPages, r.Call());
}
+WASM_EXEC_TEST(MemoryGrow) {
+ // TODO(clemensb): Implement memory64 in the interpreter.
+ if (execution_tier == TestExecutionTier::kInterpreter) return;
+
+ Memory64Runner<int64_t, int64_t> r(execution_tier);
+ r.builder().SetMaxMemPages(13);
+ r.builder().AddMemory(kWasmPageSize);
+
+ BUILD(r, WASM_MEMORY_GROW(WASM_LOCAL_GET(0)));
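+  // memory.grow returns the previous size in pages on success and -1 on
+  // failure; starting from 1 page with a maximum of 13, the checks below grow
+  // 1 -> 7 -> 8 and then exercise negative, oversized, and over-maximum
+  // requests, all of which fail.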
+ CHECK_EQ(1, r.Call(6));
+ CHECK_EQ(7, r.Call(1));
+ CHECK_EQ(-1, r.Call(-1));
+ CHECK_EQ(-1, r.Call(int64_t{1} << 31));
+ CHECK_EQ(-1, r.Call(int64_t{1} << 32));
+ CHECK_EQ(-1, r.Call(int64_t{1} << 33));
+ CHECK_EQ(-1, r.Call(int64_t{1} << 63));
+ CHECK_EQ(-1, r.Call(6)); // Above the maximum of 13.
+ CHECK_EQ(8, r.Call(5)); // Just at the maximum of 13.
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
index a9f5dd6b263..14d88bc5626 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
@@ -471,7 +471,7 @@ TEST(Run_WasmModule_MemSize_GrowMem) {
WasmModuleBuilder* builder = zone.New<WasmModuleBuilder>(&zone);
WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
ExportAsMain(f);
- byte code[] = {WASM_GROW_MEMORY(WASM_I32V_1(10)), WASM_DROP,
+ byte code[] = {WASM_MEMORY_GROW(WASM_I32V_1(10)), WASM_DROP,
WASM_MEMORY_SIZE};
EMIT_CODE_WITH_END(f, code);
TestModule(&zone, builder, kExpectedValue);
@@ -490,7 +490,7 @@ TEST(MemoryGrowZero) {
WasmModuleBuilder* builder = zone.New<WasmModuleBuilder>(&zone);
WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
ExportAsMain(f);
- byte code[] = {WASM_GROW_MEMORY(WASM_I32V(0))};
+ byte code[] = {WASM_MEMORY_GROW(WASM_I32V(0))};
EMIT_CODE_WITH_END(f, code);
TestModule(&zone, builder, kExpectedValue);
}
@@ -597,7 +597,7 @@ TEST(Run_WasmModule_MemoryGrowInIf) {
WasmModuleBuilder* builder = zone.New<WasmModuleBuilder>(&zone);
WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
ExportAsMain(f);
- byte code[] = {WASM_IF_ELSE_I(WASM_I32V(0), WASM_GROW_MEMORY(WASM_I32V(1)),
+ byte code[] = {WASM_IF_ELSE_I(WASM_I32V(0), WASM_MEMORY_GROW(WASM_I32V(1)),
WASM_I32V(12))};
EMIT_CODE_WITH_END(f, code);
TestModule(&zone, builder, 12);
@@ -618,7 +618,7 @@ TEST(Run_WasmModule_GrowMemOobOffset) {
WasmModuleBuilder* builder = zone.New<WasmModuleBuilder>(&zone);
WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
ExportAsMain(f);
- byte code[] = {WASM_GROW_MEMORY(WASM_I32V_1(1)),
+ byte code[] = {WASM_MEMORY_GROW(WASM_I32V_1(1)),
WASM_STORE_MEM(MachineType::Int32(), WASM_I32V(index),
WASM_I32V(value))};
EMIT_CODE_WITH_END(f, code);
@@ -640,7 +640,7 @@ TEST(Run_WasmModule_GrowMemOobFixedIndex) {
WasmModuleBuilder* builder = zone.New<WasmModuleBuilder>(&zone);
WasmFunctionBuilder* f = builder->AddFunction(sigs.i_i());
ExportAsMain(f);
- byte code[] = {WASM_GROW_MEMORY(WASM_LOCAL_GET(0)), WASM_DROP,
+ byte code[] = {WASM_MEMORY_GROW(WASM_LOCAL_GET(0)), WASM_DROP,
WASM_STORE_MEM(MachineType::Int32(), WASM_I32V(index),
WASM_I32V(value)),
WASM_LOAD_MEM(MachineType::Int32(), WASM_I32V(index))};
@@ -687,7 +687,7 @@ TEST(Run_WasmModule_GrowMemOobVariableIndex) {
WasmModuleBuilder* builder = zone.New<WasmModuleBuilder>(&zone);
WasmFunctionBuilder* f = builder->AddFunction(sigs.i_i());
ExportAsMain(f);
- byte code[] = {WASM_GROW_MEMORY(WASM_I32V_1(1)), WASM_DROP,
+ byte code[] = {WASM_MEMORY_GROW(WASM_I32V_1(1)), WASM_DROP,
WASM_STORE_MEM(MachineType::Int32(), WASM_LOCAL_GET(0),
WASM_I32V(value)),
WASM_LOAD_MEM(MachineType::Int32(), WASM_LOCAL_GET(0))};
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-relaxed-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-relaxed-simd.cc
new file mode 100644
index 00000000000..50f5bb44b72
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-relaxed-simd.cc
@@ -0,0 +1,239 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/overflowing-math.h"
+#include "src/wasm/compilation-environment.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/cctest/wasm/wasm-simd-utils.h"
+#include "test/common/wasm/flag-utils.h"
+#include "test/common/wasm/wasm-macro-gen.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace test_run_wasm_relaxed_simd {
+
+// Use this for experimental relaxed-simd opcodes.
+#define WASM_RELAXED_SIMD_TEST(name) \
+ void RunWasm_##name##_Impl(LowerSimd lower_simd, \
+ TestExecutionTier execution_tier); \
+ TEST(RunWasm_##name##_turbofan) { \
+ if (!CpuFeatures::SupportsWasmSimd128()) return; \
+ EXPERIMENTAL_FLAG_SCOPE(relaxed_simd); \
+ RunWasm_##name##_Impl(kNoLowerSimd, TestExecutionTier::kTurbofan); \
+ } \
+ TEST(RunWasm_##name##_interpreter) { \
+ EXPERIMENTAL_FLAG_SCOPE(relaxed_simd); \
+ RunWasm_##name##_Impl(kNoLowerSimd, TestExecutionTier::kInterpreter); \
+ } \
+ void RunWasm_##name##_Impl(LowerSimd lower_simd, \
+ TestExecutionTier execution_tier)
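+// Usage sketch: WASM_RELAXED_SIMD_TEST(F32x4Qfma) { ... } expands into the
+// _Impl body plus TurboFan and interpreter TEST wrappers that enable the
+// relaxed_simd experimental flag before running it.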
+
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X || \
+ V8_TARGET_ARCH_PPC64
+// Only used for qfma and qfms tests below.
+
+// FMOperation holds the params (a, b, c) for a Multiply-Add or
+// Multiply-Subtract operation, and the expected result if the operation was
+// fused, rounded only once for the entire operation, or unfused, rounded after
+// multiply and again after add/subtract.
+template <typename T>
+struct FMOperation {
+ const T a;
+ const T b;
+ const T c;
+ const T fused_result;
+ const T unfused_result;
+};
+
+// large_n is a large number that overflows T when multiplied by itself; it is
+// a useful constant for testing fused/unfused behavior.
+template <typename T>
+constexpr T large_n = T(0);
+
+template <>
+constexpr double large_n<double> = 1e200;
+
+template <>
+constexpr float large_n<float> = 1e20;
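+
+// A minimal scalar sketch of the distinction exercised below (assuming
+// <cmath> were pulled in for std::fma): the fused form rounds once, while the
+// unfused form rounds after the multiply and again after the add.
+//
+//   template <typename T>
+//   T FusedQfma(T a, T b, T c) { return std::fma(b, c, a); }  // a + b * c
+//   template <typename T>
+//   T UnfusedQfma(T a, T b, T c) { return a + b * c; }
+//
+// E.g. with a = -inf and b = c = large_n<float>, the fused form stays -inf,
+// while the unfused form computes +inf first and then -inf + inf = NaN.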
+
+// Fused Multiply-Add performs a + b * c.
+template <typename T>
+static constexpr FMOperation<T> qfma_array[] = {
+ {1.0f, 2.0f, 3.0f, 7.0f, 7.0f},
+ // fused: a + b * c = -inf + (positive overflow) = -inf
+ // unfused: a + b * c = -inf + inf = NaN
+ {-std::numeric_limits<T>::infinity(), large_n<T>, large_n<T>,
+ -std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
+ // fused: a + b * c = inf + (negative overflow) = inf
+ // unfused: a + b * c = inf + -inf = NaN
+ {std::numeric_limits<T>::infinity(), -large_n<T>, large_n<T>,
+ std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
+ // NaN
+ {std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
+ std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()},
+ // -NaN
+ {-std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
+ std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()}};
+
+template <typename T>
+static constexpr Vector<const FMOperation<T>> qfma_vector() {
+ return ArrayVector(qfma_array<T>);
+}
+
+// Fused Multiply-Subtract performs a - b * c.
+template <typename T>
+static constexpr FMOperation<T> qfms_array[]{
+ {1.0f, 2.0f, 3.0f, -5.0f, -5.0f},
+ // fused: a - b * c = inf - (positive overflow) = inf
+ // unfused: a - b * c = inf - inf = NaN
+ {std::numeric_limits<T>::infinity(), large_n<T>, large_n<T>,
+ std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
+ // fused: a - b * c = -inf - (negative overflow) = -inf
+ // unfused: a - b * c = -inf - -inf = NaN
+ {-std::numeric_limits<T>::infinity(), -large_n<T>, large_n<T>,
+ -std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
+ // NaN
+ {std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
+ std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()},
+ // -NaN
+ {-std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
+ std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()}};
+
+template <typename T>
+static constexpr Vector<const FMOperation<T>> qfms_vector() {
+ return ArrayVector(qfms_array<T>);
+}
+
+// Fused results are only expected when the FMA3 feature is enabled and the
+// code runs on TurboFan or Liftoff (which can fall back to TurboFan if FMA is
+// not implemented).
+bool ExpectFused(TestExecutionTier tier) {
+#ifdef V8_TARGET_ARCH_X64
+ return CpuFeatures::IsSupported(FMA3) &&
+ (tier == TestExecutionTier::kTurbofan ||
+ tier == TestExecutionTier::kLiftoff);
+#else
+ return (tier == TestExecutionTier::kTurbofan ||
+ tier == TestExecutionTier::kLiftoff);
+#endif
+}
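+
+// The tests below use it to pick the expected lane value:
+//   ExpectFused(execution_tier) ? x.fused_result : x.unfused_result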
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X ||
+ // V8_TARGET_ARCH_PPC64
+
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X || \
+ V8_TARGET_ARCH_PPC64
+WASM_RELAXED_SIMD_TEST(F32x4Qfma) {
+ WasmRunner<int32_t, float, float, float> r(execution_tier, lower_simd);
+  // Set up a global to hold the output.
+  float* g = r.builder().AddGlobal<float>(kWasmS128);
+  // Build fn to splat test values, perform qfma, and write the result.
+ byte value1 = 0, value2 = 1, value3 = 2;
+ BUILD(r,
+ WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_QFMA(
+ WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1)),
+ WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2)),
+ WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value3)))),
+ WASM_ONE);
+
+ for (FMOperation<float> x : qfma_vector<float>()) {
+ r.Call(x.a, x.b, x.c);
+ float expected =
+ ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
+ for (int i = 0; i < 4; i++) {
+ float actual = ReadLittleEndianValue<float>(&g[i]);
+ CheckFloatResult(x.a, x.b, expected, actual, true /* exact */);
+ }
+ }
+}
+
+WASM_RELAXED_SIMD_TEST(F32x4Qfms) {
+ WasmRunner<int32_t, float, float, float> r(execution_tier, lower_simd);
+  // Set up a global to hold the output.
+  float* g = r.builder().AddGlobal<float>(kWasmS128);
+  // Build fn to splat test values, perform qfms, and write the result.
+ byte value1 = 0, value2 = 1, value3 = 2;
+ BUILD(r,
+ WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_QFMS(
+ WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1)),
+ WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2)),
+ WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value3)))),
+ WASM_ONE);
+
+ for (FMOperation<float> x : qfms_vector<float>()) {
+ r.Call(x.a, x.b, x.c);
+ float expected =
+ ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
+ for (int i = 0; i < 4; i++) {
+ float actual = ReadLittleEndianValue<float>(&g[i]);
+ CheckFloatResult(x.a, x.b, expected, actual, true /* exact */);
+ }
+ }
+}
+
+WASM_RELAXED_SIMD_TEST(F64x2Qfma) {
+ WasmRunner<int32_t, double, double, double> r(execution_tier, lower_simd);
+  // Set up a global to hold the output.
+  double* g = r.builder().AddGlobal<double>(kWasmS128);
+  // Build fn to splat test values, perform qfma, and write the result.
+ byte value1 = 0, value2 = 1, value3 = 2;
+ BUILD(r,
+ WASM_GLOBAL_SET(0, WASM_SIMD_F64x2_QFMA(
+ WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1)),
+ WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2)),
+ WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value3)))),
+ WASM_ONE);
+
+ for (FMOperation<double> x : qfma_vector<double>()) {
+ r.Call(x.a, x.b, x.c);
+ double expected =
+ ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
+ for (int i = 0; i < 2; i++) {
+ double actual = ReadLittleEndianValue<double>(&g[i]);
+ CheckDoubleResult(x.a, x.b, expected, actual, true /* exact */);
+ }
+ }
+}
+
+WASM_RELAXED_SIMD_TEST(F64x2Qfms) {
+ WasmRunner<int32_t, double, double, double> r(execution_tier, lower_simd);
+  // Set up a global to hold the output.
+  double* g = r.builder().AddGlobal<double>(kWasmS128);
+  // Build fn to splat test values, perform qfms, and write the result.
+ byte value1 = 0, value2 = 1, value3 = 2;
+ BUILD(r,
+ WASM_GLOBAL_SET(0, WASM_SIMD_F64x2_QFMS(
+ WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1)),
+ WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2)),
+ WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value3)))),
+ WASM_ONE);
+
+ for (FMOperation<double> x : qfms_vector<double>()) {
+ r.Call(x.a, x.b, x.c);
+ double expected =
+ ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
+ for (int i = 0; i < 2; i++) {
+ double actual = ReadLittleEndianValue<double>(&g[i]);
+ CheckDoubleResult(x.a, x.b, expected, actual, true /* exact */);
+ }
+ }
+}
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X ||
+ // V8_TARGET_ARCH_PPC64
+
+WASM_RELAXED_SIMD_TEST(F32x4RecipApprox) {
+ RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4RecipApprox,
+ base::Recip, false /* !exact */);
+}
+
+WASM_RELAXED_SIMD_TEST(F32x4RecipSqrtApprox) {
+ RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4RecipSqrtApprox,
+ base::RecipSqrt, false /* !exact */);
+}
+
+#undef WASM_RELAXED_SIMD_TEST
+} // namespace test_run_wasm_relaxed_simd
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd-scalar-lowering.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd-scalar-lowering.cc
index 4c5309aae5d..c0cc3c7daca 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd-scalar-lowering.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd-scalar-lowering.cc
@@ -193,7 +193,7 @@ WASM_SIMD_TEST(AllTrue_DifferentShapes) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
BUILD(r, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0)),
- WASM_SIMD_OP(kExprV8x16AllTrue));
+ WASM_SIMD_OP(kExprI8x16AllTrue));
CHECK_EQ(0, r.Call(0x00FF00FF));
}
@@ -202,7 +202,7 @@ WASM_SIMD_TEST(AllTrue_DifferentShapes) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
BUILD(r, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0)),
- WASM_SIMD_OP(kExprV16x8AllTrue));
+ WASM_SIMD_OP(kExprI16x8AllTrue));
CHECK_EQ(0, r.Call(0x000000FF));
}
@@ -212,7 +212,7 @@ WASM_SIMD_TEST(AllTrue_DifferentShapes) {
WasmRunner<int32_t, float> r(execution_tier, lower_simd);
BUILD(r, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(0)),
- WASM_SIMD_OP(kExprV16x8AllTrue));
+ WASM_SIMD_OP(kExprI16x8AllTrue));
CHECK_EQ(1, r.Call(0x000F000F));
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
index fa9299f27b8..4dd925a20ae 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
@@ -8,8 +8,10 @@
#include <cstdint>
#include <cstring>
#include <limits>
+#include <map>
#include <tuple>
#include <type_traits>
+#include <utility>
#include <vector>
#include "src/base/bits.h"
@@ -19,7 +21,6 @@
#include "src/base/overflowing-math.h"
#include "src/base/safe_conversions.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/codegen/assembler-inl.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/machine-type.h"
#include "src/common/globals.h"
@@ -33,6 +34,7 @@
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/cctest/wasm/wasm-simd-utils.h"
#include "test/common/flag-utils.h"
#include "test/common/wasm/flag-utils.h"
#include "test/common/wasm/wasm-macro-gen.h"
@@ -44,27 +46,7 @@ namespace test_run_wasm_simd {
namespace {
-using DoubleUnOp = double (*)(double);
-using DoubleBinOp = double (*)(double, double);
-using DoubleCompareOp = int64_t (*)(double, double);
-using FloatUnOp = float (*)(float);
-using FloatBinOp = float (*)(float, float);
-using FloatCompareOp = int (*)(float, float);
-using Int64UnOp = int64_t (*)(int64_t);
-using Int64BinOp = int64_t (*)(int64_t, int64_t);
-using Int64ShiftOp = int64_t (*)(int64_t, int);
-using Int32UnOp = int32_t (*)(int32_t);
-using Int32BinOp = int32_t (*)(int32_t, int32_t);
-using Int32CompareOp = int (*)(int32_t, int32_t);
-using Int32ShiftOp = int32_t (*)(int32_t, int);
-using Int16UnOp = int16_t (*)(int16_t);
-using Int16BinOp = int16_t (*)(int16_t, int16_t);
-using Int16CompareOp = int (*)(int16_t, int16_t);
-using Int16ShiftOp = int16_t (*)(int16_t, int);
-using Int8UnOp = int8_t (*)(int8_t);
-using Int8BinOp = int8_t (*)(int8_t, int8_t);
-using Int8CompareOp = int (*)(int8_t, int8_t);
-using Int8ShiftOp = int8_t (*)(int8_t, int);
+using Shuffle = std::array<int8_t, kSimd128Size>;
#define WASM_SIMD_TEST(name) \
void RunWasm_##name##_Impl(LowerSimd lower_simd, \
@@ -81,20 +63,9 @@ using Int8ShiftOp = int8_t (*)(int8_t, int);
EXPERIMENTAL_FLAG_SCOPE(simd); \
RunWasm_##name##_Impl(kNoLowerSimd, TestExecutionTier::kInterpreter); \
} \
- TEST(RunWasm_##name##_simd_lowered) { \
- EXPERIMENTAL_FLAG_SCOPE(simd); \
- RunWasm_##name##_Impl(kLowerSimd, TestExecutionTier::kTurbofan); \
- } \
void RunWasm_##name##_Impl(LowerSimd lower_simd, \
TestExecutionTier execution_tier)
-// Generic expected value functions.
-template <typename T, typename = typename std::enable_if<
- std::is_floating_point<T>::value>::type>
-T Negate(T a) {
- return -a;
-}
-
// For signed integral types, use base::AddWithWraparound.
template <typename T, typename = typename std::enable_if<
std::is_floating_point<T>::value>::type>
@@ -138,45 +109,93 @@ T UnsignedMaximum(T a, T b) {
return static_cast<UnsignedT>(a) >= static_cast<UnsignedT>(b) ? a : b;
}
-int Equal(float a, float b) { return a == b ? -1 : 0; }
+template <typename T, typename U = T>
+U Equal(T a, T b) {
+ return a == b ? -1 : 0;
+}
-template <typename T>
-T Equal(T a, T b) {
+template <>
+int32_t Equal(float a, float b) {
return a == b ? -1 : 0;
}
-int NotEqual(float a, float b) { return a != b ? -1 : 0; }
+template <>
+int64_t Equal(double a, double b) {
+ return a == b ? -1 : 0;
+}
-template <typename T>
-T NotEqual(T a, T b) {
+template <typename T, typename U = T>
+U NotEqual(T a, T b) {
+ return a != b ? -1 : 0;
+}
+
+template <>
+int32_t NotEqual(float a, float b) {
return a != b ? -1 : 0;
}
-int Less(float a, float b) { return a < b ? -1 : 0; }
+template <>
+int64_t NotEqual(double a, double b) {
+ return a != b ? -1 : 0;
+}
-template <typename T>
-T Less(T a, T b) {
+template <typename T, typename U = T>
+U Less(T a, T b) {
return a < b ? -1 : 0;
}
-int LessEqual(float a, float b) { return a <= b ? -1 : 0; }
+template <>
+int32_t Less(float a, float b) {
+ return a < b ? -1 : 0;
+}
-template <typename T>
-T LessEqual(T a, T b) {
+template <>
+int64_t Less(double a, double b) {
+ return a < b ? -1 : 0;
+}
+
+template <typename T, typename U = T>
+U LessEqual(T a, T b) {
+ return a <= b ? -1 : 0;
+}
+
+template <>
+int32_t LessEqual(float a, float b) {
return a <= b ? -1 : 0;
}
-int Greater(float a, float b) { return a > b ? -1 : 0; }
+template <>
+int64_t LessEqual(double a, double b) {
+ return a <= b ? -1 : 0;
+}
-template <typename T>
-T Greater(T a, T b) {
+template <typename T, typename U = T>
+U Greater(T a, T b) {
return a > b ? -1 : 0;
}
-int GreaterEqual(float a, float b) { return a >= b ? -1 : 0; }
+template <>
+int32_t Greater(float a, float b) {
+ return a > b ? -1 : 0;
+}
-template <typename T>
-T GreaterEqual(T a, T b) {
+template <>
+int64_t Greater(double a, double b) {
+ return a > b ? -1 : 0;
+}
+
+template <typename T, typename U = T>
+U GreaterEqual(T a, T b) {
+ return a >= b ? -1 : 0;
+}
+
+template <>
+int32_t GreaterEqual(float a, float b) {
+ return a >= b ? -1 : 0;
+}
+
+template <>
+int64_t GreaterEqual(double a, double b) {
return a >= b ? -1 : 0;
}
@@ -227,109 +246,6 @@ template <typename T>
T Abs(T a) {
return std::abs(a);
}
-
-// only used for F64x2 tests below
-int64_t Equal(double a, double b) { return a == b ? -1 : 0; }
-
-int64_t NotEqual(double a, double b) { return a != b ? -1 : 0; }
-
-int64_t Greater(double a, double b) { return a > b ? -1 : 0; }
-
-int64_t GreaterEqual(double a, double b) { return a >= b ? -1 : 0; }
-
-int64_t Less(double a, double b) { return a < b ? -1 : 0; }
-
-int64_t LessEqual(double a, double b) { return a <= b ? -1 : 0; }
-
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
-// Only used for qfma and qfms tests below.
-
-// FMOperation holds the params (a, b, c) for a Multiply-Add or
-// Multiply-Subtract operation, and the expected result if the operation was
-// fused, rounded only once for the entire operation, or unfused, rounded after
-// multiply and again after add/subtract.
-template <typename T>
-struct FMOperation {
- const T a;
- const T b;
- const T c;
- const T fused_result;
- const T unfused_result;
-};
-
-// large_n is large number that overflows T when multiplied by itself, this is a
-// useful constant to test fused/unfused behavior.
-template <typename T>
-constexpr T large_n = T(0);
-
-template <>
-constexpr double large_n<double> = 1e200;
-
-template <>
-constexpr float large_n<float> = 1e20;
-
-// Fused Multiply-Add performs a + b * c.
-template <typename T>
-static constexpr FMOperation<T> qfma_array[] = {
- {1.0f, 2.0f, 3.0f, 7.0f, 7.0f},
- // fused: a + b * c = -inf + (positive overflow) = -inf
- // unfused: a + b * c = -inf + inf = NaN
- {-std::numeric_limits<T>::infinity(), large_n<T>, large_n<T>,
- -std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
- // fused: a + b * c = inf + (negative overflow) = inf
- // unfused: a + b * c = inf + -inf = NaN
- {std::numeric_limits<T>::infinity(), -large_n<T>, large_n<T>,
- std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
- // NaN
- {std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
- std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()},
- // -NaN
- {-std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
- std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()}};
-
-template <typename T>
-static constexpr Vector<const FMOperation<T>> qfma_vector() {
- return ArrayVector(qfma_array<T>);
-}
-
-// Fused Multiply-Subtract performs a - b * c.
-template <typename T>
-static constexpr FMOperation<T> qfms_array[]{
- {1.0f, 2.0f, 3.0f, -5.0f, -5.0f},
- // fused: a - b * c = inf - (positive overflow) = inf
- // unfused: a - b * c = inf - inf = NaN
- {std::numeric_limits<T>::infinity(), large_n<T>, large_n<T>,
- std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
- // fused: a - b * c = -inf - (negative overflow) = -inf
- // unfused: a - b * c = -inf - -inf = NaN
- {-std::numeric_limits<T>::infinity(), -large_n<T>, large_n<T>,
- -std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
- // NaN
- {std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
- std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()},
- // -NaN
- {-std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
- std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()}};
-
-template <typename T>
-static constexpr Vector<const FMOperation<T>> qfms_vector() {
- return ArrayVector(qfms_array<T>);
-}
-
-// Fused results only when fma3 feature is enabled, and running on TurboFan or
-// Liftoff (which can fall back to TurboFan if FMA is not implemented).
-bool ExpectFused(TestExecutionTier tier) {
-#ifdef V8_TARGET_ARCH_X64
- return CpuFeatures::IsSupported(FMA3) &&
- (tier == TestExecutionTier::kTurbofan ||
- tier == TestExecutionTier::kLiftoff);
-#else
- return (tier == TestExecutionTier::kTurbofan ||
- tier == TestExecutionTier::kLiftoff);
-#endif
-}
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
-
} // namespace
#define WASM_SIMD_CHECK_LANE_S(TYPE, value, LANE_TYPE, lane_value, lane_index) \
@@ -345,57 +261,6 @@ bool ExpectFused(TestExecutionTier tier) {
lane_index, WASM_LOCAL_GET(value))), \
WASM_RETURN1(WASM_ZERO))
-// The macro below disables tests lowering for certain nodes where the simd
-// lowering doesn't work correctly. Early return here if the CPU does not
-// support SIMD as the graph will be implicitly lowered in that case.
-#define WASM_SIMD_TEST_NO_LOWERING(name) \
- void RunWasm_##name##_Impl(LowerSimd lower_simd, \
- TestExecutionTier execution_tier); \
- TEST(RunWasm_##name##_turbofan) { \
- if (!CpuFeatures::SupportsWasmSimd128()) return; \
- EXPERIMENTAL_FLAG_SCOPE(simd); \
- RunWasm_##name##_Impl(kNoLowerSimd, TestExecutionTier::kTurbofan); \
- } \
- TEST(RunWasm_##name##_liftoff) { \
- if (!CpuFeatures::SupportsWasmSimd128()) return; \
- EXPERIMENTAL_FLAG_SCOPE(simd); \
- RunWasm_##name##_Impl(kNoLowerSimd, TestExecutionTier::kLiftoff); \
- } \
- TEST(RunWasm_##name##_interpreter) { \
- EXPERIMENTAL_FLAG_SCOPE(simd); \
- RunWasm_##name##_Impl(kNoLowerSimd, TestExecutionTier::kInterpreter); \
- } \
- void RunWasm_##name##_Impl(LowerSimd lower_simd, \
- TestExecutionTier execution_tier)
-
-// Returns true if the platform can represent the result.
-template <typename T>
-bool PlatformCanRepresent(T x) {
-#if V8_TARGET_ARCH_ARM
- return std::fpclassify(x) != FP_SUBNORMAL;
-#else
- return true;
-#endif
-}
-
-// Returns true for very small and very large numbers. We skip these test
-// values for the approximation instructions, which don't work at the extremes.
-bool IsExtreme(float x) {
- float abs_x = std::fabs(x);
- const float kSmallFloatThreshold = 1.0e-32f;
- const float kLargeFloatThreshold = 1.0e32f;
- return abs_x != 0.0f && // 0 or -0 are fine.
- (abs_x < kSmallFloatThreshold || abs_x > kLargeFloatThreshold);
-}
-
-#if V8_OS_AIX
-template <typename T>
-bool MightReverseSign(T float_op) {
- return float_op == static_cast<T>(Negate) ||
- float_op == static_cast<T>(std::abs);
-}
-#endif
-
WASM_SIMD_TEST(S128Globals) {
WasmRunner<int32_t> r(execution_tier, lower_simd);
// Set up a global to hold input and output vectors.
@@ -488,113 +353,6 @@ WASM_SIMD_TEST(F32x4ConvertI32x4) {
}
}
-bool IsSameNan(float expected, float actual) {
- // Sign is non-deterministic.
- uint32_t expected_bits = bit_cast<uint32_t>(expected) & ~0x80000000;
- uint32_t actual_bits = bit_cast<uint32_t>(actual) & ~0x80000000;
- // Some implementations convert signaling NaNs to quiet NaNs.
- return (expected_bits == actual_bits) ||
- ((expected_bits | 0x00400000) == actual_bits);
-}
-
-bool IsCanonical(float actual) {
- uint32_t actual_bits = bit_cast<uint32_t>(actual);
- // Canonical NaN has quiet bit and no payload.
- return (actual_bits & 0xFFC00000) == actual_bits;
-}
-
-void CheckFloatResult(float x, float y, float expected, float actual,
- bool exact = true) {
- if (std::isnan(expected)) {
- CHECK(std::isnan(actual));
- if (std::isnan(x) && IsSameNan(x, actual)) return;
- if (std::isnan(y) && IsSameNan(y, actual)) return;
- if (IsSameNan(expected, actual)) return;
- if (IsCanonical(actual)) return;
- // This is expected to assert; it's useful for debugging.
- CHECK_EQ(bit_cast<uint32_t>(expected), bit_cast<uint32_t>(actual));
- } else {
- if (exact) {
- CHECK_EQ(expected, actual);
- // The sign of 0's must match.
- CHECK_EQ(std::signbit(expected), std::signbit(actual));
- return;
- }
- // Otherwise, perform an approximate equality test. First check for
- // equality to handle +/-Infinity where approximate equality doesn't work.
- if (expected == actual) return;
-
- // 1% error allows all platforms to pass easily.
- constexpr float kApproximationError = 0.01f;
- float abs_error = std::abs(expected) * kApproximationError,
- min = expected - abs_error, max = expected + abs_error;
- CHECK_LE(min, actual);
- CHECK_GE(max, actual);
- }
-}
-
-// Test some values not included in the float inputs from value_helper. These
-// tests are useful for opcodes that are synthesized during code gen, like Min
-// and Max on ia32 and x64.
-static constexpr uint32_t nan_test_array[] = {
- // Bit patterns of quiet NaNs and signaling NaNs, with or without
- // additional payload.
- 0x7FC00000, 0xFFC00000, 0x7FFFFFFF, 0xFFFFFFFF, 0x7F876543, 0xFF876543,
- // NaN with top payload bit unset.
- 0x7FA00000,
- // Both Infinities.
- 0x7F800000, 0xFF800000,
- // Some "normal" numbers, 1 and -1.
- 0x3F800000, 0xBF800000};
-
-#define FOR_FLOAT32_NAN_INPUTS(i) \
- for (size_t i = 0; i < arraysize(nan_test_array); ++i)
-
-void RunF32x4UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, FloatUnOp expected_op,
- bool exact = true) {
- WasmRunner<int32_t, float> r(execution_tier, lower_simd);
- // Global to hold output.
- float* g = r.builder().AddGlobal<float>(kWasmS128);
- // Build fn to splat test value, perform unop, and write the result.
- byte value = 0;
- byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
- WASM_ONE);
-
- FOR_FLOAT32_INPUTS(x) {
- if (!PlatformCanRepresent(x)) continue;
- // Extreme values have larger errors so skip them for approximation tests.
- if (!exact && IsExtreme(x)) continue;
- float expected = expected_op(x);
-#if V8_OS_AIX
- if (!MightReverseSign<FloatUnOp>(expected_op))
- expected = FpOpWorkaround<float>(x, expected);
-#endif
- if (!PlatformCanRepresent(expected)) continue;
- r.Call(x);
- for (int i = 0; i < 4; i++) {
- float actual = ReadLittleEndianValue<float>(&g[i]);
- CheckFloatResult(x, x, expected, actual, exact);
- }
- }
-
- FOR_FLOAT32_NAN_INPUTS(i) {
- float x = bit_cast<float>(nan_test_array[i]);
- if (!PlatformCanRepresent(x)) continue;
- // Extreme values have larger errors so skip them for approximation tests.
- if (!exact && IsExtreme(x)) continue;
- float expected = expected_op(x);
- if (!PlatformCanRepresent(expected)) continue;
- r.Call(x);
- for (int i = 0; i < 4; i++) {
- float actual = ReadLittleEndianValue<float>(&g[i]);
- CheckFloatResult(x, x, expected, actual, exact);
- }
- }
-}
-
WASM_SIMD_TEST(F32x4Abs) {
RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Abs, std::abs);
}
@@ -607,18 +365,6 @@ WASM_SIMD_TEST(F32x4Sqrt) {
RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Sqrt, std::sqrt);
}
-WASM_SIMD_TEST(F32x4RecipApprox) {
- FLAG_SCOPE(wasm_simd_post_mvp);
- RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4RecipApprox,
- base::Recip, false /* !exact */);
-}
-
-WASM_SIMD_TEST(F32x4RecipSqrtApprox) {
- FLAG_SCOPE(wasm_simd_post_mvp);
- RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4RecipSqrtApprox,
- base::RecipSqrt, false /* !exact */);
-}
-
WASM_SIMD_TEST(F32x4Ceil) {
RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Ceil, ceilf, true);
}
@@ -636,54 +382,6 @@ WASM_SIMD_TEST(F32x4NearestInt) {
true);
}
-void RunF32x4BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, FloatBinOp expected_op) {
- WasmRunner<int32_t, float, float> r(execution_tier, lower_simd);
- // Global to hold output.
- float* g = r.builder().AddGlobal<float>(kWasmS128);
- // Build fn to splat test values, perform binop, and write the result.
- byte value1 = 0, value2 = 1;
- byte temp1 = r.AllocateLocal(kWasmS128);
- byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
-
- FOR_FLOAT32_INPUTS(x) {
- if (!PlatformCanRepresent(x)) continue;
- FOR_FLOAT32_INPUTS(y) {
- if (!PlatformCanRepresent(y)) continue;
- float expected = expected_op(x, y);
- if (!PlatformCanRepresent(expected)) continue;
- r.Call(x, y);
- for (int i = 0; i < 4; i++) {
- float actual = ReadLittleEndianValue<float>(&g[i]);
- CheckFloatResult(x, y, expected, actual, true /* exact */);
- }
- }
- }
-
- FOR_FLOAT32_NAN_INPUTS(i) {
- float x = bit_cast<float>(nan_test_array[i]);
- if (!PlatformCanRepresent(x)) continue;
- FOR_FLOAT32_NAN_INPUTS(j) {
- float y = bit_cast<float>(nan_test_array[j]);
- if (!PlatformCanRepresent(y)) continue;
- float expected = expected_op(x, y);
- if (!PlatformCanRepresent(expected)) continue;
- r.Call(x, y);
- for (int i = 0; i < 4; i++) {
- float actual = ReadLittleEndianValue<float>(&g[i]);
- CheckFloatResult(x, y, expected, actual, true /* exact */);
- }
- }
- }
-}
-
-#undef FOR_FLOAT32_NAN_INPUTS
-
WASM_SIMD_TEST(F32x4Add) {
RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Add, Add);
}
@@ -711,37 +409,6 @@ WASM_SIMD_TEST(F32x4Pmax) {
RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Pmax, Maximum);
}
-void RunF32x4CompareOpTest(TestExecutionTier execution_tier,
- LowerSimd lower_simd, WasmOpcode opcode,
- FloatCompareOp expected_op) {
- WasmRunner<int32_t, float, float> r(execution_tier, lower_simd);
- // Set up global to hold mask output.
- int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
- // Build fn to splat test values, perform compare op, and write the result.
- byte value1 = 0, value2 = 1;
- byte temp1 = r.AllocateLocal(kWasmS128);
- byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
-
- FOR_FLOAT32_INPUTS(x) {
- if (!PlatformCanRepresent(x)) continue;
- FOR_FLOAT32_INPUTS(y) {
- if (!PlatformCanRepresent(y)) continue;
- float diff = x - y; // Model comparison as subtraction.
- if (!PlatformCanRepresent(diff)) continue;
- r.Call(x, y);
- int32_t expected = expected_op(x, y);
- for (int i = 0; i < 4; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
- }
- }
- }
-}
-
WASM_SIMD_TEST(F32x4Eq) {
RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Eq, Equal);
}
@@ -766,115 +433,6 @@ WASM_SIMD_TEST(F32x4Le) {
RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Le, LessEqual);
}
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM
-// TODO(v8:10983) Prototyping sign select.
-template <typename T>
-void RunSignSelect(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode signselect, WasmOpcode splat,
- std::array<int8_t, kSimd128Size> mask) {
- FLAG_SCOPE(wasm_simd_post_mvp);
- WasmRunner<int32_t, T, T> r(execution_tier, lower_simd);
- T* output = r.builder().template AddGlobal<T>(kWasmS128);
-
- // Splat 2 constant values, then use a mask that selects alternate lanes.
- BUILD(r, WASM_LOCAL_GET(0), WASM_SIMD_OP(splat), WASM_LOCAL_GET(1),
- WASM_SIMD_OP(splat), WASM_SIMD_CONSTANT(mask), WASM_SIMD_OP(signselect),
- kExprGlobalSet, 0, WASM_ONE);
-
- r.Call(1, 2);
-
- constexpr int lanes = kSimd128Size / sizeof(T);
- for (int i = 0; i < lanes; i += 2) {
- CHECK_EQ(1, ReadLittleEndianValue<T>(&output[i]));
- }
- for (int i = 1; i < lanes; i += 2) {
- CHECK_EQ(2, ReadLittleEndianValue<T>(&output[i]));
- }
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I8x16SignSelect) {
- std::array<int8_t, kSimd128Size> mask = {0x80, 0, -1, 0, 0x80, 0, -1, 0,
- 0x80, 0, -1, 0, 0x80, 0, -1, 0};
- RunSignSelect<int8_t>(execution_tier, lower_simd, kExprI8x16SignSelect,
- kExprI8x16Splat, mask);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I16x8SignSelect) {
- std::array<int8_t, kSimd128Size> mask = {0, 0x80, 0, 0, -1, -1, 0, 0,
- 0, 0x80, 0, 0, -1, -1, 0, 0};
- RunSignSelect<int16_t>(execution_tier, lower_simd, kExprI16x8SignSelect,
- kExprI16x8Splat, mask);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I32x4SignSelect) {
- std::array<int8_t, kSimd128Size> mask = {0, 0, 0, 0x80, 0, 0, 0, 0,
- -1, -1, -1, -1, 0, 0, 0, 0};
- RunSignSelect<int32_t>(execution_tier, lower_simd, kExprI32x4SignSelect,
- kExprI32x4Splat, mask);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I64x2SignSelect) {
- std::array<int8_t, kSimd128Size> mask = {0, 0, 0, 0, 0, 0, 0, 0x80,
- 0, 0, 0, 0, 0, 0, 0, 0};
- RunSignSelect<int64_t>(execution_tier, lower_simd, kExprI64x2SignSelect,
- kExprI64x2Splat, mask);
-}
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 ||
- // V8_TARGET_ARCH_ARM
-
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
-WASM_SIMD_TEST_NO_LOWERING(F32x4Qfma) {
- FLAG_SCOPE(wasm_simd_post_mvp);
- WasmRunner<int32_t, float, float, float> r(execution_tier, lower_simd);
- // Set up global to hold mask output.
- float* g = r.builder().AddGlobal<float>(kWasmS128);
- // Build fn to splat test values, perform compare op, and write the result.
- byte value1 = 0, value2 = 1, value3 = 2;
- BUILD(r,
- WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_QFMA(
- WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1)),
- WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2)),
- WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value3)))),
- WASM_ONE);
-
- for (FMOperation<float> x : qfma_vector<float>()) {
- r.Call(x.a, x.b, x.c);
- float expected =
- ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
- for (int i = 0; i < 4; i++) {
- float actual = ReadLittleEndianValue<float>(&g[i]);
- CheckFloatResult(x.a, x.b, expected, actual, true /* exact */);
- }
- }
-}
-
-WASM_SIMD_TEST_NO_LOWERING(F32x4Qfms) {
- FLAG_SCOPE(wasm_simd_post_mvp);
- WasmRunner<int32_t, float, float, float> r(execution_tier, lower_simd);
- // Set up global to hold mask output.
- float* g = r.builder().AddGlobal<float>(kWasmS128);
- // Build fn to splat test values, perform compare op, and write the result.
- byte value1 = 0, value2 = 1, value3 = 2;
- BUILD(r,
- WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_QFMS(
- WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1)),
- WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2)),
- WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value3)))),
- WASM_ONE);
-
- for (FMOperation<float> x : qfms_vector<float>()) {
- r.Call(x.a, x.b, x.c);
- float expected =
- ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
- for (int i = 0; i < 4; i++) {
- float actual = ReadLittleEndianValue<float>(&g[i]);
- CheckFloatResult(x.a, x.b, expected, actual, true /* exact */);
- }
- }
-}
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
-
WASM_SIMD_TEST(I64x2Splat) {
WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
// Set up a global to hold output vector.
@@ -925,68 +483,15 @@ WASM_SIMD_TEST(I64x2ReplaceLane) {
}
}
-void RunI64x2UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, Int64UnOp expected_op) {
- WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
- // Global to hold output.
- int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
- // Build fn to splat test value, perform unop, and write the result.
- byte value = 0;
- byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
- WASM_ONE);
-
- FOR_INT64_INPUTS(x) {
- r.Call(x);
- int64_t expected = expected_op(x);
- for (int i = 0; i < 2; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
- }
- }
-}
-
WASM_SIMD_TEST(I64x2Neg) {
RunI64x2UnOpTest(execution_tier, lower_simd, kExprI64x2Neg,
base::NegateWithWraparound);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2Abs) {
+WASM_SIMD_TEST(I64x2Abs) {
RunI64x2UnOpTest(execution_tier, lower_simd, kExprI64x2Abs, std::abs);
}
-void RunI64x2ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, Int64ShiftOp expected_op) {
- // Intentionally shift by 64, should be no-op.
- for (int shift = 1; shift <= 64; shift++) {
- WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
- int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
- int64_t* g_imm = r.builder().AddGlobal<int64_t>(kWasmS128);
- int64_t* g_mem = r.builder().AddGlobal<int64_t>(kWasmS128);
- byte value = 0;
- byte simd = r.AllocateLocal(kWasmS128);
- // Shift using an immediate, and shift using a value loaded from memory.
- BUILD(
- r, WASM_LOCAL_SET(simd, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
- WASM_I32V(shift))),
- WASM_GLOBAL_SET(1, WASM_SIMD_SHIFT_OP(
- opcode, WASM_LOCAL_GET(simd),
- WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
- WASM_ONE);
-
- r.builder().WriteMemory(&memory[0], shift);
- FOR_INT64_INPUTS(x) {
- r.Call(x);
- int64_t expected = expected_op(x, shift);
- for (int i = 0; i < 2; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g_imm[i]));
- CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g_mem[i]));
- }
- }
- }
-}
-
WASM_SIMD_TEST(I64x2Shl) {
RunI64x2ShiftOpTest(execution_tier, lower_simd, kExprI64x2Shl,
LogicalShiftLeft);
@@ -1002,32 +507,6 @@ WASM_SIMD_TEST(I64x2ShrU) {
LogicalShiftRight);
}
-void RunI64x2BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, Int64BinOp expected_op) {
- WasmRunner<int32_t, int64_t, int64_t> r(execution_tier, lower_simd);
- // Global to hold output.
- int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
- // Build fn to splat test values, perform binop, and write the result.
- byte value1 = 0, value2 = 1;
- byte temp1 = r.AllocateLocal(kWasmS128);
- byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
-
- FOR_INT64_INPUTS(x) {
- FOR_INT64_INPUTS(y) {
- r.Call(x, y);
- int64_t expected = expected_op(x, y);
- for (int i = 0; i < 2; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
- }
- }
- }
-}
-
WASM_SIMD_TEST(I64x2Add) {
RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Add,
base::AddWithWraparound);
@@ -1038,27 +517,27 @@ WASM_SIMD_TEST(I64x2Sub) {
base::SubWithWraparound);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2Eq) {
+WASM_SIMD_TEST(I64x2Eq) {
RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Eq, Equal);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2Ne) {
+WASM_SIMD_TEST(I64x2Ne) {
RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Ne, NotEqual);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2LtS) {
+WASM_SIMD_TEST(I64x2LtS) {
RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LtS, Less);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2LeS) {
+WASM_SIMD_TEST(I64x2LeS) {
RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LeS, LessEqual);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2GtS) {
+WASM_SIMD_TEST(I64x2GtS) {
RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GtS, Greater);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2GeS) {
+WASM_SIMD_TEST(I64x2GeS) {
RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GeS, GreaterEqual);
}
@@ -1149,124 +628,6 @@ WASM_SIMD_TEST(I64x2ExtractWithF64x2) {
CHECK_EQ(1, r.Call());
}
-bool IsExtreme(double x) {
- double abs_x = std::fabs(x);
- const double kSmallFloatThreshold = 1.0e-298;
- const double kLargeFloatThreshold = 1.0e298;
- return abs_x != 0.0f && // 0 or -0 are fine.
- (abs_x < kSmallFloatThreshold || abs_x > kLargeFloatThreshold);
-}
-
-bool IsSameNan(double expected, double actual) {
- // Sign is non-deterministic.
- uint64_t expected_bits = bit_cast<uint64_t>(expected) & ~0x8000000000000000;
- uint64_t actual_bits = bit_cast<uint64_t>(actual) & ~0x8000000000000000;
- // Some implementations convert signaling NaNs to quiet NaNs.
- return (expected_bits == actual_bits) ||
- ((expected_bits | 0x0008000000000000) == actual_bits);
-}
-
-bool IsCanonical(double actual) {
- uint64_t actual_bits = bit_cast<uint64_t>(actual);
- // Canonical NaN has quiet bit and no payload.
- return (actual_bits & 0xFFF8000000000000) == actual_bits;
-}
-
-void CheckDoubleResult(double x, double y, double expected, double actual,
- bool exact = true) {
- if (std::isnan(expected)) {
- CHECK(std::isnan(actual));
- if (std::isnan(x) && IsSameNan(x, actual)) return;
- if (std::isnan(y) && IsSameNan(y, actual)) return;
- if (IsSameNan(expected, actual)) return;
- if (IsCanonical(actual)) return;
- // This is expected to assert; it's useful for debugging.
- CHECK_EQ(bit_cast<uint64_t>(expected), bit_cast<uint64_t>(actual));
- } else {
- if (exact) {
- CHECK_EQ(expected, actual);
- // The sign of 0's must match.
- CHECK_EQ(std::signbit(expected), std::signbit(actual));
- return;
- }
- // Otherwise, perform an approximate equality test. First check for
- // equality to handle +/-Infinity where approximate equality doesn't work.
- if (expected == actual) return;
-
- // 1% error allows all platforms to pass easily.
- constexpr double kApproximationError = 0.01f;
- double abs_error = std::abs(expected) * kApproximationError,
- min = expected - abs_error, max = expected + abs_error;
- CHECK_LE(min, actual);
- CHECK_GE(max, actual);
- }
-}
-
-// Test some values not included in the double inputs from value_helper. These
-// tests are useful for opcodes that are synthesized during code gen, like Min
-// and Max on ia32 and x64.
-static constexpr uint64_t double_nan_test_array[] = {
- // quiet NaNs, + and -
- 0x7FF8000000000001, 0xFFF8000000000001,
- // with payload
- 0x7FF8000000000011, 0xFFF8000000000011,
- // signaling NaNs, + and -
- 0x7FF0000000000001, 0xFFF0000000000001,
- // with payload
- 0x7FF0000000000011, 0xFFF0000000000011,
- // Both Infinities.
- 0x7FF0000000000000, 0xFFF0000000000000,
- // Some "normal" numbers, 1 and -1.
- 0x3FF0000000000000, 0xBFF0000000000000};
-
-#define FOR_FLOAT64_NAN_INPUTS(i) \
- for (size_t i = 0; i < arraysize(double_nan_test_array); ++i)
-
-void RunF64x2UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, DoubleUnOp expected_op,
- bool exact = true) {
- WasmRunner<int32_t, double> r(execution_tier, lower_simd);
- // Global to hold output.
- double* g = r.builder().AddGlobal<double>(kWasmS128);
- // Build fn to splat test value, perform unop, and write the result.
- byte value = 0;
- byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
- WASM_ONE);
-
- FOR_FLOAT64_INPUTS(x) {
- if (!PlatformCanRepresent(x)) continue;
- // Extreme values have larger errors so skip them for approximation tests.
- if (!exact && IsExtreme(x)) continue;
- double expected = expected_op(x);
-#if V8_OS_AIX
- if (!MightReverseSign<DoubleUnOp>(expected_op))
- expected = FpOpWorkaround<double>(x, expected);
-#endif
- if (!PlatformCanRepresent(expected)) continue;
- r.Call(x);
- for (int i = 0; i < 2; i++) {
- double actual = ReadLittleEndianValue<double>(&g[i]);
- CheckDoubleResult(x, x, expected, actual, exact);
- }
- }
-
- FOR_FLOAT64_NAN_INPUTS(i) {
- double x = bit_cast<double>(double_nan_test_array[i]);
- if (!PlatformCanRepresent(x)) continue;
- // Extreme values have larger errors so skip them for approximation tests.
- if (!exact && IsExtreme(x)) continue;
- double expected = expected_op(x);
- if (!PlatformCanRepresent(expected)) continue;
- r.Call(x);
- for (int i = 0; i < 2; i++) {
- double actual = ReadLittleEndianValue<double>(&g[i]);
- CheckDoubleResult(x, x, expected, actual, exact);
- }
- }
-}
-
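The NaN handling removed above hinges on the IEEE 754 binary64 layout: bit 63 is the sign, bits 62-52 the exponent, bit 51 the quiet bit, and bits 50-0 the payload. A standalone sketch (plain C++, independent of the V8 tree) decodes a few of the double_nan_test_array constants with the same predicate as IsCanonical:

#include <cinttypes>
#include <cstdint>
#include <cstdio>

// Standalone sketch, not part of the diff: decode IEEE 754 binary64 patterns.
void Describe(uint64_t bits) {
  uint64_t exponent = (bits >> 52) & 0x7FF;
  uint64_t quiet = (bits >> 51) & 1;
  uint64_t payload = bits & 0x0007FFFFFFFFFFFF;
  bool is_nan = exponent == 0x7FF && (quiet != 0 || payload != 0);
  // Same mask as IsCanonical above: only sign, exponent and quiet bits set.
  bool canonical_nan = is_nan && (bits & 0xFFF8000000000000) == bits;
  std::printf("0x%016" PRIx64 " nan=%d quiet=%" PRIu64 " payload=0x%" PRIx64
              " canonical=%d\n",
              bits, is_nan, quiet, payload, canonical_nan);
}

int main() {
  Describe(0x7FF8000000000000);  // canonical quiet NaN
  Describe(0x7FF8000000000011);  // quiet NaN with a payload
  Describe(0x7FF0000000000001);  // signaling NaN (quiet bit clear)
  Describe(0x7FF0000000000000);  // +Infinity, not a NaN
  return 0;
}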
WASM_SIMD_TEST(F64x2Abs) {
RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Abs, std::abs);
}
@@ -1301,12 +662,15 @@ void RunF64x2ConvertLowI32x4Test(TestExecutionTier execution_tier,
LowerSimd lower_simd, WasmOpcode opcode) {
WasmRunner<int32_t, SrcType> r(execution_tier, lower_simd);
double* g = r.builder().template AddGlobal<double>(kWasmS128);
- // TODO(zhin): set top lanes to 0 to assert conversion happens on low lanes.
- BUILD(
- r,
- WASM_GLOBAL_SET(
- 0, WASM_SIMD_UNOP(opcode, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0)))),
- WASM_ONE);
+ BUILD(r,
+ WASM_GLOBAL_SET(
+ 0,
+ WASM_SIMD_UNOP(
+ opcode,
+ // Set top lane of i64x2 == set top 2 lanes of i32x4.
+ WASM_SIMD_I64x2_REPLACE_LANE(
+ 1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0)), WASM_ZERO64))),
+ WASM_ONE);
for (SrcType x : compiler::ValueHelper::GetVector<SrcType>()) {
r.Call(x);
@@ -1318,12 +682,12 @@ void RunF64x2ConvertLowI32x4Test(TestExecutionTier execution_tier,
}
}
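The replace-lane trick in the hunk above relies on the two views of the same 128-bit value overlapping: lane 1 of i64x2 occupies bytes 8-15, which are exactly lanes 2 and 3 of i32x4. A standalone sketch of that aliasing (plain C++, assumes a little-endian host, matching the wasm lane layout):

#include <cassert>
#include <cstdint>
#include <cstring>

// Standalone sketch, not part of the diff: zeroing i64x2 lane 1 clears i32x4
// lanes 2 and 3, so the converted result can only depend on the low lanes.
int main() {
  uint8_t s128[16];
  uint32_t lanes32[4] = {7, 7, 7, 7};  // i32x4.splat(7), as in the old test
  std::memcpy(s128, lanes32, sizeof lanes32);

  uint64_t zero = 0;                          // WASM_ZERO64
  std::memcpy(s128 + 8, &zero, sizeof zero);  // i64x2.replace_lane 1

  std::memcpy(lanes32, s128, sizeof lanes32);
  assert(lanes32[0] == 7 && lanes32[1] == 7);  // low lanes untouched
  assert(lanes32[2] == 0 && lanes32[3] == 0);  // top two lanes cleared
  return 0;
}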
-WASM_SIMD_TEST_NO_LOWERING(F64x2ConvertLowI32x4S) {
+WASM_SIMD_TEST(F64x2ConvertLowI32x4S) {
RunF64x2ConvertLowI32x4Test<int32_t>(execution_tier, lower_simd,
kExprF64x2ConvertLowI32x4S);
}
-WASM_SIMD_TEST_NO_LOWERING(F64x2ConvertLowI32x4U) {
+WASM_SIMD_TEST(F64x2ConvertLowI32x4U) {
RunF64x2ConvertLowI32x4Test<uint32_t>(execution_tier, lower_simd,
kExprF64x2ConvertLowI32x4U);
}
@@ -1353,17 +717,17 @@ void RunI32x4TruncSatF64x2Test(TestExecutionTier execution_tier,
}
}
-WASM_SIMD_TEST_NO_LOWERING(I32x4TruncSatF64x2SZero) {
+WASM_SIMD_TEST(I32x4TruncSatF64x2SZero) {
RunI32x4TruncSatF64x2Test<int32_t>(execution_tier, lower_simd,
kExprI32x4TruncSatF64x2SZero);
}
-WASM_SIMD_TEST_NO_LOWERING(I32x4TruncSatF64x2UZero) {
+WASM_SIMD_TEST(I32x4TruncSatF64x2UZero) {
RunI32x4TruncSatF64x2Test<uint32_t>(execution_tier, lower_simd,
kExprI32x4TruncSatF64x2UZero);
}
-WASM_SIMD_TEST_NO_LOWERING(F32x4DemoteF64x2Zero) {
+WASM_SIMD_TEST(F32x4DemoteF64x2Zero) {
WasmRunner<int32_t, double> r(execution_tier, lower_simd);
float* g = r.builder().AddGlobal<float>(kWasmS128);
BUILD(r,
@@ -1386,7 +750,7 @@ WASM_SIMD_TEST_NO_LOWERING(F32x4DemoteF64x2Zero) {
}
}
-WASM_SIMD_TEST_NO_LOWERING(F64x2PromoteLowF32x4) {
+WASM_SIMD_TEST(F64x2PromoteLowF32x4) {
WasmRunner<int32_t, float> r(execution_tier, lower_simd);
double* g = r.builder().AddGlobal<double>(kWasmS128);
BUILD(r,
@@ -1405,53 +769,6 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2PromoteLowF32x4) {
}
}
-void RunF64x2BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, DoubleBinOp expected_op) {
- WasmRunner<int32_t, double, double> r(execution_tier, lower_simd);
- // Global to hold output.
- double* g = r.builder().AddGlobal<double>(kWasmS128);
- // Build fn to splat test value, perform binop, and write the result.
- byte value1 = 0, value2 = 1;
- byte temp1 = r.AllocateLocal(kWasmS128);
- byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
-
- FOR_FLOAT64_INPUTS(x) {
- if (!PlatformCanRepresent(x)) continue;
- FOR_FLOAT64_INPUTS(y) {
- if (!PlatformCanRepresent(x)) continue;
- double expected = expected_op(x, y);
- if (!PlatformCanRepresent(expected)) continue;
- r.Call(x, y);
- for (int i = 0; i < 2; i++) {
- double actual = ReadLittleEndianValue<double>(&g[i]);
- CheckDoubleResult(x, y, expected, actual, true /* exact */);
- }
- }
- }
-
- FOR_FLOAT64_NAN_INPUTS(i) {
- double x = bit_cast<double>(double_nan_test_array[i]);
- if (!PlatformCanRepresent(x)) continue;
- FOR_FLOAT64_NAN_INPUTS(j) {
- double y = bit_cast<double>(double_nan_test_array[j]);
- double expected = expected_op(x, y);
- if (!PlatformCanRepresent(expected)) continue;
- r.Call(x, y);
- for (int i = 0; i < 2; i++) {
- double actual = ReadLittleEndianValue<double>(&g[i]);
- CheckDoubleResult(x, y, expected, actual, true /* exact */);
- }
- }
- }
-}
-
-#undef FOR_FLOAT64_NAN_INPUTS
-
WASM_SIMD_TEST(F64x2Add) {
RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Add, Add);
}
@@ -1476,42 +793,6 @@ WASM_SIMD_TEST(F64x2Pmax) {
RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Pmax, Maximum);
}
-void RunF64x2CompareOpTest(TestExecutionTier execution_tier,
- LowerSimd lower_simd, WasmOpcode opcode,
- DoubleCompareOp expected_op) {
- WasmRunner<int32_t, double, double> r(execution_tier, lower_simd);
- // Set up global to hold mask output.
- int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
- // Build fn to splat test values, perform compare op, and write the result.
- byte value1 = 0, value2 = 1;
- byte temp1 = r.AllocateLocal(kWasmS128);
- byte temp2 = r.AllocateLocal(kWasmS128);
- // Make the lanes of each temp compare differently:
- // temp1 = y, x and temp2 = y, y.
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp1,
- WASM_SIMD_F64x2_REPLACE_LANE(1, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(value2))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
-
- FOR_FLOAT64_INPUTS(x) {
- if (!PlatformCanRepresent(x)) continue;
- FOR_FLOAT64_INPUTS(y) {
- if (!PlatformCanRepresent(y)) continue;
- double diff = x - y; // Model comparison as subtraction.
- if (!PlatformCanRepresent(diff)) continue;
- r.Call(x, y);
- int64_t expected0 = expected_op(x, y);
- int64_t expected1 = expected_op(y, y);
- CHECK_EQ(expected0, ReadLittleEndianValue<int64_t>(&g[0]));
- CHECK_EQ(expected1, ReadLittleEndianValue<int64_t>(&g[1]));
- }
- }
-}
-
WASM_SIMD_TEST(F64x2Eq) {
RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Eq, Equal);
}
@@ -1549,58 +830,6 @@ WASM_SIMD_TEST(I64x2Mul) {
base::MulWithWraparound);
}
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
-WASM_SIMD_TEST_NO_LOWERING(F64x2Qfma) {
- FLAG_SCOPE(wasm_simd_post_mvp);
- WasmRunner<int32_t, double, double, double> r(execution_tier, lower_simd);
- // Set up global to hold mask output.
- double* g = r.builder().AddGlobal<double>(kWasmS128);
- // Build fn to splat test values, perform compare op, and write the result.
- byte value1 = 0, value2 = 1, value3 = 2;
- BUILD(r,
- WASM_GLOBAL_SET(0, WASM_SIMD_F64x2_QFMA(
- WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1)),
- WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2)),
- WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value3)))),
- WASM_ONE);
-
- for (FMOperation<double> x : qfma_vector<double>()) {
- r.Call(x.a, x.b, x.c);
- double expected =
- ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
- for (int i = 0; i < 2; i++) {
- double actual = ReadLittleEndianValue<double>(&g[i]);
- CheckDoubleResult(x.a, x.b, expected, actual, true /* exact */);
- }
- }
-}
-
-WASM_SIMD_TEST_NO_LOWERING(F64x2Qfms) {
- FLAG_SCOPE(wasm_simd_post_mvp);
- WasmRunner<int32_t, double, double, double> r(execution_tier, lower_simd);
- // Set up global to hold mask output.
- double* g = r.builder().AddGlobal<double>(kWasmS128);
- // Build fn to splat test values, perform compare op, and write the result.
- byte value1 = 0, value2 = 1, value3 = 2;
- BUILD(r,
- WASM_GLOBAL_SET(0, WASM_SIMD_F64x2_QFMS(
- WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1)),
- WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2)),
- WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value3)))),
- WASM_ONE);
-
- for (FMOperation<double> x : qfms_vector<double>()) {
- r.Call(x.a, x.b, x.c);
- double expected =
- ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
- for (int i = 0; i < 2; i++) {
- double actual = ReadLittleEndianValue<double>(&g[i]);
- CheckDoubleResult(x.a, x.b, expected, actual, true /* exact */);
- }
- }
-}
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
-
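The fused/unfused distinction in the removed QFMA/QFMS tests is real floating-point behavior, not a test artifact: a fused multiply-add rounds once, while a separate multiply and add round twice. A standalone sketch (plain C++, std::fma):

#include <cassert>
#include <cmath>

// Standalone sketch, not part of the diff: one rounding (fused) vs. two.
int main() {
  const double x = 1.0 + std::ldexp(1.0, -30);    // 1 + 2^-30, exact in binary64
  const double product = x * x;                   // rounding drops the 2^-60 term
  const double again = x * x;                     // the same rounded product
  const double unfused = again - product;         // two roundings: exactly 0
  const double fused = std::fma(x, x, -product);  // one rounding: recovers 2^-60
  assert(unfused == 0.0);
  assert(fused == std::ldexp(1.0, -60));
  return 0;
}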
WASM_SIMD_TEST(I32x4Splat) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
// Set up a global to hold output vector.
@@ -1758,7 +987,7 @@ WASM_SIMD_TEST(I32x4BitMask) {
}
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2BitMask) {
+WASM_SIMD_TEST(I64x2BitMask) {
WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
byte value1 = r.AllocateLocal(kWasmS128);
@@ -1928,7 +1157,7 @@ WASM_SIMD_TEST(I32x4ConvertI16x8) {
}
// Tests both signed and unsigned conversion from I32x4 (unpacking).
-WASM_SIMD_TEST_NO_LOWERING(I64x2ConvertI32x4) {
+WASM_SIMD_TEST(I64x2ConvertI32x4) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
// Create four output vectors to hold signed and unsigned results.
int64_t* g0 = r.builder().AddGlobal<int64_t>(kWasmS128);
@@ -1963,27 +1192,6 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2ConvertI32x4) {
}
}
-void RunI32x4UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, Int32UnOp expected_op) {
- WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
- // Global to hold output.
- int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
- // Build fn to splat test value, perform unop, and write the result.
- byte value = 0;
- byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
- WASM_ONE);
-
- FOR_INT32_INPUTS(x) {
- r.Call(x);
- int32_t expected = expected_op(x);
- for (int i = 0; i < 4; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
- }
- }
-}
-
WASM_SIMD_TEST(I32x4Neg) {
RunI32x4UnOpTest(execution_tier, lower_simd, kExprI32x4Neg,
base::NegateWithWraparound);
@@ -2001,74 +1209,56 @@ WASM_SIMD_TEST(S128Not) {
template <typename Narrow, typename Wide>
void RunExtAddPairwiseTest(TestExecutionTier execution_tier,
LowerSimd lower_simd, WasmOpcode ext_add_pairwise,
- WasmOpcode splat) {
+ WasmOpcode splat, Shuffle interleaving_shuffle) {
constexpr int num_lanes = kSimd128Size / sizeof(Wide);
- WasmRunner<int32_t, Narrow> r(execution_tier, lower_simd);
+ WasmRunner<int32_t, Narrow, Narrow> r(execution_tier, lower_simd);
Wide* g = r.builder().template AddGlobal<Wide>(kWasmS128);
- // TODO(v8:11086) We splat the same value, so pairwise adding ends up adding
- // the same value to itself, consider a more complicated test, like having 2
- // vectors, and shuffling them.
- BUILD(r, WASM_LOCAL_GET(0), WASM_SIMD_OP(splat),
+ BUILD(r,
+ WASM_SIMD_I8x16_SHUFFLE_OP(kExprI8x16Shuffle, interleaving_shuffle,
+ WASM_SIMD_UNOP(splat, WASM_LOCAL_GET(0)),
+ WASM_SIMD_UNOP(splat, WASM_LOCAL_GET(1))),
WASM_SIMD_OP(ext_add_pairwise), kExprGlobalSet, 0, WASM_ONE);
- for (Narrow x : compiler::ValueHelper::GetVector<Narrow>()) {
- r.Call(x);
- Wide expected = AddLong<Wide>(x, x);
+ auto v = compiler::ValueHelper::GetVector<Narrow>();
+ // Iterate vector from both ends to try and splat two different values.
+ for (auto i = v.begin(), j = v.end() - 1; i < v.end(); i++, j--) {
+ r.Call(*i, *j);
+ Wide expected = AddLong<Wide>(*i, *j);
for (int i = 0; i < num_lanes; i++) {
CHECK_EQ(expected, ReadLittleEndianValue<Wide>(&g[i]));
}
}
}
-WASM_SIMD_TEST_NO_LOWERING(I32x4ExtAddPairwiseI16x8S) {
- RunExtAddPairwiseTest<int16_t, int32_t>(execution_tier, lower_simd,
- kExprI32x4ExtAddPairwiseI16x8S,
- kExprI16x8Splat);
-}
+// Interleave even lanes from one input and odd lanes from another.
+constexpr Shuffle interleave_16x8_shuffle = {0, 1, 18, 19, 4, 5, 22, 23,
+ 8, 9, 26, 27, 12, 13, 30, 31};
+constexpr Shuffle interleave_8x16_shuffle = {0, 17, 2, 19, 4, 21, 6, 23,
+ 8, 25, 10, 27, 12, 29, 14, 31};
-WASM_SIMD_TEST_NO_LOWERING(I32x4ExtAddPairwiseI16x8U) {
- RunExtAddPairwiseTest<uint16_t, uint32_t>(execution_tier, lower_simd,
- kExprI32x4ExtAddPairwiseI16x8U,
- kExprI16x8Splat);
+WASM_SIMD_TEST(I32x4ExtAddPairwiseI16x8S) {
+ RunExtAddPairwiseTest<int16_t, int32_t>(
+ execution_tier, lower_simd, kExprI32x4ExtAddPairwiseI16x8S,
+ kExprI16x8Splat, interleave_16x8_shuffle);
}
-WASM_SIMD_TEST_NO_LOWERING(I16x8ExtAddPairwiseI8x16S) {
- RunExtAddPairwiseTest<int8_t, int16_t>(execution_tier, lower_simd,
- kExprI16x8ExtAddPairwiseI8x16S,
- kExprI8x16Splat);
+WASM_SIMD_TEST(I32x4ExtAddPairwiseI16x8U) {
+ RunExtAddPairwiseTest<uint16_t, uint32_t>(
+ execution_tier, lower_simd, kExprI32x4ExtAddPairwiseI16x8U,
+ kExprI16x8Splat, interleave_16x8_shuffle);
}
-WASM_SIMD_TEST_NO_LOWERING(I16x8ExtAddPairwiseI8x16U) {
- RunExtAddPairwiseTest<uint8_t, uint16_t>(execution_tier, lower_simd,
- kExprI16x8ExtAddPairwiseI8x16U,
- kExprI8x16Splat);
+WASM_SIMD_TEST(I16x8ExtAddPairwiseI8x16S) {
+ RunExtAddPairwiseTest<int8_t, int16_t>(
+ execution_tier, lower_simd, kExprI16x8ExtAddPairwiseI8x16S,
+ kExprI8x16Splat, interleave_8x16_shuffle);
}
-void RunI32x4BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, Int32BinOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
- // Global to hold output.
- int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
- // Build fn to splat test values, perform binop, and write the result.
- byte value1 = 0, value2 = 1;
- byte temp1 = r.AllocateLocal(kWasmS128);
- byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
-
- FOR_INT32_INPUTS(x) {
- FOR_INT32_INPUTS(y) {
- r.Call(x, y);
- int32_t expected = expected_op(x, y);
- for (int i = 0; i < 4; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
- }
- }
- }
+WASM_SIMD_TEST(I16x8ExtAddPairwiseI8x16U) {
+ RunExtAddPairwiseTest<uint8_t, uint16_t>(
+ execution_tier, lower_simd, kExprI16x8ExtAddPairwiseI8x16U,
+ kExprI8x16Splat, interleave_8x16_shuffle);
}
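A scalar model of what the new interleaving shuffle buys: starting from two splats, the shuffled vector alternates the two values, so each pairwise extending add produces a + b instead of a + a. Standalone sketch (plain C++, reusing the interleave_16x8_shuffle bytes above):

#include <cassert>
#include <cstdint>
#include <cstring>

// Standalone sketch, not part of the diff: model i8x16.shuffle followed by a
// pairwise extending add (i16x8 -> i32x4) on two i16x8 splats.
int main() {
  const uint8_t shuffle[16] = {0, 1, 18, 19, 4,  5,  22, 23,
                               8, 9, 26, 27, 12, 13, 30, 31};
  const int16_t a = 3, b = -5;

  // Concatenate splat(a) and splat(b): bytes 0-15 come from a, 16-31 from b.
  int16_t lanes[8];
  uint8_t input[32];
  for (int i = 0; i < 8; i++) lanes[i] = a;
  std::memcpy(input, lanes, 16);
  for (int i = 0; i < 8; i++) lanes[i] = b;
  std::memcpy(input + 16, lanes, 16);

  // Byte-wise shuffle: result lanes alternate between the two inputs.
  uint8_t shuffled[16];
  for (int i = 0; i < 16; i++) shuffled[i] = input[shuffle[i]];

  // Pairwise extending add: every wide lane becomes a + b.
  int16_t narrow[8];
  std::memcpy(narrow, shuffled, 16);
  for (int i = 0; i < 4; i++) {
    int32_t wide = int32_t{narrow[2 * i]} + int32_t{narrow[2 * i + 1]};
    assert(wide == a + b);
  }
  return 0;
}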
WASM_SIMD_TEST(I32x4Add) {
@@ -2167,38 +1357,6 @@ WASM_SIMD_TEST(I32x4GeU) {
UnsignedGreaterEqual);
}
-void RunI32x4ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, Int32ShiftOp expected_op) {
- // Intentionally shift by 32, should be no-op.
- for (int shift = 1; shift <= 32; shift++) {
- WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
- int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
- int32_t* g_imm = r.builder().AddGlobal<int32_t>(kWasmS128);
- int32_t* g_mem = r.builder().AddGlobal<int32_t>(kWasmS128);
- byte value = 0;
- byte simd = r.AllocateLocal(kWasmS128);
- // Shift using an immediate, and shift using a value loaded from memory.
- BUILD(
- r, WASM_LOCAL_SET(simd, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
- WASM_I32V(shift))),
- WASM_GLOBAL_SET(1, WASM_SIMD_SHIFT_OP(
- opcode, WASM_LOCAL_GET(simd),
- WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
- WASM_ONE);
-
- r.builder().WriteMemory(&memory[0], shift);
- FOR_INT32_INPUTS(x) {
- r.Call(x);
- int32_t expected = expected_op(x, shift);
- for (int i = 0; i < 4; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g_imm[i]));
- CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g_mem[i]));
- }
- }
- }
-}
-
WASM_SIMD_TEST(I32x4Shl) {
RunI32x4ShiftOpTest(execution_tier, lower_simd, kExprI32x4Shl,
LogicalShiftLeft);
@@ -2278,27 +1436,6 @@ WASM_SIMD_TEST(I16x8ConvertI32x4) {
}
}
-void RunI16x8UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, Int16UnOp expected_op) {
- WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
- // Global to hold output.
- int16_t* g = r.builder().AddGlobal<int16_t>(kWasmS128);
- // Build fn to splat test value, perform unop, and write the result.
- byte value = 0;
- byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
- WASM_ONE);
-
- FOR_INT16_INPUTS(x) {
- r.Call(x);
- int16_t expected = expected_op(x);
- for (int i = 0; i < 8; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int16_t>(&g[i]));
- }
- }
-}
-
WASM_SIMD_TEST(I16x8Neg) {
RunI16x8UnOpTest(execution_tier, lower_simd, kExprI16x8Neg,
base::NegateWithWraparound);
@@ -2308,33 +1445,6 @@ WASM_SIMD_TEST(I16x8Abs) {
RunI16x8UnOpTest(execution_tier, lower_simd, kExprI16x8Abs, Abs);
}
-template <typename T = int16_t, typename OpType = T (*)(T, T)>
-void RunI16x8BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, OpType expected_op) {
- WasmRunner<int32_t, T, T> r(execution_tier, lower_simd);
- // Global to hold output.
- T* g = r.builder().template AddGlobal<T>(kWasmS128);
- // Build fn to splat test values, perform binop, and write the result.
- byte value1 = 0, value2 = 1;
- byte temp1 = r.AllocateLocal(kWasmS128);
- byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
-
- for (T x : compiler::ValueHelper::GetVector<T>()) {
- for (T y : compiler::ValueHelper::GetVector<T>()) {
- r.Call(x, y);
- T expected = expected_op(x, y);
- for (int i = 0; i < 8; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<T>(&g[i]));
- }
- }
- }
-}
-
WASM_SIMD_TEST(I16x8Add) {
RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Add,
base::AddWithWraparound);
@@ -2369,13 +1479,13 @@ WASM_SIMD_TEST(I16x8MaxS) {
}
WASM_SIMD_TEST(I16x8AddSatU) {
- RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8AddSatU,
- SaturateAdd<uint16_t>);
+ RunI16x8BinOpTest<uint16_t>(execution_tier, lower_simd, kExprI16x8AddSatU,
+ SaturateAdd<uint16_t>);
}
WASM_SIMD_TEST(I16x8SubSatU) {
- RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8SubSatU,
- SaturateSub<uint16_t>);
+ RunI16x8BinOpTest<uint16_t>(execution_tier, lower_simd, kExprI16x8SubSatU,
+ SaturateSub<uint16_t>);
}
WASM_SIMD_TEST(I16x8MinU) {
@@ -2433,10 +1543,10 @@ WASM_SIMD_TEST(I16x8LeU) {
WASM_SIMD_TEST(I16x8RoundingAverageU) {
RunI16x8BinOpTest<uint16_t>(execution_tier, lower_simd,
kExprI16x8RoundingAverageU,
- base::RoundingAverageUnsigned);
+ RoundingAverageUnsigned);
}
-WASM_SIMD_TEST_NO_LOWERING(I16x8Q15MulRSatS) {
+WASM_SIMD_TEST(I16x8Q15MulRSatS) {
RunI16x8BinOpTest<int16_t>(execution_tier, lower_simd, kExprI16x8Q15MulRSatS,
SaturateRoundingQMul<int16_t>);
}
@@ -2576,38 +1686,6 @@ WASM_SIMD_TEST(I32x4DotI16x8S) {
}
}
-void RunI16x8ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, Int16ShiftOp expected_op) {
- // Intentionally shift by 16, should be no-op.
- for (int shift = 1; shift <= 16; shift++) {
- WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
- int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
- int16_t* g_imm = r.builder().AddGlobal<int16_t>(kWasmS128);
- int16_t* g_mem = r.builder().AddGlobal<int16_t>(kWasmS128);
- byte value = 0;
- byte simd = r.AllocateLocal(kWasmS128);
- // Shift using an immediate, and shift using a value loaded from memory.
- BUILD(
- r, WASM_LOCAL_SET(simd, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
- WASM_I32V(shift))),
- WASM_GLOBAL_SET(1, WASM_SIMD_SHIFT_OP(
- opcode, WASM_LOCAL_GET(simd),
- WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
- WASM_ONE);
-
- r.builder().WriteMemory(&memory[0], shift);
- FOR_INT16_INPUTS(x) {
- r.Call(x);
- int16_t expected = expected_op(x, shift);
- for (int i = 0; i < 8; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int16_t>(&g_imm[i]));
- CHECK_EQ(expected, ReadLittleEndianValue<int16_t>(&g_mem[i]));
- }
- }
- }
-}
-
WASM_SIMD_TEST(I16x8Shl) {
RunI16x8ShiftOpTest(execution_tier, lower_simd, kExprI16x8Shl,
LogicalShiftLeft);
@@ -2623,27 +1701,6 @@ WASM_SIMD_TEST(I16x8ShrU) {
LogicalShiftRight);
}
-void RunI8x16UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, Int8UnOp expected_op) {
- WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
- // Global to hold output.
- int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
- // Build fn to splat test value, perform unop, and write the result.
- byte value = 0;
- byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
- WASM_ONE);
-
- FOR_INT8_INPUTS(x) {
- r.Call(x);
- int8_t expected = expected_op(x);
- for (int i = 0; i < 16; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g[i]));
- }
- }
-}
-
WASM_SIMD_TEST(I8x16Neg) {
RunI8x16UnOpTest(execution_tier, lower_simd, kExprI8x16Neg,
base::NegateWithWraparound);
@@ -2653,7 +1710,7 @@ WASM_SIMD_TEST(I8x16Abs) {
RunI8x16UnOpTest(execution_tier, lower_simd, kExprI8x16Abs, Abs);
}
-WASM_SIMD_TEST_NO_LOWERING(I8x16Popcnt) {
+WASM_SIMD_TEST(I8x16Popcnt) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
// Global to hold output.
int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
@@ -2703,33 +1760,6 @@ WASM_SIMD_TEST(I8x16ConvertI16x8) {
}
}
-template <typename T = int8_t, typename OpType = T (*)(T, T)>
-void RunI8x16BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, OpType expected_op) {
- WasmRunner<int32_t, T, T> r(execution_tier, lower_simd);
- // Global to hold output.
- T* g = r.builder().template AddGlobal<T>(kWasmS128);
- // Build fn to splat test values, perform binop, and write the result.
- byte value1 = 0, value2 = 1;
- byte temp1 = r.AllocateLocal(kWasmS128);
- byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
-
- for (T x : compiler::ValueHelper::GetVector<T>()) {
- for (T y : compiler::ValueHelper::GetVector<T>()) {
- r.Call(x, y);
- T expected = expected_op(x, y);
- for (int i = 0; i < 16; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<T>(&g[i]));
- }
- }
- }
-}
-
WASM_SIMD_TEST(I8x16Add) {
RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Add,
base::AddWithWraparound);
@@ -2759,13 +1789,13 @@ WASM_SIMD_TEST(I8x16MaxS) {
}
WASM_SIMD_TEST(I8x16AddSatU) {
- RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16AddSatU,
- SaturateAdd<uint8_t>);
+ RunI8x16BinOpTest<uint8_t>(execution_tier, lower_simd, kExprI8x16AddSatU,
+ SaturateAdd<uint8_t>);
}
WASM_SIMD_TEST(I8x16SubSatU) {
- RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16SubSatU,
- SaturateSub<uint8_t>);
+ RunI8x16BinOpTest<uint8_t>(execution_tier, lower_simd, kExprI8x16SubSatU,
+ SaturateSub<uint8_t>);
}
WASM_SIMD_TEST(I8x16MinU) {
@@ -2820,48 +1850,10 @@ WASM_SIMD_TEST(I8x16LeU) {
UnsignedLessEqual);
}
-WASM_SIMD_TEST(I8x16Mul) {
- FLAG_SCOPE(wasm_simd_post_mvp);
- RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Mul,
- base::MulWithWraparound);
-}
-
WASM_SIMD_TEST(I8x16RoundingAverageU) {
RunI8x16BinOpTest<uint8_t>(execution_tier, lower_simd,
kExprI8x16RoundingAverageU,
- base::RoundingAverageUnsigned);
-}
-
-void RunI8x16ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, Int8ShiftOp expected_op) {
- // Intentionally shift by 8, should be no-op.
- for (int shift = 1; shift <= 8; shift++) {
- WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
- int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
- int8_t* g_imm = r.builder().AddGlobal<int8_t>(kWasmS128);
- int8_t* g_mem = r.builder().AddGlobal<int8_t>(kWasmS128);
- byte value = 0;
- byte simd = r.AllocateLocal(kWasmS128);
- // Shift using an immediate, and shift using a value loaded from memory.
- BUILD(
- r, WASM_LOCAL_SET(simd, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
- WASM_I32V(shift))),
- WASM_GLOBAL_SET(1, WASM_SIMD_SHIFT_OP(
- opcode, WASM_LOCAL_GET(simd),
- WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
- WASM_ONE);
-
- r.builder().WriteMemory(&memory[0], shift);
- FOR_INT8_INPUTS(x) {
- r.Call(x);
- int8_t expected = expected_op(x, shift);
- for (int i = 0; i < 16; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g_imm[i]));
- CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g_mem[i]));
- }
- }
- }
+ RoundingAverageUnsigned);
}
WASM_SIMD_TEST(I8x16Shl) {
@@ -2990,27 +1982,6 @@ void RunBinaryLaneOpTest(
}
}
-WASM_SIMD_TEST(I32x4AddHoriz) {
- FLAG_SCOPE(wasm_simd_post_mvp);
- // Inputs are [0 1 2 3] and [4 5 6 7].
- RunBinaryLaneOpTest<int32_t>(execution_tier, lower_simd, kExprI32x4AddHoriz,
- {{1, 5, 9, 13}});
-}
-
-WASM_SIMD_TEST(I16x8AddHoriz) {
- FLAG_SCOPE(wasm_simd_post_mvp);
- // Inputs are [0 1 2 3 4 5 6 7] and [8 9 10 11 12 13 14 15].
- RunBinaryLaneOpTest<int16_t>(execution_tier, lower_simd, kExprI16x8AddHoriz,
- {{1, 5, 9, 13, 17, 21, 25, 29}});
-}
-
-WASM_SIMD_TEST(F32x4AddHoriz) {
- FLAG_SCOPE(wasm_simd_post_mvp);
- // Inputs are [0.0f 1.0f 2.0f 3.0f] and [4.0f 5.0f 6.0f 7.0f].
- RunBinaryLaneOpTest<float>(execution_tier, lower_simd, kExprF32x4AddHoriz,
- {{1.0f, 5.0f, 9.0f, 13.0f}});
-}
-
// Test shuffle ops.
void RunShuffleOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode simd_op,
@@ -3077,7 +2048,6 @@ enum ShuffleKey {
kNumShuffleKeys
};
-using Shuffle = std::array<int8_t, kSimd128Size>;
using ShuffleMap = std::map<ShuffleKey, const Shuffle>;
ShuffleMap test_shuffles = {
@@ -3256,6 +2226,30 @@ WASM_SIMD_TEST(I8x16Swizzle) {
CHECK_EQ(ReadLittleEndianValue<uint8_t>(&dst[i]), si.expected[i]);
}
}
+
+ {
+ // We have an optimization for constant indices, test this case.
+ for (SwizzleTestArgs si : swizzle_test_vector) {
+ WasmRunner<int32_t> r(execution_tier, lower_simd);
+ uint8_t* dst = r.builder().AddGlobal<uint8_t>(kWasmS128);
+ uint8_t* src0 = r.builder().AddGlobal<uint8_t>(kWasmS128);
+ BUILD(r,
+ WASM_GLOBAL_SET(
+ 0, WASM_SIMD_BINOP(kExprI8x16Swizzle, WASM_GLOBAL_GET(1),
+ WASM_SIMD_CONSTANT(si.indices))),
+ WASM_ONE);
+
+ for (int i = 0; i < kSimd128Size; i++) {
+ WriteLittleEndianValue<uint8_t>(&src0[i], si.input[i]);
+ }
+
+ CHECK_EQ(1, r.Call());
+
+ for (int i = 0; i < kSimd128Size; i++) {
+ CHECK_EQ(ReadLittleEndianValue<uint8_t>(&dst[i]), si.expected[i]);
+ }
+ }
+ }
}
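Both loops above, with variable and with constant indices, check the same i8x16.swizzle semantics: each output byte selects input[index], and any index of 16 or more yields 0. A standalone scalar reference, as a sketch:

#include <cassert>
#include <cstdint>

// Standalone sketch, not part of the diff: scalar reference for i8x16.swizzle.
void SwizzleScalar(const uint8_t input[16], const uint8_t indices[16],
                   uint8_t result[16]) {
  for (int i = 0; i < 16; i++) {
    result[i] = indices[i] < 16 ? input[indices[i]] : 0;
  }
}

int main() {
  uint8_t input[16], indices[16], result[16];
  for (int i = 0; i < 16; i++) {
    input[i] = static_cast<uint8_t>(100 + i);
    indices[i] = static_cast<uint8_t>(15 - i);  // reverse the vector
  }
  indices[0] = 255;  // out-of-range index selects 0
  SwizzleScalar(input, indices, result);
  assert(result[0] == 0);
  assert(result[1] == 114);  // input[14]
  return 0;
}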
// Combine 3 shuffles a, b, and c by applying both a and b and then applying c
@@ -3370,7 +2364,6 @@ WASM_SIMD_TEST(S8x16MultiShuffleFuzz) {
// test inputs. Test inputs with all true, all false, one true, and one false.
#define WASM_SIMD_BOOL_REDUCTION_TEST(format, lanes, int_type) \
WASM_SIMD_TEST(ReductionTest##lanes) { \
- FLAG_SCOPE(wasm_simd_post_mvp); \
WasmRunner<int32_t> r(execution_tier, lower_simd); \
if (lanes == 2 && lower_simd == kLowerSimd) return; \
byte zero = r.AllocateLocal(kWasmS128); \
@@ -3392,14 +2385,14 @@ WASM_SIMD_TEST(S8x16MultiShuffleFuzz) {
WASM_IF(WASM_I32_NE(WASM_LOCAL_GET(reduced), WASM_ZERO), \
WASM_RETURN1(WASM_ZERO)), \
WASM_LOCAL_SET( \
- reduced, WASM_SIMD_UNOP(kExprV##format##AllTrue, \
+ reduced, WASM_SIMD_UNOP(kExprI##format##AllTrue, \
WASM_SIMD_BINOP(kExprI##format##Eq, \
WASM_LOCAL_GET(zero), \
WASM_LOCAL_GET(zero)))), \
WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(reduced), WASM_ZERO), \
WASM_RETURN1(WASM_ZERO)), \
WASM_LOCAL_SET( \
- reduced, WASM_SIMD_UNOP(kExprV##format##AllTrue, \
+ reduced, WASM_SIMD_UNOP(kExprI##format##AllTrue, \
WASM_SIMD_BINOP(kExprI##format##Ne, \
WASM_LOCAL_GET(zero), \
WASM_LOCAL_GET(zero)))), \
@@ -3423,14 +2416,14 @@ WASM_SIMD_TEST(S8x16MultiShuffleFuzz) {
WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(reduced), WASM_ZERO), \
WASM_RETURN1(WASM_ZERO)), \
WASM_LOCAL_SET( \
- reduced, WASM_SIMD_UNOP(kExprV##format##AllTrue, \
+ reduced, WASM_SIMD_UNOP(kExprI##format##AllTrue, \
WASM_SIMD_BINOP(kExprI##format##Eq, \
WASM_LOCAL_GET(one_one), \
WASM_LOCAL_GET(zero)))), \
WASM_IF(WASM_I32_NE(WASM_LOCAL_GET(reduced), WASM_ZERO), \
WASM_RETURN1(WASM_ZERO)), \
WASM_LOCAL_SET( \
- reduced, WASM_SIMD_UNOP(kExprV##format##AllTrue, \
+ reduced, WASM_SIMD_UNOP(kExprI##format##AllTrue, \
WASM_SIMD_BINOP(kExprI##format##Ne, \
WASM_LOCAL_GET(one_one), \
WASM_LOCAL_GET(zero)))), \
@@ -3698,60 +2691,6 @@ WASM_SIMD_TEST(SimdF32x4SetGlobal) {
CHECK_EQ(GetScalar(global, 3), 65.0f);
}
-#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
-// TODO(v8:11168): Prototyping prefetch.
-WASM_SIMD_TEST(SimdPrefetch) {
- FLAG_SCOPE(wasm_simd_post_mvp);
-
- {
- // Test PrefetchT.
- WasmRunner<int32_t> r(execution_tier, lower_simd);
- int32_t* memory =
- r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
- BUILD(r, WASM_ZERO, WASM_SIMD_OP(kExprPrefetchT), ZERO_ALIGNMENT,
- ZERO_OFFSET,
- WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_SIMD_LOAD_MEM(WASM_ZERO)));
-
- FOR_INT32_INPUTS(i) {
- r.builder().WriteMemory(&memory[0], i);
- CHECK_EQ(i, r.Call());
- }
- }
-
- {
- // Test PrefetchNT.
- WasmRunner<int32_t> r(execution_tier, lower_simd);
- int32_t* memory =
- r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
- BUILD(r, WASM_ZERO, WASM_SIMD_OP(kExprPrefetchNT), ZERO_ALIGNMENT,
- ZERO_OFFSET,
- WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_SIMD_LOAD_MEM(WASM_ZERO)));
-
- FOR_INT32_INPUTS(i) {
- r.builder().WriteMemory(&memory[0], i);
- CHECK_EQ(i, r.Call());
- }
- }
-
- {
- // Test OOB.
- WasmRunner<int32_t> r(execution_tier, lower_simd);
- int32_t* memory =
- r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
-
- // Prefetch kWasmPageSize+1 but still load from 0.
- BUILD(r, WASM_I32V(kWasmPageSize + 1), WASM_SIMD_OP(kExprPrefetchNT),
- ZERO_ALIGNMENT, ZERO_OFFSET,
- WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_SIMD_LOAD_MEM(WASM_ZERO)));
-
- FOR_INT32_INPUTS(i) {
- r.builder().WriteMemory(&memory[0], i);
- CHECK_EQ(i, r.Call());
- }
- }
-}
-#endif // V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
-
WASM_SIMD_TEST(SimdLoadStoreLoad) {
WasmRunner<int32_t> r(execution_tier, lower_simd);
int32_t* memory =
@@ -4181,22 +3120,22 @@ void RunLoadLaneTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
}
}
-WASM_SIMD_TEST_NO_LOWERING(S128Load8Lane) {
+WASM_SIMD_TEST(S128Load8Lane) {
RunLoadLaneTest<int8_t>(execution_tier, lower_simd, kExprS128Load8Lane,
kExprI8x16Splat);
}
-WASM_SIMD_TEST_NO_LOWERING(S128Load16Lane) {
+WASM_SIMD_TEST(S128Load16Lane) {
RunLoadLaneTest<int16_t>(execution_tier, lower_simd, kExprS128Load16Lane,
kExprI16x8Splat);
}
-WASM_SIMD_TEST_NO_LOWERING(S128Load32Lane) {
+WASM_SIMD_TEST(S128Load32Lane) {
RunLoadLaneTest<int32_t>(execution_tier, lower_simd, kExprS128Load32Lane,
kExprI32x4Splat);
}
-WASM_SIMD_TEST_NO_LOWERING(S128Load64Lane) {
+WASM_SIMD_TEST(S128Load64Lane) {
RunLoadLaneTest<int64_t>(execution_tier, lower_simd, kExprS128Load64Lane,
kExprI64x2Splat);
}
@@ -4274,29 +3213,28 @@ void RunStoreLaneTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
}
}
-WASM_SIMD_TEST_NO_LOWERING(S128Store8Lane) {
+WASM_SIMD_TEST(S128Store8Lane) {
RunStoreLaneTest<int8_t>(execution_tier, lower_simd, kExprS128Store8Lane,
kExprI8x16Splat);
}
-WASM_SIMD_TEST_NO_LOWERING(S128Store16Lane) {
+WASM_SIMD_TEST(S128Store16Lane) {
RunStoreLaneTest<int16_t>(execution_tier, lower_simd, kExprS128Store16Lane,
kExprI16x8Splat);
}
-WASM_SIMD_TEST_NO_LOWERING(S128Store32Lane) {
+WASM_SIMD_TEST(S128Store32Lane) {
RunStoreLaneTest<int32_t>(execution_tier, lower_simd, kExprS128Store32Lane,
kExprI32x4Splat);
}
-WASM_SIMD_TEST_NO_LOWERING(S128Store64Lane) {
+WASM_SIMD_TEST(S128Store64Lane) {
RunStoreLaneTest<int64_t>(execution_tier, lower_simd, kExprS128Store64Lane,
kExprI64x2Splat);
}
#define WASM_SIMD_ANYTRUE_TEST(format, lanes, max, param_type) \
WASM_SIMD_TEST(S##format##AnyTrue) { \
- FLAG_SCOPE(wasm_simd_post_mvp); \
WasmRunner<int32_t, param_type> r(execution_tier, lower_simd); \
if (lanes == 2 && lower_simd == kLowerSimd) return; \
byte simd = r.AllocateLocal(kWasmS128); \
@@ -4325,15 +3263,14 @@ WASM_SIMD_TEST(V128AnytrueWithNegativeZero) {
}
#define WASM_SIMD_ALLTRUE_TEST(format, lanes, max, param_type) \
- WASM_SIMD_TEST(V##format##AllTrue) { \
- FLAG_SCOPE(wasm_simd_post_mvp); \
+ WASM_SIMD_TEST(I##format##AllTrue) { \
WasmRunner<int32_t, param_type> r(execution_tier, lower_simd); \
if (lanes == 2 && lower_simd == kLowerSimd) return; \
byte simd = r.AllocateLocal(kWasmS128); \
BUILD( \
r, \
WASM_LOCAL_SET(simd, WASM_SIMD_I##format##_SPLAT(WASM_LOCAL_GET(0))), \
- WASM_SIMD_UNOP(kExprV##format##AllTrue, WASM_LOCAL_GET(simd))); \
+ WASM_SIMD_UNOP(kExprI##format##AllTrue, WASM_LOCAL_GET(simd))); \
CHECK_EQ(1, r.Call(max)); \
CHECK_EQ(1, r.Call(0x1)); \
CHECK_EQ(0, r.Call(0)); \
@@ -4406,28 +3343,6 @@ WASM_SIMD_TEST(S128ConstAllOnes) {
RunSimdConstTest(execution_tier, lower_simd, expected);
}
-void RunI8x16MixedRelationalOpTest(TestExecutionTier execution_tier,
- LowerSimd lower_simd, WasmOpcode opcode,
- Int8BinOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
- byte value1 = 0, value2 = 1;
- byte temp1 = r.AllocateLocal(kWasmS128);
- byte temp2 = r.AllocateLocal(kWasmS128);
- byte temp3 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_LOCAL_SET(temp3, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_SIMD_I8x16_EXTRACT_LANE(0, WASM_LOCAL_GET(temp3)));
-
- CHECK_EQ(expected_op(0xff, static_cast<uint8_t>(0x7fff)),
- r.Call(0xff, 0x7fff));
- CHECK_EQ(expected_op(0xfe, static_cast<uint8_t>(0x7fff)),
- r.Call(0xfe, 0x7fff));
- CHECK_EQ(expected_op(0xff, static_cast<uint8_t>(0x7ffe)),
- r.Call(0xff, 0x7ffe));
-}
-
WASM_SIMD_TEST(I8x16LeUMixed) {
RunI8x16MixedRelationalOpTest(execution_tier, lower_simd, kExprI8x16LeU,
UnsignedLessEqual);
@@ -4445,28 +3360,6 @@ WASM_SIMD_TEST(I8x16GtUMixed) {
UnsignedGreater);
}
-void RunI16x8MixedRelationalOpTest(TestExecutionTier execution_tier,
- LowerSimd lower_simd, WasmOpcode opcode,
- Int16BinOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
- byte value1 = 0, value2 = 1;
- byte temp1 = r.AllocateLocal(kWasmS128);
- byte temp2 = r.AllocateLocal(kWasmS128);
- byte temp3 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_LOCAL_SET(temp3, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_SIMD_I16x8_EXTRACT_LANE(0, WASM_LOCAL_GET(temp3)));
-
- CHECK_EQ(expected_op(0xffff, static_cast<uint16_t>(0x7fffffff)),
- r.Call(0xffff, 0x7fffffff));
- CHECK_EQ(expected_op(0xfeff, static_cast<uint16_t>(0x7fffffff)),
- r.Call(0xfeff, 0x7fffffff));
- CHECK_EQ(expected_op(0xffff, static_cast<uint16_t>(0x7ffffeff)),
- r.Call(0xffff, 0x7ffffeff));
-}
-
WASM_SIMD_TEST(I16x8LeUMixed) {
RunI16x8MixedRelationalOpTest(execution_tier, lower_simd, kExprI16x8LeU,
UnsignedLessEqual);
@@ -4571,7 +3464,7 @@ WASM_EXTRACT_I16x8_TEST(S, UINT16) WASM_EXTRACT_I16x8_TEST(I, INT16)
#undef WASM_SIMD_SELECT_TEST
#undef WASM_SIMD_NON_CANONICAL_SELECT_TEST
#undef WASM_SIMD_BOOL_REDUCTION_TEST
-#undef WASM_SIMD_TEST_NO_LOWERING
+#undef WASM_SIMD_TEST
#undef WASM_SIMD_ANYTRUE_TEST
#undef WASM_SIMD_ALLTRUE_TEST
#undef WASM_SIMD_F64x2_QFMA
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
index 49995028324..250820ed19a 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -3083,6 +3083,47 @@ WASM_EXEC_TEST(CallIndirect_canonical) {
CHECK_TRAP(r.Call(5));
}
+WASM_EXEC_TEST(Regress_PushReturns) {
+ ValueType kSigTypes[] = {kWasmI32, kWasmI32, kWasmI32, kWasmI32,
+ kWasmI32, kWasmI32, kWasmI32, kWasmI32,
+ kWasmI32, kWasmI32, kWasmI32, kWasmI32};
+ FunctionSig sig(12, 0, kSigTypes);
+ WasmRunner<int32_t> r(execution_tier);
+
+ WasmFunctionCompiler& f1 = r.NewFunction(&sig);
+ BUILD(f1, WASM_I32V(1), WASM_I32V(2), WASM_I32V(3), WASM_I32V(4),
+ WASM_I32V(5), WASM_I32V(6), WASM_I32V(7), WASM_I32V(8), WASM_I32V(9),
+ WASM_I32V(10), WASM_I32V(11), WASM_I32V(12));
+
+ BUILD(r, WASM_CALL_FUNCTION0(f1.function_index()), WASM_DROP, WASM_DROP,
+ WASM_DROP, WASM_DROP, WASM_DROP, WASM_DROP, WASM_DROP, WASM_DROP,
+ WASM_DROP, WASM_DROP, WASM_DROP);
+ CHECK_EQ(1, r.Call());
+}
+
+WASM_EXEC_TEST(Regress_EnsureArguments) {
+ ValueType kSigTypes[] = {kWasmI32, kWasmI32, kWasmI32, kWasmI32,
+ kWasmI32, kWasmI32, kWasmI32, kWasmI32,
+ kWasmI32, kWasmI32, kWasmI32, kWasmI32};
+ FunctionSig sig(0, 12, kSigTypes);
+ WasmRunner<int32_t> r(execution_tier);
+
+ WasmFunctionCompiler& f2 = r.NewFunction(&sig);
+ BUILD(f2, kExprReturn);
+
+ BUILD(r, WASM_I32V(42), kExprReturn,
+ WASM_CALL_FUNCTION(f2.function_index(), WASM_I32V(1)));
+ CHECK_EQ(42, r.Call());
+}
+
+WASM_EXEC_TEST(Regress_PushControl) {
+ EXPERIMENTAL_FLAG_SCOPE(mv);
+ WasmRunner<int32_t> r(execution_tier);
+ BUILD(r, WASM_I32V(42),
+ WASM_IF(WASM_I32V(0), WASM_UNREACHABLE, kExprIf, kVoidCode, kExprEnd));
+ CHECK_EQ(42, r.Call());
+}
+
WASM_EXEC_TEST(F32Floor) {
WasmRunner<float, float> r(execution_tier);
BUILD(r, WASM_F32_FLOOR(WASM_LOCAL_GET(0)));
@@ -3341,7 +3382,7 @@ static void CompileCallIndirectMany(TestExecutionTier tier, ValueType param) {
TestSignatures sigs;
for (byte num_params = 0; num_params < 40; ++num_params) {
WasmRunner<void> r(tier);
- FunctionSig* sig = sigs.many(r.zone(), kWasmStmt, param, num_params);
+ FunctionSig* sig = sigs.many(r.zone(), kWasmVoid, param, num_params);
r.builder().AddSignature(sig);
r.builder().AddSignature(sig);
@@ -3821,6 +3862,18 @@ TEST(Regression_1085507) {
WASM_BLOCK_X(sig_v_i, kExprDrop), kExprElse, kExprEnd, WASM_I32V_1(0));
}
+TEST(Regression_1185323_1185492) {
+ WasmRunner<int32_t> r(TestExecutionTier::kInterpreter);
+ r.builder().AddIndirectFunctionTable(nullptr, 1);
+ BUILD(r, WASM_I32V_1(0),
+ // Use a long leb128 encoding of kExprTableSize instruction.
+ // This exercises a bug in the interpreter which tries to read the
+ // immediate at pc+2 (it should be pc+4).
+ kNumericPrefix, 0x90, 0x80, 0x00, 0x00, // table.size 0.
+ WASM_UNREACHABLE, kExprTableSet, 0x00); // Hits a DCHECK if reached.
+ r.Call();
+}
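The padded bytes in the regression test above are an over-long unsigned LEB128 encoding of the opcode index. A standalone decoder sketch shows they still decode to 0x10 (table.size), which is why the table-index immediate sits at pc+4 rather than pc+2:

#include <cstdint>
#include <cstdio>

// Standalone sketch, not part of the diff: unsigned LEB128 decoding of the
// padded opcode bytes used by the test.
uint32_t DecodeUnsignedLeb128(const uint8_t* bytes, int* length) {
  uint32_t result = 0;
  int shift = 0;
  int i = 0;
  uint8_t byte;
  do {
    byte = bytes[i++];
    result |= static_cast<uint32_t>(byte & 0x7F) << shift;
    shift += 7;
  } while (byte & 0x80);
  *length = i;
  return result;
}

int main() {
  const uint8_t padded[] = {0x90, 0x80, 0x00};  // from the regression test
  int length = 0;
  uint32_t value = DecodeUnsignedLeb128(padded, &length);
  std::printf("opcode index 0x%x, encoded in %d bytes\n", value, length);
  return 0;
}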
+
#undef B1
#undef B2
#undef RET
diff --git a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
index 92ad2050704..639bc663360 100644
--- a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
+++ b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
@@ -393,7 +393,7 @@ ZoneBuffer GetModuleWithInvalidSection(Zone* zone) {
TestSignatures sigs;
WasmModuleBuilder builder(zone);
// Add an invalid global to the module. The decoder will fail there.
- builder.AddGlobal(kWasmStmt, true, WasmInitExpr::GlobalGet(12));
+ builder.AddGlobal(kWasmVoid, true, WasmInitExpr::GlobalGet(12));
{
WasmFunctionBuilder* f = builder.AddFunction(sigs.i_iii());
uint8_t code[] = {kExprLocalGet, 0, kExprEnd};
diff --git a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
index 156bfb55acf..9c2edfe75f0 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
@@ -237,7 +237,7 @@ class CollectValuesBreakHandler : public debug::DebugDelegate {
CHECK_EQ(expected.locals.size(), num_locals);
for (int i = 0; i < num_locals; ++i) {
WasmValue local_value = debug_info->GetLocalValue(
- i, frame->pc(), frame->fp(), frame->callee_fp());
+ i, frame->pc(), frame->fp(), frame->callee_fp(), isolate_);
CHECK_EQ(WasmValWrapper{expected.locals[i]}, WasmValWrapper{local_value});
}
@@ -245,7 +245,7 @@ class CollectValuesBreakHandler : public debug::DebugDelegate {
CHECK_EQ(expected.stack.size(), stack_depth);
for (int i = 0; i < stack_depth; ++i) {
WasmValue stack_value = debug_info->GetStackValue(
- i, frame->pc(), frame->fp(), frame->callee_fp());
+ i, frame->pc(), frame->fp(), frame->callee_fp(), isolate_);
CHECK_EQ(WasmValWrapper{expected.stack[i]}, WasmValWrapper{stack_value});
}
diff --git a/deps/v8/test/cctest/wasm/test-wasm-metrics.cc b/deps/v8/test/cctest/wasm/test-wasm-metrics.cc
index 2c49e2de81f..b55b971b718 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-metrics.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-metrics.cc
@@ -7,6 +7,7 @@
#include "include/libplatform/libplatform.h"
#include "include/v8-metrics.h"
#include "src/api/api-inl.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-module-builder.h"
#include "test/cctest/cctest.h"
#include "test/common/wasm/flag-utils.h"
diff --git a/deps/v8/test/cctest/wasm/test-wasm-serialization.cc b/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
index e23a549ddd8..79ba524ffb3 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
@@ -205,8 +205,8 @@ TEST(DeserializeWithSourceUrl) {
const std::string url = "http://example.com/example.wasm";
Handle<WasmModuleObject> module_object;
CHECK(test.Deserialize(VectorOf(url)).ToHandle(&module_object));
- String source_url = String::cast(module_object->script().source_url());
- CHECK_EQ(url, source_url.ToCString().get());
+ String url_str = String::cast(module_object->script().name());
+ CHECK_EQ(url, url_str.ToCString().get());
}
test.CollectGarbage();
}
diff --git a/deps/v8/test/cctest/wasm/test-wasm-stack.cc b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
index 9faab4479eb..5e59f13c3f2 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-stack.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
@@ -74,13 +74,21 @@ void CheckExceptionInfos(v8::internal::Isolate* i_isolate, Handle<Object> exc,
// Line and column are 1-based in v8::StackFrame, just as in ExceptionInfo.
CHECK_EQ(excInfos[frameNr].line_nr, frame->GetLineNumber());
CHECK_EQ(excInfos[frameNr].column, frame->GetColumn());
+ v8::Local<v8::String> scriptSource = frame->GetScriptSource();
+ if (frame->IsWasm()) {
+ CHECK(scriptSource.IsEmpty());
+ } else {
+ CHECK(scriptSource->IsString());
+ }
}
- CheckComputeLocation(i_isolate, exc, excInfos[0]);
+ CheckComputeLocation(i_isolate, exc, excInfos[0],
+ stack->GetFrame(v8_isolate, 0));
}
void CheckComputeLocation(v8::internal::Isolate* i_isolate, Handle<Object> exc,
- const ExceptionInfo& topLocation) {
+ const ExceptionInfo& topLocation,
+ const v8::Local<v8::StackFrame> stackFrame) {
MessageLocation loc;
CHECK(i_isolate->ComputeLocationFromStackTrace(&loc, exc));
printf("loc start: %d, end: %d\n", loc.start_pos(), loc.end_pos());
@@ -97,6 +105,13 @@ void CheckComputeLocation(v8::internal::Isolate* i_isolate, Handle<Object> exc,
// whether Script::PositionInfo.column should be the offset
// relative to the module or relative to the function.
// CHECK_EQ(topLocation.column - 1, message->GetColumnNumber());
+ String scriptSource = message->GetSource();
+ CHECK(scriptSource.IsString());
+ if (stackFrame->IsWasm()) {
+ CHECK_EQ(scriptSource.length(), 0);
+ } else {
+ CHECK_GT(scriptSource.length(), 0);
+ }
}
#undef CHECK_CSTREQ
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.cc b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
index 82f7824315a..163e7b87995 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
@@ -10,6 +10,7 @@
#include "src/wasm/graph-builder-interface.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/module-compiler.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-opcodes.h"
@@ -359,16 +360,18 @@ void TestBuildingGraphWithBuilder(compiler::WasmGraphBuilder* builder,
const byte* start, const byte* end) {
WasmFeatures unused_detected_features;
FunctionBody body(sig, 0, start, end);
+ std::vector<compiler::WasmLoopInfo> loops;
DecodeResult result =
BuildTFGraph(zone->allocator(), WasmFeatures::All(), nullptr, builder,
- &unused_detected_features, body, nullptr);
+ &unused_detected_features, body, &loops, nullptr);
if (result.failed()) {
#ifdef DEBUG
if (!FLAG_trace_wasm_decoder) {
// Retry the compilation with the tracing flag on, to help in debugging.
FLAG_trace_wasm_decoder = true;
- result = BuildTFGraph(zone->allocator(), WasmFeatures::All(), nullptr,
- builder, &unused_detected_features, body, nullptr);
+ result =
+ BuildTFGraph(zone->allocator(), WasmFeatures::All(), nullptr, builder,
+ &unused_detected_features, body, &loops, nullptr);
}
#endif
@@ -376,9 +379,6 @@ void TestBuildingGraphWithBuilder(compiler::WasmGraphBuilder* builder,
result.error().message().c_str());
}
builder->LowerInt64(compiler::WasmGraphBuilder::kCalledFromWasm);
- if (!CpuFeatures::SupportsWasmSimd128()) {
- builder->SimdScalarLoweringForTesting();
- }
}
void TestBuildingGraph(Zone* zone, compiler::JSGraph* jsgraph,
@@ -481,8 +481,8 @@ Handle<Code> WasmFunctionWrapper::GetWrapperCode() {
for (size_t i = 0; i < num_params + 1; i++) {
rep_builder.AddParam(MachineRepresentation::kWord32);
}
- compiler::Int64Lowering r(graph(), machine(), common(), zone(),
- rep_builder.Build());
+ compiler::Int64Lowering r(graph(), machine(), common(), simplified(),
+ zone(), rep_builder.Build());
r.LowerGraph();
}
@@ -561,8 +561,7 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
DCHECK_NOT_NULL(code);
DisallowGarbageCollection no_gc;
Script script = builder_->instance_object()->module_object().script();
- std::unique_ptr<char[]> source_url =
- String::cast(script.source_url()).ToCString();
+ std::unique_ptr<char[]> source_url = String::cast(script.name()).ToCString();
if (WasmCode::ShouldBeLogged(isolate())) {
code->LogCode(isolate(), source_url.get(), script.id());
}
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
index f873390283d..e4b0868e47c 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -531,6 +531,19 @@ class WasmRunnerBase : public InitializedHandleScope {
static bool trap_happened;
};
+template <typename T>
+inline WasmValue WasmValueInitializer(T value) {
+ return WasmValue(value);
+}
+template <>
+inline WasmValue WasmValueInitializer(int8_t value) {
+ return WasmValue(static_cast<int32_t>(value));
+}
+template <>
+inline WasmValue WasmValueInitializer(int16_t value) {
+ return WasmValue(static_cast<int32_t>(value));
+}
+
template <typename ReturnType, typename... ParamTypes>
class WasmRunner : public WasmRunnerBase {
public:
@@ -557,6 +570,11 @@ class WasmRunner : public WasmRunnerBase {
lower_simd) {}
ReturnType Call(ParamTypes... p) {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ // Save the original context, because CEntry (for runtime calls) will
+ // reset / invalidate it when returning.
+ SaveContext save_context(isolate);
+
DCHECK(compiled_);
if (interpret()) return CallInterpreter(p...);
@@ -586,7 +604,7 @@ class WasmRunner : public WasmRunnerBase {
ReturnType CallInterpreter(ParamTypes... p) {
interpreter()->Reset();
- std::array<WasmValue, sizeof...(p)> args{{WasmValue(p)...}};
+ std::array<WasmValue, sizeof...(p)> args{{WasmValueInitializer(p)...}};
interpreter()->InitFrame(function(), args.data());
interpreter()->Run();
CHECK_GT(interpreter()->NumInterpretedCalls(), 0);
diff --git a/deps/v8/test/cctest/wasm/wasm-simd-utils.cc b/deps/v8/test/cctest/wasm/wasm-simd-utils.cc
new file mode 100644
index 00000000000..64a3e63aaac
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/wasm-simd-utils.cc
@@ -0,0 +1,752 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/wasm/wasm-simd-utils.h"
+
+#include <cmath>
+
+#include "src/base/logging.h"
+#include "src/base/memory.h"
+#include "src/common/globals.h"
+#include "src/wasm/compilation-environment.h"
+#include "src/wasm/value-type.h"
+#include "src/wasm/wasm-opcodes.h"
+#include "test/cctest/compiler/c-signature.h"
+#include "test/cctest/compiler/value-helper.h"
+#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/common/wasm/wasm-macro-gen.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+void RunI8x16UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int8UnOp expected_op) {
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
+ // Global to hold output.
+ int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
+ // Build fn to splat test value, perform unop, and write the result.
+ byte value = 0;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
+ WASM_ONE);
+
+ FOR_INT8_INPUTS(x) {
+ r.Call(x);
+ int8_t expected = expected_op(x);
+ for (int i = 0; i < 16; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g[i]));
+ }
+ }
+}
+
+template <typename T, typename OpType>
+void RunI8x16BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, OpType expected_op) {
+ WasmRunner<int32_t, T, T> r(execution_tier, lower_simd);
+ // Global to hold output.
+ T* g = r.builder().template AddGlobal<T>(kWasmS128);
+ // Build fn to splat test values, perform binop, and write the result.
+ byte value1 = 0, value2 = 1;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ byte temp2 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_ONE);
+
+ for (T x : compiler::ValueHelper::GetVector<T>()) {
+ for (T y : compiler::ValueHelper::GetVector<T>()) {
+ r.Call(x, y);
+ T expected = expected_op(x, y);
+ for (int i = 0; i < 16; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<T>(&g[i]));
+ }
+ }
+ }
+}
+
+// Explicit instantiations of uses.
+template void RunI8x16BinOpTest<int8_t>(TestExecutionTier, LowerSimd,
+ WasmOpcode, Int8BinOp);
+
+template void RunI8x16BinOpTest<uint8_t>(TestExecutionTier, LowerSimd,
+ WasmOpcode, Uint8BinOp);
+
+void RunI8x16ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int8ShiftOp expected_op) {
+ // Intentionally shift by 8, should be no-op.
+ for (int shift = 1; shift <= 8; shift++) {
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
+ int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
+ int8_t* g_imm = r.builder().AddGlobal<int8_t>(kWasmS128);
+ int8_t* g_mem = r.builder().AddGlobal<int8_t>(kWasmS128);
+ byte value = 0;
+ byte simd = r.AllocateLocal(kWasmS128);
+ // Shift using an immediate, and shift using a value loaded from memory.
+ BUILD(
+ r, WASM_LOCAL_SET(simd, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
+ WASM_I32V(shift))),
+ WASM_GLOBAL_SET(1, WASM_SIMD_SHIFT_OP(
+ opcode, WASM_LOCAL_GET(simd),
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
+ WASM_ONE);
+
+ r.builder().WriteMemory(&memory[0], shift);
+ FOR_INT8_INPUTS(x) {
+ r.Call(x);
+ int8_t expected = expected_op(x, shift);
+ for (int i = 0; i < 16; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g_imm[i]));
+ CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g_mem[i]));
+ }
+ }
+ }
+}
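The loop bound of 8 in RunI8x16ShiftOpTest leans on the wasm SIMD rule that shift counts are taken modulo the lane width, so shifting i8 lanes by 8 is a no-op. A standalone scalar sketch of that rule:

#include <cassert>
#include <cstdint>

// Standalone sketch, not part of the diff: wasm SIMD shifts mask the count.
uint8_t I8x16ShlLane(uint8_t lane, int shift_count) {
  return static_cast<uint8_t>(lane << (shift_count % 8));
}

int main() {
  assert(I8x16ShlLane(0x81, 8) == 0x81);  // a count of 8 wraps to 0: no-op
  assert(I8x16ShlLane(0x81, 1) == 0x02);  // 0x102, truncated to 8 bits
  return 0;
}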
+
+void RunI8x16MixedRelationalOpTest(TestExecutionTier execution_tier,
+ LowerSimd lower_simd, WasmOpcode opcode,
+ Int8BinOp expected_op) {
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
+ byte value1 = 0, value2 = 1;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ byte temp2 = r.AllocateLocal(kWasmS128);
+ byte temp3 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_LOCAL_SET(temp3, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_SIMD_I8x16_EXTRACT_LANE(0, WASM_LOCAL_GET(temp3)));
+
+ CHECK_EQ(expected_op(0xff, static_cast<uint8_t>(0x7fff)),
+ r.Call(0xff, 0x7fff));
+ CHECK_EQ(expected_op(0xfe, static_cast<uint8_t>(0x7fff)),
+ r.Call(0xfe, 0x7fff));
+ CHECK_EQ(expected_op(0xff, static_cast<uint8_t>(0x7ffe)),
+ r.Call(0xff, 0x7ffe));
+}
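The static_cast<uint8_t>(...) in the expectations above models what the i8x16 comparison sees in lane 0: the low byte of the wider i16x8 splat. Standalone sketch (plain C++, assumes a little-endian host, matching the wasm lane layout):

#include <cassert>
#include <cstdint>
#include <cstring>

// Standalone sketch, not part of the diff: lane 0 of an i16x8 splat of 0x7fff,
// reinterpreted as i8x16, is the low byte 0xff.
int main() {
  uint16_t wide_lanes[8];
  for (int i = 0; i < 8; i++) wide_lanes[i] = 0x7fff;
  uint8_t bytes[16];
  std::memcpy(bytes, wide_lanes, sizeof wide_lanes);
  assert(bytes[0] == static_cast<uint8_t>(0x7fff));  // 0xff
  return 0;
}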
+
+void RunI16x8UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int16UnOp expected_op) {
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
+ // Global to hold output.
+ int16_t* g = r.builder().AddGlobal<int16_t>(kWasmS128);
+ // Build fn to splat test value, perform unop, and write the result.
+ byte value = 0;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
+ WASM_ONE);
+
+ FOR_INT16_INPUTS(x) {
+ r.Call(x);
+ int16_t expected = expected_op(x);
+ for (int i = 0; i < 8; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<int16_t>(&g[i]));
+ }
+ }
+}
+
+template <typename T, typename OpType>
+void RunI16x8BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, OpType expected_op) {
+ WasmRunner<int32_t, T, T> r(execution_tier, lower_simd);
+ // Global to hold output.
+ T* g = r.builder().template AddGlobal<T>(kWasmS128);
+ // Build fn to splat test values, perform binop, and write the result.
+ byte value1 = 0, value2 = 1;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ byte temp2 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_ONE);
+
+ for (T x : compiler::ValueHelper::GetVector<T>()) {
+ for (T y : compiler::ValueHelper::GetVector<T>()) {
+ r.Call(x, y);
+ T expected = expected_op(x, y);
+ for (int i = 0; i < 8; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<T>(&g[i]));
+ }
+ }
+ }
+}
+
+// Explicit instantiations of uses.
+template void RunI16x8BinOpTest<int16_t>(TestExecutionTier, LowerSimd,
+ WasmOpcode, Int16BinOp);
+template void RunI16x8BinOpTest<uint16_t>(TestExecutionTier, LowerSimd,
+ WasmOpcode, Uint16BinOp);
+
+void RunI16x8ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int16ShiftOp expected_op) {
+ // Intentionally shift by 16, should be no-op.
+ for (int shift = 1; shift <= 16; shift++) {
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
+ int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
+ int16_t* g_imm = r.builder().AddGlobal<int16_t>(kWasmS128);
+ int16_t* g_mem = r.builder().AddGlobal<int16_t>(kWasmS128);
+ byte value = 0;
+ byte simd = r.AllocateLocal(kWasmS128);
+ // Shift using an immediate, and shift using a value loaded from memory.
+ BUILD(
+ r, WASM_LOCAL_SET(simd, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
+ WASM_I32V(shift))),
+ WASM_GLOBAL_SET(1, WASM_SIMD_SHIFT_OP(
+ opcode, WASM_LOCAL_GET(simd),
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
+ WASM_ONE);
+
+ r.builder().WriteMemory(&memory[0], shift);
+ FOR_INT16_INPUTS(x) {
+ r.Call(x);
+ int16_t expected = expected_op(x, shift);
+ for (int i = 0; i < 8; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<int16_t>(&g_imm[i]));
+ CHECK_EQ(expected, ReadLittleEndianValue<int16_t>(&g_mem[i]));
+ }
+ }
+ }
+}
+
+void RunI16x8MixedRelationalOpTest(TestExecutionTier execution_tier,
+ LowerSimd lower_simd, WasmOpcode opcode,
+ Int16BinOp expected_op) {
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
+ byte value1 = 0, value2 = 1;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ byte temp2 = r.AllocateLocal(kWasmS128);
+ byte temp3 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_LOCAL_SET(temp3, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_SIMD_I16x8_EXTRACT_LANE(0, WASM_LOCAL_GET(temp3)));
+
+ CHECK_EQ(expected_op(0xffff, static_cast<uint16_t>(0x7fffffff)),
+ r.Call(0xffff, 0x7fffffff));
+ CHECK_EQ(expected_op(0xfeff, static_cast<uint16_t>(0x7fffffff)),
+ r.Call(0xfeff, 0x7fffffff));
+ CHECK_EQ(expected_op(0xffff, static_cast<uint16_t>(0x7ffffeff)),
+ r.Call(0xffff, 0x7ffffeff));
+}
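
The mixed relational helper above splats one operand as i16x8 and the other as i32x4, so lane 0 of the comparison only sees the low 16 bits of the 32-bit input; that is why the expected values cast the second argument with static_cast<uint16_t>. A standalone sketch of the truncation (illustration only, not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      // The low 16 bits of 0x7fffffff are 0xffff; viewed as a signed 16-bit
      // lane this is -1, the same value splatted into the i16x8 operand above.
      int32_t wide = 0x7fffffff;
      uint16_t low_half = static_cast<uint16_t>(wide);
      assert(low_half == 0xffff);
      assert(static_cast<int16_t>(low_half) == -1);
      return 0;
    }
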
+
+void RunI32x4UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int32UnOp expected_op) {
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
+ // Global to hold output.
+ int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
+ // Build fn to splat test value, perform unop, and write the result.
+ byte value = 0;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
+ WASM_ONE);
+
+ FOR_INT32_INPUTS(x) {
+ r.Call(x);
+ int32_t expected = expected_op(x);
+ for (int i = 0; i < 4; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
+ }
+ }
+}
+
+void RunI32x4BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int32BinOp expected_op) {
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
+ // Global to hold output.
+ int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
+ // Build fn to splat test values, perform binop, and write the result.
+ byte value1 = 0, value2 = 1;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ byte temp2 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_ONE);
+
+ FOR_INT32_INPUTS(x) {
+ FOR_INT32_INPUTS(y) {
+ r.Call(x, y);
+ int32_t expected = expected_op(x, y);
+ for (int i = 0; i < 4; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
+ }
+ }
+ }
+}
+
+void RunI32x4ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int32ShiftOp expected_op) {
+  // Intentionally include a shift by 32; it should be a no-op, since shift
+  // counts are taken modulo the lane width.
+ for (int shift = 1; shift <= 32; shift++) {
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
+ int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
+ int32_t* g_imm = r.builder().AddGlobal<int32_t>(kWasmS128);
+ int32_t* g_mem = r.builder().AddGlobal<int32_t>(kWasmS128);
+ byte value = 0;
+ byte simd = r.AllocateLocal(kWasmS128);
+ // Shift using an immediate, and shift using a value loaded from memory.
+ BUILD(
+ r, WASM_LOCAL_SET(simd, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
+ WASM_I32V(shift))),
+ WASM_GLOBAL_SET(1, WASM_SIMD_SHIFT_OP(
+ opcode, WASM_LOCAL_GET(simd),
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
+ WASM_ONE);
+
+ r.builder().WriteMemory(&memory[0], shift);
+ FOR_INT32_INPUTS(x) {
+ r.Call(x);
+ int32_t expected = expected_op(x, shift);
+ for (int i = 0; i < 4; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g_imm[i]));
+ CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g_mem[i]));
+ }
+ }
+ }
+}
+
+void RunI64x2UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int64UnOp expected_op) {
+ WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
+ // Global to hold output.
+ int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
+ // Build fn to splat test value, perform unop, and write the result.
+ byte value = 0;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
+ WASM_ONE);
+
+ FOR_INT64_INPUTS(x) {
+ r.Call(x);
+ int64_t expected = expected_op(x);
+ for (int i = 0; i < 2; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
+ }
+ }
+}
+
+void RunI64x2BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int64BinOp expected_op) {
+ WasmRunner<int32_t, int64_t, int64_t> r(execution_tier, lower_simd);
+ // Global to hold output.
+ int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
+ // Build fn to splat test values, perform binop, and write the result.
+ byte value1 = 0, value2 = 1;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ byte temp2 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_ONE);
+
+ FOR_INT64_INPUTS(x) {
+ FOR_INT64_INPUTS(y) {
+ r.Call(x, y);
+ int64_t expected = expected_op(x, y);
+ for (int i = 0; i < 2; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
+ }
+ }
+ }
+}
+
+void RunI64x2ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int64ShiftOp expected_op) {
+  // Intentionally include a shift by 64; it should be a no-op, since shift
+  // counts are taken modulo the lane width.
+ for (int shift = 1; shift <= 64; shift++) {
+ WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
+ int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
+ int64_t* g_imm = r.builder().AddGlobal<int64_t>(kWasmS128);
+ int64_t* g_mem = r.builder().AddGlobal<int64_t>(kWasmS128);
+ byte value = 0;
+ byte simd = r.AllocateLocal(kWasmS128);
+ // Shift using an immediate, and shift using a value loaded from memory.
+ BUILD(
+ r, WASM_LOCAL_SET(simd, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
+ WASM_I32V(shift))),
+ WASM_GLOBAL_SET(1, WASM_SIMD_SHIFT_OP(
+ opcode, WASM_LOCAL_GET(simd),
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
+ WASM_ONE);
+
+ r.builder().WriteMemory(&memory[0], shift);
+ FOR_INT64_INPUTS(x) {
+ r.Call(x);
+ int64_t expected = expected_op(x, shift);
+ for (int i = 0; i < 2; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g_imm[i]));
+ CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g_mem[i]));
+ }
+ }
+ }
+}
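
All of the shift helpers above deliberately include a shift equal to the lane width, because Wasm SIMD shift counts are taken modulo the lane width and such a shift must behave as a no-op. A minimal standalone sketch of the masking that the scalar expected_op callbacks are assumed to perform (the names here are illustrative, not the actual V8 helpers):

    #include <cassert>
    #include <cstdint>
    #include <type_traits>

    template <typename T>
    T LogicalShiftLeft(T value, int shift) {
      // Wasm semantics: the effective shift count is 'shift mod lane width'.
      constexpr int kLaneBits = sizeof(T) * 8;
      using Unsigned = typename std::make_unsigned<T>::type;
      return static_cast<T>(static_cast<Unsigned>(value) << (shift % kLaneBits));
    }

    int main() {
      // Shifting an i16 lane by 16 is equivalent to shifting by 0.
      assert(LogicalShiftLeft<int16_t>(0x1234, 16) == 0x1234);
      assert(LogicalShiftLeft<int16_t>(0x1234, 1) == 0x2468);
      return 0;
    }
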
+
+bool IsExtreme(float x) {
+ float abs_x = std::fabs(x);
+ const float kSmallFloatThreshold = 1.0e-32f;
+ const float kLargeFloatThreshold = 1.0e32f;
+ return abs_x != 0.0f && // 0 or -0 are fine.
+ (abs_x < kSmallFloatThreshold || abs_x > kLargeFloatThreshold);
+}
+
+bool IsSameNan(float expected, float actual) {
+ // Sign is non-deterministic.
+ uint32_t expected_bits = bit_cast<uint32_t>(expected) & ~0x80000000;
+ uint32_t actual_bits = bit_cast<uint32_t>(actual) & ~0x80000000;
+ // Some implementations convert signaling NaNs to quiet NaNs.
+ return (expected_bits == actual_bits) ||
+ ((expected_bits | 0x00400000) == actual_bits);
+}
+
+bool IsCanonical(float actual) {
+ uint32_t actual_bits = bit_cast<uint32_t>(actual);
+ // Canonical NaN has quiet bit and no payload.
+ return (actual_bits & 0xFFC00000) == actual_bits;
+}
+
+void CheckFloatResult(float x, float y, float expected, float actual,
+ bool exact) {
+ if (std::isnan(expected)) {
+ CHECK(std::isnan(actual));
+ if (std::isnan(x) && IsSameNan(x, actual)) return;
+ if (std::isnan(y) && IsSameNan(y, actual)) return;
+ if (IsSameNan(expected, actual)) return;
+ if (IsCanonical(actual)) return;
+ // This is expected to assert; it's useful for debugging.
+ CHECK_EQ(bit_cast<uint32_t>(expected), bit_cast<uint32_t>(actual));
+ } else {
+ if (exact) {
+ CHECK_EQ(expected, actual);
+ // The sign of 0's must match.
+ CHECK_EQ(std::signbit(expected), std::signbit(actual));
+ return;
+ }
+ // Otherwise, perform an approximate equality test. First check for
+ // equality to handle +/-Infinity where approximate equality doesn't work.
+ if (expected == actual) return;
+
+ // 1% error allows all platforms to pass easily.
+ constexpr float kApproximationError = 0.01f;
+ float abs_error = std::abs(expected) * kApproximationError,
+ min = expected - abs_error, max = expected + abs_error;
+ CHECK_LE(min, actual);
+ CHECK_GE(max, actual);
+ }
+}
+
+void RunF32x4UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, FloatUnOp expected_op, bool exact) {
+ WasmRunner<int32_t, float> r(execution_tier, lower_simd);
+ // Global to hold output.
+ float* g = r.builder().AddGlobal<float>(kWasmS128);
+ // Build fn to splat test value, perform unop, and write the result.
+ byte value = 0;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
+ WASM_ONE);
+
+ FOR_FLOAT32_INPUTS(x) {
+ if (!PlatformCanRepresent(x)) continue;
+ // Extreme values have larger errors so skip them for approximation tests.
+ if (!exact && IsExtreme(x)) continue;
+ float expected = expected_op(x);
+#if V8_OS_AIX
+ if (!MightReverseSign<FloatUnOp>(expected_op))
+ expected = FpOpWorkaround<float>(x, expected);
+#endif
+ if (!PlatformCanRepresent(expected)) continue;
+ r.Call(x);
+ for (int i = 0; i < 4; i++) {
+ float actual = ReadLittleEndianValue<float>(&g[i]);
+ CheckFloatResult(x, x, expected, actual, exact);
+ }
+ }
+
+ FOR_FLOAT32_NAN_INPUTS(i) {
+ float x = bit_cast<float>(nan_test_array[i]);
+ if (!PlatformCanRepresent(x)) continue;
+ // Extreme values have larger errors so skip them for approximation tests.
+ if (!exact && IsExtreme(x)) continue;
+ float expected = expected_op(x);
+ if (!PlatformCanRepresent(expected)) continue;
+ r.Call(x);
+ for (int i = 0; i < 4; i++) {
+ float actual = ReadLittleEndianValue<float>(&g[i]);
+ CheckFloatResult(x, x, expected, actual, exact);
+ }
+ }
+}
+
+void RunF32x4BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, FloatBinOp expected_op) {
+ WasmRunner<int32_t, float, float> r(execution_tier, lower_simd);
+ // Global to hold output.
+ float* g = r.builder().AddGlobal<float>(kWasmS128);
+ // Build fn to splat test values, perform binop, and write the result.
+ byte value1 = 0, value2 = 1;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ byte temp2 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_ONE);
+
+ FOR_FLOAT32_INPUTS(x) {
+ if (!PlatformCanRepresent(x)) continue;
+ FOR_FLOAT32_INPUTS(y) {
+ if (!PlatformCanRepresent(y)) continue;
+ float expected = expected_op(x, y);
+ if (!PlatformCanRepresent(expected)) continue;
+ r.Call(x, y);
+ for (int i = 0; i < 4; i++) {
+ float actual = ReadLittleEndianValue<float>(&g[i]);
+ CheckFloatResult(x, y, expected, actual, true /* exact */);
+ }
+ }
+ }
+
+ FOR_FLOAT32_NAN_INPUTS(i) {
+ float x = bit_cast<float>(nan_test_array[i]);
+ if (!PlatformCanRepresent(x)) continue;
+ FOR_FLOAT32_NAN_INPUTS(j) {
+ float y = bit_cast<float>(nan_test_array[j]);
+ if (!PlatformCanRepresent(y)) continue;
+ float expected = expected_op(x, y);
+ if (!PlatformCanRepresent(expected)) continue;
+ r.Call(x, y);
+ for (int i = 0; i < 4; i++) {
+ float actual = ReadLittleEndianValue<float>(&g[i]);
+ CheckFloatResult(x, y, expected, actual, true /* exact */);
+ }
+ }
+ }
+}
+
+void RunF32x4CompareOpTest(TestExecutionTier execution_tier,
+ LowerSimd lower_simd, WasmOpcode opcode,
+ FloatCompareOp expected_op) {
+ WasmRunner<int32_t, float, float> r(execution_tier, lower_simd);
+ // Set up global to hold mask output.
+ int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
+ // Build fn to splat test values, perform compare op, and write the result.
+ byte value1 = 0, value2 = 1;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ byte temp2 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_ONE);
+
+ FOR_FLOAT32_INPUTS(x) {
+ if (!PlatformCanRepresent(x)) continue;
+ FOR_FLOAT32_INPUTS(y) {
+ if (!PlatformCanRepresent(y)) continue;
+ float diff = x - y; // Model comparison as subtraction.
+ if (!PlatformCanRepresent(diff)) continue;
+ r.Call(x, y);
+ int32_t expected = expected_op(x, y);
+ for (int i = 0; i < 4; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
+ }
+ }
+ }
+}
+
+bool IsExtreme(double x) {
+ double abs_x = std::fabs(x);
+ const double kSmallFloatThreshold = 1.0e-298;
+ const double kLargeFloatThreshold = 1.0e298;
+  return abs_x != 0.0 &&  // 0 or -0 are fine.
+ (abs_x < kSmallFloatThreshold || abs_x > kLargeFloatThreshold);
+}
+
+bool IsSameNan(double expected, double actual) {
+ // Sign is non-deterministic.
+ uint64_t expected_bits = bit_cast<uint64_t>(expected) & ~0x8000000000000000;
+ uint64_t actual_bits = bit_cast<uint64_t>(actual) & ~0x8000000000000000;
+ // Some implementations convert signaling NaNs to quiet NaNs.
+ return (expected_bits == actual_bits) ||
+ ((expected_bits | 0x0008000000000000) == actual_bits);
+}
+
+bool IsCanonical(double actual) {
+ uint64_t actual_bits = bit_cast<uint64_t>(actual);
+ // Canonical NaN has quiet bit and no payload.
+ return (actual_bits & 0xFFF8000000000000) == actual_bits;
+}
+
+void CheckDoubleResult(double x, double y, double expected, double actual,
+ bool exact) {
+ if (std::isnan(expected)) {
+ CHECK(std::isnan(actual));
+ if (std::isnan(x) && IsSameNan(x, actual)) return;
+ if (std::isnan(y) && IsSameNan(y, actual)) return;
+ if (IsSameNan(expected, actual)) return;
+ if (IsCanonical(actual)) return;
+ // This is expected to assert; it's useful for debugging.
+ CHECK_EQ(bit_cast<uint64_t>(expected), bit_cast<uint64_t>(actual));
+ } else {
+ if (exact) {
+ CHECK_EQ(expected, actual);
+ // The sign of 0's must match.
+ CHECK_EQ(std::signbit(expected), std::signbit(actual));
+ return;
+ }
+ // Otherwise, perform an approximate equality test. First check for
+ // equality to handle +/-Infinity where approximate equality doesn't work.
+ if (expected == actual) return;
+
+ // 1% error allows all platforms to pass easily.
+    constexpr double kApproximationError = 0.01;
+ double abs_error = std::abs(expected) * kApproximationError,
+ min = expected - abs_error, max = expected + abs_error;
+ CHECK_LE(min, actual);
+ CHECK_GE(max, actual);
+ }
+}
+
+void RunF64x2UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, DoubleUnOp expected_op, bool exact) {
+ WasmRunner<int32_t, double> r(execution_tier, lower_simd);
+ // Global to hold output.
+ double* g = r.builder().AddGlobal<double>(kWasmS128);
+ // Build fn to splat test value, perform unop, and write the result.
+ byte value = 0;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
+ WASM_ONE);
+
+ FOR_FLOAT64_INPUTS(x) {
+ if (!PlatformCanRepresent(x)) continue;
+ // Extreme values have larger errors so skip them for approximation tests.
+ if (!exact && IsExtreme(x)) continue;
+ double expected = expected_op(x);
+#if V8_OS_AIX
+ if (!MightReverseSign<DoubleUnOp>(expected_op))
+ expected = FpOpWorkaround<double>(x, expected);
+#endif
+ if (!PlatformCanRepresent(expected)) continue;
+ r.Call(x);
+ for (int i = 0; i < 2; i++) {
+ double actual = ReadLittleEndianValue<double>(&g[i]);
+ CheckDoubleResult(x, x, expected, actual, exact);
+ }
+ }
+
+ FOR_FLOAT64_NAN_INPUTS(i) {
+ double x = bit_cast<double>(double_nan_test_array[i]);
+ if (!PlatformCanRepresent(x)) continue;
+ // Extreme values have larger errors so skip them for approximation tests.
+ if (!exact && IsExtreme(x)) continue;
+ double expected = expected_op(x);
+ if (!PlatformCanRepresent(expected)) continue;
+ r.Call(x);
+ for (int i = 0; i < 2; i++) {
+ double actual = ReadLittleEndianValue<double>(&g[i]);
+ CheckDoubleResult(x, x, expected, actual, exact);
+ }
+ }
+}
+
+void RunF64x2BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, DoubleBinOp expected_op) {
+ WasmRunner<int32_t, double, double> r(execution_tier, lower_simd);
+ // Global to hold output.
+ double* g = r.builder().AddGlobal<double>(kWasmS128);
+  // Build fn to splat test values, perform binop, and write the result.
+ byte value1 = 0, value2 = 1;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ byte temp2 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_ONE);
+
+ FOR_FLOAT64_INPUTS(x) {
+ if (!PlatformCanRepresent(x)) continue;
+ FOR_FLOAT64_INPUTS(y) {
+      if (!PlatformCanRepresent(y)) continue;
+ double expected = expected_op(x, y);
+ if (!PlatformCanRepresent(expected)) continue;
+ r.Call(x, y);
+ for (int i = 0; i < 2; i++) {
+ double actual = ReadLittleEndianValue<double>(&g[i]);
+ CheckDoubleResult(x, y, expected, actual, true /* exact */);
+ }
+ }
+ }
+
+ FOR_FLOAT64_NAN_INPUTS(i) {
+ double x = bit_cast<double>(double_nan_test_array[i]);
+ if (!PlatformCanRepresent(x)) continue;
+ FOR_FLOAT64_NAN_INPUTS(j) {
+ double y = bit_cast<double>(double_nan_test_array[j]);
+ double expected = expected_op(x, y);
+ if (!PlatformCanRepresent(expected)) continue;
+ r.Call(x, y);
+ for (int i = 0; i < 2; i++) {
+ double actual = ReadLittleEndianValue<double>(&g[i]);
+ CheckDoubleResult(x, y, expected, actual, true /* exact */);
+ }
+ }
+ }
+}
+
+void RunF64x2CompareOpTest(TestExecutionTier execution_tier,
+ LowerSimd lower_simd, WasmOpcode opcode,
+ DoubleCompareOp expected_op) {
+ WasmRunner<int32_t, double, double> r(execution_tier, lower_simd);
+ // Set up global to hold mask output.
+ int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
+ // Build fn to splat test values, perform compare op, and write the result.
+ byte value1 = 0, value2 = 1;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ byte temp2 = r.AllocateLocal(kWasmS128);
+  // Make the lanes of each temp compare differently:
+  // temp1 = {x, y} and temp2 = {y, y} (lane 0 listed first), so lane 0
+  // computes op(x, y) and lane 1 computes op(y, y).
+ BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp1,
+ WASM_SIMD_F64x2_REPLACE_LANE(1, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(value2))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_ONE);
+
+ FOR_FLOAT64_INPUTS(x) {
+ if (!PlatformCanRepresent(x)) continue;
+ FOR_FLOAT64_INPUTS(y) {
+ if (!PlatformCanRepresent(y)) continue;
+ double diff = x - y; // Model comparison as subtraction.
+ if (!PlatformCanRepresent(diff)) continue;
+ r.Call(x, y);
+ int64_t expected0 = expected_op(x, y);
+ int64_t expected1 = expected_op(y, y);
+ CHECK_EQ(expected0, ReadLittleEndianValue<int64_t>(&g[0]));
+ CHECK_EQ(expected1, ReadLittleEndianValue<int64_t>(&g[1]));
+ }
+ }
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
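
All of these Run*Test helpers follow the same shape: splat the scalar inputs into s128 locals, apply the opcode under test, store the result into an s128 global, and compare every lane against a scalar reference function. The call sites are not part of this hunk, so the wrapper below is only a hypothetical sketch of how a cctest would drive one of them; it assumes the surrounding fixture supplies execution_tier and lower_simd, as V8's SIMD cctests do.

    // Hypothetical call site; kExprI32x4Add and base::AddWithWraparound are
    // the opcode and the wrapping scalar reference for an i32x4.add test.
    void RunI32x4AddExample(TestExecutionTier execution_tier,
                            LowerSimd lower_simd) {
      RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Add,
                        base::AddWithWraparound);
    }
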
diff --git a/deps/v8/test/cctest/wasm/wasm-simd-utils.h b/deps/v8/test/cctest/wasm/wasm-simd-utils.h
new file mode 100644
index 00000000000..157731df27b
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/wasm-simd-utils.h
@@ -0,0 +1,177 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "src/base/macros.h"
+#include "src/wasm/compilation-environment.h"
+#include "src/wasm/wasm-opcodes.h"
+#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/common/wasm/wasm-macro-gen.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+using Int8UnOp = int8_t (*)(int8_t);
+using Int8BinOp = int8_t (*)(int8_t, int8_t);
+using Uint8BinOp = uint8_t (*)(uint8_t, uint8_t);
+using Int8CompareOp = int (*)(int8_t, int8_t);
+using Int8ShiftOp = int8_t (*)(int8_t, int);
+
+using Int16UnOp = int16_t (*)(int16_t);
+using Int16BinOp = int16_t (*)(int16_t, int16_t);
+using Uint16BinOp = uint16_t (*)(uint16_t, uint16_t);
+using Int16ShiftOp = int16_t (*)(int16_t, int);
+using Int32UnOp = int32_t (*)(int32_t);
+using Int32BinOp = int32_t (*)(int32_t, int32_t);
+using Int32ShiftOp = int32_t (*)(int32_t, int);
+using Int64UnOp = int64_t (*)(int64_t);
+using Int64BinOp = int64_t (*)(int64_t, int64_t);
+using Int64ShiftOp = int64_t (*)(int64_t, int);
+using FloatUnOp = float (*)(float);
+using FloatBinOp = float (*)(float, float);
+using FloatCompareOp = int32_t (*)(float, float);
+using DoubleUnOp = double (*)(double);
+using DoubleBinOp = double (*)(double, double);
+using DoubleCompareOp = int64_t (*)(double, double);
+
+void RunI8x16UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int8UnOp expected_op);
+
+template <typename T = int8_t, typename OpType = T (*)(T, T)>
+void RunI8x16BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, OpType expected_op);
+
+void RunI8x16ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int8ShiftOp expected_op);
+void RunI8x16MixedRelationalOpTest(TestExecutionTier execution_tier,
+ LowerSimd lower_simd, WasmOpcode opcode,
+ Int8BinOp expected_op);
+
+void RunI16x8UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int16UnOp expected_op);
+template <typename T = int16_t, typename OpType = T (*)(T, T)>
+void RunI16x8BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, OpType expected_op);
+void RunI16x8ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int16ShiftOp expected_op);
+void RunI16x8MixedRelationalOpTest(TestExecutionTier execution_tier,
+ LowerSimd lower_simd, WasmOpcode opcode,
+ Int16BinOp expected_op);
+
+void RunI32x4UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int32UnOp expected_op);
+void RunI32x4BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int32BinOp expected_op);
+void RunI32x4ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int32ShiftOp expected_op);
+
+void RunI64x2UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int64UnOp expected_op);
+void RunI64x2BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int64BinOp expected_op);
+void RunI64x2ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int64ShiftOp expected_op);
+
+// Generic expected value functions.
+template <typename T, typename = typename std::enable_if<
+ std::is_floating_point<T>::value>::type>
+T Negate(T a) {
+ return -a;
+}
+
+#if V8_OS_AIX
+template <typename T>
+bool MightReverseSign(T float_op) {
+ return float_op == static_cast<T>(Negate) ||
+ float_op == static_cast<T>(std::abs);
+}
+#endif
+
+// Test some values not included in the float inputs from value_helper. These
+// tests are useful for opcodes that are synthesized during code gen, like Min
+// and Max on ia32 and x64.
+static constexpr uint32_t nan_test_array[] = {
+ // Bit patterns of quiet NaNs and signaling NaNs, with or without
+ // additional payload.
+ 0x7FC00000, 0xFFC00000, 0x7FFFFFFF, 0xFFFFFFFF, 0x7F876543, 0xFF876543,
+ // NaN with top payload bit unset.
+ 0x7FA00000,
+ // Both Infinities.
+ 0x7F800000, 0xFF800000,
+ // Some "normal" numbers, 1 and -1.
+ 0x3F800000, 0xBF800000};
+
+#define FOR_FLOAT32_NAN_INPUTS(i) \
+ for (size_t i = 0; i < arraysize(nan_test_array); ++i)
+
+// Test some values not included in the double inputs from value_helper. These
+// tests are useful for opcodes that are synthesized during code gen, like Min
+// and Max on ia32 and x64.
+static constexpr uint64_t double_nan_test_array[] = {
+ // quiet NaNs, + and -
+ 0x7FF8000000000001, 0xFFF8000000000001,
+ // with payload
+ 0x7FF8000000000011, 0xFFF8000000000011,
+ // signaling NaNs, + and -
+ 0x7FF0000000000001, 0xFFF0000000000001,
+ // with payload
+ 0x7FF0000000000011, 0xFFF0000000000011,
+ // Both Infinities.
+ 0x7FF0000000000000, 0xFFF0000000000000,
+ // Some "normal" numbers, 1 and -1.
+ 0x3FF0000000000000, 0xBFF0000000000000};
+
+#define FOR_FLOAT64_NAN_INPUTS(i) \
+ for (size_t i = 0; i < arraysize(double_nan_test_array); ++i)
+
+// Returns true if the platform can represent the result.
+template <typename T>
+bool PlatformCanRepresent(T x) {
+#if V8_TARGET_ARCH_ARM
+ return std::fpclassify(x) != FP_SUBNORMAL;
+#else
+ return true;
+#endif
+}
+
+// Returns true for very small and very large numbers. We skip these test
+// values for the approximation instructions, which don't work at the extremes.
+bool IsExtreme(float x);
+bool IsSameNan(float expected, float actual);
+bool IsCanonical(float actual);
+void CheckFloatResult(float x, float y, float expected, float actual,
+ bool exact = true);
+
+bool IsExtreme(double x);
+bool IsSameNan(double expected, double actual);
+bool IsCanonical(double actual);
+void CheckDoubleResult(double x, double y, double expected, double actual,
+ bool exact = true);
+
+void RunF32x4UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, FloatUnOp expected_op,
+ bool exact = true);
+
+void RunF32x4BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, FloatBinOp expected_op);
+
+void RunF32x4CompareOpTest(TestExecutionTier execution_tier,
+ LowerSimd lower_simd, WasmOpcode opcode,
+ FloatCompareOp expected_op);
+
+void RunF64x2UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, DoubleUnOp expected_op,
+ bool exact = true);
+void RunF64x2BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, DoubleBinOp expected_op);
+void RunF64x2CompareOpTest(TestExecutionTier execution_tier,
+ LowerSimd lower_simd, WasmOpcode opcode,
+ DoubleCompareOp expected_op);
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
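
The NaN-related helpers declared above hinge on two bit-level facts for IEEE-754 binary32: the quiet bit is the top mantissa bit (0x00400000), and the canonical NaN keeps only the exponent and quiet bits with an empty payload. A self-contained sketch of the same checks, using raw bit patterns in the same style as nan_test_array (illustration only, not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kQuietBit = 0x00400000;
      const uint32_t kCanonicalNan = 0x7FC00000;  // +NaN, quiet bit, no payload.
      const uint32_t kSignalingNan = 0x7F800001;  // Quiet bit clear, payload 1.

      // "Same NaN" above ignores the sign bit and tolerates quieting: a
      // signaling NaN may come back with the quiet bit set, payload preserved.
      assert(((kSignalingNan | kQuietBit) & ~0x80000000u) ==
             ((0xFF800001u | kQuietBit) & ~0x80000000u));

      // Canonical NaN: no bits outside sign/exponent/quiet bit may be set.
      assert((kCanonicalNan & 0xFFC00000u) == kCanonicalNan);
      assert((kSignalingNan & 0xFFC00000u) != kSignalingNan);
      return 0;
    }
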
diff --git a/deps/v8/test/common/wasm/test-signatures.h b/deps/v8/test/common/wasm/test-signatures.h
index fb1a1fcddf1..ba021366cb5 100644
--- a/deps/v8/test/common/wasm/test-signatures.h
+++ b/deps/v8/test/common/wasm/test-signatures.h
@@ -121,8 +121,8 @@ class TestSignatures {
FunctionSig* iii_v() { return &sig_iii_v; }
FunctionSig* many(Zone* zone, ValueType ret, ValueType param, int count) {
- FunctionSig::Builder builder(zone, ret == kWasmStmt ? 0 : 1, count);
- if (ret != kWasmStmt) builder.AddReturn(ret);
+ FunctionSig::Builder builder(zone, ret == kWasmVoid ? 0 : 1, count);
+ if (ret != kWasmVoid) builder.AddReturn(ret);
for (int i = 0; i < count; i++) {
builder.AddParam(param);
}
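
The rename from kWasmStmt to kWasmVoid above is mechanical; many() still builds a signature with an optional single return and count copies of the same parameter type. A hypothetical use, assuming a Zone* named zone from the surrounding test (not shown in this hunk):

    TestSignatures sigs;
    // Three i32 parameters, one i32 return.
    FunctionSig* sig_i_iii = sigs.many(zone, kWasmI32, kWasmI32, 3);
    // Two f64 parameters, no return (kWasmVoid suppresses the return).
    FunctionSig* sig_v_dd = sigs.many(zone, kWasmVoid, kWasmF64, 2);
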
diff --git a/deps/v8/test/common/wasm/wasm-interpreter.cc b/deps/v8/test/common/wasm/wasm-interpreter.cc
index 4a4d08524a4..9f7217699b6 100644
--- a/deps/v8/test/common/wasm/wasm-interpreter.cc
+++ b/deps/v8/test/common/wasm/wasm-interpreter.cc
@@ -894,8 +894,14 @@ class SideTable : public ZoneObject {
}
TRACE("control @%u: Try, arity %d->%d\n", i.pc_offset(),
imm.in_arity(), imm.out_arity());
- CLabel* end_label = CLabel::New(&control_transfer_zone, stack_height,
- imm.out_arity());
+ int target_stack_height = stack_height - imm.in_arity();
+ if (target_stack_height < 0) {
+ // Allowed in unreachable code, but the stack height stays at 0.
+ DCHECK(unreachable);
+ target_stack_height = 0;
+ }
+ CLabel* end_label = CLabel::New(&control_transfer_zone,
+ target_stack_height, imm.out_arity());
CLabel* catch_label =
CLabel::New(&control_transfer_zone, stack_height, 0);
control_stack.emplace_back(i.pc(), end_label, catch_label,
@@ -940,6 +946,7 @@ class SideTable : public ZoneObject {
TRACE("control @%u: End\n", i.pc_offset());
// Only loops have bound labels.
DCHECK_IMPLIES(c->end_label->target, *c->pc == kExprLoop);
+ bool rethrow = false;
if (!c->end_label->target) {
if (c->else_label) {
if (*c->pc == kExprIf) {
@@ -948,30 +955,34 @@ class SideTable : public ZoneObject {
} else if (!exception_stack.empty()) {
// No catch_all block, prepare for implicit rethrow.
DCHECK_EQ(*c->pc, kExprTry);
- Control* next_try_block =
- &control_stack[exception_stack.back()];
constexpr int kUnusedControlIndex = -1;
c->else_label->Bind(i.pc(), kRethrowOrDelegateExceptionIndex,
kUnusedControlIndex);
- if (!unreachable) {
- next_try_block->else_label->Ref(
- i.pc(), c->else_label->target_stack_height);
- }
+ DCHECK_IMPLIES(
+ !unreachable,
+ stack_height >= c->else_label->target_stack_height);
+ stack_height = c->else_label->target_stack_height;
+ rethrow = !unreachable;
}
} else if (c->unwind) {
DCHECK_EQ(*c->pc, kExprTry);
rethrow_map_.emplace(i.pc() - i.start(),
static_cast<int>(control_stack.size()) - 1);
if (!exception_stack.empty()) {
- Control* next_try_block =
- &control_stack[exception_stack.back()];
- if (!unreachable) {
- next_try_block->else_label->Ref(i.pc(), stack_height);
- }
+ rethrow = !unreachable;
}
}
c->end_label->Bind(i.pc() + 1);
}
+ if (rethrow) {
+ Control* next_try_block = &control_stack[exception_stack.back()];
+ next_try_block->else_label->Ref(i.pc(), stack_height);
+            // We normally update the max stack height before the switch.
+            // However, 'end' is not in the list of throwing opcodes, so we
+            // don't take into account that it may unpack an exception.
+ max_stack_height_ =
+ std::max(max_stack_height_, stack_height + max_exception_arity);
+ }
c->Finish(&map_, code->start);
DCHECK_IMPLIES(!unreachable,
@@ -1345,25 +1356,23 @@ class WasmInterpreterInternals {
StackValue(WasmValue v, WasmInterpreterInternals* impl, sp_t index)
: value_(v) {
if (IsReferenceValue()) {
- value_ = WasmValue(Handle<Object>::null());
+ value_ = WasmValue(Handle<Object>::null(), value_.type());
int ref_index = static_cast<int>(index);
- impl->reference_stack_->set(ref_index, *v.to_externref());
+ impl->reference_stack_->set(ref_index, *v.to_ref());
}
}
WasmValue ExtractValue(WasmInterpreterInternals* impl, sp_t index) {
if (!IsReferenceValue()) return value_;
- DCHECK(value_.to_externref().is_null());
+ DCHECK(value_.to_ref().is_null());
int ref_index = static_cast<int>(index);
Isolate* isolate = impl->isolate_;
Handle<Object> ref(impl->reference_stack_->get(ref_index), isolate);
DCHECK(!ref->IsTheHole(isolate));
- return WasmValue(ref);
+ return WasmValue(ref, value_.type());
}
- bool IsReferenceValue() const {
- return value_.type().is_reference_to(HeapType::kExtern);
- }
+ bool IsReferenceValue() const { return value_.type().is_reference(); }
void ClearValue(WasmInterpreterInternals* impl, sp_t index) {
if (!IsReferenceValue()) return;
@@ -1433,13 +1442,13 @@ class WasmInterpreterInternals {
FOREACH_WASMVALUE_CTYPES(CASE_TYPE)
#undef CASE_TYPE
case kOptRef: {
- val = WasmValue(isolate_->factory()->null_value());
+ val = WasmValue(isolate_->factory()->null_value(), p);
break;
}
case kRef: // TODO(7748): Implement.
case kRtt:
case kRttWithDepth:
- case kStmt:
+ case kVoid:
case kBottom:
case kI8:
case kI16:
@@ -1823,7 +1832,7 @@ class WasmInterpreterInternals {
return true;
case kExprMemoryInit: {
MemoryInitImmediate<Decoder::kNoValidation> imm(decoder,
- code->at(pc + 2));
+ code->at(pc + *len));
// The data segment index must be in bounds since it is required by
// validation.
DCHECK_LT(imm.data_segment_index, module()->num_declared_data_segments);
@@ -1848,7 +1857,7 @@ class WasmInterpreterInternals {
}
case kExprDataDrop: {
DataDropImmediate<Decoder::kNoValidation> imm(decoder,
- code->at(pc + 2));
+ code->at(pc + *len));
// The data segment index must be in bounds since it is required by
// validation.
DCHECK_LT(imm.index, module()->num_declared_data_segments);
@@ -1858,7 +1867,7 @@ class WasmInterpreterInternals {
}
case kExprMemoryCopy: {
MemoryCopyImmediate<Decoder::kNoValidation> imm(decoder,
- code->at(pc + 2));
+ code->at(pc + *len));
*len += imm.length;
uint64_t size = ToMemType(Pop());
uint64_t src = ToMemType(Pop());
@@ -1877,7 +1886,7 @@ class WasmInterpreterInternals {
}
case kExprMemoryFill: {
MemoryIndexImmediate<Decoder::kNoValidation> imm(decoder,
- code->at(pc + 2));
+ code->at(pc + *len));
*len += imm.length;
uint64_t size = ToMemType(Pop());
uint32_t value = Pop().to<uint32_t>();
@@ -1892,7 +1901,7 @@ class WasmInterpreterInternals {
}
case kExprTableInit: {
TableInitImmediate<Decoder::kNoValidation> imm(decoder,
- code->at(pc + 2));
+ code->at(pc + *len));
*len += imm.length;
auto size = Pop().to<uint32_t>();
auto src = Pop().to<uint32_t>();
@@ -1906,14 +1915,14 @@ class WasmInterpreterInternals {
}
case kExprElemDrop: {
ElemDropImmediate<Decoder::kNoValidation> imm(decoder,
- code->at(pc + 2));
+ code->at(pc + *len));
*len += imm.length;
instance_object_->dropped_elem_segments()[imm.index] = 1;
return true;
}
case kExprTableCopy: {
TableCopyImmediate<Decoder::kNoValidation> imm(decoder,
- code->at(pc + 2));
+ code->at(pc + *len));
auto size = Pop().to<uint32_t>();
auto src = Pop().to<uint32_t>();
auto dst = Pop().to<uint32_t>();
@@ -1927,13 +1936,13 @@ class WasmInterpreterInternals {
}
case kExprTableGrow: {
TableIndexImmediate<Decoder::kNoValidation> imm(decoder,
- code->at(pc + 2));
+ code->at(pc + *len));
HandleScope handle_scope(isolate_);
auto table = handle(
WasmTableObject::cast(instance_object_->tables().get(imm.index)),
isolate_);
auto delta = Pop().to<uint32_t>();
- auto value = Pop().to_externref();
+ auto value = Pop().to_ref();
int32_t result = WasmTableObject::Grow(isolate_, table, delta, value);
Push(WasmValue(result));
*len += imm.length;
@@ -1941,7 +1950,7 @@ class WasmInterpreterInternals {
}
case kExprTableSize: {
TableIndexImmediate<Decoder::kNoValidation> imm(decoder,
- code->at(pc + 2));
+ code->at(pc + *len));
HandleScope handle_scope(isolate_);
auto table = handle(
WasmTableObject::cast(instance_object_->tables().get(imm.index)),
@@ -1953,10 +1962,10 @@ class WasmInterpreterInternals {
}
case kExprTableFill: {
TableIndexImmediate<Decoder::kNoValidation> imm(decoder,
- code->at(pc + 2));
+ code->at(pc + *len));
HandleScope handle_scope(isolate_);
auto count = Pop().to<uint32_t>();
- auto value = Pop().to_externref();
+ auto value = Pop().to_ref();
auto start = Pop().to<uint32_t>();
auto table = handle(
@@ -2391,12 +2400,11 @@ class WasmInterpreterInternals {
BINOP_CASE(I16x8SubSatS, i16x8, int8, 8, SaturateSub<int16_t>(a, b))
BINOP_CASE(I16x8SubSatU, i16x8, int8, 8, SaturateSub<uint16_t>(a, b))
BINOP_CASE(I16x8RoundingAverageU, i16x8, int8, 8,
- base::RoundingAverageUnsigned<uint16_t>(a, b))
+ RoundingAverageUnsigned<uint16_t>(a, b))
BINOP_CASE(I16x8Q15MulRSatS, i16x8, int8, 8,
SaturateRoundingQMul<int16_t>(a, b))
BINOP_CASE(I8x16Add, i8x16, int16, 16, base::AddWithWraparound(a, b))
BINOP_CASE(I8x16Sub, i8x16, int16, 16, base::SubWithWraparound(a, b))
- BINOP_CASE(I8x16Mul, i8x16, int16, 16, base::MulWithWraparound(a, b))
BINOP_CASE(I8x16MinS, i8x16, int16, 16, a < b ? a : b)
BINOP_CASE(I8x16MinU, i8x16, int16, 16,
static_cast<uint8_t>(a) < static_cast<uint8_t>(b) ? a : b)
@@ -2408,7 +2416,7 @@ class WasmInterpreterInternals {
BINOP_CASE(I8x16SubSatS, i8x16, int16, 16, SaturateSub<int8_t>(a, b))
BINOP_CASE(I8x16SubSatU, i8x16, int16, 16, SaturateSub<uint8_t>(a, b))
BINOP_CASE(I8x16RoundingAverageU, i8x16, int16, 16,
- base::RoundingAverageUnsigned<uint8_t>(a, b))
+ RoundingAverageUnsigned<uint8_t>(a, b))
#undef BINOP_CASE
#define UNOP_CASE(op, name, stype, count, expr) \
case kExpr##op: { \
@@ -2753,28 +2761,6 @@ class WasmInterpreterInternals {
Push(WasmValue(Simd128(res)));
return true;
}
-#define ADD_HORIZ_CASE(op, name, stype, count) \
- case kExpr##op: { \
- WasmValue v2 = Pop(); \
- WasmValue v1 = Pop(); \
- stype s1 = v1.to_s128().to_##name(); \
- stype s2 = v2.to_s128().to_##name(); \
- stype res; \
- for (size_t i = 0; i < count / 2; ++i) { \
- auto result1 = s1.val[LANE(i * 2, s1)] + s1.val[LANE(i * 2 + 1, s1)]; \
- possible_nondeterminism_ |= has_nondeterminism(result1); \
- res.val[LANE(i, res)] = result1; \
- auto result2 = s2.val[LANE(i * 2, s2)] + s2.val[LANE(i * 2 + 1, s2)]; \
- possible_nondeterminism_ |= has_nondeterminism(result2); \
- res.val[LANE(i + count / 2, res)] = result2; \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
- }
- ADD_HORIZ_CASE(I32x4AddHoriz, i32x4, int4, 4)
- ADD_HORIZ_CASE(F32x4AddHoriz, f32x4, float4, 4)
- ADD_HORIZ_CASE(I16x8AddHoriz, i16x8, int8, 8)
-#undef ADD_HORIZ_CASE
case kExprI32x4DotI16x8S: {
int8 v2 = Pop().to_s128().to_i16x8();
int8 v1 = Pop().to_s128().to_i16x8();
@@ -2844,10 +2830,10 @@ class WasmInterpreterInternals {
Push(WasmValue(res)); \
return true; \
}
- REDUCTION_CASE(V64x2AllTrue, i64x2, int2, 2, &)
- REDUCTION_CASE(V32x4AllTrue, i32x4, int4, 4, &)
- REDUCTION_CASE(V16x8AllTrue, i16x8, int8, 8, &)
- REDUCTION_CASE(V8x16AllTrue, i8x16, int16, 16, &)
+ REDUCTION_CASE(I64x2AllTrue, i64x2, int2, 2, &)
+ REDUCTION_CASE(I32x4AllTrue, i32x4, int4, 4, &)
+ REDUCTION_CASE(I16x8AllTrue, i16x8, int8, 8, &)
+ REDUCTION_CASE(I8x16AllTrue, i8x16, int16, 16, &)
#undef REDUCTION_CASE
#define QFM_CASE(op, name, stype, count, operation) \
case kExpr##op: { \
@@ -2947,18 +2933,6 @@ class WasmInterpreterInternals {
return DoSimdStoreLane<int2, int64_t, int64_t>(
decoder, code, pc, len, MachineRepresentation::kWord64);
}
- case kExprI8x16SignSelect: {
- return DoSimdSignSelect<int16>();
- }
- case kExprI16x8SignSelect: {
- return DoSimdSignSelect<int8>();
- }
- case kExprI32x4SignSelect: {
- return DoSimdSignSelect<int4>();
- }
- case kExprI64x2SignSelect: {
- return DoSimdSignSelect<int2>();
- }
case kExprI32x4ExtAddPairwiseI16x8S: {
return DoSimdExtAddPairwise<int4, int8, int32_t, int16_t>();
}
@@ -2971,16 +2945,6 @@ class WasmInterpreterInternals {
case kExprI16x8ExtAddPairwiseI8x16U: {
return DoSimdExtAddPairwise<int8, int16, uint16_t, uint8_t>();
}
- case kExprPrefetchT:
- case kExprPrefetchNT: {
- // Max alignment doesn't matter, use an arbitrary value.
- MemoryAccessImmediate<Decoder::kNoValidation> imm(
- decoder, code->at(pc + *len), 4, module()->is_memory64);
- // Pop address and do nothing.
- Pop().to<uint32_t>();
- *len += imm.length;
- return true;
- }
default:
return false;
}
@@ -3071,7 +3035,8 @@ class WasmInterpreterInternals {
SimdLaneImmediate<Decoder::kNoValidation> lane_imm(
decoder, code->at(pc + *len + imm.length));
- Push(WasmValue(value.val[LANE(lane_imm.lane, value)]));
+ Push(WasmValue(
+ static_cast<result_type>(value.val[LANE(lane_imm.lane, value)])));
// ExecuteStore will update the len, so pass it unchanged here.
if (!ExecuteStore<result_type, load_type>(decoder, code, pc, len, rep,
@@ -3101,21 +3066,6 @@ class WasmInterpreterInternals {
return true;
}
- template <typename s_type>
- bool DoSimdSignSelect() {
- constexpr int lanes = kSimd128Size / sizeof(s_type::val[0]);
- auto c = Pop().to_s128().to<s_type>();
- auto v2 = Pop().to_s128().to<s_type>();
- auto v1 = Pop().to_s128().to<s_type>();
- s_type res;
- for (int i = 0; i < lanes; ++i) {
- res.val[LANE(i, res)] =
- c.val[LANE(i, c)] < 0 ? v1.val[LANE(i, v1)] : v2.val[LANE(i, v2)];
- }
- Push(WasmValue(Simd128(res)));
- return true;
- }
-
template <typename DstSimdType, typename SrcSimdType, typename Wide,
typename Narrow>
bool DoSimdExtAddPairwise() {
@@ -3156,6 +3106,9 @@ class WasmInterpreterInternals {
// it to 0 here such that we report the same position as in compiled code.
frames_.back().pc = 0;
isolate_->StackOverflow();
+ if (FLAG_experimental_wasm_eh) {
+ possible_nondeterminism_ = true;
+ }
if (HandleException(isolate_) == WasmInterpreter::HANDLED) {
ReloadFromFrameOnException(decoder, target, pc, limit);
return true;
@@ -3234,8 +3187,8 @@ class WasmInterpreterInternals {
case HeapType::kExtern:
case HeapType::kFunc:
case HeapType::kAny: {
- Handle<Object> externref = value.to_externref();
- encoded_values->set(encoded_index++, *externref);
+ Handle<Object> ref = value.to_ref();
+ encoded_values->set(encoded_index++, *ref);
break;
}
case HeapType::kBottom:
@@ -3254,7 +3207,7 @@ class WasmInterpreterInternals {
case kRttWithDepth:
case kI8:
case kI16:
- case kStmt:
+ case kVoid:
case kBottom:
UNREACHABLE();
}
@@ -3356,9 +3309,9 @@ class WasmInterpreterInternals {
case HeapType::kExtern:
case HeapType::kFunc:
case HeapType::kAny: {
- Handle<Object> externref(encoded_values->get(encoded_index++),
- isolate_);
- value = WasmValue(externref);
+ Handle<Object> ref(encoded_values->get(encoded_index++),
+ isolate_);
+ value = WasmValue(ref, sig->GetParam(i));
break;
}
default:
@@ -3372,7 +3325,7 @@ class WasmInterpreterInternals {
case kRttWithDepth:
case kI8:
case kI16:
- case kStmt:
+ case kVoid:
case kBottom:
UNREACHABLE();
}
@@ -3605,7 +3558,8 @@ class WasmInterpreterInternals {
HeapTypeImmediate<Decoder::kNoValidation> imm(
WasmFeatures::All(), &decoder, code->at(pc + 1), module());
len = 1 + imm.length;
- Push(WasmValue(isolate_->factory()->null_value()));
+ Push(WasmValue(isolate_->factory()->null_value(),
+ ValueType::Ref(imm.type, kNullable)));
break;
}
case kExprRefFunc: {
@@ -3616,7 +3570,7 @@ class WasmInterpreterInternals {
Handle<WasmExternalFunction> function =
WasmInstanceObject::GetOrCreateWasmExternalFunction(
isolate_, instance_object_, imm.index);
- Push(WasmValue(function));
+ Push(WasmValue(function, kWasmFuncRef));
len = 1 + imm.length;
break;
}
@@ -3762,7 +3716,7 @@ class WasmInterpreterInternals {
std::tie(global_buffer, global_index) =
WasmInstanceObject::GetGlobalBufferAndIndex(instance_object_,
global);
- Handle<Object> ref = Pop().to_externref();
+ Handle<Object> ref = Pop().to_ref();
global_buffer->set(global_index, *ref);
break;
}
@@ -3770,7 +3724,7 @@ class WasmInterpreterInternals {
case kRttWithDepth:
case kI8:
case kI16:
- case kStmt:
+ case kVoid:
case kBottom:
UNREACHABLE();
}
@@ -3791,7 +3745,7 @@ class WasmInterpreterInternals {
}
Handle<Object> value =
WasmTableObject::Get(isolate_, table, entry_index);
- Push(WasmValue(value));
+ Push(WasmValue(value, table->type()));
len = 1 + imm.length;
break;
}
@@ -3803,7 +3757,7 @@ class WasmInterpreterInternals {
WasmTableObject::cast(instance_object_->tables().get(imm.index)),
isolate_);
uint32_t table_size = table->current_length();
- Handle<Object> value = Pop().to_externref();
+ Handle<Object> value = Pop().to_ref();
uint32_t entry_index = Pop().to<uint32_t>();
if (entry_index >= table_size) {
return DoTrap(kTrapTableOutOfBounds, pc);
@@ -3953,7 +3907,7 @@ class WasmInterpreterInternals {
case kExprRefIsNull: {
len = 1;
HandleScope handle_scope(isolate_); // Avoid leaking handles.
- uint32_t result = Pop().to_externref()->IsNull() ? 1 : 0;
+ uint32_t result = Pop().to_ref()->IsNull() ? 1 : 0;
Push(WasmValue(result));
break;
}
@@ -4071,7 +4025,9 @@ class WasmInterpreterInternals {
}
void Push(WasmValue val) {
- DCHECK_NE(kWasmStmt, val.type());
+ DCHECK_NE(kWasmVoid, val.type());
+ DCHECK_NE(kWasmI8, val.type());
+ DCHECK_NE(kWasmI16, val.type());
DCHECK_LE(1, stack_limit_ - sp_);
DCHECK(StackValue::IsClearedValue(this, StackHeight()));
StackValue stack_value(val, this, StackHeight());
@@ -4083,7 +4039,7 @@ class WasmInterpreterInternals {
void Push(WasmValue* vals, size_t arity) {
DCHECK_LE(arity, stack_limit_ - sp_);
for (WasmValue *val = vals, *end = vals + arity; val != end; ++val) {
- DCHECK_NE(kWasmStmt, val->type());
+ DCHECK_NE(kWasmVoid, val->type());
Push(*val);
}
}
@@ -4160,13 +4116,13 @@ class WasmInterpreterInternals {
PrintF("i32x4:%d,%d,%d,%d", s.val[0], s.val[1], s.val[2], s.val[3]);
break;
}
- case kStmt:
+ case kVoid:
PrintF("void");
break;
case kRef:
case kOptRef: {
if (val.type().is_reference_to(HeapType::kExtern)) {
- Handle<Object> ref = val.to_externref();
+ Handle<Object> ref = val.to_ref();
if (ref->IsNull()) {
PrintF("ref:null");
} else {
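
A recurring change in the interpreter hunk above replaces the hard-coded code->at(pc + 2) with code->at(pc + *len) when reading the immediates of prefixed opcodes (memory.init, table.copy, and friends). These opcodes are a prefix byte followed by a LEB128-encoded sub-opcode, so the opcode itself is not guaranteed to be exactly two bytes long; *len already holds its decoded length, which is where the immediates begin. A standalone sketch of why the same sub-opcode can occupy a different number of bytes (values chosen for illustration):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Decode an unsigned LEB128 value and report how many bytes it consumed.
    uint32_t DecodeLeb128U32(const uint8_t* bytes, size_t* length) {
      uint32_t result = 0;
      int shift = 0;
      size_t i = 0;
      uint8_t byte;
      do {
        byte = bytes[i++];
        result |= static_cast<uint32_t>(byte & 0x7F) << shift;
        shift += 7;
      } while (byte & 0x80);
      *length = i;
      return result;
    }

    int main() {
      // Sub-opcode 8 in its shortest encoding (one byte) and in a redundant,
      // still-valid two-byte encoding.
      const uint8_t short_form[] = {0x08};
      const uint8_t long_form[] = {0x88, 0x00};
      size_t len = 0;
      assert(DecodeLeb128U32(short_form, &len) == 8 && len == 1);
      assert(DecodeLeb128U32(long_form, &len) == 8 && len == 2);
      return 0;
    }
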
diff --git a/deps/v8/test/common/wasm/wasm-macro-gen.h b/deps/v8/test/common/wasm/wasm-macro-gen.h
index 7ddc32fc89f..ee51d6a0dd0 100644
--- a/deps/v8/test/common/wasm/wasm-macro-gen.h
+++ b/deps/v8/test/common/wasm/wasm-macro-gen.h
@@ -807,7 +807,7 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
//------------------------------------------------------------------------------
// Memory Operations.
//------------------------------------------------------------------------------
-#define WASM_GROW_MEMORY(x) x, kExprMemoryGrow, 0
+#define WASM_MEMORY_GROW(x) x, kExprMemoryGrow, 0
#define WASM_MEMORY_SIZE kExprMemorySize, 0
#define SIG_ENTRY_v_v kWasmFunctionTypeCode, 0, 0
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.cc b/deps/v8/test/common/wasm/wasm-module-runner.cc
index c74d0ec56c7..770b320dfdf 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.cc
+++ b/deps/v8/test/common/wasm/wasm-module-runner.cc
@@ -66,14 +66,15 @@ OwnedVector<WasmValue> MakeDefaultInterpreterArguments(Isolate* isolate,
break;
case kOptRef:
arguments[i] =
- WasmValue(Handle<Object>::cast(isolate->factory()->null_value()));
+ WasmValue(Handle<Object>::cast(isolate->factory()->null_value()),
+ sig->GetParam(i));
break;
case kRef:
case kRtt:
case kRttWithDepth:
case kI8:
case kI16:
- case kStmt:
+ case kVoid:
case kBottom:
UNREACHABLE();
}
@@ -108,7 +109,7 @@ OwnedVector<Handle<Object>> MakeDefaultArguments(Isolate* isolate,
case kRttWithDepth:
case kI8:
case kI16:
- case kStmt:
+ case kVoid:
case kBottom:
UNREACHABLE();
}
diff --git a/deps/v8/test/debugger/debug/debug-break-class-fields.js b/deps/v8/test/debugger/debug/debug-break-class-fields.js
index b6b9c932356..02b6c3bb6b2 100644
--- a/deps/v8/test/debugger/debug/debug-break-class-fields.js
+++ b/deps/v8/test/debugger/debug/debug-break-class-fields.js
@@ -64,76 +64,82 @@ assertTrue(Debug.showBreakPoints(initializer).indexOf("y = [B0]2;") === -1);
Debug.clearBreakPoint(b3);
assertTrue(Debug.showBreakPoints(initializer).indexOf("z = [B1]3") === -1);
+// The computed properties are evaluated during class construction,
+// not as part of the initializer function. As a consequence, they
+// aren't breakable here in the initializer function; instead, they
+// are part of the enclosing function.
+
function foo() {}
-var bar = "bar";
+var bar = 'bar';
class X {
[foo()] = 1;
- [bar] = 2;
baz = foo();
}
-// The computed properties are evaluated during class construction,
-// not as part of the initializer function. As a consequence of which,
-// they aren't breakable here in the initializer function, but
-// instead, are part of the enclosing function.
-//
// class X {
-// [foo()] = [B0]1;
-// [bar] = [B1]2;
-// [baz] = [B2]foo();
+// [foo()] = 1;
+// baz = [B0]foo();
// }
initializer = %GetInitializerFunction(X);
b1 = Debug.setBreakPoint(initializer, 0, 0);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("[foo()] = [B0]1;") === 0);
+assertTrue(Debug.showBreakPoints(initializer).indexOf('[foo()] = 1;') === 0);
Debug.clearBreakPoint(b1);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("[foo()] = [B0]1;") === -1);
-b2 = Debug.setBreakPoint(initializer, 1, 0);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("[bar] = [B0]2;") > 0);
+b1 = Debug.setBreakPoint(initializer, 1, 0);
+assertTrue(Debug.showBreakPoints(initializer).indexOf('baz = [B0]foo()') > 0);
+Debug.clearBreakPoint(b1);
+
+function t() {
+ class X {
+ [foo()] = 1;
+ [bar] = 2;
+ baz = foo();
+ }
+}
+
+// class X {
+// [[B0]foo()] = 1;
+// [[B1]bar] = 2;
+// baz = foo();
+// }
+
+b1 = Debug.setBreakPoint(t, 2, 0);
+assertTrue(Debug.showBreakPoints(t).indexOf('[[B0]foo()] = 1;') > 0);
+Debug.clearBreakPoint(b1);
+assertTrue(Debug.showBreakPoints(t).indexOf('[[B0]foo()] = 1;') === -1);
+
+b2 = Debug.setBreakPoint(t, 3, 0);
+assertTrue(Debug.showBreakPoints(t).indexOf('[[B0]bar] = 2;') > 0);
Debug.clearBreakPoint(b2);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("[bar] = [B0]2;") === -1);
+assertTrue(Debug.showBreakPoints(t).indexOf('[[B0]bar] = [B0]2;') === -1);
-b3 = Debug.setBreakPoint(initializer, 2, 0);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("baz = [B0]foo()") > 0);
+b3 = Debug.setBreakPoint(t, 4, 0);
+assertTrue(Debug.showBreakPoints(t).indexOf('baz = foo()') > 0);
Debug.clearBreakPoint(b3);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("baz = [B0]foo()") === -1);
-b1 = Debug.setBreakPoint(initializer, 0, 0);
-b2 = Debug.setBreakPoint(initializer, 1, 0);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("[foo()] = [B0]1;") === 0);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("[bar] = [B1]2;") > 0);
+b1 = Debug.setBreakPoint(t, 2, 0);
+b2 = Debug.setBreakPoint(t, 3, 0);
+assertTrue(Debug.showBreakPoints(t).indexOf('[[B0]foo()] = 1;') > 0);
+assertTrue(Debug.showBreakPoints(t).indexOf('[[B1]bar] = 2;') > 0);
Debug.clearBreakPoint(b1);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("[foo()] = [B0]1;") === -1);
+assertTrue(Debug.showBreakPoints(t).indexOf('[[B0]foo()] = 1;') === -1);
Debug.clearBreakPoint(b2);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("[bar] = [B1]2;") === -1);
+assertTrue(Debug.showBreakPoints(t).indexOf('[[B1]bar] = 2;') === -1);
-b1 = Debug.setBreakPoint(initializer, 0, 0);
-b3 = Debug.setBreakPoint(initializer, 2, 0);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("[foo()] = [B0]1;") === 0);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("baz = [B1]foo()") > 0);
+b1 = Debug.setBreakPoint(t, 2, 0);
+b3 = Debug.setBreakPoint(initializer, 4, 0);
+assertTrue(Debug.showBreakPoints(t).indexOf('[[B0]foo()] = 1;') > 0);
+assertTrue(Debug.showBreakPoints(t).indexOf('baz = foo()') > 0);
Debug.clearBreakPoint(b1);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("[foo()] = [B0]1;") === -1);
+assertTrue(Debug.showBreakPoints(t).indexOf('[[B0]foo()] = 1;') === -1);
Debug.clearBreakPoint(b3);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("baz = [B1]foo()") === -1);
-b2 = Debug.setBreakPoint(initializer, 1, 0);
-b3 = Debug.setBreakPoint(initializer, 2, 0);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("[bar] = [B0]2;") > 0);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("baz = [B1]foo()") > 0);
+b2 = Debug.setBreakPoint(t, 3, 0);
+b3 = Debug.setBreakPoint(t, 4, 0);
+assertTrue(Debug.showBreakPoints(t).indexOf('[[B0]bar] = 2;') > 0);
+assertTrue(Debug.showBreakPoints(t).indexOf('baz = foo()') > 0);
Debug.clearBreakPoint(b2);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("[bar] = [B0]2;") === -1);
+assertTrue(Debug.showBreakPoints(t).indexOf('[[B0]bar] = 2;') === -1);
Debug.clearBreakPoint(b3);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("baz = [B1]foo()") === -1);
-
-function t() {
- class X {
- [foo()] = 1;
- }
-}
-
-b1 = Debug.setBreakPoint(t, 0, 0);
-assertTrue(Debug.showBreakPoints(t).indexOf("[[B0]foo()] = 1;")> 0);
-Debug.clearBreakPoint(b1);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("[[B0]foo()] = 1;") === -1);
diff --git a/deps/v8/test/debugger/debug/es6/debug-step-into-class-extends.js b/deps/v8/test/debugger/debug/es6/debug-step-into-class-extends.js
index fb988bcb572..28329fdcee0 100644
--- a/deps/v8/test/debugger/debug/es6/debug-step-into-class-extends.js
+++ b/deps/v8/test/debugger/debug/es6/debug-step-into-class-extends.js
@@ -34,7 +34,7 @@ function f() {
class Derived extends GetBase() {} // 0.
}
-var bp = Debug.setBreakPoint(f, 0);
+var bp = Debug.setBreakPoint(f, 1, 20);
f();
assertEquals(4, stepCount);
diff --git a/deps/v8/test/debugger/debugger.status b/deps/v8/test/debugger/debugger.status
index 5054dc53273..b862b3cad21 100644
--- a/deps/v8/test/debugger/debugger.status
+++ b/deps/v8/test/debugger/debugger.status
@@ -153,6 +153,12 @@
'debug/wasm/*': [SKIP],
}],
+##############################################################################
+# Tests requiring Sparkplug.
+['arch not in (x64, arm64, ia32, arm)', {
+ 'regress/regress-crbug-1199681': [SKIP],
+}],
+
################################################################################
['variant == stress_snapshot', {
'*': [SKIP], # only relevant for mjsunit tests.
diff --git a/deps/v8/test/debugger/regress/regress-crbug-1199681.js b/deps/v8/test/debugger/regress/regress-crbug-1199681.js
new file mode 100644
index 00000000000..211475250d8
--- /dev/null
+++ b/deps/v8/test/debugger/regress/regress-crbug-1199681.js
@@ -0,0 +1,52 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --sparkplug --allow-natives-syntax
+
+function f() {
+ debugger;
+ return 1;
+}
+
+function g() {
+ return f(); // Break
+}
+
+function h() {
+ return g();
+}
+
+// Ensure f has a feedback vector so that it can be considered for inlining.
+%EnsureFeedbackVectorForFunction(f);
+%CompileBaseline(g);
+
+%PrepareFunctionForOptimization(h);
+h();
+h();
+
+var Debug = debug.Debug;
+var step_count = 0;
+var exception = null;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ if (step_count == 0) {
+ exec_state.prepareStep(Debug.StepAction.StepOut);
+ } else {
+ assertTrue(exec_state.frame().sourceLineText().includes('Break'));
+ }
+ step_count++;
+ } catch (e) {
+ exception = e;
+ print(e);
+ }
+}
+
+Debug.setListener(listener);
+%OptimizeFunctionOnNextCall(h);
+h();
+Debug.setListener(null);
+assertNull(exception);
+assertEquals(2, step_count);
diff --git a/deps/v8/test/debugging/wasm/gdb-server/test_files/test_memory.js b/deps/v8/test/debugging/wasm/gdb-server/test_files/test_memory.js
index fa0743500fb..760e435bfa0 100644
--- a/deps/v8/test/debugging/wasm/gdb-server/test_files/test_memory.js
+++ b/deps/v8/test/debugging/wasm/gdb-server/test_files/test_memory.js
@@ -19,11 +19,11 @@ var func_a_idx =
builder.addFunction('wasm_B', kSig_v_i)
.addBody([
kExprLoop,
- kWasmStmt, // while
+ kWasmVoid, // while
kExprLocalGet,
0, // -
kExprIf,
- kWasmStmt, // if <param0> != 0
+ kWasmVoid, // if <param0> != 0
kExprLocalGet,
0, // -
kExprI32Const,
diff --git a/deps/v8/test/fuzzer/BUILD.gn b/deps/v8/test/fuzzer/BUILD.gn
index 7c837464c58..5bbe62d7063 100644
--- a/deps/v8/test/fuzzer/BUILD.gn
+++ b/deps/v8/test/fuzzer/BUILD.gn
@@ -2,6 +2,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import("../../gni/v8.gni")
+
group("v8_fuzzer") {
testonly = true
@@ -18,10 +20,15 @@ group("v8_fuzzer") {
"./parser/",
"./regexp/",
"./regexp_builtins/",
- "./multi_return/",
- "./wasm/",
- "./wasm_async/",
- "./wasm_code/",
- "./wasm_compile/",
]
+
+ if (v8_enable_webassembly) {
+ data += [
+ "./multi_return/",
+ "./wasm/",
+ "./wasm_async/",
+ "./wasm_code/",
+ "./wasm_compile/",
+ ]
+ }
}
diff --git a/deps/v8/test/fuzzer/fuzzer-support.cc b/deps/v8/test/fuzzer/fuzzer-support.cc
index 06294b9f7e4..8b1523d4e0b 100644
--- a/deps/v8/test/fuzzer/fuzzer-support.cc
+++ b/deps/v8/test/fuzzer/fuzzer-support.cc
@@ -61,12 +61,14 @@ std::unique_ptr<FuzzerSupport> FuzzerSupport::fuzzer_support_;
// static
void FuzzerSupport::InitializeFuzzerSupport(int* argc, char*** argv) {
+#if V8_ENABLE_WEBASSEMBLY
if (V8_TRAP_HANDLER_SUPPORTED && i::FLAG_wasm_trap_handler) {
constexpr bool kUseDefaultTrapHandler = true;
if (!v8::V8::EnableWebAssemblyTrapHandler(kUseDefaultTrapHandler)) {
FATAL("Could not register trap handler");
}
}
+#endif // V8_ENABLE_WEBASSEMBLY
DCHECK_NULL(FuzzerSupport::fuzzer_support_);
FuzzerSupport::fuzzer_support_ =
std::make_unique<v8_fuzzer::FuzzerSupport>(argc, argv);
diff --git a/deps/v8/test/fuzzer/fuzzer.status b/deps/v8/test/fuzzer/fuzzer.status
index f865018cc52..4a8bc4d286c 100644
--- a/deps/v8/test/fuzzer/fuzzer.status
+++ b/deps/v8/test/fuzzer/fuzzer.status
@@ -5,14 +5,14 @@
[
##############################################################################
-['lite_mode or variant == jitless', {
- # TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
+# TODO(v8:7777): Change this once wasm is supported in jitless mode.
+['not has_webassembly or variant == jitless', {
'multi_return/*': [SKIP],
'wasm/*': [SKIP],
'wasm_async/*': [SKIP],
'wasm_code/*': [SKIP],
'wasm_compile/*': [SKIP],
-}], # lite_mode or variant == jitless
+}], # not has_webassembly or variant == jitless
################################################################################
['variant == stress_snapshot', {
diff --git a/deps/v8/test/fuzzer/inspector-fuzzer.cc b/deps/v8/test/fuzzer/inspector-fuzzer.cc
index 7f09f92ae66..77e2402fa8a 100644
--- a/deps/v8/test/fuzzer/inspector-fuzzer.cc
+++ b/deps/v8/test/fuzzer/inspector-fuzzer.cc
@@ -245,10 +245,6 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
v8::FunctionTemplate::New(
isolate, &InspectorExtension::SetMaxAsyncTaskStacks));
inspector->Set(
- ToV8String(isolate, "dumpAsyncTaskStacksStateForTest"),
- v8::FunctionTemplate::New(
- isolate, &InspectorExtension::DumpAsyncTaskStacksStateForTest));
- inspector->Set(
ToV8String(isolate, "breakProgram"),
v8::FunctionTemplate::New(isolate, &InspectorExtension::BreakProgram));
inspector->Set(
diff --git a/deps/v8/test/fuzzer/wasm-async.cc b/deps/v8/test/fuzzer/wasm-async.cc
index 4e8949412a8..b8af27a6b37 100644
--- a/deps/v8/test/fuzzer/wasm-async.cc
+++ b/deps/v8/test/fuzzer/wasm-async.cc
@@ -45,18 +45,14 @@ class AsyncFuzzerResolver : public i::wasm::CompilationResultResolver {
};
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- // We explicitly enable staged WebAssembly features here to increase fuzzer
- // coverage. For libfuzzer fuzzers it is not possible that the fuzzer enables
- // the flag by itself.
- OneTimeEnableStagedWasmFeatures();
+ v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
+ v8::Isolate* isolate = support->GetIsolate();
// Set some more flags.
FLAG_wasm_async_compilation = true;
FLAG_wasm_max_mem_pages = 32;
FLAG_wasm_max_table_size = 100;
- v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
- v8::Isolate* isolate = support->GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<v8::internal::Isolate*>(isolate);
// Clear any pending exceptions from a prior run.
@@ -68,6 +64,12 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
v8::HandleScope handle_scope(isolate);
i::HandleScope internal_scope(i_isolate);
v8::Context::Scope context_scope(support->GetContext());
+
+ // We explicitly enable staged WebAssembly features here to increase fuzzer
+ // coverage. For libfuzzer fuzzers it is not possible that the fuzzer enables
+ // the flag by itself.
+ OneTimeEnableStagedWasmFeatures(isolate);
+
TryCatch try_catch(isolate);
testing::SetupIsolateForWasmModule(i_isolate);
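Annotation (assumption, not stated in the patch): OneTimeEnableStagedWasmFeatures now takes the isolate and queries isolate->GetCurrentContext(), which presumably is why the call moved below the HandleScope and Context::Scope, so that a context is actually entered when it runs. A toy, self-contained sketch of that ordering constraint (plain C++, no V8 types):

#include <cassert>
#include <string>

// Toy model: an empty string means "no context is currently entered".
std::string g_current_context;

struct ContextScope {
  explicit ContextScope(std::string name) { g_current_context = std::move(name); }
  ~ContextScope() { g_current_context.clear(); }
};

void InstallFeaturesForCurrentContext() {
  // Would fail if called before a context scope is entered.
  assert(!g_current_context.empty() && "needs an entered context");
}

int main() {
  ContextScope scope("fuzzer-context");
  InstallFeaturesForCurrentContext();  // fine: a context is entered
  return 0;
}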
diff --git a/deps/v8/test/fuzzer/wasm-compile.cc b/deps/v8/test/fuzzer/wasm-compile.cc
index 6804cfa5c9a..663ef33a943 100644
--- a/deps/v8/test/fuzzer/wasm-compile.cc
+++ b/deps/v8/test/fuzzer/wasm-compile.cc
@@ -122,7 +122,7 @@ class WasmGenerator {
: gen_(gen), emit_end_(emit_end) {
gen->blocks_.emplace_back(br_types.begin(), br_types.end());
if (param_types.size() == 0 && result_types.size() == 0) {
- gen->builder_->EmitWithU8(block_type, kWasmStmt.value_type_code());
+ gen->builder_->EmitWithU8(block_type, kWasmVoid.value_type_code());
return;
}
if (param_types.size() == 0 && result_types.size() == 1) {
@@ -135,11 +135,11 @@ class WasmGenerator {
FunctionSig::Builder builder(zone, result_types.size(),
param_types.size());
for (auto& type : param_types) {
- DCHECK_NE(type, kWasmStmt);
+ DCHECK_NE(type, kWasmVoid);
builder.AddParam(type);
}
for (auto& type : result_types) {
- DCHECK_NE(type, kWasmStmt);
+ DCHECK_NE(type, kWasmVoid);
builder.AddReturn(type);
}
FunctionSig* sig = builder.Build();
@@ -199,10 +199,10 @@ class WasmGenerator {
template <ValueKind T, IfType type>
void if_(DataRange* data) {
- static_assert(T == kStmt || type == kIfElse,
+ static_assert(T == kVoid || type == kIfElse,
"if without else cannot produce a value");
if_({},
- T == kStmt ? Vector<ValueType>{} : VectorOf({ValueType::Primitive(T)}),
+ T == kVoid ? Vector<ValueType>{} : VectorOf({ValueType::Primitive(T)}),
type, data);
}
@@ -217,7 +217,7 @@ class WasmGenerator {
uint8_t delegate_target = data->get<uint8_t>() % (try_blocks_.size() + 1);
bool is_unwind = num_catch == 0 && !has_catch_all && !is_delegate;
- Vector<const ValueType> return_type_vec = return_type.kind() == kStmt
+ Vector<const ValueType> return_type_vec = return_type.kind() == kVoid
? Vector<ValueType>{}
: VectorOf(&return_type, 1);
BlockScope block_scope(this, kExprTry, {}, return_type_vec, return_type_vec,
@@ -293,7 +293,7 @@ class WasmGenerator {
kExprBr, static_cast<uint32_t>(blocks_.size()) - 1 - target_block);
}
- template <ValueKind wanted_type>
+ template <ValueKind wanted_kind>
void br_if(DataRange* data) {
// There is always at least the block representing the function body.
DCHECK(!blocks_.empty());
@@ -305,9 +305,9 @@ class WasmGenerator {
builder_->EmitWithI32V(
kExprBrIf, static_cast<uint32_t>(blocks_.size()) - 1 - target_block);
ConsumeAndGenerate(break_types,
- wanted_type == kStmt
+ wanted_kind == kVoid
? Vector<ValueType>{}
- : VectorOf({ValueType::Primitive(wanted_type)}),
+ : VectorOf({ValueType::Primitive(wanted_kind)}),
data);
}
@@ -424,13 +424,13 @@ class WasmGenerator {
}
}
- template <WasmOpcode memory_op, ValueKind... arg_types>
+ template <WasmOpcode memory_op, ValueKind... arg_kinds>
void memop(DataRange* data) {
const uint8_t align = data->get<uint8_t>() % (max_alignment(memory_op) + 1);
const uint32_t offset = data->get<uint32_t>();
// Generate the index and the arguments, if any.
- Generate<kI32, arg_types...>(data);
+ Generate<kI32, arg_kinds...>(data);
if (WasmOpcodes::IsPrefixOpcode(static_cast<WasmOpcode>(memory_op >> 8))) {
DCHECK(memory_op >> 8 == kAtomicPrefix || memory_op >> 8 == kSimdPrefix);
@@ -496,14 +496,14 @@ class WasmGenerator {
enum CallDirect : bool { kCallDirect = true, kCallIndirect = false };
- template <ValueKind wanted_type>
+ template <ValueKind wanted_kind>
void call(DataRange* data) {
- call(data, ValueType::Primitive(wanted_type), kCallDirect);
+ call(data, ValueType::Primitive(wanted_kind), kCallDirect);
}
- template <ValueKind wanted_type>
+ template <ValueKind wanted_kind>
void call_indirect(DataRange* data) {
- call(data, ValueType::Primitive(wanted_type), kCallIndirect);
+ call(data, ValueType::Primitive(wanted_kind), kCallIndirect);
}
void Convert(ValueType src, ValueType dst) {
@@ -536,16 +536,16 @@ class WasmGenerator {
void ConvertOrGenerate(ValueType src, ValueType dst, DataRange* data) {
if (src == dst) return;
- if (src == kWasmStmt && dst != kWasmStmt) {
+ if (src == kWasmVoid && dst != kWasmVoid) {
Generate(dst, data);
- } else if (dst == kWasmStmt && src != kWasmStmt) {
+ } else if (dst == kWasmVoid && src != kWasmVoid) {
builder_->Emit(kExprDrop);
} else {
Convert(src, dst);
}
}
- void call(DataRange* data, ValueType wanted_type, CallDirect call_direct) {
+ void call(DataRange* data, ValueType wanted_kind, CallDirect call_direct) {
uint8_t random_byte = data->get<uint8_t>();
int func_index = random_byte % functions_.size();
uint32_t sig_index = functions_[func_index];
@@ -579,12 +579,12 @@ class WasmGenerator {
builder_->EmitByte(0); // Table index.
}
}
- if (sig->return_count() == 0 && wanted_type != kWasmStmt) {
+ if (sig->return_count() == 0 && wanted_kind != kWasmVoid) {
// The call did not generate a value. Thus just generate it here.
- Generate(wanted_type, data);
+ Generate(wanted_kind, data);
return;
}
- if (wanted_type == kWasmStmt) {
+ if (wanted_kind == kWasmVoid) {
// The call did generate values, but we did not want one.
for (size_t i = 0; i < sig->return_count(); ++i) {
builder_->Emit(kExprDrop);
@@ -593,16 +593,16 @@ class WasmGenerator {
}
auto return_types = VectorOf(sig->returns().begin(), sig->return_count());
auto wanted_types =
- VectorOf(&wanted_type, wanted_type == kWasmStmt ? 0 : 1);
+ VectorOf(&wanted_kind, wanted_kind == kWasmVoid ? 0 : 1);
ConsumeAndGenerate(return_types, wanted_types, data);
}
struct Var {
uint32_t index;
- ValueType type = kWasmStmt;
+ ValueType type = kWasmVoid;
Var() = default;
Var(uint32_t index, ValueType type) : index(index), type(type) {}
- bool is_valid() const { return type != kWasmStmt; }
+ bool is_valid() const { return type != kWasmVoid; }
};
Var GetRandomLocal(DataRange* data) {
@@ -616,34 +616,34 @@ class WasmGenerator {
return {index, type};
}
- template <ValueKind wanted_type>
+ template <ValueKind wanted_kind>
void local_op(DataRange* data, WasmOpcode opcode) {
Var local = GetRandomLocal(data);
// If there are no locals and no parameters, just generate any value (if a
// value is needed), or do nothing.
if (!local.is_valid()) {
- if (wanted_type == kStmt) return;
- return Generate<wanted_type>(data);
+ if (wanted_kind == kVoid) return;
+ return Generate<wanted_kind>(data);
}
if (opcode != kExprLocalGet) Generate(local.type, data);
builder_->EmitWithU32V(opcode, local.index);
- if (wanted_type != kStmt && local.type.kind() != wanted_type) {
- Convert(local.type, ValueType::Primitive(wanted_type));
+ if (wanted_kind != kVoid && local.type.kind() != wanted_kind) {
+ Convert(local.type, ValueType::Primitive(wanted_kind));
}
}
- template <ValueKind wanted_type>
+ template <ValueKind wanted_kind>
void get_local(DataRange* data) {
- static_assert(wanted_type != kStmt, "illegal type");
- local_op<wanted_type>(data, kExprLocalGet);
+ static_assert(wanted_kind != kVoid, "illegal type");
+ local_op<wanted_kind>(data, kExprLocalGet);
}
- void set_local(DataRange* data) { local_op<kStmt>(data, kExprLocalSet); }
+ void set_local(DataRange* data) { local_op<kVoid>(data, kExprLocalSet); }
- template <ValueKind wanted_type>
+ template <ValueKind wanted_kind>
void tee_local(DataRange* data) {
- local_op<wanted_type>(data, kExprLocalTee);
+ local_op<wanted_kind>(data, kExprLocalTee);
}
template <size_t num_bytes>
@@ -669,42 +669,42 @@ class WasmGenerator {
return {index, type};
}
- template <ValueKind wanted_type>
+ template <ValueKind wanted_kind>
void global_op(DataRange* data) {
- constexpr bool is_set = wanted_type == kStmt;
+ constexpr bool is_set = wanted_kind == kVoid;
Var global = GetRandomGlobal(data, is_set);
// If there are no globals, just generate any value (if a value is needed),
// or do nothing.
if (!global.is_valid()) {
- if (wanted_type == kStmt) return;
- return Generate<wanted_type>(data);
+ if (wanted_kind == kVoid) return;
+ return Generate<wanted_kind>(data);
}
if (is_set) Generate(global.type, data);
builder_->EmitWithU32V(is_set ? kExprGlobalSet : kExprGlobalGet,
global.index);
- if (!is_set && global.type.kind() != wanted_type) {
- Convert(global.type, ValueType::Primitive(wanted_type));
+ if (!is_set && global.type.kind() != wanted_kind) {
+ Convert(global.type, ValueType::Primitive(wanted_kind));
}
}
- template <ValueKind wanted_type>
+ template <ValueKind wanted_kind>
void get_global(DataRange* data) {
- static_assert(wanted_type != kStmt, "illegal type");
- global_op<wanted_type>(data);
+ static_assert(wanted_kind != kVoid, "illegal type");
+ global_op<wanted_kind>(data);
}
- template <ValueKind select_type>
+ template <ValueKind select_kind>
void select_with_type(DataRange* data) {
- static_assert(select_type != kStmt, "illegal type for select");
- Generate<select_type, select_type, kI32>(data);
+ static_assert(select_kind != kVoid, "illegal kind for select");
+ Generate<select_kind, select_kind, kI32>(data);
// num_types is always 1.
uint8_t num_types = 1;
builder_->EmitWithU8U8(kExprSelectWithType, num_types,
- ValueType::Primitive(select_type).value_type_code());
+ ValueType::Primitive(select_kind).value_type_code());
}
- void set_global(DataRange* data) { global_op<kStmt>(data); }
+ void set_global(DataRange* data) { global_op<kVoid>(data); }
void throw_or_rethrow(DataRange* data) {
bool rethrow = data->get<uint8_t>() % 2;
@@ -822,31 +822,31 @@ class WasmGenerator {
};
template <>
-void WasmGenerator::block<kStmt>(DataRange* data) {
+void WasmGenerator::block<kVoid>(DataRange* data) {
block({}, {}, data);
}
template <>
-void WasmGenerator::loop<kStmt>(DataRange* data) {
+void WasmGenerator::loop<kVoid>(DataRange* data) {
loop({}, {}, data);
}
template <>
-void WasmGenerator::Generate<kStmt>(DataRange* data) {
+void WasmGenerator::Generate<kVoid>(DataRange* data) {
GeneratorRecursionScope rec_scope(this);
if (recursion_limit_reached() || data->size() == 0) return;
constexpr GenerateFn alternatives[] = {
- &WasmGenerator::sequence<kStmt, kStmt>,
- &WasmGenerator::sequence<kStmt, kStmt, kStmt, kStmt>,
- &WasmGenerator::sequence<kStmt, kStmt, kStmt, kStmt, kStmt, kStmt, kStmt,
- kStmt>,
- &WasmGenerator::block<kStmt>,
- &WasmGenerator::loop<kStmt>,
- &WasmGenerator::if_<kStmt, kIf>,
- &WasmGenerator::if_<kStmt, kIfElse>,
+ &WasmGenerator::sequence<kVoid, kVoid>,
+ &WasmGenerator::sequence<kVoid, kVoid, kVoid, kVoid>,
+ &WasmGenerator::sequence<kVoid, kVoid, kVoid, kVoid, kVoid, kVoid, kVoid,
+ kVoid>,
+ &WasmGenerator::block<kVoid>,
+ &WasmGenerator::loop<kVoid>,
+ &WasmGenerator::if_<kVoid, kIf>,
+ &WasmGenerator::if_<kVoid, kIfElse>,
&WasmGenerator::br,
- &WasmGenerator::br_if<kStmt>,
+ &WasmGenerator::br_if<kVoid>,
&WasmGenerator::memop<kExprI32StoreMem, kI32>,
&WasmGenerator::memop<kExprI32StoreMem8, kI32>,
@@ -872,13 +872,13 @@ void WasmGenerator::Generate<kStmt>(DataRange* data) {
&WasmGenerator::drop,
- &WasmGenerator::call<kStmt>,
- &WasmGenerator::call_indirect<kStmt>,
+ &WasmGenerator::call<kVoid>,
+ &WasmGenerator::call_indirect<kVoid>,
&WasmGenerator::set_local,
&WasmGenerator::set_global,
&WasmGenerator::throw_or_rethrow,
- &WasmGenerator::try_block<kStmt>};
+ &WasmGenerator::try_block<kVoid>};
GenerateOneOf(alternatives, data);
}
@@ -897,9 +897,9 @@ void WasmGenerator::Generate<kI32>(DataRange* data) {
&WasmGenerator::i32_const<3>,
&WasmGenerator::i32_const<4>,
- &WasmGenerator::sequence<kI32, kStmt>,
- &WasmGenerator::sequence<kStmt, kI32>,
- &WasmGenerator::sequence<kStmt, kI32, kStmt>,
+ &WasmGenerator::sequence<kI32, kVoid>,
+ &WasmGenerator::sequence<kVoid, kI32>,
+ &WasmGenerator::sequence<kVoid, kI32, kVoid>,
&WasmGenerator::op<kExprI32Eqz, kI32>,
&WasmGenerator::op<kExprI32Eq, kI32, kI32>,
@@ -1001,13 +1001,13 @@ void WasmGenerator::Generate<kI32>(DataRange* data) {
kI32>,
&WasmGenerator::op_with_prefix<kExprV128AnyTrue, kS128>,
- &WasmGenerator::op_with_prefix<kExprV8x16AllTrue, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16AllTrue, kS128>,
&WasmGenerator::op_with_prefix<kExprI8x16BitMask, kS128>,
- &WasmGenerator::op_with_prefix<kExprV16x8AllTrue, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8AllTrue, kS128>,
&WasmGenerator::op_with_prefix<kExprI16x8BitMask, kS128>,
- &WasmGenerator::op_with_prefix<kExprV32x4AllTrue, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4AllTrue, kS128>,
&WasmGenerator::op_with_prefix<kExprI32x4BitMask, kS128>,
- &WasmGenerator::op_with_prefix<kExprV64x2AllTrue, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI64x2AllTrue, kS128>,
&WasmGenerator::op_with_prefix<kExprI64x2BitMask, kS128>,
&WasmGenerator::simd_lane_op<kExprI8x16ExtractLaneS, 16, kS128>,
&WasmGenerator::simd_lane_op<kExprI8x16ExtractLaneU, 16, kS128>,
@@ -1049,9 +1049,9 @@ void WasmGenerator::Generate<kI64>(DataRange* data) {
&WasmGenerator::i64_const<7>,
&WasmGenerator::i64_const<8>,
- &WasmGenerator::sequence<kI64, kStmt>,
- &WasmGenerator::sequence<kStmt, kI64>,
- &WasmGenerator::sequence<kStmt, kI64, kStmt>,
+ &WasmGenerator::sequence<kI64, kVoid>,
+ &WasmGenerator::sequence<kVoid, kI64>,
+ &WasmGenerator::sequence<kVoid, kI64, kVoid>,
&WasmGenerator::op<kExprI64Add, kI64, kI64>,
&WasmGenerator::op<kExprI64Sub, kI64, kI64>,
@@ -1154,9 +1154,9 @@ void WasmGenerator::Generate<kF32>(DataRange* data) {
}
constexpr GenerateFn alternatives[] = {
- &WasmGenerator::sequence<kF32, kStmt>,
- &WasmGenerator::sequence<kStmt, kF32>,
- &WasmGenerator::sequence<kStmt, kF32, kStmt>,
+ &WasmGenerator::sequence<kF32, kVoid>,
+ &WasmGenerator::sequence<kVoid, kF32>,
+ &WasmGenerator::sequence<kVoid, kF32, kVoid>,
&WasmGenerator::op<kExprF32Abs, kF32>,
&WasmGenerator::op<kExprF32Neg, kF32>,
@@ -1211,9 +1211,9 @@ void WasmGenerator::Generate<kF64>(DataRange* data) {
}
constexpr GenerateFn alternatives[] = {
- &WasmGenerator::sequence<kF64, kStmt>,
- &WasmGenerator::sequence<kStmt, kF64>,
- &WasmGenerator::sequence<kStmt, kF64, kStmt>,
+ &WasmGenerator::sequence<kF64, kVoid>,
+ &WasmGenerator::sequence<kVoid, kF64>,
+ &WasmGenerator::sequence<kVoid, kF64, kVoid>,
&WasmGenerator::op<kExprF64Abs, kF64>,
&WasmGenerator::op<kExprF64Neg, kF64>,
@@ -1304,7 +1304,6 @@ void WasmGenerator::Generate<kS128>(DataRange* data) {
&WasmGenerator::op_with_prefix<kExprI8x16SubSatU, kS128, kS128>,
&WasmGenerator::op_with_prefix<kExprI8x16MinS, kS128, kS128>,
&WasmGenerator::op_with_prefix<kExprI8x16MinU, kS128, kS128>,
- // I8x16Mul is prototyped but not in the proposal, thus omitted here.
&WasmGenerator::op_with_prefix<kExprI8x16MaxS, kS128, kS128>,
&WasmGenerator::op_with_prefix<kExprI8x16MaxU, kS128, kS128>,
&WasmGenerator::op_with_prefix<kExprI8x16RoundingAverageU, kS128, kS128>,
@@ -1513,8 +1512,8 @@ void WasmGenerator::grow_memory(DataRange* data) {
void WasmGenerator::Generate(ValueType type, DataRange* data) {
switch (type.kind()) {
- case kStmt:
- return Generate<kStmt>(data);
+ case kVoid:
+ return Generate<kVoid>(data);
case kI32:
return Generate<kI32>(data);
case kI64:
@@ -1555,7 +1554,7 @@ void WasmGenerator::Generate(Vector<const ValueType> types, DataRange* data) {
}
if (types.size() == 0) {
- Generate(kWasmStmt, data);
+ Generate(kWasmVoid, data);
return;
}
if (types.size() == 1) {
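Annotation: the bulk of this hunk is a mechanical rename (kWasmStmt to kWasmVoid, kStmt to kVoid, and wanted_type to wanted_kind where the template parameter is a ValueKind); the emitted wasm bytes do not change. A self-contained sketch of what the void block type means at the byte level (simplified toy builder, not V8's real one):

#include <cstdint>
#include <cstdio>
#include <vector>

// In the wasm binary format an empty (value-less) block type is encoded as
// the byte 0x40; the patch only renames V8's constant for it.
constexpr uint8_t kVoidBlockType = 0x40;
constexpr uint8_t kExprBlock = 0x02;
constexpr uint8_t kExprEnd = 0x0b;

void EmitEmptyBlock(std::vector<uint8_t>* code) {
  code->push_back(kExprBlock);
  code->push_back(kVoidBlockType);  // block produces no value
  code->push_back(kExprEnd);
}

int main() {
  std::vector<uint8_t> code;
  EmitEmptyBlock(&code);
  for (uint8_t b : code) std::printf("0x%02x ", b);  // 0x02 0x40 0x0b
  std::printf("\n");
  return 0;
}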
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
index 597789c7e19..76fde895efa 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
@@ -307,33 +307,30 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
}
}
-void OneTimeEnableStagedWasmFeatures() {
+void OneTimeEnableStagedWasmFeatures(v8::Isolate* isolate) {
struct EnableStagedWasmFeatures {
- EnableStagedWasmFeatures() {
+ explicit EnableStagedWasmFeatures(v8::Isolate* isolate) {
#define ENABLE_STAGED_FEATURES(feat, desc, val) \
FLAG_experimental_wasm_##feat = true;
FOREACH_WASM_STAGING_FEATURE_FLAG(ENABLE_STAGED_FEATURES)
#undef ENABLE_STAGED_FEATURES
+ isolate->InstallConditionalFeatures(isolate->GetCurrentContext());
}
};
// The compiler will properly synchronize the constructor call.
- static EnableStagedWasmFeatures one_time_enable_staged_features;
+ static EnableStagedWasmFeatures one_time_enable_staged_features(isolate);
}
void WasmExecutionFuzzer::FuzzWasmModule(Vector<const uint8_t> data,
bool require_valid) {
- // We explicitly enable staged WebAssembly features here to increase fuzzer
- // coverage. For libfuzzer fuzzers it is not possible that the fuzzer enables
- // the flag by itself.
- OneTimeEnableStagedWasmFeatures();
+ v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
+ v8::Isolate* isolate = support->GetIsolate();
// Strictly enforce the input size limit. Note that setting "max_len" on the
// fuzzer target is not enough, since different fuzzers are used and not all
// respect that limit.
if (data.size() > max_input_size()) return;
- v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
- v8::Isolate* isolate = support->GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
// Clear any pending exceptions from a prior run.
@@ -342,6 +339,12 @@ void WasmExecutionFuzzer::FuzzWasmModule(Vector<const uint8_t> data,
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
v8::Context::Scope context_scope(support->GetContext());
+
+ // We explicitly enable staged WebAssembly features here to increase fuzzer
+ // coverage. For libfuzzer fuzzers it is not possible that the fuzzer enables
+ // the flag by itself.
+ OneTimeEnableStagedWasmFeatures(isolate);
+
v8::TryCatch try_catch(isolate);
HandleScope scope(i_isolate);
@@ -356,6 +359,8 @@ void WasmExecutionFuzzer::FuzzWasmModule(Vector<const uint8_t> data,
// compiled with Turbofan and which one with Liftoff.
uint8_t tier_mask = data.empty() ? 0 : data[0];
if (!data.empty()) data += 1;
+ uint8_t debug_mask = data.empty() ? 0 : data[0];
+ if (!data.empty()) data += 1;
if (!GenerateModule(i_isolate, &zone, data, &buffer, &num_args,
&interpreter_args, &compiler_args)) {
return;
@@ -374,6 +379,8 @@ void WasmExecutionFuzzer::FuzzWasmModule(Vector<const uint8_t> data,
FlagScope<bool> liftoff(&FLAG_liftoff, true);
FlagScope<bool> no_tier_up(&FLAG_wasm_tier_up, false);
FlagScope<int> tier_mask_scope(&FLAG_wasm_tier_mask_for_testing, tier_mask);
+ FlagScope<int> debug_mask_scope(&FLAG_wasm_debug_mask_for_testing,
+ debug_mask);
compiled_module = i_isolate->wasm_engine()->SyncCompile(
i_isolate, enabled_features, &interpreter_thrower, wire_bytes);
}
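Annotation: OneTimeEnableStagedWasmFeatures now takes the isolate so it can call InstallConditionalFeatures on the current context after flipping the staged-feature flags; the once-only behaviour still comes from a function-local static, whose construction C++ guarantees to run exactly once and thread-safely. A minimal, self-contained sketch of that idiom (toy code, not V8's):

#include <cstdio>

void EnableStagedFeaturesOnce(int context_id) {
  struct Enabler {
    explicit Enabler(int id) {
      // Runs exactly once, on the first call that reaches this point.
      std::printf("enabling staged features (context %d)\n", id);
    }
  };
  static Enabler enabler(context_id);  // later calls skip construction
}

int main() {
  EnableStagedFeaturesOnce(1);  // prints
  EnableStagedFeaturesOnce(2);  // no output: already constructed
  return 0;
}

The same hunk also starts consuming a second configuration byte from the fuzzer input (debug_mask, alongside the existing tier_mask) before the remaining bytes are handed to GenerateModule.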
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.h b/deps/v8/test/fuzzer/wasm-fuzzer-common.h
index d74a26ffab1..04350e3d809 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.h
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.h
@@ -33,7 +33,7 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
// no-ops. This avoids race conditions with threads reading the flags. Fuzzers
// are executed in their own process anyway, so this should not interfere with
// anything.
-void OneTimeEnableStagedWasmFeatures();
+void OneTimeEnableStagedWasmFeatures(v8::Isolate* isolate);
class WasmExecutionFuzzer {
public:
diff --git a/deps/v8/test/fuzzer/wasm.cc b/deps/v8/test/fuzzer/wasm.cc
index fe3cdfcbea1..48d91089022 100644
--- a/deps/v8/test/fuzzer/wasm.cc
+++ b/deps/v8/test/fuzzer/wasm.cc
@@ -21,18 +21,14 @@
namespace i = v8::internal;
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- // We explicitly enable staged WebAssembly features here to increase fuzzer
- // coverage. For libfuzzer fuzzers it is not possible that the fuzzer enables
- // the flag by itself.
- i::wasm::fuzzer::OneTimeEnableStagedWasmFeatures();
+ v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
+ v8::Isolate* isolate = support->GetIsolate();
// We reduce the maximum memory size and table size of WebAssembly instances
// to avoid OOMs in the fuzzer.
i::FLAG_wasm_max_mem_pages = 32;
i::FLAG_wasm_max_table_size = 100;
- v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
- v8::Isolate* isolate = support->GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
// Clear any pending exceptions from a prior run.
@@ -43,6 +39,12 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
v8::Context::Scope context_scope(support->GetContext());
+
+ // We explicitly enable staged WebAssembly features here to increase fuzzer
+ // coverage. For libfuzzer fuzzers it is not possible that the fuzzer enables
+ // the flag by itself.
+ i::wasm::fuzzer::OneTimeEnableStagedWasmFeatures(isolate);
+
v8::TryCatch try_catch(isolate);
i::wasm::testing::SetupIsolateForWasmModule(i_isolate);
i::wasm::ModuleWireBytes wire_bytes(data, data + size);
diff --git a/deps/v8/test/fuzzer/wasm/regress-1191853.wasm b/deps/v8/test/fuzzer/wasm/regress-1191853.wasm
new file mode 100644
index 00000000000..8e8237eb2a3
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm/regress-1191853.wasm
Binary files differ
diff --git a/deps/v8/test/inspector/BUILD.gn b/deps/v8/test/inspector/BUILD.gn
index cf039da0bef..14c1704daa9 100644
--- a/deps/v8/test/inspector/BUILD.gn
+++ b/deps/v8/test/inspector/BUILD.gn
@@ -23,7 +23,6 @@ v8_source_set("inspector_test") {
"../..:v8",
"../..:v8_libbase",
"../..:v8_libplatform",
- "../..:v8_wrappers",
"../../src/inspector:inspector_test_headers",
]
}
diff --git a/deps/v8/test/inspector/debugger/break-locations-await-expected.txt b/deps/v8/test/inspector/debugger/break-locations-await-expected.txt
index 17008f35505..ac194fdd106 100644
--- a/deps/v8/test/inspector/debugger/break-locations-await-expected.txt
+++ b/deps/v8/test/inspector/debugger/break-locations-await-expected.txt
@@ -6,7 +6,7 @@ function testFunction() {
async function f1() {
for (let x = |_|0; x |_|< 1; ++|_|x) |_|await x;
|_|return |_|await Promise.|C|resolve(2);|R|
- |R|}
+ }
async function f2() {
let r = |_|await |C|f1() + |_|await |C|f1();
@@ -17,7 +17,7 @@ function testFunction() {
let p = |_|Promise.|C|resolve(42);
|_|await p;
|_|return r;|R|
- |R|}
+ }
return |C|f2();|R|
}
diff --git a/deps/v8/test/inspector/debugger/break-locations-var-init-expected.txt b/deps/v8/test/inspector/debugger/break-locations-var-init-expected.txt
index 24bda366a9f..4af1b05d661 100644
--- a/deps/v8/test/inspector/debugger/break-locations-var-init-expected.txt
+++ b/deps/v8/test/inspector/debugger/break-locations-var-init-expected.txt
@@ -21,7 +21,7 @@ function testFunction() {
|_|(async function asyncF() {
let r = |_|await Promise.|C|resolve(42);
|_|return r;|R|
- |R|})|C|();
+ })|C|();
|_|return promise;|R|
}
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-after-gc-expected.txt b/deps/v8/test/inspector/debugger/get-possible-breakpoints-after-gc-expected.txt
new file mode 100644
index 00000000000..6968ed3eab7
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-after-gc-expected.txt
@@ -0,0 +1,7 @@
+Checks if we keep alive breakpoint information for top-level functions when calling getPossibleBreakpoints.
+Result of get possible breakpoints in topLevel.js
+[{"scriptId":"3","lineNumber":0,"columnNumber":0},{"scriptId":"3","lineNumber":0,"columnNumber":8,"type":"call"},{"scriptId":"3","lineNumber":0,"columnNumber":43,"type":"return"}]
+Result of get possible breakpoints in moduleFunc.js
+[{"scriptId":"5","lineNumber":0,"columnNumber":22},{"scriptId":"5","lineNumber":0,"columnNumber":30,"type":"call"},{"scriptId":"5","lineNumber":0,"columnNumber":63,"type":"return"},{"scriptId":"5","lineNumber":0,"columnNumber":64,"type":"return"}]
+Result of get possible breakpoints in mixedFunctions.js
+[{"scriptId":"7","lineNumber":0,"columnNumber":15,"type":"return"},{"scriptId":"7","lineNumber":1,"columnNumber":2},{"scriptId":"7","lineNumber":1,"columnNumber":10,"type":"call"},{"scriptId":"7","lineNumber":2,"columnNumber":0,"type":"return"}]
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-after-gc.js b/deps/v8/test/inspector/debugger/get-possible-breakpoints-after-gc.js
new file mode 100644
index 00000000000..097d0b99af1
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-after-gc.js
@@ -0,0 +1,60 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+let {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Checks if we keep alive breakpoint information for top-level functions when calling getPossibleBreakpoints.');
+
+session.setupScriptMap();
+var executionContextId;
+
+const callGarbageCollector = `
+ %CollectGarbage("");
+ %CollectGarbage("");
+ %CollectGarbage("");
+ %CollectGarbage("");
+`;
+
+const topLevelFunction = `console.log('This is a top level function')`;
+const moduleFunction =
+ `function testFunc() { console.log('This is a module function') }`;
+let mixedFunctions = ` function A() {}
+ console.log('This is a top level function');
+`;
+
+Protocol.Debugger.enable().then(onDebuggerEnabled);
+
+function onDebuggerEnabled() {
+ Protocol.Runtime.enable();
+ Protocol.Runtime.onExecutionContextCreated(onExecutionContextCreated);
+}
+
+async function onExecutionContextCreated(messageObject) {
+ executionContextId = messageObject.params.context.id;
+ await testGetPossibleBreakpoints(
+ executionContextId, topLevelFunction, 'topLevel.js');
+ await testGetPossibleBreakpoints(
+ executionContextId, moduleFunction, 'moduleFunc.js');
+ await testGetPossibleBreakpoints(
+ executionContextId, mixedFunctions, 'mixedFunctions.js');
+ InspectorTest.completeTest();
+}
+
+async function testGetPossibleBreakpoints(executionContextId, func, url) {
+ const obj = await Protocol.Runtime.compileScript({
+ expression: func,
+ sourceURL: url,
+ persistScript: true,
+ executionContextId: executionContextId
+ });
+ const scriptId = obj.result.scriptId;
+ const location = {start: {lineNumber: 0, columnNumber: 0, scriptId}};
+ await Protocol.Runtime.runScript({scriptId});
+ await Protocol.Runtime.evaluate({expression: `${callGarbageCollector}`});
+ const {result: {locations}} =
+ await Protocol.Debugger.getPossibleBreakpoints(location);
+ InspectorTest.log(`Result of get possible breakpoints in ${url}`);
+ InspectorTest.log(JSON.stringify(locations));
+}
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt b/deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt
index 27346bffea9..f7f9b7ca254 100644
--- a/deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt
@@ -236,7 +236,7 @@ async function testPromiseAsyncWithCode() {
|R|}
|C|main();
|_|return testPromise;|R|
-|R|}
+}
function returnFunction() {
|_|return returnObject;|R|
@@ -249,7 +249,7 @@ async function testPromiseComplex() {
async function foo() {
|_|await Promise.|C|resolve();
|_|return 42;|R|
- |R|}
+ }
var x = |_|1;
var y = |_|2;
|C|returnFunction(|C|emptyFunction(), x++, --y, x => 2 |_|* x|R|, |C|returnCall())|C|().a = |_|await |C|foo((a => 2 |_|*a|R|)|C|(5));
@@ -257,7 +257,7 @@ async function testPromiseComplex() {
|R|}
|C|main();
|_|return testPromise;|R|
-|R|}
+}
function twiceDefined() {
|_|return a + b;|R|
diff --git a/deps/v8/test/inspector/debugger/regress-1190290-expected.txt b/deps/v8/test/inspector/debugger/regress-1190290-expected.txt
new file mode 100644
index 00000000000..5d3b31479f6
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/regress-1190290-expected.txt
@@ -0,0 +1,10 @@
+Checks if we correctly handle exceptions thrown on setBreakpointByUrl if script is invalid.
+[
+]
+[
+ [0] : {
+ columnNumber : 22
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+]
diff --git a/deps/v8/test/inspector/debugger/regress-1190290.js b/deps/v8/test/inspector/debugger/regress-1190290.js
new file mode 100644
index 00000000000..eab16d3f1d1
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/regress-1190290.js
@@ -0,0 +1,42 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+let {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Checks if we correctly handle exceptions thrown on setBreakpointByUrl if script is invalid.');
+
+session.setupScriptMap();
+var executionContextId;
+
+const invalidFunction = `console.lo g('This is a top level function')`;
+const moduleFunction =
+ `function testFunc() { console.log('This is a module function') }`;
+
+Protocol.Debugger.enable().then(onDebuggerEnabled);
+
+function onDebuggerEnabled() {
+ Protocol.Runtime.enable();
+ Protocol.Runtime.onExecutionContextCreated(onExecutionContextCreated);
+}
+
+async function onExecutionContextCreated(messageObject) {
+ executionContextId = messageObject.params.context.id;
+ await testSetBreakpoint(
+ executionContextId, invalidFunction, 'invalidFunc.js');
+ await testSetBreakpoint(executionContextId, moduleFunction, 'moduleFunc.js');
+ InspectorTest.completeTest();
+}
+
+async function testSetBreakpoint(executionContextId, func, url) {
+ await Protocol.Runtime.compileScript({
+ expression: func,
+ sourceURL: url,
+ persistScript: true,
+ executionContextId: executionContextId
+ });
+ const {result: {locations}} =
+ await Protocol.Debugger.setBreakpointByUrl({lineNumber: 0, url});
+ InspectorTest.logMessage(locations);
+}
diff --git a/deps/v8/test/inspector/debugger/regression-1185540-expected.txt b/deps/v8/test/inspector/debugger/regression-1185540-expected.txt
new file mode 100644
index 00000000000..a495f05ec59
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/regression-1185540-expected.txt
@@ -0,0 +1,2 @@
+Check that setting a breakpoint in an invalid function is not crashing.
+[]
diff --git a/deps/v8/test/inspector/debugger/regression-1185540.js b/deps/v8/test/inspector/debugger/regression-1185540.js
new file mode 100644
index 00000000000..ce1f2a85afa
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/regression-1185540.js
@@ -0,0 +1,34 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Check that setting a breakpoint in an invalid function is not crashing.');
+
+const invalidFunc = `console.l og('hi');//# sourceURL=invalid.js`;
+
+Protocol.Debugger.enable().then(onDebuggerEnabled);
+
+function onDebuggerEnabled() {
+ Protocol.Runtime.enable();
+ Protocol.Runtime.onExecutionContextCreated(onExecutionContextCreated);
+}
+
+async function onExecutionContextCreated(messageObject) {
+ const executionContextId = messageObject.params.context.id;
+ await testSetBreakpoint(executionContextId, invalidFunc, 'invalid.js');
+}
+
+async function testSetBreakpoint(executionContextId, func, url) {
+ const obj = await Protocol.Runtime.compileScript({
+ expression: func,
+ sourceURL: url,
+ persistScript: true,
+ executionContextId
+ });
+ const scriptId = obj.result.scriptId;
+ const {result: {locations}} =
+ await Protocol.Debugger.setBreakpointByUrl({lineNumber: 0, url});
+ InspectorTest.log(JSON.stringify(locations));
+ InspectorTest.completeTest();
+};
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling-expected.txt b/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling-expected.txt
index 02bfe0d80cd..a85aab6fe0c 100644
--- a/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling-expected.txt
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling-expected.txt
@@ -1,7 +1,13 @@
Tests that setting breakpoint before enabling debugger produces an error
-setBreakpointByUrl error: undefined
+setBreakpointByUrl error: {
+ "code": -32000,
+ "message": "Debugger agent is not enabled"
+}
setBreakpoint error: {
- "code": -32602,
- "message": "Invalid parameters",
- "data": "Failed to deserialize params.location - BINDINGS: mandatory field missing at <some position>"
+ "code": -32000,
+ "message": "Debugger agent is not enabled"
+}
+setBreakpointOnFunctionCall error: {
+ "code": -32000,
+ "message": "Debugger agent is not enabled"
}
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling.js b/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling.js
index 5af1085c874..4401466a921 100644
--- a/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling.js
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling.js
@@ -10,12 +10,19 @@ function didSetBreakpointByUrlBeforeEnable(message)
{
InspectorTest.log("setBreakpointByUrl error: " + JSON.stringify(
InspectorTest.trimErrorMessage(message).error, null, 2));
- Protocol.Debugger.setBreakpoint().then(didSetBreakpointBeforeEnable);
+ Protocol.Debugger.setBreakpoint({location: { scriptId: "4", lineNumber: 0, columnNumber: 0 }}).then(didSetBreakpointBeforeEnable);
}
function didSetBreakpointBeforeEnable(message)
{
InspectorTest.log("setBreakpoint error: " + JSON.stringify(
InspectorTest.trimErrorMessage(message).error, null, 2));
+ Protocol.Debugger.setBreakpointOnFunctionCall({objectId: "4"}).then(didSetBreakpointOnFunctionCallBeforeEnable);
+}
+
+function didSetBreakpointOnFunctionCallBeforeEnable(message)
+{
+ InspectorTest.log("setBreakpointOnFunctionCall error: " + JSON.stringify(
+ InspectorTest.trimErrorMessage(message).error, null, 2));
InspectorTest.completeTest();
}
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-breaks-on-first-breakable-location-expected.txt b/deps/v8/test/inspector/debugger/set-breakpoint-breaks-on-first-breakable-location-expected.txt
index 5f7fa80a979..17cc8e4a2bb 100644
--- a/deps/v8/test/inspector/debugger/set-breakpoint-breaks-on-first-breakable-location-expected.txt
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-breaks-on-first-breakable-location-expected.txt
@@ -2,7 +2,7 @@ Tests if breakpoint set is first breakable location
Set breakpoint outside of any function: (0, 0).
Setting breakpoint for id: 3 at 0, 0.
No breakable location inside a function was found
-Set breakpoint adds a breakpoint at (8, 1).
+Set breakpoint adds a breakpoint at (4, 2).
Set breakpoint at a breakable location: (4, 17).
Setting breakpoint for id: 3 at 4, 17.
Location match for (4, 17).
@@ -10,4 +10,4 @@ Initial location is expected to be breakable: true.
Set breakpoint at non-breakable location: (7, 0).
Setting breakpoint for id: 3 at 7, 0.
Location match for (7, 2).
-Initial location is expected to be breakable: false.
\ No newline at end of file
+Initial location is expected to be breakable: false.
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-expected.txt b/deps/v8/test/inspector/debugger/set-breakpoint-expected.txt
index 9e8bc7dcb0c..609cb02ed28 100644
--- a/deps/v8/test/inspector/debugger/set-breakpoint-expected.txt
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-expected.txt
@@ -39,10 +39,6 @@ hitBreakpoints contains breakpoint: true
Set breakpoint at empty line by url in top level function..
Breakpoint resolved at:
-// last line#
-
-Breakpoint hit at:
-// last line#
-
-hitBreakpoints contains breakpoint: true
+function i2(){#}
+// last line
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-in-class-initializer-expected.txt b/deps/v8/test/inspector/debugger/set-breakpoint-in-class-initializer-expected.txt
new file mode 100644
index 00000000000..ea363069cbf
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-in-class-initializer-expected.txt
@@ -0,0 +1,66 @@
+Checks if we can set a breakpoint on a one-line inline functions.
+Setting breakpoint on `class X`
+
+function foo() {}
+var bar = "bar";
+
+class X {
+ constructor() {
+ |_|this.x = 1;
+ }
+ [bar] = 2;
+ baz = foo();
+}
+new X();
+
+Setting breakpoint on constructor, should resolve to same location
+
+function foo() {}
+var bar = "bar";
+
+class X {
+ constructor() {
+ |_|this.x = 1;
+ }
+ [bar] = 2;
+ baz = foo();
+}
+new X();
+
+Setting breakpoint on computed properties in class
+
+function foo() {}
+var bar = "bar";
+
+class X {
+ constructor() {
+ this.x = 1;
+ }
+ [|_|bar] = 2;
+ baz = foo();
+}
+new X();
+
+Setting breakpoint on initializer function
+
+function foo() {}
+var bar = "bar";
+
+class X {
+ constructor() {
+ this.x = 1;
+ }
+ [bar] = 2;
+ baz = |_|foo();
+}
+new X();
+
+Paused on location:
+(anonymous) (testInitializer.js:8:3)
+Paused on location:
+<instance_members_initializer> (testInitializer.js:9:8)
+X (testInitializer.js:5:13)
+(anonymous) (testInitializer.js:11:0)
+Paused on location:
+X (testInitializer.js:6:4)
+(anonymous) (testInitializer.js:11:0)
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-in-class-initializer.js b/deps/v8/test/inspector/debugger/set-breakpoint-in-class-initializer.js
new file mode 100644
index 00000000000..6ed984962a9
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-in-class-initializer.js
@@ -0,0 +1,75 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Checks if we can set a breakpoint on a one-line inline functions.');
+
+session.setupScriptMap();
+
+const testClassInitializer = `
+function foo() {}
+var bar = "bar";
+
+class X {
+ constructor() {
+ this.x = 1;
+ }
+ [bar] = 2;
+ baz = foo();
+}
+new X();
+//# sourceURL=testInitializer.js`
+
+Protocol.Debugger.enable().then(onDebuggerEnabled);
+
+function onDebuggerEnabled() {
+ Protocol.Runtime.enable();
+ Protocol.Runtime.onExecutionContextCreated(onExecutionContextCreated);
+}
+
+async function onExecutionContextCreated(messageObject) {
+ const executionContextId = messageObject.params.context.id;
+ await runTest(executionContextId, testClassInitializer, 'testInitializer.js');
+ InspectorTest.completeTest();
+}
+
+async function runTest(executionContextId, func, url) {
+ const obj = await Protocol.Runtime.compileScript({
+ expression: func,
+ sourceURL: url,
+ persistScript: true,
+ executionContextId: executionContextId
+ });
+ const scriptId = obj.result.scriptId;
+
+ InspectorTest.log('Setting breakpoint on `class X`');
+ await setBreakpoint(4, 'testInitializer.js');
+
+ InspectorTest.log(
+ 'Setting breakpoint on constructor, should resolve to same location');
+ await setBreakpoint(5, 'testInitializer.js');
+
+ InspectorTest.log('Setting breakpoint on computed properties in class');
+ await setBreakpoint(8, 'testInitializer.js');
+
+ InspectorTest.log('Setting breakpoint on initializer function');
+ await setBreakpoint(9, 'testInitializer.js');
+
+ Protocol.Runtime.runScript({scriptId});
+ const numBreaks = 3;
+ for (var i = 0; i < numBreaks; ++i) {
+ const {params: {callFrames}} = await Protocol.Debugger.oncePaused();
+ InspectorTest.log('Paused on location:');
+ session.logCallFrames(callFrames);
+ Protocol.Debugger.resume();
+ }
+
+ InspectorTest.completeTest();
+};
+
+async function setBreakpoint(lineNumber, url) {
+ const {result: {locations}} =
+ await Protocol.Debugger.setBreakpointByUrl({lineNumber, url});
+ await session.logBreakLocations(locations);
+}
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-inline-function-expected.txt b/deps/v8/test/inspector/debugger/set-breakpoint-inline-function-expected.txt
new file mode 100644
index 00000000000..9c69c6d4570
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-inline-function-expected.txt
@@ -0,0 +1,11 @@
+Checks if we can set a breakpoint on a one-line inline functions.
+Setting breakpoint
+ function test() {
+ function func(a) {|_|console.log(a);}
+ func("hi");
+ }
+
+Paused on location:
+func (testFunction.js:1:22)
+test (testFunction.js:2:4)
+(anonymous) (:0:0)
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-inline-function.js b/deps/v8/test/inspector/debugger/set-breakpoint-inline-function.js
new file mode 100644
index 00000000000..630e20150ee
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-inline-function.js
@@ -0,0 +1,31 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Checks if we can set a breakpoint on a one-line inline functions.');
+
+session.setupScriptMap();
+const testFunction = ` function test() {
+ function func(a) {console.log(a);}
+ func("hi");
+ }
+ //# sourceURL=testFunction.js`;
+
+contextGroup.addScript(testFunction);
+
+(async function testSetBreakpoint() {
+ await Protocol.Debugger.enable();
+ await Protocol.Runtime.enable();
+
+ InspectorTest.log('Setting breakpoint');
+ const {result: {locations}} = await Protocol.Debugger.setBreakpointByUrl(
+ {lineNumber: 1, url: 'testFunction.js'});
+ await session.logBreakLocations(locations);
+
+ Protocol.Runtime.evaluate({expression: 'test()'});
+ const {params: {callFrames}} = await Protocol.Debugger.oncePaused();
+ InspectorTest.log('Paused on location:');
+ session.logCallFrames(callFrames);
+ InspectorTest.completeTest();
+})();
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint.js b/deps/v8/test/inspector/debugger/set-breakpoint.js
index 2c641c3062e..194e4d2c4bc 100644
--- a/deps/v8/test/inspector/debugger/set-breakpoint.js
+++ b/deps/v8/test/inspector/debugger/set-breakpoint.js
@@ -92,22 +92,10 @@ eval('function sourceUrlFunc() { a = 2; }\\n//# sourceURL=sourceUrlScript');`);
Protocol.Runtime.evaluate({
expression: `//# sourceURL=test-script\nfunction i1(){};\n\n\n\n\nfunction i2(){}\n// last line`
});
- const [{
- params:{location}
- }, {
- params:{
- callFrames:[topFrame],
- hitBreakpoints
- }
- }] = await Promise.all([
- Protocol.Debugger.onceBreakpointResolved(),
- Protocol.Debugger.oncePaused()]);
+ const [{params: {location}}] =
+ await Promise.all([Protocol.Debugger.onceBreakpointResolved()]);
InspectorTest.log('Breakpoint resolved at:');
await session.logSourceLocation(location);
- InspectorTest.log('Breakpoint hit at:');
- await session.logSourceLocation(topFrame.location);
- const hitBreakpoint = hitBreakpoints[0] === breakpointId;
- InspectorTest.log(`hitBreakpoints contains breakpoint: ${hitBreakpoint}\n`);
}
await Protocol.Debugger.disable();
InspectorTest.completeTest();
diff --git a/deps/v8/test/inspector/debugger/wasm-gc-breakpoints-expected.txt b/deps/v8/test/inspector/debugger/wasm-gc-breakpoints-expected.txt
new file mode 100644
index 00000000000..037057b13ba
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-gc-breakpoints-expected.txt
@@ -0,0 +1,31 @@
+Tests GC object inspection.
+
+Running test: test
+Instantiating.
+Waiting for wasm script (ignoring first non-wasm script).
+Setting breakpoint at offset 107 on script wasm://wasm/22e4830a
+Calling main()
+Paused:
+Script wasm://wasm/22e4830a byte offset 107: Wasm opcode 0x21 (kExprLocalSet)
+Scope:
+at main (0:107):
+ - scope (wasm-expression-stack):
+ 0: Array ((ref $ArrC))
+ object details:
+ 0: Struct ((ref null $StrA))
+ length: 1 (number)
+ - scope (local):
+ $varA: Struct ((ref null $StrA))
+ $varB: null ((ref null $ArrC))
+ object details:
+ $byte: 127 (i8)
+ $word: 32767 (i16)
+ $pointer: Struct ((ref $StrB))
+ - scope (module):
+ instance: exports: "main" (Function)
+ module: Module
+ functions: "$main": (Function)
+ globals: "$global0": function 0() { [native code] } ((ref null $type3))
+at (anonymous) (0:17):
+ -- skipped
+exports.main returned!
diff --git a/deps/v8/test/inspector/debugger/wasm-gc-breakpoints.js b/deps/v8/test/inspector/debugger/wasm-gc-breakpoints.js
new file mode 100644
index 00000000000..2c4c774411d
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-gc-breakpoints.js
@@ -0,0 +1,214 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc
+
+utils.load('test/inspector/wasm-inspector-test.js');
+
+const {session, contextGroup, Protocol} =
+ InspectorTest.start('Tests GC object inspection.');
+session.setupScriptMap();
+
+const module_bytes = [
+ 0x00, 0x61, 0x73, 0x6d, 1, 0, 0, 0, // wasm magic
+
+ 0x01, // type section
+ 0x16, // section length
+ 0x04, // number of types
+ // type 0: struct $StrA (field ($byte i8) ($word i16) ($pointer (ref $StrB)))
+ 0x5f, // struct
+ 0x03, // field count
+ 0x7a, 0x01, // mut i8
+ 0x79, 0x00, // i16
+ 0x6b, 0x01, 0x01, // mut ref $StrB
+ // type 1: struct $StrB (field ($next (ref null $StrA)))
+ 0x5f, // struct
+ 0x01, // field count
+ 0x6c, 0x00, 0x01, // mut ref null $StrA
+ // type 2: array $ArrC (mut (ref null $StrA))
+ 0x5e, // array
+ 0x6c, 0x00, 0x01, // mut ref null $StrA
+ // type 3: func
+ 0x60, // signature
+ 0x00, // number of params
+ 0x00, // number of results
+
+ 0x03, // function section
+ 0x02, // section length
+ 0x01, // number of functions
+ 0x03, // function 0: signature 3
+
+ // This is just so that function index 0 counts as declared.
+ 0x06, // global section
+ 0x07, // section length
+ 0x01, // number of globals
+ 0x6c, 0x03, // type of global: ref null $sig3
+ 0x00, // immutable
+ 0xd2, 0x00, 0x0b, // initializer: ref.func $func1; end
+
+ 0x07, // export section
+ 0x08, // section length
+ 0x01, // number of exports
+ 0x04, // length of "main"
+ 0x6d, 0x61, 0x69, 0x6e, // "main"
+ 0x00, // kind: function
+ 0x00, // index: 0
+
+ /////////////////////////// CODE SECTION //////////////////////////
+ 0x0a, // code section
+ 0x35, // section length
+ 0x01, // number of functions
+
+ 0x33, // function 0: size
+ 0x02, // number of locals
+ 0x01, 0x6c, 0x00, // (local $varA (ref null $StrA))
+ 0x01, 0x6c, 0x02, // (local $varC (ref null $ArrC))
+ // $varA := new $StrA(127, 32767, new $StrB(null))
+ 0x41, 0xFF, 0x00, // i32.const 127
+ 0x41, 0xFF, 0xFF, 0x01, // i32.const 32767
+ 0xfb, 0x30, 0x01, // rtt.canon $StrB
+ 0xfb, 0x02, 0x01, // struct.new_default_with_rtt $StrB
+ 0xfb, 0x30, 0x00, // rtt.canon $StrA
+ 0xfb, 0x01, 0x00, // struct.new_with_rtt $StrA
+ 0x22, 0x00, // local.tee $varA
+ // $varA.$pointer.$next = $varA
+ 0xfb, 0x03, 0x00, 0x02, // struct.get $StrA $pointer
+ 0x20, 0x00, // local.get $varA
+ 0xfb, 0x06, 0x01, 0x00, // struct.set $StrB $next
+ // $varC := new $ArrC($varA)
+ 0x20, 0x00, // local.get $varA -- value
+ 0x41, 0x01, // i32.const 1 -- length
+ 0xfb, 0x30, 0x02, // rtt.canon $ArrC
+ 0xfb, 0x11, 0x02, // array.new_with_rtt $ArrC
+ 0x21, 0x01, // local.set $varC
+ 0x0b, // end
+
+ /////////////////////////// NAME SECTION //////////////////////////
+ 0x00, // name section
+ 0x4d, // section length
+ 0x04, // length of "name"
+ 0x6e, 0x61, 0x6d, 0x65, // "name"
+
+ 0x02, // "local names" subsection
+ 0x0f, // length of subsection
+ 0x01, // number of entries
+ 0x00, // for function 0
+ 0x02, // number of entries for function 0
+ 0x00, // local index
+ 0x04, // length of "varA"
+ 0x76, 0x61, 0x72, 0x41, // "varA"
+ 0x01, // local index
+ 0x04, // length of "varB"
+ 0x76, 0x61, 0x72, 0x42, // "varB"
+
+ 0x04, // "type names" subsection
+ 0x13, // length of subsection
+ 0x03, // number of entries
+ 0x00, // type index
+ 0x04, // name length
+ 0x53, 0x74, 0x72, 0x41, // "StrA"
+ 0x01, // type index
+ 0x04, // name length
+ 0x53, 0x74, 0x72, 0x42, // "StrB"
+ 0x02, // type index
+ 0x04, // name length
+ 0x41, 0x72, 0x72, 0x43, // "ArrC"
+
+ 0x0a, // "field names" subsection
+ 0x20, // length of subsection
+ 0x02, // number of types
+ 0x00, // for type $StrA
+ 0x03, // number of entries for $StrA
+ 0x00, // field index 0
+ 0x04, // length of "byte"
+ 0x62, 0x79, 0x74, 0x65, // "byte"
+ 0x01, // field index 1
+ 0x04, // length of "word"
+ 0x77, 0x6f, 0x72, 0x64, // "word"
+ 0x02, // field index 2
+ 0x07, // length of "pointer"
+ 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x65, 0x72, // "pointer"
+ 0x01, // for type $StrB
+ 0x01, // number of entries for $StrB
+ 0x00, // field index
+ 0x04, // length of "next"
+ 0x6e, 0x65, 0x78, 0x74, // "next"
+];
+
+const getResult = msg => msg.result || InspectorTest.logMessage(msg);
+
+function setBreakpoint(offset, scriptId, scriptUrl) {
+ InspectorTest.log(
+ 'Setting breakpoint at offset ' + offset + ' on script ' + scriptUrl);
+ return Protocol.Debugger
+ .setBreakpoint({
+ 'location':
+ {'scriptId': scriptId, 'lineNumber': 0, 'columnNumber': offset}
+ })
+ .then(getResult);
+}
+
+Protocol.Debugger.onPaused(async msg => {
+ let loc = msg.params.callFrames[0].location;
+ InspectorTest.log('Paused:');
+ await session.logSourceLocation(loc);
+ InspectorTest.log('Scope:');
+ for (var frame of msg.params.callFrames) {
+ var functionName = frame.functionName || '(anonymous)';
+ var lineNumber = frame.location.lineNumber;
+ var columnNumber = frame.location.columnNumber;
+ InspectorTest.log(`at ${functionName} (${lineNumber}:${columnNumber}):`);
+ if (!/^wasm/.test(frame.url)) {
+ InspectorTest.log(' -- skipped');
+ continue;
+ }
+ for (var scope of frame.scopeChain) {
+ InspectorTest.logObject(' - scope (' + scope.type + '):');
+ var { objectId } = scope.object;
+ if (scope.type == 'wasm-expression-stack') {
+ objectId = (await Protocol.Runtime.callFunctionOn({
+ functionDeclaration: 'function() { return this.stack }',
+ objectId
+ })).result.result.objectId;
+ }
+ var properties =
+ await Protocol.Runtime.getProperties({objectId});
+ await WasmInspectorTest.dumpScopeProperties(properties);
+ if (scope.type === 'wasm-expression-stack' || scope.type === 'local') {
+ for (var value of properties.result.result) {
+ var details = await Protocol.Runtime.getProperties(
+ {objectId: value.value.objectId});
+ var nested_value =
+ details.result.result.find(({name}) => name === 'value');
+ if (!nested_value.value.objectId) continue;
+ details = await Protocol.Runtime.getProperties(
+ {objectId: nested_value.value.objectId});
+ InspectorTest.log(' object details:');
+ await WasmInspectorTest.dumpScopeProperties(details);
+ }
+ }
+ }
+ }
+
+ Protocol.Debugger.resume();
+});
+
+InspectorTest.runAsyncTestSuite([
+ async function test() {
+ await Protocol.Debugger.enable();
+ InspectorTest.log('Instantiating.');
+ // Spawn asynchronously:
+ WasmInspectorTest.instantiate(module_bytes);
+ InspectorTest.log(
+ 'Waiting for wasm script (ignoring first non-wasm script).');
+ // Ignore javascript and full module wasm script, get scripts for functions.
+ const [, {params: wasm_script}] =
+ await Protocol.Debugger.onceScriptParsed(2);
+ let offset = 107; // "local.set $varC" at the end.
+ await setBreakpoint(offset, wasm_script.scriptId, wasm_script.url);
+ InspectorTest.log('Calling main()');
+ await WasmInspectorTest.evalWithUrl('instance.exports.main()', 'runWasm');
+ InspectorTest.log('exports.main returned!');
+ }
+]);
diff --git a/deps/v8/test/inspector/debugger/wasm-gc-in-debug-break-expected.txt b/deps/v8/test/inspector/debugger/wasm-gc-in-debug-break-expected.txt
new file mode 100644
index 00000000000..a9e1c768136
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-gc-in-debug-break-expected.txt
@@ -0,0 +1,10 @@
+Tests GC within DebugBreak
+
+Running test: test
+Script wasm://wasm/38e28046 byte offset 51: Wasm opcode 0x20 (kExprLocalGet)
+GC triggered
+Debugger.resume
+Hello World (v8://test/instantiate:11:36)
+ at bar (v8://test/instantiate:11:36)
+exports.main returned!
+
diff --git a/deps/v8/test/inspector/debugger/wasm-gc-in-debug-break.js b/deps/v8/test/inspector/debugger/wasm-gc-in-debug-break.js
new file mode 100644
index 00000000000..f6ad6b6bfa5
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-gc-in-debug-break.js
@@ -0,0 +1,50 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-reftypes --expose-gc
+utils.load('test/inspector/wasm-inspector-test.js');
+
+let {session, contextGroup, Protocol} =
+ InspectorTest.start('Tests GC within DebugBreak');
+session.setupScriptMap();
+
+let builder = new WasmModuleBuilder();
+
+let f_index = builder.addImport('foo', 'bar', kSig_v_r);
+
+builder.addFunction('wasm_A', kSig_v_r)
+ .addBody([
+ kExprLocalGet, 0, // -
+ kExprCallFunction, f_index // -
+ ])
+ .exportAs('main');
+
+let module_bytes = builder.toArray();
+
+Protocol.Debugger.onPaused(async message => {
+ let frames = message.params.callFrames;
+ await session.logSourceLocation(frames[0].location);
+ await Protocol.Runtime.evaluate({expression: 'gc()'});
+ InspectorTest.log('GC triggered');
+ let action = 'resume';
+ InspectorTest.log('Debugger.' + action);
+ await Protocol.Debugger[action]();
+})
+
+contextGroup.addScript(`
+function test() {
+ debug(instance.exports.main);
+ instance.exports.main({val: "Hello World"});
+}
+//# sourceURL=test.js`);
+
+InspectorTest.runAsyncTestSuite([async function test() {
+ utils.setLogConsoleApiMessageCalls(true);
+ await Protocol.Debugger.enable();
+ await WasmInspectorTest.instantiate(
+ module_bytes, 'instance', '{foo: {bar: (x) => console.log(x.val)}}');
+ await Protocol.Runtime.evaluate(
+ {expression: 'test()', includeCommandLineAPI: true});
+ InspectorTest.log('exports.main returned!');
+}]);
diff --git a/deps/v8/test/inspector/debugger/wasm-get-breakable-locations-byte-offsets.js b/deps/v8/test/inspector/debugger/wasm-get-breakable-locations-byte-offsets.js
index 931ce978c12..beb6ca9fa9a 100644
--- a/deps/v8/test/inspector/debugger/wasm-get-breakable-locations-byte-offsets.js
+++ b/deps/v8/test/inspector/debugger/wasm-get-breakable-locations-byte-offsets.js
@@ -22,8 +22,8 @@ var func_idx = builder.addFunction('helper', kSig_v_v)
builder.addFunction('main', kSig_v_i)
.addBody([
kExprLocalGet, 0,
- kExprIf, kWasmStmt,
- kExprBlock, kWasmStmt,
+ kExprIf, kWasmVoid,
+ kExprBlock, kWasmVoid,
kExprCallFunction, func_idx,
kExprEnd,
kExprEnd
diff --git a/deps/v8/test/inspector/debugger/wasm-inspect-many-registers.js b/deps/v8/test/inspector/debugger/wasm-inspect-many-registers.js
index b3e3c38c582..d300b8bb9a1 100644
--- a/deps/v8/test/inspector/debugger/wasm-inspect-many-registers.js
+++ b/deps/v8/test/inspector/debugger/wasm-inspect-many-registers.js
@@ -22,8 +22,15 @@ Protocol.Debugger.onPaused(async msg => {
var frame = msg.params.callFrames[0];
for (var scope of frame.scopeChain) {
if (scope.type == 'module') continue;
+ var { objectId } = scope.object;
+ if (scope.type == 'wasm-expression-stack') {
+ objectId = (await Protocol.Runtime.callFunctionOn({
+ functionDeclaration: 'function() { return this.stack }',
+ objectId
+ })).result.result.objectId;
+ }
var scope_properties =
- await Protocol.Runtime.getProperties({objectId: scope.object.objectId});
+ await Protocol.Runtime.getProperties({objectId});
let str = (await Promise.all(scope_properties.result.result.map(
elem => WasmInspectorTest.getWasmValue(elem.value))))
.join(', ');
diff --git a/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint-expected.txt b/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint-expected.txt
index fd79e43626f..cb61260730d 100644
--- a/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint-expected.txt
@@ -35,3 +35,37 @@ Paused at v8://test/instantiate with reason "instrumentation".
Paused at wasm://wasm/20da547a with reason "instrumentation".
Script wasm://wasm/20da547a byte offset 26: Wasm opcode 0x01 (kExprNop)
Done.
+
+Running test: testBreakInExportedFunction
+Setting instrumentation breakpoint
+{
+ id : <messageId>
+ result : {
+ breakpointId : <breakpointId>
+ }
+}
+Instantiating wasm module.
+Paused at v8://test/instantiate with reason "instrumentation".
+Calling exported function 'func' (should trigger a breakpoint).
+Paused at v8://test/call_func with reason "instrumentation".
+Paused at wasm://wasm/8c388106 with reason "instrumentation".
+Script wasm://wasm/8c388106 byte offset 33: Wasm opcode 0x01 (kExprNop)
+Calling exported function 'func' a second time (should trigger no breakpoint).
+Paused at v8://test/call_func with reason "instrumentation".
+Done.
+
+Running test: testBreakOnlyWithSourceMap
+Setting instrumentation breakpoint for source maps only
+{
+ id : <messageId>
+ result : {
+ breakpointId : <breakpointId>
+ }
+}
+Instantiating wasm module without source map.
+Calling exported function 'func' (should trigger no breakpoint).
+Instantiating wasm module with source map.
+Calling exported function 'func' (should trigger a breakpoint).
+Paused at wasm://wasm/c8e3a856 with reason "instrumentation".
+Script wasm://wasm/c8e3a856 byte offset 33: Wasm opcode 0x01 (kExprNop)
+Done.
diff --git a/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint.js b/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint.js
index feeff659993..9f1d897daa8 100644
--- a/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint.js
+++ b/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint.js
@@ -18,9 +18,6 @@ Protocol.Debugger.onPaused(async msg => {
Protocol.Debugger.resume();
});
-// TODO(clemensb): Add test for 'beforeScriptWithSourceMapExecution'.
-// TODO(clemensb): Add test for module without start function.
-
InspectorTest.runAsyncTestSuite([
async function testBreakInStartFunction() {
const builder = new WasmModuleBuilder();
@@ -64,5 +61,59 @@ InspectorTest.runAsyncTestSuite([
await WasmInspectorTest.instantiate(builder.toArray());
InspectorTest.log('Done.');
await Protocol.Debugger.disable();
- }
+ },
+
+ async function testBreakInExportedFunction() {
+ const builder = new WasmModuleBuilder();
+ const func =
+ builder.addFunction('func', kSig_v_v).addBody([kExprNop]).exportFunc();
+
+ await Protocol.Debugger.enable();
+ InspectorTest.log('Setting instrumentation breakpoint');
+ InspectorTest.logMessage(
+ await Protocol.Debugger.setInstrumentationBreakpoint(
+ {instrumentation: 'beforeScriptExecution'}));
+ InspectorTest.log('Instantiating wasm module.');
+ await WasmInspectorTest.instantiate(builder.toArray());
+ InspectorTest.log(
+ 'Calling exported function \'func\' (should trigger a breakpoint).');
+ await WasmInspectorTest.evalWithUrl('instance.exports.func()', 'call_func');
+ InspectorTest.log(
+ 'Calling exported function \'func\' a second time ' +
+ '(should trigger no breakpoint).');
+ await WasmInspectorTest.evalWithUrl('instance.exports.func()', 'call_func');
+ InspectorTest.log('Done.');
+ await Protocol.Debugger.disable();
+ },
+
+ async function testBreakOnlyWithSourceMap() {
+ const builder = new WasmModuleBuilder();
+ const func =
+ builder.addFunction('func', kSig_v_v).addBody([kExprNop]).exportFunc();
+ const bytes_no_source_map = builder.toArray();
+ builder.addCustomSection('sourceMappingURL', [3, 97, 98, 99]);
+ const bytes_with_source_map = builder.toArray();
+
+ await Protocol.Debugger.enable();
+ InspectorTest.log(
+ 'Setting instrumentation breakpoint for source maps only');
+ InspectorTest.logMessage(
+ await Protocol.Debugger.setInstrumentationBreakpoint(
+ {instrumentation: 'beforeScriptWithSourceMapExecution'}));
+
+ InspectorTest.log('Instantiating wasm module without source map.');
+ await WasmInspectorTest.instantiate(bytes_no_source_map);
+ InspectorTest.log(
+ 'Calling exported function \'func\' (should trigger no breakpoint).');
+ await WasmInspectorTest.evalWithUrl('instance.exports.func()', 'call_func');
+
+ InspectorTest.log('Instantiating wasm module with source map.');
+ await WasmInspectorTest.instantiate(bytes_with_source_map);
+ InspectorTest.log(
+ 'Calling exported function \'func\' (should trigger a breakpoint).');
+ await WasmInspectorTest.evalWithUrl('instance.exports.func()', 'call_func');
+ InspectorTest.log('Done.');
+ await Protocol.Debugger.disable();
+ },
+
]);
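
The two new cases above exercise both flavours of Debugger.setInstrumentationBreakpoint. A condensed sketch of the protocol calls involved, assuming the inspector test harness and an async test body:

await Protocol.Debugger.enable();
// Pause before every script starts executing:
await Protocol.Debugger.setInstrumentationBreakpoint(
    {instrumentation: 'beforeScriptExecution'});
// ...or only before scripts that carry a source map; per the test above, a
// wasm module with a 'sourceMappingURL' custom section counts as such:
await Protocol.Debugger.setInstrumentationBreakpoint(
    {instrumentation: 'beforeScriptWithSourceMapExecution'});
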
diff --git a/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt b/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
index a7c8d9eedb8..7778b57f417 100644
--- a/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
@@ -15,6 +15,7 @@ Script wasm://wasm/e33badc2 byte offset 169: Wasm opcode 0x20 (kExprLocalGet)
Scope:
at C (interpreted) (0:169):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -28,8 +29,7 @@ at C (interpreted) (0:169):
tables: "$exported_table": (Table)
at B (liftoff) (0:158):
- scope (wasm-expression-stack):
- 0: 42 (i32)
- 1: 3 (i32)
+ stack: "0": 42 (i32), "1": 3 (i32)
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -46,6 +46,7 @@ at B (liftoff) (0:158):
tables: "$exported_table": (Table)
at A (liftoff) (0:128):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
@@ -64,7 +65,7 @@ Script wasm://wasm/e33badc2 byte offset 171: Wasm opcode 0x24 (kExprGlobalSet)
Scope:
at C (interpreted) (0:171):
- scope (wasm-expression-stack):
- 0: 42 (i32)
+ stack: "0": 42 (i32)
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -78,8 +79,7 @@ at C (interpreted) (0:171):
tables: "$exported_table": (Table)
at B (liftoff) (0:158):
- scope (wasm-expression-stack):
- 0: 42 (i32)
- 1: 3 (i32)
+ stack: "0": 42 (i32), "1": 3 (i32)
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -96,6 +96,7 @@ at B (liftoff) (0:158):
tables: "$exported_table": (Table)
at A (liftoff) (0:128):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
@@ -114,6 +115,7 @@ Script wasm://wasm/e33badc2 byte offset 173: Wasm opcode 0x41 (kExprI32Const)
Scope:
at C (interpreted) (0:173):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -127,8 +129,7 @@ at C (interpreted) (0:173):
tables: "$exported_table": (Table)
at B (liftoff) (0:158):
- scope (wasm-expression-stack):
- 0: 42 (i32)
- 1: 3 (i32)
+ stack: "0": 42 (i32), "1": 3 (i32)
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -145,6 +146,7 @@ at B (liftoff) (0:158):
tables: "$exported_table": (Table)
at A (liftoff) (0:128):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
@@ -163,7 +165,7 @@ Script wasm://wasm/e33badc2 byte offset 175: Wasm opcode 0x21 (kExprLocalSet)
Scope:
at C (interpreted) (0:175):
- scope (wasm-expression-stack):
- 0: 47 (i32)
+ stack: "0": 47 (i32)
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -177,8 +179,7 @@ at C (interpreted) (0:175):
tables: "$exported_table": (Table)
at B (liftoff) (0:158):
- scope (wasm-expression-stack):
- 0: 42 (i32)
- 1: 3 (i32)
+ stack: "0": 42 (i32), "1": 3 (i32)
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -195,6 +196,7 @@ at B (liftoff) (0:158):
tables: "$exported_table": (Table)
at A (liftoff) (0:128):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
@@ -213,6 +215,7 @@ Script wasm://wasm/e33badc2 byte offset 177: Wasm opcode 0x0b (kExprEnd)
Scope:
at C (interpreted) (0:177):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 47 (i32)
@@ -226,8 +229,7 @@ at C (interpreted) (0:177):
tables: "$exported_table": (Table)
at B (liftoff) (0:158):
- scope (wasm-expression-stack):
- 0: 42 (i32)
- 1: 3 (i32)
+ stack: "0": 42 (i32), "1": 3 (i32)
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -244,6 +246,7 @@ at B (liftoff) (0:158):
tables: "$exported_table": (Table)
at A (liftoff) (0:128):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
@@ -262,8 +265,7 @@ Script wasm://wasm/e33badc2 byte offset 160: Wasm opcode 0x1a (kExprDrop)
Scope:
at B (liftoff) (0:160):
- scope (wasm-expression-stack):
- 0: 42 (i32)
- 1: 3 (i32)
+ stack: "0": 42 (i32), "1": 3 (i32)
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -280,6 +282,7 @@ at B (liftoff) (0:160):
tables: "$exported_table": (Table)
at A (liftoff) (0:128):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
@@ -298,7 +301,7 @@ Script wasm://wasm/e33badc2 byte offset 161: Wasm opcode 0x1a (kExprDrop)
Scope:
at B (liftoff) (0:161):
- scope (wasm-expression-stack):
- 0: 42 (i32)
+ stack: "0": 42 (i32)
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -315,6 +318,7 @@ at B (liftoff) (0:161):
tables: "$exported_table": (Table)
at A (liftoff) (0:128):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
@@ -333,6 +337,7 @@ Script wasm://wasm/e33badc2 byte offset 162: Wasm opcode 0x0b (kExprEnd)
Scope:
at B (liftoff) (0:162):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -349,6 +354,7 @@ at B (liftoff) (0:162):
tables: "$exported_table": (Table)
at A (liftoff) (0:128):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
@@ -367,6 +373,7 @@ Script wasm://wasm/e33badc2 byte offset 130: Wasm opcode 0x0b (kExprEnd)
Scope:
at A (liftoff) (0:130):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
diff --git a/deps/v8/test/inspector/debugger/wasm-scripts.js b/deps/v8/test/inspector/debugger/wasm-scripts.js
index 0849840abe4..5c0162e4cdf 100644
--- a/deps/v8/test/inspector/debugger/wasm-scripts.js
+++ b/deps/v8/test/inspector/debugger/wasm-scripts.js
@@ -22,7 +22,7 @@ function createModule(...customSections) {
var builder = new WasmModuleBuilder();
builder.addFunction('nopFunction', kSig_v_v).addBody([kExprNop]);
builder.addFunction('main', kSig_v_v)
- .addBody([kExprBlock, kWasmStmt, kExprI32Const, 2, kExprDrop, kExprEnd])
+ .addBody([kExprBlock, kWasmVoid, kExprI32Const, 2, kExprDrop, kExprEnd])
.exportAs('main');
for (var { name, value } of customSections) {
builder.addCustomSection(name, value);
diff --git a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-breaks-on-first-breakable-location.js b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-breaks-on-first-breakable-location.js
index 8c183963187..ae59334b937 100644
--- a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-breaks-on-first-breakable-location.js
+++ b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-breaks-on-first-breakable-location.js
@@ -21,8 +21,8 @@ var func_idx = builder.addFunction('helper', kSig_v_v)
builder.addFunction('main', kSig_v_i)
.addBody([
kExprLocalGet, 0,
- kExprIf, kWasmStmt,
- kExprBlock, kWasmStmt,
+ kExprIf, kWasmVoid,
+ kExprBlock, kWasmVoid,
kExprCallFunction, func_idx,
kExprEnd,
kExprEnd
diff --git a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt
index 9ab6c323bf5..49836f93aba 100644
--- a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt
@@ -10,6 +10,7 @@ Script wasm://wasm/0c10a5fe byte offset 38: Wasm opcode 0x01 (kExprNop)
Scope:
at wasm_A (0:38):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
- scope (module):
instance: exports: "main" (Function)
@@ -17,6 +18,7 @@ at wasm_A (0:38):
functions: "$wasm_A": (Function), "$wasm_B": (Function)
at wasm_B (0:56):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 3 (i32)
- scope (module):
@@ -39,6 +41,7 @@ Script wasm://wasm/0c10a5fe byte offset 39: Wasm opcode 0x01 (kExprNop)
Scope:
at wasm_A (0:39):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
- scope (module):
instance: exports: "main" (Function)
@@ -46,6 +49,7 @@ at wasm_A (0:39):
functions: "$wasm_A": (Function), "$wasm_B": (Function)
at wasm_B (0:56):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 3 (i32)
- scope (module):
@@ -59,6 +63,7 @@ Script wasm://wasm/0c10a5fe byte offset 45: Wasm opcode 0x20 (kExprLocalGet)
Scope:
at wasm_B (0:45):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 3 (i32)
- scope (module):
@@ -72,7 +77,7 @@ Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04 (kExprIf)
Scope:
at wasm_B (0:47):
- scope (wasm-expression-stack):
- 0: 3 (i32)
+ stack: "0": 3 (i32)
- scope (local):
$var0: 3 (i32)
- scope (module):
@@ -86,6 +91,7 @@ Script wasm://wasm/0c10a5fe byte offset 49: Wasm opcode 0x20 (kExprLocalGet)
Scope:
at wasm_B (0:49):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 3 (i32)
- scope (module):
@@ -99,7 +105,7 @@ Script wasm://wasm/0c10a5fe byte offset 51: Wasm opcode 0x41 (kExprI32Const)
Scope:
at wasm_B (0:51):
- scope (wasm-expression-stack):
- 0: 3 (i32)
+ stack: "0": 3 (i32)
- scope (local):
$var0: 3 (i32)
- scope (module):
@@ -113,8 +119,7 @@ Script wasm://wasm/0c10a5fe byte offset 53: Wasm opcode 0x6b (kExprI32Sub)
Scope:
at wasm_B (0:53):
- scope (wasm-expression-stack):
- 0: 3 (i32)
- 1: 1 (i32)
+ stack: "0": 3 (i32), "1": 1 (i32)
- scope (local):
$var0: 3 (i32)
- scope (module):
@@ -128,7 +133,7 @@ Script wasm://wasm/0c10a5fe byte offset 54: Wasm opcode 0x21 (kExprLocalSet)
Scope:
at wasm_B (0:54):
- scope (wasm-expression-stack):
- 0: 2 (i32)
+ stack: "0": 2 (i32)
- scope (local):
$var0: 3 (i32)
- scope (module):
@@ -142,6 +147,7 @@ Script wasm://wasm/0c10a5fe byte offset 38: Wasm opcode 0x01 (kExprNop)
Scope:
at wasm_A (0:38):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
- scope (module):
instance: exports: "main" (Function)
@@ -149,6 +155,7 @@ at wasm_A (0:38):
functions: "$wasm_A": (Function), "$wasm_B": (Function)
at wasm_B (0:56):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 2 (i32)
- scope (module):
@@ -162,6 +169,7 @@ Script wasm://wasm/0c10a5fe byte offset 39: Wasm opcode 0x01 (kExprNop)
Scope:
at wasm_A (0:39):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
- scope (module):
instance: exports: "main" (Function)
@@ -169,6 +177,7 @@ at wasm_A (0:39):
functions: "$wasm_A": (Function), "$wasm_B": (Function)
at wasm_B (0:56):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 2 (i32)
- scope (module):
@@ -182,6 +191,7 @@ Script wasm://wasm/0c10a5fe byte offset 45: Wasm opcode 0x20 (kExprLocalGet)
Scope:
at wasm_B (0:45):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 2 (i32)
- scope (module):
@@ -195,7 +205,7 @@ Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04 (kExprIf)
Scope:
at wasm_B (0:47):
- scope (wasm-expression-stack):
- 0: 2 (i32)
+ stack: "0": 2 (i32)
- scope (local):
$var0: 2 (i32)
- scope (module):
@@ -209,6 +219,7 @@ Script wasm://wasm/0c10a5fe byte offset 49: Wasm opcode 0x20 (kExprLocalGet)
Scope:
at wasm_B (0:49):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 2 (i32)
- scope (module):
@@ -222,7 +233,7 @@ Script wasm://wasm/0c10a5fe byte offset 51: Wasm opcode 0x41 (kExprI32Const)
Scope:
at wasm_B (0:51):
- scope (wasm-expression-stack):
- 0: 2 (i32)
+ stack: "0": 2 (i32)
- scope (local):
$var0: 2 (i32)
- scope (module):
@@ -236,8 +247,7 @@ Script wasm://wasm/0c10a5fe byte offset 53: Wasm opcode 0x6b (kExprI32Sub)
Scope:
at wasm_B (0:53):
- scope (wasm-expression-stack):
- 0: 2 (i32)
- 1: 1 (i32)
+ stack: "0": 2 (i32), "1": 1 (i32)
- scope (local):
$var0: 2 (i32)
- scope (module):
@@ -251,7 +261,7 @@ Script wasm://wasm/0c10a5fe byte offset 54: Wasm opcode 0x21 (kExprLocalSet)
Scope:
at wasm_B (0:54):
- scope (wasm-expression-stack):
- 0: 1 (i32)
+ stack: "0": 1 (i32)
- scope (local):
$var0: 2 (i32)
- scope (module):
@@ -265,6 +275,7 @@ Script wasm://wasm/0c10a5fe byte offset 38: Wasm opcode 0x01 (kExprNop)
Scope:
at wasm_A (0:38):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
- scope (module):
instance: exports: "main" (Function)
@@ -272,6 +283,7 @@ at wasm_A (0:38):
functions: "$wasm_A": (Function), "$wasm_B": (Function)
at wasm_B (0:56):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 1 (i32)
- scope (module):
@@ -285,6 +297,7 @@ Script wasm://wasm/0c10a5fe byte offset 39: Wasm opcode 0x01 (kExprNop)
Scope:
at wasm_A (0:39):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
- scope (module):
instance: exports: "main" (Function)
@@ -292,6 +305,7 @@ at wasm_A (0:39):
functions: "$wasm_A": (Function), "$wasm_B": (Function)
at wasm_B (0:56):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 1 (i32)
- scope (module):
@@ -305,6 +319,7 @@ Script wasm://wasm/0c10a5fe byte offset 45: Wasm opcode 0x20 (kExprLocalGet)
Scope:
at wasm_B (0:45):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 1 (i32)
- scope (module):
@@ -318,7 +333,7 @@ Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04 (kExprIf)
Scope:
at wasm_B (0:47):
- scope (wasm-expression-stack):
- 0: 1 (i32)
+ stack: "0": 1 (i32)
- scope (local):
$var0: 1 (i32)
- scope (module):
@@ -332,6 +347,7 @@ Script wasm://wasm/0c10a5fe byte offset 49: Wasm opcode 0x20 (kExprLocalGet)
Scope:
at wasm_B (0:49):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 1 (i32)
- scope (module):
@@ -345,7 +361,7 @@ Script wasm://wasm/0c10a5fe byte offset 51: Wasm opcode 0x41 (kExprI32Const)
Scope:
at wasm_B (0:51):
- scope (wasm-expression-stack):
- 0: 1 (i32)
+ stack: "0": 1 (i32)
- scope (local):
$var0: 1 (i32)
- scope (module):
@@ -359,8 +375,7 @@ Script wasm://wasm/0c10a5fe byte offset 53: Wasm opcode 0x6b (kExprI32Sub)
Scope:
at wasm_B (0:53):
- scope (wasm-expression-stack):
- 0: 1 (i32)
- 1: 1 (i32)
+ stack: "0": 1 (i32), "1": 1 (i32)
- scope (local):
$var0: 1 (i32)
- scope (module):
@@ -374,7 +389,7 @@ Script wasm://wasm/0c10a5fe byte offset 54: Wasm opcode 0x21 (kExprLocalSet)
Scope:
at wasm_B (0:54):
- scope (wasm-expression-stack):
- 0: 0 (i32)
+ stack: "0": 0 (i32)
- scope (local):
$var0: 1 (i32)
- scope (module):
@@ -388,6 +403,7 @@ Script wasm://wasm/0c10a5fe byte offset 38: Wasm opcode 0x01 (kExprNop)
Scope:
at wasm_A (0:38):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
- scope (module):
instance: exports: "main" (Function)
@@ -395,6 +411,7 @@ at wasm_A (0:38):
functions: "$wasm_A": (Function), "$wasm_B": (Function)
at wasm_B (0:56):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 0 (i32)
- scope (module):
@@ -408,6 +425,7 @@ Script wasm://wasm/0c10a5fe byte offset 39: Wasm opcode 0x01 (kExprNop)
Scope:
at wasm_A (0:39):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
- scope (module):
instance: exports: "main" (Function)
@@ -415,6 +433,7 @@ at wasm_A (0:39):
functions: "$wasm_A": (Function), "$wasm_B": (Function)
at wasm_B (0:56):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 0 (i32)
- scope (module):
@@ -428,6 +447,7 @@ Script wasm://wasm/0c10a5fe byte offset 45: Wasm opcode 0x20 (kExprLocalGet)
Scope:
at wasm_B (0:45):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 0 (i32)
- scope (module):
@@ -441,7 +461,7 @@ Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04 (kExprIf)
Scope:
at wasm_B (0:47):
- scope (wasm-expression-stack):
- 0: 0 (i32)
+ stack: "0": 0 (i32)
- scope (local):
$var0: 0 (i32)
- scope (module):
@@ -455,6 +475,7 @@ Script wasm://wasm/0c10a5fe byte offset 61: Wasm opcode 0x0b (kExprEnd)
Scope:
at wasm_B (0:61):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 0 (i32)
- scope (module):
diff --git a/deps/v8/test/inspector/debugger/wasm-set-breakpoint.js b/deps/v8/test/inspector/debugger/wasm-set-breakpoint.js
index 5db1e369794..1b28959f7ac 100644
--- a/deps/v8/test/inspector/debugger/wasm-set-breakpoint.js
+++ b/deps/v8/test/inspector/debugger/wasm-set-breakpoint.js
@@ -17,9 +17,9 @@ const func_a =
const func_b = builder.addFunction('wasm_B', kSig_v_i)
.addBody([
// clang-format off
- kExprLoop, kWasmStmt, // while
+ kExprLoop, kWasmVoid, // while
kExprLocalGet, 0, // -
- kExprIf, kWasmStmt, // if <param0> != 0
+ kExprIf, kWasmVoid, // if <param0> != 0
kExprLocalGet, 0, // -
kExprI32Const, 1, // -
kExprI32Sub, // -
diff --git a/deps/v8/test/inspector/debugger/wasm-source.js b/deps/v8/test/inspector/debugger/wasm-source.js
index 3fd522f8d02..d13fb247cd6 100644
--- a/deps/v8/test/inspector/debugger/wasm-source.js
+++ b/deps/v8/test/inspector/debugger/wasm-source.js
@@ -20,7 +20,7 @@ var sig_index = builder.addType(kSig_v_v);
builder.addFunction('main', kSig_v_v)
.addBody([
- kExprBlock, kWasmStmt, kExprI32Const, 0, kExprCallIndirect, sig_index,
+ kExprBlock, kWasmVoid, kExprI32Const, 0, kExprCallIndirect, sig_index,
kTableZero, kExprEnd
])
.exportAs('main');
diff --git a/deps/v8/test/inspector/debugger/wasm-stack-check.js b/deps/v8/test/inspector/debugger/wasm-stack-check.js
index 4189abd3e1f..cd2384acecb 100644
--- a/deps/v8/test/inspector/debugger/wasm-stack-check.js
+++ b/deps/v8/test/inspector/debugger/wasm-stack-check.js
@@ -68,9 +68,16 @@ async function inspect(frame) {
// Inspect only the top wasm frame.
for (var scope of frame.scopeChain) {
if (scope.type == 'module') continue;
- var scope_properties =
- await Protocol.Runtime.getProperties({objectId: scope.object.objectId});
- let str = (await Promise.all(scope_properties.result.result.map(
+ var { objectId } = scope.object;
+ if (scope.type == 'wasm-expression-stack') {
+ objectId = (await Protocol.Runtime.callFunctionOn({
+ functionDeclaration: 'function() { return this.stack }',
+ objectId
+ })).result.result.objectId;
+ }
+ var properties =
+ await Protocol.Runtime.getProperties({objectId});
+ let str = (await Promise.all(properties.result.result.map(
elem => WasmInspectorTest.getWasmValue(elem.value))))
.join(', ');
line.push(`${scope.type}: [${str}]`);
diff --git a/deps/v8/test/inspector/debugger/wasm-stack.js b/deps/v8/test/inspector/debugger/wasm-stack.js
index e5a19e6fe8a..961ddc84fd8 100644
--- a/deps/v8/test/inspector/debugger/wasm-stack.js
+++ b/deps/v8/test/inspector/debugger/wasm-stack.js
@@ -19,7 +19,7 @@ var call_imported_idx = builder.addFunction('call_func', kSig_v_v)
// Open a block in order to make the positions more interesting...
builder.addFunction('main', kSig_v_v)
.addBody(
- [kExprBlock, kWasmStmt, kExprCallFunction, call_imported_idx, kExprEnd])
+ [kExprBlock, kWasmVoid, kExprCallFunction, call_imported_idx, kExprEnd])
.exportAs('main');
var module_bytes = builder.toArray();
diff --git a/deps/v8/test/inspector/debugger/wasm-step-from-non-breakable-position.js b/deps/v8/test/inspector/debugger/wasm-step-from-non-breakable-position.js
index 6371e80874d..aa4b92b59b3 100644
--- a/deps/v8/test/inspector/debugger/wasm-step-from-non-breakable-position.js
+++ b/deps/v8/test/inspector/debugger/wasm-step-from-non-breakable-position.js
@@ -12,7 +12,7 @@ session.setupScriptMap();
var builder = new WasmModuleBuilder();
var callee = builder.addFunction('callee', kSig_v_v)
- .addBody([kExprBlock, kWasmStmt, kExprEnd])
+ .addBody([kExprBlock, kWasmVoid, kExprEnd])
.index;
var main = builder.addFunction('main', kSig_v_i)
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-no-opcode-merging.js b/deps/v8/test/inspector/debugger/wasm-stepping-no-opcode-merging.js
index 4e4135a306c..a89db04cdb0 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-no-opcode-merging.js
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-no-opcode-merging.js
@@ -54,8 +54,15 @@ async function printPauseLocationAndStep(msg) {
let scopes = {};
for (let scope of frame.scopeChain) {
if (scope.type == 'module') continue;
- let scope_properties =
- await Protocol.Runtime.getProperties({objectId: scope.object.objectId});
+ var { objectId } = scope.object;
+ if (scope.type == 'wasm-expression-stack') {
+ objectId = (await Protocol.Runtime.callFunctionOn({
+ functionDeclaration: 'function() { return this.stack }',
+ objectId
+ })).result.result.objectId;
+ }
+ var scope_properties =
+ await Protocol.Runtime.getProperties({objectId});
scopes[scope.type] = await Promise.all(scope_properties.result.result.map(
elem => WasmInspectorTest.getWasmValue(elem.value)));
}
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-with-skiplist.js b/deps/v8/test/inspector/debugger/wasm-stepping-with-skiplist.js
index e315a565e0d..32301dc7bdc 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-with-skiplist.js
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-with-skiplist.js
@@ -18,9 +18,9 @@ const func_a_idx = func_a.index;
const func_b = builder.addFunction('wasm_B', kSig_v_i)
.addBody([
// clang-format off
- kExprLoop, kWasmStmt, // while
+ kExprLoop, kWasmVoid, // while
kExprLocalGet, 0, // -
- kExprIf, kWasmStmt, // if <param0> != 0
+ kExprIf, kWasmVoid, // if <param0> != 0
kExprLocalGet, 0, // -
kExprI32Const, 1, // -
kExprI32Sub, // -
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js
index 6cece203aeb..7d806281312 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js
@@ -17,9 +17,9 @@ var func_a_idx =
builder.addFunction('wasm_B', kSig_v_i)
.addBody([
// clang-format off
- kExprLoop, kWasmStmt, // while
+ kExprLoop, kWasmVoid, // while
kExprLocalGet, 0, // -
- kExprIf, kWasmStmt, // if <param0> != 0
+ kExprIf, kWasmVoid, // if <param0> != 0
kExprLocalGet, 0, // -
kExprI32Const, 1, // -
kExprI32Sub, // -
@@ -91,8 +91,15 @@ async function waitForPauseAndStep(stepAction) {
if (scope.type === 'global' || scope.type === 'module') {
InspectorTest.logObject(' -- skipped');
} else {
+ var { objectId } = scope.object;
+ if (scope.type == 'wasm-expression-stack') {
+ objectId = (await Protocol.Runtime.callFunctionOn({
+ functionDeclaration: 'function() { return this.stack }',
+ objectId
+ })).result.result.objectId;
+ }
let properties = await Protocol.Runtime.getProperties(
- {objectId: scope.object.objectId});
+ {objectId});
for (let {name, value} of properties.result.result) {
value = await WasmInspectorTest.getWasmValue(value);
InspectorTest.log(` ${name}: ${value}`);
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping.js b/deps/v8/test/inspector/debugger/wasm-stepping.js
index 83ca29606d4..30deadaaefc 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping.js
+++ b/deps/v8/test/inspector/debugger/wasm-stepping.js
@@ -17,9 +17,9 @@ var func_a_idx =
var func_b = builder.addFunction('wasm_B', kSig_v_i)
.addBody([
// clang-format off
- kExprLoop, kWasmStmt, // while
+ kExprLoop, kWasmVoid, // while
kExprLocalGet, 0, // -
- kExprIf, kWasmStmt, // if <param0> != 0
+ kExprIf, kWasmVoid, // if <param0> != 0
kExprLocalGet, 0, // -
kExprI32Const, 1, // -
kExprI32Sub, // -
diff --git a/deps/v8/test/inspector/inspector-test.cc b/deps/v8/test/inspector/inspector-test.cc
index 6a7e5e23e43..bc785e919d5 100644
--- a/deps/v8/test/inspector/inspector-test.cc
+++ b/deps/v8/test/inspector/inspector-test.cc
@@ -322,8 +322,9 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
ToVector(args.GetIsolate(), args[1].As<v8::String>());
RunSyncTask(backend_runner_, [&context_group_id, name](IsolateData* data) {
- data->CreateContext(context_group_id,
- v8_inspector::StringView(name.data(), name.size()));
+ CHECK(data->CreateContext(
+ context_group_id,
+ v8_inspector::StringView(name.data(), name.size())));
});
}
diff --git a/deps/v8/test/inspector/inspector.status b/deps/v8/test/inspector/inspector.status
index a98df5e0104..5e13f52dfd7 100644
--- a/deps/v8/test/inspector/inspector.status
+++ b/deps/v8/test/inspector/inspector.status
@@ -124,6 +124,7 @@
['arch == riscv64', {
# SIMD support is still in progress.
'debugger/wasm-scope-info*': [SKIP],
+ 'debugger/wasm-step-after-trap': [SKIP],
}], # 'arch == riscv64'
################################################################################
@@ -144,6 +145,9 @@
# This test is just slow on TSan, and TSan coverage is not needed to test
# that we do not run OOM. Thus skip it on TSan.
'debugger/wasm-step-a-lot': [SKIP],
+
+ # Another slow test that does not need to run on TSan.
+ 'debugger/wasm-inspect-many-registers': [SKIP],
}], # 'tsan == True'
##############################################################################
diff --git a/deps/v8/test/inspector/isolate-data.cc b/deps/v8/test/inspector/isolate-data.cc
index 52eb76eabbd..35f51c1fcd3 100644
--- a/deps/v8/test/inspector/isolate-data.cc
+++ b/deps/v8/test/inspector/isolate-data.cc
@@ -72,11 +72,14 @@ IsolateData* IsolateData::FromContext(v8::Local<v8::Context> context) {
int IsolateData::CreateContextGroup() {
int context_group_id = ++last_context_group_id_;
- CreateContext(context_group_id, v8_inspector::StringView());
+ if (!CreateContext(context_group_id, v8_inspector::StringView())) {
+ DCHECK(isolate_->IsExecutionTerminating());
+ return -1;
+ }
return context_group_id;
}
-void IsolateData::CreateContext(int context_group_id,
+bool IsolateData::CreateContext(int context_group_id,
v8_inspector::StringView name) {
v8::HandleScope handle_scope(isolate_.get());
v8::Local<v8::ObjectTemplate> global_template =
@@ -87,12 +90,14 @@ void IsolateData::CreateContext(int context_group_id,
}
v8::Local<v8::Context> context =
v8::Context::New(isolate_.get(), nullptr, global_template);
+ if (context.IsEmpty()) return false;
context->SetAlignedPointerInEmbedderData(kIsolateDataIndex, this);
// Should be 2-byte aligned.
context->SetAlignedPointerInEmbedderData(
kContextGroupIdIndex, reinterpret_cast<void*>(context_group_id * 2));
contexts_[context_group_id].emplace_back(isolate_.get(), context);
if (inspector_) FireContextCreated(context, context_group_id, name);
+ return true;
}
v8::Local<v8::Context> IsolateData::GetDefaultContext(int context_group_id) {
diff --git a/deps/v8/test/inspector/isolate-data.h b/deps/v8/test/inspector/isolate-data.h
index e38c6c50824..74a65628a30 100644
--- a/deps/v8/test/inspector/isolate-data.h
+++ b/deps/v8/test/inspector/isolate-data.h
@@ -50,7 +50,8 @@ class IsolateData : public v8_inspector::V8InspectorClient {
// Setting things up.
int CreateContextGroup();
- void CreateContext(int context_group_id, v8_inspector::StringView name);
+ V8_NODISCARD bool CreateContext(int context_group_id,
+ v8_inspector::StringView name);
void ResetContextGroup(int context_group_id);
v8::Local<v8::Context> GetDefaultContext(int context_group_id);
int GetContextGroupId(v8::Local<v8::Context> context);
diff --git a/deps/v8/test/inspector/regress/regress-crbug-1183664-expected.txt b/deps/v8/test/inspector/regress/regress-crbug-1183664-expected.txt
new file mode 100644
index 00000000000..8a0dac11323
--- /dev/null
+++ b/deps/v8/test/inspector/regress/regress-crbug-1183664-expected.txt
@@ -0,0 +1,19 @@
+Regression test for crbug.com/1183664
+
+Running test: testMultipleScriptsInSameLineWithSameURL
+Setting breakpoint in first script
+[
+ [0] : {
+ columnNumber : 1
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+]
+Setting breakpoint in second script
+[
+ [0] : {
+ columnNumber : 65
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+]
diff --git a/deps/v8/test/inspector/regress/regress-crbug-1183664.js b/deps/v8/test/inspector/regress/regress-crbug-1183664.js
new file mode 100644
index 00000000000..558ff2b2aa5
--- /dev/null
+++ b/deps/v8/test/inspector/regress/regress-crbug-1183664.js
@@ -0,0 +1,39 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start('Regression test for crbug.com/1183664');
+
+const url = 'test://foo.js';
+const lineNumber = 0;
+
+const columnNumber1 = 1;
+contextGroup.addScript(`console.log("FIRST")`, lineNumber, columnNumber1, url);
+const columnNumber2 = 65;
+contextGroup.addScript(`console.log("SECOND")`, lineNumber, columnNumber2, url);
+
+InspectorTest.runAsyncTestSuite([
+ async function testMultipleScriptsInSameLineWithSameURL() {
+ await Protocol.Debugger.enable();
+ InspectorTest.logMessage('Setting breakpoint in first script')
+ {
+ const {result: {breakpointId, locations}} = await Protocol.Debugger.setBreakpointByUrl({
+ url,
+ lineNumber,
+ columnNumber: columnNumber1,
+ });
+ InspectorTest.logMessage(locations);
+ await Protocol.Debugger.removeBreakpoint({breakpointId});
+ }
+ InspectorTest.logMessage('Setting breakpoint in second script')
+ {
+ const {result: {breakpointId, locations}} = await Protocol.Debugger.setBreakpointByUrl({
+ url,
+ lineNumber,
+ columnNumber: columnNumber2,
+ });
+ InspectorTest.logMessage(locations);
+ await Protocol.Debugger.removeBreakpoint({breakpointId});
+ }
+ }
+]);
diff --git a/deps/v8/test/inspector/regress/regress-crbug-1199919-expected.txt b/deps/v8/test/inspector/regress/regress-crbug-1199919-expected.txt
new file mode 100644
index 00000000000..9f712a74109
--- /dev/null
+++ b/deps/v8/test/inspector/regress/regress-crbug-1199919-expected.txt
@@ -0,0 +1,9 @@
+Regression test for crbug/1199919
+
+Running test: testDefaultParameter
+defaultParameter (v8://test.js:2:2)
+(anonymous) (:0:0)
+
+Running test: testDestructuringParameter
+destructuringParameter (v8://test.js:6:2)
+(anonymous) (:0:0)
diff --git a/deps/v8/test/inspector/regress/regress-crbug-1199919.js b/deps/v8/test/inspector/regress/regress-crbug-1199919.js
new file mode 100644
index 00000000000..dcc7dc2655f
--- /dev/null
+++ b/deps/v8/test/inspector/regress/regress-crbug-1199919.js
@@ -0,0 +1,44 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {session, contextGroup, Protocol} =
+ InspectorTest.start('Regression test for crbug/1199919');
+
+const source = `
+async function defaultParameter(x = 1) {
+ return x;
+}
+
+async function destructuringParameter({x}) {
+ return x;
+}
+`;
+const url = 'v8://test.js';
+
+contextGroup.addScript(source, 0, 0, url);
+session.setupScriptMap();
+
+InspectorTest.runAsyncTestSuite([
+ async function testDefaultParameter() {
+ await Promise.all([Protocol.Runtime.enable(), Protocol.Debugger.enable()]);
+ const {result: {breakpointId}} = await Protocol.Debugger.setBreakpointByUrl({lineNumber: 2, url});
+ const evalPromise = Protocol.Runtime.evaluate({expression: 'defaultParameter()'});
+ const {params: {callFrames}} = await Protocol.Debugger.oncePaused();
+ session.logCallFrames(callFrames);
+ await Protocol.Debugger.removeBreakpoint({breakpointId});
+ await Promise.all([Protocol.Debugger.resume(), evalPromise]);
+ await Promise.all([Protocol.Runtime.disable(), Protocol.Debugger.disable()]);
+ },
+
+ async function testDestructuringParameter() {
+ await Promise.all([Protocol.Runtime.enable(), Protocol.Debugger.enable()]);
+ const {result: {breakpointId}} = await Protocol.Debugger.setBreakpointByUrl({lineNumber: 6, url});
+ const evalPromise = Protocol.Runtime.evaluate({expression: 'destructuringParameter({x: 5})'});
+ const {params: {callFrames}} = await Protocol.Debugger.oncePaused();
+ session.logCallFrames(callFrames);
+ await Protocol.Debugger.removeBreakpoint({breakpointId});
+ await Promise.all([Protocol.Debugger.resume(), evalPromise]);
+ await Promise.all([Protocol.Runtime.disable(), Protocol.Debugger.disable()]);
+ }
+]);
diff --git a/deps/v8/test/inspector/runtime/get-properties-expected.txt b/deps/v8/test/inspector/runtime/get-properties-expected.txt
index 33521c82811..ca23ac8b10c 100644
--- a/deps/v8/test/inspector/runtime/get-properties-expected.txt
+++ b/deps/v8/test/inspector/runtime/get-properties-expected.txt
@@ -125,3 +125,6 @@ Internal properties
[[ArrayBufferData]] string 0x...
[[Int8Array]] object undefined
[[Uint8Array]] object undefined
+
+Running test: testObjectWithProtoProperty
+ __proto__ own object undefined
diff --git a/deps/v8/test/inspector/runtime/get-properties.js b/deps/v8/test/inspector/runtime/get-properties.js
index bc3ea8799fc..1a8aa9e99a9 100644
--- a/deps/v8/test/inspector/runtime/get-properties.js
+++ b/deps/v8/test/inspector/runtime/get-properties.js
@@ -104,6 +104,10 @@ InspectorTest.runAsyncTestSuite([
this.Uint8Array = this.uint8array_old;
delete this.uint8array_old;
})()`);
+ },
+
+ async function testObjectWithProtoProperty() {
+ await logExpressionProperties('Object.defineProperty({}, "__proto__", {enumerable: true, value: {b:"aaa"}})');
}
]);
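
The new testObjectWithProtoProperty case inspects an object whose '__proto__' is an own, enumerable data property; defineProperty bypasses the inherited __proto__ accessor, so the prototype itself is untouched. A plain-JS sketch of that object:

const o = Object.defineProperty(
    {}, '__proto__', {enumerable: true, value: {b: 'aaa'}});
console.log(Object.getOwnPropertyNames(o));                   // ['__proto__']
console.log(Object.getPrototypeOf(o) === Object.prototype);   // true
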
diff --git a/deps/v8/test/inspector/task-runner.cc b/deps/v8/test/inspector/task-runner.cc
index 65fbeb4d6ba..ebd0b6378c7 100644
--- a/deps/v8/test/inspector/task-runner.cc
+++ b/deps/v8/test/inspector/task-runner.cc
@@ -53,7 +53,7 @@ TaskRunner::TaskRunner(IsolateData::SetupGlobalTasks setup_global_tasks,
CHECK(Start());
}
-TaskRunner::~TaskRunner() { Join(); }
+TaskRunner::~TaskRunner() {}
void TaskRunner::Run() {
data_.reset(new IsolateData(this, std::move(setup_global_tasks_),
diff --git a/deps/v8/test/inspector/wasm-inspector-test.js b/deps/v8/test/inspector/wasm-inspector-test.js
index 47d8419055a..9fe13e9d7d1 100644
--- a/deps/v8/test/inspector/wasm-inspector-test.js
+++ b/deps/v8/test/inspector/wasm-inspector-test.js
@@ -36,9 +36,10 @@ WasmInspectorTest.compile = async function(bytes, module_name = 'module') {
};
WasmInspectorTest.instantiate =
- async function(bytes, instance_name = 'instance') {
+ async function(bytes, instance_name = 'instance', imports) {
const instantiate_code = `var ${instance_name} = (${
- WasmInspectorTest.instantiateFromBuffer})(${JSON.stringify(bytes)});`;
+ WasmInspectorTest.instantiateFromBuffer})(${JSON.stringify(bytes)},
+ ${imports});`;
await WasmInspectorTest.evalWithUrl(instantiate_code, 'instantiate');
};
@@ -51,12 +52,13 @@ WasmInspectorTest.dumpScopeProperties = async function(message) {
};
WasmInspectorTest.getWasmValue = async function(value) {
- let msg = await Protocol.Runtime.getProperties({objectId: value.objectId});
+ let msg = await Protocol.Runtime.getProperties({ objectId: value.objectId });
printIfFailure(msg);
const value_type = msg.result.result.find(({name}) => name === 'type');
const value_value = msg.result.result.find(({name}) => name === 'value');
return `${
value_value.value.unserializableValue ??
+ value_value.value.description ??
value_value.value.value} (${value_type.value.value})`;
};
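
WasmInspectorTest.instantiate now takes an optional imports argument. Since it is interpolated into source text evaluated in the inspected context, callers pass it as an object-literal string rather than a live object, as the GC test near the top of this patch does:

// Inside an async inspector test (harness globals assumed):
await WasmInspectorTest.instantiate(
    module_bytes, 'instance', '{foo: {bar: (x) => console.log(x.val)}}');
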
diff --git a/deps/v8/test/intl/displaynames/getoptionsobject.js b/deps/v8/test/intl/displaynames/getoptionsobject.js
new file mode 100644
index 00000000000..b540ddda14c
--- /dev/null
+++ b/deps/v8/test/intl/displaynames/getoptionsobject.js
@@ -0,0 +1,20 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test Intl.DisplayNames call GetOptionsObject instead of ToObject
+// https://tc39.es/ecma402/#sec-getoptionsobject
+// https://tc39.es/ecma262/#sec-toobject
+let testCases = [
+ null, // Null
+ true, // Boolean
+ false, // Boolean
+ 1234, // Number
+ "string", // String
+ Symbol('foo'), // Symbol
+ 9007199254740991n // BigInt
+];
+
+testCases.forEach(function (testCase) {
+ assertThrows(() => new Intl.DisplayNames("en", testCase), TypeError);
+});
diff --git a/deps/v8/test/intl/intl.status b/deps/v8/test/intl/intl.status
index eb162bc697a..ee54c924612 100644
--- a/deps/v8/test/intl/intl.status
+++ b/deps/v8/test/intl/intl.status
@@ -60,11 +60,6 @@
'regress-7770': [SKIP],
}], # 'system == android'
-['msan == True', {
- # https://bugs.chromium.org/p/v8/issues/detail?id=11438
- 'regress-364374': [SKIP],
-}], # msan == True
-
################################################################################
['variant == stress_snapshot', {
'*': [SKIP], # only relevant for mjsunit tests.
diff --git a/deps/v8/test/intl/list-format/getoptionsobject.js b/deps/v8/test/intl/list-format/getoptionsobject.js
new file mode 100644
index 00000000000..376d1dab5b6
--- /dev/null
+++ b/deps/v8/test/intl/list-format/getoptionsobject.js
@@ -0,0 +1,20 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test Intl.ListFormat call GetOptionsObject instead of ToObject
+// https://tc39.es/ecma402/#sec-getoptionsobject
+// https://tc39.es/ecma262/#sec-toobject
+let testCases = [
+ null, // Null
+ true, // Boolean
+ false, // Boolean
+ 1234, // Number
+ "string", // String
+ Symbol('foo'), // Symbol
+ 9007199254740991n // BigInt
+];
+
+testCases.forEach(function (testCase) {
+ assertThrows(() => new Intl.ListFormat("en", testCase), TypeError);
+});
diff --git a/deps/v8/test/intl/regress-11595.js b/deps/v8/test/intl/regress-11595.js
new file mode 100644
index 00000000000..cd7d8696540
--- /dev/null
+++ b/deps/v8/test/intl/regress-11595.js
@@ -0,0 +1,23 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony_intl_best_fit_matcher
+
+const intl_objects = [
+ Intl.Collator,
+ Intl.DateTimeFormat,
+ Intl.DisplayNames,
+ Intl.ListFormat,
+ Intl.NumberFormat,
+ Intl.PluralRules,
+ Intl.RelativeTimeFormat,
+ Intl.Segmenter,
+];
+
+// Just ensure the f.supportedLocalesOf won't cause crash.
+intl_objects.forEach(f => {
+ let supported = f.supportedLocalesOf(["en"]);
+ assertEquals(1, supported.length);
+ assertEquals("en", supported[0]);
+});
diff --git a/deps/v8/test/intl/segmenter/getoptionsobject.js b/deps/v8/test/intl/segmenter/getoptionsobject.js
new file mode 100644
index 00000000000..134f0fdc8e2
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/getoptionsobject.js
@@ -0,0 +1,20 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test Intl.Segmenter call GetOptionsObject instead of ToObject
+// https://tc39.es/ecma402/#sec-getoptionsobject
+// https://tc39.es/ecma262/#sec-toobject
+let testCases = [
+ null, // Null
+ true, // Boolean
+ false, // Boolean
+ 1234, // Number
+ "string", // String
+ Symbol('foo'), // Symbol
+ 9007199254740991n // BigInt
+];
+
+testCases.forEach(function (testCase) {
+ assertThrows(() => new Intl.Segmenter("en", testCase), TypeError);
+});
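
The three getoptionsobject.js tests added for DisplayNames, ListFormat, and Segmenter all pin the same behaviour: the options argument goes through GetOptionsObject, which accepts undefined or an object and throws for anything else, rather than ToObject, which would have wrapped primitives. A plain-JS illustration, no harness required:

new Intl.Segmenter('en', undefined);                // fine: options omitted
new Intl.Segmenter('en', {granularity: 'word'});    // fine: ordinary object
try {
  new Intl.Segmenter('en', 1234);                   // primitive options value
} catch (e) {
  console.log(e instanceof TypeError);              // true under GetOptionsObject
}
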
diff --git a/deps/v8/test/js-perf-test/OWNERS b/deps/v8/test/js-perf-test/OWNERS
index e46cedb98bf..030f331cd44 100644
--- a/deps/v8/test/js-perf-test/OWNERS
+++ b/deps/v8/test/js-perf-test/OWNERS
@@ -1 +1 @@
-per-file JSTests.json=petermarshall@chromium.org
+per-file JSTests.json=marja@chromium.org
diff --git a/deps/v8/test/message/fail/await-non-async.out b/deps/v8/test/message/fail/await-non-async.out
index 3198e8d7b1e..36cd90784d8 100644
--- a/deps/v8/test/message/fail/await-non-async.out
+++ b/deps/v8/test/message/fail/await-non-async.out
@@ -1,4 +1,4 @@
-*%(basename)s:5: SyntaxError: await is only valid in async function
+*%(basename)s:5: SyntaxError: await is only valid in async functions and the top level bodies of modules
function f() { await Promise.resolve(); }
^^^^^
-SyntaxError: await is only valid in async function
+SyntaxError: await is only valid in async functions and the top level bodies of modules
diff --git a/deps/v8/test/message/fail/wasm-exception-rethrow.js b/deps/v8/test/message/fail/wasm-exception-rethrow.js
index 0f6073e17ab..c8425f7bda7 100644
--- a/deps/v8/test/message/fail/wasm-exception-rethrow.js
+++ b/deps/v8/test/message/fail/wasm-exception-rethrow.js
@@ -10,7 +10,7 @@ let builder = new WasmModuleBuilder();
let except = builder.addException(kSig_v_i);
builder.addFunction("rethrow0", kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprI32Const, 23,
kExprThrow, except,
kExprCatch, except,
diff --git a/deps/v8/test/message/fail/weak-refs-finalizationregistry1.js b/deps/v8/test/message/fail/weak-refs-finalizationregistry1.js
index f8b252b543a..f23cfa3a515 100644
--- a/deps/v8/test/message/fail/weak-refs-finalizationregistry1.js
+++ b/deps/v8/test/message/fail/weak-refs-finalizationregistry1.js
@@ -2,4 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --harmony-weak-refs
+
let fg = new FinalizationRegistry();
diff --git a/deps/v8/test/message/fail/weak-refs-finalizationregistry1.out b/deps/v8/test/message/fail/weak-refs-finalizationregistry1.out
index 0844bc02b96..7775052c91d 100644
--- a/deps/v8/test/message/fail/weak-refs-finalizationregistry1.out
+++ b/deps/v8/test/message/fail/weak-refs-finalizationregistry1.out
@@ -1,6 +1,6 @@
-*%(basename)s:*: TypeError: FinalizationRegistry: cleanup must be callable
+*%(basename)s:7: TypeError: FinalizationRegistry: cleanup must be callable
let fg = new FinalizationRegistry();
^
TypeError: FinalizationRegistry: cleanup must be callable
at new FinalizationRegistry (<anonymous>)
- at *%(basename)s:*:10
+ at *%(basename)s:7:10
diff --git a/deps/v8/test/message/fail/weak-refs-finalizationregistry2.js b/deps/v8/test/message/fail/weak-refs-finalizationregistry2.js
index 2c540632871..599bfc6d05f 100644
--- a/deps/v8/test/message/fail/weak-refs-finalizationregistry2.js
+++ b/deps/v8/test/message/fail/weak-refs-finalizationregistry2.js
@@ -2,4 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --harmony-weak-refs
+
let fg = new FinalizationRegistry({});
diff --git a/deps/v8/test/message/fail/weak-refs-finalizationregistry2.out b/deps/v8/test/message/fail/weak-refs-finalizationregistry2.out
index 69125af9842..278c3506bfe 100644
--- a/deps/v8/test/message/fail/weak-refs-finalizationregistry2.out
+++ b/deps/v8/test/message/fail/weak-refs-finalizationregistry2.out
@@ -1,6 +1,6 @@
-*%(basename)s:*: TypeError: FinalizationRegistry: cleanup must be callable
+*%(basename)s:7: TypeError: FinalizationRegistry: cleanup must be callable
let fg = new FinalizationRegistry({});
^
TypeError: FinalizationRegistry: cleanup must be callable
at new FinalizationRegistry (<anonymous>)
- at *%(basename)s:*:10
+ at *%(basename)s:7:10
diff --git a/deps/v8/test/message/fail/weak-refs-register1.js b/deps/v8/test/message/fail/weak-refs-register1.js
index 07d9c6c5b6f..7110a25e6c6 100644
--- a/deps/v8/test/message/fail/weak-refs-register1.js
+++ b/deps/v8/test/message/fail/weak-refs-register1.js
@@ -2,5 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --harmony-weak-refs
+
let fg = new FinalizationRegistry(() => {});
fg.register(1);
diff --git a/deps/v8/test/message/fail/weak-refs-register1.out b/deps/v8/test/message/fail/weak-refs-register1.out
index aa4cbc2fa22..6a9b23ecf8b 100644
--- a/deps/v8/test/message/fail/weak-refs-register1.out
+++ b/deps/v8/test/message/fail/weak-refs-register1.out
@@ -1,6 +1,6 @@
-*%(basename)s:*: TypeError: FinalizationRegistry.prototype.register: target must be an object
+*%(basename)s:8: TypeError: FinalizationRegistry.prototype.register: target must be an object
fg.register(1);
^
TypeError: FinalizationRegistry.prototype.register: target must be an object
at FinalizationRegistry.register (<anonymous>)
- at *%(basename)s:*:4
+ at *%(basename)s:8:4
diff --git a/deps/v8/test/message/fail/weak-refs-register2.js b/deps/v8/test/message/fail/weak-refs-register2.js
index b57da62095a..31df8745852 100644
--- a/deps/v8/test/message/fail/weak-refs-register2.js
+++ b/deps/v8/test/message/fail/weak-refs-register2.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --harmony-weak-refs
+
let fg = new FinalizationRegistry(() => {});
let o = {};
fg.register(o, o);
diff --git a/deps/v8/test/message/fail/weak-refs-register2.out b/deps/v8/test/message/fail/weak-refs-register2.out
index 04b1ff559f1..0f2c2f1ee2b 100644
--- a/deps/v8/test/message/fail/weak-refs-register2.out
+++ b/deps/v8/test/message/fail/weak-refs-register2.out
@@ -1,6 +1,6 @@
-*%(basename)s:*: TypeError: FinalizationRegistry.prototype.register: target and holdings must not be same
+*%(basename)s:9: TypeError: FinalizationRegistry.prototype.register: target and holdings must not be same
fg.register(o, o);
^
TypeError: FinalizationRegistry.prototype.register: target and holdings must not be same
at FinalizationRegistry.register (<anonymous>)
- at *%(basename)s:*:4
+ at *%(basename)s:9:4
diff --git a/deps/v8/test/message/fail/weak-refs-unregister.js b/deps/v8/test/message/fail/weak-refs-unregister.js
index 7befe4a2f7c..0f41263cbaf 100644
--- a/deps/v8/test/message/fail/weak-refs-unregister.js
+++ b/deps/v8/test/message/fail/weak-refs-unregister.js
@@ -2,5 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --harmony-weak-refs
+
let fg = new FinalizationRegistry(() => {});
fg.unregister(1);
diff --git a/deps/v8/test/message/fail/weak-refs-unregister.out b/deps/v8/test/message/fail/weak-refs-unregister.out
index 52945869838..766d04349fc 100644
--- a/deps/v8/test/message/fail/weak-refs-unregister.out
+++ b/deps/v8/test/message/fail/weak-refs-unregister.out
@@ -1,6 +1,6 @@
-*%(basename)s:*: TypeError: unregisterToken ('1') must be an object
+*%(basename)s:8: TypeError: unregisterToken ('1') must be an object
fg.unregister(1);
^
TypeError: unregisterToken ('1') must be an object
at FinalizationRegistry.unregister (<anonymous>)
- at *%(basename)s:*:4
+ at *%(basename)s:8:4
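
The weak-refs message tests gain an explicit --harmony-weak-refs flag line plus a blank line, and the corresponding .out files switch from wildcard positions to exact line and column numbers. The underlying errors are unchanged; in plain JS:

try {
  new FinalizationRegistry();                 // cleanup callback missing
} catch (e) {
  console.log(e instanceof TypeError);        // true: cleanup must be callable
}
const fg = new FinalizationRegistry(() => {});
fg.register({}, 'held');                      // fine: target is an object
try {
  fg.register(1);                             // primitive target
} catch (e) {
  console.log(e instanceof TypeError);        // true: target must be an object
}
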
diff --git a/deps/v8/test/message/message.status b/deps/v8/test/message/message.status
index d5d57e0b29c..03c7d6618c4 100644
--- a/deps/v8/test/message/message.status
+++ b/deps/v8/test/message/message.status
@@ -70,10 +70,13 @@
}],
################################################################################
-['arch == ppc64 or arch == mips64el or arch == mipsel', {
+['arch == mips64el or arch == mipsel', {
# Tests that require Simd enabled.
'wasm-trace-memory': [SKIP],
-}], # arch == ppc64 or arch == mips64el or arch == mipsel
-
+}], # arch == mips64el or arch == mipsel
+['arch == riscv64', {
+ # Tests that require Simd enabled.
+ 'wasm-trace-memory': [SKIP],
+}],
]
diff --git a/deps/v8/test/message/weakref-finalizationregistry-error.js b/deps/v8/test/message/weakref-finalizationregistry-error.js
index 42f5eb3bc0e..e4c47fed0dc 100644
--- a/deps/v8/test/message/weakref-finalizationregistry-error.js
+++ b/deps/v8/test/message/weakref-finalizationregistry-error.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
// Flags: --no-stress-opt
// Since cleanup tasks are top-level tasks, errors thrown from them don't stop
diff --git a/deps/v8/test/mjsunit/array-bounds-check-removal.js b/deps/v8/test/mjsunit/array-bounds-check-removal.js
index 303514947e4..e315f0f1052 100644
--- a/deps/v8/test/mjsunit/array-bounds-check-removal.js
+++ b/deps/v8/test/mjsunit/array-bounds-check-removal.js
@@ -218,7 +218,11 @@ short_test(short_a, 50);
%OptimizeFunctionOnNextCall(short_test);
short_a.length = 10;
short_test(short_a, 0);
-assertUnoptimized(short_test);
+// TODO(v8:11457) Currently, we cannot inline stores if there is a dictionary
+// mode prototype on the prototype chain. Therefore, if
+// v8_dict_property_const_tracking is enabled, the optimized code only contains
+// a call to the IC handler and doesn't get deopted.
+assertEquals(%IsDictPropertyConstTrackingEnabled(), isOptimized(short_test));
// A test for when we would modify a phi index.
diff --git a/deps/v8/test/mjsunit/array-sort.js b/deps/v8/test/mjsunit/array-sort.js
index 2f4d4e6c06d..87eebb56337 100644
--- a/deps/v8/test/mjsunit/array-sort.js
+++ b/deps/v8/test/mjsunit/array-sort.js
@@ -509,6 +509,10 @@ assertThrows(() => {
Array.prototype.sort.call(undefined);
}, TypeError);
+assertThrows(() => {
+ Array.prototype.sort.call(null);
+}, TypeError);
+
// This test ensures that RemoveArrayHoles does not shadow indices in the
// prototype chain. There are multiple code paths, we force both and check that
// they have the same behavior.
@@ -748,3 +752,15 @@ function TestSortCmpPackedSetLengthToZero() {
xs.sort(create_cmpfn(() => xs.length = 0));
assertTrue(HasPackedSmi(xs));
}
+TestSortCmpPackedSetLengthToZero();
+
+(function TestSortingNonObjectConvertsToObject() {
+ const v1 = Array.prototype.sort.call(true);
+ assertEquals('object', typeof v1);
+
+ const v2 = Array.prototype.sort.call(false);
+ assertEquals('object', typeof v2);
+
+ const v3 = Array.prototype.sort.call(42);
+ assertEquals('object', typeof v3);
+})();
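
The new sort cases above complement the existing null/undefined checks: a primitive receiver is converted with ToObject, so sorting it yields a wrapper object, while null and undefined still throw before that step. In plain JS:

console.log(typeof Array.prototype.sort.call(42));    // 'object' (Number wrapper)
console.log(typeof Array.prototype.sort.call(true));  // 'object' (Boolean wrapper)
try {
  Array.prototype.sort.call(null);
} catch (e) {
  console.log(e instanceof TypeError);                // true
}
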
diff --git a/deps/v8/test/mjsunit/array-store-and-grow.js b/deps/v8/test/mjsunit/array-store-and-grow.js
index d717c6dfa60..4b5f4e0da44 100644
--- a/deps/v8/test/mjsunit/array-store-and-grow.js
+++ b/deps/v8/test/mjsunit/array-store-and-grow.js
@@ -205,7 +205,11 @@ assertEquals(0.5, array_store_1([], 0, 0.5));
grow_store(a,10,1);
assertOptimized(grow_store);
grow_store(a,2048,1);
- assertUnoptimized(grow_store);
+ // TODO(v8:11457) We don't currently support inlining element stores if there
+ // is a dictionary mode prototype on the prototype chain. Therefore, if
+ // v8_dict_property_const_tracking is enabled, the optimized code only
+ // contains a call to the IC handler and doesn't get deopted.
+ assertEquals(%IsDictPropertyConstTrackingEnabled(), isOptimized(grow_store));
%ClearFunctionFeedback(grow_store);
})();
@@ -254,6 +258,10 @@ assertEquals(0.5, array_store_1([], 0, 0.5));
assertOptimized(f);
// An attempt to grow should cause deopt
f(new Array("hi"), 1, 3);
- assertUnoptimized(f);
+ // TODO(v8:11457) We don't currently support inlining element stores if there
+ // is a dictionary mode prototype on the prototype chain. Therefore, if
+ // v8_dict_property_const_tracking is enabled, the optimized code only
+ // contains a call to the IC handler and doesn't get deopted.
+ assertEquals(%IsDictPropertyConstTrackingEnabled(), isOptimized(f));
%ClearFunctionFeedback(f);
})();
diff --git a/deps/v8/test/mjsunit/baseline/cross-realm.js b/deps/v8/test/mjsunit/baseline/cross-realm.js
index 1d0fb6b0a2f..8c3cc0af06f 100644
--- a/deps/v8/test/mjsunit/baseline/cross-realm.js
+++ b/deps/v8/test/mjsunit/baseline/cross-realm.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --sparkplug
+// Flags: --allow-natives-syntax --sparkplug --no-always-sparkplug
// Tier-up across Realms
@@ -17,16 +17,20 @@
var realm1 = Realm.createAllowCrossRealmAccess();
var realm2 = Realm.createAllowCrossRealmAccess();
+ // f1 and f2 have the same code, so share a SharedFunctionInfo (i.e. share
+ // bytecode and baseline code).
let f1 = Realm.eval(realm1, "(" + factory1.toString() + ")")();
let f2 = Realm.eval(realm2, "(" + factory1.toString() + ")")();
%NeverOptimizeFunction(f1);
%NeverOptimizeFunction(f2);
+ // Compile f1 to baseline, f2 stays uncompiled
%CompileBaseline(f1);
assertEquals(0, f1(0));
assertTrue(isBaseline(f1));
assertFalse(isBaseline(f2));
+ // f2 tiers up to baseline via lazy compile
assertEquals(0, f2(0));
assertTrue(isBaseline(f1));
assertTrue(isBaseline(f2));
@@ -44,14 +48,18 @@
var realm1 = Realm.createAllowCrossRealmAccess();
var realm2 = Realm.createAllowCrossRealmAccess();
+ // f1, f2 and f3 have the same code, so share a SharedFunctionInfo (i.e. share
+ // bytecode and baseline code).
let f1 = Realm.eval(realm1, "(" + factory2.toString() + ")")();
let realmFactory = Realm.eval(realm2, "(" + factory2.toString() + ")");
+ // f2 and f3 are in the same realm, so share a feedback vector cell.
let f2 = realmFactory();
let f3 = realmFactory();
%NeverOptimizeFunction(f1);
%NeverOptimizeFunction(f2);
%NeverOptimizeFunction(f3);
+ // Compile f1 to baseline, f2 to interpreter, f3 stays uncompiled.
assertEquals(0, f2(0));
%CompileBaseline(f1);
assertEquals(0, f1(0));
@@ -59,10 +67,55 @@
assertFalse(isBaseline(f2));
assertFalse(isBaseline(f3));
+ // Compile f3, tiers up to baseline via lazy compile and installs the feedback
+ // vector
assertEquals(0, f3(0));
assertTrue(isBaseline(f3));
assertFalse(isBaseline(f2));
+ // Run f2, tiers up to baseline via interpreter entry.
assertEquals(0, f2(0));
assertTrue(isBaseline(f2));
})();
+
+// Ensure a feedback vector is created when sharing baseline code and a closure
+// feedback cell array already exists.
+(function() {
+ function factory3() {
+ return function(a) {
+ return a;
+ }
+ }
+
+ var realm1 = Realm.createAllowCrossRealmAccess();
+ var realm2 = Realm.createAllowCrossRealmAccess();
+
+ // f1, f2 and f3 have the same code, so share a SharedFunctionInfo (i.e. share
+ // bytecode and baseline code).
+ let f1 = Realm.eval(realm1, "(" + factory3.toString() + ")")();
+ let realmFactory = Realm.eval(realm2, "(" + factory3.toString() + ")");
+ // f2 and f3 are in the same realm, so share a feedback vector cell.
+ let f2 = realmFactory();
+ let f3 = realmFactory();
+ %NeverOptimizeFunction(f1);
+ %NeverOptimizeFunction(f2);
+ %NeverOptimizeFunction(f3);
+
+ // Compile f1 to baseline, f2 to interpreter, f3 stays uncompiled.
+ assertEquals(0, f2(0));
+ %CompileBaseline(f1);
+ assertEquals(0, f1(0));
+ assertTrue(isBaseline(f1));
+ assertFalse(isBaseline(f2));
+ assertFalse(isBaseline(f3));
+
+ // Run f2, tiers up to baseline via interpreter entry and installs the
+ // feedback vector
+ assertEquals(0, f2(0));
+ assertTrue(isBaseline(f2));
+ assertFalse(isBaseline(f3));
+
+ // Compile f3, tiers up to baseline via lazy compile.
+ assertEquals(0, f3(0));
+ assertTrue(isBaseline(f3));
+})();
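Condensed, the tier-up expectations these cross-realm tests encode look like the sketch below (assuming the flags at the top of this file and the mjsunit helpers; factory is illustrative):

function factory() { return function(a) { return a; }; }

let realmA = Realm.createAllowCrossRealmAccess();
let realmB = Realm.createAllowCrossRealmAccess();

// Same source in both realms, so one shared SharedFunctionInfo, i.e. shared
// bytecode and shared baseline code.
let fA = Realm.eval(realmA, "(" + factory.toString() + ")")();
let fB = Realm.eval(realmB, "(" + factory.toString() + ")")();
%NeverOptimizeFunction(fA);
%NeverOptimizeFunction(fB);

%CompileBaseline(fA);          // fA gets baseline code directly.
assertEquals(0, fA(0));
assertTrue(isBaseline(fA));
assertFalse(isBaseline(fB));   // fB has not been compiled yet.

assertEquals(0, fB(0));        // Lazy compile picks up the shared baseline code.
assertTrue(isBaseline(fB));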
diff --git a/deps/v8/test/mjsunit/baseline/test-baseline-module.mjs b/deps/v8/test/mjsunit/baseline/test-baseline-module.mjs
index 409465c2104..a8b836dcbe1 100644
--- a/deps/v8/test/mjsunit/baseline/test-baseline-module.mjs
+++ b/deps/v8/test/mjsunit/baseline/test-baseline-module.mjs
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --super-ic --sparkplug
+// Flags: --allow-natives-syntax --super-ic --sparkplug --no-always-sparkplug
export let exported = 17;
import imported from 'test-baseline-module-helper.mjs';
diff --git a/deps/v8/test/mjsunit/baseline/test-baseline.js b/deps/v8/test/mjsunit/baseline/test-baseline.js
index b35a7ffbfff..7e427c0d548 100644
--- a/deps/v8/test/mjsunit/baseline/test-baseline.js
+++ b/deps/v8/test/mjsunit/baseline/test-baseline.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --super-ic --sparkplug
+// Flags: --allow-natives-syntax --super-ic --sparkplug --no-always-sparkplug
function run(f, ...args) {
try { f(...args); } catch (e) {}
@@ -81,11 +81,14 @@ assertEquals(run(()=>{ var x = 0; for(var i = 0; i < 10; ++i) x+=1; return x;}),
function testTypeOf(o, t) {
let types = ['number', 'string', 'symbol', 'boolean', 'bigint', 'undefined',
'function', 'object'];
- assertEquals(t, eval('run(()=>typeof ' + o + ')'));
- assertTrue(eval('run(()=>typeof ' + o + ' == "' + t + '")'));
+ assertEquals(t, eval('run(()=>typeof ' + o + ')'),
+ `(()=>typeof ${o})() == ${t}`);
+ assertTrue(eval('run(()=>typeof ' + o + ' == "' + t + '")'),
+ `typeof ${o} == ${t}`);
var other_types = types.filter((x) => x !== t);
for (var other of other_types) {
- assertFalse(eval('run(()=>typeof ' + o + ' == "' + other + '")'));
+ assertFalse(eval('run(()=>typeof ' + o + ' == "' + other + '")'),
+ `typeof ${o} != ${other}`);
}
}
@@ -100,15 +103,15 @@ testTypeOf('"42"', 'string');
testTypeOf('Symbol(42)', 'symbol');
testTypeOf('{}', 'object');
testTypeOf('[]', 'object');
-//testTypeOf('new Proxy({}, {})', 'object');
-//testTypeOf('new Proxy([], {})', 'object');
+testTypeOf('new Proxy({}, {})', 'object');
+testTypeOf('new Proxy([], {})', 'object');
testTypeOf('(_ => 42)', 'function');
testTypeOf('function() {}', 'function');
testTypeOf('function*() {}', 'function');
testTypeOf('async function() {}', 'function');
testTypeOf('async function*() {}', 'function');
-//testTypeOf('new Proxy(_ => 42, {})', 'function');
-//testTypeOf('class {}', 'function');
+testTypeOf('new Proxy(_ => 42, {})', 'function');
+testTypeOf('class {}', 'function');
testTypeOf('Object', 'function');
// Binop
@@ -265,6 +268,19 @@ for (let val of gen) {
}
assertEquals(4, i);
+// Generator with a lot of locals
+let gen_func_with_a_lot_of_locals = eval(`(function*() {
+ ${ Array(32*1024).fill().map((x,i)=>`let local_${i};`).join("\n") }
+ yield 1;
+ yield 2;
+ yield 3;
+})`);
+i = 1;
+for (let val of run(gen_func_with_a_lot_of_locals)) {
+ assertEquals(i++, val);
+}
+assertEquals(4, i);
+
// Async await
run(async function() {
await 1;
diff --git a/deps/v8/test/mjsunit/baseline/verify-bytecode-offsets.js b/deps/v8/test/mjsunit/baseline/verify-bytecode-offsets.js
new file mode 100644
index 00000000000..7376216ae10
--- /dev/null
+++ b/deps/v8/test/mjsunit/baseline/verify-bytecode-offsets.js
@@ -0,0 +1,37 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --always-sparkplug --allow-natives-syntax
+
+// This test mainly exists to make ClusterFuzz aware of
+// d8.test.verifySourcePositions.
+
+globalValue = false;
+
+function foo(param1, ...param2) {
+ try {
+ for (let key in param1) { param2.push(key); }
+ for (let a of param1) { param2.push(a); }
+ let [a, b] = param2;
+ let copy = [{literal:1}, {}, [], [1], 1, ...param2];
+ return a + b + copy.length;
+ } catch (e) {
+ return e.toString().match(/[a-zA-Z]+/g);
+ } finally {
+ globalValue = new String(23);
+ }
+ return Math.min(Math.random(), 0.5);
+}
+
+var obj = [...Array(10).keys()];
+obj.foo = 'bar';
+foo(obj, obj);
+
+d8.test.verifySourcePositions(foo);
+
+// Make sure invalid calls throw.
+assertThrows(() => {d8.test.verifySourcePositions(0)});
+assertThrows(() => {d8.test.verifySourcePositions(obj)});
+assertThrows(() => {d8.test.verifySourcePositions(new Proxy(foo, {}))});
+assertThrows(() => {d8.test.verifySourcePositions(%GetUndetectable())});
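As the assertions above show, d8.test.verifySourcePositions accepts a plain JSFunction and throws for anything else (numbers, ordinary objects, proxies, undetectables). The minimal usage is simply (a sketch, assuming the flags at the top of this file):

function sample(x) {
  let total = 0;
  for (const v of [x, x + 1, x + 2]) total += v;
  return total;
}
sample(1);  // Make sure sample has been compiled before verifying it.

d8.test.verifySourcePositions(sample);  // Passes for a plain JSFunction.

// Anything else throws.
assertThrows(() => d8.test.verifySourcePositions(42));
assertThrows(() => d8.test.verifySourcePositions(new Proxy(sample, {})));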
diff --git a/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js b/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js
index 50318b5639a..ac0e9ef59e5 100644
--- a/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js
+++ b/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js
@@ -63,6 +63,13 @@ assertUnoptimized(add_field, "no sync");
// Let concurrent recompilation proceed.
%UnblockConcurrentRecompilation();
// Sync with background thread to conclude optimization that bailed out.
-assertUnoptimized(add_field, "sync");
+if (!%IsDictPropertyConstTrackingEnabled()) {
+ // TODO(v8:11457) Currently, we cannot inline property stores if there is a
+ // dictionary mode prototype on the prototype chain. Therefore, if
+ // v8_dict_property_const_tracking is enabled, the optimized code only
+ // contains a call to the IC handler and doesn't get invalidated when the
+ // transition map changes.
+ assertUnoptimized(add_field, "sync");
+}
// Clear type info for stress runs.
%ClearFunctionFeedback(add_field);
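For context, the blocked-recompilation handshake used here works as follows: %OptimizeFunctionOnNextCall(f, "concurrent") queues a background job that --block-concurrent-recompilation keeps from finishing, assertUnoptimized(f, "no sync") checks the status without waiting, and after %UnblockConcurrentRecompilation() a check with "sync" waits for the background thread before looking at the result. A sketch modelled on the read_length test added to const-dict-tracking.js further down (assuming --allow-natives-syntax --opt --no-always-opt --block-concurrent-recompilation):

var proto = Object.create(null);
Object.defineProperty(proto, 'length', { get() { return 1; } });
var middle = Object.create(proto);
var o = Object.create(middle);

function read_length(x) { return x.length; }

%PrepareFunctionForOptimization(read_length);
assertEquals(1, read_length(o));
%OptimizeFunctionOnNextCall(read_length, "concurrent");  // Queue the job.
assertEquals(1, read_length(o));
assertUnoptimized(read_length, "no sync");  // Queued, but still blocked.

Object.setPrototypeOf(middle, []);          // Invalidate while it is blocked.
%UnblockConcurrentRecompilation();          // Let the background job run.
assertUnoptimized(read_length, "sync");     // Wait for it, then re-check.
assertEquals(0, read_length(o));            // [].length is 0.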
diff --git a/deps/v8/test/mjsunit/compiler/fast-api-calls.js b/deps/v8/test/mjsunit/compiler/fast-api-calls.js
new file mode 100644
index 00000000000..c61a7a62a0d
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/fast-api-calls.js
@@ -0,0 +1,148 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file exercises basic fast API calls and enables fuzzing of this
+// functionality.
+
+// Flags: --turbo-fast-api-calls --allow-natives-syntax --opt
+// --always-opt is disabled because we rely on particular feedback for
+// optimizing to the fastest path.
+// Flags: --no-always-opt
+
+assertThrows(() => d8.test.fast_c_api());
+const fast_c_api = new d8.test.fast_c_api();
+
+// ----------- add_all -----------
+// `add_all` has the following signature:
+// double add_all(bool /*should_fallback*/, int32_t, uint32_t,
+// int64_t, uint64_t, float, double)
+
+const max_safe_float = 2**24 - 1;
+const add_all_result = -42 + 45 + Number.MIN_SAFE_INTEGER + Number.MAX_SAFE_INTEGER +
+ max_safe_float * 0.5 + Math.PI;
+
+function add_all(should_fallback = false) {
+ return fast_c_api.add_all(should_fallback,
+ -42, 45, Number.MIN_SAFE_INTEGER, Number.MAX_SAFE_INTEGER,
+ max_safe_float * 0.5, Math.PI);
+}
+
+%PrepareFunctionForOptimization(add_all);
+assertEquals(add_all_result, add_all());
+%OptimizeFunctionOnNextCall(add_all);
+
+if (fast_c_api.supports_fp_params) {
+ // Test that regular call hits the fast path.
+ fast_c_api.reset_counts();
+ assertEquals(add_all_result, add_all());
+ assertEquals(1, fast_c_api.fast_call_count());
+ assertEquals(0, fast_c_api.slow_call_count());
+
+ // Test fallback to slow path.
+ fast_c_api.reset_counts();
+ assertEquals(add_all_result, add_all(true));
+ assertEquals(1, fast_c_api.fast_call_count());
+ assertEquals(1, fast_c_api.slow_call_count());
+
+ // Test that no fallback hits the fast path again.
+ fast_c_api.reset_counts();
+ assertEquals(add_all_result, add_all());
+ assertEquals(1, fast_c_api.fast_call_count());
+ assertEquals(0, fast_c_api.slow_call_count());
+} else {
+ // Test that calling with unsupported types hits the slow path.
+ fast_c_api.reset_counts();
+ assertEquals(add_all_result, add_all());
+ assertEquals(0, fast_c_api.fast_call_count());
+ assertEquals(1, fast_c_api.slow_call_count());
+}
+
+// ----------- Test add_all signature mismatch -----------
+function add_all_mismatch() {
+ return fast_c_api.add_all(false /*should_fallback*/,
+ 45, -42, Number.MAX_SAFE_INTEGER, max_safe_float * 0.5,
+ Number.MIN_SAFE_INTEGER, Math.PI);
+}
+
+%PrepareFunctionForOptimization(add_all_mismatch);
+const add_all_mismatch_result = add_all_mismatch();
+%OptimizeFunctionOnNextCall(add_all_mismatch);
+
+fast_c_api.reset_counts();
+assertEquals(add_all_mismatch_result, add_all_mismatch());
+assertEquals(1, fast_c_api.slow_call_count());
+assertEquals(0, fast_c_api.fast_call_count());
+// If the function was ever optimized to the fast path, it should
+// have been deoptimized due to the argument types mismatch. If it
+// wasn't optimized due to lack of support for FP params, it will
+// stay optimized.
+if (fast_c_api.supports_fp_params) {
+ assertUnoptimized(add_all_mismatch);
+}
+
+// ----------- add_32bit_int -----------
+// `add_32bit_int` has the following signature:
+// int add_32bit_int(bool /*should_fallback*/, int32_t, uint32_t)
+
+const add_32bit_int_result = -42 + 45;
+
+function add_32bit_int(should_fallback = false) {
+ return fast_c_api.add_32bit_int(should_fallback, -42, 45);
+}
+
+%PrepareFunctionForOptimization(add_32bit_int);
+assertEquals(add_32bit_int_result, add_32bit_int());
+%OptimizeFunctionOnNextCall(add_32bit_int);
+
+// Test that regular call hits the fast path.
+fast_c_api.reset_counts();
+assertEquals(add_32bit_int_result, add_32bit_int());
+assertEquals(1, fast_c_api.fast_call_count());
+assertEquals(0, fast_c_api.slow_call_count());
+
+// Test fallback to slow path.
+fast_c_api.reset_counts();
+assertEquals(add_32bit_int_result, add_32bit_int(true));
+assertEquals(1, fast_c_api.fast_call_count());
+assertEquals(1, fast_c_api.slow_call_count());
+
+// Test that no fallback hits the fast path again.
+fast_c_api.reset_counts();
+assertEquals(add_32bit_int_result, add_32bit_int());
+assertEquals(1, fast_c_api.fast_call_count());
+assertEquals(0, fast_c_api.slow_call_count());
+
+// ----------- Test various signature mismatches -----------
+function add_32bit_int_mismatch(arg0, arg1, arg2, arg3) {
+ return fast_c_api.add_32bit_int(arg0, arg1, arg2, arg3);
+}
+
+%PrepareFunctionForOptimization(add_32bit_int_mismatch);
+assertEquals(add_32bit_int_result, add_32bit_int_mismatch(false, -42, 45));
+%OptimizeFunctionOnNextCall(add_32bit_int_mismatch);
+
+// Test that passing an extra argument stays on the fast path.
+fast_c_api.reset_counts();
+assertEquals(add_32bit_int_result, add_32bit_int_mismatch(false, -42, 45, -42));
+assertEquals(1, fast_c_api.fast_call_count());
+
+// Test that passing wrong argument types stays on the fast path.
+fast_c_api.reset_counts();
+assertEquals(Math.round(-42 + 3.14), add_32bit_int_mismatch(false, -42, 3.14));
+assertEquals(1, fast_c_api.fast_call_count());
+
+// Test that passing too few arguments falls back to the slow path,
+// because it's an argument type mismatch (undefined vs. int).
+fast_c_api.reset_counts();
+assertEquals(-42, add_32bit_int_mismatch(false, -42));
+assertEquals(1, fast_c_api.slow_call_count());
+assertEquals(0, fast_c_api.fast_call_count());
+assertUnoptimized(add_32bit_int_mismatch);
+
+// Test that the function can be optimized again.
+%PrepareFunctionForOptimization(add_32bit_int_mismatch);
+%OptimizeFunctionOnNextCall(add_32bit_int_mismatch);
+fast_c_api.reset_counts();
+assertEquals(add_32bit_int_result, add_32bit_int_mismatch(false, -42, 45));
+assertEquals(1, fast_c_api.fast_call_count());
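The fast_c_api test object exposes fast_call_count() and slow_call_count() so the tests can observe which path an optimized call actually took. Stripped to its core, the probing pattern used throughout this file is (a sketch, assuming the flags at the top of this file and the mjsunit helpers):

const api = new d8.test.fast_c_api();

function probe(should_fallback = false) {
  return api.add_32bit_int(should_fallback, -42, 45);
}

%PrepareFunctionForOptimization(probe);
assertEquals(3, probe());
%OptimizeFunctionOnNextCall(probe);

api.reset_counts();
assertEquals(3, probe());       // Optimized call takes the fast path.
assertEquals(1, api.fast_call_count());
assertEquals(0, api.slow_call_count());

api.reset_counts();
assertEquals(3, probe(true));   // The callback requests a fallback,
assertEquals(1, api.fast_call_count());
assertEquals(1, api.slow_call_count());  // so the slow path runs as well.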
diff --git a/deps/v8/test/mjsunit/compiler/load-elimination-const-field.js b/deps/v8/test/mjsunit/compiler/load-elimination-const-field.js
index 4d1425272a9..fb227dd93dc 100644
--- a/deps/v8/test/mjsunit/compiler/load-elimination-const-field.js
+++ b/deps/v8/test/mjsunit/compiler/load-elimination-const-field.js
@@ -125,8 +125,13 @@
let v1 = b.value;
maybe_sideeffect(b);
let v2 = b.value;
- %TurbofanStaticAssert(Object.is(v1, v2));
- %TurbofanStaticAssert(Object.is(v2, k));
+ if (!%IsDictPropertyConstTrackingEnabled()) {
+ // TODO(v8:11457) If v8_dict_property_const_tracking is enabled, then
+ // b has a dictionary mode prototype and the load elimination doesn't
+ // work, yet.
+ %TurbofanStaticAssert(Object.is(v1, v2));
+ %TurbofanStaticAssert(Object.is(v2, k));
+ }
}
%EnsureFeedbackVectorForFunction(B);
@@ -151,8 +156,13 @@
let v1 = b.value;
maybe_sideeffect(b);
let v2 = b.value;
- %TurbofanStaticAssert(Object.is(v1, v2));
- %TurbofanStaticAssert(Object.is(v2, kk));
+ if (!%IsDictPropertyConstTrackingEnabled()) {
+ // TODO(v8:11457) If v8_dict_property_const_tracking is enabled, then
+ // b has a dictionary mode prototype and the load elimination doesn't
+ // work, yet.
+ %TurbofanStaticAssert(Object.is(v1, v2));
+ %TurbofanStaticAssert(Object.is(v2, kk));
+ }
}
%EnsureFeedbackVectorForFunction(B);
diff --git a/deps/v8/test/mjsunit/compiler/monomorphic-named-load-with-no-map.js b/deps/v8/test/mjsunit/compiler/monomorphic-named-load-with-no-map.js
index 4150535462f..9341f8e8c91 100644
--- a/deps/v8/test/mjsunit/compiler/monomorphic-named-load-with-no-map.js
+++ b/deps/v8/test/mjsunit/compiler/monomorphic-named-load-with-no-map.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --allow-natives-syntax --expose-gc
+// Flags: --allow-natives-syntax --harmony-weak-refs --expose-gc
// Helper to convert setTimeout into an awaitable promise.
function asyncTimeout(timeout) {
diff --git a/deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js b/deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js
index f01dcaffcd6..ae7f92a33d0 100644
--- a/deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js
+++ b/deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js
@@ -29,6 +29,13 @@
const b = makeObjectWithStableMap();
b.d = 1;
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ // TODO(v8:11457) In this mode we weren't able to inline the access, yet, so
+ // it stays optimized. See related TODO in
+ // JSNativeContextSpecialization::ReduceJSResolvePromise.
+ return;
+ }
+
// This should deoptimize foo.
assertUnoptimized(foo);
})();
@@ -58,6 +65,13 @@
const b = makeObjectWithStableMap();
b.z = 1;
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ // TODO(v8:11457) In this mode we weren't able to inline the access, yet, so
+ // it stays optimized. See related TODO in
+ // JSNativeContextSpecialization::ReduceJSResolvePromise.
+ return;
+ }
+
// This should deoptimize foo.
assertUnoptimized(foo);
})();
diff --git a/deps/v8/test/mjsunit/compiler/regress-1215514.js b/deps/v8/test/mjsunit/compiler/regress-1215514.js
new file mode 100644
index 00000000000..a597b310498
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1215514.js
@@ -0,0 +1,7 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-concurrent-recompilation
+
+new Array(4242).shift();
diff --git a/deps/v8/test/mjsunit/compiler/serializer-accessors.js b/deps/v8/test/mjsunit/compiler/serializer-accessors.js
index da5b426c013..1281bed3dfe 100644
--- a/deps/v8/test/mjsunit/compiler/serializer-accessors.js
+++ b/deps/v8/test/mjsunit/compiler/serializer-accessors.js
@@ -11,7 +11,12 @@ class C {
return 42;
}
set prop(v) {
- assertEquals(expect_interpreted, %IsBeingInterpreted());
+ if (!%IsDictPropertyConstTrackingEnabled()) {
+ // TODO(v8:11457) If v8_dict_property_const_tracking is enabled, then
+ // C.prototype is a dictionary mode object and we cannot inline the call
+ // to this setter, yet.
+ assertEquals(expect_interpreted, %IsBeingInterpreted());
+ }
%TurbofanStaticAssert(v === 43);
}
}
diff --git a/deps/v8/test/mjsunit/compiler/serializer-dead-after-jump.js b/deps/v8/test/mjsunit/compiler/serializer-dead-after-jump.js
index b147530ba0d..3367a08e3e1 100644
--- a/deps/v8/test/mjsunit/compiler/serializer-dead-after-jump.js
+++ b/deps/v8/test/mjsunit/compiler/serializer-dead-after-jump.js
@@ -5,7 +5,12 @@
// Flags: --allow-natives-syntax --opt --no-always-opt
function f(x) {
- %TurbofanStaticAssert(x.foo === 42);
+ // TODO(v8:11457) If v8_dict_property_const_tracking is enabled, then the
+ // prototype of |x| in |main| is a dictionary mode object, and we cannot
+ // inline the storing of x.foo, yet.
+ if (!%IsDictPropertyConstTrackingEnabled()) {
+ %TurbofanStaticAssert(x.foo === 42);
+ }
return %IsBeingInterpreted();
}
diff --git a/deps/v8/test/mjsunit/compiler/serializer-dead-after-return.js b/deps/v8/test/mjsunit/compiler/serializer-dead-after-return.js
index 6e321d5c1d7..3f24649f046 100644
--- a/deps/v8/test/mjsunit/compiler/serializer-dead-after-return.js
+++ b/deps/v8/test/mjsunit/compiler/serializer-dead-after-return.js
@@ -5,7 +5,12 @@
// Flags: --allow-natives-syntax --opt --no-always-opt
function f(x) {
- %TurbofanStaticAssert(x.foo === 42);
+ if (!%IsDictPropertyConstTrackingEnabled()) {
+ // TODO(v8:11457) If v8_dict_property_const_tracking is enabled, then the
+ // prototype of |x| in |main| is a dictionary mode object, and we cannot
+ // inline the storing of x.foo, yet.
+ %TurbofanStaticAssert(x.foo === 42);
+ }
return %IsBeingInterpreted();
}
diff --git a/deps/v8/test/mjsunit/compiler/serializer-transition-propagation.js b/deps/v8/test/mjsunit/compiler/serializer-transition-propagation.js
index 6a6da6fa7e6..ff7a1c5a2b7 100644
--- a/deps/v8/test/mjsunit/compiler/serializer-transition-propagation.js
+++ b/deps/v8/test/mjsunit/compiler/serializer-transition-propagation.js
@@ -9,7 +9,12 @@ var expect_interpreted = true;
function C() {
this.a = 1;
assertEquals(expect_interpreted, %IsBeingInterpreted());
- %TurbofanStaticAssert(this.x == 42);
+ if (!%IsDictPropertyConstTrackingEnabled()) {
+ // TODO(v8:11457) If v8_dict_property_const_tracking is enabled, then the
+ // prototype of |this| in D() is a dictionary mode object, and we cannot
+ // inline the storing of this.x, yet.
+ %TurbofanStaticAssert(this.x == 42);
+ }
};
function D() {
diff --git a/deps/v8/test/mjsunit/const-dict-tracking.js b/deps/v8/test/mjsunit/const-dict-tracking.js
index 752423443b0..051239fb5fd 100644
--- a/deps/v8/test/mjsunit/const-dict-tracking.js
+++ b/deps/v8/test/mjsunit/const-dict-tracking.js
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --opt --no-always-opt
+// Flags: --no-stress-flush-bytecode
+// Flags: --block-concurrent-recompilation
//
// Tests tracking of constness of properties stored in dictionary
// mode prototypes.
@@ -260,3 +262,471 @@ function testbench(o, proto, update_proto, check_constness) {
testbench(o, proto, update_z, false);
})();
+
+//
+// Below: Testing TF optimization of accessing constants in dictionary mode
+// prototypes.
+//
+
+// Test inlining with fast mode receiver.
+(function() {
+ var proto = Object.create(null);
+ proto.x = 1;
+ var o = Object.create(proto);
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto));
+
+ function read_x(arg_o) {
+ return arg_o.x;
+ }
+
+ %PrepareFunctionForOptimization(read_x);
+ assertEquals(1, read_x(o));
+ %OptimizeFunctionOnNextCall(read_x);
+ assertEquals(1, read_x(o));
+ assertOptimized(read_x);
+
+
+ // Test that we inlined the access:
+ var dummy = {x : 123};
+ read_x(dummy);
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto));
+ assertUnoptimized(read_x);
+ }
+
+})();
+
+// Test inlining with dictionary mode receiver that is a prototype.
+
+(function() {
+
+ var proto1 = Object.create(null);
+ proto1.x = 1;
+ var proto2 = Object.create(null);
+ var o = Object.create(proto1);
+ Object.setPrototypeOf(proto1, proto2);
+
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto1));
+ assertFalse(%HasFastProperties(proto2));
+
+ function read_x(arg_o) {
+ return arg_o.x;
+ }
+
+ %PrepareFunctionForOptimization(read_x);
+ assertEquals(1, read_x(proto1));
+ %OptimizeFunctionOnNextCall(read_x);
+ assertEquals(1, read_x(proto1));
+ assertOptimized(read_x);
+
+ // Test that we inlined the access:
+ var dummy = {x : 123};
+ read_x(dummy);
+
+ // TODO(v8:11457) This test doesn't work yet, see TODO in
+ // AccessInfoFactory::TryLoadPropertyDetails. Currently, we can't inline
+ // accesses with dictionary mode receivers.
+ // if (%IsDictPropertyConstTrackingEnabled()) {
+ // assertTrue(%HasFastProperties(o));
+ // assertFalse(%HasFastProperties(proto1));
+ // assertFalse(%HasFastProperties(proto2));
+ // assertUnoptimized(read_x);
+ // }
+})();
+
+// The machinery we use for detecting the invalidation of constants held by
+// dictionary mode objects (related to the prototype validity cell mechanism) is
+// specific to prototypes. This means that for non-prototype dictionary mode
+// objects, we have no way of detecting changes invalidating folded
+// constants. Therefore, we must not fold constants held by non-prototype
+// dictionary mode objects. This is tested here.
+(function() {
+ var proto = Object.create(null);
+ proto.x = 1;
+ var o = Object.create(null);
+ Object.setPrototypeOf(o, proto);
+ assertFalse(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto));
+
+ function read_x(arg_o) {
+ return arg_o.x;
+ }
+
+ %PrepareFunctionForOptimization(read_x);
+ assertEquals(1, read_x(o));
+ %OptimizeFunctionOnNextCall(read_x);
+ assertEquals(1, read_x(o));
+ assertOptimized(read_x);
+
+ var dummy = {x : 123};
+ read_x(dummy);
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertFalse(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto));
+
+ // We never inlined the access, so it's still optimized.
+ assertOptimized(read_x);
+ }
+})();
+
+// Test inlining of accessor.
+(function() {
+ var proto = Object.create(null);
+ proto.x_val = 1;
+ Object.defineProperty(proto, "x", {
+ get : function () {return this.x_val;}
+ });
+
+ var o = Object.create(proto);
+ assertFalse(%HasFastProperties(proto))
+
+ function read_x(arg_o) {
+ return arg_o.x;
+ }
+
+ %PrepareFunctionForOptimization(read_x);
+ assertEquals(1, read_x(o));
+ %OptimizeFunctionOnNextCall(read_x);
+ assertEquals(1, read_x(o));
+ assertOptimized(read_x);
+
+ // Test that we inlined the access:
+ var dummy = {x : 123};
+ read_x(dummy);
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto));
+ assertUnoptimized(read_x);
+ }
+})();
+
+// Invalidation by adding same property to receiver.
+(function() {
+ var proto = Object.create(null);
+ proto.x = 1;
+ var o = Object.create(proto);
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto));
+
+ function read_x(arg_o) {
+ return arg_o.x;
+ }
+
+ %PrepareFunctionForOptimization(read_x);
+ assertEquals(1, read_x(o));
+ %OptimizeFunctionOnNextCall(read_x);
+ assertEquals(1, read_x(o));
+ assertOptimized(read_x);
+
+ o.x = 2;
+
+ assertEquals(2, read_x(o));
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto));
+ assertUnoptimized(read_x);
+ }
+
+})();
+
+// Invalidation by adding property to intermediate prototype.
+(function() {
+ var proto = Object.create(null);
+ proto.x = 1;
+ var in_between = Object.create(null);
+ Object.setPrototypeOf(in_between, proto);
+
+ var o = Object.create(in_between);
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(in_between));
+ assertFalse(%HasFastProperties(proto));
+
+ function read_x(arg_o) {
+ return arg_o.x;
+ }
+
+ %PrepareFunctionForOptimization(read_x);
+ assertEquals(1, read_x(o));
+ %OptimizeFunctionOnNextCall(read_x);
+ assertEquals(1, read_x(o));
+ assertOptimized(read_x);
+
+ in_between.x = 2;
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertFalse(%HasFastProperties(in_between));
+ assertFalse(%HasFastProperties(proto));
+ assertUnoptimized(read_x);
+ }
+
+ assertEquals(2, read_x(o));
+})();
+
+// Invalidation by changing prototype of receiver.
+(function() {
+ var proto = Object.create(null);
+ proto.x = 1;
+ var other_proto = Object.create(null);
+ other_proto.x = 2;
+
+ var o = Object.create(proto);
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto));
+
+ function read_x(arg_o) {
+ return arg_o.x;
+ }
+
+ %PrepareFunctionForOptimization(read_x);
+ assertEquals(1, read_x(o));
+ %OptimizeFunctionOnNextCall(read_x);
+ assertEquals(1, read_x(o));
+
+ Object.setPrototypeOf(o, other_proto);
+ assertEquals(2, read_x(o));
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertFalse(%HasFastProperties(proto));
+ assertFalse(%HasFastProperties(other_proto));
+ assertUnoptimized(read_x);
+ }
+})();
+
+// Invalidation by changing [[Prototype]] of a prototype on the chain from the
+// receiver to the holder.
+(function() {
+ var proto = Object.create(null);
+ proto.x = 1;
+ var other_proto = Object.create(null);
+ other_proto.x = 2;
+ var in_between = Object.create(null);
+ Object.setPrototypeOf(in_between, proto);
+
+ var o = Object.create(in_between);
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(in_between));
+ assertFalse(%HasFastProperties(proto));
+
+ function read_x(arg_o) {
+ return arg_o.x;
+ }
+
+ %PrepareFunctionForOptimization(read_x);
+ assertEquals(1, read_x(o));
+ %OptimizeFunctionOnNextCall(read_x);
+ assertEquals(1, read_x(o));
+ assertOptimized(read_x);
+
+ Object.setPrototypeOf(in_between, other_proto);
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertFalse(%HasFastProperties(in_between));
+ assertFalse(%HasFastProperties(proto));
+ assertFalse(%HasFastProperties(other_proto));
+ assertUnoptimized(read_x);
+ }
+
+ assertEquals(2, read_x(o));
+})();
+
+// Invalidation by changing property on prototype itself.
+(function() {
+ var proto = Object.create(null);
+ proto.x = 1;
+ var o = Object.create(proto);
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto));
+
+ function read_x(arg_o) {
+ return arg_o.x;
+ }
+
+ %PrepareFunctionForOptimization(read_x);
+ assertEquals(1, read_x(o));
+ %OptimizeFunctionOnNextCall(read_x);
+ assertEquals(1, read_x(o));
+ assertOptimized(read_x);
+
+ proto.x = 2;
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertFalse(%HasFastProperties(proto));
+ assertUnoptimized(read_x);
+ }
+
+ assertEquals(2, read_x(o));
+})();
+
+// Invalidation by deleting property on prototype.
+(function() {
+ var proto = Object.create(null);
+ proto.x = 1;
+ var o = Object.create(proto);
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto));
+
+ function read_x(arg_o) {
+ return arg_o.x;
+ }
+
+ %PrepareFunctionForOptimization(read_x);
+ read_x(o);
+ %OptimizeFunctionOnNextCall(read_x);
+ read_x(o);
+
+ delete proto.x;
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertFalse(%HasFastProperties(proto));
+ assertUnoptimized(read_x);
+ }
+
+ assertEquals(undefined, read_x(o));
+})();
+
+// Storing the same value does not invalidate const-ness. Store done from
+// runtime/without feedback.
+(function() {
+ var proto = Object.create(null);
+ var some_object = {bla: 123};
+ proto.x = 1;
+ proto.y = some_object
+
+ var o = Object.create(proto);
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto));
+
+ function read_xy(arg_o) {
+ return [arg_o.x, arg_o.y];
+ }
+
+ %PrepareFunctionForOptimization(read_xy);
+ assertEquals([1, some_object], read_xy(o));
+ %OptimizeFunctionOnNextCall(read_xy);
+ assertEquals([1, some_object], read_xy(o));
+ assertOptimized(read_xy);
+
+ // Build value 1 without re-using proto.x.
+ var x2 = 0;
+ for(var i = 0; i < 5; ++i) {
+ x2 += 0.2;
+ }
+
+ // Storing the same values for x and y again:
+ proto.x = x2;
+ proto.y = some_object;
+ assertEquals(x2, proto.x);
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertFalse(%HasFastProperties(proto));
+ assertTrue(%HasOwnConstDataProperty(proto, "x"));
+ assertOptimized(read_xy);
+ }
+
+ proto.x = 2;
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertFalse(%HasFastProperties(proto));
+ assertFalse(%HasOwnConstDataProperty(proto, "x"));
+ assertUnoptimized(read_xy);
+ }
+
+ assertEquals(2, read_xy(o)[0]);
+})();
+
+// Storing the same value does not invalidate const-ness. Store done by IC
+// handler.
+(function() {
+ var proto = Object.create(null);
+ var some_object = {bla: 123};
+ proto.x = 1;
+ proto.y = some_object
+
+ var o = Object.create(proto);
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto));
+
+ function read_xy(arg_o) {
+ return [arg_o.x, arg_o.y];
+ }
+
+ %PrepareFunctionForOptimization(read_xy);
+ assertEquals([1, some_object], read_xy(o));
+ %OptimizeFunctionOnNextCall(read_xy);
+ assertEquals([1, some_object], read_xy(o));
+ assertOptimized(read_xy);
+
+ // Build value 1 without re-using proto.x.
+ var x2 = 0;
+ for(var i = 0; i < 5; ++i) {
+ x2 += 0.2;
+ }
+
+ function change_xy(obj, x, y) {
+ obj.x = x;
+ obj.y = y;
+ }
+
+ %PrepareFunctionForOptimization(change_xy);
+ // Storing the same values for x and y again:
+ change_xy(proto, 1, some_object);
+ change_xy(proto, 1, some_object);
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertFalse(%HasFastProperties(proto));
+ assertTrue(%HasOwnConstDataProperty(proto, "x"));
+ assertOptimized(read_xy);
+ }
+
+ change_xy(proto, 2, some_object);
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertFalse(%HasFastProperties(proto));
+ assertFalse(%HasOwnConstDataProperty(proto, "x"));
+ assertUnoptimized(read_xy);
+ }
+
+ assertEquals(2, read_xy(o)[0]);
+})();
+
+// Invalidation by replacing a prototype. Just like the old prototype, the new
+// prototype owns the property as an accessor, but in the form of an
+// AccessorInfo rather than an AccessorPair.
+(function() {
+ var proto1 = Object.create(null);
+ Object.defineProperty(proto1, 'length', {get() {return 1}});
+ var proto2 = Object.create(proto1);
+ var o = Object.create(proto2);
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto1));
+ assertFalse(%HasFastProperties(proto2));
+
+ function read_length(arg_o) {
+ return arg_o.length;
+ }
+
+ %PrepareFunctionForOptimization(read_length);
+ assertEquals(1, read_length(o));
+ %OptimizeFunctionOnNextCall(read_length, "concurrent");
+ assertEquals(1, read_length(o));
+ assertUnoptimized(read_length, "no sync");
+
+ var other_proto1 = [];
+ Object.setPrototypeOf(proto2, other_proto1);
+ %UnblockConcurrentRecompilation();
+ assertUnoptimized(read_length, "sync");
+ assertEquals(0, read_length(o));
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertFalse(%HasFastProperties(proto1));
+ assertFalse(%HasFastProperties(proto2));
+ assertFalse(%HasFastProperties(other_proto1));
+ assertUnoptimized(read_length);
+ }
+})();
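All of the invalidation tests above share one skeleton: fold a constant read through a dictionary mode prototype, mutate something on the chain, and check that the folded code is thrown away, but only when %IsDictPropertyConstTrackingEnabled(), because otherwise the constant was never folded in the first place. Condensed (a sketch, assuming the flags at the top of this file and the mjsunit helpers):

var proto = Object.create(null);   // Object.create(null) gives a dictionary mode object.
proto.x = 1;
var o = Object.create(proto);

function read_x(obj) { return obj.x; }

%PrepareFunctionForOptimization(read_x);
assertEquals(1, read_x(o));
%OptimizeFunctionOnNextCall(read_x);
assertEquals(1, read_x(o));
assertOptimized(read_x);

proto.x = 2;                       // Invalidate the folded constant.

if (%IsDictPropertyConstTrackingEnabled()) {
  assertFalse(%HasFastProperties(proto));
  assertUnoptimized(read_x);       // The const-value dependency was dropped.
}
assertEquals(2, read_x(o));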
diff --git a/deps/v8/test/mjsunit/const-field-tracking-2.js b/deps/v8/test/mjsunit/const-field-tracking-2.js
index c1da5cf0dcc..86cbb51a3b9 100644
--- a/deps/v8/test/mjsunit/const-field-tracking-2.js
+++ b/deps/v8/test/mjsunit/const-field-tracking-2.js
@@ -104,7 +104,8 @@ function TestLoadFromConstantFieldOfAPrototype(the_value, other_value) {
function warmup() { return new O().v; }
%EnsureFeedbackVectorForFunction(warmup);
warmup(); warmup(); warmup();
- assertTrue(%HasFastProperties(O.prototype));
+ if (!%IsDictPropertyConstTrackingEnabled())
+ assertTrue(%HasFastProperties(O.prototype));
// The parameter object is not constant but all the values have the same
// map and therefore the compiler knows the prototype object and can
diff --git a/deps/v8/test/mjsunit/const-field-tracking.js b/deps/v8/test/mjsunit/const-field-tracking.js
index bc979b80b40..2474c2e3154 100644
--- a/deps/v8/test/mjsunit/const-field-tracking.js
+++ b/deps/v8/test/mjsunit/const-field-tracking.js
@@ -101,7 +101,8 @@ function TestLoadFromConstantFieldOfAPrototype(the_value, other_value) {
function warmup() { return new O().v; }
%EnsureFeedbackVectorForFunction(warmup);
warmup(); warmup(); warmup();
- assertTrue(%HasFastProperties(O.prototype));
+ if (!%IsDictPropertyConstTrackingEnabled())
+ assertTrue(%HasFastProperties(O.prototype));
// The parameter object is not constant but all the values have the same
// map and therefore the compiler knows the prototype object and can
diff --git a/deps/v8/test/mjsunit/constant-folding-2.js b/deps/v8/test/mjsunit/constant-folding-2.js
index 9b3a625cbe3..c855f792afe 100644
--- a/deps/v8/test/mjsunit/constant-folding-2.js
+++ b/deps/v8/test/mjsunit/constant-folding-2.js
@@ -27,7 +27,7 @@
// Flags: --allow-natives-syntax --nostress-opt --opt
-// Flags: --no-stress-flush-bytecode
+// Flags: --no-stress-flush-bytecode --no-lazy-feedback-allocation
function test(f, iterations) {
%PrepareFunctionForOptimization(f);
diff --git a/deps/v8/test/mjsunit/ensure-growing-store-learns.js b/deps/v8/test/mjsunit/ensure-growing-store-learns.js
index ba5e5ae5a90..430823702cb 100644
--- a/deps/v8/test/mjsunit/ensure-growing-store-learns.js
+++ b/deps/v8/test/mjsunit/ensure-growing-store-learns.js
@@ -32,7 +32,11 @@
foo(a, 3);
assertEquals(a[3], 5.3);
foo(a, 50000);
- assertUnoptimized(foo);
+ // TODO(v8:11457) We don't currently support inlining element stores if there
+ // is a dictionary mode prototype on the prototype chain. Therefore, if
+ // v8_dict_property_const_tracking is enabled, the optimized code only
+ // contains a call to the IC handler and doesn't get deopted.
+ assertEquals(%IsDictPropertyConstTrackingEnabled(), isOptimized(foo));
assertTrue(%HasDictionaryElements(a));
%PrepareFunctionForOptimization(foo);
diff --git a/deps/v8/test/mjsunit/es6/collections-constructor-custom-iterator.js b/deps/v8/test/mjsunit/es6/collections-constructor-custom-iterator.js
index 29b65dc3589..bc6ed471420 100644
--- a/deps/v8/test/mjsunit/es6/collections-constructor-custom-iterator.js
+++ b/deps/v8/test/mjsunit/es6/collections-constructor-custom-iterator.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --opt
+// Flags: --allow-natives-syntax --opt --no-lazy-feedback-allocation
var global;
diff --git a/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-array-prototype.js b/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-array-prototype.js
index 0353be32050..5f4fa25e54e 100644
--- a/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-array-prototype.js
+++ b/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-array-prototype.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --opt
+// Flags: --allow-natives-syntax --opt --no-lazy-feedback-allocation
function TestSetWithCustomIterator(ctor) {
const k1 = {};
diff --git a/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-protoype.js b/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-protoype.js
index 91b8767403d..45e4528a53f 100644
--- a/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-protoype.js
+++ b/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-protoype.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --opt
+// Flags: --allow-natives-syntax --opt --no-lazy-feedback-allocation
function TestSetPrototypeModified(ctor) {
const originalPrototypeAdd = ctor.prototype.add;
diff --git a/deps/v8/test/mjsunit/es6/super-ic-opt-no-turboprop.js b/deps/v8/test/mjsunit/es6/super-ic-opt-no-turboprop.js
index 3c0204466bb..631f315c061 100644
--- a/deps/v8/test/mjsunit/es6/super-ic-opt-no-turboprop.js
+++ b/deps/v8/test/mjsunit/es6/super-ic-opt-no-turboprop.js
@@ -48,7 +48,12 @@
// Assert that the function was deoptimized (dependency to the constant
// value).
- assertUnoptimized(C.prototype.foo);
+ // TODO(v8:11457) We don't support inlining JSLoadNamedFromSuper for
+ // dictionary mode prototypes, yet. Therefore, if
+ // v8_dict_property_const_tracking is enabled, the optimized code only
+ // contains a call to the IC handler and doesn't get deopted.
+ assertEquals(%IsDictPropertyConstTrackingEnabled(),
+ isOptimized(C.prototype.foo));
})();
(function TestSuperpropertyAccessInlined() {
diff --git a/deps/v8/test/mjsunit/es6/super-ic-opt.js b/deps/v8/test/mjsunit/es6/super-ic-opt.js
index c360184a180..9b1b8d218a7 100644
--- a/deps/v8/test/mjsunit/es6/super-ic-opt.js
+++ b/deps/v8/test/mjsunit/es6/super-ic-opt.js
@@ -118,7 +118,12 @@
// Assert that the function was deoptimized (dependency to the constant
// value).
- assertUnoptimized(D.prototype.foo);
+ // TODO(v8:11457) We don't support inlining JSLoadNamedFromSuper for
+ // dictionary mode prototypes, yet. Therefore, if
+ // v8_dict_property_const_tracking is enabled, the optimized code only
+ // contains a call to the IC handler and doesn't get deopted.
+ assertEquals(%IsDictPropertyConstTrackingEnabled(),
+ isOptimized(D.prototype.foo));
})();
(function TestPropertyIsNonConstantData() {
@@ -239,7 +244,12 @@
assertEquals("new value", r);
// Assert that the function was deoptimized (holder changed).
- assertUnoptimized(C.prototype.foo);
+ // TODO(v8:11457) We don't support inlining JSLoadNamedFromSuper for
+ // dictionary mode prototypes, yet. Therefore, if
+ // v8_dict_property_const_tracking is enabled, the optimized code only
+ // contains a call to the IC handler and doesn't get deopted.
+ assertEquals(%IsDictPropertyConstTrackingEnabled(),
+ isOptimized(C.prototype.foo));
})();
(function TestUnexpectedHomeObjectPrototypeDeoptimizes() {
@@ -278,7 +288,13 @@
assertEquals("new value", r);
// Assert that the function was deoptimized.
- assertUnoptimized(D.prototype.foo);
+ // TODO(v8:11457) We don't support inlining JSLoadNamedFromSuper for
+ // dictionary mode prototypes, yet. Therefore, if
+ // v8_dict_property_const_tracking is enabled, the optimized code only
+ // contains a call to the IC handler and doesn't get deopted.
+ assertEquals(%IsDictPropertyConstTrackingEnabled(),
+ isOptimized(D.prototype.foo));
+
})();
(function TestUnexpectedReceiverDoesNotDeoptimize() {
diff --git a/deps/v8/test/mjsunit/field-type-tracking.js b/deps/v8/test/mjsunit/field-type-tracking.js
index 1ff336a6b3a..13f1c0236f8 100644
--- a/deps/v8/test/mjsunit/field-type-tracking.js
+++ b/deps/v8/test/mjsunit/field-type-tracking.js
@@ -163,7 +163,12 @@
%OptimizeFunctionOnNextCall(baz);
baz(f2, {b: 9});
baz(f3, {a: -1});
- assertUnoptimized(baz);
+ // TODO(v8:11457) Currently, Turbofan/Turboprop can never inline any stores if
+ // there is a dictionary mode object in the prototype chain. Therefore, if
+ // v8_dict_property_const_tracking is enabled, the optimized code only
+ // contains a call to the IC handler and doesn't get deopted.
+ assertEquals(%IsDictPropertyConstTrackingEnabled(),
+ isOptimized(baz));
})();
(function() {
diff --git a/deps/v8/test/mjsunit/harmony/import-from-evaluation-errored.js b/deps/v8/test/mjsunit/harmony/import-from-evaluation-errored.js
index e39ce7ca4ff..d94f6a71512 100644
--- a/deps/v8/test/mjsunit/harmony/import-from-evaluation-errored.js
+++ b/deps/v8/test/mjsunit/harmony/import-from-evaluation-errored.js
@@ -5,8 +5,8 @@
// Flags: --allow-natives-syntax --harmony-dynamic-import
var error1, error2;
-import('modules-skip-11.mjs').catch(e => error1 = e);
-import('modules-skip-11.mjs').catch(e => error2 = e);
+import('modules-skip-11.mjs').catch(e => { error1 = e });
+import('modules-skip-11.mjs').catch(e => { error2 = e });
%PerformMicrotaskCheckpoint();
assertEquals(error1, error2);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-15.mjs b/deps/v8/test/mjsunit/harmony/modules-import-15.mjs
index d7a590e4425..ab9263e1190 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-15.mjs
+++ b/deps/v8/test/mjsunit/harmony/modules-import-15.mjs
@@ -3,28 +3,9 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --harmony-dynamic-import
-//
-// Note: This test fails with top level await due to test1, which tries to
-// import a module using top level await and expects it to fail.
var ran = false;
-async function test1() {
- try {
- let x = await import('modules-skip-8.mjs');
- %AbortJS('failure: should be unreachable');
- } catch(e) {
- assertEquals('Unexpected reserved word', e.message);
- ran = true;
- }
-}
-
-test1();
-%PerformMicrotaskCheckpoint();
-assertTrue(ran);
-
-ran = false;
-
async function test2() {
try {
let x = await import('modules-skip-9.mjs');
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/basics.js b/deps/v8/test/mjsunit/harmony/weakrefs/basics.js
index f879df9a2a6..547a688c2ae 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/basics.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/basics.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --harmony-weak-refs
+
(function TestConstructFinalizationRegistry() {
let fg = new FinalizationRegistry(() => {});
assertEquals(fg.toString(), "[object FinalizationRegistry]");
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js
index 960ab89487d..eac92486a0f 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let r = Realm.create();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js
index 4e760144e6a..8b43618c711 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking --allow-natives-syntax
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking --allow-natives-syntax
// This test asserts that the cleanup function call, scheduled by GC, is a
// microtask and not a normal task.
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js
index 11a9b3099d4..3513c8f211a 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanedUp = false;
let r = Realm.create();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js
index a824bd9d852..f2374efc885 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let r = Realm.create();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup.js
index 730312cba53..ef60d3f1501 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_called = 0;
let holdings_list = [];
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-optional.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-optional.js
index 4cb24591722..7476f2bd4ed 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-optional.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-optional.js
@@ -5,7 +5,7 @@
// Flags: --harmony-weak-refs
// FinalizationRegistry#cleanupSome is normative optional and has its own
-// flag. Test that it's not present.
+// flag. Test that it's not present with only --harmony-weak-refs.
assertEquals(undefined, Object.getOwnPropertyDescriptor(
FinalizationRegistry.prototype, "cleanupSome"));
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/clearkeptobjects-on-quit.js b/deps/v8/test/mjsunit/harmony/weakrefs/clearkeptobjects-on-quit.js
index 375c7f6d13f..6007f9c3608 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/clearkeptobjects-on-quit.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/clearkeptobjects-on-quit.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --harmony-weak-refs
+
// A newly created WeakRef is kept alive until the end of the next microtask
// checkpoint. V8 asserts that the kept objects list is cleared at the end of
// microtask checkpoints when the microtask policy is auto. Test that d8, which
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-and-weakref.js b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-and-weakref.js
index a6cda824859..274e714994d 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-and-weakref.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-and-weakref.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_called = false;
let cleanup = function(holdings) {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js
index 4e0ab2af8ea..72d2cae83ef 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_called = false;
function cleanup(holdings) {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-keeps-holdings-alive.js b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-keeps-holdings-alive.js
index 7a09273ca74..f63d17ed7fe 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-keeps-holdings-alive.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-keeps-holdings-alive.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_called = false;
let holdings_list = [];
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js
index 7db4d44a6a1..3b3f3412a20 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
// Flags: --no-stress-flush-bytecode
let cleanup0_call_count = 0;
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js b/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js
index 533c3cb631b..21b9ff709b7 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_call_count = 0;
let cleanup_holdings_count = 0;
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/reentrant-gc-from-cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/reentrant-gc-from-cleanup.js
index 07e23f614f9..235a34a5921 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/reentrant-gc-from-cleanup.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/reentrant-gc-from-cleanup.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let call_count = 0;
let reentrant_gc = function(holdings) {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/two-weakrefs.js b/deps/v8/test/mjsunit/harmony/weakrefs/two-weakrefs.js
index 6cfc1a1aa77..c17e7aa969d 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/two-weakrefs.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/two-weakrefs.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let o1 = {};
let o2 = {};
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js b/deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js
index a45426e3f60..56d9b562a11 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_call_count = 0;
let cleanup_holdings_count = 0;
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js
index b23f396f38d..400385d1931 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_call_count = 0;
let cleanup_holdings_count = 0;
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js
index aebcc6a746f..efa4df52170 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking --noincremental-marking
let cleanup_call_count = 0;
let cleanup = function(holdings) {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js
index b3f425655eb..ff48758c07d 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_call_count = 0;
let cleanup = function(holdings) {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup.js
index 903fb33a378..e607a1ead5a 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_call_count = 0;
let cleanup_holdings_count = 0;
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js
index 74799968445..e04b9f14859 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_call_count = 0;
let cleanup_holdings_count = 0;
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js
index ac1e0e2c412..e11fd3b8e93 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_call_count = 0;
let cleanup_holdings_count = 0;
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js
index f9ff219d657..772078e1077 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_call_count = 0;
let cleanup_holdings_count = 0;
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js
index 05ba4f28d27..3b3e488a82d 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_call_count = 0;
let cleanup = function(holdings) {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js b/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js
index 3c8af1995b8..ee4b5ecb903 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_called = false;
let cleanup = function(holdings_arg) {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/weakref-creation-keeps-alive.js b/deps/v8/test/mjsunit/harmony/weakrefs/weakref-creation-keeps-alive.js
index 78e8865ac02..4c8641d8aa0 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/weakref-creation-keeps-alive.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/weakref-creation-keeps-alive.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let wr;
(function() {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/weakref-deref-keeps-alive.js b/deps/v8/test/mjsunit/harmony/weakrefs/weakref-deref-keeps-alive.js
index f7c05e88b85..eb02290dfdc 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/weakref-deref-keeps-alive.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/weakref-deref-keeps-alive.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let wr;
let wr_control; // control WeakRef for testing what happens without deref
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index 144579703a0..eb27e5ba6e7 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -193,6 +193,9 @@ var isInterpreted;
// Returns true if given function in baseline.
var isBaseline;
+// Returns true if given function is unoptimized (interpreted or baseline).
+var isUnoptimized;
+
// Returns true if given function is optimized.
var isOptimized;
@@ -681,8 +684,7 @@ var prettyPrinted;
return;
}
var is_optimized = (opt_status & V8OptimizationStatus.kOptimized) !== 0;
- var is_baseline = (opt_status & V8OptimizationStatus.kBaseline) !== 0;
- assertFalse(is_optimized && !is_baseline, name_opt);
+ assertFalse(is_optimized, name_opt);
}
assertOptimized = function assertOptimized(
@@ -745,6 +747,10 @@ var prettyPrinted;
(opt_status & V8OptimizationStatus.kBaseline) !== 0;
}
+ isUnoptimized = function isUnoptimized(fun) {
+ return isInterpreted(fun) || isBaseline(fun);
+ }
+
isOptimized = function isOptimized(fun) {
var opt_status = OptimizationStatus(fun, "");
assertTrue((opt_status & V8OptimizationStatus.kIsFunction) !== 0,
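Note (illustration only, not part of the patch): a minimal sketch of how a test could use the new isUnoptimized helper added above, assuming d8 is run with --allow-natives-syntax and mjsunit.js is loaded.

function add(a, b) { return a + b; }

%PrepareFunctionForOptimization(add);
add(1, 2);
add(3, 4);
// Before tier-up the function is still interpreted or baseline code.
assertTrue(isUnoptimized(add));

%OptimizeFunctionOnNextCall(add);
add(5, 6);
// After the forced tier-up it should report as optimized.
assertOptimized(add);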
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index f021b73a7f0..b6dd59ec697 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -163,7 +163,8 @@
'unicodelctest-no-optimization': [PASS, NO_VARIANTS],
# Test is only enabled on ASAN. Takes too long on many other bots.
- 'regress/regress-crbug-9161': [PASS, SLOW, ['asan == False', SKIP]],
+ # Also disabled on Mac ASAN for https://crbug.com/v8/11437.
+ 'regress/regress-crbug-9161': [PASS, SLOW, ['not asan or system == macos', SKIP]],
# OOM with too many isolates/memory objects (https://crbug.com/1010272)
# Predictable tests fail due to race between postMessage and GrowMemory
@@ -273,6 +274,7 @@
'unicode-test': [SKIP],
'whitespaces': [SKIP],
'baseline/*': [SKIP],
+ 'regress/regress-chromium-1194026': [SKIP],
# Unsuitable for GC stress because coverage information is lost on GC.
'code-coverage-ad-hoc': [SKIP],
@@ -331,20 +333,9 @@
'regress/asm/*': [SKIP],
'regress/wasm/*': [SKIP],
+ 'asm/*': [SKIP],
'wasm/*': [SKIP],
- 'asm/asm-heap': [SKIP],
- 'asm/asm-validation': [SKIP],
- 'asm/call-stdlib': [SKIP],
- 'asm/call-annotation': [SKIP],
- 'asm/global-imports': [SKIP],
- 'asm/regress-1027595': [SKIP],
- 'asm/regress-1069173': [SKIP],
- 'asm/regress-913822': [SKIP],
- 'asm/regress-937650': [SKIP],
- 'asm/regress-9531': [SKIP],
- 'asm/return-types': [SKIP],
-
# Tests tracing when generating wasm in TurboFan.
'tools/compiler-trace-flags-wasm': [SKIP],
}], # not has_webassembly or variant == jitless
@@ -360,6 +351,9 @@
'regexp-tier-up-multiple': [SKIP],
'regress/regress-996234': [SKIP],
+ # This test relies on TurboFan being enabled.
+ 'compiler/fast-api-calls': [SKIP],
+
# These tests check that we can trace the compiler.
'tools/compiler-trace-flags': [SKIP],
@@ -508,9 +502,6 @@
##############################################################################
['arch == arm64 and simulator_run', {
- 'compiler/osr-big': [PASS, SLOW],
- 'regress/regress-454725': [PASS, SLOW],
- 'json': [PASS, SLOW],
'try': [PASS, SLOW],
'non-extensible-array-reduce': [PASS, SLOW],
}], # 'arch == arm64 and simulator_run'
@@ -574,9 +565,6 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=7102
# Flaky due to huge string allocation.
'regress/regress-748069': [SKIP],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=11438
- 'regress/regress-crbug-627935': [SKIP],
}], # 'msan == True'
##############################################################################
@@ -613,6 +601,9 @@
# BUG(v8:9506): times out.
'wasm/shared-memory-worker-explicit-gc-stress': [SKIP],
+
+ # https://crbug.com/v8/9337 - OOMs on TSAN
+ 'compiler/regress-9017': [SKIP],
}], # 'tsan == True'
##############################################################################
@@ -831,6 +822,32 @@
# https://github.com/v8-riscv/v8/issues/418
'regress/regress-1138075': [SKIP],
'regress/regress-1138611': [SKIP],
+
+ # SIMD is not yet implemented
+ 'regress/wasm/regress-1054466': [SKIP],
+ 'regress/wasm/regress-1065599': [SKIP],
+ 'regress/wasm/regress-1070078': [SKIP],
+ 'regress/wasm/regress-1081030': [SKIP],
+ 'regress/wasm/regress-10831': [SKIP],
+ 'regress/wasm/regress-10309': [SKIP],
+ 'regress/wasm/regress-1111522': [SKIP],
+ 'regress/wasm/regress-1116019': [SKIP],
+ 'regress/wasm/regress-1124885': [SKIP],
+ 'regress/wasm/regress-1165966': [SKIP],
+ 'regress/wasm/regress-1112124': [SKIP],
+ 'regress/wasm/regress-1132461': [SKIP],
+ 'regress/wasm/regress-1161555': [SKIP],
+ 'regress/wasm/regress-1161954': [SKIP],
+ 'regress/wasm/regress-1187831': [SKIP],
+ 'regress/regress-1172797': [SKIP],
+ 'regress/wasm/regress-1179025': [SKIP],
+ 'wasm/simd-errors': [SKIP],
+ 'wasm/simd-globals': [SKIP],
+ 'wasm/multi-value-simd': [SKIP],
+ 'wasm/simd-call': [SKIP],
+ 'wasm/liftoff-simd-params': [SKIP],
+ 'wasm/exceptions-simd': [SKIP],
+
}], # 'arch == riscv64'
['arch == riscv64 and variant == stress_incremental_marking', {
@@ -938,6 +955,7 @@
'deopt-recursive-lazy-once': [SKIP],
'deopt-recursive-soft-once': [SKIP],
'code-coverage-block-opt': [SKIP],
+ 'compiler/fast-api-calls': [SKIP],
'compiler/serializer-apply': [SKIP],
'compiler/serializer-call': [SKIP],
'compiler/serializer-dead-after-jump': [SKIP],
@@ -1303,6 +1321,13 @@
}], # variant == assert_types
##############################################################################
+['variant == stress_snapshot', {
+ # This test initializes an embedder object that never needs to be serialized
+ # to the snapshot, so we don't have a SerializeInternalFieldsCallback for it.
+ 'compiler/fast-api-calls': [SKIP],
+}], # variant == stress_snapshot
+
+##############################################################################
['variant == stress_snapshot and arch != x64', {
# Deserialization fails due to read-only snapshot checksum verification.
# https://crbug.com/v8/10491
@@ -1421,11 +1446,12 @@
'regress/wasm/regress-1161555': [SKIP],
'regress/wasm/regress-1161954': [SKIP],
'regress/wasm/regress-1165966': [SKIP],
+ 'regress/wasm/regress-1187831': [SKIP],
}], # no_simd_sse == True
##############################################################################
-# TODO(v8:11421): Port baseline compiler to ia32, Arm, MIPS, S390 and PPC
-['arch not in (x64, arm64)', {
+# TODO(v8:11421): Port baseline compiler to other architectures.
+['arch not in (x64, arm64, ia32, arm)', {
'baseline/*': [SKIP],
}],
@@ -1434,4 +1460,9 @@
'regress/regress-779407': [SKIP],
}], # variant == experimental_regexp
+##############################################################################
+['variant == concurrent_inlining', {
+ 'concurrent-initial-prototype-change-1': [SKIP],
+}], # variant == concurrent_inlining
+
]
diff --git a/deps/v8/test/mjsunit/promise-hooks.js b/deps/v8/test/mjsunit/promise-hooks.js
deleted file mode 100644
index f7c1558c1d2..00000000000
--- a/deps/v8/test/mjsunit/promise-hooks.js
+++ /dev/null
@@ -1,275 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --opt --no-always-opt --no-stress-opt --deopt-every-n-times=0 --ignore-unhandled-promises
-
-let log = [];
-let asyncId = 0;
-
-function logEvent (type, args) {
- const promise = args[0];
- promise.asyncId = promise.asyncId || ++asyncId;
- log.push({
- type,
- promise,
- parent: args[1],
- argsLength: args.length
- })
-}
-function initHook(...args) {
- logEvent('init', args);
-}
-function resolveHook(...args) {
- logEvent('resolve', args);
-}
-function beforeHook(...args) {
- logEvent('before', args);
-}
-function afterHook(...args) {
- logEvent('after', args);
-}
-
-function printLog(message) {
- console.log(` --- ${message} --- `)
- for (const event of log) {
- console.log(JSON.stringify(event))
- }
-}
-
-function assertNextEvent(type, args) {
- const [ promiseOrId, parentOrId ] = args;
- const nextEvent = log.shift();
-
- assertEquals(type, nextEvent.type);
- assertEquals(type === 'init' ? 2 : 1, nextEvent.argsLength);
-
- assertTrue(nextEvent.promise instanceof Promise);
- if (promiseOrId instanceof Promise) {
- assertEquals(promiseOrId, nextEvent.promise);
- } else {
- assertTrue(typeof promiseOrId === 'number');
- assertEquals(promiseOrId, nextEvent.promise?.asyncId);
- }
-
- if (parentOrId instanceof Promise) {
- assertEquals(parentOrId, nextEvent.parent);
- assertTrue(nextEvent.parent instanceof Promise);
- } else if (typeof parentOrId === 'number') {
- assertEquals(parentOrId, nextEvent.parent?.asyncId);
- assertTrue(nextEvent.parent instanceof Promise);
- } else {
- assertEquals(undefined, parentOrId);
- assertEquals(undefined, nextEvent.parent);
- }
-}
-function assertEmptyLog() {
- assertEquals(0, log.length);
- asyncId = 0;
- log = [];
-}
-
-// Verify basic log structure of different promise behaviours
-function basicTest() {
- d8.promise.setHooks(initHook, beforeHook, afterHook, resolveHook);
-
- // `new Promise(...)` triggers init event with correct promise
- var done, p1 = new Promise(r => done = r);
- %PerformMicrotaskCheckpoint();
- assertNextEvent('init', [ p1 ]);
- assertEmptyLog();
-
- // `promise.then(...)` triggers init event with correct promise and parent
- var p2 = p1.then(() => { });
- %PerformMicrotaskCheckpoint();
- assertNextEvent('init', [ p2, p1 ]);
- assertEmptyLog();
-
- // `resolve(...)` triggers resolve event and any already attached continuations
- done();
- %PerformMicrotaskCheckpoint();
- assertNextEvent('resolve', [ p1 ]);
- assertNextEvent('before', [ p2 ]);
- assertNextEvent('resolve', [ p2 ]);
- assertNextEvent('after', [ p2 ]);
- assertEmptyLog();
-
- // `reject(...)` triggers the resolve event
- var done, p3 = new Promise((_, r) => done = r);
- done();
- %PerformMicrotaskCheckpoint();
- assertNextEvent('init', [ p3 ]);
- assertNextEvent('resolve', [ p3 ]);
- assertEmptyLog();
-
- // `promise.catch(...)` triggers init event with correct promise and parent
- // When the promise is already completed, the continuation should also run
- // immediately at the next checkpoint.
- var p4 = p3.catch(() => { });
- %PerformMicrotaskCheckpoint();
- assertNextEvent('init', [ p4, p3 ]);
- assertNextEvent('before', [ p4 ]);
- assertNextEvent('resolve', [ p4 ]);
- assertNextEvent('after', [ p4 ]);
- assertEmptyLog();
-
- // Detach hooks
- d8.promise.setHooks();
-}
-
-// Exceptions thrown in hook handlers should not raise or reject
-function exceptions() {
- function thrower() {
- throw new Error('unexpected!');
- }
-
- // Init hook
- d8.promise.setHooks(thrower);
- assertDoesNotThrow(() => {
- Promise.resolve()
- .catch(assertUnreachable);
- });
- %PerformMicrotaskCheckpoint();
- d8.promise.setHooks();
-
- // Before hook
- d8.promise.setHooks(undefined, thrower);
- assertDoesNotThrow(() => {
- Promise.resolve()
- .then(() => {})
- .catch(assertUnreachable);
- });
- %PerformMicrotaskCheckpoint();
- d8.promise.setHooks();
-
- // After hook
- d8.promise.setHooks(undefined, undefined, thrower);
- assertDoesNotThrow(() => {
- Promise.resolve()
- .then(() => {})
- .catch(assertUnreachable);
- });
- %PerformMicrotaskCheckpoint();
- d8.promise.setHooks();
-
- // Resolve hook
- d8.promise.setHooks(undefined, undefined, undefined, thrower);
- assertDoesNotThrow(() => {
- Promise.resolve()
- .catch(assertUnreachable);
- });
- %PerformMicrotaskCheckpoint();
- d8.promise.setHooks();
-
- // Resolve hook for a reject
- d8.promise.setHooks(undefined, undefined, undefined, thrower);
- assertDoesNotThrow(() => {
- Promise.reject()
- .then(assertUnreachable)
- .catch();
- });
- %PerformMicrotaskCheckpoint();
- d8.promise.setHooks();
-}
-
-// For now, expect the optimizer to bail out on async functions
-// when context promise hooks are attached.
-function optimizerBailout(test, verify) {
- // Warm up test method
- %PrepareFunctionForOptimization(test);
- assertUnoptimized(test);
- test();
- test();
- test();
- %PerformMicrotaskCheckpoint();
-
- // Prove transition to optimized code when no hooks are present
- assertUnoptimized(test);
- %OptimizeFunctionOnNextCall(test);
- test();
- assertOptimized(test);
- %PerformMicrotaskCheckpoint();
-
- // Verify that attaching hooks deopts the async function
- d8.promise.setHooks(initHook, beforeHook, afterHook, resolveHook);
- // assertUnoptimized(test);
-
- // Verify log structure of deoptimized call
- %PrepareFunctionForOptimization(test);
- test();
- %PerformMicrotaskCheckpoint();
-
- verify();
-
- // Optimize and verify log structure again
- %OptimizeFunctionOnNextCall(test);
- test();
- assertOptimized(test);
- %PerformMicrotaskCheckpoint();
-
- verify();
-
- d8.promise.setHooks();
-}
-
-optimizerBailout(async () => {
- await Promise.resolve();
-}, () => {
- assertNextEvent('init', [ 1 ]);
- assertNextEvent('init', [ 2 ]);
- assertNextEvent('resolve', [ 2 ]);
- assertNextEvent('init', [ 3, 2 ]);
- assertNextEvent('before', [ 3 ]);
- assertNextEvent('resolve', [ 1 ]);
- assertNextEvent('resolve', [ 3 ]);
- assertNextEvent('after', [ 3 ]);
- assertEmptyLog();
-});
-optimizerBailout(async () => {
- await { then (cb) { cb() } };
-}, () => {
- assertNextEvent('init', [ 1 ]);
- assertNextEvent('init', [ 2, 1 ]);
- assertNextEvent('init', [ 3, 2 ]);
- assertNextEvent('before', [ 2 ]);
- assertNextEvent('resolve', [ 2 ]);
- assertNextEvent('after', [ 2 ]);
- assertNextEvent('before', [ 3 ]);
- assertNextEvent('resolve', [ 1 ]);
- assertNextEvent('resolve', [ 3 ]);
- assertNextEvent('after', [ 3 ]);
- assertEmptyLog();
-});
-basicTest();
-exceptions();
-
-(function regress1126309() {
- function __f_16(test) {
- test();
- d8.promise.setHooks(undefined, () => {});
- %PerformMicrotaskCheckpoint();
- d8.promise.setHooks();
- }
- __f_16(async () => { await Promise.resolve()});
-})();
-
-(function boundFunction() {
- function hook() {};
- const bound = hook.bind(this);
- d8.promise.setHooks(bound, bound, bound, bound);
- Promise.resolve();
- Promise.reject();
- %PerformMicrotaskCheckpoint();
- d8.promise.setHooks();
-})();
-
-
-(function promiseAll() {
- let initCount = 0;
- d8.promise.setHooks(() => { initCount++});
- Promise.all([Promise.resolve(1)]);
- %PerformMicrotaskCheckpoint();
- assertEquals(initCount, 3);
-
- d8.promise.setHooks();
-})();
diff --git a/deps/v8/test/mjsunit/proto-accessor-not-accessible.js b/deps/v8/test/mjsunit/proto-accessor-not-accessible.js
new file mode 100644
index 00000000000..4e86f4e4ab2
--- /dev/null
+++ b/deps/v8/test/mjsunit/proto-accessor-not-accessible.js
@@ -0,0 +1,43 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Accessors for __proto__ are defined in Object.prototype (spec:
+// https://tc39.es/ecma262/#sec-object.prototype.__proto__ ). If
+// Object.prototype is not in the prototype chain of an object, the accessors
+// are not accessible. In particular, __proto__ is treated as a normal property
+// and the special meaning (that getting __proto__ would return the prototype
+// and setting __proto__ would change the prototype) is lost.
+
+function testObjectWithNullProto(object) {
+ assertNull(Object.getPrototypeOf(object));
+
+ // The __proto__ getter is not accessible.
+ assertEquals(undefined, object.__proto__);
+
+ // The __proto__ setter is not accessible. Setting __proto__ will create a
+ // normal property called __proto__ and not change the prototype.
+ object.__proto__ = {};
+ assertNull(Object.getPrototypeOf(object));
+
+ // Object.setPrototypeOf can still be used for really setting the prototype.
+ const proto1 = {};
+ Object.setPrototypeOf(object, proto1);
+
+ // Now the accessors are accessible again.
+ assertEquals(proto1, object.__proto__);
+
+ const proto2 = {};
+ object.__proto__ = proto2;
+ assertEquals(proto2, object.__proto__);
+}
+
+(function TestObjectCreatedWithObjectCreate() {
+ testObjectWithNullProto(Object.create(null));
+})();
+
+(function TestProtoSetToNullAfterCreation() {
+ let object_with_null_proto = {};
+ object_with_null_proto.__proto__ = null;
+ testObjectWithNullProto(object_with_null_proto);
+})();
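Aside (illustration only, not part of the patch): the behaviour the new test relies on can be observed directly — the __proto__ accessor pair lives on Object.prototype, so an object that does not inherit from it treats __proto__ as an ordinary property.

const desc = Object.getOwnPropertyDescriptor(Object.prototype, '__proto__');
console.log(typeof desc.get, typeof desc.set);  // function function

const o = Object.create(null);          // Object.prototype not in the chain
o.__proto__ = 42;                       // creates a plain own data property
console.log(Object.getPrototypeOf(o));  // null
console.log(o.__proto__);               // 42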
diff --git a/deps/v8/test/mjsunit/regress/regress-673297.js b/deps/v8/test/mjsunit/regress/asm/regress-673297.js
index 9a00a908350..9a00a908350 100644
--- a/deps/v8/test/mjsunit/regress/regress-673297.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-673297.js
diff --git a/deps/v8/test/mjsunit/regress/regress-743622.js b/deps/v8/test/mjsunit/regress/asm/regress-743622.js
index 60512585c2c..60512585c2c 100644
--- a/deps/v8/test/mjsunit/regress/regress-743622.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-743622.js
diff --git a/deps/v8/test/mjsunit/regress/regress-1067270.js b/deps/v8/test/mjsunit/regress/regress-1067270.js
index 1c6eddf505a..eb505e5b2bf 100644
--- a/deps/v8/test/mjsunit/regress/regress-1067270.js
+++ b/deps/v8/test/mjsunit/regress/regress-1067270.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --stack-size=1200
const needle = Array(1802).join(" +") + Array(16884).join("A");
const string = "A";
diff --git a/deps/v8/test/mjsunit/regress/regress-1146880.js b/deps/v8/test/mjsunit/regress/regress-1146880.js
new file mode 100644
index 00000000000..c6bfddf84fb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1146880.js
@@ -0,0 +1,26 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --assert-types
+
+function f(a,b) {
+ let t = a >= b;
+ while (t != 0) {
+ a = a | (b - a);
+ let unused = a >= b;
+ t = a < b;
+ }
+}
+function test() {
+ f(Infinity,1);
+ f(undefined, undefined);
+}
+
+// Trigger TurboFan compilation
+%PrepareFunctionForOptimization(test);
+%PrepareFunctionForOptimization(f);
+test();
+test();
+%OptimizeFunctionOnNextCall(test);
+test();
diff --git a/deps/v8/test/mjsunit/regress/regress-11491.js b/deps/v8/test/mjsunit/regress/regress-11491.js
new file mode 100644
index 00000000000..795480a15db
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-11491.js
@@ -0,0 +1,19 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function test() {
+ // Create a generator constructor with the maximum number of allowed parameters.
+ const args = new Array(65535);
+ function* gen() {}
+ const c = gen.constructor.apply(null, args);
+
+ // 'c' having 65535 parameters causes the parameters/registers fixed array
+ // attached to the generator object to be considered a large object.
+ // We call it twice so that it covers both the CreateJSGeneratorObject() C++
+ // runtime function and the CreateGeneratorObject() CSA builtin.
+ c();
+ c();
+}
+
+test();
diff --git a/deps/v8/test/mjsunit/regress/regress-11519.js b/deps/v8/test/mjsunit/regress/regress-11519.js
new file mode 100644
index 00000000000..ae4c83a0a32
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-11519.js
@@ -0,0 +1,25 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --gc-interval=500 --stress-compaction
+
+function bar(a) {
+ return Object.defineProperty(a, 'x', {get() { return 1; }});
+}
+
+function foo() {
+ return {};
+}
+
+%NeverOptimizeFunction(bar);
+%PrepareFunctionForOptimization(foo);
+const o = foo(); // Keep a reference so the GC doesn't kill the map.
+%SimulateNewspaceFull();
+bar(o);
+const a = bar(foo());
+%SimulateNewspaceFull();
+%OptimizeFunctionOnNextCall(foo);
+const b = bar(foo());
+
+assertTrue(%HaveSameMap(a, b));
diff --git a/deps/v8/test/mjsunit/regress/regress-1181240.js b/deps/v8/test/mjsunit/regress/regress-1181240.js
new file mode 100644
index 00000000000..bf053a53109
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1181240.js
@@ -0,0 +1,46 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function runNearStackLimit(f) {
+ function t() {
+ try {
+ t();
+ } catch (e) {
+ f(true);
+ }
+ }
+ t();
+}
+
+var a = {x: 10};
+var b = {y: 10};
+function inner(should_deopt) {
+ if (should_deopt == true) {
+ a.x;
+ }
+ return b.y;
+}
+
+%PrepareFunctionForOptimization(f);
+%PrepareFunctionForOptimization(inner);
+f(false);
+f(false);
+%OptimizeFunctionOnNextCall(f);
+f(false);
+
+function f(x) {
+ // Pass a large number of arguments so the stack check would fail.
+ inner(x,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ );
+}
+
+runNearStackLimit(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-1185072.js b/deps/v8/test/mjsunit/regress/regress-1185072.js
new file mode 100644
index 00000000000..7dd2802c993
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1185072.js
@@ -0,0 +1,26 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function __getProperties(obj) {
+ let properties = [];
+ for (let name of Object.getOwnPropertyNames(obj)) {
+ properties.push(name);
+ }
+ return properties;
+}
+function __getRandomProperty(obj, seed) {
+ let properties = __getProperties(obj);
+ return properties[seed % properties.length];
+}
+let __v_19 = [];
+class __c_0 extends Array {}
+Object.defineProperty(__v_19, 'constructor', {
+ get() {
+ return __c_0;
+ }
+});
+Object.defineProperty(__v_19, __getRandomProperty(__v_19, 776790), {
+ value: 4294967295
+});
+assertThrows(() => __v_19.concat([1])[9], RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-1187170.js b/deps/v8/test/mjsunit/regress/regress-1187170.js
new file mode 100644
index 00000000000..58a6f7ef5fd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1187170.js
@@ -0,0 +1,24 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-lazy-feedback-allocation
+
+var bar = 0;
+function foo(outer_arg) {
+ var arr = [1];
+ var func = function (arg) {
+ bar += arg;
+ if (outer_arg) {}
+ };
+ try {
+ arr.filter(func);
+ } catch (e) {}
+};
+
+%PrepareFunctionForOptimization(foo);
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+bar = {};
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-1193903.js b/deps/v8/test/mjsunit/regress/regress-1193903.js
new file mode 100644
index 00000000000..491ba1150d2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1193903.js
@@ -0,0 +1,12 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+var no_sync_uninternalized = "no " + "sync";
+%InternalizeString(no_sync_uninternalized);
+
+// Make sure %GetOptimizationStatus works with a non-internalized string
+// parameter.
+%GetOptimizationStatus(function() {}, no_sync_uninternalized)
diff --git a/deps/v8/test/mjsunit/regress/regress-673241.js b/deps/v8/test/mjsunit/regress/regress-673241.js
deleted file mode 100644
index a4d6ffe96fc..00000000000
--- a/deps/v8/test/mjsunit/regress/regress-673241.js
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --validate-asm
-
-function generateAsmJs() {
- 'use asm';
- function fun() { fun(); }
- return fun;
-}
-
-assertThrows(generateAsmJs());
diff --git a/deps/v8/test/mjsunit/regress/regress-7115.js b/deps/v8/test/mjsunit/regress/regress-7115.js
index 8bbb1ded20a..f17c2e6bb94 100644
--- a/deps/v8/test/mjsunit/regress/regress-7115.js
+++ b/deps/v8/test/mjsunit/regress/regress-7115.js
@@ -5,7 +5,15 @@
// Flags: --allow-natives-syntax
function TestBuiltinSubclassing(Builtin) {
- assertTrue(%HasFastProperties(Builtin));
+ if (!%IsDictPropertyConstTrackingEnabled()) {
+ // TODO(v8:11248) In the current implementation of
+ // v8_dict_property_const_tracking, prototypes are converted to dictionary
+ // mode in many places, but we don't guarantee that they are *created* as
+ // dictionary mode objects, yet. This will be fixed in the future. Until
+ // then, if v8_dict_property_const_tracking is enabled, we cannot always
+ // know for sure if a builtin has been converted already or not.
+ assertTrue(%HasFastProperties(Builtin));
+ }
assertTrue(%HasFastProperties(Builtin.prototype));
assertEquals(!%IsDictPropertyConstTrackingEnabled(),
%HasFastProperties(Builtin.prototype.__proto__));
diff --git a/deps/v8/test/mjsunit/regress/regress-923723.js b/deps/v8/test/mjsunit/regress/regress-923723.js
index 5a838e558f6..4bd0d43777b 100644
--- a/deps/v8/test/mjsunit/regress/regress-923723.js
+++ b/deps/v8/test/mjsunit/regress/regress-923723.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --stack-size=50
+// Flags: --stack-size=100
function __f_3() {
try {
diff --git a/deps/v8/test/mjsunit/regress/regress-992389.js b/deps/v8/test/mjsunit/regress/regress-992389.js
index 66fa9696f67..2eb0f755f38 100644
--- a/deps/v8/test/mjsunit/regress/regress-992389.js
+++ b/deps/v8/test/mjsunit/regress/regress-992389.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --jitless --gc-interval=12 --stack-size=50
+// Flags: --jitless --gc-interval=12 --stack-size=100
__f_0();
function __f_0() {
diff --git a/deps/v8/test/mjsunit/regress/regress-chromium-1194026.js b/deps/v8/test/mjsunit/regress/regress-chromium-1194026.js
new file mode 100644
index 00000000000..2b5f5c69123
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-chromium-1194026.js
@@ -0,0 +1,69 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-sharedarraybuffer
+
+function workerCode1() {
+ onmessage = function(e) {
+ const a = new Int32Array(e.sab);
+ while(true) {
+ // This worker tries to switch the value from 1 to 2; if it succeeds, it
+ // also notifies.
+ const ret = Atomics.compareExchange(a, 0, 1, 2);
+ if (ret === 1) {
+ Atomics.notify(a, 0);
+ }
+ // Check if we're asked to terminate:
+ if (Atomics.load(a, 1) == 1) {
+ return;
+ }
+ }
+ }
+}
+
+function workerCode2() {
+ const MAX_ROUNDS = 40;
+ onmessage = function(e) {
+ const a = new Int32Array(e.sab);
+ let round = 0;
+ function nextRound() {
+ while (true) {
+ if (round == MAX_ROUNDS) {
+ // Tell worker1 to terminate.
+ Atomics.store(a, 1, 1);
+ postMessage('done');
+ return;
+ }
+
+ // This worker changes the value to 1, and waits for it to change to 2
+ // via Atomics.waitAsync.
+ Atomics.store(a, 0, 1);
+
+ const res = Atomics.waitAsync(a, 0, 1);
+ if (res.async) {
+ res.value.then(() => { ++round; nextRound();},
+ ()=> {});
+ return;
+ }
+ // Else: continue looping. (This happens when worker1 changed the value
+ // back to 2 before waitAsync started.)
+ }
+ }
+
+ nextRound();
+ }
+}
+
+let sab = new SharedArrayBuffer(8);
+
+let w1 = new Worker(workerCode1, {type: 'function'});
+w1.postMessage({sab: sab});
+
+let w2 = new Worker(workerCode2, {type: 'function'});
+w2.postMessage({sab: sab});
+
+// Wait for worker2.
+w2.getMessage();
+w1.terminate();
+w2.terminate();
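Note (illustration only, not part of the patch): Atomics.waitAsync, used by workerCode2 above, returns an object whose async field says whether a promise was actually installed, which is why the test branches on res.async. A small sketch, assuming an engine with Atomics.waitAsync enabled:

const sab2 = new SharedArrayBuffer(4);
const ia = new Int32Array(sab2);

// Expected value does not match the current value (0), so no wait happens.
const r1 = Atomics.waitAsync(ia, 0, 1);
console.log(r1.async, r1.value);        // false "not-equal"

// Expected value matches, so a promise is installed...
const r2 = Atomics.waitAsync(ia, 0, 0);
console.log(r2.async);                  // true
// ...and it resolves once some agent notifies on that location.
Atomics.notify(ia, 0);
r2.value.then(v => console.log(v));     // "ok"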
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1161847-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-1161847-2.js
index ec61fee068a..e06e63db659 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-1161847-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1161847-2.js
@@ -10,6 +10,16 @@ function foo(first_run) {
Object.defineProperty(o, 'x', { get() { return 1; }, configurable: true, enumerable: true });
delete o.x;
o.x = 23;
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ // TODO(11248, ishell) Adding a property always sets it to constant if
+ // V8_DICT_PROPERTY_CONST_TRACKING is enabled, even if the property was
+ // deleted before and is re-added. See
+ // LookupIterator::PrepareTransitionToDataProperty, specifically the usage of
+ // PropertyDetails::kConstIfDictConstnessTracking in there.
+ return;
+ }
+
if (first_run) assertFalse(%HasOwnConstDataProperty(o, 'x'));
}
%PrepareFunctionForOptimization(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1161847-3.js b/deps/v8/test/mjsunit/regress/regress-crbug-1161847-3.js
new file mode 100644
index 00000000000..e84d98a6ecf
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1161847-3.js
@@ -0,0 +1,20 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function bar(x) { delete x.bla; x.bla = 23 }
+
+function foo() {
+ let obj = {bla: 0};
+ Object.defineProperty(obj, 'bla', {writable: false});
+ bar(obj);
+ return obj.bla;
+}
+
+%PrepareFunctionForOptimization(foo);
+assertEquals(23, foo());
+assertEquals(23, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(23, foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1191886.js b/deps/v8/test/mjsunit/regress/regress-crbug-1191886.js
new file mode 100644
index 00000000000..87df25605fc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1191886.js
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let arr = [];
+for (var i = 0; i < 1000000; i++) {
+ arr[i] = [];
+}
+assertEquals(1000000, i);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1195331.js b/deps/v8/test/mjsunit/regress/regress-crbug-1195331.js
index 1bced5623e4..9f10604e764 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-1195331.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1195331.js
@@ -27,9 +27,9 @@ assertFalse(%HasOwnConstDataProperty(o3, "b"));
Object.defineProperty(o2, "a", {
value:2, enumerable: false, configurable: true, writable: true,
});
-assertFalse(%HasOwnConstDataProperty(o1, "a"));
+assertTrue(%HasOwnConstDataProperty(o1, "a"));
assertFalse(%HasOwnConstDataProperty(o1, "b"));
-assertFalse(%HasOwnConstDataProperty(o3, "a"));
+assertTrue(%HasOwnConstDataProperty(o3, "a"));
assertFalse(%HasOwnConstDataProperty(o3, "b"));
assertFalse(%HasOwnConstDataProperty(o2, "a"));
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-9534.js b/deps/v8/test/mjsunit/regress/regress-v8-9534.js
index 0eb0217e7f4..7fc98a9d74d 100644
--- a/deps/v8/test/mjsunit/regress/regress-v8-9534.js
+++ b/deps/v8/test/mjsunit/regress/regress-v8-9534.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --stack-size=50 --ignore-unhandled-promises
+// Flags: --allow-natives-syntax --stack-size=100 --ignore-unhandled-promises
let i = 0;
function f() {
diff --git a/deps/v8/test/mjsunit/regress/wasm/condition-change-during-branch-elimination.js b/deps/v8/test/mjsunit/regress/wasm/condition-change-during-branch-elimination.js
new file mode 100644
index 00000000000..06d3dc64d02
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/condition-change-during-branch-elimination.js
@@ -0,0 +1,49 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+// This test creates the situation where BranchElimination's VisitIf sees a
+// different condition than the preceding VisitBranch (because an interleaved
+// CommonOperatorReducer replaced the condition).
+
+(function foo() {
+ let builder = new WasmModuleBuilder();
+
+ builder.addFunction("main", kSig_v_l)
+ .addLocals(kWasmI32, 2)
+ .addBody([
+ kExprLoop, kWasmVoid,
+ kExprLocalGet, 0x02,
+ kExprLocalTee, 0x01,
+ kExprIf, kWasmVoid,
+ kExprElse,
+ kExprLoop, kWasmVoid,
+ kExprLoop, kWasmVoid,
+ kExprLocalGet, 0x01,
+ kExprIf, kWasmVoid,
+ kExprElse,
+ kExprLocalGet, 0x02,
+ kExprBrIf, 0x04,
+ kExprBr, 0x01,
+ kExprEnd,
+ kExprLocalGet, 0x00,
+ kExprCallFunction, 0x01,
+ kExprLocalTee, 0x02,
+ kExprBrIf, 0x00,
+ kExprEnd,
+ kExprLocalGet, 0x01,
+ kExprBrIf, 0x00,
+ kExprEnd,
+ kExprEnd,
+ kExprBr, 0x00,
+ kExprEnd])
+ .exportAs("main");
+
+ builder.addFunction("callee", kSig_i_l)
+ .addBody([kExprLocalGet, 0, kExprI32ConvertI64]);
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module);
+})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1027410.js b/deps/v8/test/mjsunit/regress/wasm/regress-1027410.js
index b353b7a94a7..1d0d1470ee6 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1027410.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1027410.js
@@ -23,7 +23,7 @@ kExprEnd, // @3
// signature: d_v
// body:
kExprBlock, kWasmF64, // @3 f64
- kExprBlock, kWasmStmt, // @5
+ kExprBlock, kWasmVoid, // @5
kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f,
kExprLocalTee, 0x00,
kExprLocalTee, 0x01,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1034394.js b/deps/v8/test/mjsunit/regress/wasm/regress-1034394.js
index 99519d8ffe1..99d3da93298 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1034394.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1034394.js
@@ -10,7 +10,7 @@ const NUM_CASES = 3073;
let body = [];
// Add one block, so we can jump to this block or to the function end.
body.push(kExprBlock);
-body.push(kWasmStmt);
+body.push(kWasmVoid);
// Add the big BrTable.
body.push(kExprLocalGet, 0);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1074586.js b/deps/v8/test/mjsunit/regress/wasm/regress-1074586.js
index eec0a46432e..f3e3b59b7ad 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1074586.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1074586.js
@@ -19,15 +19,15 @@ builder.addFunction(undefined, sig)
kExprLocalGet, 0x1b, // local.get
kExprLocalSet, 0x1c, // local.set
kExprI32Const, 0x00, // i32.const
-kExprIf, kWasmStmt, // if @11
+kExprIf, kWasmVoid, // if @11
kExprGlobalGet, 0x00, // global.get
kExprLocalSet, 0x1e, // local.set
- kExprBlock, kWasmStmt, // block @19
+ kExprBlock, kWasmVoid, // block @19
kExprGlobalGet, 0x00, // global.get
kExprLocalSet, 0x21, // local.set
- kExprBlock, kWasmStmt, // block @25
- kExprBlock, kWasmStmt, // block @27
- kExprBlock, kWasmStmt, // block @29
+ kExprBlock, kWasmVoid, // block @25
+ kExprBlock, kWasmVoid, // block @27
+ kExprBlock, kWasmVoid, // block @29
kExprGlobalGet, 0x00, // global.get
kExprLocalSet, 0x0a, // local.set
kExprI32Const, 0x00, // i32.const
@@ -42,19 +42,19 @@ kExprIf, kWasmStmt, // if @11
kExprI32Const, 0x01, // i32.const
kExprLocalSet, 0x36, // local.set
kExprI32Const, 0x00, // i32.const
- kExprIf, kWasmStmt, // if @56
+ kExprIf, kWasmVoid, // if @56
kExprEnd, // end @59
kExprLocalGet, 0x00, // local.get
kExprLocalSet, 0x10, // local.set
kExprI32Const, 0x00, // i32.const
kExprI32Eqz, // i32.eqz
kExprLocalSet, 0x38, // local.set
- kExprBlock, kWasmStmt, // block @69
+ kExprBlock, kWasmVoid, // block @69
kExprI32Const, 0x7f, // i32.const
kExprI32Eqz, // i32.eqz
kExprLocalSet, 0x39, // local.set
kExprI32Const, 0x01, // i32.const
- kExprIf, kWasmStmt, // if @78
+ kExprIf, kWasmVoid, // if @78
kExprGlobalGet, 0x00, // global.get
kExprLocalSet, 0x11, // local.set
kExprI32Const, 0x00, // i32.const
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1075953.js b/deps/v8/test/mjsunit/regress/wasm/regress-1075953.js
index 413630d1b08..c0c2d0dcfc5 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1075953.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1075953.js
@@ -25,7 +25,7 @@ builder.addFunction(undefined, sig)
kExprElse, // else @45
kExprI32Const, 0x00, // i32.const
kExprEnd, // end @48
- kExprIf, kWasmStmt, // if @49
+ kExprIf, kWasmVoid, // if @49
kExprI32Const, 0x00, // i32.const
kExprI32Const, 0x00, // i32.const
kAtomicPrefix, kExprI32AtomicSub, 0x01, 0x04, // i32.atomic.sub
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-10831.js b/deps/v8/test/mjsunit/regress/wasm/regress-10831.js
index 58c6c4dec1d..29334684ed4 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-10831.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-10831.js
@@ -39,7 +39,7 @@ kExprI32Const, 0xfc, 0xf8, 0x01, // i32.const
kSimdPrefix, kExprI8x16Splat, // i8x16.splat
kSimdPrefix, kExprF64x2Max, 0x01, // f64x2.max
kSimdPrefix, kExprI16x8MaxS, 0x01, // i16x8.max_s
-kSimdPrefix, kExprV8x16AllTrue, // v8x16.all_true
+kSimdPrefix, kExprI8x16AllTrue, // i8x16.all_true
kExprEnd, // end @70
]);
builder.addExport('main', 0);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-10898.js b/deps/v8/test/mjsunit/regress/wasm/regress-10898.js
index 61c8c721048..be366883d23 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-10898.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-10898.js
@@ -29,7 +29,7 @@ kExprLocalTee, 0x00, // local.tee
kExprI32Const, 0xff, 0x00, // i32.const
kAtomicPrefix, kExprAtomicNotify, 0x02, 0x03, // atomic.notify
kExprI32LoadMem16S, 0x00, 0x02, // i32.load16_s
-kExprIf, kWasmStmt, // if @28
+kExprIf, kWasmVoid, // if @28
kExprLocalGet, 0x00, // local.get
kExprReturn, // return
kExprElse, // else @33
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1101304.js b/deps/v8/test/mjsunit/regress/wasm/regress-1101304.js
index 36331d094ab..aaf63724b4f 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1101304.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1101304.js
@@ -10,9 +10,9 @@ builder.addType(makeSig(
[]));
builder.addFunction(undefined, 0 /* sig */).addBody([
kExprI32Const, 0, // i32.const
- kExprIf, kWasmStmt, // if @3
+ kExprIf, kWasmVoid, // if @3
kExprI32Const, 1, // i32.const
- kExprIf, kWasmStmt, // if @7
+ kExprIf, kWasmVoid, // if @7
kExprNop, // nop
kExprElse, // else @10
kExprUnreachable, // unreachable
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1145135.js b/deps/v8/test/mjsunit/regress/wasm/regress-1145135.js
index aacaedc93f8..407f5a2f874 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1145135.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1145135.js
@@ -19,11 +19,11 @@ kExprI32Const, 0x10, // i32.const
kExprI32Sub, // i32.sub
kExprLocalTee, 0x02, // local.tee
kExprGlobalSet, 0x00, // global.set
-kExprBlock, kWasmStmt, // block @12
+kExprBlock, kWasmVoid, // block @12
kExprLocalGet, 0x00, // local.get
kExprI32LoadMem, 0x02, 0x00, // i32.load
kExprI32Eqz, // i32.eqz
- kExprIf, kWasmStmt, // if @20
+ kExprIf, kWasmVoid, // if @20
kExprLocalGet, 0x02, // local.get
kExprI32Const, 0x00, // i32.const
kExprI32StoreMem, 0x02, 0x0c, // i32.store
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1146861.js b/deps/v8/test/mjsunit/regress/wasm/regress-1146861.js
index d9d80e58ccc..56c0b7d194a 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1146861.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1146861.js
@@ -35,9 +35,9 @@ kExprI32Const, 0x00, // i32.const
kExprI32Const, 0x01, // i32.const
kExprI32Sub, // i32.sub
kExprLocalSet, 0x07, // local.set
-kExprBlock, kWasmStmt, // block @45
+kExprBlock, kWasmVoid, // block @45
kExprI32Const, 0x00, // i32.const
- kExprIf, kWasmStmt, // if @49
+ kExprIf, kWasmVoid, // if @49
kExprLocalGet, 0x0a, // local.get
kExprLocalSet, 0x08, // local.set
kExprElse, // else @55
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1153442.js b/deps/v8/test/mjsunit/regress/wasm/regress-1153442.js
index 989da11a25b..a86866429c4 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1153442.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1153442.js
@@ -21,7 +21,7 @@ kExprI32Const, 0x00, // i32.const
kExprLocalSet, 0x04, // local.set
kExprI32Const, 0x01, // i32.const
kExprLocalSet, 0x05, // local.set
-kExprBlock, kWasmStmt, // block @11
+kExprBlock, kWasmVoid, // block @11
kExprBr, 0x00, // br depth=0
kExprEnd, // end @15
kExprGlobalGet, 0x01, // global.get
@@ -35,7 +35,7 @@ kExprLocalSet, 0x01, // local.set
kExprI32Const, 0x00, // i32.const
kExprI32Eqz, // i32.eqz
kExprLocalSet, 0x07, // local.set
-kExprBlock, kWasmStmt, // block @36
+kExprBlock, kWasmVoid, // block @36
kExprBr, 0x00, // br depth=0
kExprEnd, // end @40
kExprGlobalGet, 0x01, // global.get
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1161654.js b/deps/v8/test/mjsunit/regress/wasm/regress-1161654.js
index 93f2c3b556f..f9427989275 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1161654.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1161654.js
@@ -34,7 +34,7 @@ kExprLocalGet, 0x01, // local.get
kExprLocalGet, 0x01, // local.get
kExprGlobalGet, 0x00, // global.get
kExprDrop, // drop
-kExprLoop, kWasmStmt, // loop @8
+kExprLoop, kWasmVoid, // loop @8
kExprLoop, 0x00, // loop @10
kExprI32Const, 0x01, // i32.const
kExprMemoryGrow, 0x00, // memory.grow
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1179182.js b/deps/v8/test/mjsunit/regress/wasm/regress-1179182.js
index 907cf563c9c..0bcdd7bd44d 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1179182.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1179182.js
@@ -13,7 +13,7 @@ builder.addFunction(undefined, kSig_i_v)
.addBody([
kExprI64Const, 0x0, // i64.const
kExprI32Const, 0x0, // i32.const
-kExprIf, kWasmStmt, // if
+kExprIf, kWasmVoid, // if
kExprI32Const, 0x0, // i32.const
kExprI32LoadMem, 0x01, 0x23, // i32.load
kExprBrTable, 0x01, 0x00, 0x00, // br_table
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1184964.js b/deps/v8/test/mjsunit/regress/wasm/regress-1184964.js
new file mode 100644
index 00000000000..2fe4fbb1072
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1184964.js
@@ -0,0 +1,11 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-lazy-compilation
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addFunction('foo', kSig_v_v).addBody([kExprDrop]);
+assertThrows(() => builder.instantiate(), WebAssembly.CompileError);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1185464.js b/deps/v8/test/mjsunit/regress/wasm/regress-1185464.js
new file mode 100644
index 00000000000..3c6e9980207
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1185464.js
@@ -0,0 +1,38 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --liftoff --no-wasm-tier-up --wasm-tier-mask-for-testing=2
+// Flags: --experimental-wasm-reftypes
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+
+// Generate a Liftoff call with too many reference parameters to fit in
+// parameter registers, to force stack parameter slots.
+
+const kManyParams = 32;
+const kSigWithManyRefParams = makeSig(
+ new Array(kManyParams).fill(kWasmExternRef), []);
+const kPrepareManyParamsCallBody = Array.from(
+ {length: kManyParams * 2},
+ (item, index) => index % 2 == 0 ? kExprLocalGet : 0);
+
+
+builder.addFunction(undefined, kSigWithManyRefParams).addBody([
+]);
+
+builder.addFunction(undefined, kSigWithManyRefParams)
+.addBody([
+ ...kPrepareManyParamsCallBody,
+ kExprCallFunction, 0, // call 0
+]);
+
+builder.addFunction(undefined, kSigWithManyRefParams).addBody([
+ ...kPrepareManyParamsCallBody,
+ kExprCallFunction, 1, // call 1
+]).exportAs('manyRefs');
+
+const instance = builder.instantiate();
+instance.exports.manyRefs();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1187831.js b/deps/v8/test/mjsunit/regress/wasm/regress-1187831.js
new file mode 100644
index 00000000000..84e7ed5429c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1187831.js
@@ -0,0 +1,30 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-staging
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(16, 32, false, true);
+builder.addType(makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI32]));
+builder.addType(makeSig([], []));
+builder.setTableBounds(1, 1);
+builder.addElementSegment(0, 0, false, [0]);
+// Generate function 1 (out of 1).
+builder.addFunction(undefined, 0 /* sig */)
+ .addBodyWithEnd([
+// signature: i_iii
+// body:
+kExprI32Const, 0x03, // i32.const
+kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+kExprI32Const, 0x00, // i32.const
+kSimdPrefix, kExprI8x16ReplaceLane, 0x00, // i8x16.replace_lane
+kSimdPrefix, kExprI32x4ExtAddPairwiseI16x8U, // i32x4.extadd_pairwise_i16x8_u
+kSimdPrefix, kExprI8x16ExtractLaneU, 0x00, // i8x16.extract_lane_u
+kExprEnd, // end @15
+]);
+builder.addExport('main', 0);
+const instance = builder.instantiate();
+assertEquals(3, instance.exports.main(1, 2, 3));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1188825.js b/deps/v8/test/mjsunit/regress/wasm/regress-1188825.js
new file mode 100644
index 00000000000..9a4cb1ecbd0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1188825.js
@@ -0,0 +1,28 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-eh
+load('test/mjsunit/wasm/wasm-module-builder.js')
+let obj = {};
+let proxy = new Proxy(obj, {});
+let builder = new WasmModuleBuilder();
+builder.addType(kSig_v_v);
+let imports = builder.addImport("m","f", kSig_v_v);
+let exception = builder.addException(kSig_v_v);
+builder.addFunction("foo", kSig_v_v)
+ .addBody([
+ kExprTry,
+ kWasmVoid,
+ kExprCallFunction, imports,
+ kExprCatch, exception,
+ kExprEnd]
+ ).exportFunc();
+let inst = builder.instantiate({
+ m: {
+ f: function () {
+ throw proxy;
+ }
+ }
+});
+assertThrows(inst.exports.foo);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1188975.js b/deps/v8/test/mjsunit/regress/wasm/regress-1188975.js
new file mode 100644
index 00000000000..3d716cd2ca6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1188975.js
@@ -0,0 +1,21 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-eh
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function Regress1188975() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("f", kSig_v_v)
+ .addBody([
+ kExprUnreachable,
+ kExprTry, kWasmVoid,
+ kExprElse,
+ kExprCatchAll,
+ kExprEnd,
+ ]);
+ assertThrows(() => builder.instantiate(), WebAssembly.CompileError);
+})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1189454.js b/deps/v8/test/mjsunit/regress/wasm/regress-1189454.js
new file mode 100644
index 00000000000..9dd512f27ee
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1189454.js
@@ -0,0 +1,218 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc --experimental-wasm-threads
+
+// During Turbofan optimizations, when a TrapIf/Unless node is found to always
+// trap, its uses need to be marked as dead. However, in the case that one of
+// these uses is a Merge or Loop node, only the input of the Merge/Loop that
+// corresponds to the trap should be marked as dead.
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+
+builder.addStruct([makeField(kWasmI32, true)]);
+
+builder.addFunction('test', makeSig([wasmOptRefType(0)], [kWasmI32]))
+ .addBody([
+ kExprLocalGet, 0,
+ kExprRefIsNull,
+ kExprIf, kWasmI32,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprStructGet, 0, 0,
+ kExprElse,
+ kExprI32Const, 42,
+ kExprEnd
+ ])
+ .exportFunc();
+builder.instantiate();
+
+// We include a clusterfuzz-generated testcase for this error verbatim.
+const module = new WebAssembly.Module(new Uint8Array([
+ 0, 97, 115, 109, 1, 0, 0, 0, 1, 51, 9, 96, 0, 0, 96,
+ 0, 1, 125, 96, 0, 1, 124, 96, 2, 124, 127, 1, 125, 96, 4,
+ 126, 126, 125, 127, 1, 127, 96, 1, 126, 1, 127, 96, 7, 127, 126,
+ 126, 125, 124, 127, 125, 1, 124, 96, 0, 1, 127, 96, 1, 124, 1,
+ 125, 3, 23, 22, 0, 4, 0, 5, 6, 0, 7, 0, 2, 0, 3,
+ 1, 0, 8, 0, 0, 0, 0, 0, 2, 2, 0, 4, 5, 1, 112,
+ 1, 9, 9, 5, 4, 1, 3, 1, 1, 6, 6, 1, 127, 1, 65,
+ 10, 11, 7, 213, 1, 14, 6, 102, 117, 110, 99, 95, 48, 0, 0,
+ 14, 102, 117, 110, 99, 95, 49, 95, 105, 110, 118, 111, 107, 101, 114,
+ 0, 2, 14, 102, 117, 110, 99, 95, 52, 95, 105, 110, 118, 111, 107,
+ 101, 114, 0, 5, 14, 102, 117, 110, 99, 95, 54, 95, 105, 110, 118,
+ 111, 107, 101, 114, 0, 7, 14, 102, 117, 110, 99, 95, 56, 95, 105,
+ 110, 118, 11, 107, 101, 114, 0, 9, 7, 102, 117, 110, 99, 95, 49,
+ 49, 0, 11, 15, 102, 117, 110, 99, 95, 49, 49, 95, 105, 110, 118,
+ 111, 107, 101, 114, 0, 12, 15, 102, 117, 110, 99, 95, 49, 51, 95,
+ 105, 110, 118, 111, 107, 101, 114, 0, 14, 7, 102, 117, 110, 99, 95,
+ 49, 53, 0, 15, 15, 102, 117, 110, 99, 95, 49, 53, 95, 105, 110,
+ 118, 111, 107, 101, 114, 0, 16, 15, 102, 117, 110, 99, 95, 49, 55,
+ 95, 105, 110, 118, 111, 107, 101, 114, 0, 18, 7, 102, 117, 110, 99,
+ 95, 49, 57, 0, 19, 7, 102, 117, 110, 99, 95, 50, 48, 0, 20,
+ 20, 104, 97, 110, 103, 76, 105, 109, 105, 116, 73, 110, 105, 116, 105,
+ 97, 108, 105, 122, 101, 114, 0, 21, 9, 15, 1, 0, 65, 0, 11,
+ 9, 4, 6, 6, 8, 10, 11, 11, 15, 15, 10, 220, 18, 22, 113,
+ 0, 35, 0, 69, 4, 64, 15, 11, 35, 0, 65, 1, 107, 36, 0,
+ 3, 64, 35, 0, 69, 4, 64, 15, 11, 35, 0, 65, 1, 107, 36,
+ 0, 2, 127, 35, 0, 69, 4, 64, 15, 11, 35, 0, 65, 1, 107,
+ 36, 0, 65, 128, 128, 128, 4, 11, 4, 127, 65, 193, 255, 3, 5,
+ 2, 127, 3, 64, 35, 0, 69, 4, 64, 15, 11, 35, 0, 65, 1,
+ 107, 36, 0, 3, 64, 35, 0, 69, 4, 64, 15, 11, 35, 0, 65,
+ 1, 107, 36, 0, 12, 1, 11, 0, 65, 0, 13, 1, 0, 11, 0,
+ 11, 11, 26, 12, 0, 11, 0, 11, 131, 3, 1, 1, 125, 35, 0,
+ 69, 4, 64, 65, 128, 128, 128, 2, 15, 11, 35, 0, 65, 1, 107,
+ 36, 0, 2, 127, 2, 64, 66, 157, 228, 193, 147, 127, 3, 126, 35,
+ 0, 69, 4, 64, 65, 224, 196, 126, 15, 11, 35, 0, 65, 1, 107,
+ 36, 0, 35, 0, 69, 4, 64, 65, 129, 128, 124, 15, 11, 35, 0,
+ 65, 1, 107, 36, 0, 32, 3, 65, 105, 13, 2, 13, 0, 66, 128,
+ 128, 128, 128, 192, 0, 11, 2, 125, 35, 0, 69, 4, 64, 32, 3,
+ 15, 11, 35, 0, 65, 1, 107, 36, 0, 67, 0, 0, 80, 193, 32,
+ 2, 2, 127, 35, 0, 69, 4, 64, 65, 117, 15, 11, 35, 0, 65,
+ 1, 107, 36, 0, 32, 3, 11, 27, 34, 4, 67, 0, 0, 0, 0,
+ 32, 4, 32, 4, 91, 27, 11, 32, 3, 16, 1, 3, 127, 35, 0,
+ 69, 4, 64, 65, 168, 186, 126, 15, 11, 35, 0, 65, 1, 107, 36,
+ 0, 35, 0, 69, 4, 64, 65, 128, 1, 15, 11, 35, 0, 65, 1,
+ 107, 36, 0, 65, 255, 0, 32, 3, 69, 13, 2, 34, 3, 13, 0,
+ 32, 3, 11, 69, 13, 1, 32, 3, 69, 13, 1, 65, 220, 188, 126,
+ 13, 1, 34, 3, 4, 64, 2, 64, 2, 127, 35, 0, 69, 4, 64,
+ 65, 128, 128, 128, 128, 120, 15, 11, 35, 0, 65, 1, 107, 36, 0,
+ 32, 3, 32, 3, 13, 0, 13, 3, 35, 0, 69, 4, 64, 32, 3,
+ 15, 11, 35, 0, 65, 1, 107, 36, 0, 12, 1, 11, 26, 3, 127,
+ 35, 0, 69, 4, 64, 32, 3, 15, 11, 35, 0, 65, 1, 107, 36,
+ 0, 32, 3, 13, 0, 65, 1, 11, 26, 12, 2, 11, 35, 0, 69,
+ 4, 64, 65, 167, 127, 15, 11, 35, 0, 65, 1, 107, 36, 0, 35,
+ 0, 69, 4, 64, 65, 128, 192, 0, 15, 11, 35, 0, 65, 1, 107,
+ 36, 0, 35, 0, 69, 4, 64, 32, 3, 15, 11, 35, 0, 65, 1,
+ 107, 36, 0, 65, 147, 127, 12, 2, 5, 35, 0, 69, 4, 64, 65,
+ 129, 128, 128, 128, 120, 15, 11, 35, 0, 65, 1, 107, 36, 0, 11,
+ 11, 65, 255, 255, 125, 11, 11, 33, 0, 66, 252, 130, 221, 255, 15,
+ 66, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 67, 0, 0, 234,
+ 66, 65, 252, 224, 168, 179, 122, 16, 1, 26, 11, 178, 2, 1, 2,
+ 127, 35, 0, 69, 4, 64, 65, 120, 15, 11, 35, 0, 65, 1, 107,
+ 36, 0, 2, 127, 35, 0, 69, 4, 64, 65, 0, 15, 11, 35, 0,
+ 65, 1, 107, 36, 0, 2, 127, 35, 0, 69, 4, 64, 65, 0, 15,
+ 11, 35, 0, 65, 1, 107, 36, 0, 65, 128, 8, 11, 4, 127, 65,
+ 0, 5, 2, 127, 65, 0, 65, 129, 126, 69, 13, 2, 4, 64, 3,
+ 64, 35, 0, 69, 4, 64, 65, 159, 216, 137, 124, 15, 11, 35, 0,
+ 65, 1, 107, 36, 0, 65, 0, 40, 2, 3, 26, 35, 0, 69, 4,
+ 64, 65, 222, 136, 126, 15, 11, 35, 0, 65, 1, 107, 36, 0, 3,
+ 64, 35, 0, 4, 64, 35, 0, 65, 1, 107, 36, 0, 12, 1, 5,
+ 65, 128, 8, 15, 11, 0, 11, 0, 11, 0, 5, 3, 64, 35, 0,
+ 69, 4, 64, 65, 0, 15, 11, 35, 0, 65, 1, 107, 36, 0, 2,
+ 127, 35, 0, 69, 4, 64, 65, 0, 15, 11, 35, 0, 65, 1, 107,
+ 36, 0, 65, 0, 2, 127, 35, 0, 69, 4, 64, 65, 0, 15, 11,
+ 35, 0, 65, 1, 107, 36, 0, 3, 64, 35, 0, 69, 4, 64, 65,
+ 0, 15, 11, 35, 0, 65, 1, 107, 36, 0, 11, 65, 1, 254, 18,
+ 0, 22, 11, 69, 13, 0, 11, 13, 0, 35, 0, 69, 4, 64, 65,
+ 128, 124, 15, 11, 35, 0, 65, 1, 107, 36, 0, 3, 64, 35, 0,
+ 69, 4, 64, 65, 224, 216, 2, 15, 11, 35, 0, 65, 1, 107, 36,
+ 0, 35, 0, 69, 4, 64, 65, 128, 128, 2, 15, 11, 35, 0, 65,
+ 1, 107, 36, 0, 65, 190, 127, 12, 3, 11, 0, 11, 0, 11, 0,
+ 11, 11, 11, 11, 23, 0, 35, 0, 69, 4, 64, 32, 4, 15, 11,
+ 35, 0, 65, 1, 107, 36, 0, 65, 0, 43, 3, 2, 11, 116, 0,
+ 65, 141, 176, 126, 66, 217, 236, 126, 66, 128, 1, 67, 0, 0, 0,
+ 79, 68, 0, 0, 0, 0, 0, 0, 80, 64, 65, 76, 67, 0, 0,
+ 128, 95, 16, 4, 26, 65, 32, 66, 129, 128, 128, 128, 120, 66, 230,
+ 212, 156, 252, 15, 67, 0, 0, 160, 64, 68, 0, 0, 0, 0, 0,
+ 0, 224, 67, 65, 127, 67, 0, 0, 128, 128, 16, 4, 26, 65, 255,
+ 166, 200, 177, 123, 66, 185, 127, 66, 128, 128, 128, 128, 8, 67, 0,
+ 0, 0, 93, 68, 0, 0, 0, 0, 0, 0, 96, 67, 65, 150, 224,
+ 126, 67, 0, 0, 0, 88, 16, 4, 26, 11, 111, 0, 35, 0, 69,
+ 4, 64, 65, 144, 194, 0, 15, 11, 35, 0, 65, 1, 107, 36, 0,
+ 3, 64, 35, 0, 69, 4, 64, 65, 0, 15, 11, 35, 0, 65, 1,
+ 107, 36, 0, 3, 64, 35, 0, 69, 4, 64, 65, 124, 15, 11, 35,
+ 0, 65, 1, 107, 36, 0, 35, 0, 69, 4, 64, 65, 111, 15, 11,
+ 35, 0, 65, 1, 107, 36, 0, 3, 127, 35, 0, 69, 4, 64, 65,
+ 128, 128, 2, 15, 11, 35, 0, 65, 1, 107, 36, 0, 65, 128, 128,
+ 126, 11, 69, 13, 0, 12, 1, 11, 0, 69, 0, 13, 0, 0, 11,
+ 0, 11, 14, 0, 16, 6, 26, 16, 6, 26, 16, 6, 26, 16, 6,
+ 26, 11, 34, 0, 35, 0, 69, 4, 64, 68, 0, 0, 0, 0, 0,
+ 0, 224, 67, 15, 11, 35, 0, 65, 1, 107, 36, 0, 68, 26, 192,
+ 255, 255, 255, 255, 255, 255, 11, 5, 0, 16, 8, 26, 11, 26, 0,
+ 35, 0, 69, 4, 64, 67, 0, 0, 0, 0, 15, 11, 35, 0, 65,
+ 1, 107, 36, 0, 67, 0, 0, 128, 214, 11, 26, 0, 35, 0, 69,
+ 4, 64, 67, 0, 0, 0, 90, 15, 11, 35, 0, 65, 1, 107, 36,
+ 0, 67, 0, 0, 44, 194, 11, 8, 0, 16, 11, 26, 16, 11, 26,
+ 11, 26, 0, 35, 0, 69, 4, 64, 67, 0, 0, 0, 197, 15, 11,
+ 35, 0, 65, 1, 107, 36, 0, 67, 117, 227, 255, 255, 11, 38, 0,
+ 68, 129, 255, 255, 255, 255, 255, 255, 255, 16, 13, 26, 68, 0, 0,
+ 0, 0, 0, 0, 16, 65, 16, 13, 26, 68, 193, 255, 255, 255, 255,
+ 255, 255, 255, 16, 13, 26, 11, 30, 0, 35, 0, 69, 4, 64, 15,
+ 11, 35, 0, 65, 1, 107, 36, 0, 35, 0, 69, 4, 64, 15, 11,
+ 35, 0, 65, 1, 107, 36, 0, 11, 6, 0, 16, 15, 16, 15, 11,
+ 16, 0, 35, 0, 69, 4, 64, 15, 11, 35, 0, 65, 1, 107, 36,
+ 0, 11, 8, 0, 16, 17, 16, 17, 16, 17, 11, 52, 0, 35, 0,
+ 69, 4, 64, 68, 0, 0, 0, 0, 0, 0, 0, 0, 15, 11, 35,
+ 0, 65, 1, 107, 36, 0, 3, 124, 35, 0, 4, 124, 35, 0, 65,
+ 1, 107, 36, 0, 12, 1, 5, 68, 0, 0, 0, 0, 0, 128, 109,
+ 64, 11, 11, 11, 218, 7, 3, 4, 127, 1, 126, 2, 125, 35, 0,
+ 69, 4, 64, 68, 255, 255, 255, 255, 255, 255, 239, 255, 15, 11, 35,
+ 0, 65, 1, 107, 36, 0, 2, 124, 3, 64, 35, 0, 69, 4, 64,
+ 68, 0, 0, 0, 0, 0, 0, 42, 192, 15, 11, 35, 0, 65, 1,
+ 107, 36, 0, 2, 64, 3, 64, 35, 0, 69, 4, 64, 68, 0, 0,
+ 0, 0, 0, 0, 176, 64, 15, 11, 35, 0, 65, 1, 107, 36, 0,
+ 65, 128, 127, 34, 2, 4, 127, 32, 0, 5, 35, 0, 69, 4, 64,
+ 68, 0, 0, 192, 137, 207, 250, 239, 65, 15, 11, 35, 0, 65, 1,
+ 107, 36, 0, 3, 64, 35, 0, 69, 4, 64, 68, 0, 0, 0, 245,
+ 255, 255, 239, 65, 15, 11, 35, 0, 65, 1, 107, 36, 0, 65, 134,
+ 82, 34, 0, 33, 3, 32, 1, 69, 13, 0, 11, 35, 0, 69, 4,
+ 64, 68, 0, 0, 0, 0, 0, 0, 144, 192, 15, 11, 35, 0, 65,
+ 1, 107, 36, 0, 32, 1, 69, 13, 2, 32, 4, 16, 3, 13, 1,
+ 65, 116, 33, 0, 12, 3, 11, 33, 2, 3, 127, 35, 0, 69, 4,
+ 64, 68, 77, 69, 29, 145, 255, 255, 255, 255, 15, 11, 35, 0, 65,
+ 1, 107, 36, 0, 32, 1, 13, 0, 32, 2, 34, 0, 34, 1, 11,
+ 13, 0, 11, 3, 64, 35, 0, 69, 4, 64, 68, 0, 0, 0, 0,
+ 0, 0, 48, 64, 15, 11, 35, 0, 65, 1, 107, 36, 0, 35, 0,
+ 69, 4, 64, 68, 0, 0, 0, 0, 0, 160, 102, 64, 15, 11, 35,
+ 0, 65, 1, 107, 36, 0, 32, 1, 33, 2, 65, 7, 17, 0, 0,
+ 3, 127, 35, 0, 69, 4, 64, 68, 0, 0, 0, 0, 0, 0, 240,
+ 63, 15, 11, 35, 0, 65, 1, 107, 36, 0, 2, 127, 35, 0, 69,
+ 4, 64, 68, 0, 0, 0, 0, 0, 128, 78, 192, 15, 11, 35, 0,
+ 65, 1, 107, 36, 0, 66, 129, 128, 128, 128, 120, 66, 128, 128, 2,
+ 32, 0, 27, 33, 4, 65, 177, 152, 126, 11, 4, 64, 3, 64, 35,
+ 0, 69, 4, 64, 68, 0, 0, 0, 0, 0, 0, 16, 195, 15, 11,
+ 35, 0, 65, 1, 107, 36, 0, 16, 6, 65, 15, 113, 65, 130, 128,
+ 126, 254, 0, 2, 0, 4, 64, 32, 0, 32, 1, 32, 2, 27, 4,
+ 127, 65, 207, 230, 157, 153, 4, 34, 0, 5, 65, 140, 226, 132, 187,
+ 6, 11, 26, 5, 67, 151, 255, 255, 255, 33, 6, 11, 32, 2, 13,
+ 0, 66, 128, 128, 128, 128, 128, 1, 33, 4, 11, 11, 3, 64, 35,
+ 0, 69, 4, 64, 68, 0, 0, 0, 0, 32, 250, 239, 64, 15, 11,
+ 35, 0, 65, 1, 107, 36, 0, 32, 6, 26, 3, 127, 35, 0, 69,
+ 4, 64, 68, 0, 0, 0, 0, 0, 0, 128, 67, 15, 11, 35, 0,
+ 65, 1, 107, 36, 0, 3, 127, 35, 0, 69, 4, 64, 68, 0, 0,
+ 0, 0, 0, 0, 77, 64, 15, 11, 35, 0, 65, 1, 107, 36, 0,
+ 67, 80, 255, 55, 202, 33, 6, 32, 2, 69, 13, 0, 65, 110, 11,
+ 34, 3, 13, 4, 32, 2, 33, 0, 32, 3, 69, 13, 0, 65, 128,
+ 96, 11, 69, 13, 0, 32, 1, 4, 127, 2, 127, 35, 0, 69, 4,
+ 64, 68, 138, 255, 255, 255, 255, 255, 255, 255, 15, 11, 35, 0, 65,
+ 1, 107, 36, 0, 35, 0, 69, 4, 64, 68, 215, 255, 255, 255, 255,
+ 255, 255, 255, 15, 11, 35, 0, 65, 1, 107, 36, 0, 65, 185, 127,
+ 2, 127, 35, 0, 69, 4, 64, 68, 0, 0, 0, 0, 0, 0, 224,
+ 195, 15, 11, 35, 0, 65, 1, 107, 36, 0, 65, 0, 11, 13, 0,
+ 4, 64, 68, 0, 0, 0, 0, 0, 0, 240, 66, 32, 3, 65, 4,
+ 17, 3, 0, 26, 5, 32, 1, 69, 13, 3, 11, 32, 2, 34, 1,
+ 11, 5, 65, 129, 1, 34, 1, 34, 0, 11, 69, 13, 2, 11, 32,
+ 1, 65, 15, 113, 65, 128, 128, 32, 34, 1, 254, 0, 2, 0, 69,
+ 13, 0, 65, 128, 128, 32, 65, 129, 128, 124, 32, 0, 27, 11, 34,
+ 0, 13, 0, 65, 4, 66, 217, 208, 176, 127, 254, 24, 3, 0, 12,
+ 0, 11, 0, 11, 3, 127, 35, 0, 69, 4, 64, 68, 0, 0, 0,
+ 0, 0, 128, 84, 64, 15, 11, 35, 0, 65, 1, 107, 36, 0, 35,
+ 0, 69, 4, 64, 68, 177, 255, 255, 255, 255, 255, 255, 255, 15, 11,
+ 35, 0, 65, 1, 107, 36, 0, 32, 2, 13, 0, 35, 0, 69, 4,
+ 64, 68, 0, 0, 0, 0, 0, 0, 64, 195, 15, 11, 35, 0, 65,
+ 1, 107, 36, 0, 32, 0, 69, 13, 0, 35, 0, 69, 4, 64, 68,
+ 0, 0, 0, 0, 0, 0, 96, 64, 15, 11, 35, 0, 65, 1, 107,
+ 36, 0, 3, 124, 35, 0, 69, 4, 64, 68, 0, 0, 0, 0, 0,
+ 0, 16, 184, 15, 11, 35, 0, 65, 1, 107, 36, 0, 32, 3, 13,
+ 0, 68, 0, 0, 0, 0, 0, 0, 224, 195, 11, 32, 0, 13, 2,
+ 26, 35, 0, 69, 4, 64, 68, 0, 0, 0, 0, 0, 0, 192, 66,
+ 15, 11, 35, 0, 65, 1, 107, 36, 0, 32, 1, 13, 0, 35, 0,
+ 69, 4, 64, 68, 0, 0, 0, 0, 0, 0, 240, 191, 15, 11, 35,
+ 0, 65, 1, 107, 36, 0, 65, 128, 126, 11, 13, 0, 11, 35, 0,
+ 69, 4, 64, 68, 136, 255, 255, 255, 255, 255, 255, 255, 15, 11, 35,
+ 0, 65, 1, 107, 36, 0, 68, 0, 0, 0, 0, 0, 0, 0, 192,
+ 11, 11, 6, 0, 65, 10, 36, 0, 11, 11, 15, 1, 0, 65, 0,
+ 11, 9, 109, 0, 0, 0, 0, 0, 0, 0, 38
+]));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1197393.js b/deps/v8/test/mjsunit/regress/wasm/regress-1197393.js
new file mode 100644
index 00000000000..364de33c804
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1197393.js
@@ -0,0 +1,35 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-staging
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addType(makeSig([kWasmI32, kWasmI64, kWasmF64, kWasmI64], []));
+builder.addType(makeSig([kWasmF64], [kWasmF64]));
+// Generate function 1 (out of 2).
+builder.addFunction(undefined, 0 /* sig */)
+ .addBodyWithEnd([
+// signature: v_ildl
+// body:
+kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, // f64.const
+kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, // f64.const
+kExprLocalGet, 0x00, // local.get
+kExprI32Const, 0x82, 0x7f, // i32.const
+kExprI32DivS, // i32.div_s
+kExprSelect, // select
+kExprCallFunction, 0x01, // call function #1: d_d
+kExprDrop, // drop
+kExprEnd, // end @29
+]);
+// Generate function 2 (out of 2).
+builder.addFunction(undefined, 1 /* sig */)
+ .addBodyWithEnd([
+// signature: d_d
+// body:
+kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, // f64.const
+kExprEnd, // end @10
+]);
+const instance = builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1201340.js b/deps/v8/test/mjsunit/regress/wasm/regress-1201340.js
new file mode 100644
index 00000000000..82910df155f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1201340.js
@@ -0,0 +1,13 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+builder = new WasmModuleBuilder();
+builder.addImportedMemory();
+let leb = [0x80, 0x80, 0x80, 0x80, 0x0c];
+builder.addFunction('store', makeSig([kWasmI32, kWasmI32], []))
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32StoreMem, 0, ...leb])
+ .exportFunc();
+builder.toModule();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-5800.js b/deps/v8/test/mjsunit/regress/wasm/regress-5800.js
index 77c436119c8..75605e36128 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-5800.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-5800.js
@@ -9,7 +9,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction("main", kSig_i_v)
.addBody([
- kExprBlock, kWasmStmt,
+ kExprBlock, kWasmVoid,
kExprI64Const, 0,
// 0x80 ... 0x10 is the LEB encoding of 0x100000000. This is chosen so
// that the 64-bit constant has a non-zero top half. In this bug, the
@@ -34,7 +34,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction("main", kSig_i_v)
.addBody([
- kExprBlock, kWasmStmt,
+ kExprBlock, kWasmVoid,
kExprI64Const, 0,
// 0x80 ... 0x10 is the LEB encoding of 0x100000000. This is chosen so
// that the 64-bit constant has a non-zero top half. In this bug, the
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7353.js b/deps/v8/test/mjsunit/regress/wasm/regress-7353.js
index 671da730fbe..9bda7fcc708 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7353.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7353.js
@@ -16,7 +16,7 @@ builder.addFunction('main', kSig_i_i).addBody([
...wasmI32Const(0x41),
kExprLocalSet, 0,
// Enter loop, such that values are spilled to the stack.
- kExprLoop, kWasmStmt,
+ kExprLoop, kWasmVoid,
kExprEnd,
// Reload value. This must be loaded as 32 bit value.
kExprLocalGet, 0,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7366.js b/deps/v8/test/mjsunit/regress/wasm/regress-7366.js
index b5e4e2e2b6e..92579bc37b1 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7366.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7366.js
@@ -18,7 +18,7 @@ builder.addFunction(undefined, kSig_i_iii)
kExprLocalSet, 1, // set_local 1
...wasmI32Const(16), // i32.const 0x1
kExprLocalSet, 2, // set_local 2
- kExprLoop, kWasmStmt, // loop
+ kExprLoop, kWasmVoid, // loop
kExprEnd, // end
kExprLocalGet, 0, // get_local 0
kExprLocalGet, 1, // get_local 1
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-782280.js b/deps/v8/test/mjsunit/regress/wasm/regress-782280.js
index 008ab16159c..776ca522c28 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-782280.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-782280.js
@@ -21,7 +21,7 @@ builder.addFunction('test', kSig_i_iii)
kExprI32Const, 0, // 0, 0
kExprI32Const, 1, // 0, 0, 1
kExprI32Add, // 0, 0 + 1 -> 1
- kExprBlock, kWasmStmt, // 0, 1
+ kExprBlock, kWasmVoid, // 0, 1
kExprBr, 0, // 0, 1
kExprEnd, // 0, 1
kExprI32Add, // 0 + 1 -> 1
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-791810.js b/deps/v8/test/mjsunit/regress/wasm/regress-791810.js
index 3daeff9e152..74f11ca81e0 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-791810.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-791810.js
@@ -8,10 +8,10 @@ const builder = new WasmModuleBuilder();
builder.addFunction('test', kSig_i_i)
.addBody([
kExprLocalGet, 0x00, // get_local 0
- kExprBlock, kWasmStmt, // block
+ kExprBlock, kWasmVoid, // block
kExprBr, 0x00, // br depth=0
kExprEnd, // end
- kExprBlock, kWasmStmt, // block
+ kExprBlock, kWasmVoid, // block
kExprBr, 0x00, // br depth=0
kExprEnd, // end
kExprBr, 0x00, // br depth=0
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-793551.js b/deps/v8/test/mjsunit/regress/wasm/regress-793551.js
index ac2b34019e2..db93c83fded 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-793551.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-793551.js
@@ -10,7 +10,7 @@ builder.addFunction('test', kSig_i_i)
// body:
kExprLocalGet, 0, // get_local 0
kExprLocalGet, 0, // get_local 0
- kExprLoop, kWasmStmt, // loop
+ kExprLoop, kWasmVoid, // loop
kExprBr, 0, // br depth=0
kExprEnd, // end
kExprUnreachable, // unreachable
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-842501.js b/deps/v8/test/mjsunit/regress/wasm/regress-842501.js
index d54507cc59f..8445b48906e 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-842501.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-842501.js
@@ -23,7 +23,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
kExprF32Eq,
kExprI32LoadMem, 0x01, 0xef, 0xec, 0x95, 0x93, 0x07,
kExprI32Add,
- kExprIf, kWasmStmt, // @30
+ kExprIf, kWasmVoid, // @30
kExprEnd, // @32
kExprI32Const, 0xc9, 0x93, 0xdf, 0xcc, 0x7c,
kExprEnd, // @39
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-8533.js b/deps/v8/test/mjsunit/regress/wasm/regress-8533.js
index a39d7e88361..db027a226da 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-8533.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-8533.js
@@ -21,7 +21,7 @@ const sync_address = 12;
// Calling the imported function sets the thread-in-wasm flag of the
// main thread.
kExprCallFunction, import_id, // --
- kExprLoop, kWasmStmt, // --
+ kExprLoop, kWasmVoid, // --
kExprI32Const, sync_address, // --
kExprI32LoadMem, 0, 0, // --
kExprI32Eqz,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-854050.js b/deps/v8/test/mjsunit/regress/wasm/regress-854050.js
index bf170c5d632..040c0e36c89 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-854050.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-854050.js
@@ -10,7 +10,7 @@ builder.addFunction(undefined, makeSig([kWasmI32, kWasmF32], []))
.addBody([
kExprLocalGet, 0, // get_local
kExprI32Const, 0, // i32.const 0
- kExprIf, kWasmStmt, // if
+ kExprIf, kWasmVoid, // if
kExprUnreachable, // unreachable
kExprEnd, // end if
kExprLocalGet, 4, // get_local
@@ -21,7 +21,7 @@ builder.addFunction(undefined, makeSig([kWasmI32, kWasmF32], []))
kExprLocalTee, 2, // tee_local
kExprLocalTee, 8, // tee_local
kExprDrop, // drop
- kExprLoop, kWasmStmt, // loop
+ kExprLoop, kWasmVoid, // loop
kExprEnd, // end loop
]);
builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-905815.js b/deps/v8/test/mjsunit/regress/wasm/regress-905815.js
index 21f32180bd7..00f7825a244 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-905815.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-905815.js
@@ -15,7 +15,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
builder.addFunction(undefined, 1 /* sig */)
.addLocals(kWasmI32, 65)
.addBodyWithEnd([
- kExprLoop, kWasmStmt, // @3
+ kExprLoop, kWasmVoid, // @3
kSimdPrefix,
kExprF32x4Min,
kExprI64UConvertI32,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-913804.js b/deps/v8/test/mjsunit/regress/wasm/regress-913804.js
index e9d4026308a..630929a0bd6 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-913804.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-913804.js
@@ -6,9 +6,9 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
builder.addFunction('main', kSig_v_v).addBody([
- kExprLoop, kWasmStmt, // loop
+ kExprLoop, kWasmVoid, // loop
/**/ kExprBr, 0x01, // br depth=1
- /**/ kExprBlock, kWasmStmt, // block
+ /**/ kExprBlock, kWasmVoid, // block
/**/ /**/ kExprBr, 0x02, // br depth=2
/**/ /**/ kExprEnd, // end [block]
/**/ kExprEnd // end [loop]
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-917412.js b/deps/v8/test/mjsunit/regress/wasm/regress-917412.js
index 4b9528ccf6d..74fb87133c4 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-917412.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-917412.js
@@ -16,7 +16,7 @@ kExprElse,
kExprEnd,
kExprLocalTee, 0,
kExprLocalGet, 0,
-kExprLoop, kWasmStmt,
+kExprLoop, kWasmVoid,
kExprI64Const, 0x80, 0x80, 0x80, 0x70,
kExprLocalSet, 0x01,
kExprI32Const, 0x00,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-917588b.js b/deps/v8/test/mjsunit/regress/wasm/regress-917588b.js
index 1e5c1a4488c..1fab062b85a 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-917588b.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-917588b.js
@@ -16,8 +16,8 @@ builder.addFunction(undefined, sig1)
// signature: f_lilfl
kExprBlock, kWasmF32, // @1 f32
kExprI32Const, 0x00,
- kExprIf, kWasmStmt, // @5
- kExprLoop, kWasmStmt, // @7
+ kExprIf, kWasmVoid, // @5
+ kExprLoop, kWasmVoid, // @7
kExprBlock, kWasmI32, // @9 i32
kExprF32Const, 0x00, 0x00, 0x80, 0xc1,
kExprF32Const, 0x00, 0x00, 0x80, 0x45,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-919533.js b/deps/v8/test/mjsunit/regress/wasm/regress-919533.js
index 1cc4b675c20..ab13941b201 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-919533.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-919533.js
@@ -12,7 +12,7 @@ builder.addFunction(undefined, kSig_i_i)
kExprLocalGet, 0,
// Stack now contains two copies of the first param register.
// Start a loop to create a merge point (values still in registers).
- kExprLoop, kWasmStmt,
+ kExprLoop, kWasmVoid,
// The call spills all values.
kExprCallFunction, 0,
// Break to the loop. Now the spilled values need to be loaded back *into
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-922933.js b/deps/v8/test/mjsunit/regress/wasm/regress-922933.js
index aabe0013923..7df7fb47d2a 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-922933.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-922933.js
@@ -9,21 +9,21 @@ const sig = builder.addType(makeSig([kWasmI64], [kWasmI64]));
builder.addFunction(undefined, sig)
.addLocals(kWasmI32, 14).addLocals(kWasmI64, 17).addLocals(kWasmF32, 14)
.addBody([
- kExprBlock, kWasmStmt,
+ kExprBlock, kWasmVoid,
kExprBr, 0x00,
kExprEnd,
- kExprBlock, kWasmStmt,
+ kExprBlock, kWasmVoid,
kExprI32Const, 0x00,
kExprLocalSet, 0x09,
kExprI32Const, 0x00,
- kExprIf, kWasmStmt,
- kExprBlock, kWasmStmt,
+ kExprIf, kWasmVoid,
+ kExprBlock, kWasmVoid,
kExprI32Const, 0x00,
kExprLocalSet, 0x0a,
kExprBr, 0x00,
kExprEnd,
- kExprBlock, kWasmStmt,
- kExprBlock, kWasmStmt,
+ kExprBlock, kWasmVoid,
+ kExprBlock, kWasmVoid,
kExprLocalGet, 0x00,
kExprLocalSet, 0x12,
kExprBr, 0x00,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-924843.js b/deps/v8/test/mjsunit/regress/wasm/regress-924843.js
index c77845af76a..c4c8b309873 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-924843.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-924843.js
@@ -9,8 +9,8 @@ const sig = builder.addType(makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI32]))
builder.addFunction(undefined, sig)
.addBody([
kExprLocalGet, 2,
- kExprIf, kWasmStmt,
- kExprBlock, kWasmStmt
+ kExprIf, kWasmVoid,
+ kExprBlock, kWasmVoid
]);
builder.addExport('main', 0);
assertThrows(() => builder.instantiate(), WebAssembly.CompileError);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-968078.js b/deps/v8/test/mjsunit/regress/wasm/regress-968078.js
index 07081087fa5..fce3727cd35 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-968078.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-968078.js
@@ -27,12 +27,12 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addMemory(12, 12, false);
builder.addFunction("foo", kSig_v_iii)
.addBody([].concat([
- kExprBlock, kWasmStmt,
+ kExprBlock, kWasmVoid,
kExprLocalGet, 0x2,
kExprI32Const, 0x01,
kExprI32And,
// Generate a test branch (which has 32k limited reach).
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprLocalGet, 0x0,
kExprI32Const, 0x01,
kExprI32And,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-9759.js b/deps/v8/test/mjsunit/regress/wasm/regress-9759.js
index 05bb26f7ff8..ca0604eebf0 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-9759.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-9759.js
@@ -15,7 +15,7 @@ const NUM_CASES = 0xfffd;
let cases = new Array(NUM_CASES).fill(0);
builder.addFunction('main', kSig_v_i)
.addBody([].concat([
- kExprBlock, kWasmStmt,
+ kExprBlock, kWasmVoid,
kExprLocalGet, 0,
kExprBrTable], wasmSignedLeb(NUM_CASES),
cases, [0,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-9832.js b/deps/v8/test/mjsunit/regress/wasm/regress-9832.js
index 05b63b0984d..891139f50da 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-9832.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-9832.js
@@ -16,7 +16,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
]).exportFunc();
builder.addFunction("main", kSig_i_i)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprLocalGet, 0,
kExprCallFunction, f.index,
kExprCallFunction, f.index,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1168612.js b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1168612.js
new file mode 100644
index 00000000000..f40ead82d51
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1168612.js
@@ -0,0 +1,32 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo-inline-js-wasm-calls
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+function getMain() {
+ var builder = new WasmModuleBuilder();
+ builder.addFunction("main", kSig_v_v)
+ .addBody([kExprUnreachable])
+ .exportAs("main");
+ return builder.instantiate().exports.main;
+}
+let foo = getMain();
+
+function loop() {
+ for (let i = 0; i < 2; i++) {
+ try {
+ foo();
+ } catch (e) {
+ if (i) {
+ throw e;
+ }
+ }
+ }
+}
+%PrepareFunctionForOptimization(loop);
+assertThrows(loop, WebAssembly.RuntimeError, "unreachable");
+%OptimizeFunctionOnNextCall(loop);
+assertThrows(loop, WebAssembly.RuntimeError, "unreachable");
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress1192313.js b/deps/v8/test/mjsunit/regress/wasm/regress1192313.js
new file mode 100644
index 00000000000..40307a3fa49
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress1192313.js
@@ -0,0 +1,30 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-eh --experimental-wasm-threads
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function Regress1192313() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.addMemory(16, 32);
+ builder.addFunction('f', kSig_i_i)
+ .addBody([
+ kExprTry, kWasmI32,
+ kExprI32Const, 0,
+ kExprI32Const, 0,
+ kExprCallFunction, 0,
+ kAtomicPrefix, kExprI32AtomicAnd8U,
+ 0x00, 0xba, 0xe2, 0x81, 0xd6, 0x0b,
+ kExprCatchAll,
+ kExprTry, kWasmI32,
+ kExprI32Const, 0,
+ kExprI32Const, 0,
+ kAtomicPrefix, kExprI32AtomicAnd8U,
+ 0x00, 0x85, 0x97, 0xc4, 0x5f,
+ kExprDelegate, 1,
+ kExprEnd]).exportFunc();
+ let instance = builder.instantiate();
+})();
diff --git a/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js b/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js
index de53699570f..e186dbb7f59 100644
--- a/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js
+++ b/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js
@@ -25,7 +25,7 @@ assertFalse(isNeverOptimize());
if (i == 1) {
// f must be interpreted code.
- assertTrue(isInterpreted(f));
+ assertTrue(isUnoptimized(f));
// Run twice (i = 0, 1), then tier-up.
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/tools/foozzie.js b/deps/v8/test/mjsunit/tools/foozzie.js
index 147f6370601..759df0e9832 100644
--- a/deps/v8/test/mjsunit/tools/foozzie.js
+++ b/deps/v8/test/mjsunit/tools/foozzie.js
@@ -119,3 +119,12 @@ assertEquals(unoptimized, callPow(6996));
let then_called = false;
Atomics.waitAsync().value.then(() => {then_called = true;});
assertEquals(true, then_called);
+
+// Test .caller access is neutered.
+function callee() {
+ assertEquals(null, callee.caller);
+}
+function caller() {
+ callee();
+}
+caller();
diff --git a/deps/v8/test/mjsunit/wasm/atomics-stress.js b/deps/v8/test/mjsunit/wasm/atomics-stress.js
index 19a9a0ccfb7..e006ecdf0f2 100644
--- a/deps/v8/test/mjsunit/wasm/atomics-stress.js
+++ b/deps/v8/test/mjsunit/wasm/atomics-stress.js
@@ -264,7 +264,7 @@ function generateFunctionBodyForSequence(sequence) {
kExprLocalGet, 2, kExprI32Const, 1, kAtomicPrefix, kExprI32AtomicSub, 2,
0,
// Spin until zero.
- kExprLoop, kWasmStmt, kExprLocalGet, 2, kAtomicPrefix,
+ kExprLoop, kWasmVoid, kExprLocalGet, 2, kAtomicPrefix,
kExprI32AtomicLoad, 2, 0, kExprI32Const, 0, kExprI32GtU, kExprBrIf, 0,
kExprEnd);
}
diff --git a/deps/v8/test/mjsunit/wasm/atomics.js b/deps/v8/test/mjsunit/wasm/atomics.js
index 6d37ba55483..3df938af7f3 100644
--- a/deps/v8/test/mjsunit/wasm/atomics.js
+++ b/deps/v8/test/mjsunit/wasm/atomics.js
@@ -399,7 +399,7 @@ function TestStore(func, buffer, value, size) {
builder.addImportedMemory("m", "imported_mem", 16, 128, "shared");
builder.addFunction("main", kSig_i_v)
.addBody([
- kExprLoop, kWasmStmt,
+ kExprLoop, kWasmVoid,
kExprI32Const, 16,
kExprI32Const, 20,
kAtomicPrefix,
@@ -442,7 +442,7 @@ function CmpExchgLoop(opcode, alignment) {
builder.addFunction("main", makeSig([kWasmI32], []))
.addLocals(kWasmI64, 2)
.addBody([
- kExprLoop, kWasmStmt,
+ kExprLoop, kWasmVoid,
kExprLocalGet, 0,
kExprLocalGet, 1,
kExprLocalGet, 2,
diff --git a/deps/v8/test/mjsunit/wasm/atomics64-stress.js b/deps/v8/test/mjsunit/wasm/atomics64-stress.js
index 99e9016f1a4..472bba81c04 100644
--- a/deps/v8/test/mjsunit/wasm/atomics64-stress.js
+++ b/deps/v8/test/mjsunit/wasm/atomics64-stress.js
@@ -302,7 +302,7 @@ function generateFunctionBodyForSequence(sequence) {
kExprLocalGet, 2, kExprI32Const, 1, kAtomicPrefix, kExprI32AtomicSub, 2,
0,
// Spin until zero.
- kExprLoop, kWasmStmt, kExprLocalGet, 2, kAtomicPrefix,
+ kExprLoop, kWasmVoid, kExprLocalGet, 2, kAtomicPrefix,
kExprI32AtomicLoad, 2, 0, kExprI32Const, 0, kExprI32GtU, kExprBrIf, 0,
kExprEnd);
}
diff --git a/deps/v8/test/mjsunit/wasm/compare-exchange-stress.js b/deps/v8/test/mjsunit/wasm/compare-exchange-stress.js
index 97ed71b9e10..7d979b65ec3 100644
--- a/deps/v8/test/mjsunit/wasm/compare-exchange-stress.js
+++ b/deps/v8/test/mjsunit/wasm/compare-exchange-stress.js
@@ -43,20 +43,20 @@ function makeWorkerCodeForOpcode(compareExchangeOpcode, size, functionName,
kExprI32Mul,
kExprLocalSet, kArgSeqenceLength,
// Outer block so we have something to jump for return.
- ...[kExprBlock, kWasmStmt,
+ ...[kExprBlock, kWasmVoid,
// Set counter to 0.
kExprI32Const, 0,
kExprLocalSet, kLocalCurrentOffset,
// Outer loop until maxcount.
- ...[kExprLoop, kWasmStmt,
+ ...[kExprLoop, kWasmVoid,
// Find the next value to wait for.
- ...[kExprLoop, kWasmStmt,
+ ...[kExprLoop, kWasmVoid,
// Check end of sequence.
kExprLocalGet, kLocalCurrentOffset,
kExprLocalGet, kArgSeqenceLength,
kExprI32Eq,
kExprBrIf, 2, // return
- ...[kExprBlock, kWasmStmt,
+ ...[kExprBlock, kWasmVoid,
// Load next value.
kExprLocalGet, kArgSequencePtr,
kExprLocalGet, kLocalCurrentOffset,
@@ -95,7 +95,7 @@ function makeWorkerCodeForOpcode(compareExchangeOpcode, size, functionName,
loadMemOpcode, 0, 0,
kExprLocalSet, kLocalNextValue,
// Hammer on memory until value found.
- ...[kExprLoop, kWasmStmt,
+ ...[kExprLoop, kWasmVoid,
// Load address.
kExprLocalGet, kArgMemoryCell,
// Load expected value.
diff --git a/deps/v8/test/mjsunit/wasm/compare-exchange64-stress.js b/deps/v8/test/mjsunit/wasm/compare-exchange64-stress.js
index be219f3a070..ae266d11391 100644
--- a/deps/v8/test/mjsunit/wasm/compare-exchange64-stress.js
+++ b/deps/v8/test/mjsunit/wasm/compare-exchange64-stress.js
@@ -46,20 +46,20 @@ function makeWorkerCodeForOpcode(compareExchangeOpcode, size, functionName,
kExprI32Mul,
kExprLocalSet, kArgSeqenceLength,
// Outer block so we have something to jump for return.
- ...[kExprBlock, kWasmStmt,
+ ...[kExprBlock, kWasmVoid,
// Set counter to 0.
kExprI32Const, 0,
kExprLocalSet, kLocalCurrentOffset,
// Outer loop until maxcount.
- ...[kExprLoop, kWasmStmt,
+ ...[kExprLoop, kWasmVoid,
// Find the next value to wait for.
- ...[kExprLoop, kWasmStmt,
+ ...[kExprLoop, kWasmVoid,
// Check end of sequence.
kExprLocalGet, kLocalCurrentOffset,
kExprLocalGet, kArgSeqenceLength,
kExprI32Eq,
kExprBrIf, 2, // return
- ...[kExprBlock, kWasmStmt,
+ ...[kExprBlock, kWasmVoid,
// Load next value.
kExprLocalGet, kArgSequencePtr,
kExprLocalGet, kLocalCurrentOffset,
@@ -100,7 +100,7 @@ function makeWorkerCodeForOpcode(compareExchangeOpcode, size, functionName,
loadMemOpcode, 0, 0,
kExprLocalSet, kLocalNextValue,
// Hammer on memory until value found.
- ...[kExprLoop, kWasmStmt,
+ ...[kExprLoop, kWasmVoid,
// Load address.
kExprLocalGet, kArgMemoryCell,
// Load expected value.
diff --git a/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-compilation.js b/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-compilation.js
index 59c1a9ed3aa..1d30b4e5cf9 100644
--- a/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-compilation.js
+++ b/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-compilation.js
@@ -74,9 +74,9 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
assertPromiseResult(WebAssembly.instantiateStreaming(Promise.resolve(bytes),
{mod: {pow: Math.pow}})
.then(assertUnreachable,
- error => assertEquals("WebAssembly.instantiateStreaming(): call[1] " +
+ error => assertEquals("WebAssembly.instantiateStreaming(): call[0] " +
"expected type f32, found local.get of type " +
- "i32 @+94",
+ "i32 @+92",
error.message)));
})();
diff --git a/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js b/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
index 63568fe6570..d527a72ca6d 100644
--- a/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
+++ b/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
@@ -359,13 +359,13 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
print(arguments.callee.name);
const builder = new WasmModuleBuilder();
builder.addFunction('main', kSig_i_i)
- .addBody([kExprBlock, kWasmStmt,
- kExprBlock, kWasmStmt,
- kExprBlock, kWasmStmt,
- kExprBlock, kWasmStmt,
- kExprBlock, kWasmStmt,
- kExprBlock, kWasmStmt,
- kExprBlock, kWasmStmt,
+ .addBody([kExprBlock, kWasmVoid,
+ kExprBlock, kWasmVoid,
+ kExprBlock, kWasmVoid,
+ kExprBlock, kWasmVoid,
+ kExprBlock, kWasmVoid,
+ kExprBlock, kWasmVoid,
+ kExprBlock, kWasmVoid,
kExprLocalGet, 0,
kExprBrTable, 6, 0, 1, 2, 3, 4, 5, 6,
kExprEnd,
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-rethrow.js b/deps/v8/test/mjsunit/wasm/exceptions-rethrow.js
index ec689791cad..e7dc9e4f0bf 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-rethrow.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-rethrow.js
@@ -14,7 +14,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
let except = builder.addException(kSig_v_v);
builder.addFunction("rethrow0", kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprThrow, except,
kExprCatch, except,
kExprRethrow, 0,
@@ -27,7 +27,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprCatch, except,
kExprLocalGet, 0,
kExprI32Eqz,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprRethrow, 1,
kExprEnd,
kExprI32Const, 23,
@@ -47,7 +47,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
let except = builder.addException(kSig_v_v);
builder.addFunction("rethrow0", kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprThrow, except,
kExprCatchAll,
kExprRethrow, 0,
@@ -60,7 +60,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprCatchAll,
kExprLocalGet, 0,
kExprI32Eqz,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprRethrow, 1,
kExprEnd,
kExprI32Const, 23,
@@ -91,13 +91,13 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprLocalGet, 0,
kExprI32Const, 0,
kExprI32Eq,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprRethrow, 1,
kExprEnd,
kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Eq,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprRethrow, 2,
kExprEnd,
kExprI32Const, 23,
@@ -125,7 +125,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprTry, kWasmI32,
kExprLocalGet, 0,
kExprI32Eqz,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprRethrow, 2,
kExprEnd,
kExprI32Const, 42,
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-shared.js b/deps/v8/test/mjsunit/wasm/exceptions-shared.js
index 8b3defb9af9..d2d595dc736 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-shared.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-shared.js
@@ -30,7 +30,7 @@ function NewExportedException() {
]).exportFunc();
builder.addFunction("catch", kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprCallFunction, fun,
kExprCatch, except,
kExprEnd,
@@ -62,7 +62,7 @@ function NewExportedException() {
]).exportFunc();
builder.addFunction("catch", kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprCallFunction, fun,
kExprCatch, except,
kExprEnd,
@@ -97,7 +97,7 @@ function NewExportedException() {
]).exportFunc();
builder.addFunction("catch", kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprCallFunction, fun,
kExprCatch, except1,
kExprEnd,
@@ -136,7 +136,7 @@ function NewExportedException() {
let except = builder2.addImportedException("m", "ex", kSig_v_v);
builder2.addFunction("catch", kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprCallFunction, fun,
kExprCatch, except,
kExprEnd,
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-simd.js b/deps/v8/test/mjsunit/wasm/exceptions-simd.js
index 9082a7f49e5..ae2d8ee40c0 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-simd.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-simd.js
@@ -40,7 +40,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprEnd,
kExprLocalGet, 0,
kSimdPrefix, kExprI32x4Eq,
- kSimdPrefix, kExprV8x16AllTrue,
+ kSimdPrefix, kExprI8x16AllTrue,
])
.exportFunc();
var instance = builder.instantiate();
diff --git a/deps/v8/test/mjsunit/wasm/exceptions.js b/deps/v8/test/mjsunit/wasm/exceptions.js
index d7539119abc..88e92fa3bb7 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions.js
@@ -17,7 +17,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprLocalGet, 0,
kExprI32Const, 0,
kExprI32Ne,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprThrow, except,
kExprEnd,
kExprI32Const, 1
@@ -36,7 +36,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
let except = builder.addException(kSig_v_v);
builder.addFunction("catch_empty_try", kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprCatch, except,
kExprEnd,
]).exportFunc();
@@ -55,7 +55,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprTry, kWasmI32,
kExprLocalGet, 0,
kExprI32Eqz,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprThrow, except,
kExprEnd,
kExprI32Const, 42,
@@ -74,14 +74,14 @@ load("test/mjsunit/wasm/exceptions-utils.js");
let builder = new WasmModuleBuilder();
builder.addFunction('unreachable_in_try', kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprUnreachable,
kExprCatchAll,
kExprEnd
]).exportFunc();
builder.addFunction('unreachable_in_try_unwind', kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprUnreachable,
kExprUnwind,
kExprEnd
@@ -229,13 +229,13 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction('test', kSig_v_v)
.addBody([
// Calling "throw" directly should produce the expected exception.
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprCallFunction, throw_fn.index,
kExprCatch, except,
kExprEnd,
// Calling through JS produces a wrapped exceptions which does not match
// the catch.
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprCallFunction, imp,
kExprCatch, except,
kExprEnd
@@ -277,13 +277,13 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprTry, kWasmI32,
kExprLocalGet, 0,
kExprI32Eqz,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprThrow, except1,
kExprElse,
kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Eq,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprThrow, except2,
kExprElse,
kExprThrow, except3,
@@ -317,13 +317,13 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprTry, kWasmI32,
kExprLocalGet, 0,
kExprI32Eqz,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprThrow, except1,
kExprElse,
kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Eq,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprThrow, except2,
kExprElse,
kExprThrow, except3,
@@ -440,6 +440,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
let builder = new WasmModuleBuilder();
let except = builder.addException(kSig_v_l);
builder.addFunction("throw_catch_param", kSig_i_i)
+ .addLocals(kWasmI64, 1)
.addBody([
kExprLocalGet, 0,
kExprI64UConvertI32,
@@ -457,7 +458,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprI32Const, 0,
kExprEnd,
kExprEnd,
- ]).addLocals(kWasmI64, 1).exportFunc();
+ ]).exportFunc();
let instance = builder.instantiate();
assertEquals(1, instance.exports.throw_catch_param(5));
@@ -608,7 +609,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprLocalGet, 0,
kExprI32Const, 0,
kExprI32Ne,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprLocalGet, 0,
kExprThrow, except,
kExprUnreachable,
@@ -663,6 +664,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
// p == 2 -> path == 298
// p == 3 -> path == 338
// else -> path == 146
+ .addLocals(kWasmI32, 1)
.addBody([
kExprTry, kWasmI32,
kExprTry, kWasmI32,
@@ -670,7 +672,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Eq,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprI32Const, 1,
kExprThrow, except,
kExprUnreachable,
@@ -686,7 +688,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprLocalGet, 0,
kExprI32Const, 2,
kExprI32Eq,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprLocalGet, 1,
kExprI32Const, 8,
kExprI32Ior,
@@ -705,7 +707,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprLocalGet, 0,
kExprI32Const, 3,
kExprI32Eq,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprLocalGet, 1,
kExprI32Const, /*64=*/ 192, 0,
kExprI32Ior,
@@ -719,7 +721,6 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprI32Ior,
kExprEnd,
])
- .addLocals(kWasmI32, 1)
.exportFunc();
// Scenario 2: Catches an exception raised from the direct callee.
@@ -765,7 +766,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction("string_from_js", kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprCallFunction, kJSThrowString,
kExprCatch, except,
kExprUnreachable,
@@ -775,7 +776,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction("fp_from_js", kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprCallFunction, kJSThrowFP,
kExprCatch, except,
kExprUnreachable,
@@ -785,7 +786,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction("large_from_js", kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprCallFunction, kJSThrowLarge,
kExprCatch, except,
kExprUnreachable,
@@ -795,7 +796,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction("undefined_from_js", kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprCallFunction, kJSThrowUndefined,
kExprCatch, except,
kExprUnreachable,
@@ -865,7 +866,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
let throw_if = builder.addFunction('throw', kSig_v_i)
.addBody([
kExprLocalGet, 0,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprThrow, except,
kExprEnd]).exportFunc();
builder.addFunction('test', kSig_i_i)
@@ -923,9 +924,9 @@ load("test/mjsunit/wasm/exceptions-utils.js");
// 2 -> throw except2
let throw_fn = builder.addFunction('throw', kSig_v_i)
.addBody([
- kExprBlock, kWasmStmt,
- kExprBlock, kWasmStmt,
- kExprBlock, kWasmStmt,
+ kExprBlock, kWasmVoid,
+ kExprBlock, kWasmVoid,
+ kExprBlock, kWasmVoid,
kExprLocalGet, 0,
kExprBrTable, 2, 0, 1, 2,
kExprEnd,
@@ -992,7 +993,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction('test', kSig_i_v)
.addBody([
kExprTry, kWasmI32,
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprThrow, except1,
kExprDelegate, 0,
kExprI32Const, 1,
@@ -1012,8 +1013,8 @@ load("test/mjsunit/wasm/exceptions-utils.js");
let except = builder.addException(kSig_v_v);
builder.addFunction('test', kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
+ kExprTry, kWasmVoid,
kExprThrow, except,
kExprDelegate, 1,
kExprCatchAll,
@@ -1021,8 +1022,8 @@ load("test/mjsunit/wasm/exceptions-utils.js");
]).exportFunc();
builder.addFunction('test_unwind', kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
+ kExprTry, kWasmVoid,
kExprThrow, except,
kExprDelegate, 1,
kExprUnwind,
@@ -1032,3 +1033,61 @@ load("test/mjsunit/wasm/exceptions-utils.js");
assertTraps(WebAssembly.RuntimeError, () => instance.exports.test());
assertTraps(WebAssembly.RuntimeError, () => instance.exports.test_unwind());
})();
+
+(function TestThrowBeforeUnreachable() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_v);
+ builder.addFunction('throw_before_unreachable', kSig_i_v)
+ .addBody([
+ kExprTry, kWasmI32,
+ kExprThrow, except,
+ kExprUnreachable,
+ kExprCatchAll,
+ kExprI32Const, 42,
+ kExprEnd,
+ ]).exportFunc();
+
+ let instance = builder.instantiate();
+ assertEquals(42, instance.exports.throw_before_unreachable());
+})();
+
+(function TestUnreachableInCatchAll() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_v);
+ builder.addFunction('throw_before_unreachable', kSig_i_v)
+ .addBody([
+ kExprTry, kWasmI32,
+ kExprThrow, except,
+ kExprCatchAll,
+ kExprUnreachable,
+ kExprI32Const, 42,
+ kExprEnd,
+ ]).exportFunc();
+
+ let instance = builder.instantiate();
+})();
+
+(function TestThrowWithLocal() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_v);
+ builder.addFunction('throw_with_local', kSig_i_v)
+ .addLocals(kWasmI32, 4)
+ .addBody([
+ kExprI32Const, 42,
+ kExprF64Const, 0, 0, 0, 0, 0, 0, 0, 0,
+ kExprTry, kWasmF32,
+ kExprThrow, except,
+ kExprCatchAll,
+ kExprF32Const, 0, 0, 0, 0,
+ kExprEnd,
+ kExprDrop, // Drop the f32.
+ kExprDrop, // Drop the f64.
+ // Leave the '42' on the stack.
+ ]).exportFunc();
+
+ let instance = builder.instantiate();
+ assertEquals(42, instance.exports.throw_with_local());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/externref.js b/deps/v8/test/mjsunit/wasm/externref.js
index a954f273ae8..ed63ab58861 100644
--- a/deps/v8/test/mjsunit/wasm/externref.js
+++ b/deps/v8/test/mjsunit/wasm/externref.js
@@ -333,3 +333,28 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
instance.exports.main({hello: 4}, 5, {world: 6}, null, {bar: 7});
})();
+
+(function MultiReturnRefTest() {
+ print("MultiReturnTest");
+ let builder = new WasmModuleBuilder();
+ let sig = makeSig([kWasmExternRef],
+ [kWasmExternRef, kWasmExternRef, kWasmExternRef, kWasmExternRef]);
+
+ builder.addFunction("callee", sig)
+ .addBody([
+ kExprLocalGet, 0,
+ kExprLocalGet, 0,
+ kExprLocalGet, 0,
+ kExprLocalGet, 0,
+ ]);
+ builder.addFunction("main", sig)
+ .addBody([
+ kExprLocalGet, 0,
+ kExprCallFunction, 0
+ ])
+ .exportAs("main");
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module);
+ assertEquals(instance.exports.main(null), [null, null, null, null]);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/globals.js b/deps/v8/test/mjsunit/wasm/globals.js
index ba7bef301c9..8a9bb2517e4 100644
--- a/deps/v8/test/mjsunit/wasm/globals.js
+++ b/deps/v8/test/mjsunit/wasm/globals.js
@@ -183,7 +183,7 @@ TestGlobalIndexSpace(kWasmF64, 12345.678);
builder.addFunction("set", kSig_v_ii)
.addBody([
kExprLocalGet, 0,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprLocalGet, 1,
kExprGlobalSet, g.index,
kExprElse,
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory-in-branch.js b/deps/v8/test/mjsunit/wasm/grow-memory-in-branch.js
index 8babc66b758..9c9e881fab1 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory-in-branch.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory-in-branch.js
@@ -33,7 +33,7 @@ function generateBuilder() {
builder.addFunction('main', kSig_i_i)
.addBody([
kExprLocalGet, 0, // get condition parameter
- kExprIf, kWasmStmt, // if it's 1 then enter if
+ kExprIf, kWasmVoid, // if it's 1 then enter if
kExprI32Const, deltaPages, // put deltaPages on stack
kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
@@ -60,7 +60,7 @@ function generateBuilder() {
builder.addFunction('main', kSig_i_i)
.addBody([
kExprLocalGet, 0, // get condition parameter
- kExprIf, kWasmStmt, // if it's 1 then enter if
+ kExprIf, kWasmVoid, // if it's 1 then enter if
kExprI32Const, deltaPages, // put deltaPages on stack
kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
@@ -95,7 +95,7 @@ function generateBuilder() {
builder.addFunction('main', kSig_i_i)
.addBody([
kExprLocalGet, 0, // get condition parameter
- kExprIf, kWasmStmt, // if it's 1 then enter if
+ kExprIf, kWasmVoid, // if it's 1 then enter if
kExprI32Const, index, // put index on stack
kExprI32Const, newValue, // put the value on stack
kExprI32StoreMem, 0, 0, // store
@@ -128,7 +128,7 @@ function generateBuilder() {
builder.addFunction('main', kSig_i_i)
.addBody([
kExprLocalGet, 0, // get condition parameter
- kExprIf, kWasmStmt, // if it's 1 then enter if
+ kExprIf, kWasmVoid, // if it's 1 then enter if
kExprI32Const, deltaPagesIf, // put deltaPagesIf on stack
kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
@@ -160,7 +160,7 @@ function generateBuilder() {
builder.addFunction('main', kSig_i_ii)
.addBody([
kExprLocalGet, 0, // get condition parameter
- kExprIf, kWasmStmt, // if it's 1 then enter if
+ kExprIf, kWasmVoid, // if it's 1 then enter if
kExprI32Const, deltaPages, // put deltaPages on stack
kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
@@ -192,7 +192,7 @@ function generateBuilder() {
builder.addFunction('main', kSig_i_ii)
.addBody([
kExprLocalGet, 0, // get condition parameter
- kExprIf, kWasmStmt, // if it's 1 then enter if
+ kExprIf, kWasmVoid, // if it's 1 then enter if
kExprI32Const, deltaPages, // put deltaPages on stack
kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
@@ -227,7 +227,7 @@ function generateBuilder() {
builder.addFunction('main', kSig_i_ii)
.addBody([
kExprLocalGet, 0, // get condition parameter
- kExprIf, kWasmStmt, // if it's 1 then enter if
+ kExprIf, kWasmVoid, // if it's 1 then enter if
kExprLocalGet, 1, // get index parameter
kExprI32Const, value, // put the value on stack
kExprI32StoreMem, 0, 0, // store
@@ -264,7 +264,7 @@ function generateBuilder() {
builder.addFunction('main', kSig_i_ii)
.addBody([
kExprLocalGet, 0, // get condition parameter
- kExprIf, kWasmStmt, // if it's 1 then enter if
+ kExprIf, kWasmVoid, // if it's 1 then enter if
kExprI32Const, deltaPagesIf, // put deltaPagesIf on stack
kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory-in-call.js b/deps/v8/test/mjsunit/wasm/grow-memory-in-call.js
index 660ec08e904..29ece1cba2f 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory-in-call.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory-in-call.js
@@ -124,9 +124,9 @@ print('=== grow_memory in direct calls ===');
builder.addFunction('main', kSig_i_ii)
.addBody([
// clang-format off
- kExprLoop, kWasmStmt, // while
+ kExprLoop, kWasmVoid, // while
kExprLocalGet, 0, // -
- kExprIf, kWasmStmt, // if <param0> != 0
+ kExprIf, kWasmVoid, // if <param0> != 0
// Grow memory.
kExprLocalGet, 1, // get number of new pages
kExprCallFunction, kGrowFunction, // call the grow function
@@ -174,9 +174,9 @@ print('=== grow_memory in direct calls ===');
builder.addFunction('main', kSig_i_iii)
.addBody([
// clang-format off
- kExprLoop, kWasmStmt, // while
+ kExprLoop, kWasmVoid, // while
kExprLocalGet, 0, // -
- kExprIf, kWasmStmt, // if <param0> != 0
+ kExprIf, kWasmVoid, // if <param0> != 0
// Grow memory.
kExprLocalGet, 1, // get number of new pages
kExprCallFunction, kGrowFunction, // call the grow function
@@ -338,9 +338,9 @@ print('\n=== grow_memory in indirect calls ===');
builder.addFunction('main', kSig_i_iii)
.addBody([
// clang-format off
- kExprLoop, kWasmStmt, // while
+ kExprLoop, kWasmVoid, // while
kExprLocalGet, 1, // -
- kExprIf, kWasmStmt, // if <param1> != 0
+ kExprIf, kWasmVoid, // if <param1> != 0
// Grow memory.
kExprLocalGet, 2, // get number of new pages
kExprLocalGet, 0, // get index of the function
@@ -393,9 +393,9 @@ print('\n=== grow_memory in indirect calls ===');
'main', makeSig([kWasmI32, kWasmI32, kWasmI32, kWasmI32], [kWasmI32]))
.addBody([
// clang-format off
- kExprLoop, kWasmStmt, // while
+ kExprLoop, kWasmVoid, // while
kExprLocalGet, 1, // -
- kExprIf, kWasmStmt, // if <param1> != 0
+ kExprIf, kWasmVoid, // if <param1> != 0
// Grow memory.
kExprLocalGet, 2, // get number of new pages
kExprLocalGet, 0, // get index of the function
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory-in-loop.js b/deps/v8/test/mjsunit/wasm/grow-memory-in-loop.js
index 143b555b17b..eb99902c14a 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory-in-loop.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory-in-loop.js
@@ -30,9 +30,9 @@ function generateBuilder() {
builder.addFunction('main', kSig_i_i)
.addBody([
// clang-format off
- kExprLoop, kWasmStmt, // while
+ kExprLoop, kWasmVoid, // while
kExprLocalGet, 0, // -
- kExprIf, kWasmStmt, // if <param0> != 0
+ kExprIf, kWasmVoid, // if <param0> != 0
// Grow memory.
kExprI32Const, deltaPages, // -
kExprMemoryGrow, kMemoryZero, // grow memory
@@ -82,9 +82,9 @@ function generateBuilder() {
kExprI32Const, deltaPagesOut, // -
kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
- kExprLoop, kWasmStmt, // while
+ kExprLoop, kWasmVoid, // while
kExprLocalGet, 0, // -
- kExprIf, kWasmStmt, // if <param0> != 0
+ kExprIf, kWasmVoid, // if <param0> != 0
// Grow memory.
kExprI32Const, deltaPagesIn, // -
kExprMemoryGrow, kMemoryZero, // grow memory
@@ -131,9 +131,9 @@ function generateBuilder() {
builder.addFunction('main', kSig_i_ii)
.addBody([
// clang-format off
- kExprLoop, kWasmStmt, // while
+ kExprLoop, kWasmVoid, // while
kExprLocalGet, 0, // -
- kExprIf, kWasmStmt, // if <param0> != 0
+ kExprIf, kWasmVoid, // if <param0> != 0
// Grow memory.
kExprI32Const, deltaPages, // -
kExprMemoryGrow, kMemoryZero, // grow memory
@@ -202,9 +202,9 @@ function generateBuilder() {
kExprI32Add, // increase value on stack
kExprI32StoreMem, 0, 0, // store new value
// Start loop.
- kExprLoop, kWasmStmt, // while
+ kExprLoop, kWasmVoid, // while
kExprLocalGet, 0, // -
- kExprIf, kWasmStmt, // if <param0> != 0
+ kExprIf, kWasmVoid, // if <param0> != 0
// Grow memory.
kExprI32Const, deltaPagesIn, // -
kExprMemoryGrow, kMemoryZero, // grow memory
diff --git a/deps/v8/test/mjsunit/wasm/loop-rotation.js b/deps/v8/test/mjsunit/wasm/loop-rotation.js
index 7805f5ccf5e..538bdb0bd0d 100644
--- a/deps/v8/test/mjsunit/wasm/loop-rotation.js
+++ b/deps/v8/test/mjsunit/wasm/loop-rotation.js
@@ -11,7 +11,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
var builder = new WasmModuleBuilder();
builder.addFunction("main", kSig_v_i)
.addBody([
- kExprLoop, kWasmStmt,
+ kExprLoop, kWasmVoid,
kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Sub,
@@ -32,7 +32,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
var builder = new WasmModuleBuilder();
builder.addFunction("main", kSig_v_i)
.addBody([
- kExprLoop, kWasmStmt,
+ kExprLoop, kWasmVoid,
kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Sub,
@@ -55,7 +55,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addMemory(1, undefined, false);
builder.addFunction("main", kSig_v_i)
.addBody([
- kExprLoop, kWasmStmt,
+ kExprLoop, kWasmVoid,
kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Sub,
diff --git a/deps/v8/test/mjsunit/wasm/loop-unrolling.js b/deps/v8/test/mjsunit/wasm/loop-unrolling.js
index b0e125413f3..43852dec263 100644
--- a/deps/v8/test/mjsunit/wasm/loop-unrolling.js
+++ b/deps/v8/test/mjsunit/wasm/loop-unrolling.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --experimental-wasm-typed-funcref --experimental-wasm-eh
-// Flags: --wasm-loop-unrolling
+// Flags: --wasm-loop-unrolling --experimental-wasm-return-call
// Needed for exceptions-utils.js.
// Flags: --allow-natives-syntax
@@ -17,10 +17,10 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction("main", kSig_i_i)
.addBody([
...wasmI32Const(1),
- kExprLet, kWasmStmt, 1, 1, kWasmI32,
- kExprLoop, kWasmStmt,
+ kExprLet, kWasmVoid, 1, 1, kWasmI32,
+ kExprLoop, kWasmVoid,
...wasmI32Const(10),
- kExprLet, kWasmStmt, 1, 1, kWasmI32,
+ kExprLet, kWasmVoid, 1, 1, kWasmI32,
kExprLocalGet, 0,
kExprLocalGet, 1,
kExprI32Sub,
@@ -38,6 +38,33 @@ load("test/mjsunit/wasm/exceptions-utils.js");
assertEquals(instance.exports.main(100), 109);
})();
+// Test the interaction between tail calls and loop unrolling.
+(function TailCallTest() {
+ let builder = new WasmModuleBuilder();
+
+ let callee = builder.addFunction("callee", kSig_i_i)
+ .addBody([kExprLocalGet, 0]);
+
+ builder.addFunction("main", kSig_i_i)
+ .addBody([
+ kExprLoop, kWasmVoid,
+ kExprLocalGet, 0,
+ kExprIf, kWasmVoid,
+ kExprLocalGet, 0,
+ kExprReturnCall, callee.index,
+ kExprElse,
+ kExprBr, 1,
+ kExprEnd,
+ kExprEnd,
+ kExprUnreachable
+ ])
+ .exportAs("main");
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module);
+ assertEquals(instance.exports.main(1), 1);
+})();
+
// Test the interaction between the eh proposal and loop unrolling.
(function TestRethrowNested() {
@@ -59,16 +86,16 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprLocalGet, 0,
kExprI32Const, 0,
kExprI32Eq,
- kExprIf, kWasmStmt,
- kExprLoop, kWasmStmt,
+ kExprIf, kWasmVoid,
+ kExprLoop, kWasmVoid,
kExprRethrow, 2,
kExprEnd,
kExprEnd,
kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Eq,
- kExprIf, kWasmStmt,
- kExprLoop, kWasmStmt,
+ kExprIf, kWasmVoid,
+ kExprLoop, kWasmVoid,
kExprRethrow, 3,
kExprEnd,
kExprEnd,
@@ -92,11 +119,11 @@ load("test/mjsunit/wasm/exceptions-utils.js");
let except1 = builder.addException(kSig_v_v);
builder.addFunction("throw", kSig_i_i)
.addBody([
- kExprLoop, kWasmStmt,
+ kExprLoop, kWasmVoid,
kExprLocalGet, 0,
kExprI32Const, 10,
kExprI32GtS,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprThrow, except1,
kExprElse,
kExprLocalGet, 0,
@@ -125,7 +152,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprLocalGet, 0,
kExprI32Const, 10,
kExprI32GtS,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprThrow, except1,
kExprElse,
kExprLocalGet, 0,
diff --git a/deps/v8/test/mjsunit/wasm/memory64.js b/deps/v8/test/mjsunit/wasm/memory64.js
index e7646358464..5376ba87dbd 100644
--- a/deps/v8/test/mjsunit/wasm/memory64.js
+++ b/deps/v8/test/mjsunit/wasm/memory64.js
@@ -81,3 +81,28 @@ function BasicMemory64Tests(num_pages) {
// let num_pages = 5 * 1024 * 1024 * 1024 / kPageSize;
// BasicMemory64Tests(num_pages);
//})();
+
+(function TestGrow64() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.addMemory64(1, 10, false);
+
+ builder.addFunction('grow', makeSig([kWasmI64], [kWasmI64]))
+ .addBody([
+ kExprLocalGet, 0, // local.get 0
+ kExprMemoryGrow, 0, // memory.grow 0
+ ])
+ .exportFunc();
+
+ let instance = builder.instantiate();
+
+ assertEquals(1n, instance.exports.grow(2n));
+ assertEquals(3n, instance.exports.grow(1n));
+ assertEquals(-1n, instance.exports.grow(-1n));
+ assertEquals(-1n, instance.exports.grow(1n << 31n));
+ assertEquals(-1n, instance.exports.grow(1n << 32n));
+ assertEquals(-1n, instance.exports.grow(1n << 33n));
+ assertEquals(-1n, instance.exports.grow(1n << 63n));
+  assertEquals(-1n, instance.exports.grow(7n)); // Above the maximum of 10.
+ assertEquals(4n, instance.exports.grow(6n)); // Just at the maximum of 10.
+})();
diff --git a/deps/v8/test/mjsunit/wasm/module-memory.js b/deps/v8/test/mjsunit/wasm/module-memory.js
index d5a4e7119fb..5d5e81c6d3c 100644
--- a/deps/v8/test/mjsunit/wasm/module-memory.js
+++ b/deps/v8/test/mjsunit/wasm/module-memory.js
@@ -17,12 +17,12 @@ function genModule(memory) {
.addBody([
// main body: while(i) { if(mem[i]) return -1; i -= 4; } return 0;
// TODO(titzer): this manual bytecode has a copy of test-run-wasm.cc
- /**/ kExprLoop, kWasmStmt, // --
+ /**/ kExprLoop, kWasmVoid, // --
/* */ kExprLocalGet, 0, // --
- /* */ kExprIf, kWasmStmt, // --
+ /* */ kExprIf, kWasmVoid, // --
/* */ kExprLocalGet, 0, // --
/* */ kExprI32LoadMem, 0, 0, // --
- /* */ kExprIf, kWasmStmt, // --
+ /* */ kExprIf, kWasmVoid, // --
/* */ kExprI32Const, 127, // --
/* */ kExprReturn, // --
/* */ kExprEnd, // --
diff --git a/deps/v8/test/mjsunit/wasm/multiple-code-spaces.js b/deps/v8/test/mjsunit/wasm/multiple-code-spaces.js
index 9e786bed230..342b3c682d0 100644
--- a/deps/v8/test/mjsunit/wasm/multiple-code-spaces.js
+++ b/deps/v8/test/mjsunit/wasm/multiple-code-spaces.js
@@ -32,7 +32,7 @@ while (true) {
// Each function f<n> with argument {i} then calls f<n/10> with argument
// {i + 1} and returns whatever that function returns.
const body_template = [
- kExprLocalGet, 0, kExprI32Eqz, kExprIf, kWasmStmt, // if (i == 0)
+ kExprLocalGet, 0, kExprI32Eqz, kExprIf, kWasmVoid, // if (i == 0)
kExprLocalGet, 0 // get i
];
for (let i = 0; i < 1000; ++i) body_template.push(kExprI32LoadMem, 0, 0);
diff --git a/deps/v8/test/mjsunit/wasm/reference-tables.js b/deps/v8/test/mjsunit/wasm/reference-tables.js
index 756ec04d44f..cfbe238e6bd 100644
--- a/deps/v8/test/mjsunit/wasm/reference-tables.js
+++ b/deps/v8/test/mjsunit/wasm/reference-tables.js
@@ -4,74 +4,81 @@
// Flags: --experimental-wasm-typed-funcref
-load("test/mjsunit/wasm/wasm-module-builder.js");
+load('test/mjsunit/wasm/wasm-module-builder.js');
-(function Test1() {
- var exporting_instance = (function () {
+(function TestTables() {
+ var exporting_instance = (function() {
var builder = new WasmModuleBuilder();
var binary_type = builder.addType(kSig_i_ii);
- builder.addFunction("addition", kSig_i_ii)
- .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Add])
- .exportFunc();
+ builder.addFunction('addition', kSig_i_ii)
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Add])
+ .exportFunc();
- builder.addFunction("id", kSig_i_i)
- .addBody([kExprLocalGet, 0])
- .exportFunc();
+ builder.addFunction('succ', kSig_i_i)
+ .addBody([kExprLocalGet, 0, kExprI32Const, 1, kExprI32Add])
+ .exportFunc();
- builder.addTable(wasmOptRefType(binary_type), 1, 100).exportAs("table");
+ builder.addTable(wasmOptRefType(binary_type), 1, 100).exportAs('table');
return builder.instantiate({});
})();
// Wrong type for imported table.
- assertThrows(
- () => {
- var builder = new WasmModuleBuilder();
- var unary_type = builder.addType(kSig_i_i);
- builder.addImportedTable("imports", "table", 1, 100,
- wasmOptRefType(unary_type));
- builder.instantiate({imports: {table: exporting_instance.exports.table}})
- },
- WebAssembly.LinkError,
- /imported table does not match the expected type/
- )
+ assertThrows(() => {
+ var builder = new WasmModuleBuilder();
+ var unary_type = builder.addType(kSig_i_i);
+ builder.addImportedTable(
+ 'imports', 'table', 1, 100, wasmOptRefType(unary_type));
+ builder.instantiate({imports: {table: exporting_instance.exports.table}})
+ }, WebAssembly.LinkError, /imported table does not match the expected type/)
// Type for imported table must match exactly.
- assertThrows(
- () => {
- var builder = new WasmModuleBuilder();
- builder.addImportedTable("imports", "table", 1, 100, kWasmFuncRef);
- builder.instantiate({imports: {table: exporting_instance.exports.table}})
- },
- WebAssembly.LinkError,
- /imported table does not match the expected type/
- )
-
- var instance = (function () {
+ assertThrows(() => {
+ var builder = new WasmModuleBuilder();
+ builder.addImportedTable('imports', 'table', 1, 100, kWasmFuncRef);
+ builder.instantiate({imports: {table: exporting_instance.exports.table}})
+ }, WebAssembly.LinkError, /imported table does not match the expected type/)
+
+ var instance = (function() {
var builder = new WasmModuleBuilder();
var unary_type = builder.addType(kSig_i_i);
var binary_type = builder.addType(kSig_i_ii);
- builder.addImportedTable("imports", "table", 1, 100,
- wasmOptRefType(binary_type));
-
- var table = builder.addTable(wasmOptRefType(unary_type), 1)
- .exportAs("table");
- builder.addTable(kWasmFuncRef, 1).exportAs("generic_table");
-
- builder.addFunction("table_test", makeSig([wasmRefType(unary_type)],
- [kWasmI32]))
- // Set table[0] to input function, then retrieve it and call it.
- .addBody([kExprI32Const, 0, kExprLocalGet, 0, kExprTableSet, table.index,
- kExprI32Const, 42, kExprI32Const, 0, kExprTableGet, table.index,
- kExprCallRef])
- .exportFunc();
+ builder.addImportedTable(
+ 'imports', 'table', 1, 100, wasmOptRefType(binary_type));
+
+ var table =
+ builder.addTable(wasmOptRefType(unary_type), 10).exportAs('table');
+ builder.addTable(kWasmFuncRef, 1).exportAs('generic_table');
+
+ builder
+ .addFunction(
+ 'table_test', makeSig([wasmRefType(unary_type)], [kWasmI32]))
+ // Set table[0] to input function, then retrieve it and call it.
+ .addBody([
+ kExprI32Const, 0, kExprLocalGet, 0, kExprTableSet, table.index,
+ kExprI32Const, 42, kExprI32Const, 0, kExprTableGet, table.index,
+ kExprCallRef
+ ])
+ .exportFunc();
+
+ // Same, but with table[1] and call_indirect
+ builder
+ .addFunction(
+ 'table_indirect_test',
+ makeSig([wasmRefType(unary_type)], [kWasmI32]))
+ .addBody([
+ kExprI32Const, 1, kExprLocalGet, 0, kExprTableSet, table.index,
+ kExprI32Const, 42, kExprI32Const, 0, kExprCallIndirect, unary_type,
+ table.index
+ ])
+ .exportFunc();
// Instantiate with a table of the correct type.
return builder.instantiate(
- {imports: {table: exporting_instance.exports.table}});
+ {imports: {table: exporting_instance.exports.table}});
})();
// This module is valid.
@@ -79,13 +86,57 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
// The correct function reference is preserved when setting it to and getting
// it back from a table.
- assertEquals(42, instance.exports.table_test(exporting_instance.exports.id));
+ assertEquals(
+ 43, instance.exports.table_test(exporting_instance.exports.succ));
+ // Same for call indirect (the indirect call tables are also set correctly).
+ assertEquals(
+ 43,
+ instance.exports.table_indirect_test(exporting_instance.exports.succ));
// Setting from JS API respects types.
- instance.exports.generic_table.set(0, exporting_instance.exports.id);
- instance.exports.table.set(0, exporting_instance.exports.id);
+ instance.exports.generic_table.set(0, exporting_instance.exports.succ);
+ instance.exports.table.set(0, exporting_instance.exports.succ);
assertThrows(
- () => instance.exports.table.set(0, exporting_instance.exports.addition),
- TypeError,
- /Argument 1 must be null or a WebAssembly function of type compatible to 'this'/);
+ () => instance.exports.table.set(0, exporting_instance.exports.addition),
+ TypeError,
+ /Argument 1 must be null or a WebAssembly function of type compatible to/);
+})();
+
+(function TestNonNullableTables() {
+ var builder = new WasmModuleBuilder();
+
+ var binary_type = builder.addType(kSig_i_ii);
+
+ var addition = builder.addFunction('addition', kSig_i_ii).addBody([
+ kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Add
+ ]);
+ var subtraction =
+ builder.addFunction('subtraction', kSig_i_ii)
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Sub])
+ .exportFunc();
+
+ var table = builder.addTable(wasmRefType(binary_type), 3, 3, addition.index);
+
+ builder.addFunction('init', kSig_v_v)
+ .addBody([
+ kExprI32Const, 1, kExprRefFunc, subtraction.index, kExprTableSet,
+ table.index
+ ])
+ .exportFunc();
+
+ // (index, arg1, arg2) -> table[index](arg1, arg2)
+ builder.addFunction('table_test', kSig_i_iii)
+ .addBody([
+ kExprLocalGet, 1, kExprLocalGet, 2, kExprLocalGet, 0, kExprCallIndirect,
+ binary_type, table.index
+ ])
+ .exportFunc();
+
+ var instance = builder.instantiate({});
+
+ assertTrue(!!instance);
+
+ instance.exports.init();
+ assertEquals(44, instance.exports.table_test(0, 33, 11));
+ assertEquals(22, instance.exports.table_test(1, 33, 11));
})();
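Note: the arithmetic in TestNonNullableTables follows from how the new init_func_index parameter seeds the table; a sketch of the dispatch, using only names from the hunk above:
// addTable(wasmRefType(binary_type), 3, 3, addition.index) fills all three slots with 'addition',
// so table_test(0, 33, 11) dispatches to addition: 33 + 11 = 44.
// init() stores 'subtraction' into slot 1 (kExprRefFunc + kExprTableSet),
// so table_test(1, 33, 11) dispatches to subtraction: 33 - 11 = 22.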
diff --git a/deps/v8/test/mjsunit/wasm/simd-i64x2-mul.js b/deps/v8/test/mjsunit/wasm/simd-i64x2-mul.js
new file mode 100644
index 00000000000..e50feb2e54e
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/simd-i64x2-mul.js
@@ -0,0 +1,39 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-enable-avx
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+// Carefully hand-crafted test case to exercise a codegen bug in Liftoff. In
+// i64x2.mul, non-AVX case, we will overwrite rhs if dst == rhs. The intention
+// is to do dst = lhs * rhs, but if dst == rhs && dst != lhs, we will overwrite
+// dst (and hence rhs) with lhs, effectively doing lhs^2.
+const builder = new WasmModuleBuilder();
+builder.addMemory(16, 32);
+builder.addFunction(undefined, kSig_l_v)
+.addBody([
+ kExprI64Const, 0,
+ kSimdPrefix, kExprI64x2Splat,
+ kExprI64Const, 1,
+ kSimdPrefix, kExprI64x2Splat,
+ kExprI64Const, 2,
+ kSimdPrefix, kExprI64x2Splat,
+ kExprCallFunction, 1,
+]);
+
+let sig = makeSig([kWasmS128, kWasmS128, kWasmS128], [kWasmI64]);
+builder.addFunction(undefined, sig)
+.addLocals(kWasmS128, 10)
+.addBody([
+ kExprLocalGet, 2, // This is 2 (lhs).
+ kExprI64Const, 4, // This is 4 (rhs).
+ kSimdPrefix, kExprI64x2Splat,
+ kSimdPrefix, kExprI64x2Mul, 0x01, // The bug will write 2 to rhs.
+ kSimdPrefix, kExprI64x2ExtractLane, 0,
+]);
+builder.addExport('main', 0);
+const module = builder.instantiate();
+// Should be 2 * 4; the buggy codegen will give 2 * 2 instead.
+assertEquals(8n, module.exports.main());
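Note: a minimal pseudocode sketch of the aliasing hazard this test targets (not the actual Liftoff lowering, just the shape of the bug described in the comment above):
// intended: dst = lhs * rhs (non-AVX, two-operand form)
//   if (dst != lhs) move(dst, lhs);   // clobbers rhs when dst aliases rhs
//   mul(dst, rhs);
// With dst == rhs and dst != lhs, the move turns rhs into a copy of lhs,
// so the multiply computes lhs * lhs (2 * 2) instead of lhs * rhs (2 * 4 = 8).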
diff --git a/deps/v8/test/mjsunit/wasm/stack.js b/deps/v8/test/mjsunit/wasm/stack.js
index 4f91f58fc7f..be9d54a0648 100644
--- a/deps/v8/test/mjsunit/wasm/stack.js
+++ b/deps/v8/test/mjsunit/wasm/stack.js
@@ -88,11 +88,11 @@ Error.prepareStackTrace = function(error, frames) {
module.exports.main();
verifyStack(stack, [
- // isWasm function line pos file offset funcIndex
- [ false, "STACK", 38, 0, "stack.js"],
- [ true, "main", 1, 0x86, null, '0x86', 1],
- [ false, "testStackFrames", 88, 0, "stack.js"],
- [ false, null, 97, 0, "stack.js"]
+ // isWasm function line pos file offset funcIndex
+ [ false, "STACK", 38, 0, "stack.js"],
+ [ true, "main", 1, 0x86, "wasm://wasm/7168ab72", '0x86', 1],
+ [ false, "testStackFrames", 88, 0, "stack.js"],
+ [ false, null, 97, 0, "stack.js"]
]);
})();
@@ -103,10 +103,10 @@ Error.prepareStackTrace = function(error, frames) {
} catch (e) {
assertContains("unreachable", e.message);
verifyStack(e.stack, [
- // isWasm function line pos file offset funcIndex
- [ true, "exec_unreachable", 1, 0x8b, null, '0x8b', 2],
- [ false, "testWasmUnreachable", 101, 0, "stack.js"],
- [ false, null, 112, 0, "stack.js"]
+ // isWasm function line pos file offset funcIndex
+ [ true, "exec_unreachable", 1, 0x8b, "wasm://wasm/7168ab72", '0x8b', 2],
+ [ false, "testWasmUnreachable", 101, 0, "stack.js"],
+ [ false, null, 112, 0, "stack.js"]
]);
}
})();
@@ -118,11 +118,11 @@ Error.prepareStackTrace = function(error, frames) {
} catch (e) {
assertContains("out of bounds", e.message);
verifyStack(e.stack, [
- // isWasm function line pos file offset funcIndex
- [ true, "mem_out_of_bounds", 1, 0x91, null, '0x91', 3],
- [ true, "call_mem_out_of_bounds", 1, 0x97, null, '0x97', 4],
- [ false, "testWasmMemOutOfBounds", 116, 0, "stack.js"],
- [ false, null, 128, 0, "stack.js"]
+ // isWasm function line pos file offset funcIndex
+ [ true, "mem_out_of_bounds", 1, 0x91, "wasm://wasm/7168ab72", '0x91', 3],
+ [ true, "call_mem_out_of_bounds", 1, 0x97, "wasm://wasm/7168ab72", '0x97', 4],
+ [ false, "testWasmMemOutOfBounds", 116, 0, "stack.js"],
+ [ false, null, 128, 0, "stack.js"]
]);
}
})();
@@ -147,11 +147,11 @@ Error.prepareStackTrace = function(error, frames) {
assertEquals("Maximum call stack size exceeded", e.message, "trap reason");
assertTrue(e.stack.length >= 4, "expected at least 4 stack entries");
verifyStack(e.stack.splice(0, 4), [
- // isWasm function line pos file offset funcIndex
- [ true, "recursion", 1, 0x34, null, '0x34', 0],
- [ true, "recursion", 1, 0x37, null, '0x37', 0],
- [ true, "recursion", 1, 0x37, null, '0x37', 0],
- [ true, "recursion", 1, 0x37, null, '0x37', 0]
+ // isWasm function line pos file offset funcIndex
+ [ true, "recursion", 1, 0x34, "wasm://wasm/80a35e5a", '0x34', 0],
+ [ true, "recursion", 1, 0x37, "wasm://wasm/80a35e5a", '0x37', 0],
+ [ true, "recursion", 1, 0x37, "wasm://wasm/80a35e5a", '0x37', 0],
+ [ true, "recursion", 1, 0x37, "wasm://wasm/80a35e5a", '0x37', 0]
]);
}
})();
@@ -175,10 +175,10 @@ Error.prepareStackTrace = function(error, frames) {
assertEquals('unreachable', e.message, 'trap reason');
let hexOffset = '0x' + (unreachable_pos + 0x25).toString(16);
verifyStack(e.stack, [
- // isWasm function line pos file offset funcIndex
- [ true, 'main', 1, unreachable_pos + 0x25, null, hexOffset, 0],
- [ false, 'testBigOffset', 172, 0, 'stack.js'],
- [ false, null, 184, 0, 'stack.js']
+ // isWasm function line pos file offset funcIndex
+ [ true, 'main', 1, unreachable_pos + 0x25, 'wasm://wasm/000600e6', hexOffset, 0],
+ [ false, 'testBigOffset', 172, 0, 'stack.js'],
+ [ false, null, 184, 0, 'stack.js']
]);
}
})();
diff --git a/deps/v8/test/mjsunit/wasm/streaming-error-position.js b/deps/v8/test/mjsunit/wasm/streaming-error-position.js
index 50f795f7704..6984f22ea2c 100644
--- a/deps/v8/test/mjsunit/wasm/streaming-error-position.js
+++ b/deps/v8/test/mjsunit/wasm/streaming-error-position.js
@@ -390,7 +390,7 @@ function testErrorPosition(bytes, pos, message) {
1, // number of types
kWasmFunctionTypeForm, // type
1, // number of parameter
- 0x7b, // invalid type
+ kWasmVoid, // invalid type
0 // number of returns
]);
diff --git a/deps/v8/test/mjsunit/wasm/table-access.js b/deps/v8/test/mjsunit/wasm/table-access.js
index bde5793acc2..1f070d01f92 100644
--- a/deps/v8/test/mjsunit/wasm/table-access.js
+++ b/deps/v8/test/mjsunit/wasm/table-access.js
@@ -147,7 +147,6 @@ const dummy_func = exports.set_table_func1;
kExprTableSet, table_index, // --
kExprI32Const, index, // entry index
kExprCallIndirect, sig_index, table_index // --
-
])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/trap-location.js b/deps/v8/test/mjsunit/wasm/trap-location.js
index a34162ab8c1..719af7e7b74 100644
--- a/deps/v8/test/mjsunit/wasm/trap-location.js
+++ b/deps/v8/test/mjsunit/wasm/trap-location.js
@@ -57,7 +57,7 @@ builder.addFunction("main", kSig_i_i)
kExprLocalGet, 0,
kExprI32Const, 2,
kExprI32LtU,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
// offset 9
kExprI32Const, 0x7e /* -2 */,
kExprLocalGet, 0,
@@ -70,7 +70,7 @@ builder.addFunction("main", kSig_i_i)
kExprLocalGet, 0,
kExprI32Const, 2,
kExprI32Eq,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprUnreachable,
kExprEnd,
// offset 30
diff --git a/deps/v8/test/mjsunit/wasm/unreachable-validation.js b/deps/v8/test/mjsunit/wasm/unreachable-validation.js
index 70768ff7d49..a9165639ab6 100644
--- a/deps/v8/test/mjsunit/wasm/unreachable-validation.js
+++ b/deps/v8/test/mjsunit/wasm/unreachable-validation.js
@@ -45,13 +45,13 @@ let brt1 = [kExprBrTable, 0, 1];
let brt01 = [kExprBrTable, 1, 0, 1];
let f32 = [kExprF32Const, 0, 0, 0, 0];
let zero = [kExprI32Const, 0];
-let if_else_empty = [kExprIf, kWasmStmt, kExprElse, kExprEnd];
-let if_unr = [kExprIf, kWasmStmt, kExprUnreachable, kExprEnd];
-let if_else_unr = [kExprIf, kWasmStmt, kExprUnreachable, kExprElse, kExprUnreachable, kExprEnd];
-let block_unr = [kExprBlock, kWasmStmt, kExprUnreachable, kExprEnd];
-let loop_unr = [kExprLoop, kWasmStmt, kExprUnreachable, kExprEnd];
-let block_block_unr = [kExprBlock, kWasmStmt, kExprBlock, kWasmStmt, kExprUnreachable, kExprEnd, kExprEnd];
-let block = [kExprBlock, kWasmStmt]
+let if_else_empty = [kExprIf, kWasmVoid, kExprElse, kExprEnd];
+let if_unr = [kExprIf, kWasmVoid, kExprUnreachable, kExprEnd];
+let if_else_unr = [kExprIf, kWasmVoid, kExprUnreachable, kExprElse, kExprUnreachable, kExprEnd];
+let block_unr = [kExprBlock, kWasmVoid, kExprUnreachable, kExprEnd];
+let loop_unr = [kExprLoop, kWasmVoid, kExprUnreachable, kExprEnd];
+let block_block_unr = [kExprBlock, kWasmVoid, kExprBlock, kWasmVoid, kExprUnreachable, kExprEnd, kExprEnd];
+let block = [kExprBlock, kWasmVoid]
let iblock = [kExprBlock, kWasmI32]
let fblock = [kExprBlock, kWasmF32]
let end = kExprEnd;
diff --git a/deps/v8/test/mjsunit/wasm/wasm-gc-js-roundtrip.js b/deps/v8/test/mjsunit/wasm/wasm-gc-js-roundtrip.js
new file mode 100644
index 00000000000..13c19813ee7
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/wasm-gc-js-roundtrip.js
@@ -0,0 +1,149 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+let instance = (() => {
+ let builder = new WasmModuleBuilder();
+ let struct = builder.addStruct([makeField(kWasmI32, true)]);
+ let array = builder.addArray(kWasmF64, true);
+ let sig = builder.addType(makeSig([kWasmI32], [kWasmI32]));
+
+ let func = builder.addFunction('inc', sig)
+ .addBody([kExprLocalGet, 0, kExprI32Const, 1, kExprI32Add])
+ .exportAs('inc');
+
+ builder.addFunction('struct_producer', makeSig([], [kWasmDataRef]))
+ .addBody([
+ kGCPrefix, kExprRttCanon, struct, kGCPrefix, kExprStructNewDefault,
+ struct
+ ])
+ .exportFunc();
+
+ builder.addFunction('array_producer', makeSig([], [kWasmDataRef]))
+ .addBody([
+ kExprI32Const, 10, kGCPrefix, kExprRttCanon, array, kGCPrefix,
+ kExprArrayNewDefault, array
+ ])
+ .exportFunc();
+
+ builder.addFunction('i31_producer', makeSig([], [kWasmI31Ref]))
+ .addBody([kExprI32Const, 5, kGCPrefix, kExprI31New])
+ .exportFunc();
+
+ builder.addFunction('func_producer', makeSig([], [wasmRefType(sig)]))
+ .addBody([kExprRefFunc, func.index])
+ .exportFunc();
+
+ let test_types = {
+ i31: kWasmI31Ref,
+ struct: kWasmDataRef,
+ array: kWasmDataRef,
+ raw_struct: struct,
+ raw_array: array,
+ typed_func: sig,
+ data: kWasmDataRef,
+ eq: kWasmEqRef,
+ func: kWasmFuncRef,
+ any: kWasmAnyRef,
+ };
+
+ for (key in test_types) {
+ let type = wasmOptRefType(test_types[key]);
+ builder.addFunction(key + '_id', makeSig([type], [type]))
+ .addBody([kExprLocalGet, 0])
+ .exportFunc();
+ builder.addFunction(key + '_null', makeSig([], [type]))
+ .addBody([kExprRefNull, test_types[key]])
+ .exportFunc();
+ }
+
+ return builder.instantiate({});
+})();
+
+// Wasm-exposed null is the same as JS null.
+assertEquals(instance.exports.struct_null(), null);
+
+// We can roundtrip an i31.
+instance.exports.i31_id(instance.exports.i31_producer());
+// We can roundtrip any null as i31.
+instance.exports.i31_id(instance.exports.i31_null());
+instance.exports.i31_id(instance.exports.struct_null());
+// We cannot roundtrip a struct as i31.
+assertThrows(
+ () => instance.exports.i31_id(instance.exports.struct_producer()),
+ TypeError, 'type incompatibility when transforming from/to JS');
+
+// We can roundtrip a struct as dataref.
+instance.exports.data_id(instance.exports.struct_producer());
+// We can roundtrip an array as dataref.
+instance.exports.data_id(instance.exports.array_producer());
+// We can roundtrip any null as dataref.
+instance.exports.data_id(instance.exports.data_null());
+instance.exports.data_id(instance.exports.i31_null());
+// We cannot roundtrip an i31 as dataref.
+assertThrows(
+ () => instance.exports.data_id(instance.exports.i31_producer()), TypeError,
+ 'type incompatibility when transforming from/to JS');
+
+// We can roundtrip a struct as eqref.
+instance.exports.eq_id(instance.exports.struct_producer());
+// We can roundtrip an array as eqref.
+instance.exports.eq_id(instance.exports.array_producer());
+// We can roundtrip an i31 as eqref.
+instance.exports.eq_id(instance.exports.i31_producer());
+// We can roundtrip any null as eqref.
+instance.exports.eq_id(instance.exports.data_null());
+instance.exports.eq_id(instance.exports.i31_null());
+instance.exports.eq_id(instance.exports.func_null());
+// We cannot roundtrip a func as eqref.
+assertThrows(
+ () => instance.exports.eq_id(instance.exports.func_producer()), TypeError,
+ 'type incompatibility when transforming from/to JS');
+
+// We can roundtrip a struct as anyref.
+instance.exports.any_id(instance.exports.struct_producer());
+// We can roundtrip an array as anyref.
+instance.exports.any_id(instance.exports.array_producer());
+// We can roundtrip an i31 as anyref.
+instance.exports.any_id(instance.exports.i31_producer());
+// We can roundtrip a func as anyref.
+instance.exports.any_id(instance.exports.func_producer());
+// We can roundtrip any null as anyref.
+instance.exports.any_id(instance.exports.data_null());
+instance.exports.any_id(instance.exports.i31_null());
+instance.exports.any_id(instance.exports.func_null());
+// We can roundtrip a JS object as anyref.
+instance.exports.any_id(instance);
+
+// We can roundtrip a typed function.
+instance.exports.typed_func_id(instance.exports.func_producer());
+// We can roundtrip any null as typed function.
+instance.exports.typed_func_id(instance.exports.i31_null());
+instance.exports.typed_func_id(instance.exports.struct_null());
+// We cannot roundtrip a struct as typed function.
+assertThrows(
+ () => instance.exports.typed_func_id(instance.exports.struct_producer()),
+ TypeError, 'type incompatibility when transforming from/to JS');
+
+// We can roundtrip a func.
+instance.exports.func_id(instance.exports.func_producer());
+// We can roundtrip any null as func.
+instance.exports.func_id(instance.exports.i31_null());
+instance.exports.func_id(instance.exports.struct_null());
+// We cannot roundtrip an i31 as func.
+assertThrows(
+ () => instance.exports.func_id(instance.exports.i31_producer()), TypeError,
+ 'type incompatibility when transforming from/to JS');
+
+// We cannot directly roundtrip structs or arrays.
+// TODO(7748): Switch these tests once we can.
+assertThrows(
+ () => instance.exports.raw_struct_id(instance.exports.struct_producer()),
+ TypeError, 'type incompatibility when transforming from/to JS');
+assertThrows(
+ () => instance.exports.raw_array_id(instance.exports.array_producer()),
+ TypeError, 'type incompatibility when transforming from/to JS');
diff --git a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
index b5021a313c9..d3ae3a9b4ad 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
@@ -92,13 +92,13 @@ let kPassiveWithElements = 5;
let kDeclarativeWithElements = 7;
// Function declaration flags
-let kDeclFunctionName = 0x01;
+let kDeclFunctionName = 0x01;
let kDeclFunctionImport = 0x02;
let kDeclFunctionLocals = 0x04;
let kDeclFunctionExport = 0x08;
// Value types and related
-let kWasmStmt = 0x40;
+let kWasmVoid = 0x40;
let kWasmI32 = 0x7f;
let kWasmI64 = 0x7e;
let kWasmF32 = 0x7d;
@@ -107,19 +107,28 @@ let kWasmS128 = 0x7b;
let kWasmI8 = 0x7a;
let kWasmI16 = 0x79;
let kWasmFuncRef = 0x70;
-let kWasmAnyFunc = kWasmFuncRef; // Alias named as in the JS API spec
+let kWasmAnyFunc = kWasmFuncRef; // Alias named as in the JS API spec
let kWasmExternRef = 0x6f;
let kWasmAnyRef = 0x6e;
let kWasmEqRef = 0x6d;
let kWasmOptRef = 0x6c;
let kWasmRef = 0x6b;
-function wasmOptRefType(index) { return {opcode: kWasmOptRef, index: index}; }
-function wasmRefType(index) { return {opcode: kWasmRef, index: index}; }
+function wasmOptRefType(index) {
+ return {opcode: kWasmOptRef, index: index};
+}
+function wasmRefType(index) {
+ return {opcode: kWasmRef, index: index};
+}
let kWasmI31Ref = 0x6a;
-let kWasmRtt = 0x69;
+let kWasmRttWithDepth = 0x69;
function wasmRtt(index, depth) {
- return {opcode: kWasmRtt, index: index, depth: depth};
+ return { opcode: kWasmRttWithDepth, index: index, depth: depth };
}
+let kWasmRtt = 0x68;
+function wasmRttNoDepth(index) {
+ return { opcode: kWasmRtt, index: index };
+}
+let kWasmDataRef = 0x67;
let kExternalFunction = 0;
let kExternalTable = 1;
@@ -400,7 +409,10 @@ const kWasmOpcodes = {
'I64SExtendI32': 0xc4,
'RefNull': 0xd0,
'RefIsNull': 0xd1,
- 'RefFunc': 0xd2
+ 'RefFunc': 0xd2,
+ 'RefAsNonNull': 0xd3,
+ 'BrOnNull': 0xd4,
+ 'RefEq': 0xd5,
};
function defineWasmOpcode(name, value) {
@@ -451,6 +463,15 @@ let kExprRttSub = 0x31;
let kExprRefTest = 0x40;
let kExprRefCast = 0x41;
let kExprBrOnCast = 0x42;
+let kExprRefIsFunc = 0x50;
+let kExprRefIsData = 0x51;
+let kExprRefIsI31 = 0x52;
+let kExprRefAsFunc = 0x58;
+let kExprRefAsData = 0x59;
+let kExprRefAsI31 = 0x5a;
+let kExprBrOnFunc = 0x60;
+let kExprBrOnData = 0x61;
+let kExprBrOnI31 = 0x62;
// Numeric opcodes.
let kExprI32SConvertSatF32 = 0x00;
@@ -554,23 +575,27 @@ let kExprS128Load16Splat = 0x08;
let kExprS128Load32Splat = 0x09;
let kExprS128Load64Splat = 0x0a;
let kExprS128StoreMem = 0x0b;
-
let kExprS128Const = 0x0c;
let kExprI8x16Shuffle = 0x0d;
-
let kExprI8x16Swizzle = 0x0e;
+
let kExprI8x16Splat = 0x0f;
let kExprI16x8Splat = 0x10;
let kExprI32x4Splat = 0x11;
let kExprI64x2Splat = 0x12;
let kExprF32x4Splat = 0x13;
let kExprF64x2Splat = 0x14;
+let kExprI8x16ExtractLaneS = 0x15;
+let kExprI8x16ExtractLaneU = 0x16;
let kExprI8x16ReplaceLane = 0x17;
let kExprI16x8ExtractLaneS = 0x18;
+let kExprI16x8ExtractLaneU = 0x19;
let kExprI16x8ReplaceLane = 0x1a;
let kExprI32x4ExtractLane = 0x1b;
let kExprI32x4ReplaceLane = 0x1c;
+let kExprI64x2ExtractLane = 0x1d;
let kExprI64x2ReplaceLane = 0x1e;
+let kExprF32x4ExtractLane = 0x1f;
let kExprF32x4ReplaceLane = 0x20;
let kExprF64x2ExtractLane = 0x21;
let kExprF64x2ReplaceLane = 0x22;
@@ -622,12 +647,30 @@ let kExprS128AndNot = 0x4f;
let kExprS128Or = 0x50;
let kExprS128Xor = 0x51;
let kExprS128Select = 0x52;
+let kExprV128AnyTrue = 0x53;
+let kExprS128Load8Lane = 0x54;
+let kExprS128Load16Lane = 0x55;
+let kExprS128Load32Lane = 0x56;
+let kExprS128Load64Lane = 0x57;
+let kExprS128Store8Lane = 0x58;
+let kExprS128Store16Lane = 0x59;
+let kExprS128Store32Lane = 0x5a;
+let kExprS128Store64Lane = 0x5b;
+let kExprS128Load32Zero = 0x5c;
+let kExprS128Load64Zero = 0x5d;
+let kExprF32x4DemoteF64x2Zero = 0x5e;
+let kExprF64x2PromoteLowF32x4 = 0x5f;
let kExprI8x16Abs = 0x60;
let kExprI8x16Neg = 0x61;
-let kExprV128AnyTrue = 0x62;
-let kExprV8x16AllTrue = 0x63;
+let kExprI8x16Popcnt = 0x62;
+let kExprI8x16AllTrue = 0x63;
+let kExprI8x16BitMask = 0x64;
let kExprI8x16SConvertI16x8 = 0x65;
let kExprI8x16UConvertI16x8 = 0x66;
+let kExprF32x4Ceil = 0x67;
+let kExprF32x4Floor = 0x68;
+let kExprF32x4Trunc = 0x69;
+let kExprF32x4NearestInt = 0x6a;
let kExprI8x16Shl = 0x6b;
let kExprI8x16ShrS = 0x6c;
let kExprI8x16ShrU = 0x6d;
@@ -637,14 +680,23 @@ let kExprI8x16AddSatU = 0x70;
let kExprI8x16Sub = 0x71;
let kExprI8x16SubSatS = 0x72;
let kExprI8x16SubSatU = 0x73;
+let kExprF64x2Ceil = 0x74;
+let kExprF64x2Floor = 0x75;
let kExprI8x16MinS = 0x76;
let kExprI8x16MinU = 0x77;
let kExprI8x16MaxS = 0x78;
let kExprI8x16MaxU = 0x79;
+let kExprF64x2Trunc = 0x7a;
let kExprI8x16RoundingAverageU = 0x7b;
+let kExprI16x8ExtAddPairwiseI8x16S = 0x7c;
+let kExprI16x8ExtAddPairwiseI8x16U = 0x7d;
+let kExprI32x4ExtAddPairwiseI16x8S = 0x7e;
+let kExprI32x4ExtAddPairwiseI16x8U = 0x7f;
let kExprI16x8Abs = 0x80;
let kExprI16x8Neg = 0x81;
-let kExprV16x8AllTrue = 0x83;
+let kExprI16x8Q15MulRSatS = 0x82;
+let kExprI16x8AllTrue = 0x83;
+let kExprI16x8BitMask = 0x84;
let kExprI16x8SConvertI32x4 = 0x85;
let kExprI16x8UConvertI32x4 = 0x86;
let kExprI16x8SConvertI8x16Low = 0x87;
@@ -660,15 +712,21 @@ let kExprI16x8AddSatU = 0x90;
let kExprI16x8Sub = 0x91;
let kExprI16x8SubSatS = 0x92;
let kExprI16x8SubSatU = 0x93;
+let kExprF64x2NearestInt = 0x94;
let kExprI16x8Mul = 0x95;
let kExprI16x8MinS = 0x96;
let kExprI16x8MinU = 0x97;
let kExprI16x8MaxS = 0x98;
let kExprI16x8MaxU = 0x99;
let kExprI16x8RoundingAverageU = 0x9b;
+let kExprI16x8ExtMulLowI8x16S = 0x9c;
+let kExprI16x8ExtMulHighI8x16S = 0x9d;
+let kExprI16x8ExtMulLowI8x16U = 0x9e;
+let kExprI16x8ExtMulHighI8x16U = 0x9f;
let kExprI32x4Abs = 0xa0;
let kExprI32x4Neg = 0xa1;
-let kExprV32x4AllTrue = 0xa3;
+let kExprI32x4AllTrue = 0xa3;
+let kExprI32x4BitMask = 0xa4;
let kExprI32x4SConvertI16x8Low = 0xa7;
let kExprI32x4SConvertI16x8High = 0xa8;
let kExprI32x4UConvertI16x8Low = 0xa9;
@@ -683,14 +741,35 @@ let kExprI32x4MinS = 0xb6;
let kExprI32x4MinU = 0xb7;
let kExprI32x4MaxS = 0xb8;
let kExprI32x4MaxU = 0xb9;
+let kExprI32x4DotI16x8S = 0xba;
+let kExprI32x4ExtMulLowI16x8S = 0xbc;
+let kExprI32x4ExtMulHighI16x8S = 0xbd;
+let kExprI32x4ExtMulLowI16x8U = 0xbe;
+let kExprI32x4ExtMulHighI16x8U = 0xbf;
+let kExprI64x2Abs = 0xc0;
let kExprI64x2Neg = 0xc1;
+let kExprI64x2AllTrue = 0xc3;
+let kExprI64x2BitMask = 0xc4;
+let kExprI64x2SConvertI32x4Low = 0xc7;
+let kExprI64x2SConvertI32x4High = 0xc8;
+let kExprI64x2UConvertI32x4Low = 0xc9;
+let kExprI64x2UConvertI32x4High = 0xca;
let kExprI64x2Shl = 0xcb;
let kExprI64x2ShrS = 0xcc;
let kExprI64x2ShrU = 0xcd;
let kExprI64x2Add = 0xce;
let kExprI64x2Sub = 0xd1;
let kExprI64x2Mul = 0xd5;
-let kExprI64x2ExtMulHighI32x4U = 0xd7;
+let kExprI64x2Eq = 0xd6;
+let kExprI64x2Ne = 0xd7;
+let kExprI64x2LtS = 0xd8;
+let kExprI64x2GtS = 0xd9;
+let kExprI64x2LeS = 0xda;
+let kExprI64x2GeS = 0xdb;
+let kExprI64x2ExtMulLowI32x4S = 0xdc;
+let kExprI64x2ExtMulHighI32x4S = 0xdd;
+let kExprI64x2ExtMulLowI32x4U = 0xde;
+let kExprI64x2ExtMulHighI32x4U = 0xdf;
let kExprF32x4Abs = 0xe0;
let kExprF32x4Neg = 0xe1;
let kExprF32x4Sqrt = 0xe3;
@@ -700,6 +779,8 @@ let kExprF32x4Mul = 0xe6;
let kExprF32x4Div = 0xe7;
let kExprF32x4Min = 0xe8;
let kExprF32x4Max = 0xe9;
+let kExprF32x4Pmin = 0xea;
+let kExprF32x4Pmax = 0xeb;
let kExprF64x2Abs = 0xec;
let kExprF64x2Neg = 0xed;
let kExprF64x2Sqrt = 0xef;
@@ -709,10 +790,16 @@ let kExprF64x2Mul = 0xf2;
let kExprF64x2Div = 0xf3;
let kExprF64x2Min = 0xf4;
let kExprF64x2Max = 0xf5;
+let kExprF64x2Pmin = 0xf6;
+let kExprF64x2Pmax = 0xf7;
let kExprI32x4SConvertF32x4 = 0xf8;
let kExprI32x4UConvertF32x4 = 0xf9;
let kExprF32x4SConvertI32x4 = 0xfa;
let kExprF32x4UConvertI32x4 = 0xfb;
+let kExprI32x4TruncSatF64x2SZero = 0xfc;
+let kExprI32x4TruncSatF64x2UZero = 0xfd;
+let kExprF64x2ConvertLowI32x4S = 0xfe;
+let kExprF64x2ConvertLowI32x4U = 0xff;
// Compilation hint constants.
let kCompilationHintStrategyDefault = 0x00;
@@ -723,32 +810,32 @@ let kCompilationHintTierDefault = 0x00;
let kCompilationHintTierBaseline = 0x01;
let kCompilationHintTierOptimized = 0x02;
-let kTrapUnreachable = 0;
-let kTrapMemOutOfBounds = 1;
-let kTrapDivByZero = 2;
-let kTrapDivUnrepresentable = 3;
-let kTrapRemByZero = 4;
+let kTrapUnreachable = 0;
+let kTrapMemOutOfBounds = 1;
+let kTrapDivByZero = 2;
+let kTrapDivUnrepresentable = 3;
+let kTrapRemByZero = 4;
let kTrapFloatUnrepresentable = 5;
-let kTrapTableOutOfBounds = 6;
-let kTrapFuncSigMismatch = 7;
-let kTrapUnalignedAccess = 8;
-let kTrapDataSegmentDropped = 9;
-let kTrapElemSegmentDropped = 10;
-let kTrapRethrowNull = 11;
+let kTrapTableOutOfBounds = 6;
+let kTrapFuncSigMismatch = 7;
+let kTrapUnalignedAccess = 8;
+let kTrapDataSegmentDropped = 9;
+let kTrapElemSegmentDropped = 10;
+let kTrapRethrowNull = 11;
let kTrapMsgs = [
- "unreachable",
- "memory access out of bounds",
- "divide by zero",
- "divide result unrepresentable",
- "remainder by zero",
- "float unrepresentable in integer range",
- "table index is out of bounds",
- "function signature mismatch",
- "operation does not support unaligned accesses",
- "data segment has been dropped",
- "element segment has been dropped",
- "rethrowing null value"
+ 'unreachable',
+ 'memory access out of bounds',
+ 'divide by zero',
+ 'divide result unrepresentable',
+ 'remainder by zero',
+ 'float unrepresentable in integer range',
+ 'table index is out of bounds',
+ 'function signature mismatch',
+ 'operation does not support unaligned accesses',
+ 'data segment has been dropped',
+ 'element segment has been dropped',
+ 'rethrowing null value'
];
function assertTraps(trap, code) {
@@ -808,7 +895,7 @@ class Binary {
}
this.buffer[this.length++] = v | 0x80;
}
- throw new Error("Leb value exceeds maximum length of " + max_len);
+ throw new Error('Leb value exceeds maximum length of ' + max_len);
}
emit_u32v(val) {
@@ -843,8 +930,9 @@ class Binary {
}
emit_type(type) {
- if ((typeof type) == "number") this.emit_u8(type);
- else {
+ if ((typeof type) == 'number') {
+ this.emit_u8(type);
+ } else {
this.emit_u8(type.opcode);
if ('depth' in type) this.emit_u8(type.depth);
this.emit_u32v(type.index);
@@ -887,7 +975,7 @@ class WasmFunctionBuilder {
numLocalNames() {
let num_local_names = 0;
for (let loc_name of this.local_names) {
- if (typeof loc_name == "string") ++num_local_names;
+ if (typeof loc_name == 'string') ++num_local_names;
}
return num_local_names;
}
@@ -909,8 +997,10 @@ class WasmFunctionBuilder {
addBody(body) {
for (let b of body) {
- if (typeof b !== 'number' || (b & (~0xFF)) !== 0 )
- throw new Error('invalid body (entries must be 8 bit numbers): ' + body);
+ if (typeof b !== 'number' || (b & (~0xFF)) !== 0) {
+ throw new Error(
+ 'invalid body (entries must be 8 bit numbers): ' + body);
+ }
}
this.body = body.slice();
// Automatically add the end for the function block to the body.
@@ -954,37 +1044,39 @@ class WasmGlobalBuilder {
}
exportAs(name) {
- this.module.exports.push({name: name, kind: kExternalGlobal,
- index: this.index});
+ this.module.exports.push(
+ {name: name, kind: kExternalGlobal, index: this.index});
return this;
}
}
class WasmTableBuilder {
- constructor(module, type, initial_size, max_size) {
+ constructor(module, type, initial_size, max_size, init_func_index) {
this.module = module;
this.type = type;
this.initial_size = initial_size;
this.has_max = max_size != undefined;
this.max_size = max_size;
+ this.init_func_index = init_func_index;
+ this.has_init = init_func_index != undefined;
}
exportAs(name) {
- this.module.exports.push({name: name, kind: kExternalTable,
- index: this.index});
+ this.module.exports.push(
+ {name: name, kind: kExternalTable, index: this.index});
return this;
}
}
function makeField(type, mutability) {
- assertEquals("boolean", typeof mutability,
- "field mutability must be boolean");
+ assertEquals(
+ 'boolean', typeof mutability, 'field mutability must be boolean');
return {type: type, mutability: mutability};
}
class WasmStruct {
constructor(fields) {
- assertTrue(Array.isArray(fields), "struct fields must be an array");
+ assertTrue(Array.isArray(fields), 'struct fields must be an array');
this.fields = fields;
}
}
@@ -1072,8 +1164,8 @@ class WasmModuleBuilder {
addType(type) {
this.types.push(type);
- var pl = type.params.length; // should have params
- var rl = type.results.length; // should have results
+ var pl = type.params.length; // should have params
+ var rl = type.results.length; // should have results
return this.types.length - 1;
}
@@ -1094,19 +1186,21 @@ class WasmModuleBuilder {
return glob;
}
- addTable(type, initial_size, max_size = undefined) {
+ addTable(
+ type, initial_size, max_size = undefined, init_func_index = undefined) {
if (type == kWasmI32 || type == kWasmI64 || type == kWasmF32 ||
- type == kWasmF64 || type == kWasmS128 || type == kWasmStmt) {
+ type == kWasmF64 || type == kWasmS128 || type == kWasmVoid) {
throw new Error('Tables must be of a reference type');
}
- let table = new WasmTableBuilder(this, type, initial_size, max_size);
+ let table = new WasmTableBuilder(
+ this, type, initial_size, max_size, init_func_index);
table.index = this.tables.length + this.num_imported_tables;
this.tables.push(table);
return table;
}
addException(type) {
- let type_index = (typeof type) == "number" ? type : this.addType(type);
+ let type_index = (typeof type) == 'number' ? type : this.addType(type);
let except_index = this.exceptions.length + this.num_imported_exceptions;
this.exceptions.push(type_index);
return except_index;
@@ -1114,10 +1208,12 @@ class WasmModuleBuilder {
addFunction(name, type, arg_names) {
arg_names = arg_names || [];
- let type_index = (typeof type) == "number" ? type : this.addType(type);
+ let type_index = (typeof type) == 'number' ? type : this.addType(type);
let num_args = this.types[type_index].params.length;
- if (num_args < arg_names.length) throw new Error("too many arg names provided");
- if (num_args > arg_names.length) arg_names.push(num_args - arg_names.length);
+ if (num_args < arg_names.length)
+ throw new Error('too many arg names provided');
+ if (num_args > arg_names.length)
+ arg_names.push(num_args - arg_names.length);
let func = new WasmFunctionBuilder(this, name, type_index, arg_names);
func.index = this.functions.length + this.num_imported_funcs;
this.functions.push(func);
@@ -1128,9 +1224,13 @@ class WasmModuleBuilder {
if (this.functions.length != 0) {
throw new Error('Imported functions must be declared before local ones');
}
- let type_index = (typeof type) == "number" ? type : this.addType(type);
- this.imports.push({module: module, name: name, kind: kExternalFunction,
- type_index: type_index});
+ let type_index = (typeof type) == 'number' ? type : this.addType(type);
+ this.imports.push({
+ module: module,
+ name: name,
+ kind: kExternalFunction,
+ type_index: type_index
+ });
return this.num_imported_funcs++;
}
@@ -1138,15 +1238,26 @@ class WasmModuleBuilder {
if (this.globals.length != 0) {
throw new Error('Imported globals must be declared before local ones');
}
- let o = {module: module, name: name, kind: kExternalGlobal, type: type,
- mutable: mutable};
+ let o = {
+ module: module,
+ name: name,
+ kind: kExternalGlobal,
+ type: type,
+ mutable: mutable
+ };
this.imports.push(o);
return this.num_imported_globals++;
}
addImportedMemory(module, name, initial = 0, maximum, shared) {
- let o = {module: module, name: name, kind: kExternalMemory,
- initial: initial, maximum: maximum, shared: shared};
+ let o = {
+ module: module,
+ name: name,
+ kind: kExternalMemory,
+ initial: initial,
+ maximum: maximum,
+ shared: shared
+ };
this.imports.push(o);
return this;
}
@@ -1155,8 +1266,14 @@ class WasmModuleBuilder {
if (this.tables.length != 0) {
throw new Error('Imported tables must be declared before local ones');
}
- let o = {module: module, name: name, kind: kExternalTable, initial: initial,
- maximum: maximum, type: type || kWasmFuncRef};
+ let o = {
+ module: module,
+ name: name,
+ kind: kExternalTable,
+ initial: initial,
+ maximum: maximum,
+ type: type || kWasmFuncRef
+ };
this.imports.push(o);
return this.num_imported_tables++;
}
@@ -1165,9 +1282,13 @@ class WasmModuleBuilder {
if (this.exceptions.length != 0) {
throw new Error('Imported exceptions must be declared before local ones');
}
- let type_index = (typeof type) == "number" ? type : this.addType(type);
- let o = {module: module, name: name, kind: kExternalException,
- type_index: type_index};
+ let type_index = (typeof type) == 'number' ? type : this.addType(type);
+ let o = {
+ module: module,
+ name: name,
+ kind: kExternalException,
+ type_index: type_index
+ };
this.imports.push(o);
return this.num_imported_exceptions++;
}
@@ -1181,7 +1302,7 @@ class WasmModuleBuilder {
if (index == undefined && kind != kExternalTable &&
kind != kExternalMemory) {
throw new Error(
- 'Index for exports other than tables/memories must be provided');
+ 'Index for exports other than tables/memories must be provided');
}
if (index !== undefined && (typeof index) != 'number') {
throw new Error('Index for exports must be a number')
@@ -1191,8 +1312,11 @@ class WasmModuleBuilder {
}
setCompilationHint(strategy, baselineTier, topTier, index) {
- this.compilation_hints[index] = {strategy: strategy, baselineTier:
- baselineTier, topTier: topTier};
+ this.compilation_hints[index] = {
+ strategy: strategy,
+ baselineTier: baselineTier,
+ topTier: topTier
+ };
return this;
}
@@ -1256,7 +1380,7 @@ class WasmModuleBuilder {
setTableBounds(min, max = undefined) {
if (this.tables.length != 0) {
- throw new Error("The table bounds of table '0' have already been set.");
+ throw new Error('The table bounds of table \'0\' have already been set.');
}
this.addTable(kWasmAnyFunc, min, max);
return this;
@@ -1276,7 +1400,7 @@ class WasmModuleBuilder {
// Add type section
if (wasm.types.length > 0) {
- if (debug) print("emitting types @ " + binary.length);
+ if (debug) print('emitting types @ ' + binary.length);
binary.emit_section(kTypeSectionCode, section => {
section.emit_u32v(wasm.types.length);
for (let type of wasm.types) {
@@ -1290,7 +1414,7 @@ class WasmModuleBuilder {
} else if (type instanceof WasmArray) {
section.emit_u8(kWasmArrayTypeForm);
section.emit_type(type.type);
- section.emit_u8(1); // Only mutable arrays supported currently.
+ section.emit_u8(1); // Only mutable arrays supported currently.
} else {
section.emit_u8(kWasmFunctionTypeForm);
section.emit_u32v(type.params.length);
@@ -1308,7 +1432,7 @@ class WasmModuleBuilder {
// Add imports section
if (wasm.imports.length > 0) {
- if (debug) print("emitting imports @ " + binary.length);
+ if (debug) print('emitting imports @ ' + binary.length);
binary.emit_section(kImportSectionCode, section => {
section.emit_u32v(wasm.imports.length);
for (let imp of wasm.imports) {
@@ -1321,26 +1445,26 @@ class WasmModuleBuilder {
section.emit_type(imp.type);
section.emit_u8(imp.mutable);
} else if (imp.kind == kExternalMemory) {
- var has_max = (typeof imp.maximum) != "undefined";
- var is_shared = (typeof imp.shared) != "undefined";
+ var has_max = (typeof imp.maximum) != 'undefined';
+ var is_shared = (typeof imp.shared) != 'undefined';
if (is_shared) {
- section.emit_u8(has_max ? 3 : 2); // flags
+ section.emit_u8(has_max ? 3 : 2); // flags
} else {
- section.emit_u8(has_max ? 1 : 0); // flags
+ section.emit_u8(has_max ? 1 : 0); // flags
}
- section.emit_u32v(imp.initial); // initial
- if (has_max) section.emit_u32v(imp.maximum); // maximum
+ section.emit_u32v(imp.initial); // initial
+ if (has_max) section.emit_u32v(imp.maximum); // maximum
} else if (imp.kind == kExternalTable) {
section.emit_type(imp.type);
- var has_max = (typeof imp.maximum) != "undefined";
- section.emit_u8(has_max ? 1 : 0); // flags
- section.emit_u32v(imp.initial); // initial
- if (has_max) section.emit_u32v(imp.maximum); // maximum
+ var has_max = (typeof imp.maximum) != 'undefined';
+ section.emit_u8(has_max ? 1 : 0); // flags
+ section.emit_u32v(imp.initial); // initial
+ if (has_max) section.emit_u32v(imp.maximum); // maximum
} else if (imp.kind == kExternalException) {
section.emit_u32v(kExceptionAttribute);
section.emit_u32v(imp.type_index);
} else {
- throw new Error("unknown/unsupported import kind " + imp.kind);
+ throw new Error('unknown/unsupported import kind ' + imp.kind);
}
}
});
@@ -1348,7 +1472,7 @@ class WasmModuleBuilder {
// Add functions declarations
if (wasm.functions.length > 0) {
- if (debug) print("emitting function decls @ " + binary.length);
+ if (debug) print('emitting function decls @ ' + binary.length);
binary.emit_section(kFunctionSectionCode, section => {
section.emit_u32v(wasm.functions.length);
for (let func of wasm.functions) {
@@ -1359,7 +1483,7 @@ class WasmModuleBuilder {
// Add table section
if (wasm.tables.length > 0) {
- if (debug) print ("emitting tables @ " + binary.length);
+ if (debug) print('emitting tables @ ' + binary.length);
binary.emit_section(kTableSectionCode, section => {
section.emit_u32v(wasm.tables.length);
for (let table of wasm.tables) {
@@ -1367,13 +1491,18 @@ class WasmModuleBuilder {
section.emit_u8(table.has_max);
section.emit_u32v(table.initial_size);
if (table.has_max) section.emit_u32v(table.max_size);
+ if (table.has_init) {
+ section.emit_u8(kExprRefFunc);
+ section.emit_u32v(table.init_func_index);
+ section.emit_u8(kExprEnd);
+ }
}
});
}
// Add memory section
if (wasm.memory !== undefined) {
- if (debug) print("emitting memory @ " + binary.length);
+ if (debug) print('emitting memory @ ' + binary.length);
binary.emit_section(kMemorySectionCode, section => {
section.emit_u8(1); // one memory entry
const has_max = wasm.memory.max !== undefined;
@@ -1398,7 +1527,7 @@ class WasmModuleBuilder {
// Add event section.
if (wasm.exceptions.length > 0) {
- if (debug) print("emitting events @ " + binary.length);
+ if (debug) print('emitting events @ ' + binary.length);
binary.emit_section(kExceptionSectionCode, section => {
section.emit_u32v(wasm.exceptions.length);
for (let type_index of wasm.exceptions) {
@@ -1410,55 +1539,55 @@ class WasmModuleBuilder {
// Add global section.
if (wasm.globals.length > 0) {
- if (debug) print ("emitting globals @ " + binary.length);
+ if (debug) print('emitting globals @ ' + binary.length);
binary.emit_section(kGlobalSectionCode, section => {
section.emit_u32v(wasm.globals.length);
for (let global of wasm.globals) {
section.emit_type(global.type);
section.emit_u8(global.mutable);
- if ((typeof global.init_index) == "undefined") {
+ if ((typeof global.init_index) == 'undefined') {
// Emit a constant initializer.
switch (global.type) {
- case kWasmI32:
- section.emit_u8(kExprI32Const);
- section.emit_u32v(global.init);
- break;
- case kWasmI64:
- section.emit_u8(kExprI64Const);
- section.emit_u64v(global.init);
- break;
- case kWasmF32:
- section.emit_bytes(wasmF32Const(global.init));
- break;
- case kWasmF64:
- section.emit_bytes(wasmF64Const(global.init));
- break;
- case kWasmS128:
- section.emit_bytes(wasmS128Const(global.init));
- break;
- case kWasmExternRef:
- section.emit_u8(kExprRefNull);
- section.emit_u8(kWasmExternRef);
- assertEquals(global.function_index, undefined);
- break;
- case kWasmAnyFunc:
- if (global.function_index !== undefined) {
- section.emit_u8(kExprRefFunc);
- section.emit_u32v(global.function_index);
- } else {
- section.emit_u8(kExprRefNull);
- section.emit_u8(kWasmAnyFunc);
- }
- break;
- default:
- if (global.function_index !== undefined) {
- section.emit_u8(kExprRefFunc);
- section.emit_u32v(global.function_index);
- } else {
+ case kWasmI32:
+ section.emit_u8(kExprI32Const);
+ section.emit_u32v(global.init);
+ break;
+ case kWasmI64:
+ section.emit_u8(kExprI64Const);
+ section.emit_u64v(global.init);
+ break;
+ case kWasmF32:
+ section.emit_bytes(wasmF32Const(global.init));
+ break;
+ case kWasmF64:
+ section.emit_bytes(wasmF64Const(global.init));
+ break;
+ case kWasmS128:
+ section.emit_bytes(wasmS128Const(global.init));
+ break;
+ case kWasmExternRef:
section.emit_u8(kExprRefNull);
- section.emit_u32v(global.type.index);
- }
- break;
+ section.emit_u8(kWasmExternRef);
+ assertEquals(global.function_index, undefined);
+ break;
+ case kWasmAnyFunc:
+ if (global.function_index !== undefined) {
+ section.emit_u8(kExprRefFunc);
+ section.emit_u32v(global.function_index);
+ } else {
+ section.emit_u8(kExprRefNull);
+ section.emit_u8(kWasmAnyFunc);
+ }
+ break;
+ default:
+ if (global.function_index !== undefined) {
+ section.emit_u8(kExprRefFunc);
+ section.emit_u32v(global.function_index);
+ } else {
+ section.emit_u8(kExprRefNull);
+ section.emit_u32v(global.type.index);
+ }
+ break;
}
} else {
// Emit a global-index initializer.
@@ -1474,7 +1603,7 @@ class WasmModuleBuilder {
var mem_export = (wasm.memory !== undefined && wasm.memory.exported);
var exports_count = wasm.exports.length + (mem_export ? 1 : 0);
if (exports_count > 0) {
- if (debug) print("emitting exports @ " + binary.length);
+ if (debug) print('emitting exports @ ' + binary.length);
binary.emit_section(kExportSectionCode, section => {
section.emit_u32v(exports_count);
for (let exp of wasm.exports) {
@@ -1483,7 +1612,7 @@ class WasmModuleBuilder {
section.emit_u32v(exp.index);
}
if (mem_export) {
- section.emit_string("memory");
+ section.emit_string('memory');
section.emit_u8(kExternalMemory);
section.emit_u8(0);
}
@@ -1492,7 +1621,7 @@ class WasmModuleBuilder {
// Add start function section.
if (wasm.start_index !== undefined) {
- if (debug) print("emitting start function @ " + binary.length);
+ if (debug) print('emitting start function @ ' + binary.length);
binary.emit_section(kStartSectionCode, section => {
section.emit_u32v(wasm.start_index);
});
@@ -1500,7 +1629,7 @@ class WasmModuleBuilder {
// Add element segments
if (wasm.element_segments.length > 0) {
- if (debug) print("emitting element segments @ " + binary.length);
+ if (debug) print('emitting element segments @ ' + binary.length);
binary.emit_section(kElementSectionCode, section => {
var inits = wasm.element_segments;
section.emit_u32v(inits.length);
@@ -1570,7 +1699,7 @@ class WasmModuleBuilder {
// If there are compilation hints add a custom section 'compilationHints'
// after the function section and before the code section.
if (wasm.compilation_hints.length > 0) {
- if (debug) print("emitting compilation hints @ " + binary.length);
+ if (debug) print('emitting compilation hints @ ' + binary.length);
// Build custom section payload.
let payloadBinary = new Binary();
let implicit_compilation_hints_count = wasm.functions.length;
@@ -1585,18 +1714,18 @@ class WasmModuleBuilder {
for (let i = 0; i < implicit_compilation_hints_count; i++) {
let index = wasm.num_imported_funcs + i;
var hintByte;
- if(index in wasm.compilation_hints) {
+ if (index in wasm.compilation_hints) {
let hint = wasm.compilation_hints[index];
- hintByte = hint.strategy | (hint.baselineTier << 2) |
- (hint.topTier << 4);
- } else{
+ hintByte =
+ hint.strategy | (hint.baselineTier << 2) | (hint.topTier << 4);
+ } else {
hintByte = defaultHintByte;
}
payloadBinary.emit_u8(hintByte);
}
// Finalize as custom section.
- let name = "compilationHints";
+ let name = 'compilationHints';
let bytes = this.createCustomSection(name, payloadBinary.trunc_buffer());
binary.emit_bytes(bytes);
}
@@ -1604,7 +1733,7 @@ class WasmModuleBuilder {
// Add function bodies.
if (wasm.functions.length > 0) {
// emit function bodies
- if (debug) print("emitting code @ " + binary.length);
+ if (debug) print('emitting code @ ' + binary.length);
let section_length = 0;
binary.emit_section(kCodeSectionCode, section => {
section.emit_u32v(wasm.functions.length);
@@ -1633,7 +1762,7 @@ class WasmModuleBuilder {
// Add data segments.
if (wasm.data_segments.length > 0) {
- if (debug) print("emitting data segments @ " + binary.length);
+ if (debug) print('emitting data segments @ ' + binary.length);
binary.emit_section(kDataSectionCode, section => {
section.emit_u32v(wasm.data_segments.length);
for (let seg of wasm.data_segments) {
@@ -1660,7 +1789,7 @@ class WasmModuleBuilder {
// Add any explicitly added sections
for (let exp of wasm.explicit) {
- if (debug) print("emitting explicit @ " + binary.length);
+ if (debug) print('emitting explicit @ ' + binary.length);
binary.emit_bytes(exp);
}
@@ -1703,7 +1832,7 @@ class WasmModuleBuilder {
name_section.emit_u32v(func.numLocalNames());
let name_index = 0;
for (let i = 0; i < func.local_names.length; ++i) {
- if (typeof func.local_names[i] == "string") {
+ if (typeof func.local_names[i] == 'string') {
name_section.emit_u32v(name_index);
name_section.emit_string(func.local_names[i]);
name_index++;
@@ -1772,8 +1901,8 @@ function wasmF64Const(f) {
// Write in little-endian order at offset 0.
data_view.setFloat64(0, f, true);
return [
- kExprF64Const, byte_view[0], byte_view[1], byte_view[2],
- byte_view[3], byte_view[4], byte_view[5], byte_view[6], byte_view[7]
+ kExprF64Const, byte_view[0], byte_view[1], byte_view[2], byte_view[3],
+ byte_view[4], byte_view[5], byte_view[6], byte_view[7]
];
}
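Note: for the reworked emit_type in the hunks above, a rough sketch of the bytes produced for the reference and rtt helpers (indices are emitted as unsigned LEB, so these small example values fit in one byte):
// wasmOptRefType(5)  -> 0x6c, 0x05        (kWasmOptRef, index)
// wasmRefType(5)     -> 0x6b, 0x05        (kWasmRef, index)
// wasmRtt(3, 1)      -> 0x69, 0x01, 0x03  (kWasmRttWithDepth, depth, index)
// wasmRttNoDepth(3)  -> 0x68, 0x03        (kWasmRtt, index)
// Plain numeric value types such as kWasmI32 are still emitted as a single byte (0x7f).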
diff --git a/deps/v8/test/mkgrokdump/BUILD.gn b/deps/v8/test/mkgrokdump/BUILD.gn
index 67f8f98e7aa..1b06b87f972 100644
--- a/deps/v8/test/mkgrokdump/BUILD.gn
+++ b/deps/v8/test/mkgrokdump/BUILD.gn
@@ -19,7 +19,6 @@ v8_executable("mkgrokdump") {
"../..:v8",
"../..:v8_libbase",
"../..:v8_libplatform",
- "../..:v8_wrappers",
"//build/win:default_exe_manifest",
]
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index f31091f3d06..8cae693b583 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -578,12 +578,17 @@
'built-ins/String/prototype/at/*': [FAIL],
'built-ins/TypedArray/prototype/at/*': [FAIL],
- # Temporarily disabled until upstream tests are changed to use /d
- 'built-ins/RegExp/match-indices/*': [FAIL],
+ # http://crbug/v8/11530
+ 'built-ins/Function/internals/Call/class-ctor-realm': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=11411
- 'intl402/DateTimeFormat/prototype/formatRange/date-same-returns-single-date': [FAIL],
- 'intl402/DateTimeFormat/prototype/formatRangeToParts/date-same-returns-single-date': [FAIL],
+ # http://crbug/v8/11531
+ 'built-ins/RegExp/prototype/flags/get-order': [FAIL],
+
+ # http://crbug/v8/11532
+ 'language/expressions/object/dstr/object-rest-proxy-gopd-not-called-on-excluded-keys': [FAIL],
+
+ # http://crbug/v8/11533
+ 'language/statements/class/subclass/default-constructor-spread-override': [FAIL],
######################## NEEDS INVESTIGATION ###########################
@@ -734,8 +739,6 @@
'built-ins/ArrayBuffer/length-is-too-large-throws': [SKIP],
'built-ins/SharedArrayBuffer/allocation-limit': [SKIP],
'built-ins/SharedArrayBuffer/length-is-too-large-throws': [SKIP],
- # https://bugs.chromium.org/p/v8/issues/detail?id=11438
- 'intl402/DateTimeFormat/timezone-invalid' : [SKIP],
}], # asan == True or msan == True or tsan == True
['system == android', {
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index 24e02607d62..42aeac4723e 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -45,7 +45,6 @@ from testrunner.outproc import test262
# TODO(littledan): move the flag mapping into the status file
FEATURE_FLAGS = {
'Intl.DateTimeFormat-dayPeriod': '--harmony-intl-dateformat-day-period',
- 'String.prototype.replaceAll': '--harmony_string_replaceall',
'Symbol.prototype.description': '--harmony-symbol-description',
'FinalizationRegistry': '--harmony-weak-refs-with-cleanup-some',
'WeakRef': '--harmony-weak-refs-with-cleanup-some',
@@ -55,8 +54,6 @@ FEATURE_FLAGS = {
'regexp-match-indices': '--harmony-regexp-match-indices',
# https://github.com/tc39/test262/pull/2395
'regexp-named-groups': '--harmony-regexp-match-indices',
- 'logical-assignment-operators': '--harmony-logical-assignment',
- 'Atomics.waitAsync': '--harmony-atomics-waitasync',
}
SKIPPED_FEATURES = set([])
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index 1940dfa77e6..43858603dcb 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -10,7 +10,7 @@ if (is_fuchsia) {
cr_fuchsia_package("v8_unittests_pkg") {
testonly = true
binary = ":unittests"
- manifest = "//build/config/fuchsia/tests-with-exec.cmx"
+ manifest = "../../gni/v8.cmx"
package_name_override = "v8_unittests"
}
@@ -92,6 +92,7 @@ v8_source_set("cppgc_unittests_sources") {
"heap/cppgc/cross-thread-persistent-unittest.cc",
"heap/cppgc/custom-spaces-unittest.cc",
"heap/cppgc/ephemeron-pair-unittest.cc",
+ "heap/cppgc/explicit-management-unittest.cc",
"heap/cppgc/finalizer-trait-unittest.cc",
"heap/cppgc/free-list-unittest.cc",
"heap/cppgc/garbage-collected-unittest.cc",
@@ -193,7 +194,6 @@ v8_source_set("unittests_sources") {
sources = [
"../../test/common/assembler-tester.h",
- "../../test/common/wasm/wasm-macro-gen.h",
"../../testing/gmock-support.h",
"../../testing/gtest-support.h",
"api/access-check-unittest.cc",
@@ -226,6 +226,8 @@ v8_source_set("unittests_sources") {
"base/threaded-list-unittest.cc",
"base/utils/random-number-generator-unittest.cc",
"base/vlq-base64-unittest.cc",
+ "base/vlq-unittest.cc",
+ "codegen/aligned-slot-allocator-unittest.cc",
"codegen/code-stub-assembler-unittest.cc",
"codegen/code-stub-assembler-unittest.h",
"codegen/register-configuration-unittest.cc",
@@ -246,16 +248,17 @@ v8_source_set("unittests_sources") {
"compiler/constant-folding-reducer-unittest.cc",
"compiler/control-equivalence-unittest.cc",
"compiler/control-flow-optimizer-unittest.cc",
+ "compiler/csa-load-elimination-unittest.cc",
"compiler/dead-code-elimination-unittest.cc",
"compiler/decompression-optimizer-unittest.cc",
"compiler/diamond-unittest.cc",
"compiler/effect-control-linearizer-unittest.cc",
+ "compiler/frame-unittest.cc",
"compiler/graph-reducer-unittest.cc",
"compiler/graph-reducer-unittest.h",
"compiler/graph-trimmer-unittest.cc",
"compiler/graph-unittest.cc",
"compiler/graph-unittest.h",
- "compiler/int64-lowering-unittest.cc",
"compiler/js-call-reducer-unittest.cc",
"compiler/js-create-lowering-unittest.cc",
"compiler/js-intrinsic-lowering-unittest.cc",
@@ -308,7 +311,6 @@ v8_source_set("unittests_sources") {
"heap/heap-utils.cc",
"heap/heap-utils.h",
"heap/index-generator-unittest.cc",
- "heap/item-parallel-job-unittest.cc",
"heap/list-unittest.cc",
"heap/local-factory-unittest.cc",
"heap/local-heap-unittest.cc",
@@ -350,7 +352,6 @@ v8_source_set("unittests_sources") {
"logging/counters-unittest.cc",
"numbers/bigint-unittest.cc",
"numbers/conversions-unittest.cc",
- "objects/backing-store-unittest.cc",
"objects/object-unittest.cc",
"objects/osr-optimized-code-cache-unittest.cc",
"objects/value-serializer-unittest.cc",
@@ -381,21 +382,6 @@ v8_source_set("unittests_sources") {
"utils/locked-queue-unittest.cc",
"utils/utils-unittest.cc",
"utils/vector-unittest.cc",
- "wasm/control-transfer-unittest.cc",
- "wasm/decoder-unittest.cc",
- "wasm/function-body-decoder-unittest.cc",
- "wasm/leb-helper-unittest.cc",
- "wasm/loop-assignment-analysis-unittest.cc",
- "wasm/module-decoder-memory64-unittest.cc",
- "wasm/module-decoder-unittest.cc",
- "wasm/simd-shuffle-unittest.cc",
- "wasm/streaming-decoder-unittest.cc",
- "wasm/subtyping-unittest.cc",
- "wasm/wasm-code-manager-unittest.cc",
- "wasm/wasm-compiler-unittest.cc",
- "wasm/wasm-macro-gen-unittest.cc",
- "wasm/wasm-module-builder-unittest.cc",
- "wasm/wasm-module-sourcemap-unittest.cc",
"zone/zone-allocator-unittest.cc",
"zone/zone-chunk-list-unittest.cc",
"zone/zone-unittest.cc",
@@ -403,8 +389,27 @@ v8_source_set("unittests_sources") {
if (v8_enable_webassembly) {
sources += [
+ "../../test/common/wasm/wasm-macro-gen.h",
"asmjs/asm-scanner-unittest.cc",
"asmjs/asm-types-unittest.cc",
+ "compiler/int64-lowering-unittest.cc",
+ "objects/wasm-backing-store-unittest.cc",
+ "wasm/control-transfer-unittest.cc",
+ "wasm/decoder-unittest.cc",
+ "wasm/function-body-decoder-unittest.cc",
+ "wasm/leb-helper-unittest.cc",
+ "wasm/liftoff-register-unittests.cc",
+ "wasm/loop-assignment-analysis-unittest.cc",
+ "wasm/module-decoder-memory64-unittest.cc",
+ "wasm/module-decoder-unittest.cc",
+ "wasm/simd-shuffle-unittest.cc",
+ "wasm/streaming-decoder-unittest.cc",
+ "wasm/subtyping-unittest.cc",
+ "wasm/wasm-code-manager-unittest.cc",
+ "wasm/wasm-compiler-unittest.cc",
+ "wasm/wasm-macro-gen-unittest.cc",
+ "wasm/wasm-module-builder-unittest.cc",
+ "wasm/wasm-module-sourcemap-unittest.cc",
]
}
@@ -450,8 +455,10 @@ v8_source_set("unittests_sources") {
sources += [
"assembler/turbo-assembler-x64-unittest.cc",
"compiler/x64/instruction-selector-x64-unittest.cc",
- "wasm/trap-handler-x64-unittest.cc",
]
+ if (v8_enable_webassembly) {
+ sources += [ "wasm/trap-handler-x64-unittest.cc" ]
+ }
} else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
sources += [
"assembler/turbo-assembler-ppc-unittest.cc",
@@ -464,11 +471,11 @@ v8_source_set("unittests_sources") {
]
}
- if (is_posix) {
+ if (is_posix && v8_enable_webassembly) {
sources += [ "wasm/trap-handler-posix-unittest.cc" ]
}
- if (is_win) {
+ if (is_win && v8_enable_webassembly) {
sources += [ "wasm/trap-handler-win-unittest.cc" ]
}
@@ -484,14 +491,16 @@ v8_source_set("unittests_sources") {
"../..:v8_libbase",
"../..:v8_libplatform",
"../..:v8_shared_internal_headers",
- "../..:v8_wrappers",
- "../..:wasm_test_common",
"../../third_party/inspector_protocol:crdtp_test",
"//build/win:default_exe_manifest",
"//testing/gmock",
"//testing/gtest",
]
+ if (v8_enable_webassembly) {
+ deps += [ "../..:wasm_test_common" ]
+ }
+
if (is_win) {
# This warning is benignly triggered by the U16 and U32 macros in
# bytecode-utils.h.
diff --git a/deps/v8/test/unittests/api/access-check-unittest.cc b/deps/v8/test/unittests/api/access-check-unittest.cc
index cdcce68efd8..1216100b23a 100644
--- a/deps/v8/test/unittests/api/access-check-unittest.cc
+++ b/deps/v8/test/unittests/api/access-check-unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "include/v8.h"
+#include "src/debug/debug.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/deps/v8/test/unittests/base/logging-unittest.cc b/deps/v8/test/unittests/base/logging-unittest.cc
index 1146bd5d830..bbfcfd984b0 100644
--- a/deps/v8/test/unittests/base/logging-unittest.cc
+++ b/deps/v8/test/unittests/base/logging-unittest.cc
@@ -230,11 +230,26 @@ void operator<<(std::ostream& str, TestEnum6 val) {
TEST(LoggingDeathTest, OutputEnumWithOutputOperator) {
ASSERT_DEATH_IF_SUPPORTED(
([&] { CHECK_EQ(TEST_A, TEST_B); })(),
- FailureMessage("Check failed: TEST_A == TEST_B", "A", "B"));
+ FailureMessage("Check failed: TEST_A == TEST_B", "A (0)", "B (1)"));
ASSERT_DEATH_IF_SUPPORTED(
([&] { CHECK_GE(TestEnum6::TEST_C, TestEnum6::TEST_D); })(),
FailureMessage("Check failed: TestEnum6::TEST_C >= TestEnum6::TEST_D",
- "C", "D"));
+ "C (0)", "D (1)"));
+}
+
+enum TestEnum7 : uint8_t { A = 2, B = 7 };
+enum class TestEnum8 : int8_t { A, B };
+
+TEST(LoggingDeathTest, OutputSingleCharEnum) {
+ ASSERT_DEATH_IF_SUPPORTED(
+ ([&] { CHECK_EQ(TestEnum7::A, TestEnum7::B); })(),
+ FailureMessage("Check failed: TestEnum7::A == TestEnum7::B", "2", "7"));
+ ASSERT_DEATH_IF_SUPPORTED(
+ ([&] { CHECK_GT(TestEnum7::A, TestEnum7::B); })(),
+ FailureMessage("Check failed: TestEnum7::A > TestEnum7::B", "2", "7"));
+ ASSERT_DEATH_IF_SUPPORTED(
+ ([&] { CHECK_GE(TestEnum8::A, TestEnum8::B); })(),
+ FailureMessage("Check failed: TestEnum8::A >= TestEnum8::B", "0", "1"));
}
TEST(LoggingDeathTest, OutputLongValues) {
@@ -328,6 +343,12 @@ TEST(LoggingTest, LogFunctionPointers) {
}
#endif // defined(DEBUG)
+TEST(LoggingDeathTest, CheckChars) {
+ ASSERT_DEATH_IF_SUPPORTED(
+ ([&] { CHECK_EQ('a', 'b'); })(),
+ FailureMessage("Check failed: 'a' == 'b'", "'97'", "'98'"));
+}
+
} // namespace logging_unittest
} // namespace base
} // namespace v8
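
The updated expectations in logging-unittest.cc reflect that enum check failures now print the numeric value after the name, e.g. "A (0)" instead of "A". A generic way to produce that "name (value)" form, independent of V8's logging internals and with purely illustrative names, is sketched below.

    #include <iostream>
    #include <type_traits>

    enum class Severity : int8_t { kInfo = 0, kFatal = 1 };

    std::ostream& operator<<(std::ostream& os, Severity s) {
      return os << (s == Severity::kInfo ? "kInfo" : "kFatal");
    }

    // Renders an enum as "<name> (<numeric value>)". Unary + promotes narrow
    // underlying types (int8_t, uint8_t) to int so the value prints as a
    // number rather than a character.
    template <typename E>
    void PrintEnumWithValue(std::ostream& os, E value) {
      os << value << " ("
         << +static_cast<typename std::underlying_type<E>::type>(value) << ")";
    }

    int main() {
      PrintEnumWithValue(std::cout, Severity::kFatal);  // prints "kFatal (1)"
      std::cout << "\n";
      return 0;
    }

The enums without a user-defined operator<< (TestEnum7, TestEnum8 above) fall back to printing only the numeric value, which matches the "2"/"7" and "0"/"1" expectations in OutputSingleCharEnum.
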
diff --git a/deps/v8/test/unittests/base/vlq-unittest.cc b/deps/v8/test/unittests/base/vlq-unittest.cc
new file mode 100644
index 00000000000..647873410a4
--- /dev/null
+++ b/deps/v8/test/unittests/base/vlq-unittest.cc
@@ -0,0 +1,123 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/vlq.h"
+
+#include <cmath>
+#include <limits>
+
+#include "src/base/memory.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace base {
+
+int ExpectedBytesUsed(int64_t value, bool is_signed) {
+ uint64_t bits = value;
+ if (is_signed) {
+ bits = (value < 0 ? -value : value) << 1;
+ }
+ int num_bits = 0;
+ while (bits != 0) {
+ num_bits++;
+ bits >>= 1;
+ }
+ return std::max(1, static_cast<int>(ceil(static_cast<float>(num_bits) / 7)));
+}
+
+void TestVLQUnsignedEquals(uint32_t value) {
+ std::vector<byte> buffer;
+ VLQEncodeUnsigned(&buffer, value);
+ byte* data_start = buffer.data();
+ int index = 0;
+ int expected_bytes_used = ExpectedBytesUsed(value, false);
+ EXPECT_EQ(buffer.size(), static_cast<size_t>(expected_bytes_used));
+ EXPECT_EQ(value, VLQDecodeUnsigned(data_start, &index));
+ EXPECT_EQ(index, expected_bytes_used);
+}
+
+void TestVLQEquals(int32_t value) {
+ std::vector<byte> buffer;
+ VLQEncode(&buffer, value);
+ byte* data_start = buffer.data();
+ int index = 0;
+ int expected_bytes_used = ExpectedBytesUsed(value, true);
+ EXPECT_EQ(buffer.size(), static_cast<size_t>(expected_bytes_used));
+ EXPECT_EQ(value, VLQDecode(data_start, &index));
+ EXPECT_EQ(index, expected_bytes_used);
+}
+
+TEST(VLQ, Unsigned) {
+ TestVLQUnsignedEquals(0);
+ TestVLQUnsignedEquals(1);
+ TestVLQUnsignedEquals(63);
+ TestVLQUnsignedEquals(64);
+ TestVLQUnsignedEquals(127);
+ TestVLQUnsignedEquals(255);
+ TestVLQUnsignedEquals(256);
+}
+
+TEST(VLQ, Positive) {
+ TestVLQEquals(0);
+ TestVLQEquals(1);
+ TestVLQEquals(63);
+ TestVLQEquals(64);
+ TestVLQEquals(127);
+ TestVLQEquals(255);
+ TestVLQEquals(256);
+}
+
+TEST(VLQ, Negative) {
+ TestVLQEquals(-1);
+ TestVLQEquals(-63);
+ TestVLQEquals(-64);
+ TestVLQEquals(-127);
+ TestVLQEquals(-255);
+ TestVLQEquals(-256);
+}
+
+TEST(VLQ, LimitsUnsigned) {
+ TestVLQEquals(std::numeric_limits<uint8_t>::max());
+ TestVLQEquals(std::numeric_limits<uint8_t>::max() - 1);
+ TestVLQEquals(std::numeric_limits<uint8_t>::max() + 1);
+ TestVLQEquals(std::numeric_limits<uint16_t>::max());
+ TestVLQEquals(std::numeric_limits<uint16_t>::max() - 1);
+ TestVLQEquals(std::numeric_limits<uint16_t>::max() + 1);
+ TestVLQEquals(std::numeric_limits<uint32_t>::max());
+ TestVLQEquals(std::numeric_limits<uint32_t>::max() - 1);
+}
+
+TEST(VLQ, LimitsSigned) {
+ TestVLQEquals(std::numeric_limits<int8_t>::max());
+ TestVLQEquals(std::numeric_limits<int8_t>::max() - 1);
+ TestVLQEquals(std::numeric_limits<int8_t>::max() + 1);
+ TestVLQEquals(std::numeric_limits<int16_t>::max());
+ TestVLQEquals(std::numeric_limits<int16_t>::max() - 1);
+ TestVLQEquals(std::numeric_limits<int16_t>::max() + 1);
+ TestVLQEquals(std::numeric_limits<int32_t>::max());
+ TestVLQEquals(std::numeric_limits<int32_t>::max() - 1);
+ TestVLQEquals(std::numeric_limits<int8_t>::min());
+ TestVLQEquals(std::numeric_limits<int8_t>::min() - 1);
+ TestVLQEquals(std::numeric_limits<int8_t>::min() + 1);
+ TestVLQEquals(std::numeric_limits<int16_t>::min());
+ TestVLQEquals(std::numeric_limits<int16_t>::min() - 1);
+ TestVLQEquals(std::numeric_limits<int16_t>::min() + 1);
+ // int32_t::min() is not supported.
+ TestVLQEquals(std::numeric_limits<int32_t>::min() + 1);
+}
+
+TEST(VLQ, Random) {
+ static constexpr int RANDOM_RUNS = 50;
+
+ base::RandomNumberGenerator rng(::testing::FLAGS_gtest_random_seed);
+ for (int i = 0; i < RANDOM_RUNS; ++i) {
+ TestVLQUnsignedEquals(rng.NextInt(std::numeric_limits<int32_t>::max()));
+ }
+ for (int i = 0; i < RANDOM_RUNS; ++i) {
+ TestVLQEquals(rng.NextInt());
+ }
+}
+} // namespace base
+} // namespace v8
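
The new vlq-unittest.cc exercises V8's base/vlq.h encoder through round-trips of increasing magnitudes. As a point of reference, a minimal generic VLQ codec using 7-bit groups with a continuation bit can be sketched as follows; this is not the V8 implementation, and the function names are illustrative only.

    #include <cstdint>
    #include <vector>

    // Encodes an unsigned value as 7-bit groups, least-significant group
    // first; the high bit of each byte flags that another byte follows.
    std::vector<uint8_t> EncodeUnsignedVLQ(uint32_t value) {
      std::vector<uint8_t> out;
      do {
        uint8_t byte = value & 0x7F;
        value >>= 7;
        if (value != 0) byte |= 0x80;  // continuation bit
        out.push_back(byte);
      } while (value != 0);
      return out;
    }

    // Decodes a value produced by EncodeUnsignedVLQ, advancing *index past it.
    uint32_t DecodeUnsignedVLQ(const std::vector<uint8_t>& data, size_t* index) {
      uint32_t result = 0;
      int shift = 0;
      uint8_t byte;
      do {
        byte = data[(*index)++];
        result |= static_cast<uint32_t>(byte & 0x7F) << shift;
        shift += 7;
      } while (byte & 0x80);
      return result;
    }

This 7-bits-per-byte layout is consistent with the test's ExpectedBytesUsed helper, which expects ceil(bit_width / 7) bytes. The signed tests shift the magnitude left by one to make room for a sign bit, which is presumably why std::numeric_limits<int32_t>::min() is excluded: its magnitude no longer fits in 32 bits once shifted.
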
diff --git a/deps/v8/test/unittests/codegen/aligned-slot-allocator-unittest.cc b/deps/v8/test/unittests/codegen/aligned-slot-allocator-unittest.cc
new file mode 100644
index 00000000000..3b04f7888ac
--- /dev/null
+++ b/deps/v8/test/unittests/codegen/aligned-slot-allocator-unittest.cc
@@ -0,0 +1,175 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/aligned-slot-allocator.h"
+
+#include "src/base/bits.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace internal {
+
+class AlignedSlotAllocatorUnitTest : public ::testing::Test {
+ public:
+ AlignedSlotAllocatorUnitTest() = default;
+ ~AlignedSlotAllocatorUnitTest() override = default;
+
+ // Helper method to test AlignedSlotAllocator::Allocate.
+ void Allocate(int size, int expected) {
+ int next = allocator_.NextSlot(size);
+ int result = allocator_.Allocate(size);
+ EXPECT_EQ(next, result); // NextSlot/Allocate are consistent.
+ EXPECT_EQ(expected, result);
+ EXPECT_EQ(0, result & (size - 1)); // result is aligned to size.
+ int slot_end = result + static_cast<int>(base::bits::RoundUpToPowerOfTwo32(
+ static_cast<uint32_t>(size)));
+ EXPECT_LE(slot_end, allocator_.Size()); // allocator Size is beyond slot.
+ }
+
+ // Helper method to test AlignedSlotAllocator::AllocateUnaligned.
+ void AllocateUnaligned(int size, int expected, int expected1, int expected2,
+ int expected4) {
+ int size_before = allocator_.Size();
+ int result = allocator_.AllocateUnaligned(size);
+ EXPECT_EQ(size_before, result); // AllocateUnaligned/Size are consistent.
+ EXPECT_EQ(expected, result);
+ EXPECT_EQ(result + size, allocator_.Size());
+ EXPECT_EQ(expected1, allocator_.NextSlot(1));
+ EXPECT_EQ(expected2, allocator_.NextSlot(2));
+ EXPECT_EQ(expected4, allocator_.NextSlot(4));
+ }
+
+ AlignedSlotAllocator allocator_;
+};
+
+TEST_F(AlignedSlotAllocatorUnitTest, NumSlotsForWidth) {
+ constexpr int kSlotBytes = AlignedSlotAllocator::kSlotSize;
+ for (int slot_size = 1; slot_size <= 4 * kSlotBytes; ++slot_size) {
+ EXPECT_EQ(AlignedSlotAllocator::NumSlotsForWidth(slot_size),
+ (slot_size + kSlotBytes - 1) / kSlotBytes);
+ }
+}
+
+TEST_F(AlignedSlotAllocatorUnitTest, Allocate1) {
+ Allocate(1, 0);
+ EXPECT_EQ(2, allocator_.NextSlot(2));
+ EXPECT_EQ(4, allocator_.NextSlot(4));
+
+ Allocate(1, 1);
+ EXPECT_EQ(2, allocator_.NextSlot(2));
+ EXPECT_EQ(4, allocator_.NextSlot(4));
+
+ Allocate(1, 2);
+ EXPECT_EQ(4, allocator_.NextSlot(2));
+ EXPECT_EQ(4, allocator_.NextSlot(4));
+
+ Allocate(1, 3);
+ EXPECT_EQ(4, allocator_.NextSlot(2));
+ EXPECT_EQ(4, allocator_.NextSlot(4));
+
+ // Make sure we use 1-fragments.
+ Allocate(1, 4);
+ Allocate(2, 6);
+ Allocate(1, 5);
+
+ // Make sure we use 2-fragments.
+ Allocate(2, 8);
+ Allocate(1, 10);
+ Allocate(1, 11);
+}
+
+TEST_F(AlignedSlotAllocatorUnitTest, Allocate2) {
+ Allocate(2, 0);
+ EXPECT_EQ(2, allocator_.NextSlot(1));
+ EXPECT_EQ(4, allocator_.NextSlot(4));
+
+ Allocate(2, 2);
+ EXPECT_EQ(4, allocator_.NextSlot(1));
+ EXPECT_EQ(4, allocator_.NextSlot(4));
+
+ // Make sure we use 2-fragments.
+ Allocate(1, 4);
+ Allocate(2, 6);
+ Allocate(2, 8);
+}
+
+TEST_F(AlignedSlotAllocatorUnitTest, Allocate4) {
+ Allocate(4, 0);
+ EXPECT_EQ(4, allocator_.NextSlot(1));
+ EXPECT_EQ(4, allocator_.NextSlot(2));
+
+ Allocate(1, 4);
+ Allocate(4, 8);
+
+ Allocate(2, 6);
+ Allocate(4, 12);
+}
+
+TEST_F(AlignedSlotAllocatorUnitTest, AllocateUnaligned) {
+ AllocateUnaligned(1, 0, 1, 2, 4);
+ AllocateUnaligned(1, 1, 2, 2, 4);
+
+ Allocate(1, 2);
+
+ AllocateUnaligned(2, 3, 5, 6, 8);
+
+ // Advance to leave 1- and 2- fragments below Size.
+ Allocate(4, 8);
+
+ // AllocateUnaligned should allocate at the end, and clear fragments.
+ AllocateUnaligned(0, 12, 12, 12, 12);
+}
+
+TEST_F(AlignedSlotAllocatorUnitTest, LargeAllocateUnaligned) {
+ AllocateUnaligned(11, 0, 11, 12, 12);
+ AllocateUnaligned(11, 11, 22, 22, 24);
+ AllocateUnaligned(13, 22, 35, 36, 36);
+}
+
+TEST_F(AlignedSlotAllocatorUnitTest, Size) {
+ allocator_.Allocate(1);
+ EXPECT_EQ(1, allocator_.Size());
+ // Allocate 2, leaving a fragment at 1. Size should be at 4.
+ allocator_.Allocate(2);
+ EXPECT_EQ(4, allocator_.Size());
+ // Allocate should consume fragment.
+ EXPECT_EQ(1, allocator_.Allocate(1));
+ // Size should still be 4.
+ EXPECT_EQ(4, allocator_.Size());
+}
+
+TEST_F(AlignedSlotAllocatorUnitTest, Align) {
+ EXPECT_EQ(0, allocator_.Align(1));
+ EXPECT_EQ(0, allocator_.Size());
+
+ // Allocate 1 to become misaligned.
+ Allocate(1, 0);
+
+ // 4-align.
+ EXPECT_EQ(3, allocator_.Align(4));
+ EXPECT_EQ(4, allocator_.NextSlot(1));
+ EXPECT_EQ(4, allocator_.NextSlot(2));
+ EXPECT_EQ(4, allocator_.NextSlot(4));
+ EXPECT_EQ(4, allocator_.Size());
+
+ // Allocate 2 to become misaligned.
+ Allocate(2, 4);
+
+ // 4-align.
+ EXPECT_EQ(2, allocator_.Align(4));
+ EXPECT_EQ(8, allocator_.NextSlot(1));
+ EXPECT_EQ(8, allocator_.NextSlot(2));
+ EXPECT_EQ(8, allocator_.NextSlot(4));
+ EXPECT_EQ(8, allocator_.Size());
+
+ // No change when we're already aligned.
+ EXPECT_EQ(0, allocator_.Align(2));
+ EXPECT_EQ(8, allocator_.NextSlot(1));
+ EXPECT_EQ(8, allocator_.NextSlot(2));
+ EXPECT_EQ(8, allocator_.NextSlot(4));
+ EXPECT_EQ(8, allocator_.Size());
+}
+
+} // namespace internal
+} // namespace v8
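
The NumSlotsForWidth expectation in the new aligned-slot-allocator test is ordinary ceiling division of a byte width by the slot size. A self-contained restatement of that arithmetic, with a slot size of 4 assumed purely for illustration (the real value comes from AlignedSlotAllocator::kSlotSize), is:

    constexpr int CeilDiv(int numerator, int denominator) {
      return (numerator + denominator - 1) / denominator;
    }
    static_assert(CeilDiv(1, 4) == 1, "1 byte still needs a whole slot");
    static_assert(CeilDiv(4, 4) == 1, "exactly one slot");
    static_assert(CeilDiv(5, 4) == 2, "one byte over rolls into a second slot");
    static_assert(CeilDiv(16, 4) == 4, "four full slots");
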
diff --git a/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc b/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc
index 4aa4aaba2b4..6137b3425a8 100644
--- a/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc
+++ b/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc
@@ -13,7 +13,6 @@
#include "test/unittests/compiler/node-test-utils.h"
using ::testing::_;
-using v8::internal::compiler::Node;
namespace c = v8::internal::compiler;
diff --git a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index 6c9c6321ccd..7a865c6d7f1 100644
--- a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/common/globals.h"
#include "src/objects/objects-inl.h"
#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
@@ -2165,10 +2166,7 @@ static const SIMDMulDPInst kSIMDMulDPInstructions[] = {
kArm64I32x4Mla, kArm64I32x4Mls, MachineType::Simd128()},
{"I16x8Mul", &MachineOperatorBuilder::I16x8Mul,
&MachineOperatorBuilder::I16x8Add, &MachineOperatorBuilder::I16x8Sub,
- kArm64I16x8Mla, kArm64I16x8Mls, MachineType::Simd128()},
- {"I8x16Mul", &MachineOperatorBuilder::I8x16Mul,
- &MachineOperatorBuilder::I8x16Add, &MachineOperatorBuilder::I8x16Sub,
- kArm64I8x16Mla, kArm64I8x16Mls, MachineType::Simd128()}};
+ kArm64I16x8Mla, kArm64I16x8Mls, MachineType::Simd128()}};
using InstructionSelectorSIMDDPWithSIMDMulTest =
InstructionSelectorTestWithParam<SIMDMulDPInst>;
@@ -2220,6 +2218,204 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
InstructionSelectorSIMDDPWithSIMDMulTest,
::testing::ValuesIn(kSIMDMulDPInstructions));
+struct SIMDMulDupInst {
+ const uint8_t shuffle[16];
+ int32_t lane;
+ int shuffle_input_index;
+};
+
+const SIMDMulDupInst kSIMDF32x4MulDuplInstructions[] = {
+ {
+ {0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3},
+ 0,
+ 0,
+ },
+ {
+ {4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7},
+ 1,
+ 0,
+ },
+ {
+ {8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11},
+ 2,
+ 0,
+ },
+ {
+ {12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15},
+ 3,
+ 0,
+ },
+ {
+ {16, 17, 18, 19, 16, 17, 18, 19, 16, 17, 18, 19, 16, 17, 18, 19},
+ 0,
+ 1,
+ },
+ {
+ {20, 21, 22, 23, 20, 21, 22, 23, 20, 21, 22, 23, 20, 21, 22, 23},
+ 1,
+ 1,
+ },
+ {
+ {24, 25, 26, 27, 24, 25, 26, 27, 24, 25, 26, 27, 24, 25, 26, 27},
+ 2,
+ 1,
+ },
+ {
+ {28, 29, 30, 31, 28, 29, 30, 31, 28, 29, 30, 31, 28, 29, 30, 31},
+ 3,
+ 1,
+ },
+};
+
+using InstructionSelectorSimdF32x4MulWithDupTest =
+ InstructionSelectorTestWithParam<SIMDMulDupInst>;
+
+TEST_P(InstructionSelectorSimdF32x4MulWithDupTest, MulWithDup) {
+ const SIMDMulDupInst param = GetParam();
+ const MachineType type = MachineType::Simd128();
+ {
+ StreamBuilder m(this, type, type, type, type);
+ Node* shuffle = m.AddNode(m.machine()->I8x16Shuffle(param.shuffle),
+ m.Parameter(0), m.Parameter(1));
+ m.Return(m.AddNode(m.machine()->F32x4Mul(), m.Parameter(2), shuffle));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64F32x4MulElement, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(param.lane, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(m.Parameter(param.shuffle_input_index)),
+ s.ToVreg(s[0]->InputAt(1)));
+ }
+
+ // Multiplication operator should be commutative, so test shuffle op as lhs.
+ {
+ StreamBuilder m(this, type, type, type, type);
+ Node* shuffle = m.AddNode(m.machine()->I8x16Shuffle(param.shuffle),
+ m.Parameter(0), m.Parameter(1));
+ m.Return(m.AddNode(m.machine()->F32x4Mul(), shuffle, m.Parameter(2)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64F32x4MulElement, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(param.lane, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(m.Parameter(param.shuffle_input_index)),
+ s.ToVreg(s[0]->InputAt(1)));
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorSimdF32x4MulWithDupTest,
+ ::testing::ValuesIn(kSIMDF32x4MulDuplInstructions));
+
+TEST_F(InstructionSelectorTest, SimdF32x4MulWithDupNegativeTest) {
+ const MachineType type = MachineType::Simd128();
+ // Check that optimization does not match when the shuffle is not a f32x4.dup.
+ const uint8_t mask[kSimd128Size] = {0};
+ {
+ StreamBuilder m(this, type, type, type, type);
+ Node* shuffle = m.AddNode((m.machine()->I8x16Shuffle(mask)), m.Parameter(0),
+ m.Parameter(1));
+ m.Return(m.AddNode(m.machine()->F32x4Mul(), m.Parameter(2), shuffle));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ // The shuffle is a i8x16.dup of lane 0.
+ EXPECT_EQ(kArm64S128Dup, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(kArm64F32x4Mul, s[1]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+}
+
+const SIMDMulDupInst kSIMDF64x2MulDuplInstructions[] = {
+ {
+ {0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7},
+ 0,
+ 0,
+ },
+ {
+ {8, 9, 10, 11, 12, 13, 14, 15, 8, 9, 10, 11, 12, 13, 14, 15},
+ 1,
+ 0,
+ },
+ {
+ {16, 17, 18, 19, 20, 21, 22, 23, 16, 17, 18, 19, 20, 21, 22, 23},
+ 0,
+ 1,
+ },
+ {
+ {24, 25, 26, 27, 28, 29, 30, 31, 24, 25, 26, 27, 28, 29, 30, 31},
+ 1,
+ 1,
+ },
+};
+
+using InstructionSelectorSimdF64x2MulWithDupTest =
+ InstructionSelectorTestWithParam<SIMDMulDupInst>;
+
+TEST_P(InstructionSelectorSimdF64x2MulWithDupTest, MulWithDup) {
+ const SIMDMulDupInst param = GetParam();
+ const MachineType type = MachineType::Simd128();
+ {
+ StreamBuilder m(this, type, type, type, type);
+ Node* shuffle = m.AddNode(m.machine()->I8x16Shuffle(param.shuffle),
+ m.Parameter(0), m.Parameter(1));
+ m.Return(m.AddNode(m.machine()->F64x2Mul(), m.Parameter(2), shuffle));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64F64x2MulElement, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(param.lane, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(m.Parameter(param.shuffle_input_index)),
+ s.ToVreg(s[0]->InputAt(1)));
+ }
+
+ // Multiplication operator should be commutative, so test shuffle op as lhs.
+ {
+ StreamBuilder m(this, type, type, type, type);
+ Node* shuffle = m.AddNode(m.machine()->I8x16Shuffle(param.shuffle),
+ m.Parameter(0), m.Parameter(1));
+ m.Return(m.AddNode(m.machine()->F64x2Mul(), shuffle, m.Parameter(2)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64F64x2MulElement, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(param.lane, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(m.Parameter(param.shuffle_input_index)),
+ s.ToVreg(s[0]->InputAt(1)));
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorSimdF64x2MulWithDupTest,
+ ::testing::ValuesIn(kSIMDF64x2MulDuplInstructions));
+
+TEST_F(InstructionSelectorTest, SimdF64x2MulWithDupNegativeTest) {
+ const MachineType type = MachineType::Simd128();
+ // Check that optimization does not match when the shuffle is not a f64x2.dup.
+ const uint8_t mask[kSimd128Size] = {0};
+ {
+ StreamBuilder m(this, type, type, type, type);
+ Node* shuffle = m.AddNode((m.machine()->I8x16Shuffle(mask)), m.Parameter(0),
+ m.Parameter(1));
+ m.Return(m.AddNode(m.machine()->F64x2Mul(), m.Parameter(2), shuffle));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ // The shuffle is a i8x16.dup of lane 0.
+ EXPECT_EQ(kArm64S128Dup, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(kArm64F64x2Mul, s[1]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+}
+
TEST_F(InstructionSelectorTest, Int32MulWithImmediate) {
// x * (2^k + 1) -> x + (x << k)
TRACED_FORRANGE(int32_t, k, 1, 30) {
@@ -2564,6 +2760,32 @@ TEST_P(InstructionSelectorFPCmpTest, WithImmediateZeroOnLeft) {
INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
::testing::ValuesIn(kFPCmpInstructions));
+TEST_F(InstructionSelectorTest, Float32SelectWithRegisters) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float32(),
+ MachineType::Float32());
+ Node* cond = m.Int32Constant(1);
+ m.Return(m.Float32Select(cond, m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_select, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+}
+
+TEST_F(InstructionSelectorTest, Float64SelectWithRegisters) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float64(),
+ MachineType::Float64());
+ Node* cond = m.Int32Constant(1);
+ m.Return(m.Float64Select(cond, m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_select, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+}
+
// -----------------------------------------------------------------------------
// Conversions.
@@ -4543,6 +4765,42 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
+TEST_F(InstructionSelectorTest, Float32Abd) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const fsub = m.Float32Sub(p0, p1);
+ Node* const fabs = m.Float32Abs(fsub);
+ m.Return(fabs);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Float32Abd, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(fabs), s.ToVreg(s[0]->Output()));
+}
+
+TEST_F(InstructionSelectorTest, Float64Abd) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const fsub = m.Float64Sub(p0, p1);
+ Node* const fabs = m.Float64Abs(fsub);
+ m.Return(fabs);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Float64Abd, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(fabs), s.ToVreg(s[0]->Output()));
+}
+
TEST_F(InstructionSelectorTest, Float64Max) {
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
index 0c0214ce43d..60ba115713c 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
@@ -90,7 +90,7 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
EXPECT_NE(InstructionOperand::CONSTANT, input->kind());
if (input->IsImmediate()) {
auto imm = ImmediateOperand::cast(input);
- if (imm->type() == ImmediateOperand::INDEXED) {
+ if (imm->type() == ImmediateOperand::INDEXED_IMM) {
int index = imm->indexed_value();
s.immediates_.insert(
std::make_pair(index, sequence.GetImmediate(imm)));
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.h b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.h
index 05c4d04eac5..203daca69fb 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.h
+++ b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.h
@@ -267,8 +267,10 @@ class InstructionSelectorTest : public TestWithNativeContextAndZone {
} else {
EXPECT_EQ(InstructionOperand::IMMEDIATE, operand->kind());
auto imm = ImmediateOperand::cast(operand);
- if (imm->type() == ImmediateOperand::INLINE) {
- return Constant(imm->inline_value());
+ if (imm->type() == ImmediateOperand::INLINE_INT32) {
+ return Constant(imm->inline_int32_value());
+ } else if (imm->type() == ImmediateOperand::INLINE_INT64) {
+ return Constant(imm->inline_int64_value());
}
i = immediates_.find(imm->indexed_value());
EXPECT_EQ(imm->indexed_value(), i->first);
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc b/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc
index c66685b710f..3cfb050c798 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc
+++ b/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc
@@ -344,7 +344,7 @@ InstructionOperand* InstructionSequenceTest::ConvertInputs(
InstructionOperand InstructionSequenceTest::ConvertInputOp(TestOperand op) {
if (op.type_ == kImmediate) {
CHECK_EQ(op.vreg_.value_, kNoValue);
- return ImmediateOperand(ImmediateOperand::INLINE, op.value_);
+ return ImmediateOperand(ImmediateOperand::INLINE_INT32, op.value_);
}
CHECK_NE(op.vreg_.value_, kNoValue);
switch (op.type_) {
diff --git a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
index 97ddd8ee526..cacff096525 100644
--- a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
+++ b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/init/v8.h"
-
#include "src/compiler/bytecode-analysis.h"
+
+#include "src/init/v8.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-decoder.h"
diff --git a/deps/v8/test/unittests/compiler/csa-load-elimination-unittest.cc b/deps/v8/test/unittests/compiler/csa-load-elimination-unittest.cc
new file mode 100644
index 00000000000..87bbdf40418
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/csa-load-elimination-unittest.cc
@@ -0,0 +1,155 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/csa-load-elimination.h"
+
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/machine-operator-reducer.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+#include "test/unittests/compiler/graph-reducer-unittest.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::_;
+using testing::StrictMock;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class CsaLoadEliminationTest : public GraphTest {
+ public:
+ CsaLoadEliminationTest()
+ : GraphTest(3),
+ simplified_(zone()),
+ machine_(zone()),
+ jsgraph_(isolate(), graph(), common(), nullptr, simplified(),
+ machine()),
+ reducer_(zone(), graph(), tick_counter(), broker()),
+ csa_(reducer(), jsgraph(), zone()),
+ mcr_(reducer(), jsgraph()) {
+ reducer()->AddReducer(&csa_);
+ reducer()->AddReducer(&mcr_);
+ }
+
+ ~CsaLoadEliminationTest() override = default;
+
+ protected:
+ JSGraph* jsgraph() { return &jsgraph_; }
+ SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+ MachineOperatorBuilder* machine() { return &machine_; }
+ GraphReducer* reducer() { return &reducer_; }
+ Node* param1() {
+ return graph()->NewNode(common()->Parameter(1), graph()->start());
+ }
+ Node* constant(int32_t value) {
+ return graph()->NewNode(common()->Int32Constant(value));
+ }
+
+ private:
+ SimplifiedOperatorBuilder simplified_;
+ MachineOperatorBuilder machine_;
+ JSGraph jsgraph_;
+ GraphReducer reducer_;
+ CsaLoadElimination csa_;
+ MachineOperatorReducer mcr_;
+};
+
+#define SETUP_SIMPLE_TEST(store_type, load_type, value_) \
+ Node* object = graph()->NewNode(common()->Parameter(0), graph()->start()); \
+ Node* offset = graph()->NewNode(common()->Int32Constant(5)); \
+ Node* value = value_; \
+ Node* control = graph()->start(); \
+ \
+ ObjectAccess store_access(MachineType::store_type(), kNoWriteBarrier); \
+ ObjectAccess load_access(MachineType::load_type(), kNoWriteBarrier); \
+ \
+ Node* store = \
+ graph()->NewNode(simplified()->StoreToObject(store_access), object, \
+ offset, value, graph()->start(), control); \
+ \
+ Node* load = graph()->NewNode(simplified()->LoadFromObject(load_access), \
+ object, offset, store, control); \
+ \
+ Node* ret = graph()->NewNode(common()->Return(0), load, load, control); \
+ \
+ graph()->end()->InsertInput(zone(), 0, ret); \
+ \
+ reducer()->ReduceGraph();
+
+TEST_F(CsaLoadEliminationTest, Int32) {
+ SETUP_SIMPLE_TEST(Int32, Int32, param1())
+
+ EXPECT_EQ(ret->InputAt(0)->opcode(), IrOpcode::kParameter);
+}
+
+TEST_F(CsaLoadEliminationTest, Int64) {
+ SETUP_SIMPLE_TEST(Int64, Int64, param1())
+
+ EXPECT_EQ(ret->InputAt(0)->opcode(), IrOpcode::kParameter);
+}
+
+TEST_F(CsaLoadEliminationTest, Int64_to_Int32) {
+ SETUP_SIMPLE_TEST(Int64, Int32, param1())
+
+ EXPECT_EQ(ret->InputAt(0)->opcode(), IrOpcode::kTruncateInt64ToInt32);
+}
+
+TEST_F(CsaLoadEliminationTest, Int16_to_Int16) {
+ SETUP_SIMPLE_TEST(Int16, Int16, param1())
+
+ EXPECT_EQ(ret->InputAt(0)->opcode(), IrOpcode::kWord32Sar);
+}
+
+TEST_F(CsaLoadEliminationTest, Int16_to_Uint8) {
+ SETUP_SIMPLE_TEST(Int16, Uint8, param1())
+
+ EXPECT_EQ(ret->InputAt(0)->opcode(), IrOpcode::kWord32And);
+}
+
+TEST_F(CsaLoadEliminationTest, Int8_to_Uint16) {
+ SETUP_SIMPLE_TEST(Int8, Uint16, param1())
+
+ EXPECT_EQ(ret->InputAt(0)->opcode(), IrOpcode::kLoadFromObject);
+}
+
+TEST_F(CsaLoadEliminationTest, Int8_to_Uint64) {
+ SETUP_SIMPLE_TEST(Int8, Uint64, param1())
+
+ EXPECT_EQ(ret->InputAt(0)->opcode(), IrOpcode::kLoadFromObject);
+}
+
+TEST_F(CsaLoadEliminationTest, Int32_to_Int64) {
+ SETUP_SIMPLE_TEST(Int32, Int64, param1())
+
+ EXPECT_EQ(ret->InputAt(0)->opcode(), IrOpcode::kLoadFromObject);
+}
+
+TEST_F(CsaLoadEliminationTest, Int16_constant) {
+ SETUP_SIMPLE_TEST(Int32, Int16, constant(0xfedcba98))
+
+ Int32Matcher m(ret->InputAt(0));
+
+ EXPECT_TRUE(m.HasResolvedValue());
+ EXPECT_EQ(m.ResolvedValue(), int32_t(0xffffba98));
+}
+
+TEST_F(CsaLoadEliminationTest, Uint8_constant) {
+ SETUP_SIMPLE_TEST(Int32, Uint8, constant(0xfedcba98))
+
+ Uint32Matcher m(ret->InputAt(0));
+
+ EXPECT_TRUE(m.HasResolvedValue());
+ EXPECT_EQ(m.ResolvedValue(), uint32_t(0x98));
+}
+
+#undef SETUP_SIMPLE_TEST
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
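
The constant-folding cases at the end of csa-load-elimination-unittest.cc (Int16_constant, Uint8_constant) follow ordinary narrowing rules: a 32-bit stored constant read back through a narrower type keeps only its low bits, sign- or zero-extended according to the load type. A standalone check of the two expected values, assuming a two's-complement target, is:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t stored = 0xfedcba98u;
      // A 16-bit signed load keeps the low 16 bits and sign-extends them.
      int32_t as_int16 = static_cast<int16_t>(stored);
      // An 8-bit unsigned load keeps the low 8 bits and zero-extends them.
      uint32_t as_uint8 = static_cast<uint8_t>(stored);
      assert(static_cast<uint32_t>(as_int16) == 0xffffba98u);
      assert(as_uint8 == 0x98u);
      return 0;
    }
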
diff --git a/deps/v8/test/unittests/compiler/frame-unittest.cc b/deps/v8/test/unittests/compiler/frame-unittest.cc
new file mode 100644
index 00000000000..f74e4d34ecd
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/frame-unittest.cc
@@ -0,0 +1,242 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/frame.h"
+
+#include "src/codegen/aligned-slot-allocator.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+constexpr int kSlotSize = AlignedSlotAllocator::kSlotSize;
+
+constexpr int kFixed1 = 1;
+constexpr int kFixed3 = 3;
+} // namespace
+
+class FrameTest : public ::testing::Test {
+ public:
+ FrameTest() = default;
+ ~FrameTest() override = default;
+};
+
+TEST_F(FrameTest, Constructor) {
+ Frame frame(kFixed3);
+ EXPECT_EQ(kFixed3, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(0, frame.GetSpillSlotCount());
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+}
+
+TEST_F(FrameTest, ReserveSpillSlots) {
+ Frame frame(kFixed3);
+ constexpr int kReserve2 = 2;
+
+ frame.ReserveSpillSlots(kReserve2);
+ EXPECT_EQ(kFixed3 + kReserve2, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(kReserve2, frame.GetSpillSlotCount());
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+}
+
+TEST_F(FrameTest, EnsureReturnSlots) {
+ Frame frame(kFixed3);
+ constexpr int kReturn3 = 3;
+ constexpr int kReturn5 = 5;
+ constexpr int kReturn2 = 2;
+
+ frame.EnsureReturnSlots(kReturn3);
+ EXPECT_EQ(kFixed3 + kReturn3, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(0, frame.GetSpillSlotCount());
+ EXPECT_EQ(kReturn3, frame.GetReturnSlotCount());
+
+ // Returns should grow by 2 slots.
+ frame.EnsureReturnSlots(kReturn5);
+ EXPECT_EQ(kFixed3 + kReturn5, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(0, frame.GetSpillSlotCount());
+ EXPECT_EQ(kReturn5, frame.GetReturnSlotCount());
+
+ // Returns shouldn't grow.
+ frame.EnsureReturnSlots(kReturn2);
+ EXPECT_EQ(kFixed3 + kReturn5, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(0, frame.GetSpillSlotCount());
+ EXPECT_EQ(kReturn5, frame.GetReturnSlotCount());
+}
+
+TEST_F(FrameTest, AllocateSavedCalleeRegisterSlots) {
+ Frame frame(kFixed3);
+ constexpr int kFirstSlots = 2;
+ constexpr int kSecondSlots = 3;
+
+ frame.AllocateSavedCalleeRegisterSlots(kFirstSlots);
+ EXPECT_EQ(kFixed3 + kFirstSlots, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(0, frame.GetSpillSlotCount());
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+
+ frame.AllocateSavedCalleeRegisterSlots(kSecondSlots);
+ EXPECT_EQ(kFixed3 + kFirstSlots + kSecondSlots,
+ frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(0, frame.GetSpillSlotCount());
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+}
+
+TEST_F(FrameTest, AlignSavedCalleeRegisterSlots) {
+ Frame frame(kFixed3);
+ constexpr int kSlots = 2; // An even number leaves the slots misaligned.
+
+ frame.AllocateSavedCalleeRegisterSlots(kSlots);
+
+ // Align, which should add 1 padding slot.
+ frame.AlignSavedCalleeRegisterSlots(2 * kSlotSize);
+ EXPECT_EQ(kFixed3 + kSlots + 1, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(1, frame.GetSpillSlotCount()); // padding
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+
+ // Align again, which should not add a padding slot.
+ frame.AlignSavedCalleeRegisterSlots(2 * kSlotSize);
+ EXPECT_EQ(kFixed3 + kSlots + 1, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(1, frame.GetSpillSlotCount()); // padding
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+}
+
+TEST_F(FrameTest, AllocateSpillSlotAligned) {
+ Frame frame(kFixed1);
+
+ // Allocate a quad slot, which must add 3 padding slots. Frame returns the
+ // last index of the 4 slot allocation.
+ int end = kFixed1 + 3 + 4;
+ int slot = kFixed1 + 3 + 4 - 1;
+ EXPECT_EQ(slot, frame.AllocateSpillSlot(4 * kSlotSize, 4 * kSlotSize));
+ EXPECT_EQ(end, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed1, frame.GetFixedSlotCount());
+ EXPECT_EQ(end - kFixed1, frame.GetSpillSlotCount());
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+
+ // Allocate a double slot, which should leave the first padding slot and
+ // take the last two slots of padding.
+ slot = kFixed1 + 1 + 2 - 1;
+ EXPECT_EQ(slot, frame.AllocateSpillSlot(2 * kSlotSize, 2 * kSlotSize));
+ EXPECT_EQ(end, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed1, frame.GetFixedSlotCount());
+ EXPECT_EQ(end - kFixed1, frame.GetSpillSlotCount());
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+
+ // Allocate a single slot, which should take the last padding slot.
+ slot = kFixed1;
+ EXPECT_EQ(slot, frame.AllocateSpillSlot(kSlotSize, kSlotSize));
+ EXPECT_EQ(end, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed1, frame.GetFixedSlotCount());
+ EXPECT_EQ(end - kFixed1, frame.GetSpillSlotCount());
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+}
+
+TEST_F(FrameTest, AllocateSpillSlotAlignedWithReturns) {
+ Frame frame(kFixed3);
+ constexpr int kReturn3 = 3;
+ constexpr int kReturn5 = 5;
+
+ frame.EnsureReturnSlots(kReturn3);
+
+ // Allocate a double slot, which must add 1 padding slot. This should occupy
+ // slots 4 and 5, and AllocateSpillSlot returns the last slot index.
+ EXPECT_EQ(kFixed3 + 2, frame.AllocateSpillSlot(2 * kSlotSize, 2 * kSlotSize));
+ EXPECT_EQ(kFixed3 + kReturn3 + 3, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(3, frame.GetSpillSlotCount());
+ EXPECT_EQ(kReturn3, frame.GetReturnSlotCount());
+
+ frame.EnsureReturnSlots(kReturn5);
+
+ // Allocate a single slot, which should take the padding slot.
+ EXPECT_EQ(kFixed3, frame.AllocateSpillSlot(kSlotSize, kSlotSize));
+ EXPECT_EQ(kFixed3 + kReturn5 + 3, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(3, frame.GetSpillSlotCount());
+ EXPECT_EQ(kReturn5, frame.GetReturnSlotCount());
+}
+
+TEST_F(FrameTest, AllocateSpillSlotAndEndSpillArea) {
+ Frame frame(kFixed3);
+
+ // Allocate a double slot, which must add 1 padding slot.
+ EXPECT_EQ(kFixed3 + 2, frame.AllocateSpillSlot(2 * kSlotSize, 2 * kSlotSize));
+
+ // Allocate an unaligned double slot. This should be at the end.
+ EXPECT_EQ(kFixed3 + 4, frame.AllocateSpillSlot(2 * kSlotSize));
+ EXPECT_EQ(kFixed3 + 5, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(5, frame.GetSpillSlotCount());
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+
+ // Allocate a single slot. This should not be the padding slot, since that
+ // area has been closed by the unaligned allocation.
+ EXPECT_EQ(kFixed3 + 5, frame.AllocateSpillSlot(kSlotSize, kSlotSize));
+ EXPECT_EQ(kFixed3 + 6, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(6, frame.GetSpillSlotCount());
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+}
+
+TEST_F(FrameTest, AllocateSpillSlotOverAligned) {
+ Frame frame(kFixed1);
+
+ // Allocate a 4-aligned double slot, which must add 3 padding slots. This
+ // also terminates the slot area. Returns the starting slot in this case.
+ EXPECT_EQ(kFixed1 + 4, frame.AllocateSpillSlot(2 * kSlotSize, 4 * kSlotSize));
+ EXPECT_EQ(kFixed1 + 5, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed1, frame.GetFixedSlotCount());
+ EXPECT_EQ(5, frame.GetSpillSlotCount());
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+
+ // Allocate a single slot. This should not use any padding slot.
+ EXPECT_EQ(kFixed1 + 5, frame.AllocateSpillSlot(kSlotSize, kSlotSize));
+ EXPECT_EQ(kFixed1 + 6, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed1, frame.GetFixedSlotCount());
+ EXPECT_EQ(6, frame.GetSpillSlotCount());
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+}
+
+TEST_F(FrameTest, AllocateSpillSlotUnderAligned) {
+ Frame frame(kFixed1);
+
+ // Allocate a 1-aligned double slot. This also terminates the slot area.
+ EXPECT_EQ(kFixed1 + 1, frame.AllocateSpillSlot(2 * kSlotSize, kSlotSize));
+ EXPECT_EQ(kFixed1 + 2, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed1, frame.GetFixedSlotCount());
+ EXPECT_EQ(2, frame.GetSpillSlotCount());
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+}
+
+TEST_F(FrameTest, AlignFrame) {
+ Frame frame(kFixed3);
+ constexpr int kReturn3 = 3;
+
+ frame.EnsureReturnSlots(kReturn3);
+
+ // Allocate two single slots, which leaves spill slots not 2-aligned.
+ EXPECT_EQ(kFixed3, frame.AllocateSpillSlot(kSlotSize, kSlotSize));
+ EXPECT_EQ(kFixed3 + 1, frame.AllocateSpillSlot(kSlotSize, kSlotSize));
+
+ // Align to 2 slots. This should pad the spill and return slot areas.
+ frame.AlignFrame(2 * kSlotSize);
+
+ EXPECT_EQ(kFixed3 + 3 + kReturn3 + 1, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(3, frame.GetSpillSlotCount());
+ EXPECT_EQ(kReturn3 + 1, frame.GetReturnSlotCount());
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
index 1a153eff9a3..4478b360477 100644
--- a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
+++ b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
@@ -886,6 +886,50 @@ TEST_F(InstructionSelectorTest, SIMDSplatZero) {
}
}
+struct SwizzleConstants {
+ uint8_t shuffle[kSimd128Size];
+ bool omit_add;
+};
+
+static constexpr SwizzleConstants kSwizzleConstants[] = {
+ {
+ // all lanes < kSimd128Size
+ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
+ true,
+ },
+ {
+ // lanes that are >= kSimd128Size have top bit set
+ {12, 13, 14, 15, 0x90, 0x91, 0x92, 0x93, 0xA0, 0xA1, 0xA2, 0xA3, 0xFC,
+ 0xFD, 0xFE, 0xFF},
+ true,
+ },
+ {
+ {12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27},
+ false,
+ },
+};
+
+using InstructionSelectorSIMDSwizzleConstantTest =
+ InstructionSelectorTestWithParam<SwizzleConstants>;
+
+TEST_P(InstructionSelectorSIMDSwizzleConstantTest, SimdSwizzleConstant) {
+ // Test optimization of swizzle with constant indices.
+ auto param = GetParam();
+ StreamBuilder m(this, MachineType::Simd128(), MachineType::Simd128());
+ Node* const c = m.S128Const(param.shuffle);
+ Node* swizzle = m.AddNode(m.machine()->I8x16Swizzle(), m.Parameter(0), c);
+ m.Return(swizzle);
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ ASSERT_EQ(kIA32I8x16Swizzle, s[1]->arch_opcode());
+ ASSERT_EQ(param.omit_add, s[1]->misc());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorSIMDSwizzleConstantTest,
+ ::testing::ValuesIn(kSwizzleConstants));
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
index e6660b7823e..8eafd4fe15a 100644
--- a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
@@ -32,13 +32,15 @@ class Int64LoweringTest : public GraphTest {
Int64LoweringTest()
: GraphTest(),
machine_(zone(), MachineRepresentation::kWord32,
- MachineOperatorBuilder::Flag::kAllOptionalOps) {
+ MachineOperatorBuilder::Flag::kAllOptionalOps),
+ simplified_(zone()) {
value_[0] = 0x1234567890ABCDEF;
value_[1] = 0x1EDCBA098765432F;
value_[2] = 0x1133557799886644;
}
MachineOperatorBuilder* machine() { return &machine_; }
+ SimplifiedOperatorBuilder* simplified() { return &simplified_; }
void LowerGraph(Node* node, Signature<MachineRepresentation>* signature) {
Node* zero = graph()->NewNode(common()->Int32Constant(0));
@@ -46,7 +48,8 @@ class Int64LoweringTest : public GraphTest {
graph()->start(), graph()->start());
NodeProperties::MergeControlToEnd(graph(), common(), ret);
- Int64Lowering lowering(graph(), machine(), common(), zone(), signature);
+ Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(),
+ signature);
lowering.LowerGraph();
}
@@ -64,7 +67,7 @@ class Int64LoweringTest : public GraphTest {
Signature<MachineRepresentation>::Builder sig_builder(zone(), 1, 0);
sig_builder.AddReturn(rep);
- Int64Lowering lowering(graph(), machine(), common(), zone(),
+ Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(),
sig_builder.Build(), std::move(special_case));
lowering.LowerGraph();
}
@@ -134,6 +137,7 @@ class Int64LoweringTest : public GraphTest {
private:
MachineOperatorBuilder machine_;
+ SimplifiedOperatorBuilder simplified_;
int64_t value_[3];
};
@@ -177,22 +181,64 @@ TEST_F(Int64LoweringTest, Int64Constant) {
start()));
#endif
-#define INT64_LOAD_LOWERING(kLoad) \
- int32_t base = 0x1234; \
- int32_t index = 0x5678; \
- \
- LowerGraph(graph()->NewNode(machine()->kLoad(MachineType::Int64()), \
- Int32Constant(base), Int32Constant(index), \
- start(), start()), \
- MachineRepresentation::kWord64); \
- \
- Capture<Node*> high_word_load; \
+#define INT64_LOAD_LOWERING(kLoad, param, builder) \
+ int32_t base = 0x1234; \
+ int32_t index = 0x5678; \
+ \
+ LowerGraph(graph()->NewNode(builder()->kLoad(param), Int32Constant(base), \
+ Int32Constant(index), start(), start()), \
+ MachineRepresentation::kWord64); \
+ \
+ Capture<Node*> high_word_load; \
LOAD_VERIFY(kLoad)
-TEST_F(Int64LoweringTest, Int64Load) { INT64_LOAD_LOWERING(Load); }
+TEST_F(Int64LoweringTest, Int64Load) {
+ INT64_LOAD_LOWERING(Load, MachineType::Int64(), machine);
+}
TEST_F(Int64LoweringTest, UnalignedInt64Load) {
- INT64_LOAD_LOWERING(UnalignedLoad);
+ INT64_LOAD_LOWERING(UnalignedLoad, MachineType::Int64(), machine);
+}
+
+TEST_F(Int64LoweringTest, Int64LoadFromObject) {
+ INT64_LOAD_LOWERING(LoadFromObject,
+ ObjectAccess(MachineType::Int64(), kNoWriteBarrier),
+ simplified);
+}
+
+TEST_F(Int64LoweringTest, Int64LoadImmutable) {
+ int32_t base = 0x1234;
+ int32_t index = 0x5678;
+
+ LowerGraph(graph()->NewNode(machine()->LoadImmutable(MachineType::Int64()),
+ Int32Constant(base), Int32Constant(index)),
+ MachineRepresentation::kWord64);
+
+ Capture<Node*> high_word_load;
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ Matcher<Node*> high_word_load_matcher =
+ IsLoadImmutable(MachineType::Int32(), IsInt32Constant(base),
+ IsInt32Add(IsInt32Constant(index), IsInt32Constant(0x4)));
+
+ EXPECT_THAT(
+ graph()->end()->InputAt(1),
+ IsReturn2(IsLoadImmutable(MachineType::Int32(), IsInt32Constant(base),
+ IsInt32Constant(index)),
+ AllOf(CaptureEq(&high_word_load), high_word_load_matcher),
+ start(), start()));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ Matcher<Node*> high_word_load_matcher = IsLoadImmutable(
+ MachineType::Int32(), IsInt32Constant(base), IsInt32Constant(index));
+
+ EXPECT_THAT(
+ graph()->end()->InputAt(1),
+ IsReturn2(IsLoadImmutable(
+ MachineType::Int32(), IsInt32Constant(base),
+ IsInt32Add(IsInt32Constant(index), IsInt32Constant(0x4))),
+ AllOf(CaptureEq(&high_word_load), high_word_load_matcher),
+ start(), start()));
+#endif
}
#if defined(V8_TARGET_LITTLE_ENDIAN)
@@ -225,7 +271,7 @@ TEST_F(Int64LoweringTest, UnalignedInt64Load) {
start()));
#endif
-#define INT64_STORE_LOWERING(kStore, kRep32, kRep64) \
+#define INT64_STORE_LOWERING(kStore, kRep32, kRep64, builder) \
int32_t base = 1111; \
int32_t index = 2222; \
int32_t return_value = 0x5555; \
@@ -233,7 +279,7 @@ TEST_F(Int64LoweringTest, UnalignedInt64Load) {
Signature<MachineRepresentation>::Builder sig_builder(zone(), 1, 0); \
sig_builder.AddReturn(MachineRepresentation::kWord32); \
\
- Node* store = graph()->NewNode(machine()->kStore(kRep64), \
+ Node* store = graph()->NewNode(builder()->kStore(kRep64), \
Int32Constant(base), Int32Constant(index), \
Int64Constant(value(0)), start(), start()); \
\
@@ -243,7 +289,7 @@ TEST_F(Int64LoweringTest, UnalignedInt64Load) {
\
NodeProperties::MergeControlToEnd(graph(), common(), ret); \
\
- Int64Lowering lowering(graph(), machine(), common(), zone(), \
+ Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(), \
sig_builder.Build()); \
lowering.LowerGraph(); \
\
@@ -254,7 +300,7 @@ TEST_F(Int64LoweringTest, Int64Store) {
WriteBarrierKind::kNoWriteBarrier);
const StoreRepresentation rep32(MachineRepresentation::kWord32,
WriteBarrierKind::kNoWriteBarrier);
- INT64_STORE_LOWERING(Store, rep32, rep64);
+ INT64_STORE_LOWERING(Store, rep32, rep64, machine);
}
TEST_F(Int64LoweringTest, Int32Store) {
@@ -277,7 +323,7 @@ TEST_F(Int64LoweringTest, Int32Store) {
NodeProperties::MergeControlToEnd(graph(), common(), ret);
- Int64Lowering lowering(graph(), machine(), common(), zone(),
+ Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(),
sig_builder.Build());
lowering.LowerGraph();
@@ -292,7 +338,13 @@ TEST_F(Int64LoweringTest, Int32Store) {
TEST_F(Int64LoweringTest, Int64UnalignedStore) {
const UnalignedStoreRepresentation rep64(MachineRepresentation::kWord64);
const UnalignedStoreRepresentation rep32(MachineRepresentation::kWord32);
- INT64_STORE_LOWERING(UnalignedStore, rep32, rep64);
+ INT64_STORE_LOWERING(UnalignedStore, rep32, rep64, machine);
+}
+
+TEST_F(Int64LoweringTest, Int64StoreToObject) {
+ const ObjectAccess access64(MachineType::Int64(), kNoWriteBarrier);
+ const ObjectAccess access32(MachineType::Int32(), kNoWriteBarrier);
+ INT64_STORE_LOWERING(StoreToObject, access32, access64, simplified);
}
TEST_F(Int64LoweringTest, Int64And) {
@@ -988,6 +1040,22 @@ TEST_F(Int64LoweringTest, LoopCycle) {
LowerGraph(load, MachineRepresentation::kWord64);
}
+TEST_F(Int64LoweringTest, LoopExitValue) {
+ Node* loop_header = graph()->NewNode(common()->Loop(1), graph()->start());
+ Node* loop_exit =
+ graph()->NewNode(common()->LoopExit(), loop_header, loop_header);
+ Node* exit =
+ graph()->NewNode(common()->LoopExitValue(MachineRepresentation::kWord64),
+ Int64Constant(value(2)), loop_exit);
+ LowerGraph(exit, MachineRepresentation::kWord64);
+ EXPECT_THAT(graph()->end()->InputAt(1),
+ IsReturn2(IsLoopExitValue(MachineRepresentation::kWord32,
+ IsInt32Constant(low_word_value(2))),
+ IsLoopExitValue(MachineRepresentation::kWord32,
+ IsInt32Constant(high_word_value(2))),
+ start(), start()));
+}
+
TEST_F(Int64LoweringTest, WasmBigIntSpecialCaseBigIntToI64) {
Node* target = Int32Constant(1);
Node* context = Int32Constant(2);
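
The Int64LoadImmutable expectation above spells out the usual lowering of a 64-bit load on a 32-bit target: two 32-bit loads, one at the original index and one at index + 4, with the low/high roles swapped between little- and big-endian. A sketch of the little-endian addressing only, with illustrative names and no relation to V8's Int64Lowering API, is:

    #include <cstdint>
    #include <cstring>

    // On a little-endian 32-bit target, a 64-bit load at (base, index) can be
    // expressed as a low-word load at index and a high-word load at index + 4.
    void LoadInt64AsPair(const uint8_t* base, int32_t index,
                         uint32_t* low_word, uint32_t* high_word) {
      std::memcpy(low_word, base + index, sizeof(uint32_t));
      std::memcpy(high_word, base + index + 4, sizeof(uint32_t));
    }
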
diff --git a/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc b/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
index ceed584d857..5a1f1ac8ab4 100644
--- a/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
+++ b/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
@@ -169,8 +169,8 @@ TEST_F(LinkageTailCall, MoreRegisterAndStackParametersCallee) {
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
int stack_param_delta = desc2->GetStackParameterDelta(desc1);
- // We might need to add one slot of padding to the callee arguments.
- int expected = kPadArguments ? 2 : 1;
+ // We might need to add padding slots to the callee arguments.
+ int expected = 1 + ArgumentPaddingSlots(1);
EXPECT_EQ(expected, stack_param_delta);
}
@@ -192,8 +192,8 @@ TEST_F(LinkageTailCall, MoreRegisterAndStackParametersCaller) {
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
int stack_param_delta = desc2->GetStackParameterDelta(desc1);
- // We might need to drop one slot of padding from the caller's arguments.
- int expected = kPadArguments ? -2 : -1;
+ // We might need to drop padding slots from the caller's arguments.
+ int expected = -1 - ArgumentPaddingSlots(1);
EXPECT_EQ(expected, stack_param_delta);
}
@@ -329,8 +329,8 @@ TEST_F(LinkageTailCall, MatchingStackParametersExtraCallerRegistersAndStack) {
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
int stack_param_delta = desc2->GetStackParameterDelta(desc1);
- // We might need to add one slot of padding to the callee arguments.
- int expected = kPadArguments ? 0 : -1;
+ // We might need to add padding slots to the callee arguments.
+ int expected = ArgumentPaddingSlots(1) - 1;
EXPECT_EQ(expected, stack_param_delta);
}
@@ -359,8 +359,8 @@ TEST_F(LinkageTailCall, MatchingStackParametersExtraCalleeRegistersAndStack) {
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
int stack_param_delta = desc2->GetStackParameterDelta(desc1);
- // We might need to drop one slot of padding from the caller's arguments.
- int expected = kPadArguments ? 0 : 1;
+ // We might need to drop padding slots from the caller's arguments.
+ int expected = 1 - ArgumentPaddingSlots(1);
EXPECT_EQ(expected, stack_param_delta);
}
diff --git a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
index e53050ad55f..edb29420849 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
@@ -306,6 +306,8 @@ const OptionalOperatorEntry kOptionalOperators[] = {
OPTIONAL_ENTRY(Float64RoundDown, 1, 0, 1), // --
OPTIONAL_ENTRY(Float64RoundTruncate, 1, 0, 1), // --
OPTIONAL_ENTRY(Float64RoundTiesAway, 1, 0, 1), // --
+ OPTIONAL_ENTRY(Float64Select, 3, 0, 1), // --
+ OPTIONAL_ENTRY(Float32Select, 3, 0, 1), // --
#undef OPTIONAL_ENTRY
};
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index aeceabeffa0..5305fef5741 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -102,6 +102,36 @@ class IsBranchMatcher final : public TestNodeMatcher {
const Matcher<Node*> control_matcher_;
};
+class IsLoopExitValueMatcher final : public TestNodeMatcher {
+ public:
+ IsLoopExitValueMatcher(const Matcher<MachineRepresentation>& rep_matcher,
+ const Matcher<Node*>& value_matcher)
+ : TestNodeMatcher(IrOpcode::kLoopExitValue),
+ rep_matcher_(rep_matcher),
+ value_matcher_(value_matcher) {}
+
+ void DescribeTo(std::ostream* os) const final {
+ TestNodeMatcher::DescribeTo(os);
+ *os << ") whose rep (";
+ rep_matcher_.DescribeTo(os);
+ *os << " and value (";
+ value_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(LoopExitValueRepresentationOf(node->op()),
+ "representation", rep_matcher_, listener)) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "value",
+ value_matcher_, listener);
+ }
+
+ private:
+ const Matcher<MachineRepresentation> rep_matcher_;
+ const Matcher<Node*> value_matcher_;
+};
+
class IsSwitchMatcher final : public TestNodeMatcher {
public:
IsSwitchMatcher(const Matcher<Node*>& value_matcher,
@@ -1123,10 +1153,47 @@ LOAD_MATCHER(UnalignedLoad)
LOAD_MATCHER(PoisonedLoad)
LOAD_MATCHER(LoadFromObject)
-#define STORE_MATCHER(kStore) \
+class IsLoadImmutableMatcher final : public TestNodeMatcher {
+ public:
+ IsLoadImmutableMatcher(const Matcher<LoadRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher)
+ : TestNodeMatcher(IrOpcode::kLoadImmutable),
+ rep_matcher_(rep_matcher),
+ base_matcher_(base_matcher),
+ index_matcher_(index_matcher) {}
+
+ void DescribeTo(std::ostream* os) const final {
+ TestNodeMatcher::DescribeTo(os);
+ *os << " whose rep (";
+ rep_matcher_.DescribeTo(os);
+ *os << "), base (";
+ base_matcher_.DescribeTo(os);
+ *os << ") and index (";
+ index_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ LoadRepresentation rep = LoadRepresentationOf(node->op());
+ return TestNodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(rep, "rep", rep_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
+ base_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), "index",
+ index_matcher_, listener);
+ }
+
+ private:
+ const Matcher<LoadRepresentation> rep_matcher_;
+ const Matcher<Node*> base_matcher_;
+ const Matcher<Node*> index_matcher_;
+};
+
+#define STORE_MATCHER(kStore, representation) \
class Is##kStore##Matcher final : public TestNodeMatcher { \
public: \
- Is##kStore##Matcher(const Matcher<kStore##Representation>& rep_matcher, \
+ Is##kStore##Matcher(const Matcher<representation>& rep_matcher, \
const Matcher<Node*>& base_matcher, \
const Matcher<Node*>& index_matcher, \
const Matcher<Node*>& value_matcher, \
@@ -1168,9 +1235,8 @@ LOAD_MATCHER(LoadFromObject)
control_node = NodeProperties::GetControlInput(node); \
} \
return (TestNodeMatcher::MatchAndExplain(node, listener) && \
- PrintMatchAndExplain( \
- OpParameter<kStore##Representation>(node->op()), "rep", \
- rep_matcher_, listener) && \
+ PrintMatchAndExplain(OpParameter<representation>(node->op()), \
+ "rep", rep_matcher_, listener) && \
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), \
"base", base_matcher_, listener) && \
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), \
@@ -1184,7 +1250,7 @@ LOAD_MATCHER(LoadFromObject)
} \
\
private: \
- const Matcher<kStore##Representation> rep_matcher_; \
+ const Matcher<representation> rep_matcher_; \
const Matcher<Node*> base_matcher_; \
const Matcher<Node*> index_matcher_; \
const Matcher<Node*> value_matcher_; \
@@ -1192,8 +1258,9 @@ LOAD_MATCHER(LoadFromObject)
const Matcher<Node*> control_matcher_; \
};
-STORE_MATCHER(Store)
-STORE_MATCHER(UnalignedStore)
+STORE_MATCHER(Store, StoreRepresentation)
+STORE_MATCHER(UnalignedStore, UnalignedStoreRepresentation)
+STORE_MATCHER(StoreToObject, ObjectAccess)
class IsStackSlotMatcher final : public TestNodeMatcher {
public:
@@ -1556,6 +1623,10 @@ Matcher<Node*> IsLoop(const Matcher<Node*>& control0_matcher,
control1_matcher, control2_matcher));
}
+Matcher<Node*> IsLoopExitValue(const Matcher<MachineRepresentation> rep_matcher,
+ const Matcher<Node*>& value_matcher) {
+ return MakeMatcher(new IsLoopExitValueMatcher(rep_matcher, value_matcher));
+}
Matcher<Node*> IsIfTrue(const Matcher<Node*>& control_matcher) {
return MakeMatcher(new IsControl1Matcher(IrOpcode::kIfTrue, control_matcher));
@@ -2062,6 +2133,13 @@ Matcher<Node*> IsLoadFromObject(const Matcher<LoadRepresentation>& rep_matcher,
control_matcher));
}
+Matcher<Node*> IsLoadImmutable(const Matcher<LoadRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher) {
+ return MakeMatcher(
+ new IsLoadImmutableMatcher(rep_matcher, base_matcher, index_matcher));
+}
+
Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
@@ -2083,6 +2161,17 @@ Matcher<Node*> IsUnalignedStore(
control_matcher));
}
+Matcher<Node*> IsStoreToObject(const Matcher<ObjectAccess>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsStoreToObjectMatcher(
+ rep_matcher, base_matcher, index_matcher, value_matcher, effect_matcher,
+ control_matcher));
+}
+
Matcher<Node*> IsStackSlot(
const Matcher<StackSlotRepresentation>& rep_matcher) {
return MakeMatcher(new IsStackSlotMatcher(rep_matcher));
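A note on the matchers added above: IsLoopExitValue, IsLoadImmutable and IsStoreToObject all follow the same gmock idiom — a MatcherInterface implementation with DescribeTo/MatchAndExplain, wrapped by a small factory that calls MakeMatcher. The following is a minimal, self-contained sketch of that idiom; ToyIsEvenMatcher and ToyIsEven are invented names for illustration and are not part of V8 or this patch.

#include "gmock/gmock.h"
#include "gtest/gtest.h"

// Describes itself and explains match failures, mirroring the structure of
// the node matchers above.
class ToyIsEvenMatcher final : public ::testing::MatcherInterface<int> {
 public:
  void DescribeTo(std::ostream* os) const final { *os << "is even"; }
  bool MatchAndExplain(int n,
                       ::testing::MatchResultListener* listener) const final {
    *listener << "remainder mod 2 is " << (n % 2);
    return n % 2 == 0;
  }
};

// Factory in the style of IsLoadImmutable()/IsStoreToObject().
::testing::Matcher<int> ToyIsEven() {
  return ::testing::MakeMatcher(new ToyIsEvenMatcher());
}

TEST(ToyMatcherTest, EvenNumbersMatch) { EXPECT_THAT(4, ToyIsEven()); }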
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index 42d6db82cf2..d9afb369638 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -58,6 +58,8 @@ Matcher<Node*> IsLoop(const Matcher<Node*>& control0_matcher,
Matcher<Node*> IsLoop(const Matcher<Node*>& control0_matcher,
const Matcher<Node*>& control1_matcher,
const Matcher<Node*>& control2_matcher);
+Matcher<Node*> IsLoopExitValue(const Matcher<MachineRepresentation> rep_matcher,
+ const Matcher<Node*>& value_matcher);
Matcher<Node*> IsIfTrue(const Matcher<Node*>& control_matcher);
Matcher<Node*> IsIfFalse(const Matcher<Node*>& control_matcher);
Matcher<Node*> IsIfSuccess(const Matcher<Node*>& control_matcher);
@@ -341,6 +343,9 @@ Matcher<Node*> IsLoadFromObject(const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& index_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsLoadImmutable(const Matcher<LoadRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher);
Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
@@ -352,6 +357,12 @@ Matcher<Node*> IsUnalignedStore(
const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
const Matcher<Node*>& value_matcher, const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsStoreToObject(const Matcher<ObjectAccess>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
Matcher<Node*> IsStackSlot(const Matcher<StackSlotRepresentation>& rep_matcher);
Matcher<Node*> IsWord32Popcnt(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsWord32And(const Matcher<Node*>& lhs_matcher,
diff --git a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
index fc04f419a01..4dcbbf2eb8b 100644
--- a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
@@ -4,10 +4,15 @@
#include <limits>
+#include "src/common/globals.h"
#include "src/compiler/node-matchers.h"
#include "src/objects/objects-inl.h"
#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/simd-shuffle.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace internal {
namespace compiler {
@@ -1888,6 +1893,9 @@ TEST_F(InstructionSelectorTest, LoadAndWord64ShiftRight32) {
}
}
+// -----------------------------------------------------------------------------
+// SIMD.
+
TEST_F(InstructionSelectorTest, SIMDSplatZero) {
// Test optimization for splat of constant 0.
// {i8x16,i16x8,i32x4,i64x2}.splat(const(0)) -> v128.zero().
@@ -1935,6 +1943,286 @@ TEST_F(InstructionSelectorTest, SIMDSplatZero) {
}
}
+#if V8_ENABLE_WEBASSEMBLY
+struct ArchShuffle {
+ uint8_t shuffle[kSimd128Size];
+ ArchOpcode arch_opcode;
+ size_t input_count;
+};
+
+static constexpr ArchShuffle kArchShuffles[] = {
+ // These are architecture-specific shuffles defined in
+ // instruction-selector-x64.cc arch_shuffles.
+ {
+ {0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23},
+ kX64S64x2UnpackLow,
+ 2,
+ },
+ {
+ {8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31},
+ kX64S64x2UnpackHigh,
+ 2,
+ },
+ {
+ {0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
+ kX64S32x4UnpackLow,
+ 2,
+ },
+ {
+ {8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
+ kX64S32x4UnpackHigh,
+ 2,
+ },
+ {
+ {0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
+ kX64S16x8UnpackLow,
+ 2,
+ },
+ {
+ {8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
+ kX64S16x8UnpackHigh,
+ 2,
+ },
+ {
+ {0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
+ kX64S8x16UnpackLow,
+ 2,
+ },
+ {
+ {8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
+ kX64S8x16UnpackHigh,
+ 2,
+ },
+ {
+ {0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
+ kX64S16x8UnzipLow,
+ 2,
+ },
+ {
+ {2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
+ kX64S16x8UnzipHigh,
+ 2,
+ },
+ {
+ {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
+ kX64S8x16UnzipLow,
+ 2,
+ },
+ {
+ {1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
+ kX64S8x16UnzipHigh,
+ 2,
+ },
+ {
+ {0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
+ kX64S8x16TransposeLow,
+ 2,
+ },
+ {
+ {1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
+ kX64S8x16TransposeHigh,
+ 2,
+ },
+ {
+ {7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8},
+ kX64S8x8Reverse,
+ 1,
+ },
+ {
+ {3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12},
+ kX64S8x4Reverse,
+ 1,
+ },
+ {
+ {1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
+ kX64S8x2Reverse,
+ 1,
+ },
+ // These are matched by TryMatchConcat && TryMatch32x4Rotate.
+ {
+ {4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3},
+ kX64S32x4Rotate,
+ 2,
+ },
+ {
+ {8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7},
+ kX64S32x4Rotate,
+ 2,
+ },
+ {
+ {12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+ kX64S32x4Rotate,
+ 2,
+ },
+ // These are matched by TryMatchConcat && !TryMatch32x4Rotate.
+ {
+ {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2},
+ kX64S8x16Alignr,
+ 3,
+ },
+ {
+ {2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1},
+ kX64S8x16Alignr,
+ 3,
+ },
+ {
+ {2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17},
+ kX64S8x16Alignr,
+ 3,
+ },
+ // These are matched by TryMatch32x4Shuffle && is_swizzle.
+ {
+ {0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15},
+ kX64S32x4Swizzle,
+ 2,
+ },
+ {
+ {0, 1, 2, 3, 4, 5, 6, 7, 12, 13, 14, 15, 8, 9, 10, 11},
+ kX64S32x4Swizzle,
+ 2,
+ },
+ // These are matched by TryMatch32x4Shuffle && !is_swizzle && TryMatchBlend.
+ {
+ {0, 1, 2, 3, 20, 21, 22, 23, 8, 9, 10, 11, 28, 29, 30, 31},
+ kX64S16x8Blend,
+ 3,
+ },
+ {
+ {16, 17, 18, 19, 4, 5, 6, 7, 24, 25, 26, 27, 12, 13, 14, 15},
+ kX64S16x8Blend,
+ 3,
+ },
+ // These are matched by TryMatch32x4Shuffle && !is_swizzle &&
+ // TryMatchShufps.
+ {
+ {0, 1, 2, 3, 8, 9, 10, 11, 28, 29, 30, 31, 28, 29, 30, 31},
+ kX64Shufps,
+ 3,
+ },
+ {
+ {8, 9, 10, 11, 0, 1, 2, 3, 28, 29, 30, 31, 28, 29, 30, 31},
+ kX64Shufps,
+ 3,
+ },
+ // These are matched by TryMatch32x4Shuffle && !is_swizzle.
+ {
+ {28, 29, 30, 31, 0, 1, 2, 3, 28, 29, 30, 31, 28, 29, 30, 31},
+ kX64S32x4Shuffle,
+ 4,
+ },
+ // These are matched by TryMatch16x8Shuffle && TryMatchBlend.
+ {
+ {16, 17, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 12, 13, 14, 15},
+ kX64S16x8Blend,
+ 3,
+ },
+ // These are matched by TryMatch16x8Shuffle && TryMatchSplat<8>.
+ {
+ {2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3},
+ kX64S16x8Dup,
+ 2,
+ },
+ // These are matched by TryMatch16x8Shuffle && TryMatch16x8HalfShuffle.
+ {
+ {6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
+ kX64S16x8HalfShuffle1,
+ 3,
+ },
+ {
+ {6, 7, 4, 5, 2, 3, 0, 1, 30, 31, 28, 29, 26, 27, 24, 25},
+ kX64S16x8HalfShuffle2,
+ 5,
+ },
+ // These are matched by TryMatchSplat<16>.
+ {
+ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
+ kX64S8x16Dup,
+ 2,
+ },
+ // Generic shuffle that only uses 1 input.
+ {
+ {1, 15, 2, 14, 3, 13, 4, 12, 5, 11, 6, 10, 7, 9, 8},
+ kX64I8x16Shuffle,
+ 5,
+ },
+ // Generic shuffle that uses both inputs.
+ {
+ {1, 31, 2, 14, 3, 13, 4, 12, 5, 11, 6, 10, 7, 9, 8},
+ kX64I8x16Shuffle,
+ 6,
+ },
+};
+
+using InstructionSelectorSIMDArchShuffleTest =
+ InstructionSelectorTestWithParam<ArchShuffle>;
+
+TEST_P(InstructionSelectorSIMDArchShuffleTest, SIMDArchShuffle) {
+ MachineType type = MachineType::Simd128();
+ {
+ // Tests various shuffle optimizations.
+ StreamBuilder m(this, type, type, type);
+ auto param = GetParam();
+ auto shuffle = param.shuffle;
+ const Operator* op = m.machine()->I8x16Shuffle(shuffle);
+ Node* n = m.AddNode(op, m.Parameter(0), m.Parameter(1));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(param.arch_opcode, s[0]->arch_opcode());
+ ASSERT_EQ(param.input_count, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorSIMDArchShuffleTest,
+ ::testing::ValuesIn(kArchShuffles));
+#endif // V8_ENABLE_WEBASSEMBLY
+
+struct SwizzleConstants {
+ uint8_t shuffle[kSimd128Size];
+ bool omit_add;
+};
+
+static constexpr SwizzleConstants kSwizzleConstants[] = {
+ {
+ // all lanes < kSimd128Size
+ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
+ true,
+ },
+ {
+ // lanes that are >= kSimd128Size have top bit set
+ {12, 13, 14, 15, 0x90, 0x91, 0x92, 0x93, 0xA0, 0xA1, 0xA2, 0xA3, 0xFC,
+ 0xFD, 0xFE, 0xFF},
+ true,
+ },
+ {
+ {12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27},
+ false,
+ },
+};
+
+using InstructionSelectorSIMDSwizzleConstantTest =
+ InstructionSelectorTestWithParam<SwizzleConstants>;
+
+TEST_P(InstructionSelectorSIMDSwizzleConstantTest, SimdSwizzleConstant) {
+ // Test optimization of swizzle with constant indices.
+ auto param = GetParam();
+ StreamBuilder m(this, MachineType::Simd128(), MachineType::Simd128());
+ Node* const c = m.S128Const(param.shuffle);
+ Node* swizzle = m.AddNode(m.machine()->I8x16Swizzle(), m.Parameter(0), c);
+ m.Return(swizzle);
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ ASSERT_EQ(kX64I8x16Swizzle, s[1]->arch_opcode());
+ ASSERT_EQ(param.omit_add, s[1]->misc());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorSIMDSwizzleConstantTest,
+ ::testing::ValuesIn(kSwizzleConstants));
+
} // namespace compiler
} // namespace internal
} // namespace v8
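The kArchShuffles and kSwizzleConstants tests added above rely on gtest value-parameterized suites: a static table of case structs, a TestWithParam fixture, and INSTANTIATE_TEST_SUITE_P to expand the table into one test per entry. A minimal, self-contained sketch of that pattern follows; it is independent of V8's StreamBuilder, and ToyCase/ToyTableTest are invented names.

#include "gtest/gtest.h"

struct ToyCase {
  int input;
  int expected_square;
};

static constexpr ToyCase kToyCases[] = {
    {2, 4},
    {3, 9},
    {5, 25},
};

using ToyTableTest = ::testing::TestWithParam<ToyCase>;

TEST_P(ToyTableTest, SquareMatchesTable) {
  const ToyCase& c = GetParam();
  EXPECT_EQ(c.expected_square, c.input * c.input);
}

// One test instance per table entry, as done for kArchShuffles above.
INSTANTIATE_TEST_SUITE_P(AllToyCases, ToyTableTest,
                         ::testing::ValuesIn(kToyCases));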
diff --git a/deps/v8/test/unittests/execution/microtask-queue-unittest.cc b/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
index 84e29f25969..7db92611793 100644
--- a/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
+++ b/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
@@ -41,6 +41,7 @@ class WithFinalizationRegistryMixin : public TMixin {
static void SetUpTestCase() {
CHECK_NULL(save_flags_);
save_flags_ = new SaveFlags();
+ FLAG_harmony_weak_refs = true;
FLAG_expose_gc = true;
FLAG_allow_natives_syntax = true;
TMixin::SetUpTestCase();
diff --git a/deps/v8/test/unittests/heap/cppgc/compactor-unittest.cc b/deps/v8/test/unittests/heap/cppgc/compactor-unittest.cc
index 9298a775411..893b55f24bd 100644
--- a/deps/v8/test/unittests/heap/cppgc/compactor-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/compactor-unittest.cc
@@ -76,13 +76,6 @@ class CompactorTest : public testing::TestWithPlatform {
EXPECT_TRUE(compactor().IsEnabledForTesting());
}
- void CancelCompaction() {
- bool cancelled = compactor().CancelIfShouldNotCompact(
- GarbageCollector::Config::MarkingType::kAtomic,
- GarbageCollector::Config::StackState::kMayContainHeapPointers);
- EXPECT_TRUE(cancelled);
- }
-
void FinishCompaction() { compactor().CompactSpacesIfEnabled(); }
void StartGC() {
@@ -134,11 +127,6 @@ TEST_F(CompactorTest, NothingToCompact) {
heap()->stats_collector()->NotifySweepingCompleted();
}
-TEST_F(CompactorTest, CancelledNothingToCompact) {
- StartCompaction();
- CancelCompaction();
-}
-
TEST_F(CompactorTest, NonEmptySpaceAllLive) {
static constexpr int kNumObjects = 10;
Persistent<CompactableHolder<kNumObjects>> holder =
diff --git a/deps/v8/test/unittests/heap/cppgc/concurrent-marking-unittest.cc b/deps/v8/test/unittests/heap/cppgc/concurrent-marking-unittest.cc
index c4aea68f156..3a8d0307d02 100644
--- a/deps/v8/test/unittests/heap/cppgc/concurrent-marking-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/concurrent-marking-unittest.cc
@@ -46,11 +46,6 @@ class ConcurrentMarkingTest : public testing::TestWithHeap {
return marker->IncrementalMarkingStepForTesting(stack_state);
}
- void FinishSteps(Config::StackState stack_state) {
- while (!SingleStep(stack_state)) {
- }
- }
-
void FinishGC() {
Heap* heap = Heap::From(GetHeap());
heap->marker()->SetMainThreadMarkingDisabledForTesting(false);
@@ -114,7 +109,7 @@ TEST_F(ConcurrentMarkingTest, MarkingObjects) {
*last_object = MakeGarbageCollected<GCed>(GetAllocationHandle());
last_object = &(*last_object)->child_;
}
- // Use SignleStep to re-post concurrent jobs.
+ // Use SingleStep to re-post concurrent jobs.
SingleStep(Config::StackState::kNoHeapPointers);
}
FinishGC();
@@ -133,7 +128,7 @@ TEST_F(ConcurrentMarkingTest, MarkingInConstructionObjects) {
last_object = &(*last_object)->child_;
});
}
- // Use SignleStep to re-post concurrent jobs.
+ // Use SingleStep to re-post concurrent jobs.
SingleStep(Config::StackState::kNoHeapPointers);
}
FinishGC();
@@ -149,7 +144,7 @@ TEST_F(ConcurrentMarkingTest, MarkingMixinObjects) {
*last_object = MakeGarbageCollected<GCedWithMixin>(GetAllocationHandle());
last_object = &(*last_object)->child_;
}
- // Use SignleStep to re-post concurrent jobs.
+ // Use SingleStep to re-post concurrent jobs.
SingleStep(Config::StackState::kNoHeapPointers);
}
FinishGC();
diff --git a/deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc b/deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc
index 4541216b1f7..b03f3388d5d 100644
--- a/deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc
@@ -132,7 +132,7 @@ class ConcurrentSweeperTest : public testing::TestWithHeap {
// The corresponding page could be removed.
if (!backend->Lookup(static_cast<ConstAddress>(object))) continue;
- if (!freelist.Contains({object, 0})) return false;
+ if (!freelist.ContainsForTesting({object, 0})) return false;
}
return true;
diff --git a/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc b/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc
index 33adc71ca64..32a5929fe40 100644
--- a/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc
@@ -5,6 +5,7 @@
#include "include/cppgc/ephemeron-pair.h"
#include "include/cppgc/allocation.h"
+#include "include/cppgc/garbage-collected.h"
#include "include/cppgc/persistent.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/marking-visitor.h"
@@ -21,7 +22,7 @@ class GCed : public GarbageCollected<GCed> {
void Trace(cppgc::Visitor*) const {}
};
-class EphemeronHolder : public GarbageCollected<GCed> {
+class EphemeronHolder : public GarbageCollected<EphemeronHolder> {
public:
EphemeronHolder(GCed* key, GCed* value) : ephemeron_pair_(key, value) {}
void Trace(cppgc::Visitor* visitor) const { visitor->Trace(ephemeron_pair_); }
@@ -168,5 +169,75 @@ TEST_F(EphemeronPairGCTest, EphemeronPairValueIsCleared) {
EXPECT_EQ(nullptr, holder->ephemeron_pair().value.Get());
}
+namespace {
+
+class Mixin : public GarbageCollectedMixin {
+ public:
+ void Trace(Visitor* v) const override {}
+};
+
+class OtherMixin : public GarbageCollectedMixin {
+ public:
+ void Trace(Visitor* v) const override {}
+};
+
+class GCedWithMixin : public GarbageCollected<GCedWithMixin>,
+ public OtherMixin,
+ public Mixin {
+ public:
+ void Trace(Visitor* v) const override {
+ OtherMixin::Trace(v);
+ Mixin::Trace(v);
+ }
+};
+
+class EphemeronHolderWithMixins
+ : public GarbageCollected<EphemeronHolderWithMixins> {
+ public:
+ EphemeronHolderWithMixins(Mixin* key, Mixin* value)
+ : ephemeron_pair_(key, value) {}
+ void Trace(cppgc::Visitor* visitor) const { visitor->Trace(ephemeron_pair_); }
+
+ const EphemeronPair<Mixin, Mixin>& ephemeron_pair() const {
+ return ephemeron_pair_;
+ }
+
+ private:
+ EphemeronPair<Mixin, Mixin> ephemeron_pair_;
+};
+
+} // namespace
+
+TEST_F(EphemeronPairTest, EphemeronPairWithMixinKey) {
+ GCedWithMixin* key =
+ MakeGarbageCollected<GCedWithMixin>(GetAllocationHandle());
+ GCedWithMixin* value =
+ MakeGarbageCollected<GCedWithMixin>(GetAllocationHandle());
+ Persistent<EphemeronHolderWithMixins> holder =
+ MakeGarbageCollected<EphemeronHolderWithMixins>(GetAllocationHandle(),
+ key, value);
+ EXPECT_NE(static_cast<void*>(key), holder->ephemeron_pair().key.Get());
+ EXPECT_NE(static_cast<void*>(value), holder->ephemeron_pair().value.Get());
+ InitializeMarker(*Heap::From(GetHeap()), GetPlatformHandle().get());
+ FinishSteps();
+ EXPECT_FALSE(HeapObjectHeader::FromPayload(value).IsMarked());
+ EXPECT_TRUE(HeapObjectHeader::FromPayload(key).TryMarkAtomic());
+ FinishMarking();
+ EXPECT_TRUE(HeapObjectHeader::FromPayload(value).IsMarked());
+}
+
+TEST_F(EphemeronPairTest, EphemeronPairWithEmptyMixinValue) {
+ GCedWithMixin* key =
+ MakeGarbageCollected<GCedWithMixin>(GetAllocationHandle());
+ Persistent<EphemeronHolderWithMixins> holder =
+ MakeGarbageCollected<EphemeronHolderWithMixins>(GetAllocationHandle(),
+ key, nullptr);
+ EXPECT_NE(static_cast<void*>(key), holder->ephemeron_pair().key.Get());
+ EXPECT_TRUE(HeapObjectHeader::FromPayload(key).TryMarkAtomic());
+ InitializeMarker(*Heap::From(GetHeap()), GetPlatformHandle().get());
+ FinishSteps();
+ FinishMarking();
+}
+
} // namespace internal
} // namespace cppgc
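Background for the mixin tests added above: a cppgc EphemeronPair keeps its value alive only as long as its key is reachable, and the holding object must forward Trace() to the pair so the marker learns about that dependency. The sketch below shows the embedder-side pattern these tests exercise under that assumption; Key, Value and Cache are illustrative names, not part of the patch.

#include "include/cppgc/allocation.h"
#include "include/cppgc/ephemeron-pair.h"
#include "include/cppgc/garbage-collected.h"
#include "include/cppgc/visitor.h"

class Key final : public cppgc::GarbageCollected<Key> {
 public:
  void Trace(cppgc::Visitor*) const {}
};

class Value final : public cppgc::GarbageCollected<Value> {
 public:
  void Trace(cppgc::Visitor*) const {}
};

class Cache final : public cppgc::GarbageCollected<Cache> {
 public:
  Cache(Key* key, Value* value) : pair_(key, value) {}
  // Tracing the pair registers the key->value dependency with the marker;
  // the value is only marked once the key has been marked.
  void Trace(cppgc::Visitor* visitor) const { visitor->Trace(pair_); }

 private:
  cppgc::EphemeronPair<Key, Value> pair_;
};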
diff --git a/deps/v8/test/unittests/heap/cppgc/explicit-management-unittest.cc b/deps/v8/test/unittests/heap/cppgc/explicit-management-unittest.cc
new file mode 100644
index 00000000000..6ca8569b3f0
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/explicit-management-unittest.cc
@@ -0,0 +1,194 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/explicit-management.h"
+
+#include "include/cppgc/garbage-collected.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-base.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap-space.h"
+#include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/sweeper.h"
+#include "test/unittests/heap/cppgc/tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+class ExplicitManagementTest : public testing::TestWithHeap {
+ public:
+ size_t AllocatedObjectSize() const {
+ auto* heap = Heap::From(GetHeap());
+ heap->stats_collector()->NotifySafePointForTesting();
+ return heap->stats_collector()->allocated_object_size();
+ }
+
+ void ResetLinearAllocationBuffers() const {
+ return Heap::From(GetHeap())
+ ->object_allocator()
+ .ResetLinearAllocationBuffers();
+ }
+
+ void TearDown() override {
+ PreciseGC();
+ TestWithHeap::TearDown();
+ }
+};
+
+namespace {
+
+class DynamicallySized final : public GarbageCollected<DynamicallySized> {
+ public:
+ void Trace(Visitor*) const {}
+};
+
+} // namespace
+
+TEST_F(ExplicitManagementTest, FreeRegularObjectToLAB) {
+ auto* o =
+ MakeGarbageCollected<DynamicallySized>(GetHeap()->GetAllocationHandle());
+ const auto* space = NormalPageSpace::From(BasePage::FromPayload(o)->space());
+ const auto& lab = space->linear_allocation_buffer();
+ auto& header = HeapObjectHeader::FromPayload(o);
+ const size_t size = header.GetSize();
+ Address needle = reinterpret_cast<Address>(&header);
+ // Test checks freeing to LAB.
+ ASSERT_EQ(lab.start(), header.PayloadEnd());
+ const size_t lab_size_before_free = lab.size();
+ const size_t allocated_size_before = AllocatedObjectSize();
+ subtle::FreeUnreferencedObject(o);
+ EXPECT_EQ(lab.start(), reinterpret_cast<Address>(needle));
+ EXPECT_EQ(lab_size_before_free + size, lab.size());
+ // LAB is included in allocated object size, so no change is expected.
+ EXPECT_EQ(allocated_size_before, AllocatedObjectSize());
+ EXPECT_FALSE(space->free_list().ContainsForTesting({needle, size}));
+}
+
+TEST_F(ExplicitManagementTest, FreeRegularObjectToFreeList) {
+ auto* o =
+ MakeGarbageCollected<DynamicallySized>(GetHeap()->GetAllocationHandle());
+ const auto* space = NormalPageSpace::From(BasePage::FromPayload(o)->space());
+ const auto& lab = space->linear_allocation_buffer();
+ auto& header = HeapObjectHeader::FromPayload(o);
+ const size_t size = header.GetSize();
+ Address needle = reinterpret_cast<Address>(&header);
+ // Test checks freeing to free list.
+ ResetLinearAllocationBuffers();
+ ASSERT_EQ(lab.start(), nullptr);
+ const size_t allocated_size_before = AllocatedObjectSize();
+ subtle::FreeUnreferencedObject(o);
+ EXPECT_EQ(lab.start(), nullptr);
+ EXPECT_EQ(allocated_size_before - size, AllocatedObjectSize());
+ EXPECT_TRUE(space->free_list().ContainsForTesting({needle, size}));
+}
+
+TEST_F(ExplicitManagementTest, FreeLargeObject) {
+ auto* o = MakeGarbageCollected<DynamicallySized>(
+ GetHeap()->GetAllocationHandle(),
+ AdditionalBytes(kLargeObjectSizeThreshold));
+ const auto* page = BasePage::FromPayload(o);
+ auto* heap = page->heap();
+ ASSERT_TRUE(page->is_large());
+ ConstAddress needle = reinterpret_cast<ConstAddress>(o);
+ const size_t size = LargePage::From(page)->PayloadSize();
+ EXPECT_TRUE(heap->page_backend()->Lookup(needle));
+ const size_t allocated_size_before = AllocatedObjectSize();
+ subtle::FreeUnreferencedObject(o);
+ EXPECT_FALSE(heap->page_backend()->Lookup(needle));
+ EXPECT_EQ(allocated_size_before - size, AllocatedObjectSize());
+}
+
+TEST_F(ExplicitManagementTest, FreeBailsOutDuringGC) {
+ const size_t snapshot_before = AllocatedObjectSize();
+ auto* o =
+ MakeGarbageCollected<DynamicallySized>(GetHeap()->GetAllocationHandle());
+ auto* heap = BasePage::FromPayload(o)->heap();
+ heap->SetInAtomicPauseForTesting(true);
+ const size_t allocated_size_before = AllocatedObjectSize();
+ subtle::FreeUnreferencedObject(o);
+ EXPECT_EQ(allocated_size_before, AllocatedObjectSize());
+ heap->SetInAtomicPauseForTesting(false);
+ ResetLinearAllocationBuffers();
+ subtle::FreeUnreferencedObject(o);
+ EXPECT_EQ(snapshot_before, AllocatedObjectSize());
+}
+
+TEST_F(ExplicitManagementTest, FreeNull) {
+ DynamicallySized* o = nullptr;
+ // Noop.
+ subtle::FreeUnreferencedObject(o);
+}
+
+TEST_F(ExplicitManagementTest, GrowAtLAB) {
+ auto* o =
+ MakeGarbageCollected<DynamicallySized>(GetHeap()->GetAllocationHandle());
+ auto& header = HeapObjectHeader::FromPayload(o);
+ constexpr size_t size_of_o = sizeof(DynamicallySized);
+ constexpr size_t kFirstDelta = 8;
+ EXPECT_TRUE(subtle::Resize(*o, AdditionalBytes(kFirstDelta)));
+ EXPECT_EQ(RoundUp<kAllocationGranularity>(size_of_o + kFirstDelta),
+ header.ObjectSize());
+ constexpr size_t kSecondDelta = 9;
+ EXPECT_TRUE(subtle::Resize(*o, AdditionalBytes(kSecondDelta)));
+ EXPECT_EQ(RoundUp<kAllocationGranularity>(size_of_o + kSecondDelta),
+ header.ObjectSize());
+ // Second round didn't actually grow the object because alignment restrictions
+ // already forced it to be large enough on the first Grow().
+ EXPECT_EQ(RoundUp<kAllocationGranularity>(size_of_o + kFirstDelta),
+ RoundUp<kAllocationGranularity>(size_of_o + kSecondDelta));
+ constexpr size_t kThirdDelta = 16;
+ EXPECT_TRUE(subtle::Resize(*o, AdditionalBytes(kThirdDelta)));
+ EXPECT_EQ(RoundUp<kAllocationGranularity>(size_of_o + kThirdDelta),
+ header.ObjectSize());
+}
+
+TEST_F(ExplicitManagementTest, GrowShrinkAtLAB) {
+ auto* o =
+ MakeGarbageCollected<DynamicallySized>(GetHeap()->GetAllocationHandle());
+ auto& header = HeapObjectHeader::FromPayload(o);
+ constexpr size_t size_of_o = sizeof(DynamicallySized);
+ constexpr size_t kDelta = 27;
+ EXPECT_TRUE(subtle::Resize(*o, AdditionalBytes(kDelta)));
+ EXPECT_EQ(RoundUp<kAllocationGranularity>(size_of_o + kDelta),
+ header.ObjectSize());
+ EXPECT_TRUE(subtle::Resize(*o, AdditionalBytes(0)));
+ EXPECT_EQ(RoundUp<kAllocationGranularity>(size_of_o), header.ObjectSize());
+}
+
+TEST_F(ExplicitManagementTest, ShrinkFreeList) {
+ auto* o = MakeGarbageCollected<DynamicallySized>(
+ GetHeap()->GetAllocationHandle(),
+ AdditionalBytes(ObjectAllocator::kSmallestSpaceSize));
+ const auto* space = NormalPageSpace::From(BasePage::FromPayload(o)->space());
+ // Force returning to free list by removing the LAB.
+ ResetLinearAllocationBuffers();
+ auto& header = HeapObjectHeader::FromPayload(o);
+ constexpr size_t size_of_o = sizeof(DynamicallySized);
+ EXPECT_TRUE(subtle::Resize(*o, AdditionalBytes(0)));
+ EXPECT_EQ(RoundUp<kAllocationGranularity>(size_of_o), header.ObjectSize());
+ EXPECT_TRUE(space->free_list().ContainsForTesting(
+ {header.PayloadEnd(), ObjectAllocator::kSmallestSpaceSize}));
+}
+
+TEST_F(ExplicitManagementTest, ShrinkFreeListBailoutAvoidFragmentation) {
+ auto* o = MakeGarbageCollected<DynamicallySized>(
+ GetHeap()->GetAllocationHandle(),
+ AdditionalBytes(ObjectAllocator::kSmallestSpaceSize - 1));
+ const auto* space = NormalPageSpace::From(BasePage::FromPayload(o)->space());
+ // Force returning to free list by removing the LAB.
+ ResetLinearAllocationBuffers();
+ auto& header = HeapObjectHeader::FromPayload(o);
+ constexpr size_t size_of_o = sizeof(DynamicallySized);
+ EXPECT_TRUE(subtle::Resize(*o, AdditionalBytes(0)));
+ EXPECT_EQ(RoundUp<kAllocationGranularity>(
+ size_of_o + ObjectAllocator::kSmallestSpaceSize - 1),
+ header.ObjectSize());
+ EXPECT_FALSE(space->free_list().ContainsForTesting(
+ {header.Payload() + RoundUp<kAllocationGranularity>(size_of_o),
+ ObjectAllocator::kSmallestSpaceSize - 1}));
+}
+
+} // namespace internal
+} // namespace cppgc
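The new explicit-management-unittest.cc above exercises cppgc's subtle explicit-management API (FreeUnreferencedObject and Resize). Below is a hedged sketch of how an embedder would call that API, assuming an already-initialized cppgc heap; the Buffer class and UseBuffer function are illustrative only.

#include "include/cppgc/allocation.h"
#include "include/cppgc/explicit-management.h"
#include "include/cppgc/garbage-collected.h"
#include "include/cppgc/visitor.h"

class Buffer final : public cppgc::GarbageCollected<Buffer> {
 public:
  void Trace(cppgc::Visitor*) const {}
};

void UseBuffer(cppgc::AllocationHandle& handle) {
  // Over-allocate 64 trailing bytes, as the Grow/Shrink tests above do.
  Buffer* buffer =
      cppgc::MakeGarbageCollected<Buffer>(handle, cppgc::AdditionalBytes(64));
  // Shrink back once the extra space is no longer needed; the reclaimed tail
  // ends up in the LAB or the free list, depending on where the object sits.
  cppgc::subtle::Resize(*buffer, cppgc::AdditionalBytes(0));
  // If the embedder can prove the object is unreferenced, it may release it
  // eagerly instead of waiting for the next garbage collection.
  cppgc::subtle::FreeUnreferencedObject(buffer);
}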
diff --git a/deps/v8/test/unittests/heap/cppgc/free-list-unittest.cc b/deps/v8/test/unittests/heap/cppgc/free-list-unittest.cc
index e059734cf94..c134877a2a4 100644
--- a/deps/v8/test/unittests/heap/cppgc/free-list-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/free-list-unittest.cc
@@ -151,7 +151,7 @@ TEST(FreeListTest, Contains) {
FreeList list = CreatePopulatedFreeList(blocks);
for (const auto& block : blocks) {
- EXPECT_TRUE(list.Contains({block.Address(), block.Size()}));
+ EXPECT_TRUE(list.ContainsForTesting({block.Address(), block.Size()}));
}
}
diff --git a/deps/v8/test/unittests/heap/cppgc/gc-info-unittest.cc b/deps/v8/test/unittests/heap/cppgc/gc-info-unittest.cc
index 3d951dc6cf3..da02212176e 100644
--- a/deps/v8/test/unittests/heap/cppgc/gc-info-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/gc-info-unittest.cc
@@ -4,6 +4,8 @@
#include "include/cppgc/internal/gc-info.h"
+#include <type_traits>
+
#include "include/cppgc/platform.h"
#include "src/base/page-allocator.h"
#include "src/base/platform/platform.h"
@@ -18,56 +20,72 @@ namespace {
constexpr GCInfo GetEmptyGCInfo() { return {nullptr, nullptr, nullptr, false}; }
+class GCInfoTableTest : public ::testing::Test {
+ public:
+ GCInfoIndex RegisterNewGCInfoForTesting(const GCInfo& info) {
+ // Unused registered index will result in registering a new index.
+ std::atomic<GCInfoIndex> registered_index{0};
+ return table().RegisterNewGCInfo(registered_index, info);
+ }
+
+ void SetUp() override {
+ table_ = std::make_unique<GCInfoTable>(&page_allocator_);
+ }
+
+ void TearDown() override { table_.reset(); }
+
+ GCInfoTable& table() { return *table_; }
+ const GCInfoTable& table() const { return *table_; }
+
+ private:
+ v8::base::PageAllocator page_allocator_;
+ std::unique_ptr<GCInfoTable> table_;
+};
+
+using GCInfoTableDeathTest = GCInfoTableTest;
+
} // namespace
-TEST(GCInfoTableTest, InitialEmpty) {
- v8::base::PageAllocator page_allocator;
- GCInfoTable table(&page_allocator);
- EXPECT_EQ(GCInfoTable::kMinIndex, table.NumberOfGCInfos());
+TEST_F(GCInfoTableTest, InitialEmpty) {
+ EXPECT_EQ(GCInfoTable::kMinIndex, table().NumberOfGCInfos());
}
-TEST(GCInfoTableTest, ResizeToMaxIndex) {
- v8::base::PageAllocator page_allocator;
- GCInfoTable table(&page_allocator);
+TEST_F(GCInfoTableTest, ResizeToMaxIndex) {
GCInfo info = GetEmptyGCInfo();
for (GCInfoIndex i = GCInfoTable::kMinIndex; i < GCInfoTable::kMaxIndex;
i++) {
- GCInfoIndex index = table.RegisterNewGCInfo(info);
+ GCInfoIndex index = RegisterNewGCInfoForTesting(info);
EXPECT_EQ(i, index);
}
}
-TEST(GCInfoTableDeathTest, MoreThanMaxIndexInfos) {
- v8::base::PageAllocator page_allocator;
- GCInfoTable table(&page_allocator);
+TEST_F(GCInfoTableDeathTest, MoreThanMaxIndexInfos) {
GCInfo info = GetEmptyGCInfo();
// Create GCInfoTable::kMaxIndex entries.
for (GCInfoIndex i = GCInfoTable::kMinIndex; i < GCInfoTable::kMaxIndex;
i++) {
- table.RegisterNewGCInfo(info);
+ RegisterNewGCInfoForTesting(info);
}
- EXPECT_DEATH_IF_SUPPORTED(table.RegisterNewGCInfo(info), "");
+ EXPECT_DEATH_IF_SUPPORTED(RegisterNewGCInfoForTesting(info), "");
}
-TEST(GCInfoTableDeathTest, OldTableAreaIsReadOnly) {
- v8::base::PageAllocator page_allocator;
- GCInfoTable table(&page_allocator);
+TEST_F(GCInfoTableDeathTest, OldTableAreaIsReadOnly) {
GCInfo info = GetEmptyGCInfo();
// Use up all slots until limit.
- GCInfoIndex limit = table.LimitForTesting();
+ GCInfoIndex limit = table().LimitForTesting();
// Bail out if initial limit is already the maximum because of large committed
// pages. In this case, nothing can be committed as read-only.
if (limit == GCInfoTable::kMaxIndex) {
return;
}
for (GCInfoIndex i = GCInfoTable::kMinIndex; i < limit; i++) {
- table.RegisterNewGCInfo(info);
+ RegisterNewGCInfoForTesting(info);
}
- EXPECT_EQ(limit, table.LimitForTesting());
- table.RegisterNewGCInfo(info);
- EXPECT_NE(limit, table.LimitForTesting());
+ EXPECT_EQ(limit, table().LimitForTesting());
+ RegisterNewGCInfoForTesting(info);
+ EXPECT_NE(limit, table().LimitForTesting());
// Old area is now read-only.
- auto& first_slot = table.TableSlotForTesting(GCInfoTable::kMinIndex);
+ auto& first_slot = table().TableSlotForTesting(GCInfoTable::kMinIndex);
EXPECT_DEATH_IF_SUPPORTED(first_slot.finalize = nullptr, "");
}
@@ -75,27 +93,27 @@ namespace {
class ThreadRegisteringGCInfoObjects final : public v8::base::Thread {
public:
- ThreadRegisteringGCInfoObjects(GCInfoTable* table,
+ ThreadRegisteringGCInfoObjects(GCInfoTableTest* test,
GCInfoIndex num_registrations)
: v8::base::Thread(Options("Thread registering GCInfo objects.")),
- table_(table),
+ test_(test),
num_registrations_(num_registrations) {}
void Run() final {
GCInfo info = GetEmptyGCInfo();
for (GCInfoIndex i = 0; i < num_registrations_; i++) {
- table_->RegisterNewGCInfo(info);
+ test_->RegisterNewGCInfoForTesting(info);
}
}
private:
- GCInfoTable* table_;
+ GCInfoTableTest* test_;
GCInfoIndex num_registrations_;
};
} // namespace
-TEST(GCInfoTableTest, MultiThreadedResizeToMaxIndex) {
+TEST_F(GCInfoTableTest, MultiThreadedResizeToMaxIndex) {
constexpr size_t num_threads = 4;
constexpr size_t main_thread_initialized = 2;
constexpr size_t gc_infos_to_register =
@@ -105,17 +123,14 @@ TEST(GCInfoTableTest, MultiThreadedResizeToMaxIndex) {
"must sum up to kMaxIndex");
constexpr size_t gc_infos_per_thread = gc_infos_to_register / num_threads;
- v8::base::PageAllocator page_allocator;
- GCInfoTable table(&page_allocator);
GCInfo info = GetEmptyGCInfo();
for (size_t i = 0; i < main_thread_initialized; i++) {
- table.RegisterNewGCInfo(info);
+ RegisterNewGCInfoForTesting(info);
}
v8::base::Thread* threads[num_threads];
for (size_t i = 0; i < num_threads; i++) {
- threads[i] =
- new ThreadRegisteringGCInfoObjects(&table, gc_infos_per_thread);
+ threads[i] = new ThreadRegisteringGCInfoObjects(this, gc_infos_per_thread);
}
for (size_t i = 0; i < num_threads; i++) {
CHECK(threads[i]->Start());
@@ -161,5 +176,110 @@ TEST_F(GCInfoTraitTest, TraitReturnsDifferentIndexForDifferentTypes) {
EXPECT_NE(index1, index2);
}
+namespace {
+
+struct Dummy {};
+
+class BaseWithVirtualDestructor
+ : public GarbageCollected<BaseWithVirtualDestructor> {
+ public:
+ virtual ~BaseWithVirtualDestructor() = default;
+ void Trace(Visitor*) const {}
+
+ private:
+ std::unique_ptr<Dummy> non_trivially_destructible_;
+};
+
+class ChildOfBaseWithVirtualDestructor : public BaseWithVirtualDestructor {
+ public:
+ ~ChildOfBaseWithVirtualDestructor() override = default;
+};
+
+static_assert(std::has_virtual_destructor<BaseWithVirtualDestructor>::value,
+ "Must have virtual destructor.");
+static_assert(!std::is_trivially_destructible<BaseWithVirtualDestructor>::value,
+ "Must not be trivially destructible");
+#ifdef CPPGC_SUPPORTS_OBJECT_NAMES
+static_assert(std::is_same<typename internal::GCInfoFolding<
+ ChildOfBaseWithVirtualDestructor,
+ ChildOfBaseWithVirtualDestructor::
+ ParentMostGarbageCollectedType>::ResultType,
+ ChildOfBaseWithVirtualDestructor>::value,
+ "No folding to preserve object names");
+#else // !CPPGC_SUPPORTS_OBJECT_NAMES
+static_assert(std::is_same<typename internal::GCInfoFolding<
+ ChildOfBaseWithVirtualDestructor,
+ ChildOfBaseWithVirtualDestructor::
+ ParentMostGarbageCollectedType>::ResultType,
+ BaseWithVirtualDestructor>::value,
+ "Must fold into base as base has virtual destructor.");
+#endif // !CPPGC_SUPPORTS_OBJECT_NAMES
+
+class TriviallyDestructibleBase
+ : public GarbageCollected<TriviallyDestructibleBase> {
+ public:
+ virtual void Trace(Visitor*) const {}
+};
+
+class ChildOfTriviallyDestructibleBase : public TriviallyDestructibleBase {};
+
+static_assert(!std::has_virtual_destructor<TriviallyDestructibleBase>::value,
+ "Must not have virtual destructor.");
+static_assert(std::is_trivially_destructible<TriviallyDestructibleBase>::value,
+ "Must be trivially destructible");
+#ifdef CPPGC_SUPPORTS_OBJECT_NAMES
+static_assert(std::is_same<typename internal::GCInfoFolding<
+ ChildOfTriviallyDestructibleBase,
+ ChildOfTriviallyDestructibleBase::
+ ParentMostGarbageCollectedType>::ResultType,
+ ChildOfTriviallyDestructibleBase>::value,
+ "No folding to preserve object names");
+#else // !CPPGC_SUPPORTS_OBJECT_NAMES
+static_assert(std::is_same<typename internal::GCInfoFolding<
+ ChildOfTriviallyDestructibleBase,
+ ChildOfTriviallyDestructibleBase::
+ ParentMostGarbageCollectedType>::ResultType,
+ TriviallyDestructibleBase>::value,
+ "Must fold into base as both are trivially destructible.");
+#endif // !CPPGC_SUPPORTS_OBJECT_NAMES
+
+class TypeWithCustomFinalizationMethodAtBase
+ : public GarbageCollected<TypeWithCustomFinalizationMethodAtBase> {
+ public:
+ void FinalizeGarbageCollectedObject() {}
+ void Trace(Visitor*) const {}
+
+ private:
+ std::unique_ptr<Dummy> non_trivially_destructible_;
+};
+
+class ChildOfTypeWithCustomFinalizationMethodAtBase
+ : public TypeWithCustomFinalizationMethodAtBase {};
+
+static_assert(
+ !std::has_virtual_destructor<TypeWithCustomFinalizationMethodAtBase>::value,
+ "Must not have virtual destructor.");
+static_assert(!std::is_trivially_destructible<
+ TypeWithCustomFinalizationMethodAtBase>::value,
+ "Must not be trivially destructible");
+#ifdef CPPGC_SUPPORTS_OBJECT_NAMES
+static_assert(
+ std::is_same<typename internal::GCInfoFolding<
+ ChildOfTypeWithCustomFinalizationMethodAtBase,
+ ChildOfTypeWithCustomFinalizationMethodAtBase::
+ ParentMostGarbageCollectedType>::ResultType,
+ ChildOfTypeWithCustomFinalizationMethodAtBase>::value,
+ "No folding to preserve object names");
+#else // !CPPGC_SUPPORTS_OBJECT_NAMES
+static_assert(std::is_same<typename internal::GCInfoFolding<
+ ChildOfTypeWithCustomFinalizationMethodAtBase,
+ ChildOfTypeWithCustomFinalizationMethodAtBase::
+ ParentMostGarbageCollectedType>::ResultType,
+ TypeWithCustomFinalizationMethodAtBase>::value,
+ "Must fold into base as base has custom finalizer dispatch.");
+#endif // !CPPGC_SUPPORTS_OBJECT_NAMES
+
+} // namespace
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/heap-object-header-unittest.cc b/deps/v8/test/unittests/heap/cppgc/heap-object-header-unittest.cc
index 2621af28914..11f4498aa0d 100644
--- a/deps/v8/test/unittests/heap/cppgc/heap-object-header-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/heap-object-header-unittest.cc
@@ -35,6 +35,14 @@ TEST(HeapObjectHeaderTest, Payload) {
header.Payload());
}
+TEST(HeapObjectHeaderTest, PayloadEnd) {
+ constexpr GCInfoIndex kGCInfoIndex = 17;
+ constexpr size_t kSize = kAllocationGranularity;
+ HeapObjectHeader header(kSize, kGCInfoIndex);
+ EXPECT_EQ(reinterpret_cast<ConstAddress>(&header) + kSize,
+ header.PayloadEnd());
+}
+
TEST(HeapObjectHeaderTest, GetGCInfoIndex) {
constexpr GCInfoIndex kGCInfoIndex = 17;
constexpr size_t kSize = kAllocationGranularity;
diff --git a/deps/v8/test/unittests/heap/cppgc/heap-page-unittest.cc b/deps/v8/test/unittests/heap/cppgc/heap-page-unittest.cc
index 7ccabd00e3e..acfcd2d4018 100644
--- a/deps/v8/test/unittests/heap/cppgc/heap-page-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/heap-page-unittest.cc
@@ -195,12 +195,12 @@ TEST_F(PageTest, NormalPageCreationDestruction) {
EXPECT_NE(space->end(), std::find(space->begin(), space->end(), page));
space->free_list().Add({page->PayloadStart(), page->PayloadSize()});
- EXPECT_TRUE(
- space->free_list().Contains({page->PayloadStart(), page->PayloadSize()}));
+ EXPECT_TRUE(space->free_list().ContainsForTesting(
+ {page->PayloadStart(), page->PayloadSize()}));
space->free_list().Clear();
- EXPECT_FALSE(
- space->free_list().Contains({page->PayloadStart(), page->PayloadSize()}));
+ EXPECT_FALSE(space->free_list().ContainsForTesting(
+ {page->PayloadStart(), page->PayloadSize()}));
space->RemovePage(page);
EXPECT_EQ(space->end(), std::find(space->begin(), space->end(), page));
NormalPage::Destroy(page);
diff --git a/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc b/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc
index eeb4b74b6d9..8f8191c6d07 100644
--- a/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc
@@ -4,10 +4,13 @@
#include "src/heap/cppgc/marker.h"
+#include <memory>
+
#include "include/cppgc/allocation.h"
#include "include/cppgc/internal/pointer-policies.h"
#include "include/cppgc/member.h"
#include "include/cppgc/persistent.h"
+#include "include/cppgc/trace-trait.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/marking-visitor.h"
#include "src/heap/cppgc/stats-collector.h"
@@ -212,6 +215,7 @@ TEST_F(MarkerTest, NestedObjectsOnStackAreMarked) {
}
namespace {
+
class GCedWithCallback : public GarbageCollected<GCedWithCallback> {
public:
template <typename Callback>
@@ -219,8 +223,19 @@ class GCedWithCallback : public GarbageCollected<GCedWithCallback> {
callback(this);
}
- void Trace(Visitor*) const {}
+ template <typename Callback>
+ GCedWithCallback(Callback callback, GCed* gced) : gced_(gced) {
+ callback(this);
+ }
+
+ void Trace(Visitor* visitor) const { visitor->Trace(gced_); }
+
+ GCed* gced() const { return gced_; }
+
+ private:
+ Member<GCed> gced_;
};
+
} // namespace
TEST_F(MarkerTest, InConstructionObjectIsEventuallyMarkedEmptyStack) {
@@ -254,6 +269,63 @@ TEST_F(MarkerTest, InConstructionObjectIsEventuallyMarkedNonEmptyStack) {
});
}
+namespace {
+
+// Storage that can be used to hide a pointer from the GC. Only useful when
+// dealing with the stack separately.
+class GCObliviousObjectStorage final {
+ public:
+ GCObliviousObjectStorage()
+ : storage_(std::make_unique<const void*>(nullptr)) {}
+
+ template <typename T>
+ void set_object(T* t) {
+ *storage_.get() = TraceTrait<T>::GetTraceDescriptor(t).base_object_payload;
+ }
+
+ const void* object() const { return *storage_; }
+
+ private:
+ std::unique_ptr<const void*> storage_;
+};
+
+V8_NOINLINE void RegisterInConstructionObject(
+ AllocationHandle& allocation_handle, Visitor& v,
+ GCObliviousObjectStorage& storage) {
+ // Create deeper stack to avoid finding any temporary reference in the caller.
+ char space[500];
+ USE(space);
+ MakeGarbageCollected<GCedWithCallback>(
+ allocation_handle,
+ [&visitor = v, &storage](GCedWithCallback* obj) {
+ Member<GCedWithCallback> member(obj);
+ // Adds GCedWithCallback to in-construction objects.
+ visitor.Trace(member);
+ EXPECT_FALSE(HeapObjectHeader::FromPayload(obj).IsMarked());
+ // The inner object GCed is only found if GCedWithCallback is processed.
+ storage.set_object(obj->gced());
+ },
+ // Initializing store does not trigger a write barrier.
+ MakeGarbageCollected<GCed>(allocation_handle));
+}
+
+} // namespace
+
+TEST_F(MarkerTest,
+ InConstructionObjectIsEventuallyMarkedDifferentNonEmptyStack) {
+ static const Marker::MarkingConfig config = {
+ MarkingConfig::CollectionType::kMajor,
+ MarkingConfig::StackState::kMayContainHeapPointers};
+ InitializeMarker(*Heap::From(GetHeap()), GetPlatformHandle().get(), config);
+
+ GCObliviousObjectStorage storage;
+ RegisterInConstructionObject(GetAllocationHandle(), marker()->Visitor(),
+ storage);
+ EXPECT_FALSE(HeapObjectHeader::FromPayload(storage.object()).IsMarked());
+ marker()->FinishMarking(MarkingConfig::StackState::kMayContainHeapPointers);
+ EXPECT_TRUE(HeapObjectHeader::FromPayload(storage.object()).IsMarked());
+}
+
TEST_F(MarkerTest, SentinelNotClearedOnWeakPersistentHandling) {
static const Marker::MarkingConfig config = {
MarkingConfig::CollectionType::kMajor,
@@ -290,7 +362,8 @@ class IncrementalMarkingTest : public testing::TestWithHeap {
MarkingConfig::MarkingType::kIncremental};
void FinishSteps(MarkingConfig::StackState stack_state) {
- while (!SingleStep(stack_state)) {}
+ while (!SingleStep(stack_state)) {
+ }
}
void FinishMarking() {
@@ -384,7 +457,7 @@ TEST_F(IncrementalMarkingTest, IncrementalStepDuringAllocation) {
TEST_F(IncrementalMarkingTest, MarkingRunsOutOfWorkEventually) {
InitializeMarker(*Heap::From(GetHeap()), GetPlatformHandle().get(),
- IncrementalPreciseMarkingConfig);
+ IncrementalPreciseMarkingConfig);
FinishSteps(MarkingConfig::StackState::kNoHeapPointers);
FinishMarking();
}
diff --git a/deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc b/deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc
index 603a47399bb..c4e34655fe5 100644
--- a/deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc
@@ -7,6 +7,7 @@
#include "include/cppgc/allocation.h"
#include "include/cppgc/member.h"
#include "include/cppgc/persistent.h"
+#include "include/cppgc/prefinalizer.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap.h"
#include "test/unittests/heap/cppgc/tests.h"
@@ -145,7 +146,11 @@ TEST_F(MarkingVerifierTest, DoesntDieOnInConstructionObjectWithWriteBarrier) {
namespace {
-class MarkingVerifierDeathTest : public MarkingVerifierTest {};
+class MarkingVerifierDeathTest : public MarkingVerifierTest {
+ protected:
+ template <template <typename T> class Reference>
+ void TestResurrectingPreFinalizer();
+};
} // namespace
@@ -175,5 +180,58 @@ TEST_F(MarkingVerifierDeathTest, DieOnUnmarkedWeakMember) {
"");
}
+namespace {
+
+template <template <typename T> class Reference>
+class ResurrectingPreFinalizer
+ : public GarbageCollected<ResurrectingPreFinalizer<Reference>> {
+ CPPGC_USING_PRE_FINALIZER(ResurrectingPreFinalizer<Reference>, Dispose);
+
+ public:
+ class Storage : public GarbageCollected<Storage> {
+ public:
+ void Trace(Visitor* visitor) const { visitor->Trace(ref); }
+
+ Reference<GCed> ref;
+ };
+
+ ResurrectingPreFinalizer(Storage* storage, GCed* object_that_dies)
+ : storage_(storage), object_that_dies_(object_that_dies) {}
+
+ void Trace(Visitor* visitor) const {
+ visitor->Trace(storage_);
+ visitor->Trace(object_that_dies_);
+ }
+
+ private:
+ void Dispose() { storage_->ref = object_that_dies_; }
+
+ Member<Storage> storage_;
+ Member<GCed> object_that_dies_;
+};
+
+} // namespace
+
+template <template <typename T> class Reference>
+void MarkingVerifierDeathTest::TestResurrectingPreFinalizer() {
+ Persistent<typename ResurrectingPreFinalizer<Reference>::Storage> storage(
+ MakeGarbageCollected<
+ typename ResurrectingPreFinalizer<Reference>::Storage>(
+ GetAllocationHandle()));
+ MakeGarbageCollected<ResurrectingPreFinalizer<Reference>>(
+ GetAllocationHandle(), storage.Get(),
+ MakeGarbageCollected<GCed>(GetAllocationHandle()));
+ EXPECT_DEATH_IF_SUPPORTED(PreciseGC(), "");
+}
+#if DEBUG
+TEST_F(MarkingVerifierDeathTest, DiesOnResurrectedMember) {
+ TestResurrectingPreFinalizer<Member>();
+}
+
+TEST_F(MarkingVerifierDeathTest, DiesOnResurrectedWeakMember) {
+ TestResurrectingPreFinalizer<WeakMember>();
+}
+#endif // DEBUG
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/persistent-family-unittest.cc b/deps/v8/test/unittests/heap/cppgc/persistent-family-unittest.cc
index 65c3e897eeb..1fff7e2c11b 100644
--- a/deps/v8/test/unittests/heap/cppgc/persistent-family-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/persistent-family-unittest.cc
@@ -52,20 +52,20 @@ struct PersistentRegionTrait<WeakPersistent> {
template <>
struct PersistentRegionTrait<subtle::CrossThreadPersistent> {
- static PersistentRegion& Get(cppgc::Heap* heap) {
+ static CrossThreadPersistentRegion& Get(cppgc::Heap* heap) {
return internal::Heap::From(heap)->GetStrongCrossThreadPersistentRegion();
}
};
template <>
struct PersistentRegionTrait<subtle::WeakCrossThreadPersistent> {
- static PersistentRegion& Get(cppgc::Heap* heap) {
+ static CrossThreadPersistentRegion& Get(cppgc::Heap* heap) {
return internal::Heap::From(heap)->GetWeakCrossThreadPersistentRegion();
}
};
template <template <typename> class PersistentType>
-PersistentRegion& GetRegion(cppgc::Heap* heap) {
+auto& GetRegion(cppgc::Heap* heap) {
return PersistentRegionTrait<PersistentType>::Get(heap);
}
@@ -114,31 +114,31 @@ class PersistentTest : public testing::TestSupportingAllocationOnly {};
template <template <typename> class PersistentType>
void NullStateCtor(cppgc::Heap* heap) {
- EXPECT_EQ(0u, GetRegion<Persistent>(heap).NodesInUse());
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
{
PersistentType<GCed> empty;
EXPECT_EQ(nullptr, empty.Get());
EXPECT_EQ(nullptr, empty.Release());
- EXPECT_EQ(0u, GetRegion<Persistent>(heap).NodesInUse());
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
}
{
PersistentType<GCed> empty = nullptr;
EXPECT_EQ(nullptr, empty.Get());
EXPECT_EQ(nullptr, empty.Release());
- EXPECT_EQ(0u, GetRegion<Persistent>(heap).NodesInUse());
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
}
{
PersistentType<GCed> empty = kSentinelPointer;
EXPECT_EQ(kSentinelPointer, empty);
EXPECT_EQ(kSentinelPointer, empty.Release());
- EXPECT_EQ(0u, GetRegion<Persistent>(heap).NodesInUse());
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
}
{
// Runtime null must not allocate an associated node.
PersistentType<GCed> empty = static_cast<GCed*>(nullptr);
EXPECT_EQ(nullptr, empty.Get());
EXPECT_EQ(nullptr, empty.Release());
- EXPECT_EQ(0u, GetRegion<Persistent>(heap).NodesInUse());
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
}
EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
}
diff --git a/deps/v8/test/unittests/heap/cppgc/stats-collector-unittest.cc b/deps/v8/test/unittests/heap/cppgc/stats-collector-unittest.cc
index 5c8044db7e6..8641922adb2 100644
--- a/deps/v8/test/unittests/heap/cppgc/stats-collector-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/stats-collector-unittest.cc
@@ -227,5 +227,13 @@ TEST_F(StatsCollectorTest, ObserverTriggersGC) {
stats.UnregisterObserver(&mock_observer);
}
+TEST_F(StatsCollectorTest, AllocatedMemorySize) {
+ EXPECT_EQ(0u, stats.allocated_memory_size());
+ stats.NotifyAllocatedMemory(1024);
+ EXPECT_EQ(1024u, stats.allocated_memory_size());
+ stats.NotifyFreedMemory(1024);
+ EXPECT_EQ(0u, stats.allocated_memory_size());
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc b/deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc
index 94c3479d3a6..932ff9bb96d 100644
--- a/deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc
@@ -208,12 +208,12 @@ TEST_F(SweeperTest, CoalesceFreeListEntries) {
object2_start, static_cast<size_t>(object3_end - object2_start)};
EXPECT_EQ(0u, g_destructor_callcount);
- EXPECT_FALSE(freelist.Contains(coalesced_block));
+ EXPECT_FALSE(freelist.ContainsForTesting(coalesced_block));
Sweep();
EXPECT_EQ(2u, g_destructor_callcount);
- EXPECT_TRUE(freelist.Contains(coalesced_block));
+ EXPECT_TRUE(freelist.ContainsForTesting(coalesced_block));
}
namespace {
diff --git a/deps/v8/test/unittests/heap/cppgc/testing-unittest.cc b/deps/v8/test/unittests/heap/cppgc/testing-unittest.cc
index 1aa9bd15bb6..cf45fe02485 100644
--- a/deps/v8/test/unittests/heap/cppgc/testing-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/testing-unittest.cc
@@ -51,5 +51,13 @@ TEST_F(TestingTest, OverrideEmbeddertackStateScope) {
}
}
+TEST_F(TestingTest, StandaloneTestingHeap) {
+ // Perform garbage collection through the StandaloneTestingHeap API.
+ cppgc::testing::StandaloneTestingHeap heap(GetHeap()->GetHeapHandle());
+ heap.StartGarbageCollection();
+ heap.PerformMarkingStep(EmbedderStackState::kNoHeapPointers);
+ heap.FinalizeGarbageCollection(EmbedderStackState::kNoHeapPointers);
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/tests.h b/deps/v8/test/unittests/heap/cppgc/tests.h
index c091c7f6ec9..d367e45dadb 100644
--- a/deps/v8/test/unittests/heap/cppgc/tests.h
+++ b/deps/v8/test/unittests/heap/cppgc/tests.h
@@ -100,7 +100,7 @@ class TestWithHeap : public TestWithPlatform {
// Restrictive test fixture that supports allocation but will make sure no
// garbage collection is triggered. This is useful for writing idiomatic
// tests where object are allocated on the managed heap while still avoiding
-// far reaching test consquences of full garbage collection calls.
+// far reaching test consequences of full garbage collection calls.
class TestSupportingAllocationOnly : public TestWithHeap {
protected:
TestSupportingAllocationOnly();
diff --git a/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc b/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
index c46ee350958..55b87186757 100644
--- a/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
+++ b/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
@@ -21,8 +21,6 @@ class GCIdleTimeHandlerTest : public ::testing::Test {
GCIdleTimeHeapState DefaultHeapState() {
GCIdleTimeHeapState result;
- result.contexts_disposed = 0;
- result.contexts_disposal_rate = GCIdleTimeHandler::kHighContextDisposalRate;
result.incremental_marking_stopped = false;
result.size_of_objects = kSizeOfObjects;
return result;
@@ -72,80 +70,6 @@ TEST(GCIdleTimeHandler, EstimateMarkingStepSizeOverflow2) {
step_size);
}
-
-TEST_F(GCIdleTimeHandlerTest, ContextDisposeLowRate) {
- if (!handler()->Enabled()) return;
- GCIdleTimeHeapState heap_state = DefaultHeapState();
- heap_state.contexts_disposed = 1;
- heap_state.incremental_marking_stopped = true;
- double idle_time_ms = 0;
- EXPECT_EQ(GCIdleTimeAction::kDone,
- handler()->Compute(idle_time_ms, heap_state));
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, ContextDisposeHighRate) {
- if (!handler()->Enabled()) return;
- GCIdleTimeHeapState heap_state = DefaultHeapState();
- heap_state.contexts_disposed = 1;
- heap_state.contexts_disposal_rate =
- GCIdleTimeHandler::kHighContextDisposalRate - 1;
- heap_state.incremental_marking_stopped = true;
- double idle_time_ms = 0;
- EXPECT_EQ(GCIdleTimeAction::kFullGC,
- handler()->Compute(idle_time_ms, heap_state));
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeZeroIdleTime) {
- if (!handler()->Enabled()) return;
- GCIdleTimeHeapState heap_state = DefaultHeapState();
- heap_state.contexts_disposed = 1;
- heap_state.contexts_disposal_rate = 1.0;
- heap_state.incremental_marking_stopped = true;
- double idle_time_ms = 0;
- EXPECT_EQ(GCIdleTimeAction::kFullGC,
- handler()->Compute(idle_time_ms, heap_state));
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeSmallIdleTime1) {
- if (!handler()->Enabled()) return;
- GCIdleTimeHeapState heap_state = DefaultHeapState();
- heap_state.contexts_disposed = 1;
- heap_state.contexts_disposal_rate =
- GCIdleTimeHandler::kHighContextDisposalRate;
- size_t speed = kMarkCompactSpeed;
- double idle_time_ms = static_cast<double>(kSizeOfObjects / speed - 1);
- EXPECT_EQ(GCIdleTimeAction::kIncrementalStep,
- handler()->Compute(idle_time_ms, heap_state));
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeSmallIdleTime2) {
- if (!handler()->Enabled()) return;
- GCIdleTimeHeapState heap_state = DefaultHeapState();
- heap_state.contexts_disposed = 1;
- heap_state.contexts_disposal_rate =
- GCIdleTimeHandler::kHighContextDisposalRate;
- size_t speed = kMarkCompactSpeed;
- double idle_time_ms = static_cast<double>(kSizeOfObjects / speed - 1);
- EXPECT_EQ(GCIdleTimeAction::kIncrementalStep,
- handler()->Compute(idle_time_ms, heap_state));
-}
-
-TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeLargeHeap) {
- if (!handler()->Enabled()) return;
- GCIdleTimeHeapState heap_state = DefaultHeapState();
- heap_state.contexts_disposed = 1;
- heap_state.contexts_disposal_rate = 1.0;
- heap_state.incremental_marking_stopped = true;
- heap_state.size_of_objects = 101 * MB;
- double idle_time_ms = 0;
- EXPECT_EQ(GCIdleTimeAction::kDone,
- handler()->Compute(idle_time_ms, heap_state));
-}
-
TEST_F(GCIdleTimeHandlerTest, IncrementalMarking1) {
if (!handler()->Enabled()) return;
GCIdleTimeHeapState heap_state = DefaultHeapState();
diff --git a/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc b/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
deleted file mode 100644
index 7883283766b..00000000000
--- a/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/heap/item-parallel-job.h"
-
-#include "src/execution/isolate.h"
-#include "test/unittests/test-utils.h"
-
-namespace v8 {
-namespace internal {
-
-class ItemParallelJobTest : public TestWithIsolate {
- public:
- ItemParallelJobTest() : parallel_job_semaphore_(0) {}
- ItemParallelJobTest(const ItemParallelJobTest&) = delete;
- ItemParallelJobTest& operator=(const ItemParallelJobTest&) = delete;
-
- base::Semaphore* parallel_job_semaphore() { return &parallel_job_semaphore_; }
-
- private:
- base::Semaphore parallel_job_semaphore_;
-};
-
-namespace {
-
-class SimpleTask : public ItemParallelJob::Task {
- public:
- SimpleTask(Isolate* isolate, bool* did_run)
- : ItemParallelJob::Task(isolate), did_run_(did_run) {}
-
- void RunInParallel(Runner runner) override {
- ItemParallelJob::Item* item = nullptr;
- while ((item = GetItem<ItemParallelJob::Item>()) != nullptr) {
- item->MarkFinished();
- }
- *did_run_ = true;
- }
-
- private:
- bool* did_run_;
-};
-
-// A simple work item which sets |was_processed| to true, if non-null, when it
-// is processed.
-class SimpleItem : public ItemParallelJob::Item {
- public:
- explicit SimpleItem(bool* was_processed = nullptr)
- : ItemParallelJob::Item(), was_processed_(was_processed) {}
- void Process() {
- if (was_processed_) *was_processed_ = true;
- }
-
- private:
- bool* was_processed_;
-};
-
-class EagerTask : public ItemParallelJob::Task {
- public:
- explicit EagerTask(Isolate* isolate) : ItemParallelJob::Task(isolate) {}
-
- void RunInParallel(Runner runner) override {
- SimpleItem* item = nullptr;
- while ((item = GetItem<SimpleItem>()) != nullptr) {
- item->Process();
- item->MarkFinished();
- }
- }
-};
-
-// A OneShotBarrier is meant to be passed to |counter| users. Users should
-// either Signal() or Wait() when done (based on whether they want to return
-// immediately or wait until others are also done).
-class OneShotBarrier {
- public:
- explicit OneShotBarrier(size_t counter) : counter_(counter) {
- DCHECK_GE(counter_, 0);
- }
-
- void Wait() {
- DCHECK_NE(counter_, 0);
- mutex_.Lock();
- counter_--;
- if (counter_ == 0) {
- condition_.NotifyAll();
- } else {
- while (counter_ > 0) {
- condition_.Wait(&mutex_);
- }
- }
- mutex_.Unlock();
- }
-
- void Signal() {
- mutex_.Lock();
- counter_--;
- if (counter_ == 0) {
- condition_.NotifyAll();
- }
- mutex_.Unlock();
- }
-
- private:
- base::Mutex mutex_;
- base::ConditionVariable condition_;
- size_t counter_;
-};
-
-// A task that only processes a single item. Signals |barrier| when done; if
- // |wait_when_done|, will block until all other tasks have signaled |barrier|.
-// If |did_process_an_item| is non-null, will set it to true if it does process
-// an item. Otherwise, it will expect to get an item to process (and will report
-// a failure if it doesn't).
-class TaskProcessingOneItem : public ItemParallelJob::Task {
- public:
- TaskProcessingOneItem(Isolate* isolate, OneShotBarrier* barrier,
- bool wait_when_done,
- bool* did_process_an_item = nullptr)
- : ItemParallelJob::Task(isolate),
- barrier_(barrier),
- wait_when_done_(wait_when_done),
- did_process_an_item_(did_process_an_item) {}
-
- void RunInParallel(Runner runner) override {
- SimpleItem* item = GetItem<SimpleItem>();
-
- if (did_process_an_item_) {
- *did_process_an_item_ = item != nullptr;
- } else {
- EXPECT_NE(nullptr, item);
- }
-
- if (item) {
- item->Process();
- item->MarkFinished();
- }
-
- if (wait_when_done_) {
- barrier_->Wait();
- } else {
- barrier_->Signal();
- }
- }
-
- private:
- OneShotBarrier* barrier_;
- bool wait_when_done_;
- bool* did_process_an_item_;
-};
-
-class TaskForDifferentItems;
-
-class BaseItem : public ItemParallelJob::Item {
- public:
- ~BaseItem() override = default;
- virtual void ProcessItem(TaskForDifferentItems* task) = 0;
-};
-
-class TaskForDifferentItems : public ItemParallelJob::Task {
- public:
- explicit TaskForDifferentItems(Isolate* isolate, bool* processed_a,
- bool* processed_b)
- : ItemParallelJob::Task(isolate),
- processed_a_(processed_a),
- processed_b_(processed_b) {}
- ~TaskForDifferentItems() override = default;
-
- void RunInParallel(Runner runner) override {
- BaseItem* item = nullptr;
- while ((item = GetItem<BaseItem>()) != nullptr) {
- item->ProcessItem(this);
- item->MarkFinished();
- }
- }
-
- void ProcessA() { *processed_a_ = true; }
- void ProcessB() { *processed_b_ = true; }
-
- private:
- bool* processed_a_;
- bool* processed_b_;
-};
-
-class ItemA : public BaseItem {
- public:
- ~ItemA() override = default;
- void ProcessItem(TaskForDifferentItems* task) override { task->ProcessA(); }
-};
-
-class ItemB : public BaseItem {
- public:
- ~ItemB() override = default;
- void ProcessItem(TaskForDifferentItems* task) override { task->ProcessB(); }
-};
-
-} // namespace
-
-// ItemParallelJob runs tasks even without work items (as requested tasks may be
-// responsible for post-processing).
-TEST_F(ItemParallelJobTest, SimpleTaskWithNoItemsRuns) {
- bool did_run = false;
- ItemParallelJob job(i_isolate()->cancelable_task_manager(),
- parallel_job_semaphore());
- job.AddTask(new SimpleTask(i_isolate(), &did_run));
-
- job.Run();
- EXPECT_TRUE(did_run);
-}
-
-TEST_F(ItemParallelJobTest, SimpleTaskWithSimpleItemRuns) {
- bool did_run = false;
- ItemParallelJob job(i_isolate()->cancelable_task_manager(),
- parallel_job_semaphore());
- job.AddTask(new SimpleTask(i_isolate(), &did_run));
-
- job.AddItem(new ItemParallelJob::Item);
-
- job.Run();
- EXPECT_TRUE(did_run);
-}
-
-TEST_F(ItemParallelJobTest, MoreTasksThanItems) {
- const int kNumTasks = 128;
- const int kNumItems = kNumTasks - 4;
-
- TaskProcessingOneItem* tasks[kNumTasks] = {};
- bool did_process_an_item[kNumTasks] = {};
-
- ItemParallelJob job(i_isolate()->cancelable_task_manager(),
- parallel_job_semaphore());
-
- // The barrier ensures that all tasks run. But only the first kNumItems tasks
- // should be assigned an item to execute.
- OneShotBarrier barrier(kNumTasks);
- for (int i = 0; i < kNumTasks; i++) {
- // Block the main thread when done to prevent it from returning control to
- // the job (which could cancel tasks that have yet to be scheduled).
- const bool wait_when_done = i == 0;
- tasks[i] = new TaskProcessingOneItem(i_isolate(), &barrier, wait_when_done,
- &did_process_an_item[i]);
- job.AddTask(tasks[i]);
- }
-
- for (int i = 0; i < kNumItems; i++) {
- job.AddItem(new SimpleItem);
- }
-
- job.Run();
-
- for (int i = 0; i < kNumTasks; i++) {
- // Only the first kNumItems tasks should have been assigned a work item.
- EXPECT_EQ(i < kNumItems, did_process_an_item[i]);
- }
-}
-
-TEST_F(ItemParallelJobTest, SingleThreadProcessing) {
- const int kItems = 111;
- bool was_processed[kItems] = {};
- ItemParallelJob job(i_isolate()->cancelable_task_manager(),
- parallel_job_semaphore());
- job.AddTask(new EagerTask(i_isolate()));
- for (int i = 0; i < kItems; i++) {
- job.AddItem(new SimpleItem(&was_processed[i]));
- }
- job.Run();
- for (int i = 0; i < kItems; i++) {
- EXPECT_TRUE(was_processed[i]);
- }
-}
-
-TEST_F(ItemParallelJobTest, DistributeItemsMultipleTasks) {
- const int kItemsAndTasks = 256;
- bool was_processed[kItemsAndTasks] = {};
- OneShotBarrier barrier(kItemsAndTasks);
- ItemParallelJob job(i_isolate()->cancelable_task_manager(),
- parallel_job_semaphore());
- for (int i = 0; i < kItemsAndTasks; i++) {
- job.AddItem(new SimpleItem(&was_processed[i]));
-
- // Block the main thread when done to prevent it from returning control to
- // the job (which could cancel tasks that have yet to be scheduled).
- const bool wait_when_done = i == 0;
- job.AddTask(
- new TaskProcessingOneItem(i_isolate(), &barrier, wait_when_done));
- }
- job.Run();
- for (int i = 0; i < kItemsAndTasks; i++) {
- EXPECT_TRUE(was_processed[i]);
- }
-}
-
-TEST_F(ItemParallelJobTest, DifferentItems) {
- bool item_a = false;
- bool item_b = false;
- ItemParallelJob job(i_isolate()->cancelable_task_manager(),
- parallel_job_semaphore());
- job.AddItem(new ItemA());
- job.AddItem(new ItemB());
- job.AddTask(new TaskForDifferentItems(i_isolate(), &item_a, &item_b));
- job.Run();
- EXPECT_TRUE(item_a);
- EXPECT_TRUE(item_b);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/heap/local-heap-unittest.cc b/deps/v8/test/unittests/heap/local-heap-unittest.cc
index 919578f2fbb..92b5eef8dce 100644
--- a/deps/v8/test/unittests/heap/local-heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/local-heap-unittest.cc
@@ -170,8 +170,11 @@ TEST_F(LocalHeapTest, GCEpilogue) {
CHECK(thread2->Start());
epilogue[1].WaitUntilStarted();
epilogue[2].WaitUntilStarted();
- heap->PreciseCollectAllGarbage(Heap::kNoGCFlags,
- GarbageCollectionReason::kTesting);
+ {
+ UnparkedScope scope(&lh);
+ heap->PreciseCollectAllGarbage(Heap::kNoGCFlags,
+ GarbageCollectionReason::kTesting);
+ }
epilogue[1].RequestStop();
epilogue[2].RequestStop();
thread1->Join();
diff --git a/deps/v8/test/unittests/heap/unified-heap-snapshot-unittest.cc b/deps/v8/test/unittests/heap/unified-heap-snapshot-unittest.cc
index 848def9e218..f58569eb107 100644
--- a/deps/v8/test/unittests/heap/unified-heap-snapshot-unittest.cc
+++ b/deps/v8/test/unittests/heap/unified-heap-snapshot-unittest.cc
@@ -384,14 +384,21 @@ TEST_F(UnifiedHeapSnapshotTest, MergedWrapperNode) {
// GCedWithJSRef is merged into MergedObject, replacing its name.
"NextObject" // NOLINT
}));
+ const size_t js_size = Utils::OpenHandle(*wrapper_object)->Size();
+#if CPPGC_SUPPORTS_OBJECT_NAMES
const size_t cpp_size =
cppgc::internal::HeapObjectHeader::FromPayload(gc_w_js_ref.Get())
.GetSize();
- const size_t js_size = Utils::OpenHandle(*wrapper_object)->Size();
ForEachEntryWithName(snapshot, GetExpectedName<GCedWithJSRef>(),
[cpp_size, js_size](const HeapEntry& entry) {
EXPECT_EQ(cpp_size + js_size, entry.self_size());
});
+#else // !CPPGC_SUPPORTS_OBJECT_NAMES
+ ForEachEntryWithName(snapshot, GetExpectedName<GCedWithJSRef>(),
+ [js_size](const HeapEntry& entry) {
+ EXPECT_EQ(js_size, entry.self_size());
+ });
+#endif // !CPPGC_SUPPORTS_OBJECT_NAMES
}
namespace {
diff --git a/deps/v8/test/unittests/heap/unified-heap-unittest.cc b/deps/v8/test/unittests/heap/unified-heap-unittest.cc
index 597cbcf2cf2..404cf2e1a0e 100644
--- a/deps/v8/test/unittests/heap/unified-heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/unified-heap-unittest.cc
@@ -6,6 +6,7 @@
#include "include/cppgc/garbage-collected.h"
#include "include/cppgc/persistent.h"
#include "include/cppgc/platform.h"
+#include "include/cppgc/testing.h"
#include "include/v8-cppgc.h"
#include "include/v8.h"
#include "src/api/api-inl.h"
@@ -140,6 +141,7 @@ TEST_F(UnifiedHeapDetachedTest, AllocationBeforeConfigureHeap) {
cpp_heap.AsBase().sweeper().FinishIfRunning();
EXPECT_TRUE(weak_holder);
}
+ USE(object);
{
js_heap.SetEmbedderStackStateForNextFinalization(
EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
@@ -149,5 +151,41 @@ TEST_F(UnifiedHeapDetachedTest, AllocationBeforeConfigureHeap) {
}
}
+TEST_F(UnifiedHeapDetachedTest, StandAloneCppGC) {
+ // Test ensures that stand-alone C++ GCs are possible when using CppHeap. This
+ // works even in the presence of wrappables using TracedReference as long
+ // as the reference is empty.
+ auto heap = v8::CppHeap::Create(
+ V8::GetCurrentPlatform(),
+ CppHeapCreateParams{{}, WrapperHelper::DefaultWrapperDescriptor()});
+ auto* object =
+ cppgc::MakeGarbageCollected<Wrappable>(heap->GetAllocationHandle());
+ cppgc::WeakPersistent<Wrappable> weak_holder{object};
+
+ heap->EnableDetachedGarbageCollectionsForTesting();
+ {
+ heap->CollectGarbageForTesting(
+ cppgc::EmbedderStackState::kMayContainHeapPointers);
+ EXPECT_TRUE(weak_holder);
+ }
+ USE(object);
+ {
+ heap->CollectGarbageForTesting(cppgc::EmbedderStackState::kNoHeapPointers);
+ EXPECT_FALSE(weak_holder);
+ }
+}
+
+TEST_F(UnifiedHeapDetachedTest, StandaloneTestingHeap) {
+ // Perform garbage collection through the StandaloneTestingHeap API.
+ auto cpp_heap = v8::CppHeap::Create(
+ V8::GetCurrentPlatform(),
+ CppHeapCreateParams{{}, WrapperHelper::DefaultWrapperDescriptor()});
+ cpp_heap->EnableDetachedGarbageCollectionsForTesting();
+ cppgc::testing::StandaloneTestingHeap heap(cpp_heap->GetHeapHandle());
+ heap.StartGarbageCollection();
+ heap.PerformMarkingStep(cppgc::EmbedderStackState::kNoHeapPointers);
+ heap.FinalizeGarbageCollection(cppgc::EmbedderStackState::kNoHeapPointers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index d2beba0fbcb..e027f12b78a 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <limits>
+#include "src/interpreter/bytecode-array-builder.h"
-#include "src/init/v8.h"
+#include <limits>
#include "src/ast/scopes.h"
-#include "src/interpreter/bytecode-array-builder.h"
+#include "src/init/v8.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-jump-table.h"
#include "src/interpreter/bytecode-label.h"
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
index ea60664bea8..88e87f7e941 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/init/v8.h"
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/init/v8.h"
#include "src/interpreter/bytecode-array-builder.h"
-#include "src/interpreter/bytecode-array-iterator.h"
#include "src/numbers/hash-seed-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
index 735ecf4d2f7..619d270452d 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
@@ -15,7 +15,6 @@
using ::testing::_;
using ::testing::Eq;
-using v8::internal::compiler::Node;
namespace c = v8::internal::compiler;
@@ -53,10 +52,10 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::
}
}
-Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoad(
+Matcher<c::Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoad(
const Matcher<c::LoadRepresentation>& rep_matcher,
- const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
- LoadSensitivity needs_poisoning) {
+ const Matcher<c::Node*>& base_matcher,
+ const Matcher<c::Node*>& index_matcher, LoadSensitivity needs_poisoning) {
CHECK_NE(LoadSensitivity::kUnsafe, needs_poisoning);
CHECK_NE(PoisoningMitigationLevel::kPoisonAll, poisoning_level());
if (poisoning_level() == PoisoningMitigationLevel::kPoisonCriticalOnly &&
@@ -67,31 +66,35 @@ Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoad(
return ::i::compiler::IsLoad(rep_matcher, base_matcher, index_matcher, _, _);
}
-Matcher<Node*>
+Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoadFromObject(
const Matcher<c::LoadRepresentation>& rep_matcher,
- const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher) {
+ const Matcher<c::Node*>& base_matcher,
+ const Matcher<c::Node*>& index_matcher) {
CHECK_NE(PoisoningMitigationLevel::kPoisonAll, poisoning_level());
return ::i::compiler::IsLoadFromObject(rep_matcher, base_matcher,
index_matcher, _, _);
}
-Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsStore(
+Matcher<c::Node*>
+InterpreterAssemblerTest::InterpreterAssemblerForTest::IsStore(
const Matcher<c::StoreRepresentation>& rep_matcher,
- const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
- const Matcher<Node*>& value_matcher) {
+ const Matcher<c::Node*>& base_matcher,
+ const Matcher<c::Node*>& index_matcher,
+ const Matcher<c::Node*>& value_matcher) {
return ::i::compiler::IsStore(rep_matcher, base_matcher, index_matcher,
value_matcher, _, _);
}
-Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsWordNot(
- const Matcher<Node*>& value_matcher) {
+Matcher<c::Node*>
+InterpreterAssemblerTest::InterpreterAssemblerForTest::IsWordNot(
+ const Matcher<c::Node*>& value_matcher) {
return kSystemPointerSize == 8
? IsWord64Xor(value_matcher, c::IsInt64Constant(-1))
: IsWord32Xor(value_matcher, c::IsInt32Constant(-1));
}
-Matcher<Node*>
+Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedByteOperand(
int offset, LoadSensitivity needs_poisoning) {
return IsLoad(
@@ -103,7 +106,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedByteOperand(
needs_poisoning);
}
-Matcher<Node*>
+Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedByteOperand(
int offset, LoadSensitivity needs_poisoning) {
return IsLoad(
@@ -115,7 +118,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedByteOperand(
needs_poisoning);
}
-Matcher<Node*>
+Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedShortOperand(
int offset, LoadSensitivity needs_poisoning) {
if (TargetSupportsUnalignedAccess()) {
@@ -136,7 +139,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedShortOperand(
#else
#error "Unknown Architecture"
#endif
- Matcher<Node*> bytes[2];
+ Matcher<c::Node*> bytes[2];
for (int i = 0; i < static_cast<int>(arraysize(bytes)); i++) {
bytes[i] = IsLoad(
MachineType::Uint8(),
@@ -151,7 +154,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedShortOperand(
}
}
-Matcher<Node*>
+Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedShortOperand(
int offset, LoadSensitivity needs_poisoning) {
if (TargetSupportsUnalignedAccess()) {
@@ -172,7 +175,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedShortOperand(
#else
#error "Unknown Architecture"
#endif
- Matcher<Node*> bytes[2];
+ Matcher<c::Node*> bytes[2];
for (int i = 0; i < static_cast<int>(arraysize(bytes)); i++) {
bytes[i] = IsLoad(
(i == 0) ? MachineType::Int8() : MachineType::Uint8(),
@@ -187,7 +190,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedShortOperand(
}
}
-Matcher<Node*>
+Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedQuadOperand(
int offset, LoadSensitivity needs_poisoning) {
if (TargetSupportsUnalignedAccess()) {
@@ -208,7 +211,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedQuadOperand(
#else
#error "Unknown Architecture"
#endif
- Matcher<Node*> bytes[4];
+ Matcher<c::Node*> bytes[4];
for (int i = 0; i < static_cast<int>(arraysize(bytes)); i++) {
bytes[i] = IsLoad(
MachineType::Uint8(),
@@ -228,7 +231,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedQuadOperand(
}
}
-Matcher<Node*>
+Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedQuadOperand(
int offset, LoadSensitivity needs_poisoning) {
if (TargetSupportsUnalignedAccess()) {
@@ -249,7 +252,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedQuadOperand(
#else
#error "Unknown Architecture"
#endif
- Matcher<Node*> bytes[4];
+ Matcher<c::Node*> bytes[4];
for (int i = 0; i < static_cast<int>(arraysize(bytes)); i++) {
bytes[i] = IsLoad(
(i == 0) ? MachineType::Int8() : MachineType::Uint8(),
@@ -269,7 +272,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedQuadOperand(
}
}
-Matcher<Node*>
+Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedOperand(
int offset, OperandSize operand_size, LoadSensitivity needs_poisoning) {
switch (operand_size) {
@@ -285,7 +288,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedOperand(
return nullptr;
}
-Matcher<Node*>
+Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedOperand(
int offset, OperandSize operand_size, LoadSensitivity needs_poisoning) {
switch (operand_size) {
@@ -301,10 +304,10 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedOperand(
return nullptr;
}
-Matcher<compiler::Node*>
+Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoadRegisterOperand(
int offset, OperandSize operand_size) {
- Matcher<compiler::Node*> reg_operand = IsChangeInt32ToIntPtr(
+ Matcher<c::Node*> reg_operand = IsChangeInt32ToIntPtr(
IsSignedOperand(offset, operand_size, LoadSensitivity::kSafe));
return IsBitcastWordToTagged(IsLoad(
MachineType::Pointer(), c::IsLoadParentFramePointer(),
@@ -409,36 +412,35 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
{
TNode<IntPtrT> index = m.IntPtrConstant(2);
TNode<Object> load_constant = m.LoadConstantPoolEntry(index);
- Matcher<Node*> constant_pool_matcher = m.IsLoadFromObject(
- MachineType::AnyTagged(),
- c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
- c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
- kHeapObjectTag));
- EXPECT_THAT(
- load_constant,
- m.IsLoad(MachineType::AnyTagged(), constant_pool_matcher,
- c::IsIntPtrConstant(FixedArray::OffsetOfElementAt(2) -
- kHeapObjectTag),
- LoadSensitivity::kCritical));
+ Matcher<c::Node*> constant_pool_matcher = m.IsLoadFromObject(
+ MachineType::AnyTagged(),
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
+ kHeapObjectTag));
+ EXPECT_THAT(
+ load_constant,
+ m.IsLoad(MachineType::AnyTagged(), constant_pool_matcher,
+ c::IsIntPtrConstant(FixedArray::OffsetOfElementAt(2) -
+ kHeapObjectTag),
+ LoadSensitivity::kCritical));
}
{
- Node* index = m.UntypedParameter(2);
+ c::Node* index = m.UntypedParameter(2);
TNode<Object> load_constant =
m.LoadConstantPoolEntry(m.ReinterpretCast<IntPtrT>(index));
- Matcher<Node*> constant_pool_matcher = m.IsLoadFromObject(
- MachineType::AnyTagged(),
- c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
- c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
- kHeapObjectTag));
- EXPECT_THAT(
- load_constant,
- m.IsLoad(
- MachineType::AnyTagged(), constant_pool_matcher,
- c::IsIntPtrAdd(
- c::IsIntPtrConstant(FixedArray::kHeaderSize -
- kHeapObjectTag),
- c::IsWordShl(index, c::IsIntPtrConstant(kTaggedSizeLog2))),
- LoadSensitivity::kCritical));
+ Matcher<c::Node*> constant_pool_matcher = m.IsLoadFromObject(
+ MachineType::AnyTagged(),
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
+ kHeapObjectTag));
+ EXPECT_THAT(
+ load_constant,
+ m.IsLoad(
+ MachineType::AnyTagged(), constant_pool_matcher,
+ c::IsIntPtrAdd(
+ c::IsIntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
+ c::IsWordShl(index, c::IsIntPtrConstant(kTaggedSizeLog2))),
+ LoadSensitivity::kCritical));
}
}
}
@@ -488,19 +490,19 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime) {
m.Int32Constant(2));
TNode<Context> context = m.ReinterpretCast<Context>(m.Int32Constant(4));
- Matcher<Node*> function_table = c::IsExternalConstant(
+ Matcher<c::Node*> function_table = c::IsExternalConstant(
ExternalReference::runtime_function_table_address_for_unittests(
isolate()));
- Matcher<Node*> function =
+ Matcher<c::Node*> function =
c::IsIntPtrAdd(function_table,
c::IsChangeUint32ToWord(c::IsInt32Mul(
Eq(function_id),
c::IsInt32Constant(sizeof(Runtime::Function)))));
- Matcher<Node*> function_entry =
+ Matcher<c::Node*> function_entry =
m.IsLoad(MachineType::Pointer(), function,
c::IsIntPtrConstant(offsetof(Runtime::Function, entry)));
- Node* call_runtime =
+ c::Node* call_runtime =
m.CallRuntimeN(function_id, context, registers, result_size);
EXPECT_THAT(call_runtime,
c::IsCall(_, c::IsHeapConstant(builtin.code()),
@@ -521,30 +523,28 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadFeedbackVector) {
// Feedback vector is a phi node with two inputs. One of them is loading the
// feedback vector and the other is undefined constant (when feedback
// vectors aren't allocated). Find the input that loads feedback vector.
- CHECK_EQ(static_cast<Node*>(feedback_vector)->opcode(),
+ CHECK_EQ(static_cast<c::Node*>(feedback_vector)->opcode(),
i::compiler::IrOpcode::kPhi);
- Node* value0 =
+ c::Node* value0 =
i::compiler::NodeProperties::GetValueInput(feedback_vector, 0);
- Node* value1 =
+ c::Node* value1 =
i::compiler::NodeProperties::GetValueInput(feedback_vector, 1);
- Node* load_feedback_vector = value0;
+ c::Node* load_feedback_vector = value0;
if (value0->opcode() == i::compiler::IrOpcode::kHeapConstant) {
load_feedback_vector = value1;
}
- Matcher<Node*> load_function_matcher = IsBitcastWordToTagged(
+ Matcher<c::Node*> load_function_matcher = IsBitcastWordToTagged(
m.IsLoad(MachineType::Pointer(), c::IsLoadParentFramePointer(),
c::IsIntPtrConstant(Register::function_closure().ToOperand() *
kSystemPointerSize)));
- Matcher<Node*> load_vector_cell_matcher = m.IsLoadFromObject(
- MachineType::TaggedPointer(), load_function_matcher,
- c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset -
- kHeapObjectTag));
- EXPECT_THAT(
- load_feedback_vector,
- m.IsLoadFromObject(
- MachineType::TaggedPointer(), load_vector_cell_matcher,
- c::IsIntPtrConstant(Cell::kValueOffset - kHeapObjectTag)));
+ Matcher<c::Node*> load_vector_cell_matcher = m.IsLoadFromObject(
+ MachineType::TaggedPointer(), load_function_matcher,
+ c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset - kHeapObjectTag));
+ EXPECT_THAT(load_feedback_vector,
+ m.IsLoadFromObject(
+ MachineType::TaggedPointer(), load_vector_cell_matcher,
+ c::IsIntPtrConstant(Cell::kValueOffset - kHeapObjectTag)));
}
}
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
index 828af4ade43..c2539d8a281 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
@@ -57,7 +57,8 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
const Matcher<compiler::Node*>& index_matcher,
const Matcher<compiler::Node*>& value_matcher);
- Matcher<Node*> IsWordNot(const Matcher<Node*>& value_matcher);
+ Matcher<compiler::Node*> IsWordNot(
+ const Matcher<compiler::Node*>& value_matcher);
Matcher<compiler::Node*> IsUnsignedByteOperand(
int offset, LoadSensitivity needs_poisoning);
diff --git a/deps/v8/test/unittests/numbers/conversions-unittest.cc b/deps/v8/test/unittests/numbers/conversions-unittest.cc
index e0c1c55aae0..43b761ac67a 100644
--- a/deps/v8/test/unittests/numbers/conversions-unittest.cc
+++ b/deps/v8/test/unittests/numbers/conversions-unittest.cc
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/init/v8.h"
-
#include "src/numbers/conversions.h"
+
+#include "src/codegen/source-position.h"
+#include "src/init/v8.h"
#include "test/unittests/test-utils.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/objects/object-unittest.cc b/deps/v8/test/unittests/objects/object-unittest.cc
index eb666ebca80..d959c3ff086 100644
--- a/deps/v8/test/unittests/objects/object-unittest.cc
+++ b/deps/v8/test/unittests/objects/object-unittest.cc
@@ -95,11 +95,7 @@ TEST(Object, StructListOrder) {
<< " vs. current = " << current_type; \
prev = current;
- // Only test the _BASE portion (the hand-coded part). Note that the values are
- // not necessarily consecutive because some Structs that need special
- // handling, such as those that have multiple Map instances associated, are
- // omitted from this list.
- STRUCT_LIST_GENERATOR_BASE(STRUCT_LIST_ADAPTER, TEST_STRUCT)
+ STRUCT_LIST_GENERATOR(STRUCT_LIST_ADAPTER, TEST_STRUCT)
#undef TEST_STRUCT
}
diff --git a/deps/v8/test/unittests/objects/value-serializer-unittest.cc b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
index afefdc1f45e..8cbce76b76b 100644
--- a/deps/v8/test/unittests/objects/value-serializer-unittest.cc
+++ b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
@@ -12,12 +12,16 @@
#include "src/base/build_config.h"
#include "src/objects/backing-store.h"
#include "src/objects/objects-inl.h"
-#include "src/wasm/wasm-objects.h"
-#include "src/wasm/wasm-result.h"
#include "test/unittests/test-utils.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-engine.h"
+#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-result.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace {
@@ -2043,6 +2047,7 @@ class ValueSerializerTestWithSharedArrayBufferClone
Local<SharedArrayBuffer> NewSharedArrayBuffer(void* data, size_t byte_length,
bool is_wasm_memory) {
+#if V8_ENABLE_WEBASSEMBLY
if (is_wasm_memory) {
// TODO(titzer): there is no way to create Wasm memory backing stores
// through the API, or to create a shared array buffer whose backing
@@ -2057,17 +2062,19 @@ class ValueSerializerTestWithSharedArrayBufferClone
i_isolate->factory()->NewJSSharedArrayBuffer(
std::move(backing_store));
return Utils::ToLocalShared(buffer);
- } else {
- std::unique_ptr<v8::BackingStore> backing_store =
- SharedArrayBuffer::NewBackingStore(
- data, byte_length,
- [](void*, size_t, void*) {
- // Leak the buffer as it has the
- // lifetime of the test.
- },
- nullptr);
- return SharedArrayBuffer::New(isolate(), std::move(backing_store));
}
+#endif // V8_ENABLE_WEBASSEMBLY
+
+ CHECK(!is_wasm_memory);
+ std::unique_ptr<v8::BackingStore> backing_store =
+ SharedArrayBuffer::NewBackingStore(
+ data, byte_length,
+ [](void*, size_t, void*) {
+ // Leak the buffer as it has the
+ // lifetime of the test.
+ },
+ nullptr);
+ return SharedArrayBuffer::New(isolate(), std::move(backing_store));
}
static void SetUpTestCase() {
@@ -2173,6 +2180,7 @@ TEST_F(ValueSerializerTestWithSharedArrayBufferClone,
ExpectScriptTrue("new Uint8Array(result.a).toString() === '0,1,128,255'");
}
+#if V8_ENABLE_WEBASSEMBLY
TEST_F(ValueSerializerTestWithSharedArrayBufferClone,
RoundTripWebAssemblyMemory) {
bool flag_was_enabled = i::FLAG_experimental_wasm_threads;
@@ -2205,6 +2213,7 @@ TEST_F(ValueSerializerTestWithSharedArrayBufferClone,
i::FLAG_experimental_wasm_threads = flag_was_enabled;
}
+#endif // V8_ENABLE_WEBASSEMBLY
TEST_F(ValueSerializerTest, UnsupportedHostObject) {
InvalidEncodeTest("new ExampleHostObject()");
diff --git a/deps/v8/test/unittests/objects/backing-store-unittest.cc b/deps/v8/test/unittests/objects/wasm-backing-store-unittest.cc
index b31669a79c2..e1e70f5f8d4 100644
--- a/deps/v8/test/unittests/objects/backing-store-unittest.cc
+++ b/deps/v8/test/unittests/objects/wasm-backing-store-unittest.cc
@@ -2,10 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/objects/backing-store.h"
#include "src/base/platform/platform.h"
+#include "src/objects/backing-store.h"
#include "test/unittests/test-utils.h"
-
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index 124d3bb1b6b..5dff8b6b876 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -68,6 +68,9 @@ constexpr size_t kMaxByteSizedLeb128 = 127;
using F = std::pair<ValueType, bool>;
+// Used to construct fixed-size signatures: MakeSig::Returns(...).Params(...);
+using MakeSig = FixedSizeSignature<ValueType>;
+
enum MemoryType { kMemory32, kMemory64 };
// A helper for tests that require a module environment for functions,
@@ -1303,7 +1306,7 @@ TEST_F(FunctionBodyDecoderTest, TypeConversions) {
TestUnop(kExprF32ConvertF64, kWasmF32, kWasmF64);
}
-TEST_F(FunctionBodyDecoderTest, MacrosStmt) {
+TEST_F(FunctionBodyDecoderTest, MacrosVoid) {
builder.InitializeMemory();
ExpectValidates(sigs.v_i(), {WASM_LOCAL_SET(0, WASM_I32V_3(87348))});
ExpectValidates(
@@ -1743,7 +1746,7 @@ TEST_F(FunctionBodyDecoderTest, IndirectReturnCallsWithMismatchedSigs3) {
WASM_FEATURE_SCOPE(return_call);
const FunctionSig* sig = sigs.i_i();
- builder.InitializeTable(wasm::kWasmStmt);
+ builder.InitializeTable(wasm::kWasmVoid);
byte sig0 = builder.AddSignature(sigs.i_f());
@@ -1786,7 +1789,7 @@ TEST_F(FunctionBodyDecoderTest, IndirectReturnCallsWithoutTableCrash) {
TEST_F(FunctionBodyDecoderTest, IncompleteIndirectReturnCall) {
const FunctionSig* sig = sigs.i_i();
- builder.InitializeTable(wasm::kWasmStmt);
+ builder.InitializeTable(wasm::kWasmVoid);
static byte code[] = {kExprReturnCallIndirect};
ExpectFailure(sig, ArrayVector(code), kOmitEnd);
@@ -1866,7 +1869,7 @@ TEST_F(FunctionBodyDecoderTest, IndirectCallsOutOfBounds) {
TEST_F(FunctionBodyDecoderTest, IndirectCallsWithMismatchedSigs1) {
const FunctionSig* sig = sigs.i_i();
- builder.InitializeTable(wasm::kWasmStmt);
+ builder.InitializeTable(wasm::kWasmVoid);
byte sig0 = builder.AddSignature(sigs.i_f());
@@ -1928,7 +1931,7 @@ TEST_F(FunctionBodyDecoderTest, IndirectCallsWithoutTableCrash) {
TEST_F(FunctionBodyDecoderTest, IncompleteIndirectCall) {
const FunctionSig* sig = sigs.i_i();
- builder.InitializeTable(wasm::kWasmStmt);
+ builder.InitializeTable(wasm::kWasmVoid);
static byte code[] = {kExprCallIndirect};
ExpectFailure(sig, ArrayVector(code), kOmitEnd);
@@ -1937,7 +1940,7 @@ TEST_F(FunctionBodyDecoderTest, IncompleteIndirectCall) {
TEST_F(FunctionBodyDecoderTest, IncompleteStore) {
const FunctionSig* sig = sigs.i_i();
builder.InitializeMemory();
- builder.InitializeTable(wasm::kWasmStmt);
+ builder.InitializeTable(wasm::kWasmVoid);
static byte code[] = {kExprI32StoreMem};
ExpectFailure(sig, ArrayVector(code), kOmitEnd);
@@ -1947,7 +1950,7 @@ TEST_F(FunctionBodyDecoderTest, IncompleteI8x16Shuffle) {
WASM_FEATURE_SCOPE(simd);
const FunctionSig* sig = sigs.i_i();
builder.InitializeMemory();
- builder.InitializeTable(wasm::kWasmStmt);
+ builder.InitializeTable(wasm::kWasmVoid);
static byte code[] = {kSimdPrefix,
static_cast<byte>(kExprI8x16Shuffle & 0xff)};
@@ -2239,14 +2242,6 @@ TEST_F(FunctionBodyDecoderTest, WasmMemoryGrow) {
ExpectFailure(sigs.i_d(), code);
}
-TEST_F(FunctionBodyDecoderTest, AsmJsMemoryGrow) {
- module->origin = kAsmJsSloppyOrigin;
- builder.InitializeMemory();
-
- byte code[] = {WASM_LOCAL_GET(0), kExprMemoryGrow, 0};
- ExpectFailure(sigs.i_i(), code);
-}
-
TEST_F(FunctionBodyDecoderTest, AsmJsBinOpsCheckOrigin) {
ValueType float32int32float32[] = {kWasmF32, kWasmI32, kWasmF32};
FunctionSig sig_f_if(1, 2, float32int32float32);
@@ -2939,6 +2934,11 @@ TEST_F(FunctionBodyDecoderTest, TryDelegate) {
WASM_TRY_DELEGATE(WASM_STMTS(kExprThrow, ex), 0), kExprEnd},
kAppendEnd,
"cannot delegate inside the catch handler of the target");
+ ExpectFailure(sigs.v_v(),
+ {WASM_TRY_OP, kExprUnwind,
+ WASM_TRY_DELEGATE(WASM_STMTS(kExprThrow, ex), 0), kExprEnd},
+ kAppendEnd,
+ "cannot delegate inside the catch handler of the target");
ExpectFailure(
sigs.v_v(),
{WASM_BLOCK(WASM_TRY_OP, WASM_TRY_DELEGATE(WASM_STMTS(kExprThrow, ex), 3),
@@ -3325,7 +3325,7 @@ TEST_F(FunctionBodyDecoderTest, DeclarativeElemDrop) {
}
TEST_F(FunctionBodyDecoderTest, RefFuncDeclared) {
- builder.InitializeTable(wasm::kWasmStmt);
+ builder.InitializeTable(wasm::kWasmVoid);
byte function_index = builder.AddFunction(sigs.v_i());
ExpectFailure(sigs.a_v(), {WASM_REF_FUNC(function_index)});
@@ -3334,7 +3334,7 @@ TEST_F(FunctionBodyDecoderTest, RefFuncDeclared) {
}
TEST_F(FunctionBodyDecoderTest, RefFuncUndeclared) {
- builder.InitializeTable(wasm::kWasmStmt);
+ builder.InitializeTable(wasm::kWasmVoid);
byte function_index = builder.AddFunction(sigs.v_i(), false);
WASM_FEATURE_SCOPE(reftypes);
@@ -3355,7 +3355,7 @@ TEST_F(FunctionBodyDecoderTest, ElemSegmentIndexUnsigned) {
}
TEST_F(FunctionBodyDecoderTest, TableCopy) {
- builder.InitializeTable(wasm::kWasmStmt);
+ builder.InitializeTable(wasm::kWasmVoid);
ExpectValidates(sigs.v_v(),
{WASM_TABLE_COPY(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
@@ -4269,17 +4269,18 @@ TEST_F(FunctionBodyDecoderTest, RefTestCast) {
HeapType::Representation func_heap_2 =
static_cast<HeapType::Representation>(builder.AddSignature(sigs.i_v()));
- // Passing/failing tests due to static subtyping.
std::tuple<HeapType::Representation, HeapType::Representation, bool> tests[] =
{std::make_tuple(HeapType::kData, array_heap, true),
std::make_tuple(HeapType::kData, super_struct_heap, true),
std::make_tuple(HeapType::kFunc, func_heap_1, true),
std::make_tuple(func_heap_1, func_heap_1, true),
- std::make_tuple(func_heap_1, func_heap_2, false),
+ std::make_tuple(func_heap_1, func_heap_2, true),
std::make_tuple(super_struct_heap, sub_struct_heap, true),
- std::make_tuple(sub_struct_heap, super_struct_heap, false),
- std::make_tuple(sub_struct_heap, array_heap, false),
- std::make_tuple(HeapType::kFunc, array_heap, false)};
+ std::make_tuple(array_heap, sub_struct_heap, true),
+ std::make_tuple(super_struct_heap, func_heap_1, true),
+ std::make_tuple(HeapType::kEq, super_struct_heap, false),
+ std::make_tuple(HeapType::kAny, func_heap_1, false),
+ std::make_tuple(HeapType::kI31, array_heap, false)};
for (auto test : tests) {
HeapType from_heap = HeapType(std::get<0>(test));
@@ -4308,10 +4309,10 @@ TEST_F(FunctionBodyDecoderTest, RefTestCast) {
ExpectValidates(&cast_sig,
{WASM_REF_CAST(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
} else {
- std::string error_message = "[0] expected supertype of type " +
- std::to_string(to_heap.ref_index()) +
- ", found local.get of type " +
- test_reps[1].name();
+ std::string error_message =
+ "[0] expected subtype of (ref null func) or (ref null data), found "
+ "local.get of type " +
+ test_reps[1].name();
ExpectFailure(&test_sig,
{WASM_REF_TEST(WASM_LOCAL_GET(0),
WASM_RTT_CANON(WASM_HEAP_TYPE(to_heap)))},
@@ -4339,20 +4340,27 @@ TEST_F(FunctionBodyDecoderTest, RefTestCast) {
kAppendEnd,
"ref.cast[0] expected subtype of (ref null func) or (ref null data), "
"found i32.const of type i32");
+}
+
+TEST_F(FunctionBodyDecoderTest, LocalTeeTyping) {
+ WASM_FEATURE_SCOPE(reftypes);
+ WASM_FEATURE_SCOPE(typed_funcref);
+ WASM_FEATURE_SCOPE(gc);
+
+ TestModuleBuilder builder;
+ module = builder.module();
+ byte array_type = builder.AddArray(kWasmI8, true);
+
+ ValueType types[] = {ValueType::Ref(array_type, kNonNullable)};
+ FunctionSig sig(1, 0, types);
+
+ AddLocals(ValueType::Ref(array_type, kNullable), 1);
- // Trivial type error.
- ExpectFailure(
- sigs.v_v(),
- {WASM_REF_TEST(WASM_I32V(1), WASM_RTT_CANON(array_heap)), kExprDrop},
- kAppendEnd,
- "ref.test[0] expected subtype of (ref null func) or (ref null data), "
- "found i32.const of type i32");
ExpectFailure(
- sigs.v_v(),
- {WASM_REF_CAST(WASM_I32V(1), WASM_RTT_CANON(array_heap)), kExprDrop},
- kAppendEnd,
- "ref.cast[0] expected subtype of (ref null func) or (ref null data), "
- "found i32.const of type i32");
+ &sig,
+ {WASM_LOCAL_TEE(0, WASM_ARRAY_NEW_DEFAULT(array_type, WASM_I32V(5),
+ WASM_RTT_CANON(array_type)))},
+ kAppendEnd, "expected (ref 0), got (ref null 0)");
}
// This tests that num_locals_ in decoder remains consistent, even if we fail
@@ -4365,6 +4373,16 @@ TEST_F(FunctionBodyDecoderTest, Regress_1154439) {
ExpectFailure(sigs.v_v(), {}, kAppendEnd, "local count too large");
}
+TEST_F(FunctionBodyDecoderTest, DropOnEmptyStack) {
+ // Valid code:
+ ExpectValidates(sigs.v_v(), {kExprI32Const, 1, kExprDrop}, kAppendEnd);
+ // Invalid code (dropping from empty stack):
+ ExpectFailure(sigs.v_v(), {kExprDrop}, kAppendEnd,
+ "not enough arguments on the stack for drop");
+ // Valid code (dropping from empty stack in unreachable code):
+ ExpectValidates(sigs.v_v(), {kExprUnreachable, kExprDrop}, kAppendEnd);
+}
+
class BranchTableIteratorTest : public TestWithZone {
public:
BranchTableIteratorTest() : TestWithZone() {}
@@ -5000,6 +5018,19 @@ TEST_P(FunctionBodyDecoderTestOnBothMemoryTypes, MemorySize) {
{WASM_MEMORY_SIZE, kExprI64Eqz, kExprDrop});
}
+TEST_P(FunctionBodyDecoderTestOnBothMemoryTypes, MemoryGrow) {
+ builder.InitializeMemory(GetParam());
+ // memory.grow is i32->i32 memory32.
+ Validate(!is_memory64(), sigs.i_i(), {WASM_MEMORY_GROW(WASM_LOCAL_GET(0))});
+ // memory.grow is i64->i64 for memory64.
+ Validate(is_memory64(), sigs.l_l(), {WASM_MEMORY_GROW(WASM_LOCAL_GET(0))});
+ // any other combination always fails.
+ auto sig_l_i = MakeSig::Returns(kWasmI64).Params(kWasmI32);
+ ExpectFailure(&sig_l_i, {WASM_MEMORY_GROW(WASM_LOCAL_GET(0))});
+ auto sig_i_l = MakeSig::Returns(kWasmI32).Params(kWasmI64);
+ ExpectFailure(&sig_i_l, {WASM_MEMORY_GROW(WASM_LOCAL_GET(0))});
+}
+
#undef B1
#undef B2
#undef B3
diff --git a/deps/v8/test/unittests/wasm/liftoff-register-unittests.cc b/deps/v8/test/unittests/wasm/liftoff-register-unittests.cc
new file mode 100644
index 00000000000..84f5908768f
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/liftoff-register-unittests.cc
@@ -0,0 +1,41 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/baseline/liftoff-assembler-defs.h"
+#if V8_TARGET_ARCH_IA32
+#include "src/execution/ia32/frame-constants-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/execution/x64/frame-constants-x64.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "src/execution/mips/frame-constants-mips.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/execution/mips64/frame-constants-mips64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "src/execution/arm/frame-constants-arm.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/execution/arm64/frame-constants-arm64.h"
+#elif V8_TARGET_ARCH_S390X
+#include "src/execution/s390/frame-constants-s390.h"
+#elif V8_TARGET_ARCH_PPC64
+#include "src/execution/ppc/frame-constants-ppc.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/execution/riscv64/frame-constants-riscv64.h"
+#endif
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// The registers used by Liftoff and the registers spilled by the
+// WasmDebugBreak builtin should match.
+STATIC_ASSERT(kLiftoffAssemblerGpCacheRegs ==
+ WasmDebugBreakFrameConstants::kPushedGpRegs);
+
+STATIC_ASSERT(kLiftoffAssemblerFpCacheRegs ==
+ WasmDebugBreakFrameConstants::kPushedFpRegs);
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/module-decoder-memory64-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-memory64-unittest.cc
index edd12b022aa..1109200a9aa 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-memory64-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-memory64-unittest.cc
@@ -4,6 +4,7 @@
#include "src/objects/objects-inl.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"
#include "test/common/wasm/wasm-macro-gen.h"
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index f721dc33d3f..3a9fec0c994 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -2,16 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "test/unittests/test-utils.h"
+#include "src/wasm/module-decoder.h"
#include "src/handles/handles.h"
#include "src/objects/objects-inl.h"
-#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-opcodes.h"
#include "test/common/wasm/flag-utils.h"
#include "test/common/wasm/wasm-macro-gen.h"
+#include "test/unittests/test-utils.h"
#include "testing/gmock-support.h"
using testing::HasSubstr;
@@ -495,7 +496,7 @@ TEST_F(WasmModuleVerifyTest, GlobalInitializer) {
1) // mutable
};
EXPECT_FAILURE_WITH_MSG(no_initializer_no_end,
- "Global initializer is missing 'end'");
+ "Initializer expression is missing 'end'");
static const byte no_initializer[] = {
SECTION(Global, //--
@@ -505,7 +506,7 @@ TEST_F(WasmModuleVerifyTest, GlobalInitializer) {
kExprEnd) // --
};
EXPECT_FAILURE_WITH_MSG(no_initializer,
- "Found 'end' in global initalizer, but no "
+ "Found 'end' in initializer expression, but no "
"expressions were found on the stack");
static const byte too_many_initializers_no_end[] = {
@@ -517,7 +518,7 @@ TEST_F(WasmModuleVerifyTest, GlobalInitializer) {
WASM_I32V_1(43)) // another value is too much
};
EXPECT_FAILURE_WITH_MSG(too_many_initializers_no_end,
- "Global initializer is missing 'end'");
+ "Initializer expression is missing 'end'");
static const byte too_many_initializers[] = {
SECTION(Global, // --
@@ -528,8 +529,8 @@ TEST_F(WasmModuleVerifyTest, GlobalInitializer) {
WASM_I32V_1(43), // another value is too much
kExprEnd)};
EXPECT_FAILURE_WITH_MSG(too_many_initializers,
- "Found 'end' in global initalizer, but more than one "
- "expressions were found on the stack");
+ "Found 'end' in initializer expression, but more than"
+ " one expressions were found on the stack");
static const byte missing_end_opcode[] = {
SECTION(Global, // --
@@ -539,7 +540,7 @@ TEST_F(WasmModuleVerifyTest, GlobalInitializer) {
WASM_I32V_1(42)) // init value
};
EXPECT_FAILURE_WITH_MSG(missing_end_opcode,
- "Global initializer is missing 'end'");
+ "Initializer expression is missing 'end'");
static const byte referencing_out_of_bounds_global[] = {
SECTION(Global, ENTRY_COUNT(1), // --
@@ -1971,6 +1972,24 @@ TEST_F(WasmModuleVerifyTest, TypedFunctionTable) {
EXPECT_EQ(ValueType::Ref(0, kNullable), result.value()->tables[0].type);
}
+TEST_F(WasmModuleVerifyTest, NullableTableIllegalInitializer) {
+ WASM_FEATURE_SCOPE(reftypes);
+ WASM_FEATURE_SCOPE(typed_funcref);
+
+ static const byte data[] = {
+ SECTION(Type, ENTRY_COUNT(1), SIG_ENTRY_v_v), // type section
+ ONE_EMPTY_FUNCTION(0), // function section
+ SECTION(Table, // table section
+ ENTRY_COUNT(1), // 1 table
+ kOptRefCode, 0, // table 0: type
+ 0, 10, // table 0: limits
+ kExprRefFunc, 0, kExprEnd)}; // table 0: initializer
+
+ EXPECT_FAILURE_WITH_MSG(
+ data,
+ "section was shorter than expected size (8 bytes expected, 5 decoded)");
+}
+
TEST_F(WasmModuleVerifyTest, IllegalTableTypes) {
WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
@@ -1999,13 +2018,47 @@ TEST_F(WasmModuleVerifyTest, IllegalTableTypes) {
auto result = DecodeModule(data.data(), data.data() + data.size());
- EXPECT_NOT_OK(
- result,
- "Currently, only externref and function references are allowed "
- "as table types");
+ EXPECT_NOT_OK(result,
+ "Currently, only externref and function references are "
+ "allowed as table types");
}
}
+TEST_F(WasmModuleVerifyTest, NonNullableTable) {
+ WASM_FEATURE_SCOPE(reftypes);
+ WASM_FEATURE_SCOPE(typed_funcref);
+
+ static const byte data[] = {
+ SECTION(Type, ENTRY_COUNT(1), SIG_ENTRY_v_v), // type section
+ ONE_EMPTY_FUNCTION(0), // function section
+ SECTION(Table, // table section
+ ENTRY_COUNT(1), // 1 table
+ kRefCode, 0, // table 0: type
+ 0, 10, // table 0: limits
+ kExprRefFunc, 0, kExprEnd), // table 0: init. expression
+ SECTION(Code, ENTRY_COUNT(1), NOP_BODY)};
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_OK(result);
+ EXPECT_EQ(ValueType::Ref(0, kNonNullable), result.value()->tables[0].type);
+}
+
+TEST_F(WasmModuleVerifyTest, NonNullableTableNoInitializer) {
+ WASM_FEATURE_SCOPE(reftypes);
+ WASM_FEATURE_SCOPE(typed_funcref);
+
+ static const byte data[] = {
+ SECTION(Type, ENTRY_COUNT(1), SIG_ENTRY_v_x(kI32Code)),
+ SECTION(Table, // table section
+ ENTRY_COUNT(2), // 2 tables
+ kRefCode, 0, // table 0: type
+ 0, 10, // table 0: limits
+ kRefCode, 0, // table 1: type
+ 5, 6)}; // table 1: limits
+
+ EXPECT_FAILURE_WITH_MSG(data,
+ "invalid opcode 0x6b in initializer expression");
+}
+
TEST_F(WasmModuleVerifyTest, TieringCompilationHints) {
WASM_FEATURE_SCOPE(compilation_hints);
static const byte data[] = {
diff --git a/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc b/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc
index 9689a15eb4d..b9970cc097d 100644
--- a/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc
@@ -2,13 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "test/unittests/test-utils.h"
+#include "src/compiler/wasm-compiler.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/signature.h"
#include "src/compiler/linkage.h"
-#include "src/compiler/wasm-compiler.h"
#include "src/wasm/value-type.h"
+#include "src/wasm/wasm-linkage.h"
+#include "test/unittests/test-utils.h"
namespace v8 {
namespace internal {
@@ -66,6 +67,45 @@ TEST_F(WasmCallDescriptorTest, TestExternRefIsGrouped) {
}
}
+TEST_F(WasmCallDescriptorTest, Regress_1174500) {
+ // Our test signature should have just enough params and returns to force
+ // 1 param and 1 return to be allocated as stack slots. Use FP registers to
+ // avoid interference with implicit parameters, like the Wasm Instance.
+ constexpr int kParamRegisters = arraysize(kFpParamRegisters);
+ constexpr int kParams = kParamRegisters + 1;
+ constexpr int kReturnRegisters = arraysize(kFpReturnRegisters);
+ constexpr int kReturns = kReturnRegisters + 1;
+ ValueType types[kReturns + kParams];
+ // One S128 return slot which shouldn't be padded unless the arguments area
+ // of the frame requires it.
+ for (int i = 0; i < kReturnRegisters; ++i) types[i] = kWasmF32;
+ types[kReturnRegisters] = kWasmS128;
+ // One F32 parameter slot to misalign the parameter area.
+ for (int i = 0; i < kParamRegisters; ++i) types[kReturns + i] = kWasmF32;
+ types[kReturns + kParamRegisters] = kWasmF32;
+
+ FunctionSig sig(kReturns, kParams, types);
+ compiler::CallDescriptor* desc =
+ compiler::GetWasmCallDescriptor(zone(), &sig);
+
+ // Get the location of our stack parameter slot. Skip the implicit Wasm
+ // instance parameter.
+ compiler::LinkageLocation last_param = desc->GetInputLocation(kParams + 1);
+ EXPECT_TRUE(last_param.IsCallerFrameSlot());
+ EXPECT_EQ(MachineType::Float32(), last_param.GetType());
+ EXPECT_EQ(-1, last_param.GetLocation());
+
+ // The stack return slot should be right above our last parameter, and any
+ // argument padding slots. The return slot itself should not be padded.
+ const int padding = ShouldPadArguments(1);
+ const int first_return_slot = -1 - (padding + 1);
+ compiler::LinkageLocation return_location =
+ desc->GetReturnLocation(kReturns - 1);
+ EXPECT_TRUE(return_location.IsCallerFrameSlot());
+ EXPECT_EQ(MachineType::Simd128(), return_location.GetType());
+ EXPECT_EQ(first_return_slot, return_location.GetLocation());
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/wasm-api-tests/BUILD.gn b/deps/v8/test/wasm-api-tests/BUILD.gn
index 5bc48f57da3..acb3f0c27cb 100644
--- a/deps/v8/test/wasm-api-tests/BUILD.gn
+++ b/deps/v8/test/wasm-api-tests/BUILD.gn
@@ -8,6 +8,8 @@ v8_executable("wasm_api_tests") {
testonly = true
deps = [
+ "../:common_test_headers",
+ "../..:v8_internal_headers",
"../..:v8_maybe_icu",
"../..:wee8",
"//build/win:default_exe_manifest",
@@ -15,9 +17,7 @@ v8_executable("wasm_api_tests") {
"//testing/gtest",
]
- data_deps = [
- "../../tools:v8_testrunner",
- ]
+ data_deps = [ "../../tools:v8_testrunner" ]
data = [
"testcfg.py",
diff --git a/deps/v8/test/wasm-api-tests/wasm-api-tests.status b/deps/v8/test/wasm-api-tests/wasm-api-tests.status
index 05488c17115..6aa0f51011f 100644
--- a/deps/v8/test/wasm-api-tests/wasm-api-tests.status
+++ b/deps/v8/test/wasm-api-tests/wasm-api-tests.status
@@ -4,10 +4,10 @@
[
-['lite_mode or variant == jitless', {
- # TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
+# TODO(v8:7777): Change this once wasm is supported in jitless mode.
+['not has_webassembly or variant == jitless', {
'*': [SKIP],
-}], # lite_mode or variant == jitless
+}], # not has_webassembly or variant == jitless
################################################################################
['variant == stress_snapshot', {
diff --git a/deps/v8/test/wasm-js/testcfg.py b/deps/v8/test/wasm-js/testcfg.py
index 2788ed5fce9..6a99554898a 100644
--- a/deps/v8/test/wasm-js/testcfg.py
+++ b/deps/v8/test/wasm-js/testcfg.py
@@ -14,17 +14,6 @@ META_SCRIPT_REGEXP = re.compile(r"META:\s*script=(.*)")
META_TIMEOUT_REGEXP = re.compile(r"META:\s*timeout=(.*)")
proposal_flags = [{
- 'name': 'reference-types',
- 'flags': ['--experimental-wasm-reftypes',
- '--no-experimental-wasm-bulk-memory',
- '--wasm-staging']
- },
- {
- 'name': 'bulk-memory-operations',
- 'flags': ['--experimental-wasm-bulk-memory',
- '--wasm-staging']
- },
- {
'name': 'js-types',
'flags': ['--experimental-wasm-type-reflection',
'--wasm-staging']
diff --git a/deps/v8/test/wasm-js/tests.tar.gz.sha1 b/deps/v8/test/wasm-js/tests.tar.gz.sha1
index 287917ecdad..63fbde0778c 100644
--- a/deps/v8/test/wasm-js/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-js/tests.tar.gz.sha1
@@ -1 +1 @@
-ef30002bb06bd09b91b62d3fa152d1af94b28eaf \ No newline at end of file
+50b01d97338b464df8daa56355f83011930ec678 \ No newline at end of file
diff --git a/deps/v8/test/wasm-js/wasm-js.status b/deps/v8/test/wasm-js/wasm-js.status
index 9f8d54442d0..39d9b86b957 100644
--- a/deps/v8/test/wasm-js/wasm-js.status
+++ b/deps/v8/test/wasm-js/wasm-js.status
@@ -15,18 +15,13 @@
'prototypes': [FAIL],
- # Outdated proposals, will work after rebasing.
- 'proposals/reference-types/global/value-get-set': [FAIL],
- 'proposals/reference-types/global/constructor': [FAIL],
- 'proposals/bulk-memory-operations/global/value-get-set': [FAIL],
- 'proposals/bulk-memory-operations/global/constructor': [FAIL],
-
# These are slow, and not useful to run for the proposals:
- 'proposals/reference-types/limits': [SKIP],
- 'proposals/bulk-memory-operations/limits': [SKIP],
'proposals/js-types/limits': [SKIP],
- # TODO(wasm): Update memory limit.
- 'limits': [FAIL],
+ 'proposals/simd/limits': [SKIP],
+ 'proposals/memory64/limits': [SKIP],
+
+ # TODO(v8:11577): investigate this failure.
+ 'limits': [SKIP],
}], # ALWAYS
['arch == s390 or arch == s390x or system == aix', {
diff --git a/deps/v8/test/wasm-spec-tests/testcfg.py b/deps/v8/test/wasm-spec-tests/testcfg.py
index 908ce09d4e1..c3e57ce6c55 100644
--- a/deps/v8/test/wasm-spec-tests/testcfg.py
+++ b/deps/v8/test/wasm-spec-tests/testcfg.py
@@ -8,16 +8,6 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
proposal_flags = [{
- 'name': 'reference-types',
- 'flags': ['--experimental-wasm-reftypes',
- '--wasm-staging']
- },
- {
- 'name': 'bulk-memory-operations',
- 'flags': ['--experimental-wasm-bulk-memory',
- '--wasm-staging']
- },
- {
'name': 'js-types',
'flags': ['--experimental-wasm-type-reflection',
'--wasm-staging']
@@ -62,7 +52,7 @@ class TestCase(testcase.D8TestCase):
for proposal in proposal_flags:
if os.sep.join(['proposals', proposal['name']]) in self.path:
return proposal['flags']
- return []
+ return ['--experimental-wasm-reftypes']
def GetSuite(*args, **kwargs):
diff --git a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1 b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
index ee7a50fdd92..8390796a1a9 100644
--- a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
@@ -1 +1 @@
-4db01ba8549a087ae9adaa8540cec2689c7dad64 \ No newline at end of file
+38fd550b9d30afab338b1902dbb78ce86500ad0f \ No newline at end of file
diff --git a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
index 38ac495bea1..17e2d00c59a 100644
--- a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
+++ b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
@@ -6,16 +6,14 @@
[ALWAYS, {
'skip-stack-guard-page': [PASS, ['((arch == ppc or arch == ppc64 or arch == s390 or arch == s390x) and simulator_run)', SKIP]],
# TODO(v8:10994): Failing spec test after update.
- 'proposals/simd/imports': [FAIL],
- 'proposals/simd/data': [FAIL],
'proposals/js-types/data': [FAIL],
- # TODO(v8:9144): The MVP behavior when bounds-checking segments changed in
- # the bulk-memory proposal. Since we've enabled bulk-memory by default, we
- # need to update to use its testsuite.
- 'linking': [FAIL],
- 'elem': [FAIL],
- 'data': [FAIL],
+ # Missing rebase in the proposal repository.
+ 'proposals/js-types/table': [FAIL],
+ 'proposals/js-types/unreached-invalid': [FAIL],
+ 'proposals/memory64/linking': [FAIL],
+ 'proposals/memory64/table': [FAIL],
+ 'proposals/memory64/unreached-invalid': [FAIL],
# TODO(wasm): Roll newest tests into "js-types" repository.
'proposals/js-types/elem': [FAIL],
@@ -44,9 +42,7 @@
'proposals/memory64/elem': [FAIL],
'proposals/memory64/float_memory64': [FAIL],
'proposals/memory64/imports': [FAIL],
- 'proposals/memory64/load64': [FAIL],
'proposals/memory64/memory64': [FAIL],
- 'proposals/memory64/memory_grow64': [FAIL],
'proposals/memory64/memory_trap64': [FAIL],
}], # ALWAYS
diff --git a/deps/v8/third_party/v8/builtins/array-sort.tq b/deps/v8/third_party/v8/builtins/array-sort.tq
index 7737ab78e38..334bc44922a 100644
--- a/deps/v8/third_party/v8/builtins/array-sort.tq
+++ b/deps/v8/third_party/v8/builtins/array-sort.tq
@@ -1388,11 +1388,11 @@ ArrayPrototypeSort(
// 3. Let len be ? ToLength(? Get(obj, "length")).
const len: Number = GetLengthProperty(obj);
- if (len < 2) return receiver;
+ if (len < 2) return obj;
const sortState: SortState = NewSortState(obj, comparefn, len);
ArrayTimSort(context, sortState);
- return receiver;
+ return obj;
}
}
diff --git a/deps/v8/third_party/zlib/google/zip_reader.cc b/deps/v8/third_party/zlib/google/zip_reader.cc
index 1e86afe77d9..1910cf2c953 100644
--- a/deps/v8/third_party/zlib/google/zip_reader.cc
+++ b/deps/v8/third_party/zlib/google/zip_reader.cc
@@ -101,7 +101,7 @@ ZipReader::EntryInfo::EntryInfo(const std::string& file_name_in_zip,
is_unsafe_ = file_path_.ReferencesParent();
// We also consider that the file name is unsafe, if it's invalid UTF-8.
- base::string16 file_name_utf16;
+ std::u16string file_name_utf16;
if (!base::UTF8ToUTF16(file_name_in_zip.data(), file_name_in_zip.size(),
&file_name_utf16)) {
is_unsafe_ = true;
diff --git a/deps/v8/tools/arguments.mjs b/deps/v8/tools/arguments.mjs
index 4e607b7ee9b..1d266b7cee5 100644
--- a/deps/v8/tools/arguments.mjs
+++ b/deps/v8/tools/arguments.mjs
@@ -23,10 +23,10 @@ export class BaseArgumentsProcessor {
result() { return this.result_ }
printUsageAndExit() {
- print('Cmdline args: [options] [log-file-name]\n' +
+ console.log('Cmdline args: [options] [log-file-name]\n' +
'Default log file name is "' +
this.result_.logFileName + '".\n');
- print('Options:');
+ console.log('Options:');
for (const arg in this.argsDispatch_) {
const synonyms = [arg];
const dispatch = this.argsDispatch_[arg];
@@ -36,7 +36,7 @@ export class BaseArgumentsProcessor {
delete this.argsDispatch_[synArg];
}
}
- print(` ${synonyms.join(', ').padEnd(20)} ${dispatch[2]}`);
+ console.log(` ${synonyms.join(', ').padEnd(20)} ${dispatch[2]}`);
}
quit(2);
}
diff --git a/deps/v8/tools/bash-completion.sh b/deps/v8/tools/bash-completion.sh
index 27e73b7ad64..a7d525f1790 100755
--- a/deps/v8/tools/bash-completion.sh
+++ b/deps/v8/tools/bash-completion.sh
@@ -27,33 +27,112 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Inspired by and based on:
-# http://src.chromium.org/viewvc/chrome/trunk/src/tools/bash-completion
+# https://chromium.googlesource.com/chromium/src/+/master/tools/bash-completion
# Flag completion rule for bash.
# To load in your shell, "source path/to/this/file".
-v8_source=$(readlink -f $(dirname $BASH_SOURCE)/..)
+v8_source="$(realpath "$(dirname "$BASH_SOURCE")"/..)"
-_v8_flag() {
- local cur defines targets
- cur="${COMP_WORDS[COMP_CWORD]}"
- defines=$(cat $v8_source/src/flags/flag-definitions.h \
+_get_v8_flags() {
+ # The first `sed` command joins lines when a line ends with '('.
+ # See http://sed.sourceforge.net/sedfaq3.html#s3.2
+ local flags_file="$v8_source/src/flags/flag-definitions.h"
+ local defines=$( \
+ sed -e :a -e '/($/N; s/(\n\s*/(/; ta' < "$flags_file" \
| grep "^DEFINE" \
| grep -v "DEFINE_IMPLICATION" \
+ | grep -v "DEFINE_NEG_IMPLICATION" \
+ | grep -v "DEFINE_VALUE_IMPLICATION" \
| sed -e 's/_/-/g'; \
- cat $v8_source/src/flags/flag-definitions.h \
- | grep "^ V(harmony_" \
+ grep "^ V(harmony_" "$flags_file" \
| sed -e 's/^ V/DEFINE-BOOL/' \
+ | sed -e 's/_/-/g'; \
+ grep "^ V(" "$v8_source/src/wasm/wasm-feature-flags.h" \
+ | sed -e 's/^ V(/DEFINE-BOOL(experimental-wasm-/' \
| sed -e 's/_/-/g')
- targets=$(echo "$defines" \
- | sed -ne 's/^DEFINE-[^(]*(\([^,]*\).*/--\1/p'; \
- echo "$defines" \
- | sed -ne 's/^DEFINE-BOOL(\([^,]*\).*/--no\1/p'; \
- cat $v8_source/src/d8/d8.cc \
- | grep "strcmp(argv\[i\]" \
- | sed -ne 's/^[^"]*"--\([^"]*\)".*/--\1/p')
- COMPREPLY=($(compgen -W "$targets" -- "$cur"))
+ sed -ne 's/^DEFINE-[^(]*(\([^,]*\).*/--\1/p' <<< "$defines"
+ sed -ne 's/^DEFINE-BOOL(\([^,]*\).*/--no\1/p' <<< "$defines"
+}
+
+_get_d8_flags() {
+ grep "strcmp(argv\[i\]" "$v8_source/src/d8/d8.cc" \
+ | sed -ne 's/^[^"]*"--\([^"]*\)".*/--\1/p'
+}
+
+_d8_flag() {
+ local targets
+ targets=$(_get_v8_flags ; _get_d8_flags)
+ COMPREPLY=($(compgen -W "$targets" -- "${COMP_WORDS[COMP_CWORD]}"))
+ return 0
+}
+
+_test_flag() {
+ local targets
+ targets=$(_get_v8_flags)
+ COMPREPLY=($(compgen -W "$targets" -- "${COMP_WORDS[COMP_CWORD]}"))
+ return 0
+}
+
+complete -F _d8_flag -f d8 v8 v8-debug
+complete -F _test_flag -f cctest unittests
+
+# Many distros set up their own GDB completion scripts. The logic below is
+# careful to wrap any such functions (with additional logic), rather than
+# overwriting them.
+# An additional complication is that tested distros dynamically load their
+# completion scripts on first use. So in order to be able to detect their
+# presence, we have to force-load them first.
+_maybe_setup_gdb_completions() {
+ # We only want to set up the wrapping once:
+ [[ -n "$_ORIGINAL_GDB_COMPLETIONS" ]] && return 0;
+
+ # This path works for Debian, Ubuntu, and Gentoo; other distros unknown.
+ # Feel free to submit patches to extend the logic.
+ local GDB_COMP
+ for GDB_COMP in "/usr/share/bash-completion/completions/gdb"; do
+ [[ -f "$GDB_COMP" ]] && source $GDB_COMP
+ done
+ _ORIGINAL_GDB_COMPLETIONS="$(complete -p gdb 2>/dev/null \
+ | sed -e 's/^.*-F \([^ ]*\).*/\1/')"
+
+ _gdb_v8_flag() {
+ local c i next
+ for (( i=1; i<$(($COMP_CWORD-1)); i++ )); do
+ c=${COMP_WORDS[$i]}
+ if [ "$c" = "-args" ] || [ "$c" = "--args" ] || [ "$c" == "--" ]; then
+ next=$(basename -- ${COMP_WORDS[$(($i+1))]})
+ if [ "$next" = "d8" ] ; then
+ _d8_flag
+ return 0
+ elif [ "$next" = "unittests" ] || [ "$next" = "cctest" ]; then
+ _test_flag
+ return 0
+ fi
+ fi
+ done
+ [[ -n "$_ORIGINAL_GDB_COMPLETIONS" ]] && $_ORIGINAL_GDB_COMPLETIONS
+ return 0
+ }
+ complete -F _gdb_v8_flag -f gdb
+}
+_maybe_setup_gdb_completions
+unset _maybe_setup_gdb_completions
+
+_get_gm_flags() {
+ "$v8_source/tools/dev/gm.py" --print-completions
+
+ # cctest ignores the directory structure; it's always "cctest/filename".
+ find "$v8_source/test/cctest/" -type f -name 'test-*' | \
+ xargs basename -a -s ".cc" | \
+ while read -r item; do echo "cctest/$item/*"; done
+}
+
+_gm_flag() {
+ local targets=$(_get_gm_flags)
+ COMPREPLY=($(compgen -W "$targets" -- "${COMP_WORDS[COMP_CWORD]}"))
return 0
}
-complete -F _v8_flag -f d8
+# gm might be an alias, based on https://v8.dev/docs/build-gn#gm.
+complete -F _gm_flag gm.py gm
diff --git a/deps/v8/tools/callstats.html b/deps/v8/tools/callstats.html
index cc4260d2806..701e5bee874 100644
--- a/deps/v8/tools/callstats.html
+++ b/deps/v8/tools/callstats.html
@@ -7,12 +7,29 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
<head>
<meta charset="utf-8">
- <title>V8 Runtime Stats Komparator</title>
+ <title>V8 Runtime Call Stats Komparator</title>
+ <link rel="stylesheet" type="text/css" href="system-analyzer/index.css">
<style>
body {
font-family: arial;
}
+ .panel {
+ display: none;
+ }
+
+ .loaded .panel {
+ display: block;
+ }
+
+ .panel.alwaysVisible {
+ display: inherit !important;
+ }
+
+ .error #inputs {
+ background-color: var(--error-color);
+ }
+
table {
display: table;
border-spacing: 0px;
@@ -30,12 +47,8 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
.inline {
display: inline-block;
- vertical-align: top;
- }
-
- h2,
- h3 {
- margin-bottom: 0px;
+ vertical-align: middle;
+ margin-right: 10px;
}
.hidden {
@@ -46,6 +59,15 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
display: table;
}
+ .panel-group {
+ display: grid;
+ align-content: center;
+ grid-template-columns: repeat(auto-fill, minmax(500px, 1fr));
+ grid-auto-flow: row dense;
+ grid-gap: 10px;
+ margin-top: 10px;
+ }
+
.column {
display: table-cell;
border-right: 1px black dotted;
@@ -73,11 +95,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
}
.list tr:nth-child(even) {
- background-color: #EFEFEF;
- }
-
- .list tr:nth-child(even).selected {
- background-color: #DDD;
+ background-color: rgba(0.5, 0.5, 0.5, 0.1);
}
.list tr.child {
@@ -100,23 +118,17 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
font-weight: bold
}
- .list tr.parent {
- background-color: #FFF;
- }
-
- .list tr.parent.selected {
- background-color: #DDD;
- }
-
+ .list tr.parent.selected,
+ .list tr:nth-child(even).selected,
tr.selected {
- background-color: #DDD;
+ background-color: rgba(0.5, 0.5, 0.5, 0.1);
}
.codeSearch {
display: block-inline;
float: right;
border-radius: 5px;
- background-color: #EEE;
+ background-color: #333;
width: 1em;
text-align: center;
}
@@ -155,10 +167,6 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
width: auto;
}
- .compareSelector {
- padding-bottom: 20px;
- }
-
.pageDetailTable tbody {
cursor: pointer
}
@@ -173,7 +181,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
box-shadow: -2px 10px 44px -10px #000;
border-radius: 5px;
z-index: 1;
- background-color: #FFF;
+ background-color: var(--surface-color);
display: none;
white-space: nowrap;
}
@@ -184,13 +192,14 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
text-align: right;
margin: 10px;
}
+
#popover td {
padding: 3px 0px 3px 5px;
white-space: nowrap;
}
.popoverArrow {
- background-color: #FFF;
+ background-color: var(--surface-color);
position: absolute;
width: 30px;
height: 30px;
@@ -218,14 +227,11 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
#popover .compare .version {
padding-left: 10px;
}
- .graph,
- .graph .content {
- width: 100%;
- }
.diff .hideDiff {
display: none;
}
+
.noDiff .hideNoDiff {
display: none;
}
@@ -233,11 +239,13 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
<script src="https://www.gstatic.com/charts/loader.js"></script>
<script>
"use strict"
- google.charts.load('current', {packages: ['corechart']});
+ google.charts.load('current', {
+ packages: ['corechart']
+ });
// Did anybody say monkeypatching?
if (!NodeList.prototype.forEach) {
- NodeList.prototype.forEach = function(func) {
+ NodeList.prototype.forEach = function (func) {
for (let i = 0; i < this.length; i++) {
func(this[i]);
}
@@ -249,17 +257,18 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
let selectedPage;
let baselineVersion;
let selectedEntry;
+ let sortByLabel = false;
// Marker to programatically replace the defaultData.
- let defaultData = /*default-data-start*/undefined/*default-data-end*/;
+ let defaultData = /*default-data-start*/ undefined /*default-data-end*/;
function initialize() {
// Initialize the stats table and toggle lists.
let original = $("column");
- let view = document.createElement('div');
- view.id = 'view';
+ let viewBody = $("view").querySelector('.panelBody');
+ removeAllChildren(viewBody);
let i = 0;
- versions.forEach((version) => {
+ versions.forEach((version) => {
if (!version.enabled) return;
// add column
let column = original.cloneNode(true);
@@ -292,11 +301,9 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
});
select.appendChild(optgroup);
});
- view.appendChild(column);
+ viewBody.appendChild(column);
i++;
});
- let oldView = $('view');
- oldView.parentNode.replaceChild(view, oldView);
let select = $('baseline');
removeAllChildren(select);
@@ -310,7 +317,6 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
initializeToggleList(versions.versions, $('versionSelector'));
initializeToggleList(pages.values(), $('pageSelector'));
initializeToggleList(Group.groups.values(), $('groupSelector'));
- initializeToggleContentVisibility();
}
function initializeToggleList(items, node) {
@@ -329,25 +335,6 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
li.appendChild(document.createTextNode(item.name));
list.appendChild(li);
});
- $('results').querySelectorAll('#results > .hidden').forEach((node) => {
- toggleCssClass(node, 'hidden', false);
- })
- }
-
- function initializeToggleContentVisibility() {
- let nodes = document.querySelectorAll('.toggleContentVisibility');
- nodes.forEach((node) => {
- let content = node.querySelector('.content');
- let header = node.querySelector('h1,h2,h3');
- if (content === undefined || header === undefined) return;
- if (header.querySelector('input') != undefined) return;
- let checkbox = document.createElement('input');
- checkbox.type = 'checkbox';
- checkbox.checked = content.className.indexOf('hidden') == -1;
- checkbox.contentNode = content;
- checkbox.addEventListener('click', handleToggleContentVisibility);
- header.insertBefore(checkbox, header.childNodes[0]);
- });
}
window.addEventListener('popstate', (event) => {
@@ -382,8 +369,8 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
if (JSON.stringify(window.history.state) === JSON.stringify(state)) return;
let params = "?";
for (let pairs of Object.entries(state)) {
- params += encodeURIComponent(pairs[0]) + "="
- + encodeURIComponent(pairs[1]) + "&";
+ params += encodeURIComponent(pairs[0]) + "=" +
+ encodeURIComponent(pairs[1]) + "&";
}
window.history.pushState(state, selection.toString(), params);
}
@@ -396,8 +383,8 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
}
function showPage(firstPage) {
- let changeSelectedEntry = selectedEntry !== undefined
- && selectedEntry.page === selectedPage;
+ let changeSelectedEntry = selectedEntry !== undefined &&
+ selectedEntry.page === selectedPage;
selectedPage = firstPage;
selectedPage.sort();
showPageInColumn(firstPage, 0);
@@ -417,30 +404,42 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
pushHistoryState();
}
+ function clamp(value, min, max) {
+ if (value < min) return min;
+ if (value > max) return max;
+ return value;
+ }
+
+ function diffColorFromRatio(ratio) {
+ if (ratio > 1) {
+ // ratio > 1: #FFFFFF => #00FF00
+ const red = clamp(((ratio - 1) * 255 * 10) | 0, 0, 255);
+ const other = (255 - red).toString(16).padStart(2, '0');
+ return `#ff${other}${other}`;
+ }
+ // ratio < 1: #FF0000 => #FFFFFF
+ const green = clamp(((1 - ratio) * 255 * 10) | 0, 0, 255);
+ const other = (255 - green).toString(16).padStart(2, '0');
+ return `#${other}ff${other}`;
+ }
+
function showPageInColumn(page, columnIndex) {
page.sort();
- let showDiff = (baselineVersion === undefined && columnIndex !== 0) ||
- (baselineVersion !== undefined && page.version !== baselineVersion);
- let diffStatus = (td, a, b) => {};
+ let showDiff = columnIndex !== 0;
+ if (baselineVersion) showDiff = page.version !== baselineVersion;
+ let diffColor = (td, a, b) => { };
if (showDiff) {
- if (baselineVersion !== undefined) {
- diffStatus = (td, a, b) => {
- if (a == 0) return;
- td.style.color = a < 0 ? '#FF0000' : '#00BB00';
+ if (baselineVersion) {
+ diffColor = (td, diff, baseline) => {
+ if (diff == 0) return;
+ const ratio = (baseline + diff) / baseline;
+ td.style.color = diffColorFromRatio(ratio);
};
} else {
- diffStatus = (td, a, b) => {
- if (a == b) return;
- let color;
- let ratio = a / b;
- if (ratio > 1) {
- ratio = Math.min(Math.round((ratio - 1) * 255 * 10), 200);
- color = '#' + ratio.toString(16) + "0000";
- } else {
- ratio = Math.min(Math.round((1 - ratio) * 255 * 10), 200);
- color = '#00' + ratio.toString(16) + "00";
- }
- td.style.color = color;
+ diffColor = (td, value, reference) => {
+ if (value == reference) return;
+ const ratio = value / reference;
+ td.style.color = diffColorFromRatio(ratio);
}
}
}
@@ -476,31 +475,31 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
td(tr, entry.position == 0 ? '' : entry.position, 'position');
}
addCodeSearchButton(entry,
- td(tr, entry.name, 'name ' + entry.cssClass()));
+ td(tr, entry.name, 'name ' + entry.cssClass()));
- diffStatus(
+ diffColor(
td(tr, ms(entry.time), 'value time'),
entry.time, referenceEntry.time);
- diffStatus(
+ diffColor(
td(tr, percent(entry.timePercent), 'value time'),
entry.time, referenceEntry.time);
- diffStatus(
+ diffColor(
td(tr, count(entry.count), 'value count'),
entry.count, referenceEntry.count);
- } else if (baselineVersion !== undefined && referenceEntry
- && page.version !== baselineVersion) {
+ } else if (baselineVersion !== undefined && referenceEntry &&
+ page.version !== baselineVersion) {
// Show comparison of entry that does not exist on the current page.
tr.entry = new Entry(0, referenceEntry.name);
tr.entry.page = page;
td(tr, '-', 'position');
td(tr, referenceEntry.name, 'name');
- diffStatus(
+ diffColor(
td(tr, ms(referenceEntry.time), 'value time'),
referenceEntry.time, 0);
- diffStatus(
+ diffColor(
td(tr, percent(referenceEntry.timePercent), 'value time'),
referenceEntry.timePercent, 0);
- diffStatus(
+ diffColor(
td(tr, count(referenceEntry.count), 'value count'),
referenceEntry.count, 0);
} else {
@@ -549,7 +548,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
let childNodes = $('column_0').querySelector('.list tbody').childNodes;
for (let i = 0; i < childNodes.length; i++) {
if (childNodes[i].entry !== undefined &&
- childNodes[i].entry.name == entry.name) {
+ childNodes[i].entry.name == entry.name) {
rowIndex = i;
break;
}
@@ -588,10 +587,10 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
function showVersionDetails(entry) {
let table, tbody, entries;
- table = $('detailView').querySelector('.versionDetailTable');
+ table = $('versionDetails').querySelector('.versionDetailTable');
tbody = document.createElement('tbody');
if (entry !== undefined) {
- $('detailView').querySelector('.versionDetail h3 span').textContent =
+ $('versionDetails').querySelector('h2 span').textContent =
entry.name + ' in ' + entry.page.name;
entries = versions.getPageVersions(entry.page).map(
(page) => {
@@ -618,7 +617,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
function showPageDetails(entry) {
let table, tbody, entries;
- table = $('detailView').querySelector('.pageDetailTable');
+ table = $('pageDetail').querySelector('.pageDetailTable');
tbody = document.createElement('tbody');
if (entry === undefined) {
table.replaceChild(tbody, table.querySelector('tbody'));
@@ -626,12 +625,12 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
}
let version = entry.page.version;
let showDiff = version !== baselineVersion;
- $('detailView').querySelector('.pageDetail h3 span').textContent =
+ $('pageDetail').querySelector('h2 span').textContent =
version.name;
entries = version.pages.map((page) => {
- if (!page.enabled) return;
- return page.get(entry.name)
- });
+ if (!page.enabled) return;
+ return page.get(entry.name)
+ });
entries.sort((a, b) => {
let cmp = b.timePercent - a.timePercent;
if (cmp.toFixed(1) == 0) return b.time - a.time;
@@ -646,7 +645,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
td(tr, ms(pageEntry.time, showDiff), 'value time');
td(tr, percent(pageEntry.timePercent, showDiff), 'value time');
td(tr, percent(pageEntry.timePercentPerEntry, showDiff),
- 'value time hideNoDiff');
+ 'value time hideNoDiff');
td(tr, count(pageEntry.count, showDiff), 'value count');
tbody.appendChild(tr);
});
@@ -661,15 +660,15 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
}
function showImpactList(page) {
- let impactView = $('detailView').querySelector('.impactView');
- impactView.querySelector('h3 span').textContent = page.version.name;
+ let impactView = $('impactView');
+ impactView.querySelector('h2 span').textContent = page.version.name;
let table = impactView.querySelector('table');
let tbody = document.createElement('tbody');
let version = page.version;
let entries = version.allEntries();
if (selectedEntry !== undefined && selectedEntry.isGroup) {
- impactView.querySelector('h3 span').textContent += " " + selectedEntry.name;
+ impactView.querySelector('h2 span').textContent += " " + selectedEntry.name;
entries = entries.filter((entry) => {
return entry.name == selectedEntry.name ||
(entry.parent && entry.parent.name == selectedEntry.name)
@@ -723,10 +722,10 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
}
// Display graphs delayed for a snappier UI.
setTimeout(() => {
- showPageVersionGraph(groups, page);
- showPageGraph(groups, page);
- showVersionGraph(groups, page)
- }, 10);
+ showPageVersionGraph(groups, page);
+ showPageGraph(groups, page);
+ showVersionGraph(groups, page);
+ }, 10);
}
function getGraphDataTable(groups, page) {
@@ -735,12 +734,16 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
groups.forEach(group => {
let column = dataTable.addColumn('number', group.name.substring(6));
dataTable.setColumnProperty(column, 'group', group);
- column = dataTable.addColumn({role: "annotation"});
+ column = dataTable.addColumn({
+ role: "annotation"
+ });
dataTable.setColumnProperty(column, 'group', group);
});
let column = dataTable.addColumn('number', 'Chart Total');
dataTable.setColumnProperty(column, 'group', page.total);
- column = dataTable.addColumn({role: "annotation"});
+ column = dataTable.addColumn({
+ role: "annotation"
+ });
dataTable.setColumnProperty(column, 'group', page.total);
return dataTable;
}
@@ -749,17 +752,17 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
class ChartRow {
static kSortFirstValueRelative(chartRow) {
- if (selectedGroup?.isTotal) return chartRow.total
+ if (selectedGroup?.isTotal) return chartRow.total;
return chartRow.data[0] / chartRow.total;
}
static kSortByFirstValue(chartRow) {
- if (selectedGroup?.isTotal) return chartRow.total
+ if (selectedGroup?.isTotal) return chartRow.total;
return chartRow.data[0];
}
constructor(linkedPage, label, sortValue_fn, data,
- excludeFromAverage=false) {
+ excludeFromAverage = false) {
this.linkedPage = linkedPage;
this.label = label;
if (!Array.isArray(data)) {
@@ -783,7 +786,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
const value = this.data[i];
let label = '';
// Only show labels for entries that are large enough..
- if (Math.abs(value / maxRowsTotal) * chartWidth > kMinLabelWidth) {
+ if (Math.abs(value / maxRowsTotal) * chartWidth > kMinLabelWidth) {
label = ms(value);
}
rowData.push(value, label);
@@ -794,11 +797,16 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
return rowData;
}
}
+ const collator = new Intl.Collator('en-UK');
function setDataTableRows(dataTable, rows) {
let skippedRows = 0;
// Always sort by the selected entry (first column after the label)
- rows.sort((a,b) => b.sortValue - a.sortValue);
+ if (sortByLabel) {
+ rows.sort((a, b) => collator.compare(a.label, b.label));
+ } else {
+ rows.sort((a, b) => b.sortValue - a.sortValue);
+ }
// Aggregate row data for Average/SUM chart entry:
const aggregateData = rows[0].data.slice().fill(0);
let maxTotal = 0;
@@ -820,7 +828,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
aggregateData[i] /= rows.length;
}
const averageRow = new ChartRow(undefined, 'Average',
- ChartRow.kSortByFirstValue, aggregateData);
+ ChartRow.kSortByFirstValue, aggregateData);
dataTable.addRow(averageRow.forDataTable());
rows.forEach(chartRow => {
@@ -834,11 +842,11 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
let vs = versions.getPageVersions(page);
// Calculate the entries for the versions
const rows = vs.map(page => new ChartRow(
- page, page.version.name, ChartRow.kSortByFirstValue,
- groups.map(group => page.getEntry(group).time),
- page.version === baselineVersion));
+ page, page.version.name, ChartRow.kSortByFirstValue,
+ groups.map(group => page.getEntry(group).time),
+ page.version === baselineVersion));
renderGraph(`Versions for ${page.name}`, groups, dataTable, rows,
- 'pageVersionGraph', true);
+ 'pageVersionGraph', true);
}
function showPageGraph(groups, page) {
@@ -849,12 +857,12 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
let pages = page.version.pages.filter(page => page.enabled);
// Calculate the entries for the pages
const rows = pages.map(page => new ChartRow(
- page, page.name,
- isDiffView ?
- ChartRow.kSortByFirstValue : ChartRow.kSortFirstValueRelative,
- groups.map(group => page.getEntry(group).time)));
+ page, page.name,
+ isDiffView ?
+ ChartRow.kSortByFirstValue : ChartRow.kSortFirstValueRelative,
+ groups.map(group => page.getEntry(group).time)));
renderGraph(`Pages for ${page.version.name}`, groups, dataTable, rows,
- 'pageGraph', isDiffView ? true : 'percent');
+ 'pageGraph', isDiffView ? true : 'percent');
}
function showVersionGraph(groups, page) {
@@ -862,11 +870,11 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
let vs = versions.versions.filter(version => version.enabled);
// Calculate the entries for the versions
const rows = vs.map((version) => new ChartRow(
- version.get(page), version.name, ChartRow.kSortByFirstValue,
- groups.map(group => version.getEntry(group).getTimeImpact()),
- version === baselineVersion));
+ version.get(page), version.name, ChartRow.kSortByFirstValue,
+ groups.map(group => version.getEntry(group).getTimeImpact()),
+ version === baselineVersion));
renderGraph('Versions Total Time over all Pages', groups, dataTable, rows,
- 'versionGraph', true);
+ 'versionGraph', true);
}
function renderGraph(title, groups, dataTable, rows, id, isStacked) {
@@ -886,24 +894,49 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
height: height,
hAxis: {
minValue: 0,
- textStyle: { fontSize: 14 }
+ textStyle: {
+ fontSize: 14
+ }
},
vAxis: {
- textStyle: { fontSize: 14 }
+ textStyle: {
+ fontSize: 14
+ }
+ },
+ tooltip: {
+ textStyle: {
+ fontSize: 14
+ }
+ },
+ annotations: {
+ textStyle: {
+ fontSize: 8
+ }
},
- tooltip: { textStyle: { fontSize: 14 }},
- annotations: { textStyle: { fontSize: 8 }},
explorer: {
actions: ['dragToZoom', 'rightClickToReset'],
maxZoomIn: 0.01
},
- legend: {position:'top', maxLines: 3, textStyle: { fontSize: 12 }},
- chartArea: {left:200, top:50 },
- colors: [...groups.map(each => each.color), /* Chart Total */ "#000000"]
+ legend: {
+ position: 'top',
+ maxLines: 3,
+ textStyle: {
+ fontSize: 12
+ }
+ },
+ chartArea: {
+ left: 200,
+ top: 50
+ },
+ colors: [
+ ...groups.map(each => each.color),
+ /* Chart Total */
+ "#000000",
+ ]
};
let parentNode = $(id);
parentNode.querySelector('h2>span, h3>span').textContent = title;
- let graphNode = parentNode.querySelector('.content');
+ let graphNode = parentNode.querySelector('.panelBody');
let chart = graphNode.chart;
if (chart === undefined) {
@@ -912,6 +945,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
google.visualization.events.removeAllListeners(chart);
}
google.visualization.events.addListener(chart, 'select', selectHandler);
+
function getChartEntry(selection) {
if (!selection) return undefined;
let column = selection.column;
@@ -923,16 +957,27 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
if (!page) return selectedGroup;
return page.getEntry(selectedGroup);
}
+
function selectHandler(e) {
- selectedGroup = getChartEntry(chart.getSelection()[0])
- if (!selectedGroup) return;
+ const newSelectedGroup = getChartEntry(chart.getSelection()[0]);
+ if (newSelectedGroup == selectedGroup) {
+ sortByLabel = !sortByLabel;
+ } else if (newSelectedGroup === undefined && selectedPage) {
+ sortByLabel = true;
+ return showGraphs(selectedPage);
+ } else {
+ sortByLabel = false;
+ }
+ selectedGroup = newSelectedGroup;
selectEntry(selectedGroup, true);
}
// Make our global tooltips work
google.visualization.events.addListener(chart, 'onmouseover', mouseOverHandler);
+
function mouseOverHandler(selection) {
- graphNode.entry = getChartEntry(selection);
+ const selectedGroup = getChartEntry(selection);
+ graphNode.entry = selectedGroup;
}
chart.draw(dataTable, options);
}
@@ -971,33 +1016,27 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
node('.timeVariance').textContent = '-';
node('.percent').textContent = '-';
node('.percentPerEntry').textContent = '-';
- node('.percentVariance').textContent = '-';
- node('.count').textContent = '-';
+ node('.percentVariance').textContent = '-';
+ node('.count').textContent = '-';
node('.countVariance').textContent = '-';
node('.timeImpact').textContent = '-';
node('.timePercentImpact').textContent = '-';
} else {
node('.version').textContent = entry.page.version.name;
node('.time').textContent = ms(entry._time, false);
- node('.timeVariance').textContent
- = percent(entry.timeVariancePercent, false);
+ node('.timeVariance').textContent = percent(entry.timeVariancePercent, false);
node('.percent').textContent = percent(entry.timePercent, false);
- node('.percentPerEntry').textContent
- = percent(entry.timePercentPerEntry, false);
- node('.percentVariance').textContent
- = percent(entry.timePercentVariancePercent, false);
+ node('.percentPerEntry').textContent = percent(entry.timePercentPerEntry, false);
+ node('.percentVariance').textContent = percent(entry.timePercentVariancePercent, false);
node('.count').textContent = count(entry._count, false);
- node('.countVariance').textContent
- = percent(entry.timeVariancePercent, false);
- node('.timeImpact').textContent
- = ms(entry.getTimeImpact(false), false);
- node('.timePercentImpact').textContent
- = percent(entry.getTimeImpactVariancePercent(false), false);
+ node('.countVariance').textContent = percent(entry.timeVariancePercent, false);
+ node('.timeImpact').textContent = ms(entry.getTimeImpact(false), false);
+ node('.timePercentImpact').textContent = percent(entry.getTimeImpactVariancePercent(false), false);
}
}
</script>
<script>
- "use strict"
+ "use strict"
// =========================================================================
// Helpers
function $(id) {
@@ -1053,7 +1092,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
return -1;
}
- function toggleCssClass(node, cssClass, toggleState) {
+ function toggleCssClass(node, cssClass, toggleState = true) {
let index = -1;
let classes;
if (node.className != undefined) {
@@ -1095,28 +1134,45 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
function percent(value, showDiff) {
return diffSign(value, 1, '%', showDiff);
}
-
</script>
<script>
- "use strict"
+ "use strict"
// =========================================================================
// EventHandlers
- function handleBodyLoad() {
+ async function handleBodyLoad() {
$('uploadInput').focus();
- if (defaultData) return handleLoadJSON(defaultData);
- if (tryLoadFromURLParams()) return;
- if (window.location.protocol !== 'file:') return tryLoadDefaultResults();
+ if (tryLoadDefaultData() || await tryLoadFromURLParams() ||
+ await tryLoadDefaultResults()) {
+ displayResultsAfterLoading();
+ }
+ }
+
+ function tryLoadDefaultData() {
+ if (!defaultData) return false;
+ handleLoadJSON(defaultData);
+ return true;
+ }
+
+ async function tryLoadFromURLParams() {
+ let params = new URLSearchParams(document.location.search);
+ let hasFile = false;
+ params.forEach(async (value, key) => {
+ if (key !== 'file') return;
+ hasFile ||= await tryLoadFile(value, true);
+ });
+ return hasFile;
}
async function tryLoadDefaultResults() {
- // Try to load a results.json file adjacent to this day.
- // The markers on the following line can be used to replace the url easily
- // with scripts.
- const url = /*results-url-start*/'results.json'/*results-url-end*/;
- tryLoadFile(url);
+ if (window.location.protocol === 'file:') return false;
+ // Try to load a results.json file adjacent to this file.
+ // The markers on the following line can be used to replace the url easily
+ // with scripts.
+ const url = /*results-url-start*/ 'results.json' /*results-url-end*/;
+ return tryLoadFile(url);
}
- async function tryLoadFile(url, append=false) {
+ async function tryLoadFile(url, append = false) {
if (!url.startsWith('http')) {
// hack to get relative urls
let location = window.location;
@@ -1126,19 +1182,9 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
let response = await fetch(url);
if (!response.ok) return false;
let filename = url.split('/');
- filename = filename[filename.length-1];
+ filename = filename[filename.length - 1];
handleLoadText(await response.text(), append, filename);
- }
-
- async function tryLoadFromURLParams() {
- let params = new URLSearchParams(document.location.search);
- let hasFile = false;
- params.forEach((value, key) => {
- if (key !== 'file') return;
- hasFile = true;
- tryLoadFile(value, true);
- });
- return hasFile;
+ return true;
}
function handleAppendFiles() {
@@ -1146,7 +1192,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
loadFiles(files, true);
}
- function handleLoadFile() {
+ function handleLoadFiles() {
let files = document.getElementById("uploadInput").files;
loadFiles(files, false)
}
@@ -1161,20 +1207,24 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
reader.readAsText(file);
});
handleLoadText(text, append, file.name);
+ // Only the first file may clear existing data; all subsequent files
+ // are always appended.
+ append = true;
}
+ displayResultsAfterLoading();
+ toggleCssClass(document.body, "loaded");
}
function handleLoadText(text, append, fileName) {
if (fileName.endsWith('.json')) {
handleLoadJSON(JSON.parse(text), append, fileName);
} else if (fileName.endsWith('.csv') ||
- fileName.endsWith('.output') || fileName.endsWith('.output.txt')) {
+ fileName.endsWith('.output') || fileName.endsWith('.output.txt')) {
handleLoadCSV(text, append, fileName);
} else if (fileName.endsWith('.txt')) {
handleLoadTXT(text, append, fileName);
} else {
- alert(`Error parsing "${fileName}"`);
- console.error(e);
+ alert(`Unsupported file extension: "${fileName}"`);
}
}
@@ -1195,7 +1245,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
json = fixSingleVersionJSON(json, fileName);
let isFirstLoad = pages === undefined;
if (append && !isFirstLoad) {
- json = createUniqueVersions(json)
+ json = createUniqueVersions(json);
}
if (!append || isFirstLoad) {
pages = new Pages();
@@ -1203,7 +1253,6 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
} else {
Versions.fromJSON(json).forEach(e => versions.add(e))
}
- displayResultsAfterLoading(isFirstLoad)
}
function handleLoadCSV(csv, append, fileName) {
@@ -1219,25 +1268,23 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
fields.set(name, index);
});
if (fields.has('displayLabel') && fields.has('stories')) {
- handleLoadResultCSV(fields, lines, fileName)
+ handleLoadResultCSV(fields, lines);
} else if (fields.has('page_name')) {
- handleLoadClusterTelemetryCSV(fields, lines, fileName)
+ handleLoadClusterTelemetryCSV(fields, lines, fileName);
} else {
return alert("Unknown CSV format");
}
- displayResultsAfterLoading(isFirstLoad)
}
-
function csvSplit(line) {
let fields = [];
let index = 0;
while (index < line.length) {
let lastIndex = index;
if (line[lastIndex] == '"') {
- index = line.indexOf('"', lastIndex+1);
+ index = line.indexOf('"', lastIndex + 1);
if (index < 0) index = line.length;
- fields.push(line.substring(lastIndex+1, index));
+ fields.push(line.substring(lastIndex + 1, index));
// Consume ','
index++;
} else {
@@ -1251,16 +1298,25 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
return fields;
}
+ // Ignore the following categories as they are aggregated values and are
+ // created by callstats.html on the fly.
+ const import_skip_categories = new Set([
+ 'V8-Only', 'V8-Only-Main-Thread', 'Total-Main-Thread', 'Blink_Total'
+ ])
+
function handleLoadClusterTelemetryCSV(fields, lines, fileName) {
const rscFields = Array.from(fields.keys())
- .filter(field => field.endsWith(':duration (ms)'))
+ .filter(field => {
+ return field.endsWith(':duration (ms)') &&
+ !import_skip_categories.has(field.split(':')[0])
+ })
.map(field => {
let name = field.split(':')[0];
return [name, fields.get(field), fields.get(`${name}:count`)];
})
const page_name_i = fields.get('page_name');
const version = versions.getOrCreate(fileName);
- for (let i=1; i<lines.length; i++) {
+ for (let i = 1; i < lines.length; i++) {
const line = csvSplit(lines[i]);
if (line.length == 0) continue;
let page_name = line[page_name_i];
@@ -1271,35 +1327,31 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
const duration = Number.parseFloat(line[duration_i]);
const count = Number.parseFloat(line[count_i]);
// Skip over entries without metrics (most likely crashes)
- if (Number.isNaN(count)|| Number.isNaN(duration)) {
+ if (Number.isNaN(count) || Number.isNaN(duration)) {
console.warn(`BROKEN ${page_name}`, lines[i])
break;
}
- pageVersion.add(new Entry(0, fieldName, duration, 0, 0, count, 0 ,0))
+ pageVersion.add(new Entry(0, fieldName, duration, 0, 0, count, 0, 0))
}
}
}
- function handleLoadResultCSV(fields, lines, fileName) {
+ function handleLoadResultCSV(fields, lines) {
const version_i = fields.get('displayLabel');
const page_i = fields.get('stories');
const category_i = fields.get('name');
const value_i = fields.get('avg');
- // Ignore the following categories as they are aggregated values and are
- // created by callstats.html on the fly.
- const skip_categories = new Set([
- 'V8-Only', 'V8-Only-Main-Thread', 'Total-Main-Thread', 'Blink_Total'])
const tempEntriesCache = new Map();
- for (let i=1; i<lines.length; i++) {
+ for (let i = 1; i < lines.length; i++) {
const line = csvSplit(lines[i]);
if (line.length == 0) continue;
const raw_category = line[category_i];
if (!raw_category.endsWith(':duration') &&
- !raw_category.endsWith(':count')) {
+ !raw_category.endsWith(':count')) {
continue;
}
let [category, type] = raw_category.split(':');
- if (skip_categories.has(category)) continue;
+ if (import_skip_categories.has(category)) continue;
const version = versions.getOrCreate(line[version_i]);
const pageVersion = version.getOrCreate(line[page_i]);
const value = Number.parseFloat(line[value_i]);
@@ -1331,7 +1383,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
tempEntries = new Map();
cache.set(pageVersion, tempEntries);
}
- let tempEntry = tempEntries.get(category);
+ let tempEntry = tempEntries.get(category);
if (tempEntry === undefined) {
tempEntry = new TempEntry(category);
tempEntries.set(category, tempEntry);
@@ -1343,7 +1395,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
const [duration, durationStddev] = this.stats(this.durations);
const [count, countStddev] = this.stats(this.durations);
return new Entry(0, this.category,
- duration, durationStddev, 0, count, countStddev, 0)
+ duration, durationStddev, 0, count, countStddev, 0)
}
stats(values) {
@@ -1363,6 +1415,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
}
function handleLoadTXT(txt, append, fileName) {
+ fileName = window.prompt('Version name:', fileName);
let isFirstLoad = pages === undefined;
// Load raw RCS output which contains a single page
if (!append || isFirstLoad) {
@@ -1370,11 +1423,11 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
versions = new Versions()
}
versions.add(Version.fromTXT(fileName, txt));
- displayResultsAfterLoading(isFirstLoad);
}
- function displayResultsAfterLoading(isFirstLoad=true) {
+ function displayResultsAfterLoading() {
+ const isFirstLoad = pages === undefined;
let state = getStateFromParams();
initialize()
if (isFirstLoad && !popHistoryState(state) && selectedPage) {
@@ -1382,7 +1435,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
return;
}
const page = versions.versions[0].pages[0]
- if (page == undefined) return;
+ if (page == undefined) return;
showPage(page);
showEntry(page.total);
}
@@ -1405,7 +1458,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
let entries = [];
let file_data = json[file_name].pairs;
for (let name in file_data) {
- if(name != "Total" && groupNames.has(name)) continue;
+ if (name != "Total" && groupNames.has(name)) continue;
let entry = file_data[name];
let count = entry.count;
let time = entry.time;
@@ -1414,38 +1467,55 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
let domain = file_name.split("/").slice(-1)[0];
result[domain] = entries;
}
- return {__proto__:null, ClusterTelemetry: result};
+ return {
+ __proto__: null,
+ ClusterTelemetry: result
+ };
}
function fixTraceImportJSON(json) {
// Fix json file that was created by converting a trace json output
if (!('telemetry-results' in json)) return json;
// { telemetry-results: { PAGE:[ { METRIC: [ COUNT TIME ], ... }, ... ]}}
- let version_data = {__proto__:null};
+ let version_data = {
+ __proto__: null
+ };
json = json["telemetry-results"];
for (let page_name in json) {
if (page_name == "placeholder") continue;
let page_data = {
- __proto__:null,
- Total: {
- duration: {average: 0, stddev: 0},
- count: {average:0, stddev: 0}
- }
- };
+ __proto__: null,
+ Total: {
+ duration: {
+ average: 0,
+ stddev: 0
+ },
+ count: {
+ average: 0,
+ stddev: 0
+ }
+ }
+ };
let page = json[page_name];
- for (let slice of page ) {
+ for (let slice of page) {
for (let metric_name in slice) {
if (metric_name == "Blink_V8") continue;
// sum up entries
if (!(metric_name in page_data)) {
page_data[metric_name] = {
- duration: {average: 0, stddev: 0},
- count: {average:0, stddev: 0}
+ duration: {
+ average: 0,
+ stddev: 0
+ },
+ count: {
+ average: 0,
+ stddev: 0
+ }
}
}
let [metric_count, metric_duration] = slice[metric_name]
let metric = page_data[metric_name];
- const kMicroToMilli = 1/1000;
+ const kMicroToMilli = 1 / 1000;
metric.duration.average += metric_duration * kMicroToMilli;
metric.count.average += metric_count;
@@ -1463,7 +1533,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
function fixSingleVersionJSON(json, name) {
// Try to detect the single-version case, where we're missing the toplevel
// version object. The incoming JSON is of the form:
- // { PAGE: ... , PAGE_2: }
+ // { PAGE: ... , PAGE_2: }
// Instead of the default multi-page JSON:
// {"Version 1": { "Page 1": ..., ...}, "Version 2": {...}, ...}
// In this case insert a single "Default" version as top-level entry.
@@ -1475,48 +1545,55 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
let tempName = name ? name : new Date().toISOString();
tempName = window.prompt('Enter a name for the loaded file:', tempName);
if ('count' in maybeMetrics && 'duration' in maybeMetrics) {
- return {[tempName]: json}
+ return {
+ [tempName]: json
+ }
}
// Legacy fallback where the metrics are encoded as arrays:
// { PAGE: [[metric_name, ...], [...], ]}
if (Array.isArray(maybeMetrics)) {
- return {[tempName]: json}
+ return {
+ [tempName]: json
+ }
}
return json
}
let appendIndex = 0;
+
function createUniqueVersions(json) {
// Make sure all toplevel entries are unique names and added properly
appendIndex++;
- let result = {__proto__:null}
+ let result = {
+ __proto__: null
+ }
for (let key in json) {
- result[key+"_"+appendIndex] = json[key];
+ result[key + "_" + appendIndex] = json[key];
}
return result
}
function handleCopyToClipboard(event) {
- const names =[ "Group", ...versions.versions.map(e=>e.name)];
- let result = [ names.join("\t") ];
+ const names = ["Group", ...versions.versions.map(e => e.name)];
+ let result = [names.join("\t")];
let groups = Array.from(Group.groups.values());
// Move the total group to the end.
groups.push(groups.shift())
groups.forEach(group => {
let row = [group.name];
versions.forEach(v => {
- const time = v.pages[0].get("Group-"+group.name)?._time ?? 0;
- row.push(time)
+ const time = v.pages[0].get("Group-" + group.name)?._time ?? 0;
+ row.push(time);
})
result.push(row.join("\t"));
});
result = result.join("\n");
- navigator.clipboard.writeText(result)
+ navigator.clipboard.writeText(result);
}
function handleToggleGroup(event) {
let group = event.target.parentNode.parentNode.entry;
- toggleGroup(selectedPage.get(group.name));
+ toggleGroup(selectedPage.get(group.name), 'toggle');
}
function handleSelectPage(select, event) {
@@ -1593,9 +1670,9 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
}
function handleToggleVersionOrPageEnable(event) {
- let item = this.item ;
- if (item === undefined) return;
- item .enabled = this.checked;
+ let item = this.item;
+ if (item === undefined) return;
+ item.enabled = this.checked;
initialize();
let page = selectedPage;
if (page === undefined || !page.version.enabled) {
@@ -1607,11 +1684,6 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
showPage(page);
}
- function handleToggleContentVisibility(event) {
- let content = event.target.contentNode;
- toggleCssClass(content, 'hidden');
- }
-
function handleCodeSearch(event) {
let entry = findEntry(event);
if (entry === undefined) return;
@@ -1621,11 +1693,11 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
name = name.substring(4);
}
url += encodeURIComponent(name) + "+file:src/v8/src";
- window.open(url,'_blank');
+ window.open(url, '_blank');
}
</script>
<script>
- "use strict"
+ "use strict"
// =========================================================================
class Versions {
constructor() {
@@ -1640,7 +1712,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
this.versions.forEach((version) => {
if (!version.enabled) return;
let versionPage = version.get(page.name);
- if (versionPage !== undefined) result.push(versionPage);
+ if (versionPage !== undefined) result.push(versionPage);
});
return result;
}
@@ -1654,7 +1726,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
return this.versions.find((each) => each.name == name);
}
getOrCreate(name) {
- return this.getByName(name) ?? this.add(new Version(name))
+ return this.getByName(name) ?? this.add(new Version(name));
}
forEach(f) {
this.versions.forEach(f);
@@ -1704,14 +1776,14 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
get(name) {
let index = this.indexOf(name);
if (0 <= index) return this.pages[index];
- return undefined
+ return undefined;
}
getOrCreate(name) {
return this.get(name) ??
this.add(new PageVersion(this, pages.getOrCreate(name)));
}
get length() {
- return this.pages.length
+ return this.pages.length;
}
getEntry(entry) {
if (entry === undefined) return undefined;
@@ -1758,7 +1830,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
// Otherwise return the difference to the sum of the baseline version.
let baselineValue = baselineVersion.getTotalTime(name, false);
let total = this.getTotalValue(name, '_time');
- return (total / baselineValue - 1) * 100;
+ return (total / baselineValue - 1) * 100;
}
getTotalTimeVariance(name, showDiff) {
// Calculate the overall error for a given entry name
@@ -1805,8 +1877,9 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
static fromTXT(name, txt) {
let version = new Version(name);
- let pageName = "RAW DATA";
- version.add(PageVersion.fromTXT(version, pageName, txt));
+ let defaultName = "RAW DATA";
+ PageVersion.fromTXT(version, defaultName, txt)
+ .forEach(each => version.add(each));
return version;
}
}
@@ -1875,23 +1948,27 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
return this.version.name + ": " + this.name;
}
urlParams() {
- return { version: this.version.name, page: this.name};
+ return {
+ version: this.version.name,
+ page: this.name
+ };
}
add(entry) {
- // Ignore accidentally added Group entries.
- if (entry.name.startsWith(GroupedEntry.prefix)) {
- console.warn("Skipping accidentally added Group entry:", entry, this);
- return;
- }
let existingEntry = this.entryDict.get(entry.name);
if (existingEntry !== undefined) {
// Duplicate entries happen when multiple runs are combined into a
// single file.
existingEntry.add(entry);
- for (let group of this.groups) {
+ for (let i = 0; i < this.groups.length; i++) {
+ const group = this.groups[i];
if (group.addTimeAndCount(entry)) return;
}
} else {
+ // Ignore accidentally added Group entries.
+ if (entry.name.startsWith(GroupedEntry.prefix)) {
+ console.warn("Skipping accidentally added Group entry:", entry, this);
+ return;
+ }
entry.page = this;
this.entryDict.set(entry.name, entry);
for (let group of this.groups) {
@@ -1910,8 +1987,12 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
get length() {
return this.versions.length
}
- get name() { return this.page.name }
- get enabled() { return this.page.enabled }
+ get name() {
+ return this.page.name
+ }
+ get enabled() {
+ return this.page.enabled
+ }
forEachSorted(referencePage, func) {
// Iterate over all the entries in the order they appear on the
// reference page.
@@ -1946,7 +2027,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
this.groups.forEach(group => {
if (group == this.total) return;
let value = group.getTimePercentImpact() -
- this.getEntry(group).timePercent;
+ this.getEntry(group).timePercent;
sum += value * value;
});
return sum;
@@ -1974,28 +2055,43 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
return page
}
- static fromTXT(version, name, txt) {
- let pageVersion = new PageVersion(version, pages.get(name));
- let lines = txt.split('\n');
- let split = / +/g
- // Skip the first two lines (HEADER and SEPARATOR)
- for (let i = 2; i < lines.length; i++) {
- let line = lines[i].trim().split(split)
- // Skip header lines
- if (lines[i].startsWith("======")) continue;
- if (lines[i+1]?.startsWith("======")) continue;
- if (line.length != 5) continue;
- let position = i-2;
- pageVersion.add(Entry.fromTXT(position, line));
+ static fromTXT(version, defaultName, txt) {
+ const kPageNameIdentifier = "== Page:";
+ const kCommentStart = "=="
+ const lines = txt.split('\n');
+ const split = / +/g
+ const result = [];
+ let pageVersion = undefined;
+ for (let i = 0; i < lines.length; i++) {
+ const line = lines[i];
+ // Skip header separators
+ if (line.startsWith(kCommentStart)) {
+ // Check for page names
+ if (line.startsWith(kPageNameIdentifier)) {
+ const name = line.split(kPageNameIdentifier)[1];
+ pageVersion = new PageVersion(version, pages.get(name));
+ result.push(pageVersion);
+ }
+ }
+ // Skip header lines.
+ if (lines[i + 1]?.startsWith(kCommentStart)) continue;
+ const split_line = line.trim().split(split)
+ if (split_line.length != 5) continue;
+ if (pageVersion === undefined) {
+ pageVersion = new PageVersion(version, pages.get(defaultName));
+ result.push(pageVersion);
+ }
+ const position = i - 2;
+ pageVersion.add(Entry.fromTXT(position, split_line));
}
- return pageVersion;
+ return result;
}
}
class Entry {
constructor(position, name, time, timeVariance, timeVariancePercent,
- count, countVariance, countVariancePercent) {
+ count, countVariance, countVariancePercent) {
this.position = position;
this.name = name;
this._time = time;
@@ -2017,7 +2113,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
}
add(entry) {
- if (this.name != entry.name) {
+ if (this.name !== entry.name) {
console.error("Should not combine entries with different names");
return;
}
@@ -2085,13 +2181,13 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
return this.page.version.getPagesByPercentImpact(this.name);
}
get isGroup() {
- return false
+ return false;
}
get timeVariance() {
- return this._timeVariance
+ return this._timeVariance;
}
get timeVariancePercent() {
- return this._timeVariancePercent
+ return this._timeVariancePercent;
}
static fromLegacyJSON(position, data) {
@@ -2102,14 +2198,21 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
let time = data.duration;
let count = data.count;
return new Entry(position, name, time.average, time.stddev, 0,
- count.average, count.stddev, 0);
+ count.average, count.stddev, 0);
}
static fromTXT(position, splitLine) {
- let [name, time, timePercent, count, countPercent] = splitLine;
- time = time.split('ms')
- let timeDeviation = 0, countDeviation = 0;
- let timeDeviationPercent = 0, countDeviationPercent = 0
+ const name = splitLine[0];
+ let time = splitLine[1];
+ const msIndex = time.indexOf('m');
+ if (msIndex > 0) time = time.substring(0, msIndex);
+ const timePercent = splitLine[2];
+ const count = splitLine[3];
+ const countPercent = splitLine[4];
+ const timeDeviation = 0;
+ const countDeviation = 0;
+ const timeDeviationPercent = 0;
+ const countDeviationPercent = 0
return new Entry(position, name,
Number.parseFloat(time), timeDeviation, timeDeviationPercent,
Number.parseInt(count), countDeviation, countDeviationPercent)
@@ -2117,47 +2220,49 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
}
class Group {
- constructor(name, regexp, color, enabled=true, addsToTotal=true) {
+ constructor(name, regexp, color, enabled = true, addsToTotal = true) {
this.name = name;
this.regexp = regexp;
this.color = color;
this.enabled = enabled;
this.addsToTotal = addsToTotal;
}
- entry() { return new GroupedEntry(this) };
+ entry() {
+ return new GroupedEntry(this);
+ }
}
Group.groups = new Map();
- Group.add = function(name, group) {
+ Group.add = function (name, group) {
this.groups.set(name, group);
return group;
}
Group.add('total', new Group('Total', /.*Total.*/, '#BBB', true, false));
Group.add('ic', new Group('IC', /(.*IC_.*)|IC/, "#3366CC"));
Group.add('optimize-background', new Group('Optimize-Background',
- /(.*Optimize-?Background.*)/, "#702000"));
+ /.*Optimize(d?-?)(Background|Concurrent).*/, "#702000"));
Group.add('optimize', new Group('Optimize',
- /StackGuard|.*Optimize.*|.*Deoptimize.*|Recompile.*/, "#DC3912"));
+ /(StackGuard|Optimize|Deoptimize|Recompile).*/, "#DC3912"));
Group.add('compile-background', new Group('Compile-Background',
- /(.*Compile-?Background.*)/, "#b08000"));
+ /(.*Compile-?Background.*)/, "#b08000"));
Group.add('compile', new Group('Compile',
- /(^Compile.*)|(.*_Compile.*)/, "#FFAA00"));
+ /(^Compile.*)|(.*_Compile.*)/, "#FFAA00"));
Group.add('parse-background',
- new Group('Parse-Background', /.*Parse-?Background.*/, "#c05000"));
+ new Group('Parse-Background', /.*Parse-?Background.*/, "#c05000"));
Group.add('parse', new Group('Parse', /.*Parse.*/, "#FF6600"));
Group.add('callback',
- new Group('Blink C++', /.*(Callback)|(Blink C\+\+).*/, "#109618"));
+ new Group('Blink C++', /.*(Callback)|(Blink C\+\+).*/, "#109618"));
Group.add('api', new Group('API', /.*API.*/, "#990099"));
Group.add('gc-custom', new Group('GC-Custom', /GC_Custom_.*/, "#0099C6"));
Group.add('gc-background',
- new Group(
- 'GC-Background', /.*GC.*(BACKGROUND|Background).*/, "#00597c"));
+ new Group(
+ 'GC-Background', /.*GC.*(BACKGROUND|Background).*/, "#00597c"));
Group.add('gc',
- new Group('GC', /GC_.*|AllocateInTargetSpace|GC/, "#00799c"));
+ new Group('GC', /GC_.*|AllocateInTargetSpace|GC/, "#00799c"));
Group.add('javascript',
- new Group('JavaScript', /JS_Execution|JavaScript/, "#DD4477"));
+ new Group('JavaScript', /JS_Execution|JavaScript/, "#DD4477"));
Group.add('runtime', new Group('V8 C++', /.*/, "#88BB00"));
Group.add('blink',
- new Group('Blink RCS', /.*Blink_.*/, "#006600", false, false));
+ new Group('Blink RCS', /.*Blink_.*/, "#006600", false, false));
Group.add('unclassified', new Group('Unclassified', /.*/, "#000", false));
class GroupedEntry extends Entry {
@@ -2168,9 +2273,15 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
this.missingEntries = null;
this.addsToTotal = group.addsToTotal;
}
- get regexp() { return this.group.regexp }
- get color() { return this.group.color }
- get enabled() { return this.group.enabled }
+ get regexp() {
+ return this.group.regexp;
+ }
+ get color() {
+ return this.group.color;
+ }
+ get enabled() {
+ return this.group.enabled;
+ }
add(entry) {
if (!this.addTimeAndCount(entry)) return;
// TODO: sum up variance
@@ -2197,7 +2308,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
}
}
});
- this.missingEntries = [];
+ this.missingEntries = [];
for (let name of dummyEntryNames) {
let tmpEntry = new Entry(0, name, 0, 0, 0, 0, 0, 0);
tmpEntry.page = this.page;
@@ -2241,7 +2352,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
}
get timeVariancePercent() {
if (this._time == 0) return 0;
- return this.getVarianceForProperty('time') / this._time * 100
+ return this.getVarianceForProperty('time') / this._time * 100
}
get timeVariance() {
return this.getVarianceForProperty('time')
@@ -2294,199 +2405,235 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
<body id="body" onmousemove="handleUpdatePopover(event)" onload="handleBodyLoad()" class="noDiff">
<h1>Runtime Stats Komparator</h1>
- <div id="results">
- <div class="inline">
- <h2>Data</h2>
- <form name="fileForm">
- <p>
- <label for="uploadInput">Load File:</label>
- <input id="uploadInput" type="file" name="files" onchange="handleLoadFile();" accept=".json,.txt,.csv,.output">
+ <section id="inputs" class="panel alwaysVisible">
+ <input type="checkbox" id="inputsCheckbox" class="panelCloserInput">
+ <label class="panelCloserLabel" for="inputsCheckbox">▼</label>
+ <h2>Input/Output</h2>
+ <div class="panelBody">
+ <form name="fileForm" class="inline">
+ <p class="inline">
+ <label for="uploadInput">Load Files:</label>
+ <input id="uploadInput" type="file" name="files" onchange="handleLoadFiles();" multiple
+ accept=".json,.txt,.csv,.output">
</p>
- <p>
+ <p class="inline">
<label for="appendInput">Append Files:</label>
- <input id="appendInput" type="file" name="files" onchange="handleAppendFiles();" multiple accept=".json,.txt,.csv,.output">
+ <input id="appendInput" type="file" name="files" onchange="handleAppendFiles();" multiple
+ accept=".json,.txt,.csv,.output">
</p>
</form>
- <p>
+ <p class="inline">
<button onclick="handleCopyToClipboard()">Copy Table to Clipboard</button>
</p>
</div>
-
- <div class="inline hidden">
- <h2>Result</h2>
- <div class="compareSelector inline">
- Compare against:&nbsp;<select id="baseline" onchange="handleSelectBaseline(this, event)"></select><br/>
- <span style="color: #060">Green</span> the selected version above performs
- better on this measurement.
- </div>
+ </section>
+
+ <section class="panel">
+ <h2>Baseline Selector</h2>
+ <div class="panel-body">
+ Compare against baseline:&nbsp;<select id="baseline" onchange="handleSelectBaseline(this, event)"></select><br />
+      <span style="color: #060">Green</span> means the selected version performs
+      better than the baseline.
</div>
-
- <div id="versionSelector" class="inline toggleContentVisibility">
- <h2>Versions</h2>
- <div class="content hidden">
+ </section>
+
+ <section class="panel-group">
+ <div id="versionSelector" class="panel">
+ <input type="checkbox" checked id="versionSelectorCheckbox" class="panelCloserInput">
+ <label class="panelCloserLabel" for="versionSelectorCheckbox">▼</label>
+ <h2>Selected Versions</h2>
+ <div class="panelBody">
<ul></ul>
</div>
</div>
- <div id="pageSelector" class="inline toggleContentVisibility">
- <h2>Pages</h2>
- <div class="content hidden">
+ <div id="pageSelector" class="panel">
+ <input type="checkbox" checked id="pageSelectorCheckbox" class="panelCloserInput">
+ <label class="panelCloserLabel" for="pageSelectorCheckbox">▼</label>
+ <h2>Selected Pages</h2>
+ <div class="panelBody">
<ul></ul>
</div>
</div>
- <div id="groupSelector" class="inline toggleContentVisibility">
- <h2>Groups</h2>
- <div class="content hidden">
+ <div id="groupSelector" class="panel">
+ <input type="checkbox" checked id="groupSelectorCheckbox" class="panelCloserInput">
+ <label class="panelCloserLabel" for="groupSelectorCheckbox">▼</label>
+ <h2>Selected RCS Groups</h2>
+ <div class="panelBody">
<ul></ul>
</div>
</div>
-
- <div id="view">
+ </section>
+
+ <section id="view" class="panel">
+ <input type="checkbox" id="tableViewCheckbox" class="panelCloserInput">
+ <label class="panelCloserLabel" for="tableViewCheckbox">▼</label>
+ <h2>RCS Table</h2>
+ <div class="panelBody"></div>
+ </section>
+
+ <section class="panel-group">
+ <div id="versionDetails" class="panel">
+ <input type="checkbox" checked id="versionDetailCheckbox" class="panelCloserInput">
+ <label class="panelCloserLabel" for="versionDetailCheckbox">▼</label>
+ <h2><span>Compare Page Versions</span></h2>
+        <div class="panelBody">
+ <table class="versionDetailTable" onclick="handleSelectDetailRow(this, event);">
+ <thead>
+ <tr>
+ <th class="version">Version&nbsp;</th>
+ <th class="position">Pos.&nbsp;</th>
+ <th class="value time">Time▴&nbsp;</th>
+ <th class="value time">Percent&nbsp;</th>
+ <th class="value count">Count&nbsp;</th>
+ </tr>
+ </thead>
+ <tbody></tbody>
+ </table>
+ </div>
</div>
- <div id="detailView" class="hidden">
- <div class="versionDetail inline toggleContentVisibility">
- <h3><span></span></h3>
- <div class="content">
- <table class="versionDetailTable" onclick="handleSelectDetailRow(this, event);">
- <thead>
- <tr>
- <th class="version">Version&nbsp;</th>
- <th class="position">Pos.&nbsp;</th>
- <th class="value time">Time▴&nbsp;</th>
- <th class="value time">Percent&nbsp;</th>
- <th class="value count">Count&nbsp;</th>
- </tr>
- </thead>
- <tbody></tbody>
- </table>
- </div>
- </div>
- <div class="pageDetail inline toggleContentVisibility">
- <h3>Page Comparison for <span></span></h3>
- <div class="content">
- <table class="pageDetailTable" onclick="handleSelectDetailRow(this, event);">
- <thead>
- <tr>
- <th class="page">Page&nbsp;</th>
- <th class="value time">Time&nbsp;</th>
- <th class="value time">Percent▾&nbsp;</th>
- <th class="value time hideNoDiff">%/Entry&nbsp;</th>
- <th class="value count">Count&nbsp;</th>
- </tr>
- </thead>
- <tfoot>
- <tr>
- <td class="page">Total:</td>
- <td class="value time"></td>
- <td class="value time"></td>
- <td class="value time hideNoDiff"></td>
- <td class="value count"></td>
- </tr>
- </tfoot>
- <tbody></tbody>
- </table>
- </div>
- </div>
- <div class="impactView inline toggleContentVisibility">
- <h3>Impact list for <span></span></h3>
- <div class="content">
- <table class="pageDetailTable" onclick="handleSelectDetailRow(this, event);">
- <thead>
- <tr>
- <th class="page">Name&nbsp;</th>
- <th class="value time">Time&nbsp;</th>
- <th class="value time">Percent▾&nbsp;</th>
- <th class="">Top Pages</th>
- </tr>
- </thead>
- <tbody></tbody>
- </table>
- </div>
+ <div id="pageDetail" class="panel">
+ <input type="checkbox" checked id="pageDetailCheckbox" class="panelCloserInput">
+ <label class="panelCloserLabel" for="pageDetailCheckbox">▼</label>
+ <h2>Page Comparison for <span></span></h2>
+ <div class="panelBody">
+ <table class="pageDetailTable" onclick="handleSelectDetailRow(this, event);">
+ <thead>
+ <tr>
+ <th class="page">Page&nbsp;</th>
+ <th class="value time">Time&nbsp;</th>
+ <th class="value time">Percent▾&nbsp;</th>
+ <th class="value time hideNoDiff">%/Entry&nbsp;</th>
+ <th class="value count">Count&nbsp;</th>
+ </tr>
+ </thead>
+ <tfoot>
+ <tr>
+ <td class="page">Total:</td>
+ <td class="value time"></td>
+ <td class="value time"></td>
+ <td class="value time hideNoDiff"></td>
+ <td class="value count"></td>
+ </tr>
+ </tfoot>
+ <tbody></tbody>
+ </table>
</div>
</div>
- <div id="pageVersionGraph" class="graph hidden toggleContentVisibility">
- <h3><span></span></h3>
- <div class="content"></div>
- </div>
- <div id="pageGraph" class="graph hidden toggleContentVisibility">
- <h3><span></span></h3>
- <div class="content"></div>
- </div>
- <div id="versionGraph" class="graph hidden toggleContentVisibility">
- <h3><span></span></h3>
- <div class="content"></div>
- </div>
- <div id="column" class="column">
- <div class="header">
- <select class="version" onchange="handleSelectVersion(this, event);"></select>
- <select class="pageVersion" onchange="handleSelectPage(this, event);"></select>
+ <div id="impactView" class="panel">
+ <input type="checkbox" checked id="impactViewCheckbox" class="panelCloserInput">
+ <label class="panelCloserLabel" for="impactViewCheckbox">▼</label>
+ <h2>Impact list for <span></span></h2>
+ <div class="panelBody">
+ <table class="pageDetailTable" onclick="handleSelectDetailRow(this, event);">
+ <thead>
+ <tr>
+ <th class="page">Name&nbsp;</th>
+ <th class="value time">Time&nbsp;</th>
+ <th class="value time">Percent▾&nbsp;</th>
+ <th class="">Top Pages</th>
+ </tr>
+ </thead>
+ <tbody></tbody>
+ </table>
</div>
- <table class="list" onclick="handleSelectRow(this, event);">
- <thead>
- <tr>
- <th class="position">Pos.&nbsp;</th>
- <th class="name">Name&nbsp;</th>
- <th class="value time">Time&nbsp;</th>
- <th class="value time">Percent&nbsp;</th>
- <th class="value count">Count&nbsp;</th>
- </tr>
- </thead>
- <tbody></tbody>
- </table>
</div>
+ </section>
+
+ <section id="pageVersionGraph" class="panel">
+ <input type="checkbox" id="pageVersionGraphCheckbox" class="panelCloserInput">
+ <label class="panelCloserLabel" for="pageVersionGraphCheckbox">▼</label>
+ <h2><span></span></h2>
+ <div class="panelBody"></div>
+ </section>
+
+ <section id="pageGraph" class="panel">
+ <input type="checkbox" id="pageGraphCheckbox" class="panelCloserInput">
+ <label class="panelCloserLabel" for="pageGraphCheckbox">▼</label>
+ <h2><span></span></h2>
+ <div class="panelBody"></div>
+ </section>
+
+ <section id="versionGraph" class="panel">
+ <input type="checkbox" id="versionGraphCheckbox" class="panelCloserInput">
+ <label class="panelCloserLabel" for="versionGraphCheckbox">▼</label>
+ <h2><span></span></h2>
+ <div class="panelBody"></div>
+ </section>
+
+ <div id="column" class="column">
+ <div class="header">
+ <select class="version" onchange="handleSelectVersion(this, event);"></select>
+ <select class="pageVersion" onchange="handleSelectPage(this, event);"></select>
+ </div>
+ <table class="list" onclick="handleSelectRow(this, event);">
+ <thead>
+ <tr>
+ <th class="position">Pos.&nbsp;</th>
+ <th class="name">Name&nbsp;</th>
+ <th class="value time">Time&nbsp;</th>
+ <th class="value time">Percent&nbsp;</th>
+ <th class="value count">Count&nbsp;</th>
+ </tr>
+ </thead>
+ <tbody></tbody>
+ </table>
</div>
- <div class="inline">
- <h2>Usage</h2>
- <ol>
- <li>Build chrome.</li>
- </ol>
- <h3>Telemetry benchmark</h3>
- <ol>
- <li>Run <code>v8.browsing</code> benchmarks:
- <pre>$CHROMIUM_DIR/tools/perf/run_benchmark run v8.browsing_desktop \
- --browser=exact --browser-executable=$CHROMIUM_DIR/out/release/chrome \
- --story-filter='.*2020 ' \
- --also-run-disabled-tests
- </pre>
- </li>
- <li>Install <a href="https://stedolan.github.io/jq/">jq</a>.</li>
- <li>Convert the telemetry JSON files to callstats JSON file:
- <pre>
- $V8_DIR/tools/callstats-from-telemetry.sh $CHROMIUM_DIR/tools/perf/artifacts/run_XXXX
- </pre>
- </li>
- <li>Load the generated <code>out.json</code></li>
- </ol>
- <h3>Merged CSV from results.html</h3>
- <ol>
- <li>Open a results.html page for RCS-enabled benchmarks</li>
- <li>Select "Export merged CSV" in the toolbar</li>
- <li>Load the downloading .csv file normally in callstats.html</li>
- </ol>
- <h3>Aggregated raw txt output</h3>
- <ol>
- <li>Install scipy, e.g. <code>sudo aptitude install python-scipy</code>
- <li>Check out a known working version of webpagereply:
- <pre>git -C $CHROME_DIR/third_party/webpagereplay checkout 7dbd94752d1cde5536ffc623a9e10a51721eff1d</pre>
- </li>
- <li>Run <code>callstats.py</code> with a web-page-replay archive:
- <pre>$V8_DIR/tools/callstats.py run \
- --replay-bin=$CHROME_SRC/third_party/webpagereplay/replay.py \
- --replay-wpr=$INPUT_DIR/top25.wpr \
- --js-flags="" \
- --with-chrome=$CHROME_SRC/out/Release/chrome \
- --sites-file=$INPUT_DIR/top25.json</pre>
- </li>
- <li>Move results file to a subdirectory: <code>mkdir $VERSION_DIR; mv *.txt $VERSION_DIR</code></li>
- <li>Repeat from step 1 with a different configuration (e.g. <code>--js-flags="--nolazy"</code>).</li>
- <li>Create the final results file: <code>./callstats.py json $VERSION_DIR1 $VERSION_DIR2 > result.json</code></li>
- <li>Use <code>results.json</code> on this site.</code>
- </ol>
- </div>
+ <section class="panel alwaysVisible">
+ <h2>Instructions</h2>
+ <div class="panelBody">
+ <ol>
+ <li>Build chrome.</li>
+ </ol>
+ <h3>Telemetry benchmark</h3>
+ <ol>
+ <li>Run <code>v8.browsing</code> benchmarks:
+ <pre>$CHROMIUM_DIR/tools/perf/run_benchmark run v8.browsing_desktop \
+ --browser=exact --browser-executable=$CHROMIUM_DIR/out/release/chrome \
+ --story-filter='.*2020 ' \
+ --also-run-disabled-tests
+ </pre>
+ </li>
+ <li>Install <a href="https://stedolan.github.io/jq/">jq</a>.</li>
+        <li>Convert the telemetry JSON files to a callstats JSON file:
+ <pre>
+ $V8_DIR/tools/callstats-from-telemetry.sh $CHROMIUM_DIR/tools/perf/artifacts/run_XXXX
+ </pre>
+ </li>
+ <li>Load the generated <code>out.json</code></li>
+ </ol>
+ <h3>Merged CSV from results.html</h3>
+ <ol>
+ <li>Open a results.html page for RCS-enabled benchmarks</li>
+        <li>Load the downloaded .csv file normally in callstats.html</li>
+ <li>Load the downloading .csv file normally in callstats.html</li>
+ </ol>
+ <h3>Aggregated raw txt output</h3>
+ <ol>
+ <li>Install scipy, e.g. <code>sudo aptitude install python-scipy</code>
+        <li>Check out a known working version of webpagereplay:
+ <pre>git -C $CHROME_DIR/third_party/webpagereplay checkout 7dbd94752d1cde5536ffc623a9e10a51721eff1d</pre>
+ </li>
+ <li>Run <code>callstats.py</code> with a web-page-replay archive:
+ <pre>$V8_DIR/tools/callstats.py run \
+ --replay-bin=$CHROME_SRC/third_party/webpagereplay/replay.py \
+ --replay-wpr=$INPUT_DIR/top25.wpr \
+ --js-flags="" \
+ --with-chrome=$CHROME_SRC/out/Release/chrome \
+ --sites-file=$INPUT_DIR/top25.json</pre>
+ </li>
+ <li>Move results file to a subdirectory: <code>mkdir $VERSION_DIR; mv *.txt $VERSION_DIR</code></li>
+ <li>Repeat from step 1 with a different configuration (e.g. <code>--js-flags="--nolazy"</code>).</li>
+ <li>Create the final results file: <code>./callstats.py json $VERSION_DIR1 $VERSION_DIR2 > result.json</code>
+ </li>
+        <li>Use the generated <code>result.json</code> on this site.</li>
+ </ol>
+ </div>
+ </section>
<div id="popover">
<div class="popoverArrow"></div>
@@ -2505,30 +2652,49 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
</tr>
<tr>
<td>Time:</td>
- <td class="time"></td><td>±</td><td class="timeVariance"></td>
- <td class="compare time"></td><td class="compare"> ± </td><td class="compare timeVariance"></td>
+ <td class="time"></td>
+ <td>±</td>
+ <td class="timeVariance"></td>
+ <td class="compare time"></td>
+ <td class="compare"> ± </td>
+ <td class="compare timeVariance"></td>
</tr>
<tr>
<td>Percent:</td>
- <td class="percent"></td><td>±</td><td class="percentVariance"></td>
- <td class="compare percent"></td><td class="compare"> ± </td><td class="compare percentVariance"></td>
+ <td class="percent"></td>
+ <td>±</td>
+ <td class="percentVariance"></td>
+ <td class="compare percent"></td>
+ <td class="compare"> ± </td>
+ <td class="compare percentVariance"></td>
</tr>
<tr>
<td>Percent per Entry:</td>
- <td class="percentPerEntry"></td><td colspan=2></td>
- <td class="compare percentPerEntry"></td><td colspan=2></td>
+ <td class="percentPerEntry"></td>
+ <td colspan=2></td>
+ <td class="compare percentPerEntry"></td>
+ <td colspan=2></td>
</tr>
<tr>
<td>Count:</td>
- <td class="count"></td><td>±</td><td class="countVariance"></td>
- <td class="compare count"></td><td class="compare"> ± </td><td class="compare countVariance"></td>
+ <td class="count"></td>
+ <td>±</td>
+ <td class="countVariance"></td>
+ <td class="compare count"></td>
+ <td class="compare"> ± </td>
+ <td class="compare countVariance"></td>
</tr>
<tr>
<td>Overall Impact:</td>
- <td class="timeImpact"></td><td>±</td><td class="timePercentImpact"></td>
- <td class="compare timeImpact"></td><td class="compare"> ± </td><td class="compare timePercentImpact"></td>
+ <td class="timeImpact"></td>
+ <td>±</td>
+ <td class="timePercentImpact"></td>
+ <td class="compare timeImpact"></td>
+ <td class="compare"> ± </td>
+ <td class="compare timePercentImpact"></td>
</tr>
</table>
</div>
</body>
-</html>
+
+</html>
\ No newline at end of file
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/exceptions.js b/deps/v8/tools/clusterfuzz/js_fuzzer/exceptions.js
index f981b82efce..efb1a8a6499 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/exceptions.js
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/exceptions.js
@@ -147,6 +147,7 @@ const DISALLOWED_DIFFERENTIAL_FUZZ_FLAGS = [
const ALLOWED_RUNTIME_FUNCTIONS = new Set([
// List of allowed runtime functions. Others will be replaced with no-ops.
'ArrayBufferDetach',
+ 'CompileBaseline',
'DeoptimizeFunction',
'DeoptimizeNow',
'EnableCodeLoggingForTesting',
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/mutators/function_call_mutator.js b/deps/v8/tools/clusterfuzz/js_fuzzer/mutators/function_call_mutator.js
index 4f34e15c06e..00272fcd55c 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/mutators/function_call_mutator.js
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/mutators/function_call_mutator.js
@@ -49,7 +49,7 @@ class FunctionCallMutator extends mutator.Mutator {
}
const probability = random.random();
- if (probability < 0.5) {
+ if (probability < 0.4) {
const randFunc = common.randomFunction(path);
if (randFunc) {
thisMutator.annotate(
@@ -58,7 +58,7 @@ class FunctionCallMutator extends mutator.Mutator {
path.node.callee = randFunc;
}
- } else if (probability < 0.7 && thisMutator.settings.engine == 'V8') {
+ } else if (probability < 0.6 && thisMutator.settings.engine == 'V8') {
const prepareTemplate = babelTemplate(
'__V8BuiltinPrepareFunctionForOptimization(ID)');
const optimizeTemplate = babelTemplate(
@@ -86,6 +86,28 @@ class FunctionCallMutator extends mutator.Mutator {
thisMutator.insertBeforeSkip(
path, _liftExpressionsToStatements(path, nodes));
}
+ } else if (probability < 0.75 && thisMutator.settings.engine == 'V8') {
+ const template = babelTemplate(
+ '__V8BuiltinCompileBaseline(ID)');
+
+ const nodes = [
+ template({
+ ID: babelTypes.cloneDeep(path.node.callee),
+ }).expression,
+ ];
+
+ thisMutator.annotate(
+ nodes[0],
+ `Compiling baseline ${path.node.callee.name}`);
+
+ if (!babelTypes.isExpressionStatement(path.parent)) {
+ nodes.push(path.node);
+ thisMutator.replaceWithSkip(
+ path, babelTypes.sequenceExpression(nodes));
+ } else {
+ thisMutator.insertBeforeSkip(
+ path, _liftExpressionsToStatements(path, nodes));
+ }
} else if (probability < 0.85 &&
thisMutator.settings.engine == 'V8') {
const template = babelTemplate(
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/resources/differential_fuzz_v8.js b/deps/v8/tools/clusterfuzz/js_fuzzer/resources/differential_fuzz_v8.js
index 042f1cec57e..f87641326e4 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/resources/differential_fuzz_v8.js
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/resources/differential_fuzz_v8.js
@@ -23,5 +23,7 @@ assertOptimized = () => {};
isNeverOptimize = () => {};
isAlwaysOptimize = () => {};
isInterpreted = () => {};
+isBaseline = () => {};
+isUnoptimized = () => {};
isOptimized = () => {};
isTurboFanned = () => {};
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_function_calls.js b/deps/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_function_calls.js
index 30793b34d9c..292c1c0c7e8 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_function_calls.js
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_function_calls.js
@@ -36,16 +36,14 @@ describe('Mutate functions', () => {
});
it('is robust without available functions', () => {
- // This chooses replacing fuctions.
- sandbox.stub(random, 'random').callsFake(() => { return 0.4; });
+ sandbox.stub(random, 'random').callsFake(() => { return 0.3; });
// We just ensure here that mutating this file doesn't throw.
loadAndMutate('mutate_function_call.js');
});
it('optimizes functions in V8', () => {
- // This omits function replacement and chooses V8 optimizations.
- sandbox.stub(random, 'random').callsFake(() => { return 0.6; });
+ sandbox.stub(random, 'random').callsFake(() => { return 0.5; });
const source = loadAndMutate('mutate_function_call.js');
const mutated = sourceHelpers.generateCode(source);
@@ -53,8 +51,16 @@ describe('Mutate functions', () => {
'mutate_function_call_expected.js', mutated);
});
+ it('compiles functions in V8 to baseline', () => {
+ sandbox.stub(random, 'random').callsFake(() => { return 0.7; });
+
+ const source = loadAndMutate('mutate_function_call.js');
+ const mutated = sourceHelpers.generateCode(source);
+ helpers.assertExpectedResult(
+ 'mutate_function_call_baseline_expected.js', mutated);
+ });
+
it('deoptimizes functions in V8', () => {
- // This chooses V8 deoptimization.
sandbox.stub(random, 'random').callsFake(() => { return 0.8; });
const source = loadAndMutate('mutate_function_call.js');
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_function_call_baseline_expected.js b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_function_call_baseline_expected.js
new file mode 100644
index 00000000000..1c02041cc5a
--- /dev/null
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_function_call_baseline_expected.js
@@ -0,0 +1,16 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/* FunctionCallMutator: Compiling baseline __f_0 */
+%CompileBaseline(__f_0);
+
+// Original: mutate_function_call.js
+__f_0(1);
+
+a = (
+/* FunctionCallMutator: Compiling baseline __f_0 */
+%CompileBaseline(__f_0), __f_0(1));
+foo(1, (
+/* FunctionCallMutator: Compiling baseline __f_0 */
+%CompileBaseline(__f_0), __f_0()));
diff --git a/deps/v8/tools/clusterfuzz/v8_foozzie_harness_adjust.js b/deps/v8/tools/clusterfuzz/v8_foozzie_harness_adjust.js
index 4a8ed35cc26..b81f8dd952a 100644
--- a/deps/v8/tools/clusterfuzz/v8_foozzie_harness_adjust.js
+++ b/deps/v8/tools/clusterfuzz/v8_foozzie_harness_adjust.js
@@ -89,6 +89,10 @@ try {
isInterpreted = function isInterpreted() {}
+ isBaseline = function isBaseline() {}
+
+ isUnoptimized = function isUnoptimized() {}
+
isOptimized = function isOptimized() {}
isTurboFanned = function isTurboFanned() {}
diff --git a/deps/v8/tools/clusterfuzz/v8_fuzz_flags.json b/deps/v8/tools/clusterfuzz/v8_fuzz_flags.json
index 8d52aeb2fe3..851694b89ad 100644
--- a/deps/v8/tools/clusterfuzz/v8_fuzz_flags.json
+++ b/deps/v8/tools/clusterfuzz/v8_fuzz_flags.json
@@ -28,5 +28,6 @@
[0.25, "--no-lazy-feedback-allocation"],
[0.1, "--no-lazy-feedback-allocation --interrupt-budget=100"],
[0.05, "--budget-for-feedback-vector-allocation=0"],
+ [0.1, "--no-wasm-generic-wrapper"],
[0.0001, "--simulate-errors"]
-]
\ No newline at end of file
+]
diff --git a/deps/v8/tools/debug_helper/BUILD.gn b/deps/v8/tools/debug_helper/BUILD.gn
index 54cd3b7a4cc..2256df1f553 100644
--- a/deps/v8/tools/debug_helper/BUILD.gn
+++ b/deps/v8/tools/debug_helper/BUILD.gn
@@ -96,6 +96,7 @@ v8_component("v8_debug_helper") {
"../..:generate_bytecode_builtins_list",
"../..:run_torque",
"../..:v8_headers",
+ "../..:v8_internal_headers",
"../..:v8_libbase",
"../..:v8_shared_internal_headers",
"../..:v8_tracing",
diff --git a/deps/v8/tools/debug_helper/debug-helper-internal.cc b/deps/v8/tools/debug_helper/debug-helper-internal.cc
index 29af7ebdd76..51c8da6f278 100644
--- a/deps/v8/tools/debug_helper/debug-helper-internal.cc
+++ b/deps/v8/tools/debug_helper/debug-helper-internal.cc
@@ -14,7 +14,7 @@ namespace debug_helper_internal {
bool IsPointerCompressed(uintptr_t address) {
#if COMPRESS_POINTERS_BOOL
- return address < i::kPtrComprHeapReservationSize;
+ return address < i::kPtrComprCageReservationSize;
#else
return false;
#endif
diff --git a/deps/v8/tools/debug_helper/get-object-properties.cc b/deps/v8/tools/debug_helper/get-object-properties.cc
index a6ebcc0761a..7199bc51d22 100644
--- a/deps/v8/tools/debug_helper/get-object-properties.cc
+++ b/deps/v8/tools/debug_helper/get-object-properties.cc
@@ -14,6 +14,7 @@
#include "src/objects/string-inl.h"
#include "src/strings/unicode-inl.h"
#include "torque-generated/class-debug-readers.h"
+#include "torque-generated/debug-macros.h"
namespace i = v8::internal;
@@ -133,6 +134,23 @@ TypedObject GetTypedObjectByInstanceType(uintptr_t address,
}
}
+bool IsTypedHeapObjectInstanceTypeOf(uintptr_t address,
+ d::MemoryAccessor accessor,
+ i::InstanceType instance_type) {
+ auto heap_object = std::make_unique<TqHeapObject>(address);
+ Value<uintptr_t> map_ptr = heap_object->GetMapValue(accessor);
+
+ if (map_ptr.validity == d::MemoryAccessResult::kOk) {
+ Value<i::InstanceType> type =
+ TqMap(map_ptr.value).GetInstanceTypeValue(accessor);
+ if (type.validity == d::MemoryAccessResult::kOk) {
+ return instance_type == type.value;
+ }
+ }
+
+ return false;
+}
+
TypedObject GetTypedHeapObject(uintptr_t address, d::MemoryAccessor accessor,
const char* type_hint,
const d::HeapAddresses& heap_addresses) {
@@ -330,7 +348,7 @@ class ReadStringVisitor : public TqObjectVisitor {
GetOrFinish(object->GetResourceDataValue(accessor_));
#ifdef V8_COMPRESS_POINTERS
uintptr_t data_address = static_cast<uintptr_t>(
- DecodeExternalPointer(GetIsolateForPtrComprFromOnHeapAddress(
+ DecodeExternalPointer(GetPtrComprCageBaseFromOnHeapAddress(
heap_addresses_.any_heap_pointer),
resource_data, kExternalStringResourceDataTag));
#else
@@ -654,6 +672,90 @@ std::unique_ptr<StackFrameResult> GetStackFrame(
sizeof(v8::internal::JSFunction),
std::vector<std::unique_ptr<StructProperty>>(),
d::PropertyKind::kSingle));
+ // Add more items in the Locals pane representing the JS function name,
+ // source file name, and line & column numbers within the source file, so
+ // that the user doesn’t need to dig through the shared_function_info to
+ // find them.
+ intptr_t js_function_ptr = 0;
+ validity = memory_accessor(
+ frame_pointer + StandardFrameConstants::kFunctionOffset,
+ reinterpret_cast<void*>(&js_function_ptr), sizeof(intptr_t));
+ if (validity == d::MemoryAccessResult::kOk) {
+ TqJSFunction js_function(js_function_ptr);
+ auto shared_function_info_ptr =
+ js_function.GetSharedFunctionInfoValue(memory_accessor);
+ if (shared_function_info_ptr.validity == d::MemoryAccessResult::kOk) {
+ TqSharedFunctionInfo shared_function_info(
+ shared_function_info_ptr.value);
+ auto script_or_debug_info_ptr =
+ shared_function_info.GetScriptOrDebugInfoValue(memory_accessor);
+ if (script_or_debug_info_ptr.validity == d::MemoryAccessResult::kOk) {
+          // Make sure script_or_debug_info_ptr is a Script.
+ auto address = script_or_debug_info_ptr.value;
+ if (IsTypedHeapObjectInstanceTypeOf(address, memory_accessor,
+ i::InstanceType::SCRIPT_TYPE)) {
+ TqScript script(script_or_debug_info_ptr.value);
+ props.push_back(std::make_unique<ObjectProperty>(
+ "script_name", kObjectAsStoredInHeap, kObject,
+ script.GetNameAddress(), 1, i::kTaggedSize,
+ std::vector<std::unique_ptr<StructProperty>>(),
+ d::PropertyKind::kSingle));
+ props.push_back(std::make_unique<ObjectProperty>(
+ "script_source", kObjectAsStoredInHeap, kObject,
+ script.GetSourceAddress(), 1, i::kTaggedSize,
+ std::vector<std::unique_ptr<StructProperty>>(),
+ d::PropertyKind::kSingle));
+ }
+ }
+ auto name_or_scope_info_ptr =
+ shared_function_info.GetNameOrScopeInfoValue(memory_accessor);
+ if (name_or_scope_info_ptr.validity == d::MemoryAccessResult::kOk) {
+ auto scope_info_address = name_or_scope_info_ptr.value;
+          // Make sure name_or_scope_info_ptr is a ScopeInfo.
+ if (IsTypedHeapObjectInstanceTypeOf(
+ scope_info_address, memory_accessor,
+ i::InstanceType::SCOPE_INFO_TYPE)) {
+ auto indexed_field_slice_function_variable_info =
+ TqDebugFieldSliceScopeInfoFunctionVariableInfo(
+ memory_accessor, scope_info_address);
+ if (indexed_field_slice_function_variable_info.validity ==
+ d::MemoryAccessResult::kOk) {
+ props.push_back(std::make_unique<ObjectProperty>(
+ "function_name", kObjectAsStoredInHeap, kObject,
+ scope_info_address - i::kHeapObjectTag +
+ std::get<1>(
+ indexed_field_slice_function_variable_info.value),
+ std::get<2>(
+ indexed_field_slice_function_variable_info.value),
+ i::kTaggedSize,
+ std::vector<std::unique_ptr<StructProperty>>(),
+ d::PropertyKind::kSingle));
+ }
+ std::vector<std::unique_ptr<StructProperty>>
+ position_info_struct_field_list;
+ position_info_struct_field_list.push_back(
+ std::make_unique<StructProperty>(
+ "start", kObjectAsStoredInHeap, kObject, 0, 0, 0));
+ position_info_struct_field_list.push_back(
+ std::make_unique<StructProperty>("end", kObjectAsStoredInHeap,
+ kObject, 4, 0, 0));
+ auto indexed_field_slice_position_info =
+ TqDebugFieldSliceScopeInfoPositionInfo(memory_accessor,
+ scope_info_address);
+ if (indexed_field_slice_position_info.validity ==
+ d::MemoryAccessResult::kOk) {
+ props.push_back(std::make_unique<ObjectProperty>(
+ "function_character_offset", "", "",
+ scope_info_address - i::kHeapObjectTag +
+ std::get<1>(indexed_field_slice_position_info.value),
+ std::get<2>(indexed_field_slice_position_info.value),
+ i::kTaggedSize, std::move(position_info_struct_field_list),
+ d::PropertyKind::kSingle));
+ }
+ }
+ }
+ }
+ }
}
}
diff --git a/deps/v8/tools/dev/gm.py b/deps/v8/tools/dev/gm.py
index c5ae178c18b..4e318f2f32e 100755
--- a/deps/v8/tools/dev/gm.py
+++ b/deps/v8/tools/dev/gm.py
@@ -98,15 +98,26 @@ TESTSUITES_TARGETS = {"benchmarks": "d8",
OUTDIR = "out"
+def _Which(cmd):
+ for path in os.environ["PATH"].split(os.pathsep):
+ if os.path.exists(os.path.join(path, cmd)):
+ return os.path.join(path, cmd)
+ return None
+
def DetectGoma():
- home_goma = os.path.expanduser("~/goma")
- if os.path.exists(home_goma):
- return home_goma
if os.environ.get("GOMA_DIR"):
return os.environ.get("GOMA_DIR")
if os.environ.get("GOMADIR"):
return os.environ.get("GOMADIR")
- return None
+ # There is a copy of goma in depot_tools, but it might not be in use on
+ # this machine.
+ goma = _Which("goma_ctl")
+ if goma is None: return None
+ cipd_bin = os.path.join(os.path.dirname(goma), ".cipd_bin")
+ if not os.path.exists(cipd_bin): return None
+ goma_auth = os.path.expanduser("~/.goma_client_oauth2_config")
+ if not os.path.exists(goma_auth): return None
+ return cipd_bin
GOMADIR = DetectGoma()
IS_GOMA_MACHINE = GOMADIR is not None
@@ -118,12 +129,11 @@ is_component_build = false
is_debug = false
%s
use_goma = {GOMA}
-goma_dir = \"{GOMA_DIR}\"
v8_enable_backtrace = true
v8_enable_disassembler = true
v8_enable_object_print = true
v8_enable_verify_heap = true
-""".replace("{GOMA}", USE_GOMA).replace("{GOMA_DIR}", str(GOMADIR))
+""".replace("{GOMA}", USE_GOMA)
DEBUG_ARGS_TEMPLATE = """\
is_component_build = true
@@ -131,12 +141,11 @@ is_debug = true
symbol_level = 2
%s
use_goma = {GOMA}
-goma_dir = \"{GOMA_DIR}\"
v8_enable_backtrace = true
v8_enable_fast_mksnapshot = true
v8_enable_slow_dchecks = true
v8_optimized_debug = false
-""".replace("{GOMA}", USE_GOMA).replace("{GOMA_DIR}", str(GOMADIR))
+""".replace("{GOMA}", USE_GOMA)
OPTDEBUG_ARGS_TEMPLATE = """\
is_component_build = true
@@ -144,12 +153,11 @@ is_debug = true
symbol_level = 1
%s
use_goma = {GOMA}
-goma_dir = \"{GOMA_DIR}\"
v8_enable_backtrace = true
v8_enable_fast_mksnapshot = true
v8_enable_verify_heap = true
v8_optimized_debug = true
-""".replace("{GOMA}", USE_GOMA).replace("{GOMA_DIR}", str(GOMADIR))
+""".replace("{GOMA}", USE_GOMA)
ARGS_TEMPLATES = {
"release": RELEASE_ARGS_TEMPLATE,
@@ -162,6 +170,17 @@ def PrintHelpAndExit():
print(HELP)
sys.exit(0)
+def PrintCompletionsAndExit():
+ for a in ARCHES:
+ print("%s" % a)
+ for m in MODES:
+ print("%s" % m)
+ print("%s.%s" % (a, m))
+ for t in TARGETS:
+ print("%s" % t)
+ print("%s.%s.%s" % (a, m, t))
+ sys.exit(0)
+
def _Call(cmd, silent=False):
if not silent: print("# %s" % cmd)
return subprocess.call(cmd, shell=True)
@@ -195,12 +214,6 @@ def _CallWithOutput(cmd):
p.wait()
return p.returncode, "".join(output)
-def _Which(cmd):
- for path in os.environ["PATH"].split(os.pathsep):
- if os.path.exists(os.path.join(path, cmd)):
- return os.path.join(path, cmd)
- return None
-
def _Write(filename, content):
print("# echo > %s << EOF\n%sEOF" % (filename, content))
with open(filename, "w") as f:
@@ -212,6 +225,11 @@ def _Notify(summary, body):
else:
print("{} - {}".format(summary, body))
+def _GetMachine():
+ # Once we migrate to Python3, this can use os.uname().machine.
+ # The index-based access is compatible with all Python versions.
+ return os.uname()[4]
+
def GetPath(arch, mode):
subdir = "%s.%s" % (arch, mode)
return os.path.join(OUTDIR, subdir)
@@ -239,33 +257,51 @@ class Config(object):
self.tests.update(tests)
def GetTargetCpu(self):
- if self.arch == "android_arm": return "target_cpu = \"arm\""
- if self.arch == "android_arm64": return "target_cpu = \"arm64\""
cpu = "x86"
- if "64" in self.arch or self.arch == "s390x":
+ if self.arch == "android_arm":
+ cpu = "arm"
+ elif self.arch == "android_arm64":
+ cpu = "arm64"
+ elif self.arch == "arm64" and _GetMachine() == "aarch64":
+ # arm64 build host:
+ cpu = "arm64"
+ elif self.arch == "arm" and _GetMachine() == "aarch64":
+ cpu = "arm"
+ elif "64" in self.arch or self.arch == "s390x":
+ # Native x64 or simulator build.
cpu = "x64"
- return "target_cpu = \"%s\"" % cpu
+ return ["target_cpu = \"%s\"" % cpu]
def GetV8TargetCpu(self):
- if self.arch == "android_arm": return "\nv8_target_cpu = \"arm\""
- if self.arch == "android_arm64": return "\nv8_target_cpu = \"arm64\""
- if self.arch in ("arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
- "riscv64", "s390", "s390x"):
- return "\nv8_target_cpu = \"%s\"" % self.arch
- return ""
+ if self.arch == "android_arm":
+ v8_cpu = "arm"
+ elif self.arch == "android_arm64":
+ v8_cpu = "arm64"
+ elif self.arch in ("arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
+ "riscv64", "s390", "s390x"):
+ v8_cpu = self.arch
+ else:
+ return []
+ return ["v8_target_cpu = \"%s\"" % v8_cpu]
def GetTargetOS(self):
if self.arch in ("android_arm", "android_arm64"):
- return "\ntarget_os = \"android\""
- return ""
+ return ["target_os = \"android\""]
+ return []
+
+ def GetSpecialCompiler(self):
+ if _GetMachine() == "aarch64":
+ # We have no prebuilt Clang for arm64. Use the system Clang instead.
+ return ["clang_base_path = \"/usr\"", "clang_use_chrome_plugins = false"]
+ return []
def GetGnArgs(self):
# Use only substring before first '-' as the actual mode
mode = re.match("([^-]+)", self.mode).group(1)
template = ARGS_TEMPLATES[mode]
arch_specific = (self.GetTargetCpu() + self.GetV8TargetCpu() +
- self.GetTargetOS())
- return template % arch_specific
+ self.GetTargetOS() + self.GetSpecialCompiler())
+ return template % "\n".join(arch_specific)
def Build(self):
path = GetPath(self.arch, self.mode)
@@ -352,6 +388,8 @@ class ArgumentParser(object):
def ParseArg(self, argstring):
if argstring in ("-h", "--help", "help"):
PrintHelpAndExit()
+ if argstring == "--print-completions":
+ PrintCompletionsAndExit()
arches = []
modes = []
targets = []
@@ -426,7 +464,7 @@ def Main(argv):
configs = parser.ParseArguments(argv[1:])
return_code = 0
# If we have Goma but it is not running, start it.
- if (GOMADIR is not None and
+ if (IS_GOMA_MACHINE and
_Call("ps -e | grep compiler_proxy > /dev/null", silent=True) != 0):
_Call("%s/goma_ctl.py ensure_start" % GOMADIR)
for c in configs:
diff --git a/deps/v8/tools/dumpcpp.mjs b/deps/v8/tools/dumpcpp.mjs
index 9deab5d2aae..316c48f65bf 100644
--- a/deps/v8/tools/dumpcpp.mjs
+++ b/deps/v8/tools/dumpcpp.mjs
@@ -27,7 +27,7 @@ export class CppProcessor extends LogReader {
* @override
*/
printError(str) {
- print(str);
+ console.log(str);
}
processLogFile(fileName) {
@@ -61,7 +61,7 @@ export class CppProcessor extends LogReader {
const entry = staticEntries[i];
const printValues = ['cpp', `0x${entry[0].toString(16)}`, entry[1].size,
`"${entry[1].name}"`];
- print(printValues.join(','));
+ console.log(printValues.join(','));
}
}
}
diff --git a/deps/v8/tools/find-builtin b/deps/v8/tools/find-builtin
new file mode 100755
index 00000000000..29cb0f4c4b7
--- /dev/null
+++ b/deps/v8/tools/find-builtin
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+set -euo pipefail
+
+BUILTIN_NAME="$1"
+
+if ! which rg >/dev/null ; then
+ echo >&2 "This tool requires 'rg', install it with 'sudo apt install ripgrep'"
+ exit 1
+fi
+
+TOOLS_DIRNAME="$(dirname "$0")"
+V8_DIRNAME="$(dirname "$TOOLS_DIRNAME")"
+
+if rg --type-add 'tq:*.tq' --type tq --with-filename --line-number "\bbuiltin $BUILTIN_NAME\b" "$V8_DIRNAME" | rg -v '\bextern builtin\b' | cut -f1-2 -d: ; then
+ exit 0
+fi
+
+if rg --type cpp --with-filename --line-number "\b(TF_BUILTIN\(|::Generate_?)$BUILTIN_NAME\b" "$V8_DIRNAME" | cut -f1-2 -d: ; then
+ exit 0
+fi
+
+echo >&2 "Builtin '$BUILTIN_NAME' not found"
+exit 1
\ No newline at end of file
diff --git a/deps/v8/tools/gcmole/gcmole.py b/deps/v8/tools/gcmole/gcmole.py
index 978cd315981..3df0788adef 100644
--- a/deps/v8/tools/gcmole/gcmole.py
+++ b/deps/v8/tools/gcmole/gcmole.py
@@ -99,6 +99,7 @@ def MakeClangCommandLine(plugin, plugin_args, arch_cfg, clang_bin_dir,
arch_cfg.arch_define,
"-DENABLE_DEBUGGER_SUPPORT",
"-DV8_INTL_SUPPORT",
+ "-DV8_ENABLE_WEBASSEMBLY",
"-I./",
"-Iinclude/",
"-Iout/build/gen",
@@ -306,7 +307,7 @@ ALLOWLIST = [
]
GC_PATTERN = ",.*Collect.*Garbage"
-SAFEPOINT_PATTERN = ",EnterSafepoint"
+SAFEPOINT_PATTERN = ",SafepointSlowPath"
ALLOWLIST_PATTERN = "|".join("(?:%s)" % p for p in ALLOWLIST)
diff --git a/deps/v8/tools/ic-processor-driver.mjs b/deps/v8/tools/ic-processor-driver.mjs
index c8042736b03..2087ef80564 100644
--- a/deps/v8/tools/ic-processor-driver.mjs
+++ b/deps/v8/tools/ic-processor-driver.mjs
@@ -22,7 +22,7 @@ export function readFile(fileName) {
try {
return read(fileName);
} catch (e) {
- print(fileName + ': ' + (e.message || e));
+ console.log(fileName + ': ' + (e.message || e));
throw e;
}
}
@@ -78,13 +78,13 @@ const accumulator = {
StoreInArrayLiteralIC: 0,
}
for (const ic of processor.icTimeline.all) {
- print(Object.values(ic));
+ console.log(Object.values(ic));
accumulator[ic.type]++;
}
-print("========================================");
+console.log("========================================");
for (const key of Object.keys(accumulator)) {
- print(key + ": " + accumulator[key]);
+ console.log(key + ": " + accumulator[key]);
}
diff --git a/deps/v8/tools/index.html b/deps/v8/tools/index.html
index 93155dfbdfd..53b22f170d5 100644
--- a/deps/v8/tools/index.html
+++ b/deps/v8/tools/index.html
@@ -30,6 +30,7 @@ a:link, a:visited {
text-align: center;
text-decoration: none;
display: inline-block;
+ border-radius: 2px;
}
a:hover, a:active {
background-color: white;
@@ -42,12 +43,16 @@ a:hover, a:active {
background-color: #000000;
grid-gap: 15px;
}
+.grid-2{
+ grid-template-columns: auto auto;
+}
.card {
text-align: center;
padding: 10px 50px 10px 50px ;
box-shadow: 0 4px 8px 0 rgba(0,0,0,0.2);
background-color: #121212;
width: auto;
+ border-radius: 10px;
}
.card:hover {
box-shadow: 0 8px 16px 0 rgba(0,0,0,0.2);
@@ -95,13 +100,15 @@ dd, dt {
<dt><a href="./zone-stats/index.html">Zone Stats</a></dt>
<dd>Analyse zone memory usage.</dd>
</div>
+ </dl>
+ <dl class="grid-container grid-2">
<div class="card">
- <dt><a href="https://v8.dev/tools">Other V8 Versions</a></dt>
- <dd>Check out the V8 website for available tool versions.</dd>
+ <dt><a href="https://v8.dev/tools/versions">Other V8 Versions</a></dt>
+ <dd>Archived versions of V8 tools</dd>
</div>
<div class="card">
<dt><a href="https://v8.dev">V8.Dev</a></dt>
- <dd>Check out the V8 website for more information.</dd>
+ <dd>The main V8 website.</dd>
</div>
</dl>
</div>
diff --git a/deps/v8/tools/ninja/ninja_output.py b/deps/v8/tools/ninja/ninja_output.py
deleted file mode 100644
index ec4d27e097f..00000000000
--- a/deps/v8/tools/ninja/ninja_output.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-import os
-import os.path
-
-
-def GetNinjaOutputDirectory(v8_root, configuration=None):
- """Returns <v8_root>/<output_dir>/(Release|Debug).
-
- The configuration chosen is the one most recently generated/built, but can be
- overriden via the <configuration> parameter. Detects a custom output_dir
- specified by GYP_GENERATOR_FLAGS."""
-
- output_dir = 'out'
- generator_flags = os.getenv('GYP_GENERATOR_FLAGS', '').split(' ')
- for flag in generator_flags:
- name_value = flag.split('=', 1)
- if len(name_value) == 2 and name_value[0] == 'output_dir':
- output_dir = name_value[1]
-
- root = os.path.join(v8_root, output_dir)
- if configuration:
- return os.path.join(root, configuration)
-
- debug_path = os.path.join(root, 'Debug')
- release_path = os.path.join(root, 'Release')
-
- def is_release_newer(test_path):
- try:
- debug_mtime = os.path.getmtime(os.path.join(debug_path, test_path))
- except os.error:
- debug_mtime = 0
- try:
- rel_mtime = os.path.getmtime(os.path.join(release_path, test_path))
- except os.error:
- rel_mtime = 0
- return rel_mtime >= debug_mtime
-
- if is_release_newer('.ninja_log') or is_release_newer('.ninja_deps'):
- return release_path
- return debug_path
diff --git a/deps/v8/tools/profview/profile-utils.js b/deps/v8/tools/profview/profile-utils.js
index 9007855ea9b..35fe3d7cb25 100644
--- a/deps/v8/tools/profview/profile-utils.js
+++ b/deps/v8/tools/profview/profile-utils.js
@@ -21,7 +21,8 @@ let codeKinds = [
"JSOPT",
"JSUNOPT",
"JSNCI",
- "JSTURBOPROP"
+ "JSTURBOPROP",
+ "JSBASELINE",
];
function resolveCodeKind(code) {
@@ -59,6 +60,8 @@ function resolveCodeKind(code) {
return "JSUNOPT";
} else if (code.kind === "NCI") {
return "JSNCI";
+ } else if (code.kind === "Baseline") {
+ return "JSBASELINE";
} else if (code.kind === "Turboprop") {
return "JSTURBOPROP";
}
@@ -272,6 +275,7 @@ function buildCategoryTreeAndLookup() {
addCategory("JS Optimized", [ "JSOPT" ]);
addCategory("JS NCI", [ "JSNCI" ]);
addCategory("JS Turboprop", [ "JSTURBOPROP" ]);
+ addCategory("JS Baseline", [ "JSBASELINE" ]);
addCategory("JS Unoptimized", [ "JSUNOPT", "BC" ]);
addCategory("IC", [ "IC" ]);
addCategory("RegExp", [ "REGEXP" ]);
diff --git a/deps/v8/tools/profview/profview.js b/deps/v8/tools/profview/profview.js
index 248146f99f8..15a74f72405 100644
--- a/deps/v8/tools/profview/profview.js
+++ b/deps/v8/tools/profview/profview.js
@@ -224,6 +224,10 @@ const bucketDescriptors =
color : "#693eb8",
backgroundColor : "#a6c452",
text : "JS Turboprop" },
+ { kinds : [ "JSBASELINE" ],
+ color : "#b3005b",
+ backgroundColor : "#ff9e80",
+ text : "JS Baseline" },
{ kinds : [ "JSUNOPT", "BC" ],
color : "#dd2c00",
backgroundColor : "#ff9e80",
diff --git a/deps/v8/tools/release/auto_roll.py b/deps/v8/tools/release/auto_roll.py
index 27ba3e42168..ffba545c8f2 100755
--- a/deps/v8/tools/release/auto_roll.py
+++ b/deps/v8/tools/release/auto_roll.py
@@ -155,13 +155,14 @@ class UploadCL(Step):
message.append(ISSUE_MSG)
- message.append("TBR=%s" % self._options.reviewer)
+ message.append("R=%s" % self._options.reviewer)
self.GitCommit("\n\n".join(message), author=self._options.author, cwd=cwd)
if not self._options.dry_run:
self.GitUpload(force=True,
bypass_hooks=True,
cq=self._options.use_commit_queue,
cq_dry_run=self._options.use_dry_run,
+ set_bot_commit=True,
cwd=cwd)
print("CL uploaded.")
else:
diff --git a/deps/v8/tools/release/git_recipes.py b/deps/v8/tools/release/git_recipes.py
index 716d1461364..a90266aa714 100644
--- a/deps/v8/tools/release/git_recipes.py
+++ b/deps/v8/tools/release/git_recipes.py
@@ -206,8 +206,9 @@ class GitRecipesMixin(object):
self.Git(MakeArgs(args), **kwargs)
def GitUpload(self, reviewer="", force=False, cq=False,
- cq_dry_run=False, bypass_hooks=False, cc="", tbr_reviewer="",
- no_autocc=False, message_file=None, **kwargs):
+ cq_dry_run=False, set_bot_commit=False, bypass_hooks=False,
+ cc="", tbr_reviewer="", no_autocc=False, message_file=None,
+ **kwargs):
args = ["cl upload --send-mail"]
if reviewer:
args += ["-r", Quoted(reviewer)]
@@ -219,6 +220,8 @@ class GitRecipesMixin(object):
args.append("--use-commit-queue")
if cq_dry_run:
args.append("--cq-dry-run")
+ if set_bot_commit:
+ args.append("--set-bot-commit")
if bypass_hooks:
args.append("--bypass-hooks")
if no_autocc:
diff --git a/deps/v8/tools/release/test_scripts.py b/deps/v8/tools/release/test_scripts.py
index cf86efb3cad..bfac9a4a34a 100755
--- a/deps/v8/tools/release/test_scripts.py
+++ b/deps/v8/tools/release/test_scripts.py
@@ -540,7 +540,7 @@ CQ_INCLUDE_TRYBOTS=luci.chromium.try:mac_optional_gpu_tests_rel
CQ_INCLUDE_TRYBOTS=luci.chromium.try:win_optional_gpu_tests_rel
CQ_INCLUDE_TRYBOTS=luci.chromium.try:android_optional_gpu_tests_rel
-TBR=reviewer@chromium.org"""
+R=reviewer@chromium.org"""
# Snippet from the original DEPS file.
FAKE_DEPS = """
@@ -624,7 +624,7 @@ deps = {
self.ROLL_COMMIT_MSG),
"", cwd=chrome_dir),
Cmd("git cl upload --send-mail -f "
- "--cq-dry-run --bypass-hooks", "",
+ "--cq-dry-run --set-bot-commit --bypass-hooks", "",
cwd=chrome_dir),
Cmd("git checkout -f master", "", cwd=chrome_dir),
Cmd("git branch -D work-branch", "", cwd=chrome_dir),
diff --git a/deps/v8/tools/system-analyzer/index.css b/deps/v8/tools/system-analyzer/index.css
index 27b07531bf6..ad3f24d27ba 100644
--- a/deps/v8/tools/system-analyzer/index.css
+++ b/deps/v8/tools/system-analyzer/index.css
@@ -132,7 +132,7 @@ dd {
font-weight: 400;
}
-.panel > select{
+.panel > select {
width: calc(100% + 20px);
margin: 0 -10px 10px -10px;
}
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
index 295633258b7..fdaf0370a17 100644
--- a/deps/v8/tools/testrunner/base_runner.py
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -358,9 +358,6 @@ class BaseTestRunner(object):
help="Path to a file for storing json results.")
parser.add_option('--slow-tests-cutoff', type="int", default=100,
help='Collect N slowest tests')
- parser.add_option("--junitout", help="File name of the JUnit output")
- parser.add_option("--junittestsuite", default="v8tests",
- help="The testsuite name in the JUnit output file")
parser.add_option("--exit-after-n-failures", type="int", default=100,
help="Exit after the first N failures instead of "
"running all tests. Pass 0 to disable this feature.")
@@ -650,9 +647,6 @@ class BaseTestRunner(object):
'--no-enable-sse4_1'])
# Set no_simd_sse on architectures without Simd enabled.
- if self.build_config.arch == 'ppc64':
- no_simd_sse = True
-
if self.build_config.arch == 'mips64el' or \
self.build_config.arch == 'mipsel':
no_simd_sse = not simd_mips
@@ -786,9 +780,6 @@ class BaseTestRunner(object):
def _create_progress_indicators(self, test_count, options):
procs = [PROGRESS_INDICATORS[options.progress]()]
- if options.junitout:
- procs.append(progress.JUnitTestProgressIndicator(options.junitout,
- options.junittestsuite))
if options.json_test_results:
procs.append(progress.JsonTestProgressIndicator(self.framework_name))
diff --git a/deps/v8/tools/testrunner/local/junit_output.py b/deps/v8/tools/testrunner/local/junit_output.py
deleted file mode 100644
index 52f31ec422a..00000000000
--- a/deps/v8/tools/testrunner/local/junit_output.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import xml.etree.ElementTree as xml
-
-
-class JUnitTestOutput:
- def __init__(self, test_suite_name):
- self.root = xml.Element("testsuite")
- self.root.attrib["name"] = test_suite_name
-
- def HasRunTest(self, test_name, test_cmd, test_duration, test_failure):
- testCaseElement = xml.Element("testcase")
- testCaseElement.attrib["name"] = test_name
- testCaseElement.attrib["cmd"] = test_cmd
- testCaseElement.attrib["time"] = str(round(test_duration, 3))
- if len(test_failure):
- failureElement = xml.Element("failure")
- failureElement.text = test_failure
- testCaseElement.append(failureElement)
- self.root.append(testCaseElement)
-
- def FinishAndWrite(self, f):
- xml.ElementTree(self.root).write(f, "UTF-8")
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index 595f7e27f45..0af6d3ab0e6 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -13,8 +13,10 @@ ALL_VARIANT_FLAGS = {
"infra_staging": [[]],
"interpreted_regexp": [["--regexp-interpret-all"]],
"experimental_regexp": [["--default-to-experimental-regexp-engine"]],
+ "concurrent_inlining": [["--concurrent-inlining"]],
"jitless": [["--jitless"]],
"sparkplug": [["--sparkplug"]],
+  "always_sparkplug": [["--always-sparkplug"]],
"minor_mc": [["--minor-mc"]],
"no_lfa": [["--no-lazy-feedback-allocation"]],
# No optimization means disable all optimizations. OptimizeFunctionOnNextCall
@@ -23,14 +25,13 @@ ALL_VARIANT_FLAGS = {
# For WebAssembly, we test "Liftoff-only" in the nooptimization variant and
# "TurboFan-only" in the stress variant. The WebAssembly configuration is
# independent of JS optimizations, so we can combine those configs.
- "nooptimization": [["--no-opt", "--liftoff", "--no-wasm-tier-up",
- "--wasm-generic-wrapper"]],
+ "nooptimization": [["--no-opt", "--liftoff", "--no-wasm-tier-up"]],
"slow_path": [["--force-slow-path"]],
- "stress": [["--stress-opt", "--no-liftoff", "--stress-lazy-source-positions"]],
+ "stress": [["--stress-opt", "--no-liftoff", "--stress-lazy-source-positions",
+ "--no-wasm-generic-wrapper"]],
"stress_concurrent_allocation": [["--stress-concurrent-allocation"]],
"stress_concurrent_inlining": [["--stress-concurrent-inlining"]],
"stress_js_bg_compile_wasm_code_gc": [["--stress-background-compile",
- "--finalize-streaming-on-background",
"--stress-wasm-code-gc"]],
"stress_incremental_marking": [["--stress-incremental-marking"]],
"stress_snapshot": [["--stress-snapshot"]],
@@ -50,23 +51,34 @@ ALL_VARIANT_FLAGS = {
# implications defined in flag-definitions.h.
INCOMPATIBLE_FLAGS_PER_VARIANT = {
"assert_types": ["--no-assert-types"],
- "jitless": ["--opt", "--always-opt", "--liftoff", "--track-field-types", "--validate-asm", "--sparkplug", "--always-sparkplug"],
+ "jitless": ["--opt", "--always-opt", "--liftoff", "--track-field-types",
+ "--validate-asm", "--sparkplug", "--always-sparkplug"],
"no_wasm_traps": ["--wasm-trap-handler"],
- "nooptimization": ["--opt", "--always-opt", "--no-liftoff", "--wasm-tier-up"],
+ "nooptimization": ["--opt", "--always-opt", "--no-liftoff",
+ "--wasm-tier-up"],
"slow_path": ["--no-force-slow-path"],
"stress_concurrent_allocation": ["--single-threaded-gc", "--predictable"],
- "stress_concurrent_inlining": ["--single-threaded", "--predictable", "--no-turbo-direct-heap-access"],
+ "stress_concurrent_inlining": ["--single-threaded", "--predictable",
+ "--no-turbo-direct-heap-access"],
"stress_incremental_marking": ["--no-stress-incremental-marking"],
- "future": ["--parallel-compile-tasks", "--no-turbo-direct-heap-access"],
- "stress_js_bg_compile_wasm_code_gc": ["--no-stress-background-compile", "--parallel-compile-tasks"],
- "stress": ["--no-stress-opt", "--always-opt", "--no-always-opt", "--liftoff", "--max-inlined-bytecode-size=*",
- "--max-inlined-bytecode-size-cumulative=*", "--stress-inline"],
- "sparkplug": ["--jitless"],
- "turboprop": ["--interrupt-budget=*", "--no-turbo-direct-heap-access", "--no-turboprop"],
- "turboprop_as_toptier": ["--interrupt-budget=*", "--no-turbo-direct-heap-access", "--no-turboprop", "--no-turboprop-as-toptier"],
- "code_serializer": ["--cache=after-execute", "--cache=full-code-cache", "--cache=none"],
+ "future": ["--no-turbo-direct-heap-access"],
+ "stress_js_bg_compile_wasm_code_gc": ["--no-stress-background-compile"],
+ "stress": ["--no-stress-opt", "--always-opt", "--no-always-opt", "--liftoff",
+ "--max-inlined-bytecode-size=*",
+ "--max-inlined-bytecode-size-cumulative=*", "--stress-inline",
+ "--wasm-generic-wrapper"],
+  "sparkplug": ["--jitless", "--no-sparkplug"],
+ "always_sparkplug": ["--jitless", "--no-sparkplug", "--no-always-sparkplug"],
+ "turboprop": ["--interrupt-budget=*", "--no-turbo-direct-heap-access",
+ "--no-turboprop"],
+ "turboprop_as_toptier": ["--interrupt-budget=*",
+ "--no-turbo-direct-heap-access", "--no-turboprop",
+ "--no-turboprop-as-toptier"],
+ "code_serializer": ["--cache=after-execute", "--cache=full-code-cache",
+ "--cache=none"],
"no_local_heaps": ["--concurrent-inlining", "--turboprop"],
- "experimental_regexp": ["--no-enable-experimental-regexp-engine", "--no-default-to-experimental-regexp-engine"],
+ "experimental_regexp": ["--no-enable-experimental-regexp-engine",
+ "--no-default-to-experimental-regexp-engine"],
}
# Flags that lead to a contradiction under certain build variables.
@@ -93,12 +105,13 @@ INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG = {
"--enable-armv8": ["--no-enable-armv8"],
"--gc-interval=*": ["--gc-interval=*"],
"--no-enable-sse3": ["--enable-sse3"],
+ "--no-enable-ssse3": ["--enable-ssse3"],
"--no-enable-sse4-1": ["--enable-sse4-1"],
"--optimize-for-size": ["--max-semi-space-size=*"],
"--stress_concurrent_allocation": ["--single-threaded-gc", "--predictable"],
"--stress_concurrent_inlining": ["--single-threaded", "--predictable"],
"--stress-flush-bytecode": ["--no-stress-flush-bytecode"],
- "--future": ["--parallel-compile-tasks", "--no-turbo-direct-heap-access"],
+ "--future": ["--no-turbo-direct-heap-access"],
"--stress-incremental-marking": INCOMPATIBLE_FLAGS_PER_VARIANT["stress_incremental_marking"],
}
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
index 634ef7c2f2e..9ff943a5c2b 100644
--- a/deps/v8/tools/testrunner/testproc/progress.py
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -15,7 +15,6 @@ import time
from . import base
from . import util
-from ..local import junit_output
def print_failure_header(test):
@@ -349,45 +348,6 @@ class MonochromeProgressIndicator(CompactProgressIndicator):
print(("\r" + (" " * last_length) + "\r"), end='')
-class JUnitTestProgressIndicator(ProgressIndicator):
- def __init__(self, junitout, junittestsuite):
- super(JUnitTestProgressIndicator, self).__init__()
- self._requirement = base.DROP_PASS_STDOUT
-
- self.outputter = junit_output.JUnitTestOutput(junittestsuite)
- if junitout:
- self.outfile = open(junitout, "w")
- else:
- self.outfile = sys.stdout
-
- def _on_result_for(self, test, result):
- # TODO(majeski): Support for dummy/grouped results
- fail_text = ""
- output = result.output
- if result.has_unexpected_output:
- stdout = output.stdout.strip()
- if len(stdout):
- fail_text += "stdout:\n%s\n" % stdout
- stderr = output.stderr.strip()
- if len(stderr):
- fail_text += "stderr:\n%s\n" % stderr
- fail_text += "Command: %s" % result.cmd.to_string()
- if output.HasCrashed():
- fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
- if output.HasTimedOut():
- fail_text += "--- TIMEOUT ---"
- self.outputter.HasRunTest(
- test_name=str(test),
- test_cmd=result.cmd.to_string(relative=True),
- test_duration=output.duration,
- test_failure=fail_text)
-
- def finished(self):
- self.outputter.FinishAndWrite(self.outfile)
- if self.outfile != sys.stdout:
- self.outfile.close()
-
-
class JsonTestProgressIndicator(ProgressIndicator):
def __init__(self, framework_name):
super(JsonTestProgressIndicator, self).__init__()
diff --git a/deps/v8/tools/tickprocessor.mjs b/deps/v8/tools/tickprocessor.mjs
index 3041a0cddda..1ad67e22c0d 100644
--- a/deps/v8/tools/tickprocessor.mjs
+++ b/deps/v8/tools/tickprocessor.mjs
@@ -293,7 +293,7 @@ export class TickProcessor extends LogReader {
}
processCodeCreation(type, kind, timestamp, start, size, name, maybe_func) {
- if (maybe_func.length) {
+ if (type != 'RegExp' && maybe_func.length) {
const funcAddr = parseInt(maybe_func[0]);
const state = Profile.parseState(maybe_func[1]);
this.profile_.addFuncCode(type, name, timestamp, start, size, funcAddr, state);
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index dba01c88390..f0cc6481a88 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -95,48 +95,48 @@ INSTANCE_TYPES = {
131: "BYTECODE_ARRAY_TYPE",
132: "FIXED_DOUBLE_ARRAY_TYPE",
133: "INTERNAL_CLASS_WITH_SMI_ELEMENTS_TYPE",
- 134: "SCOPE_INFO_TYPE",
- 135: "SLOPPY_ARGUMENTS_ELEMENTS_TYPE",
- 136: "AWAIT_CONTEXT_TYPE",
- 137: "BLOCK_CONTEXT_TYPE",
- 138: "CATCH_CONTEXT_TYPE",
- 139: "DEBUG_EVALUATE_CONTEXT_TYPE",
- 140: "EVAL_CONTEXT_TYPE",
- 141: "FUNCTION_CONTEXT_TYPE",
- 142: "MODULE_CONTEXT_TYPE",
- 143: "NATIVE_CONTEXT_TYPE",
- 144: "SCRIPT_CONTEXT_TYPE",
- 145: "WITH_CONTEXT_TYPE",
- 146: "EXPORTED_SUB_CLASS_BASE_TYPE",
- 147: "EXPORTED_SUB_CLASS_TYPE",
- 148: "EXPORTED_SUB_CLASS2_TYPE",
- 149: "SMALL_ORDERED_HASH_MAP_TYPE",
- 150: "SMALL_ORDERED_HASH_SET_TYPE",
- 151: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
- 152: "DESCRIPTOR_ARRAY_TYPE",
- 153: "STRONG_DESCRIPTOR_ARRAY_TYPE",
- 154: "SOURCE_TEXT_MODULE_TYPE",
- 155: "SYNTHETIC_MODULE_TYPE",
- 156: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
- 157: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
- 158: "WEAK_FIXED_ARRAY_TYPE",
- 159: "TRANSITION_ARRAY_TYPE",
- 160: "CELL_TYPE",
- 161: "CODE_TYPE",
- 162: "CODE_DATA_CONTAINER_TYPE",
- 163: "COVERAGE_INFO_TYPE",
- 164: "EMBEDDER_DATA_ARRAY_TYPE",
- 165: "FEEDBACK_METADATA_TYPE",
- 166: "FEEDBACK_VECTOR_TYPE",
- 167: "FILLER_TYPE",
- 168: "FREE_SPACE_TYPE",
- 169: "INTERNAL_CLASS_TYPE",
- 170: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
- 171: "MAP_TYPE",
- 172: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE",
- 173: "PREPARSE_DATA_TYPE",
- 174: "PROPERTY_ARRAY_TYPE",
- 175: "PROPERTY_CELL_TYPE",
+ 134: "SLOPPY_ARGUMENTS_ELEMENTS_TYPE",
+ 135: "AWAIT_CONTEXT_TYPE",
+ 136: "BLOCK_CONTEXT_TYPE",
+ 137: "CATCH_CONTEXT_TYPE",
+ 138: "DEBUG_EVALUATE_CONTEXT_TYPE",
+ 139: "EVAL_CONTEXT_TYPE",
+ 140: "FUNCTION_CONTEXT_TYPE",
+ 141: "MODULE_CONTEXT_TYPE",
+ 142: "NATIVE_CONTEXT_TYPE",
+ 143: "SCRIPT_CONTEXT_TYPE",
+ 144: "WITH_CONTEXT_TYPE",
+ 145: "EXPORTED_SUB_CLASS_BASE_TYPE",
+ 146: "EXPORTED_SUB_CLASS_TYPE",
+ 147: "EXPORTED_SUB_CLASS2_TYPE",
+ 148: "SMALL_ORDERED_HASH_MAP_TYPE",
+ 149: "SMALL_ORDERED_HASH_SET_TYPE",
+ 150: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
+ 151: "DESCRIPTOR_ARRAY_TYPE",
+ 152: "STRONG_DESCRIPTOR_ARRAY_TYPE",
+ 153: "SOURCE_TEXT_MODULE_TYPE",
+ 154: "SYNTHETIC_MODULE_TYPE",
+ 155: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
+ 156: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
+ 157: "WEAK_FIXED_ARRAY_TYPE",
+ 158: "TRANSITION_ARRAY_TYPE",
+ 159: "CELL_TYPE",
+ 160: "CODE_TYPE",
+ 161: "CODE_DATA_CONTAINER_TYPE",
+ 162: "COVERAGE_INFO_TYPE",
+ 163: "EMBEDDER_DATA_ARRAY_TYPE",
+ 164: "FEEDBACK_METADATA_TYPE",
+ 165: "FEEDBACK_VECTOR_TYPE",
+ 166: "FILLER_TYPE",
+ 167: "FREE_SPACE_TYPE",
+ 168: "INTERNAL_CLASS_TYPE",
+ 169: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
+ 170: "MAP_TYPE",
+ 171: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE",
+ 172: "PREPARSE_DATA_TYPE",
+ 173: "PROPERTY_ARRAY_TYPE",
+ 174: "PROPERTY_CELL_TYPE",
+ 175: "SCOPE_INFO_TYPE",
176: "SHARED_FUNCTION_INFO_TYPE",
177: "SMI_BOX_TYPE",
178: "SMI_PAIR_TYPE",
@@ -233,16 +233,16 @@ INSTANCE_TYPES = {
# List of known V8 maps.
KNOWN_MAPS = {
- ("read_only_space", 0x02119): (171, "MetaMap"),
+ ("read_only_space", 0x02119): (170, "MetaMap"),
("read_only_space", 0x02141): (67, "NullMap"),
- ("read_only_space", 0x02169): (153, "StrongDescriptorArrayMap"),
- ("read_only_space", 0x02191): (158, "WeakFixedArrayMap"),
+ ("read_only_space", 0x02169): (152, "StrongDescriptorArrayMap"),
+ ("read_only_space", 0x02191): (157, "WeakFixedArrayMap"),
("read_only_space", 0x021d1): (97, "EnumCacheMap"),
("read_only_space", 0x02205): (117, "FixedArrayMap"),
("read_only_space", 0x02251): (8, "OneByteInternalizedStringMap"),
- ("read_only_space", 0x0229d): (168, "FreeSpaceMap"),
- ("read_only_space", 0x022c5): (167, "OnePointerFillerMap"),
- ("read_only_space", 0x022ed): (167, "TwoPointerFillerMap"),
+ ("read_only_space", 0x0229d): (167, "FreeSpaceMap"),
+ ("read_only_space", 0x022c5): (166, "OnePointerFillerMap"),
+ ("read_only_space", 0x022ed): (166, "TwoPointerFillerMap"),
("read_only_space", 0x02315): (67, "UninitializedMap"),
("read_only_space", 0x0238d): (67, "UndefinedMap"),
("read_only_space", 0x023d1): (66, "HeapNumberMap"),
@@ -253,139 +253,139 @@ KNOWN_MAPS = {
("read_only_space", 0x02559): (118, "HashTableMap"),
("read_only_space", 0x02581): (64, "SymbolMap"),
("read_only_space", 0x025a9): (40, "OneByteStringMap"),
- ("read_only_space", 0x025d1): (134, "ScopeInfoMap"),
+ ("read_only_space", 0x025d1): (175, "ScopeInfoMap"),
("read_only_space", 0x025f9): (176, "SharedFunctionInfoMap"),
- ("read_only_space", 0x02621): (161, "CodeMap"),
- ("read_only_space", 0x02649): (160, "CellMap"),
- ("read_only_space", 0x02671): (175, "GlobalPropertyCellMap"),
+ ("read_only_space", 0x02621): (160, "CodeMap"),
+ ("read_only_space", 0x02649): (159, "CellMap"),
+ ("read_only_space", 0x02671): (174, "GlobalPropertyCellMap"),
("read_only_space", 0x02699): (70, "ForeignMap"),
- ("read_only_space", 0x026c1): (159, "TransitionArrayMap"),
+ ("read_only_space", 0x026c1): (158, "TransitionArrayMap"),
("read_only_space", 0x026e9): (45, "ThinOneByteStringMap"),
- ("read_only_space", 0x02711): (166, "FeedbackVectorMap"),
- ("read_only_space", 0x0274d): (67, "ArgumentsMarkerMap"),
- ("read_only_space", 0x027ad): (67, "ExceptionMap"),
- ("read_only_space", 0x02809): (67, "TerminationExceptionMap"),
- ("read_only_space", 0x02871): (67, "OptimizedOutMap"),
- ("read_only_space", 0x028d1): (67, "StaleRegisterMap"),
- ("read_only_space", 0x02931): (129, "ScriptContextTableMap"),
- ("read_only_space", 0x02959): (127, "ClosureFeedbackCellArrayMap"),
- ("read_only_space", 0x02981): (165, "FeedbackMetadataArrayMap"),
- ("read_only_space", 0x029a9): (117, "ArrayListMap"),
- ("read_only_space", 0x029d1): (65, "BigIntMap"),
- ("read_only_space", 0x029f9): (128, "ObjectBoilerplateDescriptionMap"),
- ("read_only_space", 0x02a21): (131, "BytecodeArrayMap"),
- ("read_only_space", 0x02a49): (162, "CodeDataContainerMap"),
- ("read_only_space", 0x02a71): (163, "CoverageInfoMap"),
- ("read_only_space", 0x02a99): (132, "FixedDoubleArrayMap"),
- ("read_only_space", 0x02ac1): (120, "GlobalDictionaryMap"),
- ("read_only_space", 0x02ae9): (98, "ManyClosuresCellMap"),
- ("read_only_space", 0x02b11): (117, "ModuleInfoMap"),
- ("read_only_space", 0x02b39): (121, "NameDictionaryMap"),
- ("read_only_space", 0x02b61): (98, "NoClosuresCellMap"),
- ("read_only_space", 0x02b89): (122, "NumberDictionaryMap"),
- ("read_only_space", 0x02bb1): (98, "OneClosureCellMap"),
- ("read_only_space", 0x02bd9): (123, "OrderedHashMapMap"),
- ("read_only_space", 0x02c01): (124, "OrderedHashSetMap"),
- ("read_only_space", 0x02c29): (125, "OrderedNameDictionaryMap"),
- ("read_only_space", 0x02c51): (173, "PreparseDataMap"),
- ("read_only_space", 0x02c79): (174, "PropertyArrayMap"),
- ("read_only_space", 0x02ca1): (94, "SideEffectCallHandlerInfoMap"),
- ("read_only_space", 0x02cc9): (94, "SideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x02cf1): (94, "NextCallSideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x02d19): (126, "SimpleNumberDictionaryMap"),
- ("read_only_space", 0x02d41): (149, "SmallOrderedHashMapMap"),
- ("read_only_space", 0x02d69): (150, "SmallOrderedHashSetMap"),
- ("read_only_space", 0x02d91): (151, "SmallOrderedNameDictionaryMap"),
- ("read_only_space", 0x02db9): (154, "SourceTextModuleMap"),
- ("read_only_space", 0x02de1): (180, "SwissNameDictionaryMap"),
- ("read_only_space", 0x02e09): (155, "SyntheticModuleMap"),
- ("read_only_space", 0x02e31): (71, "WasmTypeInfoMap"),
- ("read_only_space", 0x02e59): (184, "WeakArrayListMap"),
- ("read_only_space", 0x02e81): (119, "EphemeronHashTableMap"),
- ("read_only_space", 0x02ea9): (164, "EmbedderDataArrayMap"),
- ("read_only_space", 0x02ed1): (185, "WeakCellMap"),
- ("read_only_space", 0x02ef9): (32, "StringMap"),
- ("read_only_space", 0x02f21): (41, "ConsOneByteStringMap"),
- ("read_only_space", 0x02f49): (33, "ConsStringMap"),
- ("read_only_space", 0x02f71): (37, "ThinStringMap"),
- ("read_only_space", 0x02f99): (35, "SlicedStringMap"),
- ("read_only_space", 0x02fc1): (43, "SlicedOneByteStringMap"),
- ("read_only_space", 0x02fe9): (34, "ExternalStringMap"),
- ("read_only_space", 0x03011): (42, "ExternalOneByteStringMap"),
- ("read_only_space", 0x03039): (50, "UncachedExternalStringMap"),
- ("read_only_space", 0x03061): (0, "InternalizedStringMap"),
- ("read_only_space", 0x03089): (2, "ExternalInternalizedStringMap"),
- ("read_only_space", 0x030b1): (10, "ExternalOneByteInternalizedStringMap"),
- ("read_only_space", 0x030d9): (18, "UncachedExternalInternalizedStringMap"),
- ("read_only_space", 0x03101): (26, "UncachedExternalOneByteInternalizedStringMap"),
- ("read_only_space", 0x03129): (58, "UncachedExternalOneByteStringMap"),
- ("read_only_space", 0x03151): (67, "SelfReferenceMarkerMap"),
- ("read_only_space", 0x03179): (67, "BasicBlockCountersMarkerMap"),
- ("read_only_space", 0x031bd): (87, "ArrayBoilerplateDescriptionMap"),
- ("read_only_space", 0x032bd): (100, "InterceptorInfoMap"),
- ("read_only_space", 0x05411): (72, "PromiseFulfillReactionJobTaskMap"),
- ("read_only_space", 0x05439): (73, "PromiseRejectReactionJobTaskMap"),
- ("read_only_space", 0x05461): (74, "CallableTaskMap"),
- ("read_only_space", 0x05489): (75, "CallbackTaskMap"),
- ("read_only_space", 0x054b1): (76, "PromiseResolveThenableJobTaskMap"),
- ("read_only_space", 0x054d9): (79, "FunctionTemplateInfoMap"),
- ("read_only_space", 0x05501): (80, "ObjectTemplateInfoMap"),
- ("read_only_space", 0x05529): (81, "AccessCheckInfoMap"),
- ("read_only_space", 0x05551): (82, "AccessorInfoMap"),
- ("read_only_space", 0x05579): (83, "AccessorPairMap"),
- ("read_only_space", 0x055a1): (84, "AliasedArgumentsEntryMap"),
- ("read_only_space", 0x055c9): (85, "AllocationMementoMap"),
- ("read_only_space", 0x055f1): (88, "AsmWasmDataMap"),
- ("read_only_space", 0x05619): (89, "AsyncGeneratorRequestMap"),
- ("read_only_space", 0x05641): (90, "BaselineDataMap"),
- ("read_only_space", 0x05669): (91, "BreakPointMap"),
- ("read_only_space", 0x05691): (92, "BreakPointInfoMap"),
- ("read_only_space", 0x056b9): (93, "CachedTemplateObjectMap"),
- ("read_only_space", 0x056e1): (95, "ClassPositionsMap"),
- ("read_only_space", 0x05709): (96, "DebugInfoMap"),
- ("read_only_space", 0x05731): (99, "FunctionTemplateRareDataMap"),
- ("read_only_space", 0x05759): (101, "InterpreterDataMap"),
- ("read_only_space", 0x05781): (102, "ModuleRequestMap"),
- ("read_only_space", 0x057a9): (103, "PromiseCapabilityMap"),
- ("read_only_space", 0x057d1): (104, "PromiseReactionMap"),
- ("read_only_space", 0x057f9): (105, "PropertyDescriptorObjectMap"),
- ("read_only_space", 0x05821): (106, "PrototypeInfoMap"),
- ("read_only_space", 0x05849): (107, "RegExpBoilerplateDescriptionMap"),
- ("read_only_space", 0x05871): (108, "ScriptMap"),
- ("read_only_space", 0x05899): (109, "SourceTextModuleInfoEntryMap"),
- ("read_only_space", 0x058c1): (110, "StackFrameInfoMap"),
- ("read_only_space", 0x058e9): (111, "TemplateObjectDescriptionMap"),
- ("read_only_space", 0x05911): (112, "Tuple2Map"),
- ("read_only_space", 0x05939): (113, "WasmExceptionTagMap"),
- ("read_only_space", 0x05961): (114, "WasmExportedFunctionDataMap"),
- ("read_only_space", 0x05989): (115, "WasmIndirectFunctionTableMap"),
- ("read_only_space", 0x059b1): (116, "WasmJSFunctionDataMap"),
- ("read_only_space", 0x059d9): (135, "SloppyArgumentsElementsMap"),
- ("read_only_space", 0x05a01): (152, "DescriptorArrayMap"),
- ("read_only_space", 0x05a29): (157, "UncompiledDataWithoutPreparseDataMap"),
- ("read_only_space", 0x05a51): (156, "UncompiledDataWithPreparseDataMap"),
- ("read_only_space", 0x05a79): (172, "OnHeapBasicBlockProfilerDataMap"),
- ("read_only_space", 0x05aa1): (182, "WasmCapiFunctionDataMap"),
- ("read_only_space", 0x05ac9): (169, "InternalClassMap"),
- ("read_only_space", 0x05af1): (178, "SmiPairMap"),
- ("read_only_space", 0x05b19): (177, "SmiBoxMap"),
- ("read_only_space", 0x05b41): (146, "ExportedSubClassBaseMap"),
- ("read_only_space", 0x05b69): (147, "ExportedSubClassMap"),
- ("read_only_space", 0x05b91): (68, "AbstractInternalClassSubclass1Map"),
- ("read_only_space", 0x05bb9): (69, "AbstractInternalClassSubclass2Map"),
- ("read_only_space", 0x05be1): (133, "InternalClassWithSmiElementsMap"),
- ("read_only_space", 0x05c09): (170, "InternalClassWithStructElementsMap"),
- ("read_only_space", 0x05c31): (148, "ExportedSubClass2Map"),
- ("read_only_space", 0x05c59): (179, "SortStateMap"),
- ("read_only_space", 0x05c81): (86, "AllocationSiteWithWeakNextMap"),
- ("read_only_space", 0x05ca9): (86, "AllocationSiteWithoutWeakNextMap"),
- ("read_only_space", 0x05cd1): (77, "LoadHandler1Map"),
- ("read_only_space", 0x05cf9): (77, "LoadHandler2Map"),
- ("read_only_space", 0x05d21): (77, "LoadHandler3Map"),
- ("read_only_space", 0x05d49): (78, "StoreHandler0Map"),
- ("read_only_space", 0x05d71): (78, "StoreHandler1Map"),
- ("read_only_space", 0x05d99): (78, "StoreHandler2Map"),
- ("read_only_space", 0x05dc1): (78, "StoreHandler3Map"),
+ ("read_only_space", 0x02711): (165, "FeedbackVectorMap"),
+ ("read_only_space", 0x02749): (67, "ArgumentsMarkerMap"),
+ ("read_only_space", 0x027a9): (67, "ExceptionMap"),
+ ("read_only_space", 0x02805): (67, "TerminationExceptionMap"),
+ ("read_only_space", 0x0286d): (67, "OptimizedOutMap"),
+ ("read_only_space", 0x028cd): (67, "StaleRegisterMap"),
+ ("read_only_space", 0x0292d): (129, "ScriptContextTableMap"),
+ ("read_only_space", 0x02955): (127, "ClosureFeedbackCellArrayMap"),
+ ("read_only_space", 0x0297d): (164, "FeedbackMetadataArrayMap"),
+ ("read_only_space", 0x029a5): (117, "ArrayListMap"),
+ ("read_only_space", 0x029cd): (65, "BigIntMap"),
+ ("read_only_space", 0x029f5): (128, "ObjectBoilerplateDescriptionMap"),
+ ("read_only_space", 0x02a1d): (131, "BytecodeArrayMap"),
+ ("read_only_space", 0x02a45): (161, "CodeDataContainerMap"),
+ ("read_only_space", 0x02a6d): (162, "CoverageInfoMap"),
+ ("read_only_space", 0x02a95): (132, "FixedDoubleArrayMap"),
+ ("read_only_space", 0x02abd): (120, "GlobalDictionaryMap"),
+ ("read_only_space", 0x02ae5): (98, "ManyClosuresCellMap"),
+ ("read_only_space", 0x02b0d): (117, "ModuleInfoMap"),
+ ("read_only_space", 0x02b35): (121, "NameDictionaryMap"),
+ ("read_only_space", 0x02b5d): (98, "NoClosuresCellMap"),
+ ("read_only_space", 0x02b85): (122, "NumberDictionaryMap"),
+ ("read_only_space", 0x02bad): (98, "OneClosureCellMap"),
+ ("read_only_space", 0x02bd5): (123, "OrderedHashMapMap"),
+ ("read_only_space", 0x02bfd): (124, "OrderedHashSetMap"),
+ ("read_only_space", 0x02c25): (125, "OrderedNameDictionaryMap"),
+ ("read_only_space", 0x02c4d): (172, "PreparseDataMap"),
+ ("read_only_space", 0x02c75): (173, "PropertyArrayMap"),
+ ("read_only_space", 0x02c9d): (94, "SideEffectCallHandlerInfoMap"),
+ ("read_only_space", 0x02cc5): (94, "SideEffectFreeCallHandlerInfoMap"),
+ ("read_only_space", 0x02ced): (94, "NextCallSideEffectFreeCallHandlerInfoMap"),
+ ("read_only_space", 0x02d15): (126, "SimpleNumberDictionaryMap"),
+ ("read_only_space", 0x02d3d): (148, "SmallOrderedHashMapMap"),
+ ("read_only_space", 0x02d65): (149, "SmallOrderedHashSetMap"),
+ ("read_only_space", 0x02d8d): (150, "SmallOrderedNameDictionaryMap"),
+ ("read_only_space", 0x02db5): (153, "SourceTextModuleMap"),
+ ("read_only_space", 0x02ddd): (180, "SwissNameDictionaryMap"),
+ ("read_only_space", 0x02e05): (154, "SyntheticModuleMap"),
+ ("read_only_space", 0x02e2d): (71, "WasmTypeInfoMap"),
+ ("read_only_space", 0x02e55): (184, "WeakArrayListMap"),
+ ("read_only_space", 0x02e7d): (119, "EphemeronHashTableMap"),
+ ("read_only_space", 0x02ea5): (163, "EmbedderDataArrayMap"),
+ ("read_only_space", 0x02ecd): (185, "WeakCellMap"),
+ ("read_only_space", 0x02ef5): (32, "StringMap"),
+ ("read_only_space", 0x02f1d): (41, "ConsOneByteStringMap"),
+ ("read_only_space", 0x02f45): (33, "ConsStringMap"),
+ ("read_only_space", 0x02f6d): (37, "ThinStringMap"),
+ ("read_only_space", 0x02f95): (35, "SlicedStringMap"),
+ ("read_only_space", 0x02fbd): (43, "SlicedOneByteStringMap"),
+ ("read_only_space", 0x02fe5): (34, "ExternalStringMap"),
+ ("read_only_space", 0x0300d): (42, "ExternalOneByteStringMap"),
+ ("read_only_space", 0x03035): (50, "UncachedExternalStringMap"),
+ ("read_only_space", 0x0305d): (0, "InternalizedStringMap"),
+ ("read_only_space", 0x03085): (2, "ExternalInternalizedStringMap"),
+ ("read_only_space", 0x030ad): (10, "ExternalOneByteInternalizedStringMap"),
+ ("read_only_space", 0x030d5): (18, "UncachedExternalInternalizedStringMap"),
+ ("read_only_space", 0x030fd): (26, "UncachedExternalOneByteInternalizedStringMap"),
+ ("read_only_space", 0x03125): (58, "UncachedExternalOneByteStringMap"),
+ ("read_only_space", 0x0314d): (67, "SelfReferenceMarkerMap"),
+ ("read_only_space", 0x03175): (67, "BasicBlockCountersMarkerMap"),
+ ("read_only_space", 0x031b9): (87, "ArrayBoilerplateDescriptionMap"),
+ ("read_only_space", 0x032b9): (100, "InterceptorInfoMap"),
+ ("read_only_space", 0x05401): (72, "PromiseFulfillReactionJobTaskMap"),
+ ("read_only_space", 0x05429): (73, "PromiseRejectReactionJobTaskMap"),
+ ("read_only_space", 0x05451): (74, "CallableTaskMap"),
+ ("read_only_space", 0x05479): (75, "CallbackTaskMap"),
+ ("read_only_space", 0x054a1): (76, "PromiseResolveThenableJobTaskMap"),
+ ("read_only_space", 0x054c9): (79, "FunctionTemplateInfoMap"),
+ ("read_only_space", 0x054f1): (80, "ObjectTemplateInfoMap"),
+ ("read_only_space", 0x05519): (81, "AccessCheckInfoMap"),
+ ("read_only_space", 0x05541): (82, "AccessorInfoMap"),
+ ("read_only_space", 0x05569): (83, "AccessorPairMap"),
+ ("read_only_space", 0x05591): (84, "AliasedArgumentsEntryMap"),
+ ("read_only_space", 0x055b9): (85, "AllocationMementoMap"),
+ ("read_only_space", 0x055e1): (88, "AsmWasmDataMap"),
+ ("read_only_space", 0x05609): (89, "AsyncGeneratorRequestMap"),
+ ("read_only_space", 0x05631): (90, "BaselineDataMap"),
+ ("read_only_space", 0x05659): (91, "BreakPointMap"),
+ ("read_only_space", 0x05681): (92, "BreakPointInfoMap"),
+ ("read_only_space", 0x056a9): (93, "CachedTemplateObjectMap"),
+ ("read_only_space", 0x056d1): (95, "ClassPositionsMap"),
+ ("read_only_space", 0x056f9): (96, "DebugInfoMap"),
+ ("read_only_space", 0x05721): (99, "FunctionTemplateRareDataMap"),
+ ("read_only_space", 0x05749): (101, "InterpreterDataMap"),
+ ("read_only_space", 0x05771): (102, "ModuleRequestMap"),
+ ("read_only_space", 0x05799): (103, "PromiseCapabilityMap"),
+ ("read_only_space", 0x057c1): (104, "PromiseReactionMap"),
+ ("read_only_space", 0x057e9): (105, "PropertyDescriptorObjectMap"),
+ ("read_only_space", 0x05811): (106, "PrototypeInfoMap"),
+ ("read_only_space", 0x05839): (107, "RegExpBoilerplateDescriptionMap"),
+ ("read_only_space", 0x05861): (108, "ScriptMap"),
+ ("read_only_space", 0x05889): (109, "SourceTextModuleInfoEntryMap"),
+ ("read_only_space", 0x058b1): (110, "StackFrameInfoMap"),
+ ("read_only_space", 0x058d9): (111, "TemplateObjectDescriptionMap"),
+ ("read_only_space", 0x05901): (112, "Tuple2Map"),
+ ("read_only_space", 0x05929): (113, "WasmExceptionTagMap"),
+ ("read_only_space", 0x05951): (114, "WasmExportedFunctionDataMap"),
+ ("read_only_space", 0x05979): (115, "WasmIndirectFunctionTableMap"),
+ ("read_only_space", 0x059a1): (116, "WasmJSFunctionDataMap"),
+ ("read_only_space", 0x059c9): (134, "SloppyArgumentsElementsMap"),
+ ("read_only_space", 0x059f1): (151, "DescriptorArrayMap"),
+ ("read_only_space", 0x05a19): (156, "UncompiledDataWithoutPreparseDataMap"),
+ ("read_only_space", 0x05a41): (155, "UncompiledDataWithPreparseDataMap"),
+ ("read_only_space", 0x05a69): (171, "OnHeapBasicBlockProfilerDataMap"),
+ ("read_only_space", 0x05a91): (168, "InternalClassMap"),
+ ("read_only_space", 0x05ab9): (178, "SmiPairMap"),
+ ("read_only_space", 0x05ae1): (177, "SmiBoxMap"),
+ ("read_only_space", 0x05b09): (145, "ExportedSubClassBaseMap"),
+ ("read_only_space", 0x05b31): (146, "ExportedSubClassMap"),
+ ("read_only_space", 0x05b59): (68, "AbstractInternalClassSubclass1Map"),
+ ("read_only_space", 0x05b81): (69, "AbstractInternalClassSubclass2Map"),
+ ("read_only_space", 0x05ba9): (133, "InternalClassWithSmiElementsMap"),
+ ("read_only_space", 0x05bd1): (169, "InternalClassWithStructElementsMap"),
+ ("read_only_space", 0x05bf9): (147, "ExportedSubClass2Map"),
+ ("read_only_space", 0x05c21): (179, "SortStateMap"),
+ ("read_only_space", 0x05c49): (182, "WasmCapiFunctionDataMap"),
+ ("read_only_space", 0x05c71): (86, "AllocationSiteWithWeakNextMap"),
+ ("read_only_space", 0x05c99): (86, "AllocationSiteWithoutWeakNextMap"),
+ ("read_only_space", 0x05cc1): (77, "LoadHandler1Map"),
+ ("read_only_space", 0x05ce9): (77, "LoadHandler2Map"),
+ ("read_only_space", 0x05d11): (77, "LoadHandler3Map"),
+ ("read_only_space", 0x05d39): (78, "StoreHandler0Map"),
+ ("read_only_space", 0x05d61): (78, "StoreHandler1Map"),
+ ("read_only_space", 0x05d89): (78, "StoreHandler2Map"),
+ ("read_only_space", 0x05db1): (78, "StoreHandler3Map"),
("map_space", 0x02119): (1057, "ExternalMap"),
("map_space", 0x02141): (1098, "JSMessageObjectMap"),
}
@@ -406,37 +406,37 @@ KNOWN_OBJECTS = {
("read_only_space", 0x024cd): "FalseValue",
("read_only_space", 0x024fd): "empty_string",
("read_only_space", 0x02739): "EmptyScopeInfo",
- ("read_only_space", 0x02775): "ArgumentsMarker",
- ("read_only_space", 0x027d5): "Exception",
- ("read_only_space", 0x02831): "TerminationException",
- ("read_only_space", 0x02899): "OptimizedOut",
- ("read_only_space", 0x028f9): "StaleRegister",
- ("read_only_space", 0x031a1): "EmptyPropertyArray",
- ("read_only_space", 0x031a9): "EmptyByteArray",
- ("read_only_space", 0x031b1): "EmptyObjectBoilerplateDescription",
- ("read_only_space", 0x031e5): "EmptyArrayBoilerplateDescription",
- ("read_only_space", 0x031f1): "EmptyClosureFeedbackCellArray",
- ("read_only_space", 0x031f9): "EmptySlowElementDictionary",
- ("read_only_space", 0x0321d): "EmptyOrderedHashMap",
- ("read_only_space", 0x03231): "EmptyOrderedHashSet",
- ("read_only_space", 0x03245): "EmptyFeedbackMetadata",
- ("read_only_space", 0x03251): "EmptyPropertyDictionary",
- ("read_only_space", 0x03279): "EmptyOrderedPropertyDictionary",
- ("read_only_space", 0x03291): "EmptySwissPropertyDictionary",
- ("read_only_space", 0x032e5): "NoOpInterceptorInfo",
- ("read_only_space", 0x0330d): "EmptyWeakArrayList",
- ("read_only_space", 0x03319): "InfinityValue",
- ("read_only_space", 0x03325): "MinusZeroValue",
- ("read_only_space", 0x03331): "MinusInfinityValue",
- ("read_only_space", 0x0333d): "SelfReferenceMarker",
- ("read_only_space", 0x0337d): "BasicBlockCountersMarker",
- ("read_only_space", 0x033c1): "OffHeapTrampolineRelocationInfo",
- ("read_only_space", 0x033cd): "TrampolineTrivialCodeDataContainer",
- ("read_only_space", 0x033d9): "TrampolinePromiseRejectionCodeDataContainer",
- ("read_only_space", 0x033e5): "GlobalThisBindingScopeInfo",
- ("read_only_space", 0x0341d): "EmptyFunctionScopeInfo",
- ("read_only_space", 0x03445): "NativeScopeInfo",
- ("read_only_space", 0x03461): "HashSeed",
+ ("read_only_space", 0x02771): "ArgumentsMarker",
+ ("read_only_space", 0x027d1): "Exception",
+ ("read_only_space", 0x0282d): "TerminationException",
+ ("read_only_space", 0x02895): "OptimizedOut",
+ ("read_only_space", 0x028f5): "StaleRegister",
+ ("read_only_space", 0x0319d): "EmptyPropertyArray",
+ ("read_only_space", 0x031a5): "EmptyByteArray",
+ ("read_only_space", 0x031ad): "EmptyObjectBoilerplateDescription",
+ ("read_only_space", 0x031e1): "EmptyArrayBoilerplateDescription",
+ ("read_only_space", 0x031ed): "EmptyClosureFeedbackCellArray",
+ ("read_only_space", 0x031f5): "EmptySlowElementDictionary",
+ ("read_only_space", 0x03219): "EmptyOrderedHashMap",
+ ("read_only_space", 0x0322d): "EmptyOrderedHashSet",
+ ("read_only_space", 0x03241): "EmptyFeedbackMetadata",
+ ("read_only_space", 0x0324d): "EmptyPropertyDictionary",
+ ("read_only_space", 0x03275): "EmptyOrderedPropertyDictionary",
+ ("read_only_space", 0x0328d): "EmptySwissPropertyDictionary",
+ ("read_only_space", 0x032e1): "NoOpInterceptorInfo",
+ ("read_only_space", 0x03309): "EmptyWeakArrayList",
+ ("read_only_space", 0x03315): "InfinityValue",
+ ("read_only_space", 0x03321): "MinusZeroValue",
+ ("read_only_space", 0x0332d): "MinusInfinityValue",
+ ("read_only_space", 0x03339): "SelfReferenceMarker",
+ ("read_only_space", 0x03379): "BasicBlockCountersMarker",
+ ("read_only_space", 0x033bd): "OffHeapTrampolineRelocationInfo",
+ ("read_only_space", 0x033c9): "TrampolineTrivialCodeDataContainer",
+ ("read_only_space", 0x033d5): "TrampolinePromiseRejectionCodeDataContainer",
+ ("read_only_space", 0x033e1): "GlobalThisBindingScopeInfo",
+ ("read_only_space", 0x03415): "EmptyFunctionScopeInfo",
+ ("read_only_space", 0x03439): "NativeScopeInfo",
+ ("read_only_space", 0x03451): "HashSeed",
("old_space", 0x02119): "ArgumentsIteratorAccessor",
("old_space", 0x0215d): "ArrayLengthAccessor",
("old_space", 0x021a1): "BoundFunctionLengthAccessor",
diff --git a/deps/v8/tools/v8windbg/BUILD.gn b/deps/v8/tools/v8windbg/BUILD.gn
index 10d06a127f0..5618d2d9455 100644
--- a/deps/v8/tools/v8windbg/BUILD.gn
+++ b/deps/v8/tools/v8windbg/BUILD.gn
@@ -40,6 +40,8 @@ v8_shared_library("v8windbg") {
"base/dbgext.def",
"src/cur-isolate.cc",
"src/cur-isolate.h",
+ "src/js-stack.cc",
+ "src/js-stack.h",
"src/list-chunks.cc",
"src/list-chunks.h",
"src/local-variables.cc",
diff --git a/deps/v8/tools/v8windbg/README.md b/deps/v8/tools/v8windbg/README.md
index dc0c4e10407..de6638e4d43 100644
--- a/deps/v8/tools/v8windbg/README.md
+++ b/deps/v8/tools/v8windbg/README.md
@@ -43,6 +43,8 @@ functions that can be called from within `dx` commands:
current thread has a JavaScript Isolate associated.
- `@$listchunks()` returns a list of the memory chunks in the Heap for the
current Isolate.
+- `@$jsstack()` returns a list of the JS stack frames, including information
+about script and function.
*Tip:*: to see what objects are present in a chunk of heap memory, you can cast
it to an array of `TaggedValue`, like this:
@@ -67,6 +69,8 @@ functions declared in `dbgext.h` to create and destroy the extension instance.
- `cur-isolate.{cc,h}` implements the `IModelMethod` for `@$curisolate()`.
- `list-chunks.{cc,h}` implements the `IModelMethod` for `@$listchunks()`. Its
result is a custom object that supports iteration and indexing.
+- `js-stack.{cc,h}` implements the `IModelMethod` for `@$jsstack()`. Its
+ result is a custom object that supports iteration and indexing.
- `local-variables.{cc,h}` implements the `IModelPropertyAccessor` that provides
content to show in the Locals pane for stack frames corresponding to builtins
or runtime-generated code.
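For reference, the new alias is exercised further down in v8windbg-test.cc: from a WinDbg session with the extension loaded, a query such as dx @$jsstack()[0].function_name resolves to the function name of the innermost JavaScript frame, and dx @$jsstack() enumerates all recovered frames.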
diff --git a/deps/v8/tools/v8windbg/src/cur-isolate.h b/deps/v8/tools/v8windbg/src/cur-isolate.h
index 2be24ce7fd8..ad6b01a9467 100644
--- a/deps/v8/tools/v8windbg/src/cur-isolate.h
+++ b/deps/v8/tools/v8windbg/src/cur-isolate.h
@@ -17,7 +17,7 @@
HRESULT GetCurrentIsolate(WRL::ComPtr<IModelObject>& sp_result);
-constexpr wchar_t kIsolateKey[] = L"isolate_key_";
+constexpr wchar_t kIsolateKey[] = L"v8::internal::Isolate::isolate_key_";
constexpr wchar_t kIsolate[] = L"v8::internal::Isolate";
class CurrIsolateAlias
diff --git a/deps/v8/tools/v8windbg/src/js-stack.cc b/deps/v8/tools/v8windbg/src/js-stack.cc
new file mode 100644
index 00000000000..c309fe4aa31
--- /dev/null
+++ b/deps/v8/tools/v8windbg/src/js-stack.cc
@@ -0,0 +1,229 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "tools/v8windbg/src/js-stack.h"
+
+HRESULT GetJSStackFrames(WRL::ComPtr<IModelObject>& sp_result) {
+ sp_result = nullptr;
+
+ // Get the current context
+ WRL::ComPtr<IDebugHostContext> sp_host_context;
+ RETURN_IF_FAIL(sp_debug_host->GetCurrentContext(&sp_host_context));
+
+ WRL::ComPtr<IModelObject> sp_curr_thread;
+ RETURN_IF_FAIL(GetCurrentThread(sp_host_context, &sp_curr_thread));
+
+ WRL::ComPtr<IModelObject> sp_stack;
+ RETURN_IF_FAIL(sp_curr_thread->GetKeyValue(L"Stack", &sp_stack, nullptr));
+
+ RETURN_IF_FAIL(sp_stack->GetKeyValue(L"Frames", &sp_result, nullptr));
+
+ return S_OK;
+}
+
+// v8windbg!JSStackAlias::Call
+IFACEMETHODIMP JSStackAlias::Call(IModelObject* p_context_object,
+ ULONG64 arg_count,
+ _In_reads_(arg_count)
+ IModelObject** pp_arguments,
+ IModelObject** pp_result,
+ IKeyStore** pp_metadata) noexcept {
+ WRL::ComPtr<IDebugHostContext> sp_ctx;
+ RETURN_IF_FAIL(sp_debug_host->GetCurrentContext(&sp_ctx));
+
+ WRL::ComPtr<IModelObject> result;
+ RETURN_IF_FAIL(
+ sp_data_model_manager->CreateSyntheticObject(sp_ctx.Get(), &result));
+
+ auto sp_iterator{WRL::Make<StackFrames>()};
+
+ RETURN_IF_FAIL(result->SetConcept(
+ __uuidof(IIndexableConcept),
+ static_cast<IIndexableConcept*>(sp_iterator.Get()), nullptr));
+ RETURN_IF_FAIL(result->SetConcept(
+ __uuidof(IIterableConcept),
+ static_cast<IIterableConcept*>(sp_iterator.Get()), nullptr));
+
+ *pp_result = result.Detach();
+ if (pp_metadata) {
+ *pp_metadata = nullptr;
+ }
+ return S_OK;
+}
+
+FrameData::FrameData() = default;
+FrameData::~FrameData() = default;
+FrameData::FrameData(const FrameData&) = default;
+FrameData::FrameData(FrameData&&) = default;
+FrameData& FrameData::operator=(const FrameData&) = default;
+FrameData& FrameData::operator=(FrameData&&) = default;
+
+StackFrameIterator::StackFrameIterator(
+ WRL::ComPtr<IDebugHostContext>& host_context)
+ : sp_ctx_(host_context) {}
+StackFrameIterator::~StackFrameIterator() = default;
+
+HRESULT StackFrameIterator::PopulateFrameData() {
+ frames_.clear();
+ WRL::ComPtr<IModelObject> sp_frames;
+
+ RETURN_IF_FAIL(GetJSStackFrames(sp_frames));
+
+ // Iterate over the array of frames.
+ WRL::ComPtr<IIterableConcept> sp_iterable;
+ RETURN_IF_FAIL(
+ sp_frames->GetConcept(__uuidof(IIterableConcept), &sp_iterable, nullptr));
+
+ WRL::ComPtr<IModelIterator> sp_frame_iterator;
+ RETURN_IF_FAIL(sp_iterable->GetIterator(sp_frames.Get(), &sp_frame_iterator));
+
+ // Loop through all the frames in the array.
+ WRL::ComPtr<IModelObject> sp_frame;
+ while (sp_frame_iterator->GetNext(&sp_frame, 0, nullptr, nullptr) !=
+ E_BOUNDS) {
+ // Skip non-JS frame (frame that doesn't have a function_name).
+ WRL::ComPtr<IModelObject> sp_local_variables;
+ HRESULT hr =
+ sp_frame->GetKeyValue(L"LocalVariables", &sp_local_variables, nullptr);
+ if (FAILED(hr)) continue;
+
+ WRL::ComPtr<IModelObject> sp_currently_executing_jsfunction;
+ hr = sp_local_variables->GetKeyValue(L"currently_executing_jsfunction",
+ &sp_currently_executing_jsfunction,
+ nullptr);
+ if (FAILED(hr)) continue;
+
+ WRL::ComPtr<IModelObject> sp_function_name, sp_script_name,
+ sp_script_source, sp_function_character_offset;
+ RETURN_IF_FAIL(sp_local_variables->GetKeyValue(L"script_name",
+ &sp_script_name, nullptr));
+ RETURN_IF_FAIL(sp_local_variables->GetKeyValue(L"script_source",
+ &sp_script_source, nullptr));
+ RETURN_IF_FAIL(sp_local_variables->GetKeyValue(L"function_name",
+ &sp_function_name, nullptr));
+ RETURN_IF_FAIL(sp_local_variables->GetKeyValue(
+ L"function_character_offset", &sp_function_character_offset, nullptr));
+
+ FrameData frame_entry;
+ frame_entry.script_name = sp_script_name;
+ frame_entry.script_source = sp_script_source;
+ frame_entry.function_name = sp_function_name;
+ frame_entry.function_character_offset = sp_function_character_offset;
+ frames_.push_back(frame_entry);
+ }
+
+ return S_OK;
+}
+
+IFACEMETHODIMP StackFrameIterator::Reset() noexcept {
+ position_ = 0;
+ return S_OK;
+}
+
+IFACEMETHODIMP StackFrameIterator::GetNext(IModelObject** object,
+ ULONG64 dimensions,
+ IModelObject** indexers,
+ IKeyStore** metadata) noexcept {
+ if (dimensions > 1) return E_INVALIDARG;
+
+ if (position_ == 0) {
+ RETURN_IF_FAIL(PopulateFrameData());
+ }
+
+ if (metadata != nullptr) *metadata = nullptr;
+
+ WRL::ComPtr<IModelObject> sp_index, sp_value;
+
+ if (dimensions == 1) {
+ RETURN_IF_FAIL(CreateULong64(position_, &sp_index));
+ }
+
+ RETURN_IF_FAIL(GetAt(position_, &sp_value));
+
+ // Now update counter and transfer ownership of results, because nothing can
+ // fail from this point onward.
+ ++position_;
+ if (dimensions == 1) {
+ *indexers = sp_index.Detach();
+ }
+ *object = sp_value.Detach();
+ return S_OK;
+}
+
+HRESULT StackFrameIterator::GetAt(uint64_t index, IModelObject** result) const {
+ if (index >= frames_.size()) return E_BOUNDS;
+
+ // Create the synthetic object representing the frame here.
+ const FrameData& curr_frame = frames_.at(index);
+ WRL::ComPtr<IModelObject> sp_value;
+ RETURN_IF_FAIL(
+ sp_data_model_manager->CreateSyntheticObject(sp_ctx_.Get(), &sp_value));
+ RETURN_IF_FAIL(
+ sp_value->SetKey(L"script_name", curr_frame.script_name.Get(), nullptr));
+ RETURN_IF_FAIL(sp_value->SetKey(L"script_source",
+ curr_frame.script_source.Get(), nullptr));
+ RETURN_IF_FAIL(sp_value->SetKey(L"function_name",
+ curr_frame.function_name.Get(), nullptr));
+ RETURN_IF_FAIL(sp_value->SetKey(L"function_character_offset",
+ curr_frame.function_character_offset.Get(),
+ nullptr));
+
+ *result = sp_value.Detach();
+ return S_OK;
+}
+
+StackFrames::StackFrames() = default;
+StackFrames::~StackFrames() = default;
+
+IFACEMETHODIMP StackFrames::GetDimensionality(
+ IModelObject* context_object, ULONG64* dimensionality) noexcept {
+ *dimensionality = 1;
+ return S_OK;
+}
+
+IFACEMETHODIMP StackFrames::GetAt(IModelObject* context_object,
+ ULONG64 indexer_count,
+ IModelObject** indexers,
+ IModelObject** object,
+ IKeyStore** metadata) noexcept {
+ if (indexer_count != 1) return E_INVALIDARG;
+ if (metadata != nullptr) *metadata = nullptr;
+ WRL::ComPtr<IDebugHostContext> sp_ctx;
+ RETURN_IF_FAIL(context_object->GetContext(&sp_ctx));
+
+ // This should be instantiated once for each synthetic object returned,
+ // so should be able to cache/reuse an iterator.
+ if (opt_frames_ == nullptr) {
+ opt_frames_ = WRL::Make<StackFrameIterator>(sp_ctx);
+ _ASSERT(opt_frames_ != nullptr);
+ RETURN_IF_FAIL(opt_frames_->PopulateFrameData());
+ }
+
+ uint64_t index;
+ RETURN_IF_FAIL(UnboxULong64(indexers[0], &index, true /*convert*/));
+
+ return opt_frames_->GetAt(index, object);
+}
+
+IFACEMETHODIMP StackFrames::SetAt(IModelObject* context_object,
+ ULONG64 indexer_count,
+ IModelObject** indexers,
+ IModelObject* value) noexcept {
+ return E_NOTIMPL;
+}
+
+IFACEMETHODIMP StackFrames::GetDefaultIndexDimensionality(
+ IModelObject* context_object, ULONG64* dimensionality) noexcept {
+ *dimensionality = 1;
+ return S_OK;
+}
+
+IFACEMETHODIMP StackFrames::GetIterator(IModelObject* context_object,
+ IModelIterator** iterator) noexcept {
+ WRL::ComPtr<IDebugHostContext> sp_ctx;
+ RETURN_IF_FAIL(context_object->GetContext(&sp_ctx));
+ auto sp_memory_iterator{WRL::Make<StackFrameIterator>(sp_ctx)};
+ *iterator = sp_memory_iterator.Detach();
+ return S_OK;
+}
diff --git a/deps/v8/tools/v8windbg/src/js-stack.h b/deps/v8/tools/v8windbg/src/js-stack.h
new file mode 100644
index 00000000000..cc6b11b34b3
--- /dev/null
+++ b/deps/v8/tools/v8windbg/src/js-stack.h
@@ -0,0 +1,98 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TOOLS_V8WINDBG_SRC_JS_STACK_H_
+#define V8_TOOLS_V8WINDBG_SRC_JS_STACK_H_
+
+#include <crtdbg.h>
+#include <wrl/implements.h>
+
+#include <string>
+#include <vector>
+
+#include "src/base/optional.h"
+#include "tools/v8windbg/base/utilities.h"
+#include "tools/v8windbg/src/v8-debug-helper-interop.h"
+#include "tools/v8windbg/src/v8windbg-extension.h"
+
+class JSStackAlias
+ : public WRL::RuntimeClass<
+ WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+ IModelMethod> {
+ public:
+ IFACEMETHOD(Call)
+ (IModelObject* p_context_object, ULONG64 arg_count,
+ _In_reads_(arg_count) IModelObject** pp_arguments, IModelObject** pp_result,
+ IKeyStore** pp_metadata);
+};
+
+struct FrameData {
+ FrameData();
+ ~FrameData();
+ FrameData(const FrameData&);
+ FrameData(FrameData&&);
+ FrameData& operator=(const FrameData&);
+ FrameData& operator=(FrameData&&);
+ WRL::ComPtr<IModelObject> script_name;
+ WRL::ComPtr<IModelObject> script_source;
+ WRL::ComPtr<IModelObject> function_name;
+ WRL::ComPtr<IModelObject> function_character_offset;
+};
+
+class StackFrameIterator
+ : public WRL::RuntimeClass<
+ WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+ IModelIterator> {
+ public:
+ StackFrameIterator(WRL::ComPtr<IDebugHostContext>& host_context);
+ ~StackFrameIterator() override;
+
+ HRESULT PopulateFrameData();
+
+ IFACEMETHOD(Reset)();
+
+ IFACEMETHOD(GetNext)
+ (IModelObject** object, ULONG64 dimensions, IModelObject** indexers,
+ IKeyStore** metadata);
+
+ HRESULT GetAt(uint64_t index, IModelObject** result) const;
+
+ private:
+ ULONG position_ = 0;
+ std::vector<FrameData> frames_;
+ WRL::ComPtr<IDebugHostContext> sp_ctx_;
+};
+
+class StackFrames
+ : public WRL::RuntimeClass<
+ WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+ IIndexableConcept, IIterableConcept> {
+ public:
+ StackFrames();
+ ~StackFrames() override;
+
+ // IIndexableConcept members
+ IFACEMETHOD(GetDimensionality)
+ (IModelObject* context_object, ULONG64* dimensionality);
+
+ IFACEMETHOD(GetAt)
+ (IModelObject* context_object, ULONG64 indexer_count, IModelObject** indexers,
+ IModelObject** object, IKeyStore** metadata);
+
+ IFACEMETHOD(SetAt)
+ (IModelObject* context_object, ULONG64 indexer_count, IModelObject** indexers,
+ IModelObject* value);
+
+ // IIterableConcept
+ IFACEMETHOD(GetDefaultIndexDimensionality)
+ (IModelObject* context_object, ULONG64* dimensionality);
+
+ IFACEMETHOD(GetIterator)
+ (IModelObject* context_object, IModelIterator** iterator);
+
+ private:
+ WRL::ComPtr<StackFrameIterator> opt_frames_;
+};
+
+#endif // V8_TOOLS_V8WINDBG_SRC_JS_STACK_H_
diff --git a/deps/v8/tools/v8windbg/src/v8windbg-extension.cc b/deps/v8/tools/v8windbg/src/v8windbg-extension.cc
index 58a520cff1f..7fbe39d1920 100644
--- a/deps/v8/tools/v8windbg/src/v8windbg-extension.cc
+++ b/deps/v8/tools/v8windbg/src/v8windbg-extension.cc
@@ -8,12 +8,14 @@
#include "tools/v8windbg/base/utilities.h"
#include "tools/v8windbg/src/cur-isolate.h"
+#include "tools/v8windbg/src/js-stack.h"
#include "tools/v8windbg/src/list-chunks.h"
#include "tools/v8windbg/src/local-variables.h"
#include "tools/v8windbg/src/object-inspection.h"
std::unique_ptr<Extension> Extension::current_extension_ = nullptr;
const wchar_t* pcur_isolate = L"curisolate";
+const wchar_t* pjs_stack = L"jsstack";
const wchar_t* plist_chunks = L"listchunks";
const wchar_t* pv8_object = L"v8object";
@@ -260,6 +262,7 @@ HRESULT Extension::Initialize() {
// Register all function aliases.
std::vector<std::pair<const wchar_t*, WRL::ComPtr<IModelMethod>>> functions =
{{pcur_isolate, WRL::Make<CurrIsolateAlias>()},
+ {pjs_stack, WRL::Make<JSStackAlias>()},
{plist_chunks, WRL::Make<ListChunksAlias>()},
{pv8_object, WRL::Make<InspectV8ObjectMethod>()}};
for (const auto& function : functions) {
@@ -371,6 +374,7 @@ Extension::RegistrationType& Extension::RegistrationType::operator=(
Extension::~Extension() {
sp_debug_host_extensibility->DestroyFunctionAlias(pcur_isolate);
+ sp_debug_host_extensibility->DestroyFunctionAlias(pjs_stack);
sp_debug_host_extensibility->DestroyFunctionAlias(plist_chunks);
sp_debug_host_extensibility->DestroyFunctionAlias(pv8_object);
diff --git a/deps/v8/tools/v8windbg/test/v8windbg-test.cc b/deps/v8/tools/v8windbg/test/v8windbg-test.cc
index 6b40af0ff17..bb9e42e06f3 100644
--- a/deps/v8/tools/v8windbg/test/v8windbg-test.cc
+++ b/deps/v8/tools/v8windbg/test/v8windbg-test.cc
@@ -226,12 +226,26 @@ void RunTests() {
"dx object.Value.map.instance_descriptors.descriptors[1].key",
{"\"secondProp\"", "SeqOneByteString"}, &output, p_debug_control.Get());
- RunAndCheckOutput(
- "local variables",
- "dx -r1 @$curthread.Stack.Frames.Where(f => "
- "f.ToDisplayString().Contains(\"InterpreterEntryTrampoline\")).Skip(1)."
- "First().LocalVariables.@\"memory interpreted as Objects\"",
- {"\"hello\""}, &output, p_debug_control.Get());
+ // TODO(v8:11527): enable this when symbol information for the in-Isolate
+ // builtins is available.
+ // RunAndCheckOutput(
+ // "local variables",
+ // "dx -r1 @$curthread.Stack.Frames.Where(f => "
+ // "f.ToDisplayString().Contains(\"InterpreterEntryTrampoline\")).Skip(1)."
+ // "First().LocalVariables.@\"memory interpreted as Objects\"",
+ // {"\"hello\""}, &output, p_debug_control.Get());
+
+ RunAndCheckOutput("js stack", "dx @$jsstack()[0].function_name",
+ {"\"a\"", "SeqOneByteString"}, &output,
+ p_debug_control.Get());
+
+ RunAndCheckOutput("js stack", "dx @$jsstack()[1].function_name",
+ {"\"b\"", "SeqOneByteString"}, &output,
+ p_debug_control.Get());
+
+ RunAndCheckOutput("js stack", "dx @$jsstack()[2].function_name",
+ {"empty_string \"\"", "SeqOneByteString"}, &output,
+ p_debug_control.Get());
// Detach before exiting
hr = p_client->DetachProcesses();
diff --git a/deps/v8/tools/vim/ninja-build.vim b/deps/v8/tools/vim/ninja-build.vim
index 7c885255ce9..e10da37759e 100644
--- a/deps/v8/tools/vim/ninja-build.vim
+++ b/deps/v8/tools/vim/ninja-build.vim
@@ -19,7 +19,7 @@
" Add the following to your .vimrc file:
" so /path/to/src/tools/vim/ninja-build.vim
-python << endpython
+pythonx << endpython
import os
import vim
@@ -47,7 +47,7 @@ def path_to_build_dir(configuration):
"""Returns <v8_root>/<output_dir>/(Release|Debug)."""
v8_root = path_to_source_root()
- sys.path.append(os.path.join(v8_root, 'tools', 'ninja'))
+ sys.path.append(os.path.join(v8_root, 'tools', 'vim'))
from ninja_output import GetNinjaOutputDirectory
return GetNinjaOutputDirectory(v8_root, configuration)
@@ -75,7 +75,11 @@ endpython
fun! s:MakeWithCustomCommand(build_cmd)
let l:oldmakepgr = &makeprg
let &makeprg=a:build_cmd
- silent make | cwindow
+ if exists(':Make') == 2
+ Make
+ else
+ silent make | cwindow
+ endif
if !has('gui_running')
redraw!
endif
@@ -83,11 +87,11 @@ fun! s:MakeWithCustomCommand(build_cmd)
endfun
fun! s:NinjaCommandForCurrentBuffer()
- python compute_ninja_command_for_current_buffer()
+ pythonx compute_ninja_command_for_current_buffer()
endfun
fun! s:NinjaCommandForTargets(targets)
- python compute_ninja_command_for_targets(vim.eval('a:targets'))
+ pythonx compute_ninja_command_for_targets(vim.eval('a:targets'))
endfun
fun! CrCompileFile()
diff --git a/deps/v8/tools/vim/ninja_output.py b/deps/v8/tools/vim/ninja_output.py
new file mode 100644
index 00000000000..09f1d7871d7
--- /dev/null
+++ b/deps/v8/tools/vim/ninja_output.py
@@ -0,0 +1,72 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import print_function
+
+import sys
+import os
+import itertools
+import re
+
+try:
+ from exceptions import RuntimeError
+except ImportError:
+ pass
+
+
+def GetNinjaOutputDirectory(v8_root, configuration=None):
+ """Returns <v8_root>/<output_dir>/(Release|Debug|<other>).
+
+ The configuration chosen is the one most recently generated/built, but can be
+ overriden via the <configuration> parameter. Detects a custom output_dir
+ specified by GYP_GENERATOR_FLAGS."""
+
+ output_dirs = []
+
+ generator_flags = os.getenv('GYP_GENERATOR_FLAGS', '').split(' ')
+ for flag in generator_flags:
+ name_value = flag.split('=', 1)
+ if (len(name_value) == 2 and name_value[0] == 'output_dir' and
+ os.path.isdir(os.path.join(v8_root, name_value[1]))):
+ output_dirs = [name_value[1]]
+
+ if configuration:
+ output_dir = 'out' if len(output_dirs) == 0 else output_dirs[-1]
+ return os.path.join(os.path.join(v8_root, output_dir), configuration)
+
+ if not output_dirs:
+ for f in os.listdir(v8_root):
+ if re.match(r'out(\b|_)', f):
+ if os.path.isdir(os.path.join(v8_root, f)):
+ output_dirs.append(f)
+
+ def generate_paths():
+ for out_dir in output_dirs:
+ out_path = os.path.join(v8_root, out_dir)
+ for config in os.listdir(out_path):
+ path = os.path.join(out_path, config)
+ if os.path.exists(os.path.join(path, 'build.ninja')):
+ yield path
+
+ def approx_directory_mtime(path):
+ # This is a heuristic; don't recurse into subdirectories.
+ paths = [path] + [os.path.join(path, f) for f in os.listdir(path)]
+ return max(filter(None, [safe_mtime(p) for p in paths]))
+
+ def safe_mtime(path):
+ try:
+ return os.path.getmtime(path)
+ except OSError:
+ return None
+
+ try:
+ return max(generate_paths(), key=approx_directory_mtime)
+ except ValueError:
+ raise RuntimeError('Unable to find a valid ninja output directory.')
+
+
+if __name__ == '__main__':
+ if len(sys.argv) != 2:
+ raise RuntimeError('Expected a single path argument.')
+ print(GetNinjaOutputDirectory(sys.argv[1]))
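A minimal usage sketch for the helper added above, assuming a hypothetical checkout path and that tools/vim is on the module path (ninja-build.vim arranges this via sys.path.append before importing):

# Hypothetical usage; '/path/to/v8' is a placeholder checkout path.
from ninja_output import GetNinjaOutputDirectory

print(GetNinjaOutputDirectory('/path/to/v8'))             # most recently built out*/<config>
print(GetNinjaOutputDirectory('/path/to/v8', 'Release'))  # force the Release configuration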
diff --git a/deps/v8/tools/wasm/update-wasm-spec-tests.sh b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
index 4d352754b73..df5348eb787 100755
--- a/deps/v8/tools/wasm/update-wasm-spec-tests.sh
+++ b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
@@ -71,7 +71,7 @@ log_and_run cp -r ${TMP_DIR}/spec/test/js-api/* ${JS_API_TEST_DIR}/tests
# Generate the proposal tests.
###############################################################################
-repos='bulk-memory-operations reference-types js-types tail-call simd memory64'
+repos='js-types tail-call simd memory64'
for repo in ${repos}; do
echo "Process ${repo}"
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index 3332f7805be..61fd2e94864 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -7,11 +7,11 @@ A Smi balks into a war and says:
The doubles heard this and started to unbox.
The Smi looked at them when a crazy v8-autoroll account showed up...
The autoroller bought a round of Himbeerbrause. Suddenly.....
-The bartender starts to shake the bottles..........................
+The bartender starts to shake the bottles...........................
I can't add trailing whitespaces, so I'm adding this line...........
I'm starting to think that just adding trailing whitespaces might not be bad.
-Because whitespaces are not that funny.....
+Because whitespaces are not that funny......
Today's answer to life the universe and everything is 12950!
Today's answer to life the universe and everything is 6728!
Today's answer to life the universe and everything is 6728!!