diff --git a/deps/v8/.gitattributes b/deps/v8/.gitattributes new file mode 100644 index 00000000000000..d38eef01ae5203 --- /dev/null +++ b/deps/v8/.gitattributes @@ -0,0 +1,2 @@ +# Automatically normalize line endings (to LF) for all text-based files. +* text=auto diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index d1df5403e9c7a0..ba54db5505663d 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -32,6 +32,7 @@ Facebook, Inc. <*@fb.com> Facebook, Inc. <*@oculus.com> Vewd Software AS <*@vewd.com> Groupon <*@groupon.com> +Cloudflare, Inc. <*@cloudflare.com> Aaron Bieber Abdulla Kamar @@ -41,6 +42,7 @@ Alexander Botero-Lowry Alexander Karpinsky Alexandre Vassalotti Alexis Campailla +Amos Lim Andreas Anyuru Andrew Paprocki Andrei Kashcha @@ -61,6 +63,7 @@ Daniel Andersson Daniel Bevenius Daniel James Deon Dior +Dominic Farolini Douglas Crosher Dusan Milosavljevic Erich Ocean @@ -75,6 +78,7 @@ Gwang Yoon Hwang Henrique Ferreiro Hirofumi Mako Honggyu Kim +Ingvar Stepanyan Ioseb Dzmanashvili Isiah Meadows Jaime Bernardo @@ -82,7 +86,6 @@ Jan de Mooij Jan Krems Jay Freeman James Pike -James M Snell Jianghua Yang Joel Stanley Johan Bergström @@ -126,8 +129,10 @@ Paul Lind Qingyan Li Qiuyi Zhang Rafal Krypa +Ray Glover Refael Ackermann Rene Rebe +Rick Waldron Rob Wu Robert Mustacchi Robert Nagy diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn index 1d42461ba73128..456a318c1c6052 100644 --- a/deps/v8/BUILD.gn +++ b/deps/v8/BUILD.gn @@ -70,7 +70,7 @@ declare_args() { # Enable embedded builtins. # TODO(jgruber,v8:6666): Support ia32 and maybe MSVC. - # TODO(jgruber,v8:6666): Re-enable after the M67 branch point. + # TODO(jgruber,v8:6666): Re-enable. v8_enable_embedded_builtins = false # Enable code-generation-time checking of types in the CodeStubAssembler. @@ -190,7 +190,10 @@ v8_toolset_for_shell = "host" config("internal_config") { visibility = [ ":*" ] # Only targets in this file can depend on this. - include_dirs = [ "." 
] + include_dirs = [ + ".", + "$target_gen_dir", + ] if (is_component_build) { defines = [ "BUILDING_V8_SHARED" ] @@ -200,7 +203,10 @@ config("internal_config") { config("internal_config_base") { visibility = [ ":*" ] # Only targets in this file can depend on this. - include_dirs = [ "." ] + include_dirs = [ + ".", + "$target_gen_dir", + ] } # This config should be applied to code using the libplatform. @@ -569,7 +575,14 @@ config("toolchain") { "/wd4703", # Potentially uninitialized local pointer variable. "/wd4709", # Comma operator within array index expr (bugged). "/wd4714", # Function marked forceinline not inlined. + + # MSVC assumes that control can get past an exhaustive switch and then + # warns if there's no return there (see https://crbug.com/v8/7658) + "/wd4715", # Not all control paths return a value. + "/wd4718", # Recursive call has no side-effect. + "/wd4723", # https://crbug.com/v8/7771 + "/wd4724", # https://crbug.com/v8/7771 "/wd4800", # Forcing value to bool. ] } @@ -580,6 +593,10 @@ config("toolchain") { # signed overflow does not occur. Generates false positives (see # http://crbug.com/v8/6341). "-Wno-strict-overflow", + + # GCC assumes that control can get past an exhaustive switch and then + # warns if there's no return there (see https://crbug.com/v8/7658). + "-Wno-return-type", ] } } @@ -827,6 +844,99 @@ action("postmortem-metadata") { rebase_path(sources, root_build_dir) } +torque_files = [ + "src/builtins/base.tq", + "src/builtins/array.tq", + "src/builtins/typed-array.tq", + "test/torque/test-torque.tq", +] + +torque_modules = [ + "base", + "array", + "typed-array", + "test", +] + +action("run_torque") { + visibility = [ + ":*", + "tools/gcmole/:*", + "test/cctest/:*", + ] + + # We reuse the snapshot toolchain for building torque to not build v8_libbase + # on the host more than once. On mips with big endian, the snapshot toolchain + # is the target toolchain and, hence, can't be used. 
+ v8_torque_toolchain = v8_snapshot_toolchain + if (host_cpu == "x64" && + (v8_current_cpu == "mips" || v8_current_cpu == "mips64")) { + v8_torque_toolchain = "//build/toolchain/linux:clang_x64" + } + + deps = [ + ":torque($v8_torque_toolchain)", + ] + + script = "tools/run.py" + + sources = torque_files + + outputs = [ + "$target_gen_dir/torque-generated/builtin-definitions-from-dsl.h", + ] + foreach(module, torque_modules) { + outputs += [ + "$target_gen_dir/torque-generated/builtins-$module-from-dsl-gen.cc", + "$target_gen_dir/torque-generated/builtins-$module-from-dsl-gen.h", + ] + } + + args = [ + "./" + rebase_path(get_label_info(":torque($v8_torque_toolchain)", + "root_out_dir") + "/torque", + root_build_dir), + "-o", + rebase_path("$target_gen_dir/torque-generated", root_build_dir), + ] + + foreach(file, torque_files) { + args += [ rebase_path(file, root_build_dir) ] + } +} + +v8_source_set("torque_generated_core") { + visibility = [ ":*" ] # Only targets in this file can depend on this. + + deps = [ + ":run_torque", + ] + + sources = [ + "$target_gen_dir/torque-generated/builtin-definitions-from-dsl.h", + ] + + configs = [ ":internal_config" ] +} + +v8_source_set("torque_generated_initializers") { + visibility = [ ":*" ] # Only targets in this file can depend on this. + + deps = [ + ":run_torque", + ] + + sources = [] + foreach(module, torque_modules) { + sources += [ + "$target_gen_dir/torque-generated/builtins-$module-from-dsl-gen.cc", + "$target_gen_dir/torque-generated/builtins-$module-from-dsl-gen.h", + ] + } + + configs = [ ":internal_config" ] +} + # Template to generate different V8 snapshots based on different runtime flags. # Can be invoked with run_mksnapshot(). The target will resolve to # run_mksnapshot_. If is "default", no file suffixes will be used. @@ -1054,6 +1164,9 @@ if (v8_use_snapshot && !v8_use_external_startup_data) { ":run_mksnapshot_default", ] + # Do not publicize any header to remove build dependency. 
+ public = [] + sources = [ "$target_gen_dir/experimental-extras-libraries.cc", "$target_gen_dir/extras-libraries.cc", @@ -1104,6 +1217,9 @@ if (v8_use_snapshot && v8_use_external_startup_data) { "src/snapshot/snapshot-external.cc", ] + # Do not publicize any header to remove build dependency. + public = [] + if (v8_enable_embedded_builtins) { sources += [ "$target_gen_dir/embedded.cc" ] @@ -1130,6 +1246,7 @@ v8_source_set("v8_initializers") { ] deps = [ + ":torque_generated_initializers", ":v8_base", ] @@ -1180,8 +1297,8 @@ v8_source_set("v8_initializers") { "src/builtins/builtins-string-gen.cc", "src/builtins/builtins-string-gen.h", "src/builtins/builtins-symbol-gen.cc", - "src/builtins/builtins-typedarray-gen.cc", - "src/builtins/builtins-typedarray-gen.h", + "src/builtins/builtins-typed-array-gen.cc", + "src/builtins/builtins-typed-array-gen.h", "src/builtins/builtins-utils-gen.h", "src/builtins/builtins-wasm-gen.cc", "src/builtins/growable-fixed-array-gen.cc", @@ -1340,6 +1457,7 @@ v8_source_set("v8_base") { "src/api.h", "src/arguments.cc", "src/arguments.h", + "src/asan.h", "src/asmjs/asm-js.cc", "src/asmjs/asm-js.h", "src/asmjs/asm-names.h", @@ -1351,6 +1469,7 @@ v8_source_set("v8_base") { "src/asmjs/asm-types.h", "src/asmjs/switch-logic.cc", "src/asmjs/switch-logic.h", + "src/assembler-arch.h", "src/assembler-inl.h", "src/assembler.cc", "src/assembler.h", @@ -1420,8 +1539,7 @@ v8_source_set("v8_base") { "src/builtins/builtins-sharedarraybuffer.cc", "src/builtins/builtins-string.cc", "src/builtins/builtins-symbol.cc", - "src/builtins/builtins-trace.cc", - "src/builtins/builtins-typedarray.cc", + "src/builtins/builtins-typed-array.cc", "src/builtins/builtins-utils.h", "src/builtins/builtins.cc", "src/builtins/builtins.h", @@ -1439,6 +1557,8 @@ v8_source_set("v8_base") { "src/code-events.h", "src/code-factory.cc", "src/code-factory.h", + "src/code-reference.cc", + "src/code-reference.h", "src/code-stub-assembler.cc", "src/code-stub-assembler.h", 
"src/code-stubs-utils.h", @@ -1498,6 +1618,8 @@ v8_source_set("v8_base") { "src/compiler/common-operator.h", "src/compiler/compiler-source-position-table.cc", "src/compiler/compiler-source-position-table.h", + "src/compiler/constant-folding-reducer.cc", + "src/compiler/constant-folding-reducer.h", "src/compiler/control-equivalence.cc", "src/compiler/control-equivalence.h", "src/compiler/control-flow-optimizer.cc", @@ -1540,8 +1662,6 @@ v8_source_set("v8_base") { "src/compiler/instruction.h", "src/compiler/int64-lowering.cc", "src/compiler/int64-lowering.h", - "src/compiler/js-builtin-reducer.cc", - "src/compiler/js-builtin-reducer.h", "src/compiler/js-call-reducer.cc", "src/compiler/js-call-reducer.h", "src/compiler/js-context-specialization.cc", @@ -1582,6 +1702,8 @@ v8_source_set("v8_base") { "src/compiler/loop-variable-optimizer.h", "src/compiler/machine-graph-verifier.cc", "src/compiler/machine-graph-verifier.h", + "src/compiler/machine-graph.cc", + "src/compiler/machine-graph.h", "src/compiler/machine-operator-reducer.cc", "src/compiler/machine-operator-reducer.h", "src/compiler/machine-operator.cc", @@ -1597,6 +1719,8 @@ v8_source_set("v8_base") { "src/compiler/node-marker.h", "src/compiler/node-matchers.cc", "src/compiler/node-matchers.h", + "src/compiler/node-origin-table.cc", + "src/compiler/node-origin-table.h", "src/compiler/node-properties.cc", "src/compiler/node-properties.h", "src/compiler/node.cc", @@ -1648,6 +1772,8 @@ v8_source_set("v8_base") { "src/compiler/store-store-elimination.h", "src/compiler/type-cache.cc", "src/compiler/type-cache.h", + "src/compiler/type-narrowing-reducer.cc", + "src/compiler/type-narrowing-reducer.h", "src/compiler/typed-optimization.cc", "src/compiler/typed-optimization.h", "src/compiler/typer.cc", @@ -1661,7 +1787,6 @@ v8_source_set("v8_base") { "src/compiler/verifier.h", "src/compiler/wasm-compiler.cc", "src/compiler/wasm-compiler.h", - "src/compiler/wasm-linkage.cc", "src/compiler/zone-stats.cc", 
"src/compiler/zone-stats.h", "src/contexts-inl.h", @@ -1922,11 +2047,11 @@ v8_source_set("v8_base") { "src/lookup-cache.h", "src/lookup.cc", "src/lookup.h", + "src/lsan.h", "src/machine-type.cc", "src/machine-type.h", "src/macro-assembler-inl.h", "src/macro-assembler.h", - "src/managed.h", "src/map-updater.cc", "src/map-updater.h", "src/messages.cc", @@ -1939,6 +2064,8 @@ v8_source_set("v8_base") { "src/objects-printer.cc", "src/objects.cc", "src/objects.h", + "src/objects/api-callbacks-inl.h", + "src/objects/api-callbacks.h", "src/objects/arguments-inl.h", "src/objects/arguments.h", "src/objects/bigint.cc", @@ -1964,6 +2091,9 @@ v8_source_set("v8_base") { "src/objects/js-array.h", "src/objects/js-collection-inl.h", "src/objects/js-collection.h", + "src/objects/js-locale-inl.h", + "src/objects/js-locale.cc", + "src/objects/js-locale.h", "src/objects/js-promise-inl.h", "src/objects/js-promise.h", "src/objects/js-regexp-inl.h", @@ -1973,6 +2103,8 @@ v8_source_set("v8_base") { "src/objects/literal-objects-inl.h", "src/objects/literal-objects.cc", "src/objects/literal-objects.h", + "src/objects/managed.cc", + "src/objects/managed.h", "src/objects/map-inl.h", "src/objects/map.h", "src/objects/maybe-object-inl.h", @@ -1986,6 +2118,9 @@ v8_source_set("v8_base") { "src/objects/name.h", "src/objects/object-macros-undef.h", "src/objects/object-macros.h", + "src/objects/ordered-hash-table-inl.h", + "src/objects/ordered-hash-table.cc", + "src/objects/ordered-hash-table.h", "src/objects/promise-inl.h", "src/objects/promise.h", "src/objects/property-descriptor-object-inl.h", @@ -2002,6 +2137,8 @@ v8_source_set("v8_base") { "src/objects/string.h", "src/objects/template-objects.cc", "src/objects/template-objects.h", + "src/objects/templates-inl.h", + "src/objects/templates.h", "src/optimized-compilation-info.cc", "src/optimized-compilation-info.h", "src/ostreams.cc", @@ -2208,6 +2345,7 @@ v8_source_set("v8_base") { "src/transitions-inl.h", "src/transitions.cc", 
"src/transitions.h", + "src/trap-handler/handler-inside.cc", "src/trap-handler/handler-outside.cc", "src/trap-handler/handler-shared.cc", "src/trap-handler/trap-handler-internal.h", @@ -2248,13 +2386,14 @@ v8_source_set("v8_base") { "src/wasm/baseline/liftoff-assembler.cc", "src/wasm/baseline/liftoff-assembler.h", "src/wasm/baseline/liftoff-compiler.cc", + "src/wasm/baseline/liftoff-compiler.h", "src/wasm/baseline/liftoff-register.h", - "src/wasm/compilation-manager.cc", - "src/wasm/compilation-manager.h", "src/wasm/decoder.h", "src/wasm/function-body-decoder-impl.h", "src/wasm/function-body-decoder.cc", "src/wasm/function-body-decoder.h", + "src/wasm/function-compiler.cc", + "src/wasm/function-compiler.h", "src/wasm/leb-helper.h", "src/wasm/local-decl-encoder.cc", "src/wasm/local-decl-encoder.h", @@ -2268,6 +2407,7 @@ v8_source_set("v8_base") { "src/wasm/signature-map.h", "src/wasm/streaming-decoder.cc", "src/wasm/streaming-decoder.h", + "src/wasm/value-type.h", "src/wasm/wasm-code-manager.cc", "src/wasm/wasm-code-manager.h", "src/wasm/wasm-code-specialization.cc", @@ -2283,6 +2423,7 @@ v8_source_set("v8_base") { "src/wasm/wasm-js.cc", "src/wasm/wasm-js.h", "src/wasm/wasm-limits.h", + "src/wasm/wasm-linkage.h", "src/wasm/wasm-memory.cc", "src/wasm/wasm-memory.h", "src/wasm/wasm-module-builder.cc", @@ -2389,7 +2530,13 @@ v8_source_set("v8_base") { "src/x64/sse-instr.h", ] if (is_linux) { - sources += [ "src/trap-handler/handler-inside.cc" ] + sources += [ + "src/trap-handler/handler-inside-linux.cc", + "src/trap-handler/handler-outside-linux.cc", + ] + } + if (is_win) { + sources += [ "src/trap-handler/handler-outside-win.cc" ] } } else if (v8_current_cpu == "arm") { sources += [ ### gcmole(arch:arm) ### @@ -2598,6 +2745,7 @@ v8_source_set("v8_base") { defines = [] deps = [ + ":torque_generated_core", ":v8_headers", ":v8_libbase", ":v8_libsampler", @@ -2620,6 +2768,9 @@ v8_source_set("v8_base") { "src/intl.h", "src/objects/intl-objects.cc", 
"src/objects/intl-objects.h", + "src/objects/js-locale-inl.h", + "src/objects/js-locale.cc", + "src/objects/js-locale.h", "src/runtime/runtime-intl.cc", ] } @@ -2699,6 +2850,8 @@ v8_component("v8_libbase") { public_configs = [ ":libbase_config" ] + public_deps = [] + data = [] data_deps = [] @@ -2770,6 +2923,7 @@ v8_component("v8_libbase") { "src/base/debug/stack_trace_fuchsia.cc", "src/base/platform/platform-fuchsia.cc", ] + public_deps += [ "//third_party/fuchsia-sdk:launchpad" ] } else if (is_mac) { sources += [ "src/base/debug/stack_trace_posix.cc", @@ -2925,6 +3079,73 @@ if (v8_use_snapshot && current_toolchain == v8_snapshot_toolchain) { } } +if (current_toolchain == v8_snapshot_toolchain) { + v8_executable("torque") { + visibility = [ ":*" ] # Only targets in this file can depend on this. + + defines = [ "ANTLR4CPP_STATIC" ] + + include_dirs = [ + "third_party/antlr4/runtime/Cpp/runtime/src", + "src/torque", + ] + + sources = [ + "src/torque/TorqueBaseVisitor.cpp", + "src/torque/TorqueBaseVisitor.h", + "src/torque/TorqueLexer.cpp", + "src/torque/TorqueLexer.h", + "src/torque/TorqueParser.cpp", + "src/torque/TorqueParser.h", + "src/torque/TorqueVisitor.cpp", + "src/torque/TorqueVisitor.h", + "src/torque/ast-generator.cc", + "src/torque/ast-generator.h", + "src/torque/ast.h", + "src/torque/contextual.h", + "src/torque/declarable.cc", + "src/torque/declarable.h", + "src/torque/declaration-visitor.cc", + "src/torque/declaration-visitor.h", + "src/torque/declarations.cc", + "src/torque/declarations.h", + "src/torque/file-visitor.cc", + "src/torque/file-visitor.h", + "src/torque/global-context.h", + "src/torque/implementation-visitor.cc", + "src/torque/implementation-visitor.h", + "src/torque/scope.cc", + "src/torque/scope.h", + "src/torque/torque.cc", + "src/torque/type-oracle.h", + "src/torque/types.cc", + "src/torque/types.h", + "src/torque/utils.cc", + "src/torque/utils.h", + ] + + deps = [ + ":v8_libbase", + "third_party/antlr4:antlr4", + 
"//build/config:exe_and_shlib_deps", + "//build/win:default_exe_manifest", + ] + + remove_configs = [ + "//build/config/compiler:no_rtti", + "//build/config/compiler:no_exceptions", + ] + + configs = [ + "//build/config/compiler:rtti", + "//build/config/compiler:exceptions", + "third_party/antlr4:antlr-compatibility", + ":external_config", + ":internal_config_base", + ] + } +} + ############################################################################### # Public targets # @@ -2942,7 +3163,6 @@ group("gn_all") { ":d8", ":v8_fuzzers", ":v8_hello_world", - ":v8_parser_shell", ":v8_sample_process", "test:gn_all", "tools:gn_all", @@ -3144,26 +3364,6 @@ v8_executable("v8_sample_process") { ] } -v8_executable("v8_parser_shell") { - sources = [ - "tools/parser-shell.cc", - "tools/shell-utils.h", - ] - - configs = [ - ":external_config", - ":internal_config_base", - ] - - deps = [ - ":v8", - ":v8_libbase", - ":v8_libplatform", - "//build/config:exe_and_shlib_deps", - "//build/win:default_exe_manifest", - ] -} - if (want_v8_shell) { v8_executable("v8_shell") { sources = [ @@ -3302,6 +3502,10 @@ v8_source_set("wasm_module_runner") { "test/common/wasm/wasm-module-runner.h", ] + deps = [ + ":torque_generated_core", + ] + configs = [ ":external_config", ":internal_config_base", @@ -3375,6 +3579,10 @@ v8_source_set("lib_wasm_fuzzer_common") { "test/fuzzer/wasm-fuzzer-common.h", ] + deps = [ + ":torque_generated_core", + ] + configs = [ ":external_config", ":internal_config_base", diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index e6b825092c4a1d..437c09b4d0fb04 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,1388 @@ +2018-05-22: Version 6.8.275 + + Performance and stability improvements on all platforms. + + +2018-05-22: Version 6.8.274 + + Performance and stability improvements on all platforms. + + +2018-05-22: Version 6.8.273 + + Performance and stability improvements on all platforms. 
+ + +2018-05-22: Version 6.8.272 + + Performance and stability improvements on all platforms. + + +2018-05-22: Version 6.8.271 + + Performance and stability improvements on all platforms. + + +2018-05-22: Version 6.8.270 + + Performance and stability improvements on all platforms. + + +2018-05-22: Version 6.8.269 + + Performance and stability improvements on all platforms. + + +2018-05-22: Version 6.8.268 + + Performance and stability improvements on all platforms. + + +2018-05-22: Version 6.8.267 + + Performance and stability improvements on all platforms. + + +2018-05-22: Version 6.8.266 + + Performance and stability improvements on all platforms. + + +2018-05-21: Version 6.8.265 + + Performance and stability improvements on all platforms. + + +2018-05-21: Version 6.8.264 + + Performance and stability improvements on all platforms. + + +2018-05-21: Version 6.8.263 + + Performance and stability improvements on all platforms. + + +2018-05-21: Version 6.8.262 + + Performance and stability improvements on all platforms. + + +2018-05-19: Version 6.8.261 + + Performance and stability improvements on all platforms. + + +2018-05-18: Version 6.8.260 + + Performance and stability improvements on all platforms. + + +2018-05-18: Version 6.8.259 + + Performance and stability improvements on all platforms. + + +2018-05-18: Version 6.8.258 + + Performance and stability improvements on all platforms. + + +2018-05-18: Version 6.8.257 + + Performance and stability improvements on all platforms. + + +2018-05-18: Version 6.8.256 + + Performance and stability improvements on all platforms. + + +2018-05-18: Version 6.8.255 + + Performance and stability improvements on all platforms. + + +2018-05-18: Version 6.8.254 + + Performance and stability improvements on all platforms. + + +2018-05-18: Version 6.8.253 + + Performance and stability improvements on all platforms. + + +2018-05-18: Version 6.8.252 + + Performance and stability improvements on all platforms. 
+ + +2018-05-18: Version 6.8.251 + + Performance and stability improvements on all platforms. + + +2018-05-18: Version 6.8.250 + + Performance and stability improvements on all platforms. + + +2018-05-18: Version 6.8.249 + + Performance and stability improvements on all platforms. + + +2018-05-18: Version 6.8.248 + + Performance and stability improvements on all platforms. + + +2018-05-18: Version 6.8.247 + + Performance and stability improvements on all platforms. + + +2018-05-17: Version 6.8.246 + + Performance and stability improvements on all platforms. + + +2018-05-17: Version 6.8.245 + + Performance and stability improvements on all platforms. + + +2018-05-17: Version 6.8.244 + + Performance and stability improvements on all platforms. + + +2018-05-17: Version 6.8.243 + + Performance and stability improvements on all platforms. + + +2018-05-17: Version 6.8.242 + + Performance and stability improvements on all platforms. + + +2018-05-16: Version 6.8.241 + + Performance and stability improvements on all platforms. + + +2018-05-16: Version 6.8.240 + + Performance and stability improvements on all platforms. + + +2018-05-16: Version 6.8.239 + + Performance and stability improvements on all platforms. + + +2018-05-16: Version 6.8.238 + + Performance and stability improvements on all platforms. + + +2018-05-16: Version 6.8.237 + + Performance and stability improvements on all platforms. + + +2018-05-16: Version 6.8.236 + + Performance and stability improvements on all platforms. + + +2018-05-16: Version 6.8.235 + + Performance and stability improvements on all platforms. + + +2018-05-16: Version 6.8.234 + + Performance and stability improvements on all platforms. + + +2018-05-16: Version 6.8.233 + + Performance and stability improvements on all platforms. + + +2018-05-15: Version 6.8.232 + + Performance and stability improvements on all platforms. + + +2018-05-15: Version 6.8.231 + + Performance and stability improvements on all platforms. 
+ + +2018-05-15: Version 6.8.230 + + Performance and stability improvements on all platforms. + + +2018-05-15: Version 6.8.229 + + Performance and stability improvements on all platforms. + + +2018-05-15: Version 6.8.228 + + Performance and stability improvements on all platforms. + + +2018-05-15: Version 6.8.227 + + Performance and stability improvements on all platforms. + + +2018-05-15: Version 6.8.226 + + Performance and stability improvements on all platforms. + + +2018-05-14: Version 6.8.225 + + Performance and stability improvements on all platforms. + + +2018-05-14: Version 6.8.224 + + Performance and stability improvements on all platforms. + + +2018-05-14: Version 6.8.223 + + Performance and stability improvements on all platforms. + + +2018-05-14: Version 6.8.222 + + Performance and stability improvements on all platforms. + + +2018-05-14: Version 6.8.221 + + Performance and stability improvements on all platforms. + + +2018-05-14: Version 6.8.220 + + Performance and stability improvements on all platforms. + + +2018-05-14: Version 6.8.219 + + Performance and stability improvements on all platforms. + + +2018-05-14: Version 6.8.218 + + Performance and stability improvements on all platforms. + + +2018-05-14: Version 6.8.217 + + Performance and stability improvements on all platforms. + + +2018-05-14: Version 6.8.216 + + Performance and stability improvements on all platforms. + + +2018-05-14: Version 6.8.215 + + Performance and stability improvements on all platforms. + + +2018-05-14: Version 6.8.214 + + Performance and stability improvements on all platforms. + + +2018-05-14: Version 6.8.213 + + Performance and stability improvements on all platforms. + + +2018-05-14: Version 6.8.212 + + Performance and stability improvements on all platforms. + + +2018-05-14: Version 6.8.211 + + Performance and stability improvements on all platforms. + + +2018-05-14: Version 6.8.210 + + Performance and stability improvements on all platforms. 
+ + +2018-05-14: Version 6.8.209 + + Performance and stability improvements on all platforms. + + +2018-05-14: Version 6.8.208 + + Performance and stability improvements on all platforms. + + +2018-05-11: Version 6.8.207 + + Performance and stability improvements on all platforms. + + +2018-05-11: Version 6.8.206 + + Performance and stability improvements on all platforms. + + +2018-05-11: Version 6.8.205 + + Performance and stability improvements on all platforms. + + +2018-05-11: Version 6.8.204 + + Performance and stability improvements on all platforms. + + +2018-05-11: Version 6.8.203 + + Performance and stability improvements on all platforms. + + +2018-05-11: Version 6.8.202 + + Performance and stability improvements on all platforms. + + +2018-05-11: Version 6.8.201 + + Performance and stability improvements on all platforms. + + +2018-05-11: Version 6.8.200 + + Performance and stability improvements on all platforms. + + +2018-05-10: Version 6.8.199 + + Performance and stability improvements on all platforms. + + +2018-05-08: Version 6.8.198 + + Performance and stability improvements on all platforms. + + +2018-05-08: Version 6.8.197 + + Performance and stability improvements on all platforms. + + +2018-05-08: Version 6.8.196 + + Performance and stability improvements on all platforms. + + +2018-05-08: Version 6.8.195 + + Performance and stability improvements on all platforms. + + +2018-05-08: Version 6.8.194 + + Performance and stability improvements on all platforms. + + +2018-05-08: Version 6.8.193 + + Performance and stability improvements on all platforms. + + +2018-05-08: Version 6.8.192 + + Performance and stability improvements on all platforms. + + +2018-05-08: Version 6.8.191 + + Performance and stability improvements on all platforms. + + +2018-05-08: Version 6.8.190 + + Performance and stability improvements on all platforms. + + +2018-05-08: Version 6.8.189 + + Performance and stability improvements on all platforms. 
+ + +2018-05-07: Version 6.8.188 + + Performance and stability improvements on all platforms. + + +2018-05-07: Version 6.8.187 + + Performance and stability improvements on all platforms. + + +2018-05-07: Version 6.8.186 + + Performance and stability improvements on all platforms. + + +2018-05-07: Version 6.8.185 + + Performance and stability improvements on all platforms. + + +2018-05-07: Version 6.8.184 + + Performance and stability improvements on all platforms. + + +2018-05-07: Version 6.8.183 + + Performance and stability improvements on all platforms. + + +2018-05-07: Version 6.8.182 + + Performance and stability improvements on all platforms. + + +2018-05-07: Version 6.8.181 + + Performance and stability improvements on all platforms. + + +2018-05-07: Version 6.8.180 + + Performance and stability improvements on all platforms. + + +2018-05-07: Version 6.8.179 + + Performance and stability improvements on all platforms. + + +2018-05-07: Version 6.8.178 + + Performance and stability improvements on all platforms. + + +2018-05-07: Version 6.8.177 + + Performance and stability improvements on all platforms. + + +2018-05-05: Version 6.8.176 + + Performance and stability improvements on all platforms. + + +2018-05-04: Version 6.8.175 + + Performance and stability improvements on all platforms. + + +2018-05-04: Version 6.8.174 + + Performance and stability improvements on all platforms. + + +2018-05-04: Version 6.8.173 + + Performance and stability improvements on all platforms. + + +2018-05-04: Version 6.8.172 + + Performance and stability improvements on all platforms. + + +2018-05-04: Version 6.8.171 + + Performance and stability improvements on all platforms. + + +2018-05-04: Version 6.8.170 + + Performance and stability improvements on all platforms. + + +2018-05-04: Version 6.8.169 + + Performance and stability improvements on all platforms. + + +2018-05-04: Version 6.8.168 + + Performance and stability improvements on all platforms. 
+ + +2018-05-04: Version 6.8.167 + + Performance and stability improvements on all platforms. + + +2018-05-04: Version 6.8.166 + + Performance and stability improvements on all platforms. + + +2018-05-04: Version 6.8.165 + + Performance and stability improvements on all platforms. + + +2018-05-04: Version 6.8.164 + + Performance and stability improvements on all platforms. + + +2018-05-04: Version 6.8.163 + + Performance and stability improvements on all platforms. + + +2018-05-03: Version 6.8.162 + + Performance and stability improvements on all platforms. + + +2018-05-03: Version 6.8.161 + + Performance and stability improvements on all platforms. + + +2018-05-03: Version 6.8.160 + + Performance and stability improvements on all platforms. + + +2018-05-03: Version 6.8.159 + + Performance and stability improvements on all platforms. + + +2018-05-03: Version 6.8.158 + + Performance and stability improvements on all platforms. + + +2018-05-03: Version 6.8.157 + + Performance and stability improvements on all platforms. + + +2018-05-03: Version 6.8.156 + + Performance and stability improvements on all platforms. + + +2018-05-03: Version 6.8.155 + + Performance and stability improvements on all platforms. + + +2018-05-03: Version 6.8.154 + + Performance and stability improvements on all platforms. + + +2018-05-03: Version 6.8.153 + + Performance and stability improvements on all platforms. + + +2018-05-03: Version 6.8.152 + + Performance and stability improvements on all platforms. + + +2018-05-03: Version 6.8.151 + + Performance and stability improvements on all platforms. + + +2018-05-03: Version 6.8.150 + + Performance and stability improvements on all platforms. + + +2018-05-03: Version 6.8.149 + + Performance and stability improvements on all platforms. + + +2018-05-03: Version 6.8.148 + + Performance and stability improvements on all platforms. + + +2018-05-03: Version 6.8.147 + + Performance and stability improvements on all platforms. 
+ + +2018-05-03: Version 6.8.146 + + Performance and stability improvements on all platforms. + + +2018-05-03: Version 6.8.145 + + Performance and stability improvements on all platforms. + + +2018-05-03: Version 6.8.144 + + Performance and stability improvements on all platforms. + + +2018-05-03: Version 6.8.143 + + Performance and stability improvements on all platforms. + + +2018-05-03: Version 6.8.142 + + Performance and stability improvements on all platforms. + + +2018-05-03: Version 6.8.141 + + Performance and stability improvements on all platforms. + + +2018-05-03: Version 6.8.140 + + Performance and stability improvements on all platforms. + + +2018-05-02: Version 6.8.139 + + Performance and stability improvements on all platforms. + + +2018-05-02: Version 6.8.138 + + Performance and stability improvements on all platforms. + + +2018-05-02: Version 6.8.137 + + Performance and stability improvements on all platforms. + + +2018-05-02: Version 6.8.136 + + Performance and stability improvements on all platforms. + + +2018-05-02: Version 6.8.135 + + Performance and stability improvements on all platforms. + + +2018-05-02: Version 6.8.134 + + Performance and stability improvements on all platforms. + + +2018-05-02: Version 6.8.133 + + Performance and stability improvements on all platforms. + + +2018-05-02: Version 6.8.132 + + Performance and stability improvements on all platforms. + + +2018-05-02: Version 6.8.131 + + Performance and stability improvements on all platforms. + + +2018-05-02: Version 6.8.130 + + Performance and stability improvements on all platforms. + + +2018-05-02: Version 6.8.129 + + Performance and stability improvements on all platforms. + + +2018-05-02: Version 6.8.128 + + Performance and stability improvements on all platforms. + + +2018-05-02: Version 6.8.127 + + Performance and stability improvements on all platforms. + + +2018-05-02: Version 6.8.126 + + Performance and stability improvements on all platforms. 
+ + +2018-05-02: Version 6.8.125 + + Performance and stability improvements on all platforms. + + +2018-05-01: Version 6.8.124 + + Performance and stability improvements on all platforms. + + +2018-05-01: Version 6.8.123 + + Performance and stability improvements on all platforms. + + +2018-05-01: Version 6.8.122 + + Performance and stability improvements on all platforms. + + +2018-05-01: Version 6.8.121 + + Performance and stability improvements on all platforms. + + +2018-05-01: Version 6.8.120 + + Performance and stability improvements on all platforms. + + +2018-04-30: Version 6.8.119 + + Performance and stability improvements on all platforms. + + +2018-04-30: Version 6.8.118 + + Performance and stability improvements on all platforms. + + +2018-04-30: Version 6.8.117 + + Performance and stability improvements on all platforms. + + +2018-04-30: Version 6.8.116 + + Performance and stability improvements on all platforms. + + +2018-04-30: Version 6.8.115 + + Performance and stability improvements on all platforms. + + +2018-04-30: Version 6.8.114 + + Performance and stability improvements on all platforms. + + +2018-04-30: Version 6.8.113 + + Performance and stability improvements on all platforms. + + +2018-04-30: Version 6.8.112 + + Performance and stability improvements on all platforms. + + +2018-04-30: Version 6.8.111 + + Performance and stability improvements on all platforms. + + +2018-04-30: Version 6.8.110 + + Performance and stability improvements on all platforms. + + +2018-04-30: Version 6.8.109 + + Performance and stability improvements on all platforms. + + +2018-04-30: Version 6.8.108 + + Performance and stability improvements on all platforms. + + +2018-04-30: Version 6.8.107 + + Performance and stability improvements on all platforms. + + +2018-04-29: Version 6.8.106 + + Performance and stability improvements on all platforms. + + +2018-04-27: Version 6.8.105 + + Performance and stability improvements on all platforms. 
+ + +2018-04-27: Version 6.8.104 + + Performance and stability improvements on all platforms. + + +2018-04-27: Version 6.8.103 + + Performance and stability improvements on all platforms. + + +2018-04-27: Version 6.8.102 + + Performance and stability improvements on all platforms. + + +2018-04-27: Version 6.8.101 + + Performance and stability improvements on all platforms. + + +2018-04-27: Version 6.8.100 + + Performance and stability improvements on all platforms. + + +2018-04-27: Version 6.8.99 + + Performance and stability improvements on all platforms. + + +2018-04-27: Version 6.8.98 + + Performance and stability improvements on all platforms. + + +2018-04-27: Version 6.8.97 + + Performance and stability improvements on all platforms. + + +2018-04-27: Version 6.8.96 + + Performance and stability improvements on all platforms. + + +2018-04-27: Version 6.8.95 + + Performance and stability improvements on all platforms. + + +2018-04-27: Version 6.8.94 + + Performance and stability improvements on all platforms. + + +2018-04-27: Version 6.8.93 + + Performance and stability improvements on all platforms. + + +2018-04-27: Version 6.8.92 + + Performance and stability improvements on all platforms. + + +2018-04-27: Version 6.8.91 + + Performance and stability improvements on all platforms. + + +2018-04-27: Version 6.8.90 + + Performance and stability improvements on all platforms. + + +2018-04-27: Version 6.8.89 + + Performance and stability improvements on all platforms. + + +2018-04-27: Version 6.8.88 + + Performance and stability improvements on all platforms. + + +2018-04-27: Version 6.8.87 + + Performance and stability improvements on all platforms. + + +2018-04-27: Version 6.8.86 + + Performance and stability improvements on all platforms. + + +2018-04-27: Version 6.8.85 + + Performance and stability improvements on all platforms. + + +2018-04-27: Version 6.8.84 + + Performance and stability improvements on all platforms. 
+ + +2018-04-27: Version 6.8.83 + + Performance and stability improvements on all platforms. + + +2018-04-27: Version 6.8.82 + + Performance and stability improvements on all platforms. + + +2018-04-27: Version 6.8.81 + + Performance and stability improvements on all platforms. + + +2018-04-27: Version 6.8.80 + + Performance and stability improvements on all platforms. + + +2018-04-27: Version 6.8.79 + + Performance and stability improvements on all platforms. + + +2018-04-26: Version 6.8.78 + + Performance and stability improvements on all platforms. + + +2018-04-26: Version 6.8.77 + + Performance and stability improvements on all platforms. + + +2018-04-26: Version 6.8.76 + + Performance and stability improvements on all platforms. + + +2018-04-26: Version 6.8.75 + + Performance and stability improvements on all platforms. + + +2018-04-25: Version 6.8.74 + + Performance and stability improvements on all platforms. + + +2018-04-25: Version 6.8.73 + + Performance and stability improvements on all platforms. + + +2018-04-25: Version 6.8.72 + + Performance and stability improvements on all platforms. + + +2018-04-25: Version 6.8.71 + + Performance and stability improvements on all platforms. + + +2018-04-25: Version 6.8.70 + + Performance and stability improvements on all platforms. + + +2018-04-25: Version 6.8.69 + + Performance and stability improvements on all platforms. + + +2018-04-25: Version 6.8.68 + + Performance and stability improvements on all platforms. + + +2018-04-25: Version 6.8.67 + + Performance and stability improvements on all platforms. + + +2018-04-24: Version 6.8.66 + + Performance and stability improvements on all platforms. + + +2018-04-24: Version 6.8.65 + + Performance and stability improvements on all platforms. + + +2018-04-24: Version 6.8.64 + + Performance and stability improvements on all platforms. + + +2018-04-23: Version 6.8.63 + + Performance and stability improvements on all platforms. 
+ + +2018-04-23: Version 6.8.62 + + Performance and stability improvements on all platforms. + + +2018-04-23: Version 6.8.61 + + Performance and stability improvements on all platforms. + + +2018-04-23: Version 6.8.60 + + Performance and stability improvements on all platforms. + + +2018-04-23: Version 6.8.59 + + Performance and stability improvements on all platforms. + + +2018-04-23: Version 6.8.58 + + Performance and stability improvements on all platforms. + + +2018-04-23: Version 6.8.57 + + Performance and stability improvements on all platforms. + + +2018-04-23: Version 6.8.56 + + Performance and stability improvements on all platforms. + + +2018-04-23: Version 6.8.55 + + Performance and stability improvements on all platforms. + + +2018-04-23: Version 6.8.54 + + Performance and stability improvements on all platforms. + + +2018-04-23: Version 6.8.53 + + Performance and stability improvements on all platforms. + + +2018-04-23: Version 6.8.52 + + Performance and stability improvements on all platforms. + + +2018-04-23: Version 6.8.51 + + Performance and stability improvements on all platforms. + + +2018-04-20: Version 6.8.50 + + Performance and stability improvements on all platforms. + + +2018-04-20: Version 6.8.49 + + Performance and stability improvements on all platforms. + + +2018-04-20: Version 6.8.48 + + Performance and stability improvements on all platforms. + + +2018-04-20: Version 6.8.47 + + Performance and stability improvements on all platforms. + + +2018-04-20: Version 6.8.46 + + Performance and stability improvements on all platforms. + + +2018-04-19: Version 6.8.45 + + Performance and stability improvements on all platforms. + + +2018-04-19: Version 6.8.44 + + Performance and stability improvements on all platforms. + + +2018-04-19: Version 6.8.43 + + Performance and stability improvements on all platforms. + + +2018-04-19: Version 6.8.42 + + Performance and stability improvements on all platforms. 
+ + +2018-04-19: Version 6.8.41 + + Performance and stability improvements on all platforms. + + +2018-04-18: Version 6.8.40 + + Performance and stability improvements on all platforms. + + +2018-04-18: Version 6.8.39 + + Performance and stability improvements on all platforms. + + +2018-04-18: Version 6.8.38 + + Performance and stability improvements on all platforms. + + +2018-04-18: Version 6.8.37 + + Performance and stability improvements on all platforms. + + +2018-04-18: Version 6.8.36 + + Performance and stability improvements on all platforms. + + +2018-04-17: Version 6.8.35 + + Performance and stability improvements on all platforms. + + +2018-04-17: Version 6.8.34 + + Performance and stability improvements on all platforms. + + +2018-04-17: Version 6.8.33 + + Performance and stability improvements on all platforms. + + +2018-04-17: Version 6.8.32 + + Performance and stability improvements on all platforms. + + +2018-04-17: Version 6.8.31 + + Performance and stability improvements on all platforms. + + +2018-04-17: Version 6.8.30 + + Performance and stability improvements on all platforms. + + +2018-04-17: Version 6.8.29 + + Performance and stability improvements on all platforms. + + +2018-04-17: Version 6.8.28 + + Performance and stability improvements on all platforms. + + +2018-04-17: Version 6.8.27 + + Performance and stability improvements on all platforms. + + +2018-04-17: Version 6.8.26 + + Performance and stability improvements on all platforms. + + +2018-04-17: Version 6.8.25 + + Performance and stability improvements on all platforms. + + +2018-04-16: Version 6.8.24 + + Performance and stability improvements on all platforms. + + +2018-04-16: Version 6.8.23 + + Performance and stability improvements on all platforms. + + +2018-04-16: Version 6.8.22 + + Performance and stability improvements on all platforms. + + +2018-04-16: Version 6.8.21 + + Performance and stability improvements on all platforms. 
+ + +2018-04-16: Version 6.8.20 + + Performance and stability improvements on all platforms. + + +2018-04-16: Version 6.8.19 + + Performance and stability improvements on all platforms. + + +2018-04-16: Version 6.8.18 + + Performance and stability improvements on all platforms. + + +2018-04-16: Version 6.8.17 + + Performance and stability improvements on all platforms. + + +2018-04-16: Version 6.8.16 + + Performance and stability improvements on all platforms. + + +2018-04-16: Version 6.8.15 + + Performance and stability improvements on all platforms. + + +2018-04-16: Version 6.8.14 + + Performance and stability improvements on all platforms. + + +2018-04-16: Version 6.8.13 + + Performance and stability improvements on all platforms. + + +2018-04-16: Version 6.8.12 + + Performance and stability improvements on all platforms. + + +2018-04-16: Version 6.8.11 + + Performance and stability improvements on all platforms. + + +2018-04-16: Version 6.8.10 + + Performance and stability improvements on all platforms. + + +2018-04-16: Version 6.8.9 + + Performance and stability improvements on all platforms. + + +2018-04-15: Version 6.8.8 + + Performance and stability improvements on all platforms. + + +2018-04-15: Version 6.8.7 + + Performance and stability improvements on all platforms. + + +2018-04-14: Version 6.8.6 + + Performance and stability improvements on all platforms. + + +2018-04-14: Version 6.8.5 + + Performance and stability improvements on all platforms. + + +2018-04-13: Version 6.8.4 + + Performance and stability improvements on all platforms. + + +2018-04-13: Version 6.8.3 + + Performance and stability improvements on all platforms. + + +2018-04-12: Version 6.8.2 + + Performance and stability improvements on all platforms. + + +2018-04-12: Version 6.8.1 + + Performance and stability improvements on all platforms. + + +2018-04-11: Version 6.7.290 + + Performance and stability improvements on all platforms. 
+ + +2018-04-11: Version 6.7.289 + + Performance and stability improvements on all platforms. + + 2018-04-11: Version 6.7.288 Performance and stability improvements on all platforms. diff --git a/deps/v8/DEPS b/deps/v8/DEPS index 7c4fe68361593d..4bac02255de459 100644 --- a/deps/v8/DEPS +++ b/deps/v8/DEPS @@ -12,21 +12,21 @@ vars = { deps = { 'v8/build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '73e352e758d90603e23bdc84734c12aa5817ab5f', + Var('chromium_url') + '/chromium/src/build.git' + '@' + 'b5df2518f091eea3d358f30757dec3e33db56156', 'v8/tools/gyp': Var('chromium_url') + '/external/gyp.git' + '@' + 'd61a9397e668fa9843c4aa7da9e79460fe590bfb', 'v8/third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '024a3317597b06418efea2d45aa54dd2a7030c8a', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '083eb25f9acbe034db94a1bd5c1659125b6ebf98', 'v8/third_party/icu': - Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'd888fd2a1be890f4d35e43f68d6d79f42519a357', + Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'f61e46dbee9d539a32551493e3bcc1dea92f83ec', 'v8/third_party/instrumented_libraries': Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '323cf32193caecbf074d1a0cb5b02b905f163e0f', 'v8/buildtools': - Var('chromium_url') + '/chromium/buildtools.git' + '@' + 'e8aa02ea839e087f2db66100d02c3b5d47993852', + Var('chromium_url') + '/chromium/buildtools.git' + '@' + '94288c26d2ffe3aec9848c147839afee597acefd', 'v8/base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '211b3ed9d0481b4caddbee1322321b86a483ca1f', 'v8/third_party/android_ndk': { - 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '635bc380968a76f6948fee65f80a0b28db53ae81', + 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '5cd86312e794bdf542a3685c6f10cbb96072990b', 'condition': 'checkout_android', }, 'v8/third_party/android_tools': { @@ 
-34,21 +34,25 @@ deps = { 'condition': 'checkout_android', }, 'v8/third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + '2c59f678c7ede8a844fb687525d594b71aabe3dd', + 'url': Var('chromium_url') + '/catapult.git' + '@' + '49edbd3a2b582cbab0a912cb1989062e9b8453ff', 'condition': 'checkout_android', }, 'v8/third_party/colorama/src': { 'url': Var('chromium_url') + '/external/colorama.git' + '@' + '799604a1041e9b3bc5d2789ecbd7e8db2e18e6b8', 'condition': 'checkout_android', }, + 'v8/third_party/fuchsia-sdk': { + 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + 'afac8ecd6300c9903009e6f233f61aae401aced6', + 'condition': 'checkout_fuchsia', + }, 'v8/third_party/googletest/src': - Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '7e5f90d3780d553cb86771141fb81349f3a63508', + Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '08d5b1f33af8c18785fb8ca02792b5fac81e248f', 'v8/third_party/jinja2': Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + '45571de473282bd1d8b63a8dfcb1fd268d0635d2', 'v8/third_party/markupsafe': Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '8f45f5cfa0009d2a70589bcda0349b8cb2b72783', 'v8/tools/swarming_client': - Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '88229872dd17e71658fe96763feaa77915d8cbd6', + Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '833f5ebf894be1e3e6d13678d5de8479bf12ff28', 'v8/test/benchmarks/data': Var('chromium_url') + '/v8/deps/third_party/benchmarks.git' + '@' + '05d7188267b4560491ff9155c5ee13e207ecd65f', 'v8/test/mozilla/data': @@ -58,11 +62,11 @@ deps = { 'v8/test/test262/harness': Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '0f2acdd882c84cff43b9d60df7574a1901e2cdcd', 'v8/tools/clang': - Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'd7c36b0ae001a5cc31f2da79a430154916a3def2', 
+ Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'c893c7eec4706f8c7fc244ee254b1dadd8f8d158', 'v8/tools/luci-go': Var('chromium_url') + '/chromium/src/tools/luci-go.git' + '@' + 'ff0709d4283b1f233dcf0c9fec1672c6ecaed2f1', 'v8/test/wasm-js': - Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + '586d34770c6445bfb93c9bae8ac50baade7ee168', + Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + '27d63f22e72395248d314520b3ad5b1e0943fc10', } recursedeps = [ @@ -84,6 +88,17 @@ skip_child_includes = [ ] hooks = [ + { + # Ensure that the DEPS'd "depot_tools" has its self-update capability + # disabled. + 'name': 'disable_depot_tools_selfupdate', + 'pattern': '.', + 'action': [ + 'python', + 'v8/third_party/depot_tools/update_depot_tools_toggle.py', + '--disable', + ], + }, { # This clobbers when necessary (based on get_landmines.py). It must be the # first hook so that other things that get/generate into the output diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py index e6dbb7939568fd..055027c7bb6050 100644 --- a/deps/v8/PRESUBMIT.py +++ b/deps/v8/PRESUBMIT.py @@ -289,6 +289,13 @@ def FilterFile(affected_file): def _CommonChecks(input_api, output_api): """Checks common to both upload and commit.""" results = [] + # TODO(machenbach): Replace some of those checks, e.g. owners and copyright, + # with the canned PanProjectChecks. Need to make sure that the checks all + # pass on all existing files. 
+ results.extend(input_api.canned_checks.CheckOwnersFormat( + input_api, output_api)) + results.extend(input_api.canned_checks.CheckOwners( + input_api, output_api)) results.extend(_CheckCommitMessageBugEntry(input_api, output_api)) results.extend(input_api.canned_checks.CheckPatchFormatted( input_api, output_api)) diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni index 211f15aab944d4..1cbba9fefdd795 100644 --- a/deps/v8/gni/v8.gni +++ b/deps/v8/gni/v8.gni @@ -96,7 +96,7 @@ if (is_debug && !v8_optimized_debug) { # TODO(crbug.com/621335) Rework this so that we don't have the confusion # between "optimize_speed" and "optimize_max". - if (is_posix && !is_android && !using_sanitizer) { + if (((is_posix && !is_android) || is_fuchsia) && !using_sanitizer) { v8_add_configs += [ "//build/config/compiler:optimize_speed" ] } else { v8_add_configs += [ "//build/config/compiler:optimize_max" ] @@ -110,7 +110,7 @@ if (v8_code_coverage && !is_clang) { ] } -if (is_posix && (v8_enable_backtrace || v8_monolithic)) { +if ((is_posix || is_fuchsia) && (v8_enable_backtrace || v8_monolithic)) { v8_remove_configs += [ "//build/config/gcc:symbol_visibility_hidden" ] v8_add_configs += [ "//build/config/gcc:symbol_visibility_default" ] } diff --git a/deps/v8/include/libplatform/libplatform.h b/deps/v8/include/libplatform/libplatform.h index 2c830bf834b786..a381b97f889749 100644 --- a/deps/v8/include/libplatform/libplatform.h +++ b/deps/v8/include/libplatform/libplatform.h @@ -62,7 +62,7 @@ V8_PLATFORM_EXPORT bool PumpMessageLoop( v8::Platform* platform, v8::Isolate* isolate, MessageLoopBehavior behavior = MessageLoopBehavior::kDoNotWait); -V8_PLATFORM_EXPORT V8_DEPRECATE_SOON( +V8_PLATFORM_EXPORT V8_DEPRECATED( "This function has become obsolete and is essentially a nop", void EnsureEventLoopInitialized(v8::Platform* platform, v8::Isolate* isolate)); diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h index 6de8234fb83bcf..d879a94373e427 100644 --- 
a/deps/v8/include/v8-inspector.h +++ b/deps/v8/include/v8-inspector.h @@ -99,6 +99,7 @@ class V8_EXPORT V8ContextInfo { class V8_EXPORT V8StackTrace { public: + virtual StringView firstNonEmptySourceURL() const = 0; virtual bool isEmpty() const = 0; virtual StringView topSourceURL() const = 0; virtual int topLineNumber() const = 0; diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h index ddc200abab0dd9..cfeb13b65829f9 100644 --- a/deps/v8/include/v8-platform.h +++ b/deps/v8/include/v8-platform.h @@ -207,6 +207,7 @@ class PageAllocator { */ enum Permission { kNoAccess, + kRead, kReadWrite, // TODO(hpayer): Remove this flag. Memory should never be rwx. kReadWriteExecute, @@ -245,16 +246,6 @@ class PageAllocator { */ class Platform { public: - /** - * This enum is used to indicate whether a task is potentially long running, - * or causes a long wait. The embedder might want to use this hint to decide - * whether to execute the task on a dedicated thread. - */ - enum ExpectedRuntime { - kShortRunningTask, - kLongRunningTask - }; - virtual ~Platform() = default; /** @@ -289,101 +280,25 @@ class Platform { virtual bool OnCriticalMemoryPressure(size_t length) { return false; } /** - * Gets the number of worker threads used by GetWorkerThreadsTaskRunner() and - * CallOnWorkerThread(). This can be used to estimate the number of tasks a - * work package should be split into. A return value of 0 means that there are - * no worker threads available. Note that a value of 0 won't prohibit V8 from - * posting tasks using |CallOnWorkerThread|. - */ - virtual int NumberOfWorkerThreads() { - return static_cast(NumberOfAvailableBackgroundThreads()); - } - - /** - * Deprecated. Use NumberOfWorkerThreads() instead. - * TODO(gab): Remove this when all embedders override - * NumberOfWorkerThreads() instead. + * Gets the number of worker threads used by + * Call(BlockingTask)OnWorkerThread(). 
This can be used to estimate the number + * of tasks a work package should be split into. A return value of 0 means + * that there are no worker threads available. Note that a value of 0 won't + * prohibit V8 from posting tasks using |CallOnWorkerThread|. */ - V8_DEPRECATE_SOON( - "NumberOfAvailableBackgroundThreads() is deprecated, use " - "NumberOfAvailableBackgroundThreads() instead.", - virtual size_t NumberOfAvailableBackgroundThreads()) { - return 0; - } + virtual int NumberOfWorkerThreads() = 0; /** * Returns a TaskRunner which can be used to post a task on the foreground. * This function should only be called from a foreground thread. */ virtual std::shared_ptr GetForegroundTaskRunner( - Isolate* isolate) { - // TODO(ahaas): Make this function abstract after it got implemented on all - // platforms. - return {}; - } - - /** - * Returns a TaskRunner which can be used to post a task on a background. - * This function should only be called from a foreground thread. - */ - V8_DEPRECATE_SOON( - "GetBackgroundTaskRunner() is deprecated, use " - "GetWorkerThreadsTaskRunner() " - "instead.", - virtual std::shared_ptr GetBackgroundTaskRunner( - Isolate* isolate)) { - // TODO(gab): Remove this method when all embedders have moved to - // GetWorkerThreadsTaskRunner(). - - // An implementation needs to be provided here because this is called by the - // default GetWorkerThreadsTaskRunner() implementation below. In practice - // however, all code either: - // - Overrides GetWorkerThreadsTaskRunner() (thus not making this call) -- - // i.e. all v8 code. - // - Overrides this method (thus not making this call) -- i.e. all - // unadapted embedders. - abort(); - } - - /** - * Returns a TaskRunner which can be used to post async tasks on a worker. - * This function should only be called from a foreground thread. 
- */ - virtual std::shared_ptr GetWorkerThreadsTaskRunner( - Isolate* isolate) { - // TODO(gab): Make this function abstract after it got implemented on all - // platforms. - return GetBackgroundTaskRunner(isolate); - } - - /** - * Schedules a task to be invoked on a background thread. |expected_runtime| - * indicates that the task will run a long time. The Platform implementation - * takes ownership of |task|. There is no guarantee about order of execution - * of tasks wrt order of scheduling, nor is there a guarantee about the - * thread the task will be run on. - */ - V8_DEPRECATE_SOON( - "ExpectedRuntime is deprecated, use CallOnWorkerThread() instead.", - virtual void CallOnBackgroundThread(Task* task, - ExpectedRuntime expected_runtime)) { - // An implementation needs to be provided here because this is called by the - // default implementation below. In practice however, all code either: - // - Overrides the new method (thus not making this call) -- i.e. all v8 - // code. - // - Overrides this method (thus not making this call) -- i.e. all - // unadapted embedders. - abort(); - } + Isolate* isolate) = 0; /** * Schedules a task to be invoked on a worker thread. - * TODO(gab): Make pure virtual when all embedders override this instead of - * CallOnBackgroundThread(). */ - virtual void CallOnWorkerThread(std::unique_ptr task) { - CallOnBackgroundThread(task.release(), kShortRunningTask); - } + virtual void CallOnWorkerThread(std::unique_ptr task) = 0; /** * Schedules a task that blocks the main thread to be invoked with @@ -395,6 +310,13 @@ class Platform { CallOnWorkerThread(std::move(task)); } + /** + * Schedules a task to be invoked on a worker thread after |delay_in_seconds| + * expires. + */ + virtual void CallDelayedOnWorkerThread(std::unique_ptr task, + double delay_in_seconds) = 0; + /** * Schedules a task to be invoked on a foreground thread wrt a specific * |isolate|. 
Tasks posted for the same isolate should be execute in order of @@ -420,14 +342,14 @@ class Platform { * The definition of "foreground" is opaque to V8. */ virtual void CallIdleOnForegroundThread(Isolate* isolate, IdleTask* task) { - // TODO(ulan): Make this function abstract after V8 roll in Chromium. + // This must be overriden if |IdleTasksEnabled()|. + abort(); } /** * Returns true if idle tasks are enabled for the given |isolate|. */ virtual bool IdleTasksEnabled(Isolate* isolate) { - // TODO(ulan): Make this function abstract after V8 roll in Chromium. return false; } diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h index c61027b3b94e45..34ad2b9cea5445 100644 --- a/deps/v8/include/v8-profiler.h +++ b/deps/v8/include/v8-profiler.h @@ -54,7 +54,11 @@ namespace v8 { */ class V8_EXPORT TracingCpuProfiler { public: - static std::unique_ptr Create(Isolate*); + V8_DEPRECATE_SOON( + "The profiler is created automatically with the isolate.\n" + "No need to create it explicitly.", + static std::unique_ptr Create(Isolate*)); + virtual ~TracingCpuProfiler() = default; protected: @@ -636,7 +640,7 @@ class V8_EXPORT AllocationProfile { * Usage: * 1) Define derived class of EmbedderGraph::Node for embedder objects. * 2) Set the build embedder graph callback on the heap profiler using - * HeapProfiler::AddBuildEmbedderGraphCallback. + * HeapProfiler::SetBuildEmbedderGraphCallback. * 3) In the callback use graph->AddEdge(node1, node2) to add an edge from * node1 to node2. * 4) To represent references from/to V8 object, construct V8 nodes using @@ -736,12 +740,7 @@ class V8_EXPORT HeapProfiler { * The callback must not trigger garbage collection in V8. 
*/ typedef void (*BuildEmbedderGraphCallback)(v8::Isolate* isolate, - v8::EmbedderGraph* graph, - void* data); - - /** TODO(addaleax): Remove */ - typedef void (*LegacyBuildEmbedderGraphCallback)(v8::Isolate* isolate, - v8::EmbedderGraph* graph); + v8::EmbedderGraph* graph); /** Returns the number of snapshots taken. */ int GetSnapshotCount(); @@ -883,22 +882,15 @@ class V8_EXPORT HeapProfiler { /** Binds a callback to embedder's class ID. */ V8_DEPRECATED( - "Use AddBuildEmbedderGraphCallback to provide info about embedder nodes", + "Use SetBuildEmbedderGraphCallback to provide info about embedder nodes", void SetWrapperClassInfoProvider(uint16_t class_id, WrapperInfoCallback callback)); V8_DEPRECATED( - "Use AddBuildEmbedderGraphCallback to provide info about embedder nodes", + "Use SetBuildEmbedderGraphCallback to provide info about embedder nodes", void SetGetRetainerInfosCallback(GetRetainerInfosCallback callback)); - V8_DEPRECATE_SOON( - "Use AddBuildEmbedderGraphCallback to provide info about embedder nodes", - void SetBuildEmbedderGraphCallback( - LegacyBuildEmbedderGraphCallback callback)); - void AddBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback, - void* data); - void RemoveBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback, - void* data); + void SetBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback); /** * Default value of persistent handle class ID. Must not be used to @@ -1000,76 +992,6 @@ struct HeapStatsUpdate { uint32_t size; // New value of size field for the interval with this index. }; -#define CODE_EVENTS_LIST(V) \ - V(Builtin) \ - V(Callback) \ - V(Eval) \ - V(Function) \ - V(InterpretedFunction) \ - V(Handler) \ - V(BytecodeHandler) \ - V(LazyCompile) \ - V(RegExp) \ - V(Script) \ - V(Stub) - -/** - * Note that this enum may be extended in the future. Please include a default - * case if this enum is used in a switch statement. 
- */ -enum CodeEventType { - kUnknownType = 0 -#define V(Name) , k##Name##Type - CODE_EVENTS_LIST(V) -#undef V -}; - -/** - * Representation of a code creation event - */ -class V8_EXPORT CodeEvent { - public: - uintptr_t GetCodeStartAddress(); - size_t GetCodeSize(); - Local GetFunctionName(); - Local GetScriptName(); - int GetScriptLine(); - int GetScriptColumn(); - /** - * NOTE (mmarchini): We can't allocate objects in the heap when we collect - * existing code, and both the code type and the comment are not stored in the - * heap, so we return those as const char*. - */ - CodeEventType GetCodeType(); - const char* GetComment(); - - static const char* GetCodeEventTypeName(CodeEventType code_event_type); -}; - -/** - * Interface to listen to code creation events. - */ -class V8_EXPORT CodeEventHandler { - public: - /** - * Creates a new listener for the |isolate|. The isolate must be initialized. - * The listener object must be disposed after use by calling |Dispose| method. - * Multiple listeners can be created for the same isolate. - */ - explicit CodeEventHandler(Isolate* isolate); - virtual ~CodeEventHandler(); - - virtual void Handle(CodeEvent* code_event) = 0; - - void Enable(); - void Disable(); - - private: - CodeEventHandler(); - CodeEventHandler(const CodeEventHandler&); - CodeEventHandler& operator=(const CodeEventHandler&); - void* internal_listener_; -}; } // namespace v8 diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h index 09c47d7e91196d..e57efc3084acba 100644 --- a/deps/v8/include/v8-version.h +++ b/deps/v8/include/v8-version.h @@ -9,9 +9,9 @@ // NOTE these macros are used by some of the tool scripts and the build // system so their names cannot be changed without changing the scripts. #define V8_MAJOR_VERSION 6 -#define V8_MINOR_VERSION 7 -#define V8_BUILD_NUMBER 288 -#define V8_PATCH_LEVEL 49 +#define V8_MINOR_VERSION 8 +#define V8_BUILD_NUMBER 275 +#define V8_PATCH_LEVEL 24 // Use 1 for candidates and 0 otherwise. 
// (Boolean macro values are not supported by all preprocessors.) diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index 389a7c01b0583a..b68d9fbbfc3c86 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -156,6 +156,95 @@ class GlobalHandles; namespace wasm { class StreamingDecoder; } // namespace wasm + +/** + * Configuration of tagging scheme. + */ +const int kApiPointerSize = sizeof(void*); // NOLINT +const int kApiDoubleSize = sizeof(double); // NOLINT +const int kApiIntSize = sizeof(int); // NOLINT +const int kApiInt64Size = sizeof(int64_t); // NOLINT + +// Tag information for HeapObject. +const int kHeapObjectTag = 1; +const int kWeakHeapObjectTag = 3; +const int kHeapObjectTagSize = 2; +const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1; + +// Tag information for Smi. +const int kSmiTag = 0; +const int kSmiTagSize = 1; +const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1; + +template +struct SmiTagging; + +template +V8_INLINE internal::Object* IntToSmi(int value) { + int smi_shift_bits = kSmiTagSize + kSmiShiftSize; + uintptr_t tagged_value = + (static_cast(value) << smi_shift_bits) | kSmiTag; + return reinterpret_cast(tagged_value); +} + +// Smi constants for 32-bit systems. +template <> +struct SmiTagging<4> { + enum { kSmiShiftSize = 0, kSmiValueSize = 31 }; + static int SmiShiftSize() { return kSmiShiftSize; } + static int SmiValueSize() { return kSmiValueSize; } + V8_INLINE static int SmiToInt(const internal::Object* value) { + int shift_bits = kSmiTagSize + kSmiShiftSize; + // Throw away top 32 bits and shift down (requires >> to be sign extending). + return static_cast(reinterpret_cast(value)) >> shift_bits; + } + V8_INLINE static internal::Object* IntToSmi(int value) { + return internal::IntToSmi(value); + } + V8_INLINE static bool IsValidSmi(intptr_t value) { + // To be representable as an tagged small integer, the two + // most-significant bits of 'value' must be either 00 or 11 due to + // sign-extension. 
To check this we add 01 to the two + // most-significant bits, and check if the most-significant bit is 0 + // + // CAUTION: The original code below: + // bool result = ((value + 0x40000000) & 0x80000000) == 0; + // may lead to incorrect results according to the C language spec, and + // in fact doesn't work correctly with gcc4.1.1 in some cases: The + // compiler may produce undefined results in case of signed integer + // overflow. The computation must be done w/ unsigned ints. + return static_cast(value) + 0x40000000U < 0x80000000U; + } +}; + +// Smi constants for 64-bit systems. +template <> +struct SmiTagging<8> { + enum { kSmiShiftSize = 31, kSmiValueSize = 32 }; + static int SmiShiftSize() { return kSmiShiftSize; } + static int SmiValueSize() { return kSmiValueSize; } + V8_INLINE static int SmiToInt(const internal::Object* value) { + int shift_bits = kSmiTagSize + kSmiShiftSize; + // Shift down and throw away top 32 bits. + return static_cast(reinterpret_cast(value) >> shift_bits); + } + V8_INLINE static internal::Object* IntToSmi(int value) { + return internal::IntToSmi(value); + } + V8_INLINE static bool IsValidSmi(intptr_t value) { + // To be representable as a long smi, the value must be a 32-bit integer. + return (value == static_cast(value)); + } +}; + +typedef SmiTagging PlatformSmiTagging; +const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize; +const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize; +const int kSmiMinValue = (static_cast(-1)) << (kSmiValueSize - 1); +const int kSmiMaxValue = -(kSmiMinValue + 1); +constexpr bool SmiValuesAre31Bits() { return kSmiValueSize == 31; } +constexpr bool SmiValuesAre32Bits() { return kSmiValueSize == 32; } + } // namespace internal namespace debug { @@ -1123,6 +1212,13 @@ class V8_EXPORT UnboundScript { static const int kNoScriptId = 0; }; +/** + * A compiled JavaScript module, not yet tied to a Context. + */ +class V8_EXPORT UnboundModuleScript { + // Only used as a container for code caching. 
+}; + /** * A location in JavaScript source. */ @@ -1222,6 +1318,14 @@ class V8_EXPORT Module { * The module's status must be at least kInstantiated. */ Local GetModuleNamespace(); + + /** + * Returns the corresponding context-unbound module script. + * + * The module must be unevaluated, i.e. its status must not be kEvaluating, + * kEvaluated or kErrored. + */ + Local GetUnboundModuleScript(); }; /** @@ -1580,9 +1684,17 @@ class V8_EXPORT ScriptCompiler { */ static CachedData* CreateCodeCache(Local unbound_script); - // Deprecated. - static CachedData* CreateCodeCache(Local unbound_script, - Local source); + /** + * Creates and returns code cache for the specified unbound_module_script. + * This will return nullptr if the script cannot be serialized. The + * CachedData returned by this function should be owned by the caller. + */ + static CachedData* CreateCodeCache( + Local unbound_module_script); + + V8_DEPRECATED("Source string is no longer required", + static CachedData* CreateCodeCache( + Local unbound_script, Local source)); /** * Creates and returns code cache for the specified function that was @@ -1592,9 +1704,9 @@ class V8_EXPORT ScriptCompiler { */ static CachedData* CreateCodeCacheForFunction(Local function); - // Deprecated. - static CachedData* CreateCodeCacheForFunction(Local function, - Local source); + V8_DEPRECATED("Source string is no longer required", + static CachedData* CreateCodeCacheForFunction( + Local function, Local source)); private: static V8_WARN_UNUSED_RESULT MaybeLocal CompileUnboundInternal( @@ -1913,12 +2025,16 @@ class V8_EXPORT ValueSerializer { * If the memory cannot be allocated, nullptr should be returned. * |actual_size| will be ignored. It is assumed that |old_buffer| is still * valid in this case and has not been modified. + * + * The default implementation uses the stdlib's `realloc()` function. 
*/ virtual void* ReallocateBufferMemory(void* old_buffer, size_t size, size_t* actual_size); /** * Frees a buffer allocated with |ReallocateBufferMemory|. + * + * The default implementation uses the stdlib's `free()` function. */ virtual void FreeBufferMemory(void* buffer); }; @@ -1946,9 +2062,9 @@ class V8_EXPORT ValueSerializer { /** * Returns the stored data (allocated using the delegate's - * AllocateBufferMemory) and its size. This serializer should not be used once - * the buffer is released. The contents are undefined if a previous write has - * failed. + * ReallocateBufferMemory) and its size. This serializer should not be used + * once the buffer is released. The contents are undefined if a previous write + * has failed. Ownership of the buffer is transferred to the caller. */ V8_WARN_UNUSED_RESULT std::pair Release(); @@ -2538,8 +2654,9 @@ enum class NewStringType { */ class V8_EXPORT String : public Name { public: - static constexpr int kMaxLength = - sizeof(void*) == 4 ? (1 << 28) - 16 : (1 << 30) - 1 - 24; + static constexpr int kMaxLength = internal::kApiPointerSize == 4 + ? (1 << 28) - 16 + : internal::kSmiMaxValue / 2 - 24; enum Encoding { UNKNOWN_ENCODING = 0x1, @@ -3048,48 +3165,6 @@ class V8_EXPORT Uint32 : public Integer { class V8_EXPORT BigInt : public Primitive { public: static Local New(Isolate* isolate, int64_t value); - static Local NewFromUnsigned(Isolate* isolate, uint64_t value); - /** - * Creates a new BigInt object using a specified sign bit and a - * specified list of digits/words. - * The resulting number is calculated as: - * - * (-1)^sign_bit * (words[0] * (2^64)^0 + words[1] * (2^64)^1 + ...) - */ - static MaybeLocal NewFromWords(Local context, int sign_bit, - int word_count, const uint64_t* words); - - /** - * Returns the value of this BigInt as an unsigned 64-bit integer. - * If `lossless` is provided, it will reflect whether the return value was - * truncated or wrapped around. 
In particular, it is set to `false` if this - * BigInt is negative. - */ - uint64_t Uint64Value(bool* lossless = nullptr) const; - - /** - * Returns the value of this BigInt as a signed 64-bit integer. - * If `lossless` is provided, it will reflect whether this BigInt was - * truncated or not. - */ - int64_t Int64Value(bool* lossless = nullptr) const; - - /** - * Returns the number of 64-bit words needed to store the result of - * ToWordsArray(). - */ - int WordCount() const; - - /** - * Writes the contents of this BigInt to a specified memory location. - * `sign_bit` must be provided and will be set to 1 if this BigInt is - * negative. - * `*word_count` has to be initialized to the length of the `words` array. - * Upon return, it will be set to the actual number of words that would - * be needed to store this BigInt (i.e. the return value of `WordCount()`). - */ - void ToWordsArray(int* sign_bit, int* word_count, uint64_t* words) const; - V8_INLINE static BigInt* Cast(v8::Value* obj); private: @@ -3958,6 +4033,15 @@ class V8_EXPORT Function : public Object { return NewInstance(context, 0, nullptr); } + /** + * When side effect checks are enabled, passing kHasNoSideEffect allows the + * constructor to be invoked without throwing. Calls made within the + * constructor are still checked. 
+ */ + V8_WARN_UNUSED_RESULT MaybeLocal NewInstanceWithSideEffectType( + Local context, int argc, Local argv[], + SideEffectType side_effect_type = SideEffectType::kHasSideEffect) const; + V8_DEPRECATE_SOON("Use maybe version", Local Call(Local recv, int argc, Local argv[])); @@ -4308,8 +4392,6 @@ class V8_EXPORT WasmModuleObjectBuilderStreaming final { ~WasmModuleObjectBuilderStreaming(); private: - typedef std::pair, size_t> Buffer; - WasmModuleObjectBuilderStreaming(const WasmModuleObjectBuilderStreaming&) = delete; WasmModuleObjectBuilderStreaming(WasmModuleObjectBuilderStreaming&&) = @@ -4332,8 +4414,6 @@ class V8_EXPORT WasmModuleObjectBuilderStreaming final { #else Persistent promise_; #endif - std::vector received_buffers_; - size_t total_size_ = 0; std::shared_ptr streaming_decoder_; }; @@ -4584,8 +4664,7 @@ class V8_EXPORT TypedArray : public ArrayBufferView { /* * The largest typed array size that can be constructed using New. */ - static constexpr size_t kMaxLength = - sizeof(void*) == 4 ? 
(1u << 30) - 1 : (1u << 31) - 1; + static constexpr size_t kMaxLength = internal::kSmiMaxValue; /** * Number of elements in this typed array @@ -5170,22 +5249,25 @@ class V8_EXPORT Template : public Data { // TODO(dcarney): gcc can't handle Local below Local data = Local(), PropertyAttribute attribute = None, Local signature = Local(), - AccessControl settings = DEFAULT); + AccessControl settings = DEFAULT, + SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect); void SetNativeDataProperty( Local name, AccessorNameGetterCallback getter, AccessorNameSetterCallback setter = 0, // TODO(dcarney): gcc can't handle Local below Local data = Local(), PropertyAttribute attribute = None, Local signature = Local(), - AccessControl settings = DEFAULT); + AccessControl settings = DEFAULT, + SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect); /** * Like SetNativeDataProperty, but V8 will replace the native data property * with a real data property on first access. 
*/ - void SetLazyDataProperty(Local name, AccessorNameGetterCallback getter, - Local data = Local(), - PropertyAttribute attribute = None); + void SetLazyDataProperty( + Local name, AccessorNameGetterCallback getter, + Local data = Local(), PropertyAttribute attribute = None, + SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect); /** * During template instantiation, sets the value with the intrinsic property @@ -5909,12 +5991,14 @@ class V8_EXPORT ObjectTemplate : public Template { Local name, AccessorGetterCallback getter, AccessorSetterCallback setter = 0, Local data = Local(), AccessControl settings = DEFAULT, PropertyAttribute attribute = None, - Local signature = Local()); + Local signature = Local(), + SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect); void SetAccessor( Local name, AccessorNameGetterCallback getter, AccessorNameSetterCallback setter = 0, Local data = Local(), AccessControl settings = DEFAULT, PropertyAttribute attribute = None, - Local signature = Local()); + Local signature = Local(), + SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect); /** * Sets a named property handler on the object template. 
@@ -6394,9 +6478,7 @@ typedef void (*PromiseHook)(PromiseHookType type, Local promise, // --- Promise Reject Callback --- enum PromiseRejectEvent { kPromiseRejectWithNoHandler = 0, - kPromiseHandlerAddedAfterReject = 1, - kPromiseRejectAfterResolved = 2, - kPromiseResolveAfterResolved = 3, + kPromiseHandlerAddedAfterReject = 1 }; class PromiseRejectMessage { @@ -6643,10 +6725,12 @@ class V8_EXPORT HeapCodeStatistics { HeapCodeStatistics(); size_t code_and_metadata_size() { return code_and_metadata_size_; } size_t bytecode_and_metadata_size() { return bytecode_and_metadata_size_; } + size_t external_script_source_size() { return external_script_source_size_; } private: size_t code_and_metadata_size_; size_t bytecode_and_metadata_size_; + size_t external_script_source_size_; friend class Isolate; }; @@ -6946,7 +7030,8 @@ class V8_EXPORT Isolate { add_histogram_sample_callback(nullptr), array_buffer_allocator(nullptr), external_references(nullptr), - allow_atomics_wait(true) {} + allow_atomics_wait(true), + only_terminate_in_safe_scope(false) {} /** * The optional entry_hook allows the host application to provide the @@ -7009,6 +7094,11 @@ class V8_EXPORT Isolate { * this isolate. This can also be configured via SetAllowAtomicsWait. */ bool allow_atomics_wait; + + /** + * Termination is postponed when there is no active SafeForTerminationScope. + */ + bool only_terminate_in_safe_scope; }; @@ -7093,6 +7183,24 @@ class V8_EXPORT Isolate { internal::Isolate* const isolate_; }; + /** + * This scope allows terminations inside direct V8 API calls and forbid them + * inside any recursice API calls without explicit SafeForTerminationScope. + */ + class V8_EXPORT SafeForTerminationScope { + public: + explicit SafeForTerminationScope(v8::Isolate* isolate); + ~SafeForTerminationScope(); + + // Prevent copying of Scope objects. 
+ SafeForTerminationScope(const SafeForTerminationScope&) = delete; + SafeForTerminationScope& operator=(const SafeForTerminationScope&) = delete; + + private: + internal::Isolate* isolate_; + bool prev_value_; + }; + /** * Types of garbage collections that can be requested via * RequestGarbageCollectionForTesting. @@ -7156,6 +7264,7 @@ class V8_EXPORT Isolate { kErrorStackTraceLimit = 45, kWebAssemblyInstantiation = 46, kDeoptimizerDisableSpeculation = 47, + kArrayPrototypeSortJSArrayModifiedPrototype = 48, // If you add new values here, you'll also need to update Chromium's: // web_feature.mojom, UseCounterCallback.cpp, and enums.xml. V8 changes to @@ -7176,6 +7285,26 @@ class V8_EXPORT Isolate { typedef void (*UseCounterCallback)(Isolate* isolate, UseCounterFeature feature); + /** + * Allocates a new isolate but does not initialize it. Does not change the + * currently entered isolate. + * + * Only Isolate::GetData() and Isolate::SetData(), which access the + * embedder-controlled parts of the isolate, are allowed to be called on the + * uninitialized isolate. To initialize the isolate, call + * Isolate::Initialize(). + * + * When an isolate is no longer used its resources should be freed + * by calling Dispose(). Using the delete operator is not allowed. + * + * V8::Initialize() must have run prior to this. + */ + static Isolate* Allocate(); + + /** + * Initialize an Isolate previously allocated by Isolate::Allocate(). + */ + static void Initialize(Isolate* isolate, const CreateParams& params); /** * Creates a new isolate. Does not change the currently entered @@ -8024,7 +8153,9 @@ class V8_EXPORT V8 { * Returns { NULL, 0 } on failure. * The caller acquires ownership of the data array in the return value. 
*/ - static StartupData CreateSnapshotDataBlob(const char* embedded_source = NULL); + V8_DEPRECATED("Use SnapshotCreator", + static StartupData CreateSnapshotDataBlob( + const char* embedded_source = NULL)); /** * Bootstrap an isolate and a context from the cold startup blob, run the @@ -8034,8 +8165,9 @@ class V8_EXPORT V8 { * The caller acquires ownership of the data array in the return value. * The argument startup blob is untouched. */ - static StartupData WarmUpSnapshotDataBlob(StartupData cold_startup_blob, - const char* warmup_source); + V8_DEPRECATED("Use SnapshotCreator", + static StartupData WarmUpSnapshotDataBlob( + StartupData cold_startup_blob, const char* warmup_source)); /** Set the callback to invoke in case of Dcheck failures. */ static void SetDcheckErrorHandler(DcheckErrorCallback that); @@ -8230,6 +8362,18 @@ class V8_EXPORT SnapshotCreator { public: enum class FunctionCodeHandling { kClear, kKeep }; + /** + * Initialize and enter an isolate, and set it up for serialization. + * The isolate is either created from scratch or from an existing snapshot. + * The caller keeps ownership of the argument snapshot. + * \param existing_blob existing snapshot from which to create this one. + * \param external_references a null-terminated array of external references + * that must be equivalent to CreateParams::external_references. + */ + SnapshotCreator(Isolate* isolate, + const intptr_t* external_references = nullptr, + StartupData* existing_blob = nullptr); + /** * Create and enter an isolate, and set it up for serialization. * The isolate is either created from scratch or from an existing snapshot. @@ -8990,85 +9134,6 @@ class V8_EXPORT Locker { namespace internal { -const int kApiPointerSize = sizeof(void*); // NOLINT -const int kApiIntSize = sizeof(int); // NOLINT -const int kApiInt64Size = sizeof(int64_t); // NOLINT - -// Tag information for HeapObject. 
-const int kHeapObjectTag = 1; -const int kWeakHeapObjectTag = 3; -const int kHeapObjectTagSize = 2; -const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1; - -// Tag information for Smi. -const int kSmiTag = 0; -const int kSmiTagSize = 1; -const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1; - -template struct SmiTagging; - -template -V8_INLINE internal::Object* IntToSmi(int value) { - int smi_shift_bits = kSmiTagSize + kSmiShiftSize; - uintptr_t tagged_value = - (static_cast(value) << smi_shift_bits) | kSmiTag; - return reinterpret_cast(tagged_value); -} - -// Smi constants for 32-bit systems. -template <> struct SmiTagging<4> { - enum { kSmiShiftSize = 0, kSmiValueSize = 31 }; - static int SmiShiftSize() { return kSmiShiftSize; } - static int SmiValueSize() { return kSmiValueSize; } - V8_INLINE static int SmiToInt(const internal::Object* value) { - int shift_bits = kSmiTagSize + kSmiShiftSize; - // Throw away top 32 bits and shift down (requires >> to be sign extending). - return static_cast(reinterpret_cast(value)) >> shift_bits; - } - V8_INLINE static internal::Object* IntToSmi(int value) { - return internal::IntToSmi(value); - } - V8_INLINE static bool IsValidSmi(intptr_t value) { - // To be representable as an tagged small integer, the two - // most-significant bits of 'value' must be either 00 or 11 due to - // sign-extension. To check this we add 01 to the two - // most-significant bits, and check if the most-significant bit is 0 - // - // CAUTION: The original code below: - // bool result = ((value + 0x40000000) & 0x80000000) == 0; - // may lead to incorrect results according to the C language spec, and - // in fact doesn't work correctly with gcc4.1.1 in some cases: The - // compiler may produce undefined results in case of signed integer - // overflow. The computation must be done w/ unsigned ints. - return static_cast(value + 0x40000000U) < 0x80000000U; - } -}; - -// Smi constants for 64-bit systems. 
-template <> struct SmiTagging<8> { - enum { kSmiShiftSize = 31, kSmiValueSize = 32 }; - static int SmiShiftSize() { return kSmiShiftSize; } - static int SmiValueSize() { return kSmiValueSize; } - V8_INLINE static int SmiToInt(const internal::Object* value) { - int shift_bits = kSmiTagSize + kSmiShiftSize; - // Shift down and throw away top 32 bits. - return static_cast(reinterpret_cast(value) >> shift_bits); - } - V8_INLINE static internal::Object* IntToSmi(int value) { - return internal::IntToSmi(value); - } - V8_INLINE static bool IsValidSmi(intptr_t value) { - // To be representable as a long smi, the value must be a 32-bit integer. - return (value == static_cast(value)); - } -}; - -typedef SmiTagging PlatformSmiTagging; -const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize; -const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize; -V8_INLINE static bool SmiValuesAre31Bits() { return kSmiValueSize == 31; } -V8_INLINE static bool SmiValuesAre32Bits() { return kSmiValueSize == 32; } - /** * This class exports constants and functionality from within v8 that * is necessary to implement inline functions in the v8 api. 
Don't @@ -9082,7 +9147,7 @@ class Internals { static const int kMapInstanceTypeOffset = 1 * kApiPointerSize + kApiIntSize; static const int kStringResourceOffset = 3 * kApiPointerSize; - static const int kOddballKindOffset = 4 * kApiPointerSize + sizeof(double); + static const int kOddballKindOffset = 4 * kApiPointerSize + kApiDoubleSize; static const int kForeignAddressOffset = kApiPointerSize; static const int kJSObjectHeaderSize = 3 * kApiPointerSize; static const int kFixedArrayHeaderSize = 2 * kApiPointerSize; diff --git a/deps/v8/infra/config/cq.cfg b/deps/v8/infra/config/cq.cfg index 6159b3f1cdeca4..49c13c000bcbf9 100644 --- a/deps/v8/infra/config/cq.cfg +++ b/deps/v8/infra/config/cq.cfg @@ -30,6 +30,11 @@ verifiers { name: "v8_linux64_asan_rel_ng_triggered" triggered_by: "v8_linux64_asan_rel_ng" } + builders { name: "v8_linux64_dbg_ng" } + builders { + name: "v8_linux64_dbg_ng_triggered" + triggered_by: "v8_linux64_dbg_ng" + } builders { name: "v8_linux64_gcc_compile_dbg" } builders { name: "v8_linux64_jumbo_compile_rel" } builders { name: "v8_linux64_rel_ng" } @@ -61,11 +66,6 @@ verifiers { experiment_percentage: 100 } builders { name: "v8_linux_chromium_gn_rel" } - builders { name: "v8_linux_dbg_ng" } - builders { - name: "v8_linux_dbg_ng_triggered" - triggered_by: "v8_linux_dbg_ng" - } builders { name: "v8_linux_gcc_compile_rel" } builders { name: "v8_linux_mips64el_compile_rel" } builders { name: "v8_linux_mipsel_compile_rel" } diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl index 70a7ad5754e0cb..23b00624557606 100644 --- a/deps/v8/infra/mb/mb_config.pyl +++ b/deps/v8/infra/mb/mb_config.pyl @@ -43,12 +43,6 @@ 'x64.optdebug': 'default_optdebug_x64', 'x64.release': 'default_release_x64', }, - - 'client.dart.fyi': { - 'v8-linux-release': 'release_x86_disassembler', - 'v8-win-release': 'release_x86_disassembler', - 'v8-mac-release': 'release_x86_disassembler', - }, 'client.dynamorio': { 'linux-v8-dr': 'release_x64', }, @@ 
-110,37 +104,39 @@ 'V8 Random Deopt Fuzzer - debug': 'debug_x64', }, 'client.v8.clusterfuzz': { - 'V8 Win64 ASAN - release builder': + 'V8 Clusterfuzz Win64 ASAN - release builder': 'release_x64_asan_no_lsan_verify_heap', # Note this is called a debug builder, but it uses a release build # configuration with dchecks (which enables DEBUG in V8), since win-asan # debug is not supported. - 'V8 Win64 ASAN - debug builder': + 'V8 Clusterfuzz Win64 ASAN - debug builder': 'release_x64_asan_no_lsan_verify_heap_dchecks', - 'V8 Mac64 ASAN - release builder': + 'V8 Clusterfuzz Mac64 ASAN - release builder': 'release_x64_asan_no_lsan_edge_verify_heap', - 'V8 Mac64 ASAN - debug builder': + 'V8 Clusterfuzz Mac64 ASAN - debug builder': 'debug_x64_asan_no_lsan_static_edge', - 'V8 Linux64 - release builder': 'release_x64_correctness_fuzzer', - 'V8 Linux64 - debug builder': 'debug_x64', - 'V8 Linux64 - nosnap release builder': 'release_x64_no_snap', - 'V8 Linux64 - nosnap debug builder': 'debug_x64_no_snap', - 'V8 Linux64 ASAN no inline - release builder': + 'V8 Clusterfuzz Linux64 - release builder': + 'release_x64_correctness_fuzzer', + 'V8 Clusterfuzz Linux64 - debug builder': 'debug_x64', + 'V8 Clusterfuzz Linux64 - nosnap release builder': 'release_x64_no_snap', + 'V8 Clusterfuzz Linux64 - nosnap debug builder': 'debug_x64_no_snap', + 'V8 Clusterfuzz Linux64 ASAN no inline - release builder': 'release_x64_asan_symbolized_edge_verify_heap', - 'V8 Linux64 ASAN - debug builder': 'debug_x64_asan_edge', - 'V8 Linux64 ASAN arm64 - debug builder': + 'V8 Clusterfuzz Linux64 ASAN - debug builder': 'debug_x64_asan_edge', + 'V8 Clusterfuzz Linux64 ASAN arm64 - debug builder': 'debug_simulate_arm64_asan_edge', - 'V8 Linux ASAN arm - debug builder': + 'V8 Clusterfuzz Linux ASAN arm - debug builder': 'debug_simulate_arm_asan_edge', - 'V8 Linux ASAN mipsel - debug builder': + 'V8 Clusterfuzz Linux ASAN mipsel - debug builder': 'debug_simulate_mipsel_asan_edge', - 'V8 Linux64 CFI - release 
builder': 'release_x64_cfi_clusterfuzz', - 'V8 Linux MSAN no origins': + 'V8 Clusterfuzz Linux64 CFI - release builder': + 'release_x64_cfi_clusterfuzz', + 'V8 Clusterfuzz Linux MSAN no origins': 'release_simulate_arm64_msan_no_origins_edge', - 'V8 Linux MSAN chained origins': + 'V8 Clusterfuzz Linux MSAN chained origins': 'release_simulate_arm64_msan_edge', - 'V8 Linux64 TSAN - release builder': 'release_x64_tsan', - 'V8 Linux64 UBSanVptr - release builder': + 'V8 Clusterfuzz Linux64 TSAN - release builder': 'release_x64_tsan', + 'V8 Clusterfuzz Linux64 UBSanVptr - release builder': 'release_x64_ubsan_vptr_recover_edge', }, 'client.v8.ports': { @@ -207,6 +203,7 @@ 'v8_linux_gcc_compile_rel': 'release_x86_gcc_minimal_symbols', 'v8_linux_gcc_rel': 'release_x86_gcc_minimal_symbols', 'v8_linux_shared_compile_rel': 'release_x86_shared_verify_heap', + 'v8_linux64_dbg_ng': 'debug_x64_trybot', 'v8_linux64_gcc_compile_dbg': 'debug_x64_gcc', 'v8_linux64_fyi_rel_ng': 'release_x64_test_features_trybot', 'v8_linux64_rel_ng': 'release_x64_test_features_trybot', @@ -238,6 +235,7 @@ 'v8_mac64_dbg': 'debug_x64', 'v8_mac64_dbg_ng': 'debug_x64', 'v8_mac64_asan_rel': 'release_x64_asan_no_lsan', + 'v8_mips_compile_rel': 'release_mips_no_snap_no_i18n', 'v8_linux_arm_rel_ng': 'release_simulate_arm_trybot', 'v8_linux_arm_dbg': 'debug_simulate_arm', 'v8_linux_arm_armv8a_rel': 'release_simulate_arm_trybot', @@ -417,7 +415,7 @@ 'release_x64_fuchsia_trybot': [ 'release_trybot', 'x64', 'fuchsia'], 'release_x64_gcc_coverage': [ - 'release_bot', 'x64', 'coverage', 'gcc'], + 'release_bot', 'x64', 'coverage', 'gcc', 'no_custom_libcxx', 'no_sysroot'], 'release_x64_internal': [ 'release_bot', 'x64', 'v8_enable_embedded_builtins', 'v8_snapshot_internal'], @@ -503,8 +501,6 @@ # Release configs for x86. 
'release_x86': [ 'release_bot', 'x86'], - 'release_x86_disassembler': [ - 'release_bot', 'x86', 'v8_enable_disassembler'], 'release_x86_gcc': [ 'release_bot', 'x86', 'gcc'], 'release_x86_gcc_minimal_symbols': [ @@ -566,14 +562,14 @@ 'cfi': { 'mixins': ['v8_enable_test_features'], - 'gn_args': ('is_cfi=true use_cfi_cast=true use_cfi_diag=true ' - 'use_cfi_recover=false'), + 'gn_args': ('is_cfi=true use_cfi_cast=true use_cfi_icall=true ' + 'use_cfi_diag=true use_cfi_recover=false'), }, 'cfi_clusterfuzz': { 'mixins': ['v8_enable_test_features'], - 'gn_args': ('is_cfi=true use_cfi_cast=true use_cfi_diag=true ' - 'use_cfi_recover=true'), + 'gn_args': ('is_cfi=true use_cfi_cast=true use_cfi_icall=true ' + 'use_cfi_diag=true use_cfi_recover=true'), }, 'clang': { @@ -669,6 +665,10 @@ 'gn_args': 'is_clang=false', }, + 'no_custom_libcxx': { + 'gn_args': 'use_custom_libcxx=false', + }, + 'no_sysroot': { 'gn_args': 'use_sysroot=false', }, @@ -763,10 +763,6 @@ 'gn_args': 'v8_correctness_fuzzer=true v8_multi_arch_build=true', }, - 'v8_enable_disassembler': { - 'gn_args': 'v8_enable_disassembler=true', - }, - 'v8_enable_embedded_builtins': { 'gn_args': 'v8_enable_embedded_builtins=true', }, diff --git a/deps/v8/infra/testing/PRESUBMIT.py b/deps/v8/infra/testing/PRESUBMIT.py new file mode 100644 index 00000000000000..9f242a929943af --- /dev/null +++ b/deps/v8/infra/testing/PRESUBMIT.py @@ -0,0 +1,183 @@ +# Copyright 2018 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +""" +Presubmit checks for the validity of V8-side test specifications in pyl files. + +For simplicity, we check all pyl files on any changes in this folder. +""" + +import ast +import os + + +SUPPORTED_BUILDER_SPEC_KEYS = [ + 'swarming_dimensions', + 'swarming_task_attrs', + 'tests', +] + +# This is not an exhaustive list. It only reflects what we currently use. 
If +# there's need to specify a different dimension, just add it here. +SUPPORTED_SWARMING_DIMENSIONS = [ + 'cores', + 'cpu', + 'os', +] + +# This is not an exhaustive list. It only reflects what we currently use. If +# there's need to specify a different property, just add it here. +SUPPORTED_SWARMING_TASK_ATTRS = [ + 'expiration', + 'hard_timeout', + 'priority', +] + +SUPPORTED_TEST_KEYS = [ + 'name', + 'shards', + 'suffix', + 'swarming_dimensions', + 'swarming_task_attrs', + 'test_args', + 'variant', +] + +def check_keys(error_msg, src_dict, supported_keys): + errors = [] + for key in src_dict.keys(): + if key not in supported_keys: + errors += error_msg('Key "%s" must be one of %s' % (key, supported_keys)) + return errors + + +def _check_properties(error_msg, src_dict, prop_name, supported_keys): + properties = src_dict.get(prop_name, {}) + if not isinstance(properties, dict): + return error_msg('Value for %s must be a dict' % prop_name) + return check_keys(error_msg, properties, supported_keys) + + +def _check_int_range(error_msg, src_dict, prop_name, lower_bound=None, + upper_bound=None): + if prop_name not in src_dict: + # All properties are optional. 
+ return [] + try: + value = int(src_dict[prop_name]) + except ValueError: + return error_msg('If specified, %s must be an int' % prop_name) + if lower_bound is not None and value < lower_bound: + return error_msg('If specified, %s must be >=%d' % (prop_name, lower_bound)) + if upper_bound is not None and value > upper_bound: + return error_msg('If specified, %s must be <=%d' % (prop_name, upper_bound)) + return [] + + +def _check_swarming_task_attrs(error_msg, src_dict): + errors = [] + task_attrs = src_dict.get('swarming_task_attrs', {}) + errors += _check_int_range( + error_msg, task_attrs, 'priority', lower_bound=25, upper_bound=100) + errors += _check_int_range( + error_msg, task_attrs, 'expiration', lower_bound=1) + errors += _check_int_range( + error_msg, task_attrs, 'hard_timeout', lower_bound=1) + return errors + + +def _check_swarming_config(error_msg, src_dict): + errors = [] + errors += _check_properties( + error_msg, src_dict, 'swarming_dimensions', + SUPPORTED_SWARMING_DIMENSIONS) + errors += _check_properties( + error_msg, src_dict, 'swarming_task_attrs', + SUPPORTED_SWARMING_TASK_ATTRS) + errors += _check_swarming_task_attrs(error_msg, src_dict) + return errors + + +def _check_test(error_msg, test): + if not isinstance(test, dict): + return error_msg('Each test must be specified with a dict') + errors = check_keys(error_msg, test, SUPPORTED_TEST_KEYS) + if not test.get('name'): + errors += error_msg('A test requires a name') + errors += _check_swarming_config(error_msg, test) + + test_args = test.get('test_args', []) + if not isinstance(test_args, list): + errors += error_msg('If specified, test_args must be a list of arguments') + if not all(isinstance(x, basestring) for x in test_args): + errors += error_msg('If specified, all test_args must be strings') + + # Limit shards to 10 to avoid erroneous resource exhaustion. 
+ errors += _check_int_range( + error_msg, test, 'shards', lower_bound=1, upper_bound=10) + + variant = test.get('variant', 'default') + if not variant or not isinstance(variant, basestring): + errors += error_msg('If specified, variant must be a non-empty string') + + return errors + + +def _check_test_spec(file_path, raw_pyl): + def error_msg(msg): + return ['Error in %s:\n%s' % (file_path, msg)] + + try: + # Eval python literal file. + full_test_spec = ast.literal_eval(raw_pyl) + except SyntaxError as e: + return error_msg('Pyl parsing failed with:\n%s' % e) + + if not isinstance(full_test_spec, dict): + return error_msg('Test spec must be a dict') + + errors = [] + for buildername, builder_spec in full_test_spec.iteritems(): + def error_msg(msg): + return ['Error in %s for builder %s:\n%s' % (file_path, buildername, msg)] + + if not isinstance(buildername, basestring) or not buildername: + errors += error_msg('Buildername must be a non-empty string') + + if not isinstance(builder_spec, dict) or not builder_spec: + errors += error_msg('Value must be a non-empty dict') + continue + + errors += check_keys(error_msg, builder_spec, SUPPORTED_BUILDER_SPEC_KEYS) + errors += _check_swarming_config(error_msg, builder_spec) + + for test in builder_spec.get('tests', []): + errors += _check_test(error_msg, test) + + return errors + + + +def CheckChangeOnCommit(input_api, output_api): + def file_filter(regexp): + return lambda f: input_api.FilterSourceFile(f, white_list=(regexp,)) + + # Calculate which files are affected. + if input_api.AffectedFiles(False, file_filter(r'.*PRESUBMIT\.py')): + # If PRESUBMIT.py itself was changed, check also the test spec. + affected_files = [ + os.path.join(input_api.PresubmitLocalPath(), 'builders.pyl'), + ] + else: + # Otherwise, check test spec only when changed. 
+ affected_files = [ + f.AbsoluteLocalPath() + for f in input_api.AffectedFiles(False, file_filter(r'.*builders\.pyl')) + ] + + errors = [] + for file_path in affected_files: + with open(file_path) as f: + errors += _check_test_spec(file_path, f.read()) + return [output_api.PresubmitError(r) for r in errors] diff --git a/deps/v8/infra/testing/README.md b/deps/v8/infra/testing/README.md index 8658768cac9102..438ba2e6d064c4 100644 --- a/deps/v8/infra/testing/README.md +++ b/deps/v8/infra/testing/README.md @@ -7,38 +7,65 @@ variants specified [here](https://chromium.googlesource.com/v8/v8/+/master/tools Changes to src-side test specifications go through CQ like any other CL and require tests added for specific trybots to pass. -The test specifications are defined in a V8-side folder called infra/testing. -Every master has an optional file named `.pyl`. E.g. -`tryserver.v8.pyl`. +The test specifications are defined in a V8-side python-literal file +`infra/testing/builders.pyl`. -The structure of each file is: +The structure of the file is: ``` { - : [ - { - 'name': , - 'variant': , - 'shards': , - }, - ... - ], + : { + 'tests': [ + { + 'name': , + 'suffix': , + 'variant': , + 'shards': , + 'test_args': , + 'swarming_task_attrs': {...}, + 'swarming_dimensions': {...}, + }, + ... + ], + 'swarming_task_attrs': {...}, + 'swarming_dimensions': {...}, + }, ... } ``` The `` is a string name of the builder to execute the tests. `` is a label defining a test specification matching the [infra-side](https://chromium.googlesource.com/chromium/tools/build/+/master/scripts/slave/recipe_modules/v8/testing.py#58). -The `` is a testing variant specified +The optional `suffix` will be appended to test-step names for disambiguation. +The optional `variant` is a testing variant specified [here](https://chromium.googlesource.com/v8/v8/+/master/tools/testrunner/local/variants.py). -`` is optional (default 1), but can be provided to increase -the swarming shards for long-running tests. 
+The optional `shards` (default 1) can be provided to increase the swarming +shards for long-running tests. +The optional `test_args` is a list of string flags that will be passed to the +V8 test driver. +The optional `swarming_task_attrs` is a dict allowing to override the defaults +for `priority`, `expiration` and `hard_timeout`. +The optional `swarming_dimensions` is a dict allowing to override the defaults +for `cpu`, `cores` and `os`. +Both `swarming_task_attrs` and `swarming_dimensions` can be defined per builder +and per test, whereas the latter takes precedence. Example: ``` { - 'v8_linux64_rel_ng_triggered': [ - {'name': 'v8testing', 'variant': 'nooptimization', 'shards': 2}, - ], + 'v8_linux64_rel_ng_triggered': { + 'tests': [ + { + 'name': 'v8testing', + 'suffix': 'stress', + 'variant': 'nooptimization', + 'shards': 2, + 'test_args': ['--gc-stress'], + 'swarming_dimensions': {'os': 'Ubuntu-14.4'}, + }, + ], + 'swarming_properties': {'priority': 35}, + 'swarming_dimensions': {'os': 'Ubuntu'}, + }, } ``` diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl new file mode 100644 index 00000000000000..4d76d696b5dccd --- /dev/null +++ b/deps/v8/infra/testing/builders.pyl @@ -0,0 +1,432 @@ +# Copyright 2018 The V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. +# +# Please keep builder names, builder configs and test definitions sorted. +# Builder names should be sorted alphabetically. Builder configs should have +# keys sorted in the alphabetical order except 'tests' key, which should always +# come last. Test definitions must have keys in the following order, but omit +# optional fields: +# * name (required) +# * variant +# * test_args +# * shards +# * suffix +# * swarming_dimensions +# * swarming_task_attrs +# +# Please also format test definitions as a single line with ', ' separating +# fields, e.g. 
+# +# {'name': 'v8testing', 'variant': 'extra', 'shards': 2} +# +# After formatting test definitions this way, please sort them alphabetically by +# test name. For all variants of the test with the same name, the +# least-qualified test (no variant, no test args) should come first. You may +# also deviate from the alphabetical order if necessary and group tests +# differently, but in this case please add a comment before each group and +# continue to sort tests using the rules above within each group. + +{ + ############################################################################## + ### luci.v8.try + ############################################################################## + # Linux32 + 'v8_linux_dbg_ng_triggered': { + 'swarming_dimensions' : { + 'cpu': 'x86-64-avx2', + }, + 'tests': [ + {'name': 'benchmarks'}, + {'name': 'benchmarks', 'variant': 'extra'}, + {'name': 'mjsunit_sp_frame_access'}, + {'name': 'mozilla'}, + {'name': 'mozilla', 'variant': 'extra'}, + {'name': 'test262'}, + {'name': 'test262_variants', 'variant': 'extra', 'shards': 3}, + {'name': 'v8testing', 'shards': 3}, + {'name': 'v8testing', 'variant': 'extra', 'shards': 2}, + ], + }, + 'v8_linux_gc_stress_dbg': { + 'tests': [ + {'name': 'mjsunit', 'variant': 'slow_path', 'test_args': ['--gc-stress'], 'shards': 2}, + {'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 5}, + ], + }, + 'v8_linux_gcc_rel': { + 'tests': [ + {'name': 'v8testing'}, + ], + }, + 'v8_linux_nodcheck_rel_ng_triggered': { + 'swarming_dimensions' : { + 'cpu': 'x86-64-avx2', + }, + 'tests': [ + {'name': 'benchmarks'}, + {'name': 'benchmarks', 'variant': 'extra'}, + {'name': 'mozilla'}, + {'name': 'mozilla', 'variant': 'extra'}, + {'name': 'test262_variants', 'shards': 2}, + {'name': 'test262_variants', 'variant': 'extra', 'shards': 2}, + {'name': 'v8testing'}, + {'name': 'v8testing', 'variant': 'extra'}, + ], + }, + 'v8_linux_noi18n_rel_ng_triggered': { + 'tests': [ + {'name': 'mozilla', 'variant': 'default'}, 
+ {'name': 'test262', 'variant': 'default'}, + {'name': 'v8testing', 'variant': 'default', 'shards': 2}, + ], + }, + 'v8_linux_nosnap_rel': { + 'tests': [ + {'name': 'v8testing', 'variant': 'default', 'shards': 4}, + ], + }, + 'v8_linux_nosnap_dbg': { + 'swarming_task_attrs': { + 'hard_timeout': 3600, + }, + 'tests': [ + {'name': 'v8testing', 'variant': 'default', 'shards': 9}, + ], + }, + 'v8_linux_rel_ng_triggered': { + 'swarming_dimensions' : { + 'cpu': 'x86-64-avx2', + }, + 'tests': [ + {'name': 'benchmarks'}, + {'name': 'benchmarks', 'variant': 'extra'}, + {'name': 'gcmole'}, + {'name': 'mjsunit_sp_frame_access'}, + {'name': 'mozilla'}, + {'name': 'mozilla', 'variant': 'extra'}, + {'name': 'optimize_for_size'}, + {'name': 'test262_variants', 'shards': 4}, + {'name': 'test262_variants', 'variant': 'extra', 'shards': 2}, + {'name': 'v8testing'}, + {'name': 'v8testing', 'variant': 'extra'}, + ], + }, + 'v8_linux_verify_csa_rel_ng_triggered': { + 'tests': [ + {'name': 'v8testing'}, + ], + }, + ############################################################################## + # Linux32 with arm simulators + 'v8_linux_arm_dbg': { + 'tests': [ + {'name': 'mjsunit_sp_frame_access'}, + {'name': 'mozilla'}, + {'name': 'test262'}, + {'name': 'v8testing', 'shards': 7}, + {'name': 'v8testing', 'variant': 'extra', 'shards': 3}, + ], + }, + 'v8_linux_arm_rel_ng_triggered': { + 'tests': [ + {'name': 'mjsunit_sp_frame_access'}, + {'name': 'mozilla'}, + {'name': 'test262'}, + {'name': 'v8testing', 'shards': 7}, + {'name': 'v8testing', 'variant': 'extra', 'shards': 3}, + ], + }, + ############################################################################## + # Linux64 + 'v8_linux64_asan_rel_ng_triggered': { + 'tests': [ + {'name': 'test262_variants', 'shards': 7}, + {'name': 'v8testing', 'shards': 3}, + {'name': 'v8testing', 'variant': 'extra', 'shards': 2}, + {'name': 'v8testing', 'variant': 'slow_path'}, + ], + }, + 'v8_linux64_cfi_rel_ng_triggered': { + 'tests': [ + {'name': 
'benchmarks'}, + {'name': 'mozilla'}, + {'name': 'optimize_for_size'}, + {'name': 'test262'}, + {'name': 'v8testing', 'shards': 2}, + ], + }, + 'v8_linux64_dbg_ng_triggered': { + 'swarming_dimensions' : { + 'cpu': 'x86-64-avx2', + }, + 'tests': [ + {'name': 'benchmarks'}, + {'name': 'benchmarks', 'variant': 'extra'}, + {'name': 'mjsunit_sp_frame_access'}, + {'name': 'mozilla'}, + {'name': 'mozilla', 'variant': 'extra'}, + {'name': 'test262'}, + {'name': 'test262_variants', 'variant': 'extra', 'shards': 3}, + {'name': 'v8testing', 'shards': 3}, + {'name': 'v8testing', 'variant': 'extra', 'shards': 2}, + {'name': 'v8testing', 'variant': 'minor_mc', 'shards': 1}, + ], + }, + 'v8_linux64_fyi_rel_ng_triggered': { + 'tests': [ + # Stress sampling. + {'name': 'mjsunit', 'variant': 'stress_sampling'}, + {'name': 'webkit', 'variant': 'stress_sampling'}, + # Infra staging. + {'name': 'test262_variants', 'variant': 'infra_staging', 'shards': 2}, + {'name': 'v8testing', 'variant': 'infra_staging', 'shards': 2}, + ], + }, + 'v8_linux64_msan_rel': { + 'tests': [ + {'name': 'test262', 'shards': 2}, + {'name': 'v8testing', 'shards': 5}, + ], + }, + 'v8_linux64_rel_ng_triggered': { + 'swarming_dimensions' : { + 'cpu': 'x86-64-avx2', + }, + 'tests': [ + # TODO(machenbach): Add benchmarks. + # TODO(machenbach): Add mozilla tests. 
+ {'name': 'mjsunit_sp_frame_access'}, + {'name': 'optimize_for_size'}, + {'name': 'test262_variants', 'shards': 4}, + {'name': 'test262_variants', 'variant': 'extra', 'shards': 2}, + {'name': 'v8initializers'}, + {'name': 'v8testing'}, + {'name': 'v8testing', 'variant': 'extra'}, + {'name': 'v8testing', 'variant': 'minor_mc'}, + {'name': 'v8testing', 'variant': 'slow_path'}, + ], + }, + 'v8_linux64_tsan_rel': { + 'tests': [ + {'name': 'benchmarks'}, + {'name': 'mozilla'}, + {'name': 'test262', 'shards': 3}, + {'name': 'v8testing', 'shards': 5}, + {'name': 'v8testing', 'variant': 'extra', 'shards': 3}, + {'name': 'v8testing', 'variant': 'slow_path'}, + ], + }, + 'v8_linux64_ubsan_rel_ng_triggered': { + 'tests': [ + {'name': 'v8testing', 'shards': 2}, + ], + }, + 'v8_linux64_verify_csa_rel_ng_triggered': { + 'tests': [ + {'name': 'v8testing'}, + ], + }, + ############################################################################## + # Linux64 with arm64 simulators + 'v8_linux_arm64_dbg': { + 'tests': [ + {'name': 'mjsunit_sp_frame_access'}, + {'name': 'mozilla'}, + {'name': 'test262'}, + {'name': 'v8testing', 'shards': 7}, + {'name': 'v8testing', 'variant': 'extra', 'shards': 3}, + ], + }, + 'v8_linux_arm64_gc_stress_dbg': { + 'tests': [ + {'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 8}, + ], + }, + 'v8_linux_arm64_rel_ng_triggered': { + 'tests': [ + {'name': 'mjsunit_sp_frame_access'}, + {'name': 'mozilla'}, + {'name': 'test262'}, + {'name': 'v8testing', 'shards': 7}, + {'name': 'v8testing', 'variant': 'extra', 'shards': 3}, + ], + }, + ############################################################################## + # Win32 + 'v8_win_dbg': { + 'swarming_dimensions' : { + 'cpu': 'x86-64', + 'os': 'Windows-7-SP1', + }, + 'tests': [ + {'name': 'mozilla'}, + {'name': 'v8testing', 'shards': 3}, + ], + }, + 'v8_win_nosnap_shared_rel_ng_triggered': { + 'swarming_dimensions' : { + 'cpu': 'x86-64', + 'os': 'Windows-7-SP1', + }, + 'tests': [ + {'name': 
'v8testing', 'variant': 'default', 'shards': 3}, + ], + }, + 'v8_win_rel_ng_triggered': { + 'swarming_dimensions' : { + 'cpu': 'x86-64', + 'os': 'Windows-7-SP1', + }, + 'tests': [ + {'name': 'test262'}, + {'name': 'v8testing'}, + ], + }, + ############################################################################## + # Win64 + 'v8_win64_asan_rel_ng_triggered': { + 'swarming_dimensions' : { + 'os': 'Windows-10', + }, + 'tests': [ + {'name': 'v8testing', 'shards': 5}, + ], + }, + 'v8_win64_dbg': { + 'swarming_dimensions' : { + 'cpu': 'x86-64', + 'os': 'Windows-7-SP1', + }, + 'tests': [ + {'name': 'mozilla'}, + {'name': 'test262', 'shards': 2}, + {'name': 'v8testing', 'shards': 3}, + {'name': 'v8testing', 'variant': 'extra', 'shards': 2}, + ], + }, + 'v8_win64_msvc_rel_ng_triggered': { + 'swarming_dimensions' : { + 'cpu': 'x86-64', + 'os': 'Windows-7-SP1', + }, + 'tests': [ + {'name': 'mozilla'}, + {'name': 'test262'}, + {'name': 'v8testing'}, + ], + }, + 'v8_win64_rel_ng_triggered': { + 'swarming_dimensions' : { + 'cpu': 'x86-64', + 'os': 'Windows-7-SP1', + }, + 'tests': [ + {'name': 'test262'}, + {'name': 'v8testing'}, + {'name': 'v8testing', 'variant': 'extra'}, + ], + }, + ############################################################################## + # Mac64 + 'v8_mac64_asan_rel': { + 'swarming_dimensions' : { + 'cpu': 'x86-64', + 'os': 'Mac-10.13', + }, + 'tests': [ + {'name': 'v8testing', 'shards': 4}, + ], + }, + 'v8_mac64_dbg_ng_triggered': { + 'swarming_dimensions' : { + 'cpu': 'x86-64', + 'os': 'Mac-10.13', + }, + 'tests': [ + {'name': 'mozilla'}, + {'name': 'test262'}, + {'name': 'v8testing', 'shards': 3}, + {'name': 'v8testing', 'variant': 'extra', 'shards': 2}, + ], + }, + 'v8_mac64_gc_stress_dbg': { + 'swarming_dimensions' : { + 'cpu': 'x86-64', + 'os': 'Mac-10.13', + }, + 'tests': [ + {'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 4}, + ], + }, + 'v8_mac64_rel_ng_triggered': { + 'swarming_dimensions' : { + 'cpu': 'x86-64', + 'os': 
'Mac-10.13', + }, + 'tests': [ + {'name': 'mozilla'}, + {'name': 'test262'}, + {'name': 'v8testing'}, + {'name': 'v8testing', 'variant': 'extra'}, + ], + }, + ############################################################################## + ### luci.v8.ci + ############################################################################## + # Linux32 + 'V8 Linux - debug': { + 'tests': [ + {'name': 'benchmarks', 'variant': 'code_serializer', 'shards': 1}, + {'name': 'd8testing', 'variant': 'code_serializer', 'shards': 1}, + {'name': 'mozilla', 'variant': 'code_serializer', 'shards': 1}, + {'name': 'test262_variants', 'variant': 'code_serializer', 'shards': 1}, + ], + }, + 'V8 Linux - gc stress': { + 'tests': [ + {'name': 'mjsunit', 'variant': 'slow_path', 'shards': 2}, + ], + }, + ############################################################################## + # Linux64 + 'V8 Linux64': { + 'tests': [ + {'name': 'v8testing', 'variant': 'minor_mc', 'shards': 1}, + ], + }, + 'V8 Linux64 - debug': { + 'tests': [ + {'name': 'v8testing', 'variant': 'minor_mc', 'shards': 1}, + {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1}, + ], + }, + 'V8 Linux64 - debug - fyi': { + 'tests': [ + # Infra staging. + {'name': 'test262_variants', 'variant': 'infra_staging', 'shards': 3}, + {'name': 'v8testing', 'variant': 'infra_staging', 'shards': 2}, + # Stress sampling. 
+ {'name': 'mjsunit', 'variant': 'stress_sampling', 'shards': 1}, + {'name': 'webkit', 'variant': 'stress_sampling', 'shards': 1}, + ], + }, + 'V8 Linux64 - fyi': { + 'tests': [ + {'name': 'mjsunit', 'variant': 'stress_sampling', 'shards': 1}, + {'name': 'test262_variants', 'variant': 'infra_staging', 'shards': 2}, + {'name': 'v8testing', 'variant': 'infra_staging', 'shards': 1}, + {'name': 'webkit', 'variant': 'stress_sampling', 'shards': 1}, + ], + }, + 'V8 Linux64 ASAN': { + 'tests': [ + {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1}, + ], + }, + 'V8 Linux64 TSAN': { + 'tests': [ + {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1}, + ], + }, +} diff --git a/deps/v8/infra/testing/client.v8.pyl b/deps/v8/infra/testing/client.v8.pyl deleted file mode 100644 index ab1744fc78b33e..00000000000000 --- a/deps/v8/infra/testing/client.v8.pyl +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2017 The V8 project authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -{ - ### Example configuration for CI bots (please keep as reference). 
- # 'V8 Linux64': [ - # {'name': 'benchmarks', 'variant': 'default', 'shards': 1}, - # ], - # 'V8 Linux64 - debug': [ - # {'name': 'benchmarks', 'variant': 'default', 'shards': 1}, - # ], - - 'V8 Linux - debug': [ - {'name': 'd8testing', 'variant': 'code_serializer', 'shards': 1}, - {'name': 'mozilla', 'variant': 'code_serializer', 'shards': 1}, - {'name': 'test262_variants', 'variant': 'code_serializer', 'shards': 1}, - {'name': 'benchmarks', 'variant': 'code_serializer', 'shards': 1}, - ], - 'V8 Linux - gc stress': [ - {'name': 'mjsunit', 'variant': 'slow_path', 'shards': 2}, - ], - 'V8 Linux64': [ - {'name': 'v8testing', 'variant': 'minor_mc', 'shards': 1}, - ], - 'V8 Linux64 - debug': [ - {'name': 'v8testing', 'variant': 'minor_mc', 'shards': 1}, - {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1}, - ], - 'V8 Linux64 ASAN': [ - {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1}, - ], - 'V8 Linux64 TSAN': [ - {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1}, - ], - 'V8 Linux64 - fyi': [ - {'name': 'v8testing', 'variant': 'infra_staging', 'shards': 1}, - {'name': 'test262_variants', 'variant': 'infra_staging', 'shards': 2}, - {'name': 'mjsunit', 'variant': 'stress_sampling', 'shards': 1}, - {'name': 'webkit', 'variant': 'stress_sampling', 'shards': 1}, - ], - 'V8 Linux64 - debug - fyi': [ - {'name': 'v8testing', 'variant': 'infra_staging', 'shards': 2}, - {'name': 'test262_variants', 'variant': 'infra_staging', 'shards': 3}, - {'name': 'mjsunit', 'variant': 'stress_sampling', 'shards': 1}, - {'name': 'webkit', 'variant': 'stress_sampling', 'shards': 1}, - ], -} diff --git a/deps/v8/infra/testing/tryserver.v8.pyl b/deps/v8/infra/testing/tryserver.v8.pyl deleted file mode 100644 index ee6abae5d533d3..00000000000000 --- a/deps/v8/infra/testing/tryserver.v8.pyl +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2017 The V8 project authors. All rights reserved. 
-# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -{ - ### Example configuration for trybots (please keep as reference). - # 'v8_linux64_rel_ng_triggered': [ - # {'name': 'benchmarks', 'variant': 'default', 'shards': 1}, - # ], - - 'v8_linux64_fyi_rel_ng_triggered': [ - {'name': 'v8testing', 'variant': 'infra_staging', 'shards': 2}, - {'name': 'test262_variants', 'variant': 'infra_staging', 'shards': 2}, - {'name': 'mjsunit', 'variant': 'stress_sampling', 'shards': 1}, - {'name': 'webkit', 'variant': 'stress_sampling', 'shards': 1}, - ], - 'v8_linux64_rel_ng_triggered': [ - {'name': 'v8testing', 'variant': 'minor_mc', 'shards': 1}, - {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1}, - ], - 'v8_linux_gc_stress_dbg': [ - {'name': 'mjsunit', 'variant': 'slow_path', 'shards': 2}, - ], - 'v8_linux64_asan_rel_ng_triggered': [ - {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1}, - ], - 'v8_linux64_tsan_rel': [ - {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1}, - ], -} diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS index 050f91d6d6d990..275595d0d80481 100644 --- a/deps/v8/src/DEPS +++ b/deps/v8/src/DEPS @@ -27,7 +27,8 @@ include_rules = [ "+src/trap-handler/trap-handler.h", "+testing/gtest/include/gtest/gtest_prod.h", "-src/libplatform", - "-include/libplatform" + "-include/libplatform", + "+torque-generated" ] specific_include_rules = { diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc index f292988b8e1e5d..565c019092b8a1 100644 --- a/deps/v8/src/accessors.cc +++ b/deps/v8/src/accessors.cc @@ -12,6 +12,7 @@ #include "src/heap/factory.h" #include "src/isolate-inl.h" #include "src/messages.h" +#include "src/objects/api-callbacks.h" #include "src/property-details.h" #include "src/prototype.h" @@ -37,7 +38,7 @@ Handle Accessors::MakeAccessor( info->set_getter(*get); info->set_setter(*set); Address redirected = info->redirected_getter(); - if (redirected != nullptr) { + if 
(redirected != kNullAddress) { Handle js_get = v8::FromCData(isolate, redirected); info->set_js_getter(*js_get); } @@ -76,12 +77,12 @@ bool Accessors::IsJSObjectFieldAccessor(Handle map, Handle name, } } - -namespace { - -V8_WARN_UNUSED_RESULT MaybeHandle ReplaceAccessorWithDataProperty( - Isolate* isolate, Handle receiver, Handle holder, - Handle name, Handle value) { +V8_WARN_UNUSED_RESULT MaybeHandle +Accessors::ReplaceAccessorWithDataProperty(Isolate* isolate, + Handle receiver, + Handle holder, + Handle name, + Handle value) { LookupIterator it(receiver, name, holder, LookupIterator::OWN_SKIP_INTERCEPTOR); // Skip any access checks we might hit. This accessor should never hit in a @@ -96,7 +97,6 @@ V8_WARN_UNUSED_RESULT MaybeHandle ReplaceAccessorWithDataProperty( return value; } -} // namespace // // Accessors::ReconfigureToDataProperty @@ -113,8 +113,8 @@ void Accessors::ReconfigureToDataProperty( Handle::cast(Utils::OpenHandle(*info.Holder())); Handle name = Utils::OpenHandle(*key); Handle value = Utils::OpenHandle(*val); - MaybeHandle result = - ReplaceAccessorWithDataProperty(isolate, receiver, holder, name, value); + MaybeHandle result = Accessors::ReplaceAccessorWithDataProperty( + isolate, receiver, holder, name, value); if (result.is_null()) { isolate->OptionalRescheduleException(false); } else { @@ -122,17 +122,6 @@ void Accessors::ReconfigureToDataProperty( } } -void Accessors::ReconfigureToDataPropertyGetter( - v8::Local name, const v8::PropertyCallbackInfo& info) { - UNREACHABLE(); -} - -Handle Accessors::MakeReconfigureToDataPropertyInfo( - Isolate* isolate) { - Handle name = isolate->factory()->ReconfigureToDataProperty_string(); - return MakeAccessor(isolate, name, &ReconfigureToDataPropertyGetter, - &ReconfigureToDataProperty); -} // // Accessors::ArgumentsIterator @@ -1194,8 +1183,8 @@ void Accessors::ErrorStackGetter( Utils::OpenHandle(*v8::Local(info.This())); Handle name = Utils::OpenHandle(*key); if (IsAccessor(receiver, name, holder)) { 
- result = ReplaceAccessorWithDataProperty(isolate, receiver, holder, name, - formatted_stack_trace); + result = Accessors::ReplaceAccessorWithDataProperty( + isolate, receiver, holder, name, formatted_stack_trace); if (result.is_null()) { isolate->OptionalRescheduleException(false); return; diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h index 1911f92dbf93dd..4a1a67e93e6822 100644 --- a/deps/v8/src/accessors.h +++ b/deps/v8/src/accessors.h @@ -33,7 +33,6 @@ class JavaScriptFrame; V(function_name, FunctionName) \ V(function_length, FunctionLength) \ V(function_prototype, FunctionPrototype) \ - V(reconfigure_to_data_property, ReconfigureToDataProperty) \ V(script_column_offset, ScriptColumnOffset) \ V(script_compilation_type, ScriptCompilationType) \ V(script_context_data, ScriptContextData) \ @@ -110,6 +109,10 @@ class Accessors : public AllStatic { static bool IsJSObjectFieldAccessor(Handle map, Handle name, FieldIndex* field_index); + static MaybeHandle ReplaceAccessorWithDataProperty( + Isolate* isolate, Handle receiver, Handle holder, + Handle name, Handle value); + // Create an AccessorInfo. The setter is optional (can be nullptr). // // Note that the type of setter is AccessorNameBooleanSetterCallback instead diff --git a/deps/v8/src/address-map.h b/deps/v8/src/address-map.h index e2b815daff0aba..f3e2770847b156 100644 --- a/deps/v8/src/address-map.h +++ b/deps/v8/src/address-map.h @@ -34,13 +34,21 @@ class PointerToIndexHashMap } private: - static uintptr_t Key(Type value) { - return reinterpret_cast(value); - } + static inline uintptr_t Key(Type value); static uint32_t Hash(uintptr_t key) { return static_cast(key); } }; +template <> +inline uintptr_t PointerToIndexHashMap
::Key(Address value) { + return static_cast(value); +} + +template +inline uintptr_t PointerToIndexHashMap::Key(Type value) { + return reinterpret_cast(value); +} + class AddressToIndexHashMap : public PointerToIndexHashMap
{}; class HeapObjectToIndexHashMap : public PointerToIndexHashMap {}; diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc index f63c2f292f5b00..55c68dea89482e 100644 --- a/deps/v8/src/allocation.cc +++ b/deps/v8/src/allocation.cc @@ -206,15 +206,15 @@ bool OnCriticalMemoryPressure(size_t length) { return true; } -VirtualMemory::VirtualMemory() : address_(nullptr), size_(0) {} +VirtualMemory::VirtualMemory() : address_(kNullAddress), size_(0) {} VirtualMemory::VirtualMemory(size_t size, void* hint, size_t alignment) - : address_(nullptr), size_(0) { + : address_(kNullAddress), size_(0) { size_t page_size = AllocatePageSize(); size_t alloc_size = RoundUp(size, page_size); - address_ = - AllocatePages(hint, alloc_size, alignment, PageAllocator::kNoAccess); - if (address_ != nullptr) { + address_ = reinterpret_cast
( + AllocatePages(hint, alloc_size, alignment, PageAllocator::kNoAccess)); + if (address_ != kNullAddress) { size_ = alloc_size; } } @@ -226,31 +226,29 @@ VirtualMemory::~VirtualMemory() { } void VirtualMemory::Reset() { - address_ = nullptr; + address_ = kNullAddress; size_ = 0; } -bool VirtualMemory::SetPermissions(void* address, size_t size, +bool VirtualMemory::SetPermissions(Address address, size_t size, PageAllocator::Permission access) { CHECK(InVM(address, size)); bool result = v8::internal::SetPermissions(address, size, access); DCHECK(result); - USE(result); return result; } -size_t VirtualMemory::Release(void* free_start) { +size_t VirtualMemory::Release(Address free_start) { DCHECK(IsReserved()); - DCHECK(IsAddressAligned(static_cast
(free_start), CommitPageSize())); + DCHECK(IsAddressAligned(free_start, CommitPageSize())); // Notice: Order is important here. The VirtualMemory object might live // inside the allocated region. - const size_t free_size = size_ - (reinterpret_cast(free_start) - - reinterpret_cast(address_)); + const size_t free_size = size_ - (free_start - address_); CHECK(InVM(free_start, free_size)); DCHECK_LT(address_, free_start); - DCHECK_LT(free_start, reinterpret_cast( - reinterpret_cast(address_) + size_)); - CHECK(ReleasePages(address_, size_, size_ - free_size)); + DCHECK_LT(free_start, address_ + size_); + CHECK(ReleasePages(reinterpret_cast(address_), size_, + size_ - free_size)); size_ -= free_size; return free_size; } @@ -259,13 +257,14 @@ void VirtualMemory::Free() { DCHECK(IsReserved()); // Notice: Order is important here. The VirtualMemory object might live // inside the allocated region. - void* address = address_; + Address address = address_; size_t size = size_; CHECK(InVM(address, size)); Reset(); // FreePages expects size to be aligned to allocation granularity. Trimming // may leave size at only commit granularity. Align it here. 
- CHECK(FreePages(address, RoundUp(size, AllocatePageSize()))); + CHECK(FreePages(reinterpret_cast(address), + RoundUp(size, AllocatePageSize()))); } void VirtualMemory::TakeControl(VirtualMemory* from) { diff --git a/deps/v8/src/allocation.h b/deps/v8/src/allocation.h index 13dc3e508f3d3c..67a510c611c866 100644 --- a/deps/v8/src/allocation.h +++ b/deps/v8/src/allocation.h @@ -126,6 +126,10 @@ V8_WARN_UNUSED_RESULT bool ReleasePages(void* address, size_t size, V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT bool SetPermissions(void* address, size_t size, PageAllocator::Permission access); +inline bool SetPermissions(Address address, size_t size, + PageAllocator::Permission access) { + return SetPermissions(reinterpret_cast(address), size, access); +} // Convenience function that allocates a single system page with read and write // permissions. |address| is a hint. Returns the base address of the memory and @@ -151,14 +155,15 @@ class V8_EXPORT_PRIVATE VirtualMemory { // Construct a virtual memory by assigning it some already mapped address // and size. - VirtualMemory(void* address, size_t size) : address_(address), size_(size) {} + VirtualMemory(Address address, size_t size) + : address_(address), size_(size) {} // Releases the reserved memory, if any, controlled by this VirtualMemory // object. ~VirtualMemory(); // Returns whether the memory has been reserved. - bool IsReserved() const { return address_ != nullptr; } + bool IsReserved() const { return address_ != kNullAddress; } // Initialize or resets an embedded VirtualMemory object. void Reset(); @@ -167,15 +172,14 @@ class V8_EXPORT_PRIVATE VirtualMemory { // If the memory was reserved with an alignment, this address is not // necessarily aligned. The user might need to round it up to a multiple of // the alignment to get the start of the aligned block. 
- void* address() const { + Address address() const { DCHECK(IsReserved()); return address_; } - void* end() const { + Address end() const { DCHECK(IsReserved()); - return reinterpret_cast(reinterpret_cast(address_) + - size_); + return address_ + size_; } // Returns the size of the reserved memory. The returned value is only @@ -186,11 +190,11 @@ class V8_EXPORT_PRIVATE VirtualMemory { // Sets permissions according to the access argument. address and size must be // multiples of CommitPageSize(). Returns true on success, otherwise false. - bool SetPermissions(void* address, size_t size, + bool SetPermissions(Address address, size_t size, PageAllocator::Permission access); // Releases memory after |free_start|. Returns the number of bytes released. - size_t Release(void* free_start); + size_t Release(Address free_start); // Frees all memory. void Free(); @@ -199,15 +203,12 @@ class V8_EXPORT_PRIVATE VirtualMemory { // The old object is no longer functional (IsReserved() returns false). void TakeControl(VirtualMemory* from); - bool InVM(void* address, size_t size) { - return (reinterpret_cast(address_) <= - reinterpret_cast(address)) && - ((reinterpret_cast(address_) + size_) >= - (reinterpret_cast(address) + size)); + bool InVM(Address address, size_t size) { + return (address_ <= address) && ((address_ + size_) >= (address + size)); } private: - void* address_; // Start address of the virtual memory. + Address address_; // Start address of the virtual memory. size_t size_; // Size of the virtual memory. 
}; diff --git a/deps/v8/src/api-arguments-inl.h b/deps/v8/src/api-arguments-inl.h index 1cf9662b94997b..503cea8dcb65ab 100644 --- a/deps/v8/src/api-arguments-inl.h +++ b/deps/v8/src/api-arguments-inl.h @@ -7,6 +7,7 @@ #include "src/api-arguments.h" +#include "src/objects/api-callbacks.h" #include "src/tracing/trace-event.h" #include "src/vm-state-inl.h" diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc index 981f592f5a08ed..1b6df15d7a69d4 100644 --- a/deps/v8/src/api-natives.cc +++ b/deps/v8/src/api-natives.cc @@ -8,6 +8,9 @@ #include "src/isolate-inl.h" #include "src/lookup.h" #include "src/messages.h" +#include "src/objects/api-callbacks.h" +#include "src/objects/hash-table-inl.h" +#include "src/objects/templates.h" namespace v8 { namespace internal { diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 192ad90f83e55c..89bcb2e4fa556f 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -5,10 +5,7 @@ #include "src/api.h" #include // For memcpy, strlen. -#ifdef V8_USE_ADDRESS_SANITIZER -#include -#endif // V8_USE_ADDRESS_SANITIZER -#include // For isnan. +#include // For isnan. 
#include #include #include "include/v8-profiler.h" @@ -49,6 +46,9 @@ #include "src/json-stringifier.h" #include "src/messages.h" #include "src/objects-inl.h" +#include "src/objects/api-callbacks.h" +#include "src/objects/ordered-hash-table-inl.h" +#include "src/objects/templates.h" #include "src/parsing/parser.h" #include "src/parsing/scanner-character-streams.h" #include "src/pending-compilation-error-handler.h" @@ -79,7 +79,6 @@ #include "src/value-serializer.h" #include "src/version.h" #include "src/vm-state-inl.h" -#include "src/wasm/compilation-manager.h" #include "src/wasm/streaming-decoder.h" #include "src/wasm/wasm-engine.h" #include "src/wasm/wasm-objects-inl.h" @@ -233,10 +232,20 @@ template class CallDepthScope { public: explicit CallDepthScope(i::Isolate* isolate, Local context) - : isolate_(isolate), context_(context), escaped_(false) { + : isolate_(isolate), + context_(context), + escaped_(false), + safe_for_termination_(isolate->next_v8_call_is_safe_for_termination()), + interrupts_scope_(isolate_, i::StackGuard::TERMINATE_EXECUTION, + isolate_->only_terminate_in_safe_scope() + ? (safe_for_termination_ + ? i::InterruptsScope::kRunInterrupts + : i::InterruptsScope::kPostponeInterrupts) + : i::InterruptsScope::kNoop) { // TODO(dcarney): remove this when blink stops crashing. 
DCHECK(!isolate_->external_caught_exception()); isolate_->handle_scope_implementer()->IncrementCallDepth(); + isolate_->set_next_v8_call_is_safe_for_termination(false); if (!context.IsEmpty()) { i::Handle env = Utils::OpenHandle(*context); i::HandleScopeImplementer* impl = isolate->handle_scope_implementer(); @@ -261,6 +270,7 @@ class CallDepthScope { #ifdef V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY if (do_callback) CheckMicrotasksScopesConsistency(isolate_); #endif + isolate_->set_next_v8_call_is_safe_for_termination(safe_for_termination_); } void Escape() { @@ -277,6 +287,8 @@ class CallDepthScope { Local context_; bool escaped_; bool do_callback_; + bool safe_for_termination_; + i::InterruptsScope interrupts_scope_; }; } // namespace @@ -332,7 +344,9 @@ void i::V8::FatalProcessOutOfMemory(i::Isolate* isolate, const char* location, memset(&heap_stats, 0xBADC0DE, sizeof(heap_stats)); // Note that the embedder's oom handler won't be called in this case. We // just crash. - FATAL("API fatal error handler returned after process out of memory"); + FATAL( + "API fatal error handler returned after process out of memory on the " + "background thread"); UNREACHABLE(); } @@ -341,6 +355,10 @@ void i::V8::FatalProcessOutOfMemory(i::Isolate* isolate, const char* location, intptr_t start_marker; heap_stats.start_marker = &start_marker; + size_t ro_space_size; + heap_stats.ro_space_size = &ro_space_size; + size_t ro_space_capacity; + heap_stats.ro_space_capacity = &ro_space_capacity; size_t new_space_size; heap_stats.new_space_size = &new_space_size; size_t new_space_capacity; @@ -539,14 +557,15 @@ struct SnapshotCreatorData { } // namespace -SnapshotCreator::SnapshotCreator(const intptr_t* external_references, +SnapshotCreator::SnapshotCreator(Isolate* isolate, + const intptr_t* external_references, StartupData* existing_snapshot) { - i::Isolate* internal_isolate = new i::Isolate(true); - Isolate* isolate = reinterpret_cast(internal_isolate); SnapshotCreatorData* data = new 
SnapshotCreatorData(isolate); data->isolate_ = isolate; + i::Isolate* internal_isolate = reinterpret_cast(isolate); internal_isolate->set_array_buffer_allocator(&data->allocator_); internal_isolate->set_api_external_references(external_references); + internal_isolate->enable_serializer(); isolate->Enter(); const StartupData* blob = existing_snapshot ? existing_snapshot @@ -560,6 +579,11 @@ SnapshotCreator::SnapshotCreator(const intptr_t* external_references, data_ = data; } +SnapshotCreator::SnapshotCreator(const intptr_t* external_references, + StartupData* existing_snapshot) + : SnapshotCreator(reinterpret_cast(new i::Isolate()), + external_references, existing_snapshot) {} + SnapshotCreator::~SnapshotCreator() { SnapshotCreatorData* data = SnapshotCreatorData::cast(data_); DCHECK(data->created_); @@ -710,6 +734,8 @@ StartupData SnapshotCreator::CreateBlob( i::GarbageCollectionReason::kSnapshotCreator); isolate->heap()->CompactFixedArraysOfWeakCells(); + isolate->heap()->read_only_space()->ClearStringPaddingIfNeeded(); + i::DisallowHeapAllocation no_gc_from_here_on; int num_contexts = num_additional_contexts + 1; @@ -753,7 +779,7 @@ StartupData SnapshotCreator::CreateBlob( if (shared->CanFlushCompiled()) { shared->FlushCompiled(); } - DCHECK(shared->HasCodeObject() || shared->HasBuiltinId() || + DCHECK(shared->HasWasmExportedFunctionData() || shared->HasBuiltinId() || shared->IsApiFunction()); } } @@ -1527,7 +1553,9 @@ i::Handle MakeAccessorInfo( } SET_FIELD_WRAPPED(obj, set_setter, setter); i::Address redirected = obj->redirected_getter(); - if (redirected != nullptr) SET_FIELD_WRAPPED(obj, set_js_getter, redirected); + if (redirected != i::kNullAddress) { + SET_FIELD_WRAPPED(obj, set_js_getter, redirected); + } if (data.IsEmpty()) { data = v8::Undefined(reinterpret_cast(isolate)); } @@ -1694,13 +1722,11 @@ static i::Handle EnsureConstructor( } template -static void TemplateSetAccessor(Template* template_obj, v8::Local name, - Getter getter, Setter setter, Data 
data, - AccessControl settings, - PropertyAttribute attribute, - v8::Local signature, - bool is_special_data_property, - bool replace_on_access) { +static void TemplateSetAccessor( + Template* template_obj, v8::Local name, Getter getter, Setter setter, + Data data, AccessControl settings, PropertyAttribute attribute, + v8::Local signature, bool is_special_data_property, + bool replace_on_access, SideEffectType getter_side_effect_type) { auto info = Utils::OpenHandle(template_obj); auto isolate = info->GetIsolate(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); @@ -1710,40 +1736,38 @@ static void TemplateSetAccessor(Template* template_obj, v8::Local name, is_special_data_property, replace_on_access); accessor_info->set_initial_property_attributes( static_cast(attribute)); + accessor_info->set_has_no_side_effect(getter_side_effect_type == + SideEffectType::kHasNoSideEffect); i::ApiNatives::AddNativeDataProperty(isolate, info, accessor_info); } - -void Template::SetNativeDataProperty(v8::Local name, - AccessorGetterCallback getter, - AccessorSetterCallback setter, - v8::Local data, - PropertyAttribute attribute, - v8::Local signature, - AccessControl settings) { +void Template::SetNativeDataProperty( + v8::Local name, AccessorGetterCallback getter, + AccessorSetterCallback setter, v8::Local data, + PropertyAttribute attribute, v8::Local signature, + AccessControl settings, SideEffectType getter_side_effect_type) { TemplateSetAccessor(this, name, getter, setter, data, settings, attribute, - signature, true, false); + signature, true, false, getter_side_effect_type); } - -void Template::SetNativeDataProperty(v8::Local name, - AccessorNameGetterCallback getter, - AccessorNameSetterCallback setter, - v8::Local data, - PropertyAttribute attribute, - v8::Local signature, - AccessControl settings) { +void Template::SetNativeDataProperty( + v8::Local name, AccessorNameGetterCallback getter, + AccessorNameSetterCallback setter, v8::Local data, + PropertyAttribute attribute, 
v8::Local signature, + AccessControl settings, SideEffectType getter_side_effect_type) { TemplateSetAccessor(this, name, getter, setter, data, settings, attribute, - signature, true, false); + signature, true, false, getter_side_effect_type); } void Template::SetLazyDataProperty(v8::Local name, AccessorNameGetterCallback getter, v8::Local data, - PropertyAttribute attribute) { - TemplateSetAccessor( - this, name, getter, static_cast(nullptr), - data, DEFAULT, attribute, Local(), true, true); + PropertyAttribute attribute, + SideEffectType getter_side_effect_type) { + TemplateSetAccessor(this, name, getter, + static_cast(nullptr), data, + DEFAULT, attribute, Local(), true, + true, getter_side_effect_type); } void Template::SetIntrinsicDataProperty(Local name, Intrinsic intrinsic, @@ -1757,26 +1781,28 @@ void Template::SetIntrinsicDataProperty(Local name, Intrinsic intrinsic, static_cast(attribute)); } - void ObjectTemplate::SetAccessor(v8::Local name, AccessorGetterCallback getter, AccessorSetterCallback setter, v8::Local data, AccessControl settings, PropertyAttribute attribute, - v8::Local signature) { + v8::Local signature, + SideEffectType getter_side_effect_type) { TemplateSetAccessor(this, name, getter, setter, data, settings, attribute, - signature, i::FLAG_disable_old_api_accessors, false); + signature, i::FLAG_disable_old_api_accessors, false, + getter_side_effect_type); } - void ObjectTemplate::SetAccessor(v8::Local name, AccessorNameGetterCallback getter, AccessorNameSetterCallback setter, v8::Local data, AccessControl settings, PropertyAttribute attribute, - v8::Local signature) { + v8::Local signature, + SideEffectType getter_side_effect_type) { TemplateSetAccessor(this, name, getter, setter, data, settings, attribute, - signature, i::FLAG_disable_old_api_accessors, false); + signature, i::FLAG_disable_old_api_accessors, false, + getter_side_effect_type); } template Module::GetModuleNamespace() { return ToApiHandle(module_namespace); } +Local 
Module::GetUnboundModuleScript() { + Utils::ApiCheck( + GetStatus() < kEvaluating, "v8::Module::GetUnboundScript", + "v8::Module::GetUnboundScript must be used on an unevaluated module"); + i::Handle self = Utils::OpenHandle(this); + return ToApiHandle( + i::Handle(self->GetSharedFunctionInfo())); +} + int Module::GetIdentityHash() const { return Utils::OpenHandle(this)->hash(); } Maybe Module::InstantiateModule(Local context, @@ -2505,8 +2540,10 @@ MaybeLocal ScriptCompiler::CompileFunctionInContext( options == CompileOptions::kNoCompileOptions); i::Handle context = Utils::OpenHandle(*v8_context); - i::Handle outer_info(context->closure()->shared(), - isolate); + + DCHECK(context->IsNativeContext()); + i::Handle outer_info( + context->empty_function()->shared(), isolate); i::Handle fun; i::Handle arguments_list = @@ -2522,9 +2559,8 @@ MaybeLocal ScriptCompiler::CompileFunctionInContext( i::Handle extension = Utils::OpenHandle(*context_extensions[i]); if (!extension->IsJSObject()) return Local(); - i::Handle closure(context->closure(), isolate); context = isolate->factory()->NewWithContext( - closure, context, + context, i::ScopeInfo::CreateForWithScope( isolate, context->IsNativeContext() ? 
i::Handle::null() @@ -2638,6 +2674,16 @@ ScriptCompiler::CachedData* ScriptCompiler::CreateCodeCache( return i::CodeSerializer::Serialize(shared); } +// static +ScriptCompiler::CachedData* ScriptCompiler::CreateCodeCache( + Local unbound_module_script) { + i::Handle shared = + i::Handle::cast( + Utils::OpenHandle(*unbound_module_script)); + DCHECK(shared->is_toplevel()); + return i::CodeSerializer::Serialize(shared); +} + ScriptCompiler::CachedData* ScriptCompiler::CreateCodeCacheForFunction( Local function, Local source) { return CreateCodeCacheForFunction(function); @@ -5079,9 +5125,15 @@ Local Function::New(Isolate* v8_isolate, FunctionCallback callback, .FromMaybe(Local()); } - MaybeLocal Function::NewInstance(Local context, int argc, v8::Local argv[]) const { + return NewInstanceWithSideEffectType(context, argc, argv, + SideEffectType::kHasSideEffect); +} + +MaybeLocal Function::NewInstanceWithSideEffectType( + Local context, int argc, v8::Local argv[], + SideEffectType side_effect_type) const { auto isolate = reinterpret_cast(context->GetIsolate()); TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute"); ENTER_V8(isolate, context, Function, NewInstance, MaybeLocal(), @@ -5089,10 +5141,39 @@ MaybeLocal Function::NewInstance(Local context, int argc, i::TimerEventScope timer_scope(isolate); auto self = Utils::OpenHandle(this); STATIC_ASSERT(sizeof(v8::Local) == sizeof(i::Object**)); + bool should_set_has_no_side_effect = + side_effect_type == SideEffectType::kHasNoSideEffect && + isolate->debug_execution_mode() == i::DebugInfo::kSideEffects; + if (should_set_has_no_side_effect) { + CHECK(self->IsJSFunction() && + i::JSFunction::cast(*self)->shared()->IsApiFunction()); + i::Object* obj = + i::JSFunction::cast(*self)->shared()->get_api_func_data()->call_code(); + if (obj->IsCallHandlerInfo()) { + i::CallHandlerInfo* handler_info = i::CallHandlerInfo::cast(obj); + if (!handler_info->IsSideEffectFreeCallHandlerInfo()) { + 
handler_info->SetNextCallHasNoSideEffect(); + } + } + } i::Handle* args = reinterpret_cast*>(argv); Local result; has_pending_exception = !ToLocal( i::Execution::New(isolate, self, self, argc, args), &result); + if (should_set_has_no_side_effect) { + i::Object* obj = + i::JSFunction::cast(*self)->shared()->get_api_func_data()->call_code(); + if (obj->IsCallHandlerInfo()) { + i::CallHandlerInfo* handler_info = i::CallHandlerInfo::cast(obj); + if (has_pending_exception) { + // Restore the map if an exception prevented restoration. + handler_info->NextCallHasNoSideEffect(); + } else { + DCHECK(handler_info->IsSideEffectCallHandlerInfo() || + handler_info->IsSideEffectFreeCallHandlerInfo()); + } + } + } RETURN_ON_FAILED_EXECUTION(Object); RETURN_ESCAPED(result); } @@ -5403,202 +5484,28 @@ bool String::ContainsOnlyOneByte() const { } -class Utf8LengthHelper : public i::AllStatic { - public: - enum State { - kEndsWithLeadingSurrogate = 1 << 0, - kStartsWithTrailingSurrogate = 1 << 1, - kLeftmostEdgeIsCalculated = 1 << 2, - kRightmostEdgeIsCalculated = 1 << 3, - kLeftmostEdgeIsSurrogate = 1 << 4, - kRightmostEdgeIsSurrogate = 1 << 5 - }; - - static const uint8_t kInitialState = 0; - - static inline bool EndsWithSurrogate(uint8_t state) { - return state & kEndsWithLeadingSurrogate; - } - - static inline bool StartsWithSurrogate(uint8_t state) { - return state & kStartsWithTrailingSurrogate; - } - - class Visitor { - public: - Visitor() : utf8_length_(0), state_(kInitialState) {} - - void VisitOneByteString(const uint8_t* chars, int length) { - int utf8_length = 0; - // Add in length 1 for each non-Latin1 character. - for (int i = 0; i < length; i++) { - utf8_length += *chars++ >> 7; - } - // Add in length 1 for each character. 
- utf8_length_ = utf8_length + length; - state_ = kInitialState; - } - - void VisitTwoByteString(const uint16_t* chars, int length) { - int utf8_length = 0; - int last_character = unibrow::Utf16::kNoPreviousCharacter; - for (int i = 0; i < length; i++) { - uint16_t c = chars[i]; - utf8_length += unibrow::Utf8::Length(c, last_character); - last_character = c; - } - utf8_length_ = utf8_length; - uint8_t state = 0; - if (unibrow::Utf16::IsTrailSurrogate(chars[0])) { - state |= kStartsWithTrailingSurrogate; - } - if (unibrow::Utf16::IsLeadSurrogate(chars[length-1])) { - state |= kEndsWithLeadingSurrogate; - } - state_ = state; - } - - static i::ConsString* VisitFlat(i::String* string, - int* length, - uint8_t* state) { - Visitor visitor; - i::ConsString* cons_string = i::String::VisitFlat(&visitor, string); - *length = visitor.utf8_length_; - *state = visitor.state_; - return cons_string; - } - - private: - int utf8_length_; - uint8_t state_; - DISALLOW_COPY_AND_ASSIGN(Visitor); - }; - - static inline void MergeLeafLeft(int* length, - uint8_t* state, - uint8_t leaf_state) { - bool edge_surrogate = StartsWithSurrogate(leaf_state); - if (!(*state & kLeftmostEdgeIsCalculated)) { - DCHECK(!(*state & kLeftmostEdgeIsSurrogate)); - *state |= kLeftmostEdgeIsCalculated - | (edge_surrogate ? kLeftmostEdgeIsSurrogate : 0); - } else if (EndsWithSurrogate(*state) && edge_surrogate) { - *length -= unibrow::Utf8::kBytesSavedByCombiningSurrogates; - } - if (EndsWithSurrogate(leaf_state)) { - *state |= kEndsWithLeadingSurrogate; - } else { - *state &= ~kEndsWithLeadingSurrogate; - } - } - - static inline void MergeLeafRight(int* length, - uint8_t* state, - uint8_t leaf_state) { - bool edge_surrogate = EndsWithSurrogate(leaf_state); - if (!(*state & kRightmostEdgeIsCalculated)) { - DCHECK(!(*state & kRightmostEdgeIsSurrogate)); - *state |= (kRightmostEdgeIsCalculated - | (edge_surrogate ? 
kRightmostEdgeIsSurrogate : 0)); - } else if (edge_surrogate && StartsWithSurrogate(*state)) { - *length -= unibrow::Utf8::kBytesSavedByCombiningSurrogates; - } - if (StartsWithSurrogate(leaf_state)) { - *state |= kStartsWithTrailingSurrogate; - } else { - *state &= ~kStartsWithTrailingSurrogate; - } - } - - static inline void MergeTerminal(int* length, - uint8_t state, - uint8_t* state_out) { - DCHECK((state & kLeftmostEdgeIsCalculated) && - (state & kRightmostEdgeIsCalculated)); - if (EndsWithSurrogate(state) && StartsWithSurrogate(state)) { - *length -= unibrow::Utf8::kBytesSavedByCombiningSurrogates; - } - *state_out = kInitialState | - (state & kLeftmostEdgeIsSurrogate ? kStartsWithTrailingSurrogate : 0) | - (state & kRightmostEdgeIsSurrogate ? kEndsWithLeadingSurrogate : 0); - } - - static int Calculate(i::ConsString* current, uint8_t* state_out) { - using internal::ConsString; - int total_length = 0; - uint8_t state = kInitialState; - while (true) { - i::String* left = current->first(); - i::String* right = current->second(); - uint8_t right_leaf_state; - uint8_t left_leaf_state; - int leaf_length; - ConsString* left_as_cons = - Visitor::VisitFlat(left, &leaf_length, &left_leaf_state); - if (left_as_cons == nullptr) { - total_length += leaf_length; - MergeLeafLeft(&total_length, &state, left_leaf_state); - } - ConsString* right_as_cons = - Visitor::VisitFlat(right, &leaf_length, &right_leaf_state); - if (right_as_cons == nullptr) { - total_length += leaf_length; - MergeLeafRight(&total_length, &state, right_leaf_state); - if (left_as_cons != nullptr) { - // 1 Leaf node. Descend in place. - current = left_as_cons; - continue; - } else { - // Terminal node. - MergeTerminal(&total_length, state, state_out); - return total_length; - } - } else if (left_as_cons == nullptr) { - // 1 Leaf node. Descend in place. - current = right_as_cons; - continue; - } - // Both strings are ConsStrings. - // Recurse on smallest. 
- if (left->length() < right->length()) { - total_length += Calculate(left_as_cons, &left_leaf_state); - MergeLeafLeft(&total_length, &state, left_leaf_state); - current = right_as_cons; - } else { - total_length += Calculate(right_as_cons, &right_leaf_state); - MergeLeafRight(&total_length, &state, right_leaf_state); - current = left_as_cons; - } - } - UNREACHABLE(); - } - - static inline int Calculate(i::ConsString* current) { - uint8_t state = kInitialState; - return Calculate(current, &state); - } - - private: - DISALLOW_IMPLICIT_CONSTRUCTORS(Utf8LengthHelper); -}; - - -static int Utf8Length(i::String* str, i::Isolate* isolate) { - int length = str->length(); - if (length == 0) return 0; - uint8_t state; - i::ConsString* cons_string = - Utf8LengthHelper::Visitor::VisitFlat(str, &length, &state); - if (cons_string == nullptr) return length; - return Utf8LengthHelper::Calculate(cons_string); -} - - int String::Utf8Length() const { i::Handle str = Utils::OpenHandle(this); str = i::String::Flatten(str); - i::Isolate* isolate = str->GetIsolate(); - return v8::Utf8Length(*str, isolate); + int length = str->length(); + if (length == 0) return 0; + i::DisallowHeapAllocation no_gc; + i::String::FlatContent flat = str->GetFlatContent(); + DCHECK(flat.IsFlat()); + int utf8_length = 0; + if (flat.IsOneByte()) { + for (uint8_t c : flat.ToOneByteVector()) { + utf8_length += c >> 7; + } + utf8_length += length; + } else { + int last_character = unibrow::Utf16::kNoPreviousCharacter; + for (uint16_t c : flat.ToUC16Vector()) { + utf8_length += unibrow::Utf8::Length(c, last_character); + last_character = c; + } + } + return utf8_length; } @@ -5823,7 +5730,7 @@ int String::WriteUtf8(char* buffer, if (success) return writer.CompleteWrite(write_null, nchars_ref); } else if (capacity >= string_length) { // First check that the buffer is large enough. - int utf8_bytes = v8::Utf8Length(*str, isolate); + int utf8_bytes = Utf8Length(); if (utf8_bytes <= capacity) { // one-byte fast path. 
if (utf8_bytes == string_length) { @@ -5843,8 +5750,6 @@ int String::WriteUtf8(char* buffer, return WriteUtf8(buffer, -1, nchars_ref, options); } } - // Recursive slow path can potentially be unreasonable slow. Flatten. - str = i::String::Flatten(str); Utf8WriterVisitor writer(buffer, capacity, false, replace_invalid_utf8); i::String::VisitFlat(&writer, *str); return writer.CompleteWrite(write_null, nchars_ref); @@ -6083,7 +5988,7 @@ static void* ExternalValue(i::Object* obj) { return nullptr; } i::Object* foreign = i::JSObject::cast(obj)->GetEmbedderField(0); - return i::Foreign::cast(foreign)->foreign_address(); + return reinterpret_cast(i::Foreign::cast(foreign)->foreign_address()); } @@ -6120,7 +6025,7 @@ bool V8::TryHandleSignal(int signum, void* info, void* context) { #endif bool V8::RegisterDefaultSignalHandler() { - return v8::internal::trap_handler::RegisterDefaultSignalHandler(); + return v8::internal::trap_handler::RegisterDefaultTrapHandler(); } bool V8::EnableWebAssemblyTrapHandler(bool use_v8_signal_handler) { @@ -6173,7 +6078,9 @@ HeapObjectStatistics::HeapObjectStatistics() object_size_(0) {} HeapCodeStatistics::HeapCodeStatistics() - : code_and_metadata_size_(0), bytecode_and_metadata_size_(0) {} + : code_and_metadata_size_(0), + bytecode_and_metadata_size_(0), + external_script_source_size_(0) {} bool v8::V8::InitializeICU(const char* icu_data_file) { return i::InitializeICU(icu_data_file); @@ -6867,8 +6774,9 @@ bool v8::String::CanMakeExternal() { if (obj->IsExternalString()) return false; // Old space strings should be externalized. 
- i::Isolate* isolate = obj->GetIsolate(); - return !isolate->heap()->new_space()->Contains(*obj); + i::Heap* heap = obj->GetIsolate()->heap(); + return !heap->new_space()->Contains(*obj) && + !heap->read_only_space()->Contains(*obj); } @@ -7509,9 +7417,7 @@ MaybeLocal Proxy::New(Local context, Local local_target, Local WasmCompiledModule::GetWasmWireBytes() { i::Handle obj = i::Handle::cast(Utils::OpenHandle(this)); - i::Handle compiled_part = - i::handle(obj->compiled_module()); - i::Handle wire_bytes(compiled_part->shared()->module_bytes()); + i::Handle wire_bytes(obj->shared()->module_bytes()); return Local::Cast(Utils::ToLocal(wire_bytes)); } @@ -7550,7 +7456,13 @@ WasmCompiledModule::SerializedModule WasmCompiledModule::Serialize() { i::Handle::cast(Utils::OpenHandle(this)); i::Handle compiled_part = i::handle(i::WasmCompiledModule::cast(obj->compiled_module())); - return i::wasm::SerializeNativeModule(obj->GetIsolate(), compiled_part); + size_t buffer_size = + i::wasm::GetSerializedNativeModuleSize(obj->GetIsolate(), compiled_part); + std::unique_ptr buffer(new uint8_t[buffer_size]); + if (i::wasm::SerializeNativeModule(obj->GetIsolate(), compiled_part, + {buffer.get(), buffer_size})) + return {std::move(buffer), buffer_size}; + return {}; } MaybeLocal WasmCompiledModule::Deserialize( @@ -7558,17 +7470,16 @@ MaybeLocal WasmCompiledModule::Deserialize( const WasmCompiledModule::CallerOwnedBuffer& serialized_module, const WasmCompiledModule::CallerOwnedBuffer& wire_bytes) { i::Isolate* i_isolate = reinterpret_cast(isolate); - i::MaybeHandle maybe_compiled_module = + i::MaybeHandle maybe_module_object = i::wasm::DeserializeNativeModule( i_isolate, {serialized_module.first, serialized_module.second}, {wire_bytes.first, wire_bytes.second}); - i::Handle compiled_module; - if (!maybe_compiled_module.ToHandle(&compiled_module)) { + i::Handle module_object; + if (!maybe_module_object.ToHandle(&module_object)) { return MaybeLocal(); } return Local::Cast( - 
Utils::ToLocal(i::Handle::cast( - i::WasmModuleObject::New(i_isolate, compiled_module)))); + Utils::ToLocal(i::Handle::cast(module_object))); } MaybeLocal WasmCompiledModule::DeserializeOrCompile( @@ -7607,15 +7518,10 @@ WasmModuleObjectBuilderStreaming::WasmModuleObjectBuilderStreaming( Local resolver = maybe_resolver.ToLocalChecked(); promise_.Reset(isolate, resolver->GetPromise()); - if (i::FLAG_wasm_stream_compilation) { - i::Handle promise = Utils::OpenHandle(*GetPromise()); - i::Isolate* i_isolate = reinterpret_cast(isolate); - streaming_decoder_ = - i_isolate->wasm_engine() - ->compilation_manager() - ->StartStreamingCompilation(i_isolate, handle(i_isolate->context()), - promise); - } + i::Handle promise = Utils::OpenHandle(*GetPromise()); + i::Isolate* i_isolate = reinterpret_cast(isolate); + streaming_decoder_ = i_isolate->wasm_engine()->StartStreamingCompilation( + i_isolate, handle(i_isolate->context()), promise); } Local WasmModuleObjectBuilderStreaming::GetPromise() { @@ -7624,38 +7530,11 @@ Local WasmModuleObjectBuilderStreaming::GetPromise() { void WasmModuleObjectBuilderStreaming::OnBytesReceived(const uint8_t* bytes, size_t size) { - if (i::FLAG_wasm_stream_compilation) { - streaming_decoder_->OnBytesReceived(i::Vector(bytes, size)); - return; - } - std::unique_ptr cloned_bytes(new uint8_t[size]); - memcpy(cloned_bytes.get(), bytes, size); - received_buffers_.push_back( - Buffer(std::unique_ptr( - const_cast(cloned_bytes.release())), - size)); - total_size_ += size; + streaming_decoder_->OnBytesReceived(i::Vector(bytes, size)); } void WasmModuleObjectBuilderStreaming::Finish() { - if (i::FLAG_wasm_stream_compilation) { - streaming_decoder_->Finish(); - return; - } - std::unique_ptr wire_bytes(new uint8_t[total_size_]); - uint8_t* insert_at = wire_bytes.get(); - - for (size_t i = 0; i < received_buffers_.size(); ++i) { - const Buffer& buff = received_buffers_[i]; - memcpy(insert_at, buff.first.get(), buff.second); - insert_at += buff.second; - } - 
// AsyncCompile makes its own copy of the wire bytes. This inefficiency - // will be resolved when we move to true streaming compilation. - auto i_isolate = reinterpret_cast(isolate_); - i_isolate->wasm_engine()->AsyncCompile( - i_isolate, Utils::OpenHandle(*promise_.Get(isolate_)), - {wire_bytes.get(), wire_bytes.get() + total_size_}, false); + streaming_decoder_->Finish(); } void WasmModuleObjectBuilderStreaming::Abort(MaybeLocal exception) { @@ -7663,7 +7542,7 @@ void WasmModuleObjectBuilderStreaming::Abort(MaybeLocal exception) { // The promise has already been resolved, e.g. because of a compilation // error. if (promise->State() != v8::Promise::kPending) return; - if (i::FLAG_wasm_stream_compilation) streaming_decoder_->Abort(); + streaming_decoder_->Abort(); // If no exception value is provided, we do not reject the promise. This can // happen when streaming compilation gets aborted when no script execution is @@ -8121,49 +8000,6 @@ Local v8::BigInt::New(Isolate* isolate, int64_t value) { return Utils::ToLocal(result); } -Local v8::BigInt::NewFromUnsigned(Isolate* isolate, uint64_t value) { - CHECK(i::FLAG_harmony_bigint); - i::Isolate* internal_isolate = reinterpret_cast(isolate); - ENTER_V8_NO_SCRIPT_NO_EXCEPTION(internal_isolate); - i::Handle result = i::BigInt::FromUint64(internal_isolate, value); - return Utils::ToLocal(result); -} - -MaybeLocal v8::BigInt::NewFromWords(Local context, - int sign_bit, int word_count, - const uint64_t* words) { - CHECK(i::FLAG_harmony_bigint); - i::Isolate* isolate = reinterpret_cast(context->GetIsolate()); - ENTER_V8_NO_SCRIPT(isolate, context, BigInt, NewFromWords, - MaybeLocal(), InternalEscapableScope); - i::MaybeHandle result = - i::BigInt::FromWords64(isolate, sign_bit, word_count, words); - has_pending_exception = result.is_null(); - RETURN_ON_FAILED_EXECUTION(BigInt); - RETURN_ESCAPED(Utils::ToLocal(result.ToHandleChecked())); -} - -uint64_t v8::BigInt::Uint64Value(bool* lossless) const { - i::Handle handle = 
Utils::OpenHandle(this); - return handle->AsUint64(lossless); -} - -int64_t v8::BigInt::Int64Value(bool* lossless) const { - i::Handle handle = Utils::OpenHandle(this); - return handle->AsInt64(lossless); -} - -int BigInt::WordCount() const { - i::Handle handle = Utils::OpenHandle(this); - return handle->Words64Count(); -} - -void BigInt::ToWordsArray(int* sign_bit, int* word_count, - uint64_t* words) const { - i::Handle handle = Utils::OpenHandle(this); - return handle->ToWordsArray64(sign_bit, word_count, words); -} - void Isolate::ReportExternalAllocationLimitReached() { i::Heap* heap = reinterpret_cast(this)->heap(); if (heap->gc_state() != i::Heap::NOT_IN_GC) return; @@ -8185,7 +8021,7 @@ HeapProfiler* Isolate::GetHeapProfiler() { CpuProfiler* Isolate::GetCpuProfiler() { i::CpuProfiler* cpu_profiler = - reinterpret_cast(this)->cpu_profiler(); + reinterpret_cast(this)->EnsureCpuProfiler(); return reinterpret_cast(cpu_profiler); } @@ -8363,22 +8199,22 @@ Isolate* Isolate::GetCurrent() { return reinterpret_cast(isolate); } - -Isolate* Isolate::New(const Isolate::CreateParams& params) { - i::Isolate* isolate = new i::Isolate(false); - return IsolateNewImpl(isolate, params); +// static +Isolate* Isolate::Allocate() { + return reinterpret_cast(new i::Isolate()); } +// static // This is separate so that tests can provide a different |isolate|. 
-Isolate* IsolateNewImpl(internal::Isolate* isolate, - const v8::Isolate::CreateParams& params) { - Isolate* v8_isolate = reinterpret_cast(isolate); +void Isolate::Initialize(Isolate* isolate, + const v8::Isolate::CreateParams& params) { + i::Isolate* i_isolate = reinterpret_cast(isolate); CHECK_NOT_NULL(params.array_buffer_allocator); - isolate->set_array_buffer_allocator(params.array_buffer_allocator); + i_isolate->set_array_buffer_allocator(params.array_buffer_allocator); if (params.snapshot_blob != nullptr) { - isolate->set_snapshot_blob(params.snapshot_blob); + i_isolate->set_snapshot_blob(params.snapshot_blob); } else { - isolate->set_snapshot_blob(i::Snapshot::DefaultSnapshotBlob()); + i_isolate->set_snapshot_blob(i::Snapshot::DefaultSnapshotBlob()); } if (params.entry_hook) { #ifdef V8_USE_SNAPSHOT @@ -8387,7 +8223,7 @@ Isolate* IsolateNewImpl(internal::Isolate* isolate, false, "v8::Isolate::New", "Setting a FunctionEntryHook is only supported in no-snapshot builds."); #endif - isolate->set_function_entry_hook(params.entry_hook); + i_isolate->set_function_entry_hook(params.entry_hook); } auto code_event_handler = params.code_event_handler; #ifdef ENABLE_GDB_JIT_INTERFACE @@ -8396,44 +8232,50 @@ Isolate* IsolateNewImpl(internal::Isolate* isolate, } #endif // ENABLE_GDB_JIT_INTERFACE if (code_event_handler) { - isolate->InitializeLoggingAndCounters(); - isolate->logger()->SetCodeEventHandler(kJitCodeEventDefault, - code_event_handler); + i_isolate->InitializeLoggingAndCounters(); + i_isolate->logger()->SetCodeEventHandler(kJitCodeEventDefault, + code_event_handler); } if (params.counter_lookup_callback) { - v8_isolate->SetCounterFunction(params.counter_lookup_callback); + isolate->SetCounterFunction(params.counter_lookup_callback); } if (params.create_histogram_callback) { - v8_isolate->SetCreateHistogramFunction(params.create_histogram_callback); + isolate->SetCreateHistogramFunction(params.create_histogram_callback); } if 
(params.add_histogram_sample_callback) { - v8_isolate->SetAddHistogramSampleFunction( + isolate->SetAddHistogramSampleFunction( params.add_histogram_sample_callback); } - isolate->set_api_external_references(params.external_references); - isolate->set_allow_atomics_wait(params.allow_atomics_wait); + i_isolate->set_api_external_references(params.external_references); + i_isolate->set_allow_atomics_wait(params.allow_atomics_wait); - SetResourceConstraints(isolate, params.constraints); + SetResourceConstraints(i_isolate, params.constraints); // TODO(jochen): Once we got rid of Isolate::Current(), we can remove this. - Isolate::Scope isolate_scope(v8_isolate); - if (params.entry_hook || !i::Snapshot::Initialize(isolate)) { + Isolate::Scope isolate_scope(isolate); + if (params.entry_hook || !i::Snapshot::Initialize(i_isolate)) { // If snapshot data was provided and we failed to deserialize it must // have been corrupted. - CHECK_NULL(isolate->snapshot_blob()); + CHECK_NULL(i_isolate->snapshot_blob()); base::ElapsedTimer timer; if (i::FLAG_profile_deserialization) timer.Start(); - isolate->Init(nullptr); + i_isolate->Init(nullptr); if (i::FLAG_profile_deserialization) { double ms = timer.Elapsed().InMillisecondsF(); i::PrintF("[Initializing isolate from scratch took %0.3f ms]\n", ms); } } - return v8_isolate; + i_isolate->set_only_terminate_in_safe_scope( + params.only_terminate_in_safe_scope); } +Isolate* Isolate::New(const Isolate::CreateParams& params) { + Isolate* isolate = Allocate(); + Initialize(isolate, params); + return isolate; +} void Isolate::Dispose() { i::Isolate* isolate = reinterpret_cast(this); @@ -8540,6 +8382,16 @@ Isolate::SuppressMicrotaskExecutionScope::~SuppressMicrotaskExecutionScope() { isolate_->handle_scope_implementer()->DecrementCallDepth(); } +Isolate::SafeForTerminationScope::SafeForTerminationScope(v8::Isolate* isolate) + : isolate_(reinterpret_cast(isolate)), + prev_value_(isolate_->next_v8_call_is_safe_for_termination()) { + 
isolate_->set_next_v8_call_is_safe_for_termination(true); +} + +Isolate::SafeForTerminationScope::~SafeForTerminationScope() { + isolate_->set_next_v8_call_is_safe_for_termination(prev_value_); +} + i::Object** Isolate::GetDataFromSnapshotOnce(size_t index) { i::Isolate* i_isolate = reinterpret_cast(this); i::FixedArray* list = i_isolate->heap()->serialized_objects(); @@ -8635,6 +8487,8 @@ bool Isolate::GetHeapCodeAndMetadataStatistics( code_statistics->code_and_metadata_size_ = isolate->code_and_metadata_size(); code_statistics->bytecode_and_metadata_size_ = isolate->bytecode_and_metadata_size(); + code_statistics->external_script_source_size_ = + isolate->external_script_source_size(); return true; } @@ -8835,7 +8689,7 @@ int Isolate::ContextDisposedNotification(bool dependant_context) { if (!dependant_context) { // We left the current context, we can abort all running WebAssembly // compilations. - isolate->wasm_engine()->compilation_manager()->AbortAllJobs(); + isolate->wasm_engine()->AbortAllCompileJobs(); } // TODO(ahaas): move other non-heap activity out of the heap call. 
return isolate->heap()->NotifyContextDisposed(dependant_context); @@ -8898,7 +8752,8 @@ void Isolate::SetStackLimit(uintptr_t stack_limit) { void Isolate::GetCodeRange(void** start, size_t* length_in_bytes) { i::Isolate* isolate = reinterpret_cast(this); if (isolate->heap()->memory_allocator()->code_range()->valid()) { - *start = isolate->heap()->memory_allocator()->code_range()->start(); + *start = reinterpret_cast( + isolate->heap()->memory_allocator()->code_range()->start()); *length_in_bytes = isolate->heap()->memory_allocator()->code_range()->size(); } else { @@ -9095,8 +8950,7 @@ String::Utf8Value::Utf8Value(v8::Isolate* isolate, v8::Local obj) TryCatch try_catch(isolate); Local str; if (!obj->ToString(context).ToLocal(&str)) return; - i::Handle i_str = Utils::OpenHandle(*str); - length_ = v8::Utf8Length(*i_str, i_isolate); + length_ = str->Utf8Length(); str_ = i::NewArray(length_ + 1); str->WriteUtf8(str_); } @@ -9413,7 +9267,7 @@ bool debug::Script::GetPossibleBreakpoints( i::Handle script = Utils::OpenHandle(this); if (script->type() == i::Script::TYPE_WASM) { i::WasmSharedModuleData* shared = - i::WasmCompiledModule::cast(script->wasm_compiled_module())->shared(); + i::WasmModuleObject::cast(script->wasm_module_object())->shared(); return shared->GetPossibleBreakpoints(start, end, locations); } @@ -9462,7 +9316,7 @@ bool debug::Script::GetPossibleBreakpoints( int debug::Script::GetSourceOffset(const debug::Location& location) const { i::Handle script = Utils::OpenHandle(this); if (script->type() == i::Script::TYPE_WASM) { - return i::WasmCompiledModule::cast(script->wasm_compiled_module()) + return i::WasmModuleObject::cast(script->wasm_module_object()) ->shared() ->GetFunctionOffset(location.GetLineNumber()) + location.GetColumnNumber(); @@ -9522,6 +9376,10 @@ void debug::RemoveBreakpoint(Isolate* v8_isolate, BreakpointId id) { isolate->debug()->RemoveBreakpoint(id); } +v8::Platform* debug::GetCurrentPlatform() { + return i::V8::GetCurrentPlatform(); +} 
+ debug::WasmScript* debug::WasmScript::Cast(debug::Script* script) { CHECK(script->IsWasm()); return static_cast(script); @@ -9531,9 +9389,9 @@ int debug::WasmScript::NumFunctions() const { i::DisallowHeapAllocation no_gc; i::Handle script = Utils::OpenHandle(this); DCHECK_EQ(i::Script::TYPE_WASM, script->type()); - i::WasmCompiledModule* compiled_module = - i::WasmCompiledModule::cast(script->wasm_compiled_module()); - i::wasm::WasmModule* module = compiled_module->shared()->module(); + i::WasmModuleObject* module_object = + i::WasmModuleObject::cast(script->wasm_module_object()); + i::wasm::WasmModule* module = module_object->shared()->module(); DCHECK_GE(i::kMaxInt, module->functions.size()); return static_cast(module->functions.size()); } @@ -9542,9 +9400,9 @@ int debug::WasmScript::NumImportedFunctions() const { i::DisallowHeapAllocation no_gc; i::Handle script = Utils::OpenHandle(this); DCHECK_EQ(i::Script::TYPE_WASM, script->type()); - i::WasmCompiledModule* compiled_module = - i::WasmCompiledModule::cast(script->wasm_compiled_module()); - i::wasm::WasmModule* module = compiled_module->shared()->module(); + i::WasmModuleObject* module_object = + i::WasmModuleObject::cast(script->wasm_module_object()); + i::wasm::WasmModule* module = module_object->shared()->module(); DCHECK_GE(i::kMaxInt, module->num_imported_functions); return static_cast(module->num_imported_functions); } @@ -9554,9 +9412,9 @@ std::pair debug::WasmScript::GetFunctionRange( i::DisallowHeapAllocation no_gc; i::Handle script = Utils::OpenHandle(this); DCHECK_EQ(i::Script::TYPE_WASM, script->type()); - i::WasmCompiledModule* compiled_module = - i::WasmCompiledModule::cast(script->wasm_compiled_module()); - i::wasm::WasmModule* module = compiled_module->shared()->module(); + i::WasmModuleObject* module_object = + i::WasmModuleObject::cast(script->wasm_module_object()); + i::wasm::WasmModule* module = module_object->shared()->module(); DCHECK_LE(0, function_index); 
DCHECK_GT(module->functions.size(), function_index); i::wasm::WasmFunction& func = module->functions[function_index]; @@ -9570,13 +9428,13 @@ uint32_t debug::WasmScript::GetFunctionHash(int function_index) { i::DisallowHeapAllocation no_gc; i::Handle script = Utils::OpenHandle(this); DCHECK_EQ(i::Script::TYPE_WASM, script->type()); - i::WasmCompiledModule* compiled_module = - i::WasmCompiledModule::cast(script->wasm_compiled_module()); - i::wasm::WasmModule* module = compiled_module->shared()->module(); + i::WasmModuleObject* module_object = + i::WasmModuleObject::cast(script->wasm_module_object()); + i::wasm::WasmModule* module = module_object->shared()->module(); DCHECK_LE(0, function_index); DCHECK_GT(module->functions.size(), function_index); i::wasm::WasmFunction& func = module->functions[function_index]; - i::SeqOneByteString* module_bytes = compiled_module->shared()->module_bytes(); + i::SeqOneByteString* module_bytes = module_object->shared()->module_bytes(); i::wasm::ModuleWireBytes wire_bytes( module_bytes->GetFlatContent().ToOneByteVector()); i::Vector function_bytes = wire_bytes.GetFunctionBytes(&func); @@ -9590,9 +9448,9 @@ debug::WasmDisassembly debug::WasmScript::DisassembleFunction( i::DisallowHeapAllocation no_gc; i::Handle script = Utils::OpenHandle(this); DCHECK_EQ(i::Script::TYPE_WASM, script->type()); - i::WasmCompiledModule* compiled_module = - i::WasmCompiledModule::cast(script->wasm_compiled_module()); - return compiled_module->shared()->DisassembleFunction(function_index); + i::WasmModuleObject* module_object = + i::WasmModuleObject::cast(script->wasm_module_object()); + return module_object->shared()->DisassembleFunction(function_index); } debug::Location::Location(int line_number, int column_number) @@ -9951,15 +9809,7 @@ Local CpuProfileNode::GetFunctionName() const { const i::CodeEntry* entry = node->entry(); i::Handle name = isolate->factory()->InternalizeUtf8String(entry->name()); - if (!entry->has_name_prefix()) { - return 
ToApiHandle(name); - } else { - // We do not expect this to fail. Change this if it does. - i::Handle cons = isolate->factory()->NewConsString( - isolate->factory()->InternalizeUtf8String(entry->name_prefix()), - name).ToHandleChecked(); - return ToApiHandle(cons); - } + return ToApiHandle(name); } int debug::Coverage::BlockData::StartOffset() const { return block_->start; } @@ -10254,74 +10104,6 @@ void CpuProfiler::SetIdle(bool is_idle) { isolate->SetIdle(is_idle); } -uintptr_t CodeEvent::GetCodeStartAddress() { - return reinterpret_cast(this)->code_start_address; -} - -size_t CodeEvent::GetCodeSize() { - return reinterpret_cast(this)->code_size; -} - -Local CodeEvent::GetFunctionName() { - return ToApiHandle( - reinterpret_cast(this)->function_name); -} - -Local CodeEvent::GetScriptName() { - return ToApiHandle( - reinterpret_cast(this)->script_name); -} - -int CodeEvent::GetScriptLine() { - return reinterpret_cast(this)->script_line; -} - -int CodeEvent::GetScriptColumn() { - return reinterpret_cast(this)->script_column; -} - -CodeEventType CodeEvent::GetCodeType() { - return reinterpret_cast(this)->code_type; -} - -const char* CodeEvent::GetComment() { - return reinterpret_cast(this)->comment; -} - -const char* CodeEvent::GetCodeEventTypeName(CodeEventType code_event_type) { - switch (code_event_type) { - case kUnknownType: - return "Unknown"; -#define V(Name) \ - case k##Name##Type: \ - return #Name; - CODE_EVENTS_LIST(V) -#undef V - } - // The execution should never pass here - UNREACHABLE(); - // NOTE(mmarchini): Workaround to fix a compiler failure on GCC 4.9 - return "Unknown"; -} - -CodeEventHandler::CodeEventHandler(Isolate* isolate) { - internal_listener_ = - new i::ExternalCodeEventListener(reinterpret_cast(isolate)); -} - -CodeEventHandler::~CodeEventHandler() { - delete reinterpret_cast(internal_listener_); -} - -void CodeEventHandler::Enable() { - reinterpret_cast(internal_listener_) - ->StartListening(this); -} - -void CodeEventHandler::Disable() 
{ - reinterpret_cast(internal_listener_) - ->StopListening(); -} static i::HeapGraphEdge* ToInternal(const HeapGraphEdge* edge) { return const_cast( @@ -10558,25 +10340,9 @@ void HeapProfiler::SetGetRetainerInfosCallback( } void HeapProfiler::SetBuildEmbedderGraphCallback( - LegacyBuildEmbedderGraphCallback callback) { - reinterpret_cast(this)->AddBuildEmbedderGraphCallback( - [](v8::Isolate* isolate, v8::EmbedderGraph* graph, void* data) { - reinterpret_cast(data)(isolate, - graph); - }, - reinterpret_cast(callback)); -} - -void HeapProfiler::AddBuildEmbedderGraphCallback( - BuildEmbedderGraphCallback callback, void* data) { - reinterpret_cast(this)->AddBuildEmbedderGraphCallback( - callback, data); -} - -void HeapProfiler::RemoveBuildEmbedderGraphCallback( - BuildEmbedderGraphCallback callback, void* data) { - reinterpret_cast(this)->RemoveBuildEmbedderGraphCallback( - callback, data); + BuildEmbedderGraphCallback callback) { + reinterpret_cast(this)->SetBuildEmbedderGraphCallback( + callback); } v8::Testing::StressType internal::Testing::stress_type_ = @@ -10804,8 +10570,7 @@ void InvokeAccessorGetterCallback( Isolate* isolate = reinterpret_cast(info.GetIsolate()); RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kAccessorGetterCallback); - Address getter_address = reinterpret_cast
(reinterpret_cast( - getter)); + Address getter_address = reinterpret_cast
(getter); VMState state(isolate); ExternalCallbackScope call_scope(isolate, getter_address); getter(property, info); @@ -10817,8 +10582,7 @@ void InvokeFunctionCallback(const v8::FunctionCallbackInfo& info, Isolate* isolate = reinterpret_cast(info.GetIsolate()); RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kInvokeFunctionCallback); - Address callback_address = - reinterpret_cast
(reinterpret_cast(callback)); + Address callback_address = reinterpret_cast
(callback); VMState state(isolate); ExternalCallbackScope call_scope(isolate, callback_address); callback(info); diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h index c67de0482df66c..d1297c8f38c65a 100644 --- a/deps/v8/src/api.h +++ b/deps/v8/src/api.h @@ -31,10 +31,14 @@ template inline T ToCData(v8::internal::Object* obj) { STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address)); if (obj == v8::internal::Smi::kZero) return nullptr; return reinterpret_cast( - reinterpret_cast( - v8::internal::Foreign::cast(obj)->foreign_address())); + v8::internal::Foreign::cast(obj)->foreign_address()); } +template <> +inline v8::internal::Address ToCData(v8::internal::Object* obj) { + if (obj == v8::internal::Smi::kZero) return v8::internal::kNullAddress; + return v8::internal::Foreign::cast(obj)->foreign_address(); +} template inline v8::internal::Handle FromCData( @@ -42,9 +46,17 @@ inline v8::internal::Handle FromCData( STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address)); if (obj == nullptr) return handle(v8::internal::Smi::kZero, isolate); return isolate->factory()->NewForeign( - reinterpret_cast(reinterpret_cast(obj))); + reinterpret_cast(obj)); } +template <> +inline v8::internal::Handle FromCData( + v8::internal::Isolate* isolate, v8::internal::Address obj) { + if (obj == v8::internal::kNullAddress) { + return handle(v8::internal::Smi::kZero, isolate); + } + return isolate->factory()->NewForeign(obj); +} class ApiFunction { public: @@ -100,6 +112,7 @@ class RegisteredExtension { V(String, String) \ V(Symbol, Symbol) \ V(Script, JSFunction) \ + V(UnboundModuleScript, SharedFunctionInfo) \ V(UnboundScript, SharedFunctionInfo) \ V(Module, Module) \ V(Function, JSReceiver) \ @@ -114,7 +127,6 @@ class RegisteredExtension { V(Promise, JSPromise) \ V(Primitive, Object) \ V(PrimitiveArray, FixedArray) \ - V(BigInt, BigInt) \ V(ScriptOrModule, Script) class Utils { @@ -365,9 +377,6 @@ OPEN_HANDLE_LIST(MAKE_OPEN_HANDLE) #undef MAKE_OPEN_HANDLE #undef OPEN_HANDLE_LIST 
-extern Isolate* IsolateNewImpl(internal::Isolate* isolate, - const Isolate::CreateParams& params); - namespace internal { class V8_EXPORT_PRIVATE DeferredHandles { diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h index 280e4ddfae7444..4c4eb00ec284ca 100644 --- a/deps/v8/src/arm/assembler-arm-inl.h +++ b/deps/v8/src/arm/assembler-arm-inl.h @@ -76,7 +76,7 @@ Address RelocInfo::target_address_address() { IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) || IsOffHeapTarget(rmode_)); if (Assembler::IsMovW(Memory::int32_at(pc_))) { - return reinterpret_cast
(pc_); + return pc_; } else { DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_))); return constant_pool_entry_address(); @@ -141,7 +141,7 @@ Address RelocInfo::target_internal_reference() { Address RelocInfo::target_internal_reference_address() { DCHECK(rmode_ == INTERNAL_REFERENCE); - return reinterpret_cast
(pc_); + return pc_; } void RelocInfo::set_wasm_code_table_entry(Address target, @@ -174,9 +174,9 @@ void RelocInfo::WipeOut() { IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) || IsInternalReference(rmode_)); if (IsInternalReference(rmode_)) { - Memory::Address_at(pc_) = nullptr; + Memory::Address_at(pc_) = kNullAddress; } else { - Assembler::set_target_address_at(pc_, constant_pool_, nullptr); + Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress); } } @@ -206,7 +206,7 @@ Operand Operand::Zero() { return Operand(static_cast(0)); } Operand::Operand(const ExternalReference& f) : rmode_(RelocInfo::EXTERNAL_REFERENCE) { - value_.immediate = reinterpret_cast(f.address()); + value_.immediate = static_cast(f.address()); } Operand::Operand(Smi* value) : rmode_(RelocInfo::NONE) { @@ -298,6 +298,10 @@ void Assembler::deserialization_set_special_target_at( Memory::Address_at(constant_pool_entry) = target; } +int Assembler::deserialization_special_target_size(Address location) { + return kSpecialTargetSize; +} + void Assembler::deserialization_set_target_internal_reference_at( Address pc, Address target, RelocInfo::Mode mode) { Memory::Address_at(pc) = target; @@ -327,9 +331,8 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) { IsMovT(Memory::int32_at(pc + kInstrSize))); Instruction* movw_instr = Instruction::At(pc); Instruction* movt_instr = Instruction::At(pc + kInstrSize); - return reinterpret_cast
( - (movt_instr->ImmedMovwMovtValue() << 16) | - movw_instr->ImmedMovwMovtValue()); + return static_cast
((movt_instr->ImmedMovwMovtValue() << 16) | + movw_instr->ImmedMovwMovtValue()); } else { // This is an mov / orr immediate load. Return the immediate. DCHECK(IsMovImmed(Memory::int32_at(pc)) && @@ -340,7 +343,7 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) { Instr orr_instr_1 = instr_at(pc + kInstrSize); Instr orr_instr_2 = instr_at(pc + 2 * kInstrSize); Instr orr_instr_3 = instr_at(pc + 3 * kInstrSize); - Address ret = reinterpret_cast
( + Address ret = static_cast
( DecodeShiftImm(mov_instr) | DecodeShiftImm(orr_instr_1) | DecodeShiftImm(orr_instr_2) | DecodeShiftImm(orr_instr_3)); return ret; @@ -367,7 +370,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool, DCHECK(IsMovW(Memory::int32_at(pc))); DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize))); uint32_t* instr_ptr = reinterpret_cast(pc); - uint32_t immediate = reinterpret_cast(target); + uint32_t immediate = static_cast(target); instr_ptr[0] = PatchMovwImmediate(instr_ptr[0], immediate & 0xFFFF); instr_ptr[1] = PatchMovwImmediate(instr_ptr[1], immediate >> 16); DCHECK(IsMovW(Memory::int32_at(pc))); diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index 87acd59e84e997..da5e4663450a98 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -374,7 +374,7 @@ Address RelocInfo::js_to_wasm_address() const { Operand::Operand(Handle handle) { rm_ = no_reg; - value_.immediate = reinterpret_cast(handle.address()); + value_.immediate = static_cast(handle.address()); rmode_ = RelocInfo::EMBEDDED_OBJECT; } @@ -491,7 +491,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { object = request.code_stub()->GetCode(); break; } - Address pc = buffer_ + request.offset(); + Address pc = reinterpret_cast
(buffer_) + request.offset(); Memory::Address_at(constant_pool_entry_address(pc, 0 /* unused */)) = object.address(); } @@ -5152,7 +5152,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { return; } DCHECK_GE(buffer_space(), kMaxRelocSize); // too late to grow buffer here - RelocInfo rinfo(pc_, rmode, data, nullptr); + RelocInfo rinfo(reinterpret_cast
(pc_), rmode, data, nullptr); reloc_info_writer.Write(&rinfo); } @@ -5191,7 +5191,7 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode, value != 0) { // Sharing entries here relies on canonicalized handles - without them, we // will miss the optimisation opportunity. - Address handle_address = reinterpret_cast
(value); + Address handle_address = static_cast
(value); auto existing = handle_to_index_map_.find(handle_address); if (existing != handle_to_index_map_.end()) { int index = existing->second; @@ -5476,9 +5476,7 @@ PatchingAssembler::~PatchingAssembler() { DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_); } -void PatchingAssembler::Emit(Address addr) { - emit(reinterpret_cast(addr)); -} +void PatchingAssembler::Emit(Address addr) { emit(static_cast(addr)); } UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler) : assembler_(assembler), diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index bbe0ba753e8639..4a424ccea22ee7 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -689,6 +689,9 @@ class Assembler : public AssemblerBase { inline static void deserialization_set_special_target_at( Address constant_pool_entry, Code* code, Address target); + // Get the size of the special target encoded at 'location'. + inline static int deserialization_special_target_size(Address location); + // This sets the internal reference at the pc. 
inline static void deserialization_set_target_internal_reference_at( Address pc, Address target, @@ -1496,8 +1499,8 @@ class Assembler : public AssemblerBase { void instr_at_put(int pos, Instr instr) { *reinterpret_cast(buffer_ + pos) = instr; } - static Instr instr_at(byte* pc) { return *reinterpret_cast(pc); } - static void instr_at_put(byte* pc, Instr instr) { + static Instr instr_at(Address pc) { return *reinterpret_cast(pc); } + static void instr_at_put(Address pc, Instr instr) { *reinterpret_cast(pc) = instr; } static Condition GetCondition(Instr instr); diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index 814d341f7e55db..8267da47030360 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -17,6 +17,7 @@ #include "src/ic/ic.h" #include "src/ic/stub-cache.h" #include "src/isolate.h" +#include "src/objects/api-callbacks.h" #include "src/objects/regexp-match-info.h" #include "src/regexp/jsregexp.h" #include "src/regexp/regexp-macro-assembler.h" @@ -38,355 +39,11 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) { __ TailCallRuntime(Runtime::kNewArray); } - -void DoubleToIStub::Generate(MacroAssembler* masm) { - Label negate, done; - Register result_reg = destination(); - - UseScratchRegisterScope temps(masm); - Register double_low = GetRegisterThatIsNotOneOf(result_reg); - Register double_high = GetRegisterThatIsNotOneOf(result_reg, double_low); - LowDwVfpRegister double_scratch = temps.AcquireLowD(); - - // Save the old values from these temporary registers on the stack. - __ Push(double_high, double_low); - - // Account for saved regs. - const int kArgumentOffset = 2 * kPointerSize; - - // Load double input. - __ vldr(double_scratch, MemOperand(sp, kArgumentOffset)); - __ vmov(double_low, double_high, double_scratch); - // Try to convert with a FPU convert instruction. This handles all - // non-saturating cases. 
- __ TryInlineTruncateDoubleToI(result_reg, double_scratch, &done); - - Register scratch = temps.Acquire(); - __ Ubfx(scratch, double_high, HeapNumber::kExponentShift, - HeapNumber::kExponentBits); - // Load scratch with exponent - 1. This is faster than loading - // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value. - STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024); - __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1)); - // If exponent is greater than or equal to 84, the 32 less significant - // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits), - // the result is 0. - // Compare exponent with 84 (compare exponent - 1 with 83). If the exponent is - // greater than this, the conversion is out of range, so return zero. - __ cmp(scratch, Operand(83)); - __ mov(result_reg, Operand::Zero(), LeaveCC, ge); - __ b(ge, &done); - - // If we reach this code, 30 <= exponent <= 83. - // `TryInlineTruncateDoubleToI` above will have truncated any double with an - // exponent lower than 30. - if (masm->emit_debug_code()) { - // Scratch is exponent - 1. - __ cmp(scratch, Operand(30 - 1)); - __ Check(ge, AbortReason::kUnexpectedValue); - } - - // We don't have to handle cases where 0 <= exponent <= 20 for which we would - // need to shift right the high part of the mantissa. - // Scratch contains exponent - 1. - // Load scratch with 52 - exponent (load with 51 - (exponent - 1)). - __ rsb(scratch, scratch, Operand(51), SetCC); - - // 52 <= exponent <= 83, shift only double_low. - // On entry, scratch contains: 52 - exponent. - __ rsb(scratch, scratch, Operand::Zero(), LeaveCC, ls); - __ mov(result_reg, Operand(double_low, LSL, scratch), LeaveCC, ls); - __ b(ls, &negate); - - // 21 <= exponent <= 51, shift double_low and double_high - // to generate the result. - __ mov(double_low, Operand(double_low, LSR, scratch)); - // Scratch contains: 52 - exponent. - // We needs: exponent - 20. 
- // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20. - __ rsb(scratch, scratch, Operand(32)); - __ Ubfx(result_reg, double_high, 0, HeapNumber::kMantissaBitsInTopWord); - // Set the implicit 1 before the mantissa part in double_high. - __ orr(result_reg, result_reg, - Operand(1 << HeapNumber::kMantissaBitsInTopWord)); - __ orr(result_reg, double_low, Operand(result_reg, LSL, scratch)); - - __ bind(&negate); - // If input was positive, double_high ASR 31 equals 0 and - // double_high LSR 31 equals zero. - // New result = (result eor 0) + 0 = result. - // If the input was negative, we have to negate the result. - // Input_high ASR 31 equals 0xFFFFFFFF and double_high LSR 31 equals 1. - // New result = (result eor 0xFFFFFFFF) + 1 = 0 - result. - __ eor(result_reg, result_reg, Operand(double_high, ASR, 31)); - __ add(result_reg, result_reg, Operand(double_high, LSR, 31)); - - __ bind(&done); - - // Restore registers corrupted in this routine and return. - __ Pop(double_high, double_low); - __ Ret(); -} - - -void MathPowStub::Generate(MacroAssembler* masm) { - const Register exponent = MathPowTaggedDescriptor::exponent(); - DCHECK(exponent == r2); - const LowDwVfpRegister double_base = d0; - const LowDwVfpRegister double_exponent = d1; - const LowDwVfpRegister double_result = d2; - const LowDwVfpRegister double_scratch = d3; - const SwVfpRegister single_scratch = s6; - const Register scratch = r9; - const Register scratch2 = r4; - - Label call_runtime, done, int_exponent; - - // Detect integer exponents stored as double. 
- __ TryDoubleToInt32Exact(scratch, double_exponent, double_scratch); - __ b(eq, &int_exponent); - - __ push(lr); - { - AllowExternalCallThatCantCauseGC scope(masm); - __ PrepareCallCFunction(0, 2); - __ MovToFloatParameters(double_base, double_exponent); - __ CallCFunction(ExternalReference::power_double_double_function(isolate()), - 0, 2); - } - __ pop(lr); - __ MovFromFloatResult(double_result); - __ b(&done); - - // Calculate power with integer exponent. - __ bind(&int_exponent); - - // Get two copies of exponent in the registers scratch and exponent. - // Exponent has previously been stored into scratch as untagged integer. - __ mov(exponent, scratch); - - __ vmov(double_scratch, double_base); // Back up base. - __ vmov(double_result, Double(1.0), scratch2); - - // Get absolute value of exponent. - __ cmp(scratch, Operand::Zero()); - __ rsb(scratch, scratch, Operand::Zero(), LeaveCC, mi); - - Label while_true; - __ bind(&while_true); - __ mov(scratch, Operand(scratch, LSR, 1), SetCC); - __ vmul(double_result, double_result, double_scratch, cs); - __ vmul(double_scratch, double_scratch, double_scratch, ne); - __ b(ne, &while_true); - - __ cmp(exponent, Operand::Zero()); - __ b(ge, &done); - __ vmov(double_scratch, Double(1.0), scratch); - __ vdiv(double_result, double_scratch, double_result); - // Test whether result is zero. Bail out to check for subnormal result. - // Due to subnormals, x^-y == (1/x)^y does not hold in all cases. - __ VFPCompareAndSetFlags(double_result, 0.0); - __ b(ne, &done); - // double_exponent may not containe the exponent value if the input was a - // smi. We set it with exponent value before bailing out. - __ vmov(single_scratch, exponent); - __ vcvt_f64_s32(double_exponent, single_scratch); - - // Returning or bailing out. 
- __ push(lr); - { - AllowExternalCallThatCantCauseGC scope(masm); - __ PrepareCallCFunction(0, 2); - __ MovToFloatParameters(double_base, double_exponent); - __ CallCFunction(ExternalReference::power_double_double_function(isolate()), - 0, 2); - } - __ pop(lr); - __ MovFromFloatResult(double_result); - - __ bind(&done); - __ Ret(); -} - -Movability CEntryStub::NeedsImmovableCode() { return kImmovable; } - void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { - CEntryStub::GenerateAheadOfTime(isolate); CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate); StoreFastElementStub::GenerateAheadOfTime(isolate); } - -void CodeStub::GenerateFPStubs(Isolate* isolate) { - // Generate if not already in cache. - SaveFPRegsMode mode = kSaveFPRegs; - CEntryStub(isolate, 1, mode).GetCode(); -} - - -void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { - CEntryStub stub(isolate, 1, kDontSaveFPRegs); - stub.GetCode(); -} - - -void CEntryStub::Generate(MacroAssembler* masm) { - // Called from JavaScript; parameters are on stack as if calling JS function. - // r0: number of arguments including receiver - // r1: pointer to builtin function - // fp: frame pointer (restored after C call) - // sp: stack pointer (restored as callee's sp after C call) - // cp: current context (C callee-saved) - // - // If argv_in_register(): - // r2: pointer to the first argument - ProfileEntryHookStub::MaybeCallEntryHook(masm); - - __ mov(r5, Operand(r1)); - - if (argv_in_register()) { - // Move argv into the correct register. - __ mov(r1, Operand(r2)); - } else { - // Compute the argv pointer in a callee-saved register. - __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2)); - __ sub(r1, r1, Operand(kPointerSize)); - } - - // Enter the exit frame that transitions from JavaScript to C++. - FrameScope scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(save_doubles(), 0, is_builtin_exit() - ? 
StackFrame::BUILTIN_EXIT - : StackFrame::EXIT); - - // Store a copy of argc in callee-saved registers for later. - __ mov(r4, Operand(r0)); - - // r0, r4: number of arguments including receiver (C callee-saved) - // r1: pointer to the first argument (C callee-saved) - // r5: pointer to builtin function (C callee-saved) - -#if V8_HOST_ARCH_ARM - int frame_alignment = MacroAssembler::ActivationFrameAlignment(); - int frame_alignment_mask = frame_alignment - 1; - if (FLAG_debug_code) { - if (frame_alignment > kPointerSize) { - Label alignment_as_expected; - DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); - __ tst(sp, Operand(frame_alignment_mask)); - __ b(eq, &alignment_as_expected); - // Don't use Check here, as it will call Runtime_Abort re-entering here. - __ stop("Unexpected alignment"); - __ bind(&alignment_as_expected); - } - } -#endif - - // Call C built-in. - // r0 = argc, r1 = argv, r2 = isolate - __ mov(r2, Operand(ExternalReference::isolate_address(isolate()))); - - // To let the GC traverse the return address of the exit frames, we need to - // know where the return address is. The CEntryStub is unmovable, so - // we can store the address on the stack to be able to find it again and - // we never have to restore it, because it will not change. - // Compute the return address in lr to return to after the jump below. Pc is - // already at '+ 8' from the current instruction but return is after three - // instructions so add another 4 to pc to get the return address. - { - // Prevent literal pool emission before return address. - Assembler::BlockConstPoolScope block_const_pool(masm); - __ add(lr, pc, Operand(4)); - __ str(lr, MemOperand(sp)); - __ Call(r5); - } - - // Result returned in r0 or r1:r0 - do not destroy these registers! - - // Check result for exception sentinel. 
- Label exception_returned; - __ CompareRoot(r0, Heap::kExceptionRootIndex); - __ b(eq, &exception_returned); - - // Check that there is no pending exception, otherwise we - // should have returned the exception sentinel. - if (FLAG_debug_code) { - Label okay; - ExternalReference pending_exception_address( - IsolateAddressId::kPendingExceptionAddress, isolate()); - __ mov(r3, Operand(pending_exception_address)); - __ ldr(r3, MemOperand(r3)); - __ CompareRoot(r3, Heap::kTheHoleValueRootIndex); - // Cannot use check here as it attempts to generate call into runtime. - __ b(eq, &okay); - __ stop("Unexpected pending exception"); - __ bind(&okay); - } - - // Exit C frame and return. - // r0:r1: result - // sp: stack pointer - // fp: frame pointer - Register argc = argv_in_register() - // We don't want to pop arguments so set argc to no_reg. - ? no_reg - // Callee-saved register r4 still holds argc. - : r4; - __ LeaveExitFrame(save_doubles(), argc); - __ mov(pc, lr); - - // Handling of exception. - __ bind(&exception_returned); - - ExternalReference pending_handler_context_address( - IsolateAddressId::kPendingHandlerContextAddress, isolate()); - ExternalReference pending_handler_entrypoint_address( - IsolateAddressId::kPendingHandlerEntrypointAddress, isolate()); - ExternalReference pending_handler_fp_address( - IsolateAddressId::kPendingHandlerFPAddress, isolate()); - ExternalReference pending_handler_sp_address( - IsolateAddressId::kPendingHandlerSPAddress, isolate()); - - // Ask the runtime for help to determine the handler. This will set r0 to - // contain the current pending exception, don't clobber it. 
- ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler, - isolate()); - { - FrameScope scope(masm, StackFrame::MANUAL); - __ PrepareCallCFunction(3, 0); - __ mov(r0, Operand(0)); - __ mov(r1, Operand(0)); - __ mov(r2, Operand(ExternalReference::isolate_address(isolate()))); - __ CallCFunction(find_handler, 3); - } - - // Retrieve the handler context, SP and FP. - __ mov(cp, Operand(pending_handler_context_address)); - __ ldr(cp, MemOperand(cp)); - __ mov(sp, Operand(pending_handler_sp_address)); - __ ldr(sp, MemOperand(sp)); - __ mov(fp, Operand(pending_handler_fp_address)); - __ ldr(fp, MemOperand(fp)); - - // If the handler is a JS frame, restore the context to the frame. Note that - // the context will be set to (cp == 0) for non-JS frames. - __ cmp(cp, Operand(0)); - __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); - - // Reset the masking register. This is done independent of the underlying - // feature flag {FLAG_branch_load_poisoning} to make the snapshot work with - // both configurations. It is safe to always do this, because the underlying - // register is caller-saved and can be arbitrarily clobbered. - __ ResetSpeculationPoisonRegister(); - - // Compute the handler entry address and jump to it. - ConstantPoolUnavailableScope constant_pool_unavailable(masm); - __ mov(r1, Operand(pending_handler_entrypoint_address)); - __ ldr(r1, MemOperand(r1)); - __ Jump(r1); -} - - void JSEntryStub::Generate(MacroAssembler* masm) { // r0: code entry // r1: function @@ -396,19 +53,23 @@ void JSEntryStub::Generate(MacroAssembler* masm) { Label invoke, handler_entry, exit; - ProfileEntryHookStub::MaybeCallEntryHook(masm); + { + NoRootArrayScope no_root_array(masm); + + ProfileEntryHookStub::MaybeCallEntryHook(masm); - // Called from C, so do not pop argc and args on exit (preserve sp) - // No need to save register-passed args - // Save callee-saved registers (incl. 
cp and fp), sp, and lr - __ stm(db_w, sp, kCalleeSaved | lr.bit()); + // Called from C, so do not pop argc and args on exit (preserve sp) + // No need to save register-passed args + // Save callee-saved registers (incl. cp and fp), sp, and lr + __ stm(db_w, sp, kCalleeSaved | lr.bit()); - // Save callee-saved vfp registers. - __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); - // Set up the reserved register for 0.0. - __ vmov(kDoubleRegZero, Double(0.0)); + // Save callee-saved vfp registers. + __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); + // Set up the reserved register for 0.0. + __ vmov(kDoubleRegZero, Double(0.0)); - __ InitializeRootRegister(); + __ InitializeRootRegister(); + } // Get address of argv, see stm above. // r0: code entry @@ -430,8 +91,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) { StackFrame::Type marker = type(); __ mov(r7, Operand(StackFrame::TypeToMarker(marker))); __ mov(r6, Operand(StackFrame::TypeToMarker(marker))); - __ mov(r5, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress, - isolate()))); + __ mov(r5, Operand(ExternalReference::Create( + IsolateAddressId::kCEntryFPAddress, isolate()))); __ ldr(r5, MemOperand(r5)); { UseScratchRegisterScope temps(masm); @@ -449,7 +110,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) { // If this is the outermost JS call, set js_entry_sp value. Label non_outermost_js; - ExternalReference js_entry_sp(IsolateAddressId::kJSEntrySPAddress, isolate()); + ExternalReference js_entry_sp = + ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress, isolate()); __ mov(r5, Operand(ExternalReference(js_entry_sp))); __ ldr(scratch, MemOperand(r5)); __ cmp(scratch, Operand::Zero()); @@ -479,8 +141,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) { // fp will be invalid because the PushStackHandler below sets it to 0 to // signal the existence of the JSEntry frame. 
__ mov(scratch, - Operand(ExternalReference(IsolateAddressId::kPendingExceptionAddress, - isolate()))); + Operand(ExternalReference::Create( + IsolateAddressId::kPendingExceptionAddress, isolate()))); } __ str(r0, MemOperand(scratch)); __ LoadRoot(r0, Heap::kExceptionRootIndex); @@ -523,8 +185,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) { // Restore the top frame descriptors from the stack. __ pop(r3); - __ mov(scratch, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress, - isolate()))); + __ mov(scratch, Operand(ExternalReference::Create( + IsolateAddressId::kCEntryFPAddress, isolate()))); __ str(r3, MemOperand(scratch)); // Reset the stack to the callee saved registers. @@ -638,9 +300,8 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) { __ mov(r2, Operand(ExternalReference::isolate_address(isolate()))); ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline)); - __ mov(scratch, - Operand(ExternalReference( - &dispatcher, ExternalReference::BUILTIN_CALL, isolate()))); + __ mov(scratch, Operand(ExternalReference::Create( + &dispatcher, ExternalReference::BUILTIN_CALL))); #endif __ Call(scratch); } @@ -849,7 +510,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) { __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2)); __ add(r0, r0, Operand(3)); __ Push(r3, r2); - __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate())); + __ JumpToExternalReference(ExternalReference::Create(Runtime::kNewArray)); } @@ -982,8 +643,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, __ PushSafepointRegisters(); __ PrepareCallCFunction(1); __ mov(r0, Operand(ExternalReference::isolate_address(isolate))); - __ CallCFunction(ExternalReference::log_enter_external_function(isolate), - 1); + __ CallCFunction(ExternalReference::log_enter_external_function(), 1); __ PopSafepointRegisters(); } @@ -998,8 +658,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, __ PushSafepointRegisters(); __ 
PrepareCallCFunction(1); __ mov(r0, Operand(ExternalReference::isolate_address(isolate))); - __ CallCFunction(ExternalReference::log_leave_external_function(isolate), - 1); + __ CallCFunction(ExternalReference::log_leave_external_function(), 1); __ PopSafepointRegisters(); } @@ -1054,8 +713,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, __ mov(r4, r0); __ PrepareCallCFunction(1); __ mov(r0, Operand(ExternalReference::isolate_address(isolate))); - __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate), - 1); + __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1); __ mov(r0, r4); __ jmp(&leave_exit_frame); } @@ -1131,8 +789,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) { __ mov(scratch0, Operand(argc())); __ str(scratch0, MemOperand(r0, 2 * kPointerSize)); - ExternalReference thunk_ref = - ExternalReference::invoke_function_callback(masm->isolate()); + ExternalReference thunk_ref = ExternalReference::invoke_function_callback(); AllowExternalCallThatCantCauseGC scope(masm); // Stores return the first js argument @@ -1194,7 +851,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) { __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = v8::PropertyCallbackInfo& ExternalReference thunk_ref = - ExternalReference::invoke_accessor_getter_callback(isolate()); + ExternalReference::invoke_accessor_getter_callback(); __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset)); __ ldr(api_function_address, diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h index 4e52a91738398f..02d1c6b1dd2d98 100644 --- a/deps/v8/src/arm/constants-arm.h +++ b/deps/v8/src/arm/constants-arm.h @@ -666,7 +666,7 @@ class Instruction { // reference to an instruction is to convert a pointer. There is no way // to allocate or create instances of class Instruction. // Use the At(pc) function to create references to Instruction. 
- static Instruction* At(byte* pc) { + static Instruction* At(Address pc) { return reinterpret_cast(pc); } diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc index a4a540512d9dc2..73131d7d1843c2 100644 --- a/deps/v8/src/arm/deoptimizer-arm.cc +++ b/deps/v8/src/arm/deoptimizer-arm.cc @@ -58,7 +58,7 @@ void Deoptimizer::TableEntryGenerator::Generate() { { UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); - __ mov(scratch, Operand(ExternalReference( + __ mov(scratch, Operand(ExternalReference::Create( IsolateAddressId::kCEntryFPAddress, isolate()))); __ str(fp, MemOperand(scratch)); } @@ -95,7 +95,7 @@ void Deoptimizer::TableEntryGenerator::Generate() { // Call Deoptimizer::New(). { AllowExternalCallThatCantCauseGC scope(masm()); - __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6); + __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6); } // Preserve "deoptimizer" object in register r0 and get the input @@ -164,8 +164,7 @@ void Deoptimizer::TableEntryGenerator::Generate() { // Call Deoptimizer::ComputeOutputFrames(). { AllowExternalCallThatCantCauseGC scope(masm()); - __ CallCFunction( - ExternalReference::compute_output_frames_function(isolate()), 1); + __ CallCFunction(ExternalReference::compute_output_frames_function(), 1); } __ pop(r0); // Restore deoptimizer object (class Deoptimizer). diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc index 9459a7e60de85a..5dab458889624c 100644 --- a/deps/v8/src/arm/disasm-arm.cc +++ b/deps/v8/src/arm/disasm-arm.cc @@ -2592,7 +2592,7 @@ int Decoder::ConstantPoolSizeAt(byte* instr_ptr) { // Disassemble the instruction at *instr_ptr into the output buffer. int Decoder::InstructionDecode(byte* instr_ptr) { - Instruction* instr = Instruction::At(instr_ptr); + Instruction* instr = Instruction::At(reinterpret_cast
(instr_ptr)); // Print raw instruction bytes. out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%08x ", diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc index 20ecef6c1cc5d7..a40b323d831fd5 100644 --- a/deps/v8/src/arm/interface-descriptors-arm.cc +++ b/deps/v8/src/arm/interface-descriptors-arm.cc @@ -34,7 +34,7 @@ void RecordWriteDescriptor::InitializePlatformSpecific( data->InitializePlatformSpecific(kParameterCount, default_stub_registers); } -const Register FastNewFunctionContextDescriptor::FunctionRegister() { +const Register FastNewFunctionContextDescriptor::ScopeInfoRegister() { return r1; } const Register FastNewFunctionContextDescriptor::SlotsRegister() { return r0; } @@ -254,12 +254,6 @@ void BinaryOpDescriptor::InitializePlatformSpecific( data->InitializePlatformSpecific(arraysize(registers), registers); } -void StringAddDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register registers[] = {r1, r0}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - void ArgumentAdaptorDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { static PlatformInterfaceDescriptor default_descriptor = diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index e363e0ecfe9313..5a013da14104e5 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -11,7 +11,9 @@ #include "src/base/division-by-constant.h" #include "src/base/utils/random-number-generator.h" #include "src/bootstrapper.h" +#include "src/builtins/constants-table-builder.h" #include "src/callable.h" +#include "src/code-factory.h" #include "src/code-stubs.h" #include "src/counters.h" #include "src/debug/debug.h" @@ -22,6 +24,7 @@ #include "src/objects-inl.h" #include "src/register-configuration.h" #include "src/runtime/runtime.h" +#include "src/snapshot/serializer-common.h" #include 
"src/arm/macro-assembler-arm.h" @@ -30,14 +33,24 @@ namespace internal { MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size, CodeObjectRequired create_code_object) - : TurboAssembler(isolate, buffer, size, create_code_object) {} + : TurboAssembler(isolate, buffer, size, create_code_object) { + if (create_code_object == CodeObjectRequired::kYes) { + // Unlike TurboAssembler, which can be used off the main thread and may not + // allocate, macro assembler creates its own copy of the self-reference + // marker in order to disambiguate between self-references during nested + // code generation (e.g.: codegen of the current object triggers stub + // compilation through CodeStub::GetCode()). + code_object_ = Handle::New( + *isolate->factory()->NewSelfReferenceMarker(), isolate); + } +} TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size, CodeObjectRequired create_code_object) : Assembler(isolate, buffer, buffer_size), isolate_(isolate) { if (create_code_object == CodeObjectRequired::kYes) { - code_object_ = - Handle::New(isolate->heap()->undefined_value(), isolate); + code_object_ = Handle::New( + isolate->heap()->self_reference_marker(), isolate); } } @@ -122,6 +135,74 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, return bytes; } +#ifdef V8_EMBEDDED_BUILTINS +void TurboAssembler::LookupConstant(Register destination, + Handle object) { + CHECK(isolate()->ShouldLoadConstantsFromRootList()); + CHECK(root_array_available_); + + // Ensure the given object is in the builtins constants table and fetch its + // index. + BuiltinsConstantsTableBuilder* builder = + isolate()->builtins_constants_table_builder(); + uint32_t index = builder->AddObject(object); + + // TODO(jgruber): Load builtins from the builtins table. + // TODO(jgruber): Ensure that code generation can recognize constant targets + // in kArchCallCodeObject. 
+ + DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant( + Heap::kBuiltinsConstantsTableRootIndex)); + + // The ldr call below could end up clobbering the destination register when + // the offset does not fit into 12 bits (and thus needs to be loaded from the + // constant pool). In that case, we need to be extra-careful and temporarily + // use another register as the target. + + const uint32_t offset = + FixedArray::kHeaderSize + index * kPointerSize - kHeapObjectTag; + const bool could_clobber_ip = !is_uint12(offset) && destination == ip; + + Register reg = destination; + if (could_clobber_ip) { + Push(r7); + reg = r7; + } + + LoadRoot(reg, Heap::kBuiltinsConstantsTableRootIndex); + ldr(destination, MemOperand(reg, offset)); + + if (could_clobber_ip) { + DCHECK_EQ(reg, r7); + Pop(r7); + } +} + +void TurboAssembler::LookupExternalReference(Register destination, + ExternalReference reference) { + CHECK(reference.address() != + ExternalReference::roots_array_start(isolate()).address()); + CHECK(isolate()->ShouldLoadConstantsFromRootList()); + CHECK(root_array_available_); + + // Encode as an index into the external reference table stored on the isolate. + + ExternalReferenceEncoder encoder(isolate()); + ExternalReferenceEncoder::Value v = encoder.Encode(reference.address()); + CHECK(!v.is_from_api()); + uint32_t index = v.index(); + + // Generate code to load from the external reference table. 
+ + int32_t roots_to_external_reference_offset = + Heap::roots_to_external_reference_table_offset() + + ExternalReferenceTable::OffsetOfEntry(index); + + ldr(destination, + MemOperand(kRootRegister, roots_to_external_reference_offset)); +} +#endif // V8_EMBEDDED_BUILTINS + void TurboAssembler::Jump(Register target, Condition cond) { bx(target, cond); } void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, @@ -133,14 +214,24 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond) { DCHECK(!RelocInfo::IsCodeTarget(rmode)); - Jump(reinterpret_cast(target), rmode, cond); + Jump(static_cast(target), rmode, cond); } void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, Condition cond) { DCHECK(RelocInfo::IsCodeTarget(rmode)); +#ifdef V8_EMBEDDED_BUILTINS + if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + LookupConstant(scratch, code); + add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag)); + Jump(scratch, cond); + return; + } +#endif // V8_EMBEDDED_BUILTINS // 'code' is always generated ARM code, never THUMB code - Jump(reinterpret_cast(code.address()), rmode, cond); + Jump(static_cast(code.address()), rmode, cond); } int TurboAssembler::CallSize(Register target, Condition cond) { @@ -159,7 +250,7 @@ void TurboAssembler::Call(Register target, Condition cond) { int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode, Condition cond) { Instr mov_instr = cond | MOV | LeaveCC; - Operand mov_operand = Operand(reinterpret_cast(target), rmode); + Operand mov_operand = Operand(target, rmode); return kInstrSize + mov_operand.InstructionsRequired(this, mov_instr) * kInstrSize; } @@ -203,7 +294,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, // blx ip // @ return address - mov(ip, 
Operand(reinterpret_cast(target), rmode)); + mov(ip, Operand(target, rmode)); blx(ip, cond); DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start)); @@ -221,6 +312,16 @@ void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, Condition cond, TargetAddressStorageMode mode, bool check_constant_pool) { DCHECK(RelocInfo::IsCodeTarget(rmode)); +#ifdef V8_EMBEDDED_BUILTINS + if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) { + // Use ip directly instead of using UseScratchRegisterScope, as we do not + // preserve scratch registers across calls. + LookupConstant(ip, code); + add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); + Call(ip, cond); + return; + } +#endif // V8_EMBEDDED_BUILTINS // 'code' is always generated ARM code, never THUMB code Call(code.address(), rmode, cond, mode); } @@ -261,9 +362,32 @@ void TurboAssembler::Push(Smi* smi) { void TurboAssembler::Move(Register dst, Smi* smi) { mov(dst, Operand(smi)); } void TurboAssembler::Move(Register dst, Handle value) { +#ifdef V8_EMBEDDED_BUILTINS + if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) { + Heap::RootListIndex root_index; + if (!isolate()->heap()->IsRootHandle(value, &root_index)) { + LookupConstant(dst, value); + } else { + LoadRoot(dst, root_index); + } + return; + } +#endif // V8_EMBEDDED_BUILTINS mov(dst, Operand(value)); } +void TurboAssembler::Move(Register dst, ExternalReference reference) { +#ifdef V8_EMBEDDED_BUILTINS + if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList() && + reference.address() != + ExternalReference::roots_array_start(isolate()).address()) { + LookupExternalReference(dst, reference); + return; + } +#endif // V8_EMBEDDED_BUILTINS + mov(dst, Operand(reference)); +} + void TurboAssembler::Move(Register dst, Register src, Condition cond) { if (dst != src) { mov(dst, src, LeaveCC, cond); @@ -539,8 +663,7 @@ void TurboAssembler::CallRecordWriteStub( Pop(slot_parameter); Pop(object_parameter); - 
Move(isolate_parameter, - Operand(ExternalReference::isolate_address(isolate()))); + Move(isolate_parameter, ExternalReference::isolate_address(isolate())); Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action)); Move(fp_mode_parameter, Smi::FromEnum(fp_mode)); Call(callable.code(), RelocInfo::CODE_TARGET); @@ -558,10 +681,12 @@ void MacroAssembler::RecordWrite(Register object, Register address, SmiCheck smi_check) { DCHECK(object != value); if (emit_debug_code()) { - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - ldr(scratch, MemOperand(address)); - cmp(scratch, value); + { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + ldr(scratch, MemOperand(address)); + cmp(scratch, value); + } Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite); } @@ -1100,7 +1225,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type, mov(scratch, Operand(StackFrame::TypeToMarker(type))); PushCommonFrame(scratch); if (type == StackFrame::INTERNAL) { - mov(scratch, Operand(CodeObject())); + Move(scratch, CodeObject()); push(scratch); } } @@ -1137,15 +1262,15 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, mov(scratch, Operand::Zero()); str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset)); } - mov(scratch, Operand(CodeObject())); + Move(scratch, CodeObject()); str(scratch, MemOperand(fp, ExitFrameConstants::kCodeOffset)); // Save the frame pointer and the context in top. - mov(scratch, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress, - isolate()))); + Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, + isolate())); str(fp, MemOperand(scratch)); - mov(scratch, - Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate()))); + Move(scratch, + ExternalReference::Create(IsolateAddressId::kContextAddress, isolate())); str(cp, MemOperand(scratch)); // Optionally save all double registers. 
@@ -1204,18 +1329,18 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, // Clear top frame. mov(r3, Operand::Zero()); - mov(scratch, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress, - isolate()))); + Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, + isolate())); str(r3, MemOperand(scratch)); // Restore current context from top and clear it in debug mode. - mov(scratch, - Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate()))); + Move(scratch, + ExternalReference::Create(IsolateAddressId::kContextAddress, isolate())); ldr(cp, MemOperand(scratch)); #ifdef DEBUG mov(r3, Operand(Context::kInvalidContext)); - mov(scratch, - Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate()))); + Move(scratch, + ExternalReference::Create(IsolateAddressId::kContextAddress, isolate())); str(r3, MemOperand(scratch)); #endif @@ -1372,14 +1497,21 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target, const ParameterCount& actual) { Label skip_hook; - ExternalReference debug_hook_avtive = + ExternalReference debug_hook_active = ExternalReference::debug_hook_on_function_call_address(isolate()); - mov(r4, Operand(debug_hook_avtive)); + Move(r4, debug_hook_active); ldrsb(r4, MemOperand(r4)); cmp(r4, Operand(0)); b(eq, &skip_hook); { + // Load receiver to pass it later to DebugOnFunctionCall hook. + if (actual.is_reg()) { + mov(r4, actual.reg()); + } else { + mov(r4, Operand(actual.immediate())); + } + ldr(r4, MemOperand(sp, r4, LSL, kPointerSizeLog2)); FrameScope frame(this, has_frame() ? 
StackFrame::NONE : StackFrame::INTERNAL); if (expected.is_reg()) { @@ -1395,6 +1527,7 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target, } Push(fun); Push(fun); + Push(r4); CallRuntime(Runtime::kDebugOnFunctionCall); Pop(fun); if (new_target.is_valid()) { @@ -1494,7 +1627,7 @@ void MacroAssembler::MaybeDropFrames() { // Check whether we need to drop frames to restart a function on the stack. ExternalReference restart_fp = ExternalReference::debug_restart_fp_address(isolate()); - mov(r1, Operand(restart_fp)); + Move(r1, restart_fp); ldr(r1, MemOperand(r1)); tst(r1, r1); Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET, @@ -1508,8 +1641,8 @@ void MacroAssembler::PushStackHandler() { Push(Smi::kZero); // Padding. // Link the current handler as the next handler. - mov(r6, - Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate()))); + mov(r6, Operand(ExternalReference::Create(IsolateAddressId::kHandlerAddress, + isolate()))); ldr(r5, MemOperand(r6)); push(r5); // Set this new handler as the current one. 
@@ -1522,8 +1655,8 @@ void MacroAssembler::PopStackHandler() { Register scratch = temps.Acquire(); STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); pop(r1); - mov(scratch, - Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate()))); + mov(scratch, Operand(ExternalReference::Create( + IsolateAddressId::kHandlerAddress, isolate()))); str(r1, MemOperand(scratch)); add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize)); } @@ -1639,8 +1772,9 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result, b(lt, done); } -void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result, - DwVfpRegister double_input) { +void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, + Register result, + DwVfpRegister double_input) { Label done; TryInlineTruncateDoubleToI(result, double_input, &done); @@ -1650,7 +1784,8 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result, sub(sp, sp, Operand(kDoubleSize)); // Put input on stack. vstr(double_input, MemOperand(sp, 0)); - CallStubDelayed(new (zone) DoubleToIStub(nullptr, result)); + Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET); + ldr(result, MemOperand(sp, 0)); add(sp, sp, Operand(kDoubleSize)); pop(lr); @@ -1666,8 +1801,10 @@ void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid, // should remove this need and make the runtime routine entry code // smarter. mov(r0, Operand(f->nargs)); - mov(r1, Operand(ExternalReference(f, isolate()))); - CallStubDelayed(new (zone) CEntryStub(nullptr, 1, save_doubles)); + Move(r1, ExternalReference::Create(f)); + Handle code = + CodeFactory::CEntry(isolate(), f->result_size, save_doubles); + Call(code, RelocInfo::CODE_TARGET); } void MacroAssembler::CallRuntime(const Runtime::Function* f, @@ -1685,9 +1822,10 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, // should remove this need and make the runtime routine entry code // smarter. 
mov(r0, Operand(num_arguments)); - mov(r1, Operand(ExternalReference(f, isolate()))); - CEntryStub stub(isolate(), 1, save_doubles); - CallStub(&stub); + Move(r1, ExternalReference::Create(f)); + Handle code = + CodeFactory::CEntry(isolate(), f->result_size, save_doubles); + Call(code, RelocInfo::CODE_TARGET); } void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) { @@ -1700,24 +1838,23 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) { // smarter. mov(r0, Operand(function->nargs)); } - JumpToExternalReference(ExternalReference(fid, isolate())); + JumpToExternalReference(ExternalReference::Create(fid)); } void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin, bool builtin_exit_frame) { #if defined(__thumb__) // Thumb mode builtin. - DCHECK_EQ(reinterpret_cast(builtin.address()) & 1, 1); + DCHECK_EQ(builtin.address() & 1, 1); #endif - mov(r1, Operand(builtin)); - CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack, - builtin_exit_frame); - Jump(stub.GetCode(), RelocInfo::CODE_TARGET); + Move(r1, builtin); + Handle code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs, + kArgvOnStack, builtin_exit_frame); + Jump(code, RelocInfo::CODE_TARGET); } void MacroAssembler::JumpToInstructionStream(Address entry) { - mov(kOffHeapTrampolineRegister, - Operand(reinterpret_cast(entry), RelocInfo::OFF_HEAP_TARGET)); + mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET)); Jump(kOffHeapTrampolineRegister); } @@ -1733,7 +1870,7 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, Register scratch1, Register scratch2) { DCHECK_GT(value, 0); if (FLAG_native_code_counters && counter->Enabled()) { - mov(scratch2, Operand(ExternalReference(counter))); + Move(scratch2, ExternalReference::Create(counter)); ldr(scratch1, MemOperand(scratch2)); add(scratch1, scratch1, Operand(value)); str(scratch1, MemOperand(scratch2)); @@ -1745,7 +1882,7 @@ void 
MacroAssembler::DecrementCounter(StatsCounter* counter, int value, Register scratch1, Register scratch2) { DCHECK_GT(value, 0); if (FLAG_native_code_counters && counter->Enabled()) { - mov(scratch2, Operand(ExternalReference(counter))); + Move(scratch2, ExternalReference::Create(counter)); ldr(scratch1, MemOperand(scratch2)); sub(scratch1, scratch1, Operand(value)); str(scratch1, MemOperand(scratch2)); @@ -1965,7 +2102,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, void TurboAssembler::CheckFor32DRegs(Register scratch) { - mov(scratch, Operand(ExternalReference::cpu_features(isolate()))); + Move(scratch, ExternalReference::cpu_features()); ldr(scratch, MemOperand(scratch)); tst(scratch, Operand(1u << VFP32DREGS)); } @@ -2197,7 +2334,7 @@ void TurboAssembler::CallCFunction(ExternalReference function, int num_double_arguments) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); - mov(scratch, Operand(function)); + Move(scratch, function); CallCFunctionHelper(scratch, num_reg_arguments, num_double_arguments); } diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index af8b449de6dbe7..51ef552a921bba 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -28,9 +28,10 @@ constexpr Register kInterpreterDispatchTableRegister = r8; constexpr Register kJavaScriptCallArgCountRegister = r0; constexpr Register kJavaScriptCallCodeStartRegister = r2; constexpr Register kJavaScriptCallNewTargetRegister = r3; -constexpr Register kOffHeapTrampolineRegister = r6; +constexpr Register kOffHeapTrampolineRegister = ip; constexpr Register kRuntimeCallFunctionRegister = r1; constexpr Register kRuntimeCallArgCountRegister = r0; +constexpr Register kWasmInstanceRegister = r3; // ---------------------------------------------------------------------------- // Static helper functions @@ -320,6 +321,12 @@ class TurboAssembler : public Assembler { void 
AsrPair(Register dst_low, Register dst_high, Register src_low, Register src_high, uint32_t shift); +#ifdef V8_EMBEDDED_BUILTINS + void LookupConstant(Register destination, Handle object); + void LookupExternalReference(Register destination, + ExternalReference reference); +#endif // V8_EMBEDDED_BUILTINS + // Returns the size of a call in instructions. Note, the value returned is // only valid as long as no entries are added to the constant pool between // checking the call size and emitting the actual call. @@ -331,6 +338,7 @@ class TurboAssembler : public Assembler { int CallStubSize(); void CallStubDelayed(CodeStub* stub); + // TODO(jgruber): Remove in favor of MacroAssembler::CallRuntime. void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid, SaveFPRegsMode save_doubles = kDontSaveFPRegs); @@ -465,6 +473,7 @@ class TurboAssembler : public Assembler { // Register move. May do nothing if the registers are identical. void Move(Register dst, Smi* smi); void Move(Register dst, Handle value); + void Move(Register dst, ExternalReference reference); void Move(Register dst, Register src, Condition cond = al); void Move(Register dst, const Operand& src, SBit sbit = LeaveCC, Condition cond = al) { @@ -520,8 +529,8 @@ class TurboAssembler : public Assembler { // Performs a truncating conversion of a floating point number as used by // the JS bitwise operations. See ECMA-262 9.5: ToInt32. // Exits with 'result' holding the answer. - void TruncateDoubleToIDelayed(Zone* zone, Register result, - DwVfpRegister double_input); + void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result, + DwVfpRegister double_input); // EABI variant for double arguments in use. 
bool use_eabi_hardfloat() { @@ -540,11 +549,17 @@ class TurboAssembler : public Assembler { void ResetSpeculationPoisonRegister(); + bool root_array_available() const { return root_array_available_; } + void set_root_array_available(bool v) { root_array_available_ = v; } + + protected: + // This handle will be patched with the code object on installation. + Handle code_object_; + private: bool has_frame_ = false; + bool root_array_available_ = true; Isolate* const isolate_; - // This handle will be patched with the code object on installation. - Handle code_object_; // Compare single values and then load the fpscr flags to a register. void VFPCompareAndLoadFlags(const SwVfpRegister src1, diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index 6a735fcef6f8cd..e8eb4740900c21 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -1666,17 +1666,18 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { case ExternalReference::BUILTIN_FP_FP_CALL: case ExternalReference::BUILTIN_COMPARE_CALL: PrintF("Call to host function at %p with args %f, %f", - static_cast(FUNCTION_ADDR(generic_target)), dval0, - dval1); + reinterpret_cast(FUNCTION_ADDR(generic_target)), + dval0, dval1); break; case ExternalReference::BUILTIN_FP_CALL: PrintF("Call to host function at %p with arg %f", - static_cast(FUNCTION_ADDR(generic_target)), dval0); + reinterpret_cast(FUNCTION_ADDR(generic_target)), + dval0); break; case ExternalReference::BUILTIN_FP_INT_CALL: PrintF("Call to host function at %p with args %f, %d", - static_cast(FUNCTION_ADDR(generic_target)), dval0, - ival); + reinterpret_cast(FUNCTION_ADDR(generic_target)), + dval0, ival); break; default: UNREACHABLE(); @@ -1803,8 +1804,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { PrintF( "Call to host function at %p " "args %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x", - static_cast(FUNCTION_ADDR(target)), arg0, arg1, arg2, arg3, - arg4, arg5, arg6, 
arg7, arg8); + reinterpret_cast(FUNCTION_ADDR(target)), arg0, arg1, arg2, + arg3, arg4, arg5, arg6, arg7, arg8); if (!stack_aligned) { PrintF(" with unaligned stack %08x\n", get_register(sp)); } @@ -3651,6 +3652,8 @@ int VFPConversionSaturate(double val, bool unsigned_res) { int32_t Simulator::ConvertDoubleToInt(double val, bool unsigned_integer, VFPRoundingMode mode) { + // TODO(jkummerow): These casts are undefined behavior if the integral + // part of {val} does not fit into the destination type. int32_t result = unsigned_integer ? static_cast(val) : static_cast(val); @@ -5731,13 +5734,12 @@ void Simulator::Execute() { } } - -void Simulator::CallInternal(byte* entry) { +void Simulator::CallInternal(Address entry) { // Adjust JS-based stack limit to C-based stack limit. isolate_->stack_guard()->AdjustStackLimitForSimulator(); // Prepare to execute the code at entry - set_register(pc, reinterpret_cast(entry)); + set_register(pc, static_cast(entry)); // Put down marker for end of simulation. The simulator will stop simulation // when the PC reaches this value. By saving the "end simulation" value into // the LR the simulation stops when returning to this call point. 
@@ -5791,7 +5793,7 @@ void Simulator::CallInternal(byte* entry) { set_register(r11, r11_val); } -intptr_t Simulator::CallImpl(byte* entry, int argument_count, +intptr_t Simulator::CallImpl(Address entry, int argument_count, const intptr_t* arguments) { // Set up arguments @@ -5823,7 +5825,7 @@ intptr_t Simulator::CallImpl(byte* entry, int argument_count, return get_register(r0); } -intptr_t Simulator::CallFPImpl(byte* entry, double d0, double d1) { +intptr_t Simulator::CallFPImpl(Address entry, double d0, double d1) { if (use_eabi_hardfloat()) { set_d_register_from_double(0, d0); set_d_register_from_double(1, d1); diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h index 46a84ff4b44ec4..6eb3cf6c6be595 100644 --- a/deps/v8/src/arm/simulator-arm.h +++ b/deps/v8/src/arm/simulator-arm.h @@ -148,9 +148,7 @@ class Simulator : public SimulatorBase { void set_pc(int32_t value); int32_t get_pc() const; - Address get_sp() const { - return reinterpret_cast
(static_cast(get_register(sp))); - } + Address get_sp() const { return static_cast
(get_register(sp)); } // Accessor to the internal simulator stack area. uintptr_t StackLimit(uintptr_t c_limit) const; @@ -159,13 +157,13 @@ class Simulator : public SimulatorBase { void Execute(); template - Return Call(byte* entry, Args... args) { + Return Call(Address entry, Args... args) { return VariadicCall(this, &Simulator::CallImpl, entry, args...); } // Alternative: call a 2-argument double function. template - Return CallFP(byte* entry, double d0, double d1) { + Return CallFP(Address entry, double d0, double d1) { return ConvertReturn(CallFPImpl(entry, d0, d1)); } @@ -212,9 +210,9 @@ class Simulator : public SimulatorBase { end_sim_pc = -2 }; - V8_EXPORT_PRIVATE intptr_t CallImpl(byte* entry, int argument_count, + V8_EXPORT_PRIVATE intptr_t CallImpl(Address entry, int argument_count, const intptr_t* arguments); - intptr_t CallFPImpl(byte* entry, double d0, double d1); + intptr_t CallFPImpl(Address entry, double d0, double d1); // Unsupported instructions use Format to print an error and stop execution. void Format(Instruction* instr, const char* format); @@ -344,7 +342,7 @@ class Simulator : public SimulatorBase { void SetSpecialRegister(SRegisterFieldMask reg_and_mask, uint32_t value); uint32_t GetFromSpecialRegister(SRegister reg); - void CallInternal(byte* entry); + void CallInternal(Address entry); // Architecture state. // Saturating instructions require a Q flag to indicate saturation. diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h index 52f552270a7d9c..0c43fbe0e1ee6c 100644 --- a/deps/v8/src/arm64/assembler-arm64-inl.h +++ b/deps/v8/src/arm64/assembler-arm64-inl.h @@ -18,12 +18,20 @@ bool CpuFeatures::SupportsOptimizer() { return true; } bool CpuFeatures::SupportsWasmSimd128() { return true; } void RelocInfo::apply(intptr_t delta) { - // On arm64 only internal references need extra work. 
- DCHECK(RelocInfo::IsInternalReference(rmode_)); - - // Absolute code pointer inside code object moves with the code object. - intptr_t* p = reinterpret_cast(pc_); - *p += delta; // Relocate entry. + // On arm64 only internal references and immediate branches need extra work. + if (RelocInfo::IsInternalReference(rmode_)) { + // Absolute code pointer inside code object moves with the code object. + intptr_t* p = reinterpret_cast(pc_); + *p += delta; // Relocate entry. + } else { + Instruction* instr = reinterpret_cast(pc_); + if (instr->IsBranchAndLink() || instr->IsUnconditionalBranch()) { + Address old_target = + reinterpret_cast
(instr->ImmPCOffsetTarget()); + Address new_target = old_target - delta; + instr->SetBranchImmTarget(reinterpret_cast(new_target)); + } + } } @@ -222,7 +230,7 @@ struct ImmediateInitializer { return RelocInfo::EXTERNAL_REFERENCE; } static inline int64_t immediate_for(ExternalReference t) {; - return reinterpret_cast(t.address()); + return static_cast(t.address()); } }; @@ -348,6 +356,10 @@ int64_t Operand::ImmediateValue() const { return immediate_.value(); } +RelocInfo::Mode Operand::ImmediateRMode() const { + DCHECK(IsImmediate() || IsHeapObjectRequest()); + return immediate_.rmode(); +} Register Operand::reg() const { DCHECK(IsShiftedRegister() || IsExtendedRegister()); @@ -523,9 +535,39 @@ Address Assembler::target_pointer_address_at(Address pc) { // Read/Modify the code target address in the branch/call instruction at pc. Address Assembler::target_address_at(Address pc, Address constant_pool) { - return Memory::Address_at(target_pointer_address_at(pc)); + Instruction* instr = reinterpret_cast(pc); + if (instr->IsLdrLiteralX()) { + return Memory::Address_at(target_pointer_address_at(pc)); + } else { + DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch()); + return reinterpret_cast
(instr->ImmPCOffsetTarget()); + } } +Handle Assembler::code_target_object_handle_at(Address pc) { + Instruction* instr = reinterpret_cast(pc); + if (instr->IsLdrLiteralX()) { + return Handle(reinterpret_cast( + Assembler::target_address_at(pc, 0 /* unused */))); + } else { + DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch()); + DCHECK_GE(instr->ImmPCOffset(), 0); + DCHECK_EQ(instr->ImmPCOffset() % kInstructionSize, 0); + DCHECK_LT(instr->ImmPCOffset() >> kInstructionSizeLog2, + code_targets_.size()); + return code_targets_[instr->ImmPCOffset() >> kInstructionSizeLog2]; + } +} + +Address Assembler::runtime_entry_at(Address pc) { + Instruction* instr = reinterpret_cast(pc); + if (instr->IsLdrLiteralX()) { + return Assembler::target_address_at(pc, 0 /* unused */); + } else { + DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch()); + return instr->ImmPCOffset() + isolate_data().code_range_start_; + } +} Address Assembler::target_address_from_return_address(Address pc) { // Returns the address of the call target from the return address that will @@ -540,45 +582,37 @@ Address Assembler::target_address_from_return_address(Address pc) { return candidate; } - -Address Assembler::return_address_from_call_start(Address pc) { - // The call, generated by MacroAssembler::Call, is one of two possible - // sequences: - // - // Without relocation: - // movz temp, #(target & 0x000000000000ffff) - // movk temp, #(target & 0x00000000ffff0000) - // movk temp, #(target & 0x0000ffff00000000) - // blr temp - // - // With relocation: - // ldr temp, =target - // blr temp - // - // The return address is immediately after the blr instruction in both cases, - // so it can be found by adding the call size to the address at the start of - // the call sequence. 
- STATIC_ASSERT(Assembler::kCallSizeWithoutRelocation == 4 * kInstructionSize); - STATIC_ASSERT(Assembler::kCallSizeWithRelocation == 2 * kInstructionSize); - - Instruction* instr = reinterpret_cast(pc); - if (instr->IsMovz()) { - // Verify the instruction sequence. - DCHECK(instr->following(1)->IsMovk()); - DCHECK(instr->following(2)->IsMovk()); - DCHECK(instr->following(3)->IsBranchAndLinkToRegister()); - return pc + Assembler::kCallSizeWithoutRelocation; +int Assembler::deserialization_special_target_size(Address location) { + Instruction* instr = reinterpret_cast(location); + if (instr->IsBranchAndLink() || instr->IsUnconditionalBranch()) { + return kSpecialTargetSize; } else { - // Verify the instruction sequence. - DCHECK(instr->IsLdrLiteralX()); - DCHECK(instr->following(1)->IsBranchAndLinkToRegister()); - return pc + Assembler::kCallSizeWithRelocation; + DCHECK_EQ(instr->InstructionBits(), 0); + return kPointerSize; } } -void Assembler::deserialization_set_special_target_at( - Address constant_pool_entry, Code* code, Address target) { - Memory::Address_at(constant_pool_entry) = target; +void Assembler::deserialization_set_special_target_at(Address location, + Code* code, + Address target) { + Instruction* instr = reinterpret_cast(location); + if (instr->IsBranchAndLink() || instr->IsUnconditionalBranch()) { + if (target == 0) { + // We are simply wiping the target out for serialization. Set the offset + // to zero instead. + target = location; + } + instr->SetBranchImmTarget(reinterpret_cast(target)); + Assembler::FlushICache(location, kInstructionSize); + } else { + DCHECK_EQ(instr->InstructionBits(), 0); + Memory::Address_at(location) = target; + // Intuitively, we would think it is necessary to always flush the + // instruction cache after patching a target address in the code. However, + // in this case, only the constant pool contents change. The instruction + // accessing the constant pool remains unchanged, so a flush is not + // required. 
+ } } void Assembler::deserialization_set_target_internal_reference_at( @@ -589,20 +623,35 @@ void Assembler::deserialization_set_target_internal_reference_at( void Assembler::set_target_address_at(Address pc, Address constant_pool, Address target, ICacheFlushMode icache_flush_mode) { - Memory::Address_at(target_pointer_address_at(pc)) = target; - // Intuitively, we would think it is necessary to always flush the - // instruction cache after patching a target address in the code as follows: - // Assembler::FlushICache(pc, sizeof(target)); - // However, on ARM, an instruction is actually patched in the case of - // embedded constants of the form: - // ldr ip, [pc, #...] - // since the instruction accessing this address in the constant pool remains - // unchanged, a flush is not required. + Instruction* instr = reinterpret_cast(pc); + if (instr->IsLdrLiteralX()) { + Memory::Address_at(target_pointer_address_at(pc)) = target; + // Intuitively, we would think it is necessary to always flush the + // instruction cache after patching a target address in the code. However, + // in this case, only the constant pool contents change. The instruction + // accessing the constant pool remains unchanged, so a flush is not + // required. + } else { + DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch()); + if (target == 0) { + // We are simply wiping the target out for serialization. Set the offset + // to zero instead. 
+ target = pc; + } + instr->SetBranchImmTarget(reinterpret_cast(target)); + if (icache_flush_mode != SKIP_ICACHE_FLUSH) { + Assembler::FlushICache(pc, kInstructionSize); + } + } } - int RelocInfo::target_address_size() { - return kPointerSize; + if (IsCodedSpecially()) { + return Assembler::kSpecialTargetSize; + } else { + DCHECK(reinterpret_cast(pc_)->IsLdrLiteralX()); + return kPointerSize; + } } @@ -615,7 +664,26 @@ Address RelocInfo::target_address_address() { DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) || IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) || IsOffHeapTarget(rmode_)); - return Assembler::target_pointer_address_at(pc_); + Instruction* instr = reinterpret_cast(pc_); + // Read the address of the word containing the target_address in an + // instruction stream. + // The only architecture-independent user of this function is the serializer. + // The serializer uses it to find out how many raw bytes of instruction to + // output before the next target. + // For an instruction like B/BL, where the target bits are mixed into the + // instruction bits, the size of the target will be zero, indicating that the + // serializer should not step forward in memory after a target is resolved + // and written. + // For LDR literal instructions, we can skip up to the constant pool entry + // address. We make sure that RelocInfo is ordered by the + // target_address_address so that we do not skip over any relocatable + // instruction sequences. + if (instr->IsLdrLiteralX()) { + return constant_pool_entry_address(); + } else { + DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch()); + return reinterpret_cast
(pc_); + } } @@ -631,9 +699,13 @@ HeapObject* RelocInfo::target_object() { } Handle RelocInfo::target_object_handle(Assembler* origin) { - DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); - return Handle(reinterpret_cast( - Assembler::target_address_at(pc_, constant_pool_))); + if (rmode_ == EMBEDDED_OBJECT) { + return Handle(reinterpret_cast( + Assembler::target_address_at(pc_, constant_pool_))); + } else { + DCHECK(IsCodeTarget(rmode_)); + return origin->code_target_object_handle_at(pc_); + } } void RelocInfo::set_target_object(HeapObject* target, @@ -671,7 +743,7 @@ Address RelocInfo::target_internal_reference() { Address RelocInfo::target_internal_reference_address() { DCHECK(rmode_ == INTERNAL_REFERENCE); - return reinterpret_cast
(pc_); + return pc_; } void RelocInfo::set_wasm_code_table_entry(Address target, @@ -683,7 +755,7 @@ void RelocInfo::set_wasm_code_table_entry(Address target, Address RelocInfo::target_runtime_entry(Assembler* origin) { DCHECK(IsRuntimeEntry(rmode_)); - return target_address(); + return origin->runtime_entry_at(pc_); } void RelocInfo::set_target_runtime_entry(Address target, @@ -705,9 +777,9 @@ void RelocInfo::WipeOut() { IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) || IsInternalReference(rmode_)); if (IsInternalReference(rmode_)) { - Memory::Address_at(pc_) = nullptr; + Memory::Address_at(pc_) = kNullAddress; } else { - Assembler::set_target_address_at(pc_, constant_pool_, nullptr); + Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress); } } diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc index fe81147d768ad5..121c15aac9ca6c 100644 --- a/deps/v8/src/arm64/assembler-arm64.cc +++ b/deps/v8/src/arm64/assembler-arm64.cc @@ -157,14 +157,20 @@ CPURegList CPURegList::GetSafepointSavedRegisters() { // ----------------------------------------------------------------------------- // Implementation of RelocInfo -const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE; - +const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask | + 1 << RelocInfo::RUNTIME_ENTRY | + 1 << RelocInfo::INTERNAL_REFERENCE; bool RelocInfo::IsCodedSpecially() { // The deserializer needs to know whether a pointer is specially coded. Being - // specially coded on ARM64 means that it is a movz/movk sequence. We don't - // generate those for relocatable pointers. - return false; + // specially coded on ARM64 means that it is an immediate branch. 
+ Instruction* instr = reinterpret_cast(pc_); + if (instr->IsLdrLiteralX()) { + return false; + } else { + DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch()); + return true; + } } @@ -174,7 +180,7 @@ bool RelocInfo::IsInConstantPool() { } Address RelocInfo::embedded_address() const { - return Memory::Address_at(Assembler::target_pointer_address_at(pc_)); + return Assembler::target_address_at(pc_, constant_pool_); } uint32_t RelocInfo::embedded_size() const { @@ -290,7 +296,7 @@ bool AreConsecutive(const VRegister& reg1, const VRegister& reg2, } void Immediate::InitializeHandle(Handle handle) { - value_ = reinterpret_cast(handle.address()); + value_ = static_cast(handle.address()); rmode_ = RelocInfo::EMBEDDED_OBJECT; } @@ -570,8 +576,8 @@ void Assembler::Reset() { memset(buffer_, 0, pc_ - buffer_); #endif pc_ = buffer_; - reloc_info_writer.Reposition(reinterpret_cast(buffer_ + buffer_size_), - reinterpret_cast(pc_)); + code_targets_.reserve(64); + reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_); constpool_.Clear(); next_constant_pool_check_ = 0; next_veneer_pool_check_ = kMaxInt; @@ -580,19 +586,27 @@ void Assembler::Reset() { void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { for (auto& request : heap_object_requests_) { - Handle object; + Address pc = reinterpret_cast
(buffer_) + request.offset(); switch (request.kind()) { - case HeapObjectRequest::kHeapNumber: - object = isolate->factory()->NewHeapNumber(request.heap_number(), - IMMUTABLE, TENURED); + case HeapObjectRequest::kHeapNumber: { + Handle object = isolate->factory()->NewHeapNumber( + request.heap_number(), IMMUTABLE, TENURED); + set_target_address_at(pc, 0 /* unused */, object.address()); break; - case HeapObjectRequest::kCodeStub: + } + case HeapObjectRequest::kCodeStub: { request.code_stub()->set_isolate(isolate); - object = request.code_stub()->GetCode(); + Instruction* instr = reinterpret_cast(pc); + DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch()); + DCHECK_GE(instr->ImmPCOffset(), 0); + DCHECK_EQ(instr->ImmPCOffset() % kInstructionSize, 0); + DCHECK_LT(instr->ImmPCOffset() >> kInstructionSizeLog2, + code_targets_.size()); + code_targets_[instr->ImmPCOffset() >> kInstructionSizeLog2] = + request.code_stub()->GetCode(); break; + } } - Address pc = buffer_ + request.offset(); - Memory::Address_at(target_pointer_address_at(pc)) = object.address(); } } @@ -4722,7 +4736,7 @@ void Assembler::GrowBuffer() { DeleteArray(buffer_); buffer_ = desc.buffer; buffer_size_ = desc.buffer_size; - pc_ = reinterpret_cast(pc_) + pc_delta; + pc_ = pc_ + pc_delta; reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, reloc_info_writer.last_pc() + pc_delta); @@ -4739,13 +4753,13 @@ void Assembler::GrowBuffer() { // Pending relocation entries are also relative, no need to relocate. } - -void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { +void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data, + ConstantPoolMode constant_pool_mode) { // Non-relocatable constants should not end up in the literal pool. DCHECK(!RelocInfo::IsNone(rmode)); // We do not try to reuse pool constants. - RelocInfo rinfo(reinterpret_cast(pc_), rmode, data, nullptr); + RelocInfo rinfo(reinterpret_cast
(pc_), rmode, data, nullptr); bool write_reloc_info = true; if ((rmode == RelocInfo::COMMENT) || @@ -4760,12 +4774,14 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { RelocInfo::IsInternalReference(rmode) || RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode)); // These modes do not need an entry in the constant pool. - } else { + } else if (constant_pool_mode == NEEDS_POOL_ENTRY) { write_reloc_info = constpool_.RecordEntry(data, rmode); // Make sure the constant pool is not emitted in place of the next // instruction for which we just recorded relocation info. BlockConstPoolFor(1); } + // For modes that cannot use the constant pool, a different sequence of + // instructions will be emitted by this function's caller. if (!RelocInfo::IsNone(rmode) && write_reloc_info) { // Don't record external references unless the heap will be serialized. @@ -4778,6 +4794,34 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { } } +int Assembler::GetCodeTargetIndex(Handle target) { + int current = static_cast(code_targets_.size()); + if (current > 0 && !target.is_null() && + code_targets_.back().address() == target.address()) { + // Optimization if we keep jumping to the same code target. 
+ return (current - 1); + } else { + code_targets_.push_back(target); + return current; + } +} + +void Assembler::near_jump(int offset, RelocInfo::Mode rmode) { + if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode, offset, NO_POOL_ENTRY); + b(offset); +} + +void Assembler::near_call(int offset, RelocInfo::Mode rmode) { + if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode, offset, NO_POOL_ENTRY); + bl(offset); +} + +void Assembler::near_call(HeapObjectRequest request) { + RequestHeapObject(request); + int index = GetCodeTargetIndex(Handle()); + RecordRelocInfo(RelocInfo::CODE_TARGET, index, NO_POOL_ENTRY); + bl(index); +} void Assembler::BlockConstPoolFor(int instructions) { int pc_limit = pc_offset() + instructions * kInstructionSize; @@ -4858,8 +4902,8 @@ bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) { void Assembler::RecordVeneerPool(int location_offset, int size) { - RelocInfo rinfo(buffer_ + location_offset, RelocInfo::VENEER_POOL, - static_cast(size), nullptr); + RelocInfo rinfo(reinterpret_cast
(buffer_) + location_offset, + RelocInfo::VENEER_POOL, static_cast(size), nullptr); reloc_info_writer.Write(&rinfo); } @@ -4965,8 +5009,7 @@ void Assembler::CheckVeneerPool(bool force_emit, bool require_jump, int Assembler::buffer_space() const { - return static_cast(reloc_info_writer.pos() - - reinterpret_cast(pc_)); + return static_cast(reloc_info_writer.pos() - pc_); } @@ -5009,6 +5052,15 @@ void PatchingAssembler::PatchAdrFar(int64_t target_offset) { add(rd, rd, scratch); } +void PatchingAssembler::PatchSubSp(uint32_t immediate) { + // The code at the current instruction should be: + // sub sp, sp, #0 + + // Verify the expected code. + Instruction* expected_adr = InstructionAt(0); + CHECK(expected_adr->IsAddSubImmediate()); + sub(sp, sp, immediate); +} } // namespace internal } // namespace v8 diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h index 94122e3939119d..431b5208112845 100644 --- a/deps/v8/src/arm64/assembler-arm64.h +++ b/deps/v8/src/arm64/assembler-arm64.h @@ -124,10 +124,6 @@ class CPURegister : public RegisterBase { } RegisterType type() const { return reg_type_; } - RegList bit() const { - DCHECK(static_cast(reg_code_) < (sizeof(RegList) * kBitsPerByte)); - return IsValid() ? 1UL << reg_code_ : 0; - } int SizeInBits() const { DCHECK(IsValid()); return reg_size_; @@ -445,7 +441,7 @@ ALIAS_REGISTER(Register, ip1, x17); ALIAS_REGISTER(Register, wip0, w16); ALIAS_REGISTER(Register, wip1, w17); // Root register. -ALIAS_REGISTER(Register, root, x26); +ALIAS_REGISTER(Register, kRootRegister, x26); ALIAS_REGISTER(Register, rr, x26); // Context pointer register. ALIAS_REGISTER(Register, cp, x27); @@ -485,14 +481,11 @@ bool AreAliased(const CPURegister& reg1, // same size, and are of the same type. The system stack pointer may be // specified. Arguments set to NoReg are ignored, as are any subsequent // arguments. At least one argument (reg1) must be valid (not NoCPUReg). 
-bool AreSameSizeAndType(const CPURegister& reg1, - const CPURegister& reg2, - const CPURegister& reg3 = NoCPUReg, - const CPURegister& reg4 = NoCPUReg, - const CPURegister& reg5 = NoCPUReg, - const CPURegister& reg6 = NoCPUReg, - const CPURegister& reg7 = NoCPUReg, - const CPURegister& reg8 = NoCPUReg); +bool AreSameSizeAndType( + const CPURegister& reg1, const CPURegister& reg2 = NoCPUReg, + const CPURegister& reg3 = NoCPUReg, const CPURegister& reg4 = NoCPUReg, + const CPURegister& reg5 = NoCPUReg, const CPURegister& reg6 = NoCPUReg, + const CPURegister& reg7 = NoCPUReg, const CPURegister& reg8 = NoCPUReg); // AreSameFormat returns true if all of the specified VRegisters have the same // vector format. Arguments set to NoVReg are ignored, as are any subsequent @@ -517,12 +510,12 @@ typedef VRegister Simd128Register; // Lists of registers. class CPURegList { public: - explicit CPURegList(CPURegister reg1, CPURegister reg2 = NoCPUReg, - CPURegister reg3 = NoCPUReg, CPURegister reg4 = NoCPUReg) - : list_(reg1.bit() | reg2.bit() | reg3.bit() | reg4.bit()), - size_(reg1.SizeInBits()), - type_(reg1.type()) { - DCHECK(AreSameSizeAndType(reg1, reg2, reg3, reg4)); + template + explicit CPURegList(CPURegister reg0, CPURegisters... 
regs) + : list_(CPURegister::ListOf(reg0, regs...)), + size_(reg0.SizeInBits()), + type_(reg0.type()) { + DCHECK(AreSameSizeAndType(reg0, regs...)); DCHECK(IsValid()); } @@ -646,8 +639,8 @@ class CPURegList { CPURegister::RegisterType type_; bool IsValid() const { - const RegList kValidRegisters = 0x8000000ffffffff; - const RegList kValidVRegisters = 0x0000000ffffffff; + constexpr RegList kValidRegisters{0x8000000ffffffff}; + constexpr RegList kValidVRegisters{0x0000000ffffffff}; switch (type_) { case CPURegister::kRegister: return (list_ & kValidRegisters) == list_; @@ -751,6 +744,7 @@ class Operand { inline Immediate immediate() const; inline int64_t ImmediateValue() const; + inline RelocInfo::Mode ImmediateRMode() const; inline Register reg() const; inline Shift shift() const; inline Extend extend() const; @@ -950,7 +944,22 @@ class Assembler : public AssemblerBase { // RelocInfo and pools ------------------------------------------------------ // Record relocation information for current pc_. - void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0); + enum ConstantPoolMode { NEEDS_POOL_ENTRY, NO_POOL_ENTRY }; + void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0, + ConstantPoolMode constant_pool_mode = NEEDS_POOL_ENTRY); + + // Generate a B immediate instruction with the corresponding relocation info. + // 'offset' is the immediate to encode in the B instruction (so it is the + // difference between the target and the PC of the instruction, divided by + // the instruction size). + void near_jump(int offset, RelocInfo::Mode rmode); + // Generate a BL immediate instruction with the corresponding relocation info. + // As for near_jump, 'offset' is the immediate to encode in the BL + // instruction. + void near_call(int offset, RelocInfo::Mode rmode); + // Generate a BL immediate instruction with the corresponding relocation info + // for the input HeapObjectRequest. 
+ void near_call(HeapObjectRequest request); // Return the address in the constant pool of the code target address used by // the branch/call instruction at pc. @@ -963,42 +972,59 @@ class Assembler : public AssemblerBase { Address pc, Address constant_pool, Address target, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED); + // Add 'target' to the code_targets_ vector, if necessary, and return the + // offset at which it is stored. + int GetCodeTargetIndex(Handle target); + + // Returns the handle for the code object called at 'pc'. + // This might need to be temporarily encoded as an offset into code_targets_. + inline Handle code_target_object_handle_at(Address pc); + + // Returns the target address for a runtime function for the call encoded + // at 'pc'. + // Runtime entries can be temporarily encoded as the offset between the + // runtime function entrypoint and the code range start (stored in the + // code_range_start_ field), in order to be encodable as we generate the code, + // before it is moved into the code space. + inline Address runtime_entry_at(Address pc); + // Return the code target address at a call site from the return address of // that call in the instruction stream. inline static Address target_address_from_return_address(Address pc); - // Given the address of the beginning of a call, return the address in the - // instruction stream that call will return from. - inline static Address return_address_from_call_start(Address pc); - - // This sets the branch destination (which is in the constant pool on ARM). + // This sets the branch destination. 'location' here can be either the pc of + // an immediate branch or the address of an entry in the constant pool. // This is for calls and branches within generated code. 
- inline static void deserialization_set_special_target_at( - Address constant_pool_entry, Code* code, Address target); + inline static void deserialization_set_special_target_at(Address location, + Code* code, + Address target); + + // Get the size of the special target encoded at 'location'. + inline static int deserialization_special_target_size(Address location); // This sets the internal reference at the pc. inline static void deserialization_set_target_internal_reference_at( Address pc, Address target, RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE); - // All addresses in the constant pool are the same size as pointers. - static constexpr int kSpecialTargetSize = kPointerSize; + // This value is used in the serialization process and must be zero for + // ARM64, as the code target is split across multiple instructions and does + // not exist separately in the code, so the serializer should not step + // forwards in memory after a target is resolved and written. + static constexpr int kSpecialTargetSize = 0; // The sizes of the call sequences emitted by MacroAssembler::Call. // Wherever possible, use MacroAssembler::CallSize instead of these constants, // as it will choose the correct value for a given relocation mode. 
// - // Without relocation: - // movz temp, #(target & 0x000000000000ffff) - // movk temp, #(target & 0x00000000ffff0000) - // movk temp, #(target & 0x0000ffff00000000) - // blr temp + // A "near" call is encoded in a BL immediate instruction: + // bl target // - // With relocation: - // ldr temp, =target - // blr temp - static constexpr int kCallSizeWithoutRelocation = 4 * kInstructionSize; - static constexpr int kCallSizeWithRelocation = 2 * kInstructionSize; + // whereas a "far" call will be encoded like this: + // ldr temp, =target + // blr temp + static constexpr int kNearCallSize = 1 * kInstructionSize; + static constexpr int kFarCallSize = 2 * kInstructionSize; // Size of the generated code in bytes uint64_t SizeOfGeneratedCode() const { @@ -1021,7 +1047,7 @@ class Assembler : public AssemblerBase { // change things to be consistent. void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) { DCHECK_GE(size, 0); - DCHECK(static_cast(size) == SizeOfCodeGeneratedSince(label)); + DCHECK_EQ(static_cast(size), SizeOfCodeGeneratedSince(label)); } // Return the number of instructions generated from label to the @@ -3431,8 +3457,6 @@ class Assembler : public AssemblerBase { // Verify that a label's link chain is intact. void CheckLabelLinkChain(Label const * label); - void RecordLiteral(int64_t imm, unsigned size); - // Postpone the generation of the constant pool for the specified number of // instructions. void BlockConstPoolFor(int instructions); @@ -3522,6 +3546,14 @@ class Assembler : public AssemblerBase { // are already bound. std::deque internal_reference_positions_; + // Before we copy code into the code space, we cannot encode calls to code + // targets as we normally would, as the difference between the instruction's + // location in the temporary buffer and the call target is not guaranteed to + // fit in the offset field. 
We keep track of the code handles we encounter + // in calls in this vector, and encode the index of the code handle in the + // vector instead. + std::vector> code_targets_; + // Relocation info records are also used during code generation as temporary // containers for constants and code target addresses until they are emitted // to the constant pool. These pending relocation info records are temporarily @@ -3666,6 +3698,7 @@ class PatchingAssembler : public Assembler { static constexpr int kAdrFarPatchableNNops = 2; static constexpr int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2; void PatchAdrFar(int64_t target_offset); + void PatchSubSp(uint32_t immediate); }; diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc index c1cb3025a1fcef..cca4c302696ecb 100644 --- a/deps/v8/src/arm64/code-stubs-arm64.cc +++ b/deps/v8/src/arm64/code-stubs-arm64.cc @@ -16,6 +16,7 @@ #include "src/ic/ic.h" #include "src/ic/stub-cache.h" #include "src/isolate.h" +#include "src/objects/api-callbacks.h" #include "src/objects/regexp-match-info.h" #include "src/regexp/jsregexp.h" #include "src/regexp/regexp-macro-assembler.h" @@ -36,437 +37,13 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) { __ TailCallRuntime(Runtime::kNewArray); } - -void DoubleToIStub::Generate(MacroAssembler* masm) { - Label done; - Register result = destination(); - - DCHECK(result.Is64Bits()); - - UseScratchRegisterScope temps(masm); - Register scratch1 = temps.AcquireX(); - Register scratch2 = temps.AcquireX(); - DoubleRegister double_scratch = temps.AcquireD(); - - __ Peek(double_scratch, 0); - // Try to convert with a FPU convert instruction. This handles all - // non-saturating cases. - __ TryConvertDoubleToInt64(result, double_scratch, &done); - __ Fmov(result, double_scratch); - - // If we reach here we need to manually convert the input to an int32. - - // Extract the exponent. 
- Register exponent = scratch1; - __ Ubfx(exponent, result, HeapNumber::kMantissaBits, - HeapNumber::kExponentBits); - - // It the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since - // the mantissa gets shifted completely out of the int32_t result. - __ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32); - __ CzeroX(result, ge); - __ B(ge, &done); - - // The Fcvtzs sequence handles all cases except where the conversion causes - // signed overflow in the int64_t target. Since we've already handled - // exponents >= 84, we can guarantee that 63 <= exponent < 84. - - if (masm->emit_debug_code()) { - __ Cmp(exponent, HeapNumber::kExponentBias + 63); - // Exponents less than this should have been handled by the Fcvt case. - __ Check(ge, AbortReason::kUnexpectedValue); - } - - // Isolate the mantissa bits, and set the implicit '1'. - Register mantissa = scratch2; - __ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits); - __ Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits); - - // Negate the mantissa if necessary. - __ Tst(result, kXSignMask); - __ Cneg(mantissa, mantissa, ne); - - // Shift the mantissa bits in the correct place. We know that we have to shift - // it left here, because exponent >= 63 >= kMantissaBits. - __ Sub(exponent, exponent, - HeapNumber::kExponentBias + HeapNumber::kMantissaBits); - __ Lsl(result, mantissa, exponent); - - __ Bind(&done); - __ Ret(); -} - - -void MathPowStub::Generate(MacroAssembler* masm) { - // Stack on entry: - // sp[0]: Exponent (as a tagged value). - // sp[1]: Base (as a tagged value). - // - // The (tagged) result will be returned in x0, as a heap number. 
- - Register exponent_tagged = MathPowTaggedDescriptor::exponent(); - DCHECK(exponent_tagged.is(x11)); - Register exponent_integer = MathPowIntegerDescriptor::exponent(); - DCHECK(exponent_integer.is(x12)); - Register saved_lr = x19; - VRegister result_double = d0; - VRegister base_double = d0; - VRegister exponent_double = d1; - VRegister base_double_copy = d2; - VRegister scratch1_double = d6; - VRegister scratch0_double = d7; - - // A fast-path for integer exponents. - Label exponent_is_smi, exponent_is_integer; - // Allocate a heap number for the result, and return it. - Label done; - - // Unpack the inputs. - - // Handle double (heap number) exponents. - // Detect integer exponents stored as doubles and handle those in the - // integer fast-path. - __ TryRepresentDoubleAsInt64(exponent_integer, exponent_double, - scratch0_double, &exponent_is_integer); - - { - AllowExternalCallThatCantCauseGC scope(masm); - __ Mov(saved_lr, lr); - __ CallCFunction(ExternalReference::power_double_double_function(isolate()), - 0, 2); - __ Mov(lr, saved_lr); - __ B(&done); - } - - // Handle SMI exponents. - __ Bind(&exponent_is_smi); - // x10 base_tagged The tagged base (input). - // x11 exponent_tagged The tagged exponent (input). - // d1 base_double The base as a double. - __ SmiUntag(exponent_integer, exponent_tagged); - - __ Bind(&exponent_is_integer); - // x10 base_tagged The tagged base (input). - // x11 exponent_tagged The tagged exponent (input). - // x12 exponent_integer The exponent as an integer. - // d1 base_double The base as a double. - - // Find abs(exponent). For negative exponents, we can find the inverse later. - Register exponent_abs = x13; - __ Cmp(exponent_integer, 0); - __ Cneg(exponent_abs, exponent_integer, mi); - // x13 exponent_abs The value of abs(exponent_integer). - - // Repeatedly multiply to calculate the power. 
- // result = 1.0; - // For each bit n (exponent_integer{n}) { - // if (exponent_integer{n}) { - // result *= base; - // } - // base *= base; - // if (remaining bits in exponent_integer are all zero) { - // break; - // } - // } - Label power_loop, power_loop_entry, power_loop_exit; - __ Fmov(scratch1_double, base_double); - __ Fmov(base_double_copy, base_double); - __ Fmov(result_double, 1.0); - __ B(&power_loop_entry); - - __ Bind(&power_loop); - __ Fmul(scratch1_double, scratch1_double, scratch1_double); - __ Lsr(exponent_abs, exponent_abs, 1); - __ Cbz(exponent_abs, &power_loop_exit); - - __ Bind(&power_loop_entry); - __ Tbz(exponent_abs, 0, &power_loop); - __ Fmul(result_double, result_double, scratch1_double); - __ B(&power_loop); - - __ Bind(&power_loop_exit); - - // If the exponent was positive, result_double holds the result. - __ Tbz(exponent_integer, kXSignBit, &done); - - // The exponent was negative, so find the inverse. - __ Fmov(scratch0_double, 1.0); - __ Fdiv(result_double, scratch0_double, result_double); - // ECMA-262 only requires Math.pow to return an 'implementation-dependent - // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow - // to calculate the subnormal value 2^-1074. This method of calculating - // negative powers doesn't work because 2^1074 overflows to infinity. To - // catch this corner-case, we bail out if the result was 0. (This can only - // occur if the divisor is infinity or the base is zero.) 
- __ Fcmp(result_double, 0.0); - __ B(&done, ne); - - AllowExternalCallThatCantCauseGC scope(masm); - __ Mov(saved_lr, lr); - __ Fmov(base_double, base_double_copy); - __ Scvtf(exponent_double, exponent_integer); - __ CallCFunction(ExternalReference::power_double_double_function(isolate()), - 0, 2); - __ Mov(lr, saved_lr); - __ Bind(&done); - __ Ret(); -} - void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { // It is important that the following stubs are generated in this order // because pregenerated stubs can only call other pregenerated stubs. - CEntryStub::GenerateAheadOfTime(isolate); CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate); StoreFastElementStub::GenerateAheadOfTime(isolate); } - -void CodeStub::GenerateFPStubs(Isolate* isolate) { - // Floating-point code doesn't get special handling in ARM64, so there's - // nothing to do here. - USE(isolate); -} - -Movability CEntryStub::NeedsImmovableCode() { - // CEntryStub stores the return address on the stack before calling into - // C++ code. In some cases, the VM accesses this address, but it is not used - // when the C++ code returns to the stub because LR holds the return address - // in AAPCS64. If the stub is moved (perhaps during a GC), we could end up - // returning to dead code. - // TODO(jbramley): Whilst this is the only analysis that makes sense, I can't - // find any comment to confirm this, and I don't hit any crashes whatever - // this function returns. The anaylsis should be properly confirmed. - return kImmovable; -} - - -void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { - CEntryStub stub(isolate, 1, kDontSaveFPRegs); - stub.GetCode(); - CEntryStub stub_fp(isolate, 1, kSaveFPRegs); - stub_fp.GetCode(); -} - - -void CEntryStub::Generate(MacroAssembler* masm) { - // The Abort mechanism relies on CallRuntime, which in turn relies on - // CEntryStub, so until this stub has been generated, we have to use a - // fall-back Abort mechanism. 
- // - // Note that this stub must be generated before any use of Abort. - MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm); - - ASM_LOCATION("CEntryStub::Generate entry"); - ProfileEntryHookStub::MaybeCallEntryHook(masm); - - // Register parameters: - // x0: argc (including receiver, untagged) - // x1: target - // If argv_in_register(): - // x11: argv (pointer to first argument) - // - // The stack on entry holds the arguments and the receiver, with the receiver - // at the highest address: - // - // sp]argc-1]: receiver - // sp[argc-2]: arg[argc-2] - // ... ... - // sp[1]: arg[1] - // sp[0]: arg[0] - // - // The arguments are in reverse order, so that arg[argc-2] is actually the - // first argument to the target function and arg[0] is the last. - const Register& argc_input = x0; - const Register& target_input = x1; - - // Calculate argv, argc and the target address, and store them in - // callee-saved registers so we can retry the call without having to reload - // these arguments. - // TODO(jbramley): If the first call attempt succeeds in the common case (as - // it should), then we might be better off putting these parameters directly - // into their argument registers, rather than using callee-saved registers and - // preserving them on the stack. - const Register& argv = x21; - const Register& argc = x22; - const Register& target = x23; - - // Derive argv from the stack pointer so that it points to the first argument - // (arg[argc-2]), or just below the receiver in case there are no arguments. - // - Adjust for the arg[] array. - Register temp_argv = x11; - if (!argv_in_register()) { - __ SlotAddress(temp_argv, x0); - // - Adjust for the receiver. - __ Sub(temp_argv, temp_argv, 1 * kPointerSize); - } - - // Reserve three slots to preserve x21-x23 callee-saved registers. - int extra_stack_space = 3; - // Enter the exit frame. 
- FrameScope scope(masm, StackFrame::MANUAL); - __ EnterExitFrame( - save_doubles(), x10, extra_stack_space, - is_builtin_exit() ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); - - // Poke callee-saved registers into reserved space. - __ Poke(argv, 1 * kPointerSize); - __ Poke(argc, 2 * kPointerSize); - __ Poke(target, 3 * kPointerSize); - - // We normally only keep tagged values in callee-saved registers, as they - // could be pushed onto the stack by called stubs and functions, and on the - // stack they can confuse the GC. However, we're only calling C functions - // which can push arbitrary data onto the stack anyway, and so the GC won't - // examine that part of the stack. - __ Mov(argc, argc_input); - __ Mov(target, target_input); - __ Mov(argv, temp_argv); - - // x21 : argv - // x22 : argc - // x23 : call target - // - // The stack (on entry) holds the arguments and the receiver, with the - // receiver at the highest address: - // - // argv[8]: receiver - // argv -> argv[0]: arg[argc-2] - // ... ... - // argv[...]: arg[1] - // argv[...]: arg[0] - // - // Immediately below (after) this is the exit frame, as constructed by - // EnterExitFrame: - // fp[8]: CallerPC (lr) - // fp -> fp[0]: CallerFP (old fp) - // fp[-8]: Space reserved for SPOffset. - // fp[-16]: CodeObject() - // sp[...]: Saved doubles, if saved_doubles is true. - // sp[32]: Alignment padding, if necessary. - // sp[24]: Preserved x23 (used for target). - // sp[16]: Preserved x22 (used for argc). - // sp[8]: Preserved x21 (used for argv). - // sp -> sp[0]: Space reserved for the return address. - // - // After a successful call, the exit frame, preserved registers (x21-x23) and - // the arguments (including the receiver) are dropped or popped as - // appropriate. The stub then returns. - // - // After an unsuccessful call, the exit frame and suchlike are left - // untouched, and the stub either throws an exception by jumping to one of - // the exception_returned label. 
- - // Prepare AAPCS64 arguments to pass to the builtin. - __ Mov(x0, argc); - __ Mov(x1, argv); - __ Mov(x2, ExternalReference::isolate_address(isolate())); - - Label return_location; - __ Adr(x12, &return_location); - __ Poke(x12, 0); - - if (__ emit_debug_code()) { - // Verify that the slot below fp[kSPOffset]-8 points to the return location - // (currently in x12). - UseScratchRegisterScope temps(masm); - Register temp = temps.AcquireX(); - __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset)); - __ Ldr(temp, MemOperand(temp, -static_cast(kXRegSize))); - __ Cmp(temp, x12); - __ Check(eq, AbortReason::kReturnAddressNotFoundInFrame); - } - - // Call the builtin. - __ Blr(target); - __ Bind(&return_location); - - // Result returned in x0 or x1:x0 - do not destroy these registers! - - // x0 result0 The return code from the call. - // x1 result1 For calls which return ObjectPair. - // x21 argv - // x22 argc - // x23 target - const Register& result = x0; - - // Check result for exception sentinel. - Label exception_returned; - __ CompareRoot(result, Heap::kExceptionRootIndex); - __ B(eq, &exception_returned); - - // The call succeeded, so unwind the stack and return. - - // Restore callee-saved registers x21-x23. - __ Mov(x11, argc); - - __ Peek(argv, 1 * kPointerSize); - __ Peek(argc, 2 * kPointerSize); - __ Peek(target, 3 * kPointerSize); - - __ LeaveExitFrame(save_doubles(), x10, x9); - if (!argv_in_register()) { - // Drop the remaining stack slots and return from the stub. - __ DropArguments(x11); - } - __ AssertFPCRState(); - __ Ret(); - - // Handling of exception. 
- __ Bind(&exception_returned); - - ExternalReference pending_handler_context_address( - IsolateAddressId::kPendingHandlerContextAddress, isolate()); - ExternalReference pending_handler_entrypoint_address( - IsolateAddressId::kPendingHandlerEntrypointAddress, isolate()); - ExternalReference pending_handler_fp_address( - IsolateAddressId::kPendingHandlerFPAddress, isolate()); - ExternalReference pending_handler_sp_address( - IsolateAddressId::kPendingHandlerSPAddress, isolate()); - - // Ask the runtime for help to determine the handler. This will set x0 to - // contain the current pending exception, don't clobber it. - ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler, - isolate()); - { - FrameScope scope(masm, StackFrame::MANUAL); - __ Mov(x0, 0); // argc. - __ Mov(x1, 0); // argv. - __ Mov(x2, ExternalReference::isolate_address(isolate())); - __ CallCFunction(find_handler, 3); - } - - // Retrieve the handler context, SP and FP. - __ Mov(cp, Operand(pending_handler_context_address)); - __ Ldr(cp, MemOperand(cp)); - { - UseScratchRegisterScope temps(masm); - Register scratch = temps.AcquireX(); - __ Mov(scratch, Operand(pending_handler_sp_address)); - __ Ldr(scratch, MemOperand(scratch)); - __ Mov(sp, scratch); - } - __ Mov(fp, Operand(pending_handler_fp_address)); - __ Ldr(fp, MemOperand(fp)); - - // If the handler is a JS frame, restore the context to the frame. Note that - // the context will be set to (cp == 0) for non-JS frames. - Label not_js_frame; - __ Cbz(cp, ¬_js_frame); - __ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - __ Bind(¬_js_frame); - - // Reset the masking register. This is done independent of the underlying - // feature flag {FLAG_branch_load_poisoning} to make the snapshot work with - // both configurations. It is safe to always do this, because the underlying - // register is caller-saved and can be arbitrarily clobbered. 
- __ ResetSpeculationPoisonRegister(); - - // Compute the handler entry address and jump to it. - __ Mov(x10, Operand(pending_handler_entrypoint_address)); - __ Ldr(x10, MemOperand(x10)); - __ Br(x10); -} - // This is the entry point from C++. 5 arguments are provided in x0-x4. // See use of the JSEntryFunction for example in src/execution.cc. // Input: @@ -478,30 +55,35 @@ void CEntryStub::Generate(MacroAssembler* masm) { // Output: // x0: result. void JSEntryStub::Generate(MacroAssembler* masm) { + Label invoke, handler_entry, exit; + Register code_entry = x0; - // Enable instruction instrumentation. This only works on the simulator, and - // will have no effect on the model or real hardware. - __ EnableInstrumentation(); + { + NoRootArrayScope no_root_array(masm); - Label invoke, handler_entry, exit; + // Enable instruction instrumentation. This only works on the simulator, and + // will have no effect on the model or real hardware. + __ EnableInstrumentation(); - __ PushCalleeSavedRegisters(); + __ PushCalleeSavedRegisters(); - ProfileEntryHookStub::MaybeCallEntryHook(masm); + ProfileEntryHookStub::MaybeCallEntryHook(masm); - // Set up the reserved register for 0.0. - __ Fmov(fp_zero, 0.0); + // Set up the reserved register for 0.0. + __ Fmov(fp_zero, 0.0); - // Initialize the root array register - __ InitializeRootRegister(); + // Initialize the root array register + __ InitializeRootRegister(); + } // Build an entry frame (see layout below). StackFrame::Type marker = type(); int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used. __ Mov(x13, bad_frame_pointer); __ Mov(x12, StackFrame::TypeToMarker(marker)); - __ Mov(x11, ExternalReference(IsolateAddressId::kCEntryFPAddress, isolate())); + __ Mov(x11, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, + isolate())); __ Ldr(x10, MemOperand(x11)); __ Push(x13, x12, xzr, x10); @@ -511,8 +93,9 @@ void JSEntryStub::Generate(MacroAssembler* masm) { // Push the JS entry frame marker. 
Also set js_entry_sp if this is the // outermost JS call. Label non_outermost_js, done; - ExternalReference js_entry_sp(IsolateAddressId::kJSEntrySPAddress, isolate()); - __ Mov(x10, ExternalReference(js_entry_sp)); + ExternalReference js_entry_sp = + ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress, isolate()); + __ Mov(x10, js_entry_sp); __ Ldr(x11, MemOperand(x10)); // Select between the inner and outermost frame marker, based on the JS entry @@ -552,7 +135,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) { // field in the JSEnv and return a failure sentinel. Coming in here the // fp will be invalid because the PushTryHandler below sets it to 0 to // signal the existence of the JSEntry frame. - __ Mov(x10, Operand(ExternalReference( + __ Mov(x10, Operand(ExternalReference::Create( IsolateAddressId::kPendingExceptionAddress, isolate()))); } __ Str(code_entry, MemOperand(x10)); @@ -569,7 +152,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) { "Unexpected offset for StackHandlerConstants::kNextOffset"); // Link the current handler as the next handler. 
- __ Mov(x11, ExternalReference(IsolateAddressId::kHandlerAddress, isolate())); + __ Mov(x11, ExternalReference::Create(IsolateAddressId::kHandlerAddress, + isolate())); __ Ldr(x10, MemOperand(x11)); __ Push(padreg, x10); @@ -602,7 +186,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) { static_assert(StackHandlerConstants::kNextOffset == 0 * kPointerSize, "Unexpected offset for StackHandlerConstants::kNextOffset"); __ Pop(x10, padreg); - __ Mov(x11, ExternalReference(IsolateAddressId::kHandlerAddress, isolate())); + __ Mov(x11, ExternalReference::Create(IsolateAddressId::kHandlerAddress, + isolate())); __ Drop(StackHandlerConstants::kSlotCount - 2); __ Str(x10, MemOperand(x11)); @@ -624,13 +209,13 @@ void JSEntryStub::Generate(MacroAssembler* masm) { __ PeekPair(x10, c_entry_fp, 1 * kPointerSize); __ Cmp(x10, StackFrame::OUTERMOST_JSENTRY_FRAME); __ B(ne, &non_outermost_js_2); - __ Mov(x12, ExternalReference(js_entry_sp)); + __ Mov(x12, js_entry_sp); __ Str(xzr, MemOperand(x12)); __ Bind(&non_outermost_js_2); // Restore the top frame descriptors from the stack. - __ Mov(x12, - ExternalReference(IsolateAddressId::kCEntryFPAddress, isolate())); + __ Mov(x12, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, + isolate())); __ Str(c_entry_fp, MemOperand(x12)); } @@ -643,9 +228,9 @@ void JSEntryStub::Generate(MacroAssembler* masm) { __ Ret(); } -// The entry hook is a Push (stp) instruction, followed by a call. +// The entry hook is a Push (stp) instruction, followed by a near call. static const unsigned int kProfileEntryHookCallSize = - (1 * kInstructionSize) + Assembler::kCallSizeWithRelocation; + (1 * kInstructionSize) + Assembler::kNearCallSize; void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm, Zone* zone) { @@ -700,9 +285,8 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) { // Under the simulator we need to indirect the entry hook through a trampoline // function at a known address. 
ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline)); - __ Mov(x10, Operand(ExternalReference(&dispatcher, - ExternalReference::BUILTIN_CALL, - isolate()))); + __ Mov(x10, Operand(ExternalReference::Create( + &dispatcher, ExternalReference::BUILTIN_CALL))); // It additionally takes an isolate as a third parameter __ Mov(x2, ExternalReference::isolate_address(isolate())); #endif @@ -735,12 +319,9 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) { void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) { - intptr_t code = - reinterpret_cast(GetCode().location()); - __ Mov(lr, Operand(code, RelocInfo::CODE_TARGET)); - __ Mov(x10, target); // Branch to the stub. - __ Blr(lr); + __ Mov(x10, target); + __ Call(GetCode(), RelocInfo::CODE_TARGET); } template @@ -967,7 +548,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) { __ Poke(constructor, Operand(x0, LSL, kPointerSizeLog2)); __ Add(x0, x0, Operand(3)); __ Push(new_target, allocation_site); - __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate())); + __ JumpToExternalReference(ExternalReference::Create(Runtime::kNewArray)); } @@ -1131,8 +712,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, FrameScope frame(masm, StackFrame::MANUAL); __ PushSafepointRegisters(); __ Mov(x0, ExternalReference::isolate_address(isolate)); - __ CallCFunction(ExternalReference::log_enter_external_function(isolate), - 1); + __ CallCFunction(ExternalReference::log_enter_external_function(), 1); __ PopSafepointRegisters(); } @@ -1146,8 +726,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, FrameScope frame(masm, StackFrame::MANUAL); __ PushSafepointRegisters(); __ Mov(x0, ExternalReference::isolate_address(isolate)); - __ CallCFunction(ExternalReference::log_leave_external_function(isolate), - 1); + __ CallCFunction(ExternalReference::log_leave_external_function(), 1); __ PopSafepointRegisters(); } @@ -1203,8 +782,7 @@ static void 
CallApiFunctionAndReturn(MacroAssembler* masm, Register saved_result = x19; __ Mov(saved_result, x0); __ Mov(x0, ExternalReference::isolate_address(isolate)); - __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate), - 1); + __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1); __ Mov(x0, saved_result); __ B(&leave_exit_frame); } @@ -1275,8 +853,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) { __ Mov(x10, argc()); __ Str(x10, MemOperand(x0, 2 * kPointerSize)); - ExternalReference thunk_ref = - ExternalReference::invoke_function_callback(masm->isolate()); + ExternalReference thunk_ref = ExternalReference::invoke_function_callback(); AllowExternalCallThatCantCauseGC scope(masm); // Stores return the first js argument @@ -1354,7 +931,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) { // x1 = v8::PropertyCallbackInfo& ExternalReference thunk_ref = - ExternalReference::invoke_accessor_getter_callback(isolate()); + ExternalReference::invoke_accessor_getter_callback(); Register api_function_address = x2; Register js_getter = x4; diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc index a81621b6a9a2f1..397f4cb36d4595 100644 --- a/deps/v8/src/arm64/deoptimizer-arm64.cc +++ b/deps/v8/src/arm64/deoptimizer-arm64.cc @@ -115,8 +115,8 @@ void Deoptimizer::TableEntryGenerator::Generate() { DCHECK_EQ(saved_registers.Count() % 2, 0); __ PushCPURegList(saved_registers); - __ Mov(x3, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress, - isolate()))); + __ Mov(x3, Operand(ExternalReference::Create( + IsolateAddressId::kCEntryFPAddress, isolate()))); __ Str(fp, MemOperand(x3)); const int kSavedRegistersAreaSize = @@ -165,7 +165,7 @@ void Deoptimizer::TableEntryGenerator::Generate() { { // Call Deoptimizer::New(). 
AllowExternalCallThatCantCauseGC scope(masm()); - __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6); + __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6); } // Preserve "deoptimizer" object in register x0. @@ -212,8 +212,7 @@ void Deoptimizer::TableEntryGenerator::Generate() { { // Call Deoptimizer::ComputeOutputFrames(). AllowExternalCallThatCantCauseGC scope(masm()); - __ CallCFunction( - ExternalReference::compute_output_frames_function(isolate()), 1); + __ CallCFunction(ExternalReference::compute_output_frames_function(), 1); } __ Pop(x4, padreg); // Restore deoptimizer object (class Deoptimizer). diff --git a/deps/v8/src/arm64/instructions-arm64.h b/deps/v8/src/arm64/instructions-arm64.h index 499023ebb2b7b8..b1c488eb65407e 100644 --- a/deps/v8/src/arm64/instructions-arm64.h +++ b/deps/v8/src/arm64/instructions-arm64.h @@ -354,6 +354,12 @@ class Instruction { return (high16 << 16) | low16; } + bool IsUnconditionalBranch() const { + return Mask(UnconditionalBranchMask) == B; + } + + bool IsBranchAndLink() const { return Mask(UnconditionalBranchMask) == BL; } + bool IsBranchAndLinkToRegister() const { return Mask(UnconditionalBranchToRegisterMask) == BLR; } diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc index bcbe5d97dce3f5..89c7b98f51152a 100644 --- a/deps/v8/src/arm64/interface-descriptors-arm64.cc +++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc @@ -34,7 +34,7 @@ void RecordWriteDescriptor::InitializePlatformSpecific( data->InitializePlatformSpecific(kParameterCount, default_stub_registers); } -const Register FastNewFunctionContextDescriptor::FunctionRegister() { +const Register FastNewFunctionContextDescriptor::ScopeInfoRegister() { return x1; } const Register FastNewFunctionContextDescriptor::SlotsRegister() { return x0; } @@ -260,14 +260,6 @@ void BinaryOpDescriptor::InitializePlatformSpecific( 
data->InitializePlatformSpecific(arraysize(registers), registers); } -void StringAddDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // x1: left operand - // x0: right operand - Register registers[] = {x1, x0}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - void ArgumentAdaptorDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { static PlatformInterfaceDescriptor default_descriptor = diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/arm64/macro-assembler-arm64-inl.h index f96d4b20b8278a..20533362bc7927 100644 --- a/deps/v8/src/arm64/macro-assembler-arm64-inl.h +++ b/deps/v8/src/arm64/macro-assembler-arm64-inl.h @@ -1039,7 +1039,7 @@ void TurboAssembler::Uxtw(const Register& rd, const Register& rn) { void TurboAssembler::InitializeRootRegister() { ExternalReference roots_array_start = ExternalReference::roots_array_start(isolate()); - Mov(root, Operand(roots_array_start)); + Mov(kRootRegister, Operand(roots_array_start)); } diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc index 5bbf71e28cf030..784ffbb2751123 100644 --- a/deps/v8/src/arm64/macro-assembler-arm64.cc +++ b/deps/v8/src/arm64/macro-assembler-arm64.cc @@ -8,7 +8,9 @@ #include "src/base/bits.h" #include "src/base/division-by-constant.h" #include "src/bootstrapper.h" +#include "src/builtins/constants-table-builder.h" #include "src/callable.h" +#include "src/code-factory.h" #include "src/code-stubs.h" #include "src/debug/debug.h" #include "src/external-reference-table.h" @@ -18,6 +20,7 @@ #include "src/instruction-stream.h" #include "src/register-configuration.h" #include "src/runtime/runtime.h" +#include "src/snapshot/serializer-common.h" #include "src/arm64/macro-assembler-arm64-inl.h" #include "src/arm64/macro-assembler-arm64.h" // Cannot be the first include @@ -28,7 +31,17 @@ namespace internal { MacroAssembler::MacroAssembler(Isolate* isolate, 
byte* buffer, unsigned buffer_size, CodeObjectRequired create_code_object) - : TurboAssembler(isolate, buffer, buffer_size, create_code_object) {} + : TurboAssembler(isolate, buffer, buffer_size, create_code_object) { + if (create_code_object == CodeObjectRequired::kYes) { + // Unlike TurboAssembler, which can be used off the main thread and may not + // allocate, macro assembler creates its own copy of the self-reference + // marker in order to disambiguate between self-references during nested + // code generation (e.g.: codegen of the current object triggers stub + // compilation through CodeStub::GetCode()). + code_object_ = Handle::New( + *isolate->factory()->NewSelfReferenceMarker(), isolate); + } +} CPURegList TurboAssembler::DefaultTmpList() { return CPURegList(ip0, ip1); } @@ -47,8 +60,8 @@ TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size, fptmp_list_(DefaultFPTmpList()), use_real_aborts_(true) { if (create_code_object == CodeObjectRequired::kYes) { - code_object_ = - Handle::New(isolate->heap()->undefined_value(), isolate); + code_object_ = Handle::New( + isolate->heap()->self_reference_marker(), isolate); } } @@ -310,7 +323,6 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand, if (operand.NeedsRelocation(this)) { Ldr(dst, operand); - } else if (operand.IsImmediate()) { // Call the macro assembler for generic immediates. 
Mov(dst, operand.ImmediateValue()); @@ -352,6 +364,16 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand, } } +void TurboAssembler::Mov(const Register& rd, ExternalReference reference) { +#ifdef V8_EMBEDDED_BUILTINS + if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) { + LookupExternalReference(rd, reference); + return; + } +#endif // V8_EMBEDDED_BUILTINS + Mov(rd, Operand(reference)); +} + void TurboAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) { DCHECK(is_uint16(imm)); int byte1 = (imm & 0xFF); @@ -1355,8 +1377,7 @@ void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) { Str(src, MemOperand(sp, offset)); } - -void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) { +void TurboAssembler::Peek(const CPURegister& dst, const Operand& offset) { if (offset.IsImmediate()) { DCHECK_GE(offset.ImmediateValue(), 0); } else if (emit_debug_code()) { @@ -1552,7 +1573,7 @@ void TurboAssembler::LoadRoot(CPURegister destination, Heap::RootListIndex index) { // TODO(jbramley): Most root values are constants, and can be synthesized // without a load. Refer to the ARM back end for details. 
- Ldr(destination, MemOperand(root, index << kPointerSizeLog2)); + Ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2)); } @@ -1566,7 +1587,17 @@ void MacroAssembler::LoadObject(Register result, Handle object) { } void TurboAssembler::Move(Register dst, Register src) { Mov(dst, src); } -void TurboAssembler::Move(Register dst, Handle x) { Mov(dst, x); } + +void TurboAssembler::Move(Register dst, Handle x) { +#ifdef V8_EMBEDDED_BUILTINS + if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) { + LookupConstant(dst, x); + return; + } +#endif // V8_EMBEDDED_BUILTINS + Mov(dst, x); +} + void TurboAssembler::Move(Register dst, Smi* src) { Mov(dst, src); } void TurboAssembler::Swap(Register lhs, Register rhs) { @@ -1718,12 +1749,10 @@ void TurboAssembler::CallStubDelayed(CodeStub* stub) { Label start_call; Bind(&start_call); #endif - UseScratchRegisterScope temps(this); - Register temp = temps.AcquireX(); - Ldr(temp, Operand::EmbeddedCode(stub)); - Blr(temp); + Operand operand = Operand::EmbeddedCode(stub); + near_call(operand.heap_object_request()); #ifdef DEBUG - AssertSizeOfCodeGeneratedSince(&start_call, kCallSizeWithRelocation); + AssertSizeOfCodeGeneratedSince(&start_call, kNearCallSize); #endif } @@ -1744,8 +1773,10 @@ void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid, // should remove this need and make the runtime routine entry code // smarter. Mov(x0, f->nargs); - Mov(x1, ExternalReference(f, isolate())); - CallStubDelayed(new (zone) CEntryStub(nullptr, 1, save_doubles)); + Mov(x1, ExternalReference::Create(f)); + Handle code = + CodeFactory::CEntry(isolate(), f->result_size, save_doubles); + Call(code, RelocInfo::CODE_TARGET); } void MacroAssembler::CallRuntime(const Runtime::Function* f, @@ -1760,23 +1791,23 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, // Place the necessary arguments. 
Mov(x0, num_arguments); - Mov(x1, ExternalReference(f, isolate())); + Mov(x1, ExternalReference::Create(f)); - CEntryStub stub(isolate(), 1, save_doubles); - CallStub(&stub); + Handle code = + CodeFactory::CEntry(isolate(), f->result_size, save_doubles); + Call(code, RelocInfo::CODE_TARGET); } void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin, bool builtin_exit_frame) { Mov(x1, builtin); - CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack, - builtin_exit_frame); - Jump(stub.GetCode(), RelocInfo::CODE_TARGET); + Handle code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs, + kArgvOnStack, builtin_exit_frame); + Jump(code, RelocInfo::CODE_TARGET); } void MacroAssembler::JumpToInstructionStream(Address entry) { - Mov(kOffHeapTrampolineRegister, - Operand(reinterpret_cast(entry), RelocInfo::OFF_HEAP_TARGET)); + Mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET)); Br(kOffHeapTrampolineRegister); } @@ -1790,7 +1821,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) { // smarter. Mov(x0, function->nargs); } - JumpToExternalReference(ExternalReference(fid, isolate())); + JumpToExternalReference(ExternalReference::Create(fid)); } int TurboAssembler::ActivationFrameAlignment() { @@ -1852,30 +1883,124 @@ void TurboAssembler::CallCFunction(Register function, int num_of_reg_args, } } -void TurboAssembler::Jump(Register target) { Br(target); } +#ifdef V8_EMBEDDED_BUILTINS +void TurboAssembler::LookupConstant(Register destination, + Handle object) { + CHECK(isolate()->ShouldLoadConstantsFromRootList()); + CHECK(root_array_available_); -void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, - Condition cond) { + // Ensure the given object is in the builtins constants table and fetch its + // index. 
+ BuiltinsConstantsTableBuilder* builder = + isolate()->builtins_constants_table_builder(); + uint32_t index = builder->AddObject(object); + + // TODO(jgruber): Load builtins from the builtins table. + // TODO(jgruber): Ensure that code generation can recognize constant targets + // in kArchCallCodeObject. + + DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant( + Heap::kBuiltinsConstantsTableRootIndex)); + + LoadRoot(destination, Heap::kBuiltinsConstantsTableRootIndex); + Ldr(destination, FieldMemOperand(destination, FixedArray::kHeaderSize + + index * kPointerSize)); +} + +void TurboAssembler::LookupExternalReference(Register destination, + ExternalReference reference) { + CHECK(reference.address() != + ExternalReference::roots_array_start(isolate()).address()); + CHECK(isolate()->ShouldLoadConstantsFromRootList()); + CHECK(root_array_available_); + + // Encode as an index into the external reference table stored on the isolate. + + ExternalReferenceEncoder encoder(isolate()); + ExternalReferenceEncoder::Value v = encoder.Encode(reference.address()); + CHECK(!v.is_from_api()); + uint32_t index = v.index(); + + // Generate code to load from the external reference table. 
+ + int32_t roots_to_external_reference_offset = + Heap::roots_to_external_reference_table_offset() + + ExternalReferenceTable::OffsetOfEntry(index); + + Ldr(destination, + MemOperand(kRootRegister, roots_to_external_reference_offset)); +} +#endif // V8_EMBEDDED_BUILTINS + +void TurboAssembler::Jump(Register target, Condition cond) { + if (cond == nv) return; + Label done; + if (cond != al) B(NegateCondition(cond), &done); + Br(target); + Bind(&done); +} + +void TurboAssembler::JumpHelper(int64_t offset, RelocInfo::Mode rmode, + Condition cond) { if (cond == nv) return; - UseScratchRegisterScope temps(this); - Register temp = temps.AcquireX(); Label done; if (cond != al) B(NegateCondition(cond), &done); - Mov(temp, Operand(target, rmode)); - Br(temp); + if (CanUseNearCallOrJump(rmode)) { + DCHECK(IsNearCallOffset(offset)); + near_jump(static_cast(offset), rmode); + } else { + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireX(); + uint64_t imm = reinterpret_cast(pc_) + offset * kInstructionSize; + Mov(temp, Immediate(imm, rmode)); + Br(temp); + } Bind(&done); } +namespace { + +// The calculated offset is either: +// * the 'target' input unmodified if this is a WASM call, or +// * the offset of the target from the current PC, in instructions, for any +// other type of call. +static int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode, + byte* pc) { + int64_t offset = static_cast(target); + // The target of WebAssembly calls is still an index instead of an actual + // address at this point, and needs to be encoded as-is. 
+ if (rmode != RelocInfo::WASM_CALL) { + offset -= reinterpret_cast(pc); + DCHECK_EQ(offset % kInstructionSize, 0); + offset = offset / static_cast(kInstructionSize); + } + return offset; +} +} // namespace + void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond) { - DCHECK(!RelocInfo::IsCodeTarget(rmode)); - Jump(reinterpret_cast(target), rmode, cond); + JumpHelper(CalculateTargetOffset(target, rmode, pc_), rmode, cond); } void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, Condition cond) { DCHECK(RelocInfo::IsCodeTarget(rmode)); - Jump(reinterpret_cast(code.address()), rmode, cond); +#ifdef V8_EMBEDDED_BUILTINS + if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) { + UseScratchRegisterScope temps(this); + Register scratch = temps.AcquireX(); + LookupConstant(scratch, code); + Add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag)); + Jump(scratch, cond); + return; + } +#endif // V8_EMBEDDED_BUILTINS + if (CanUseNearCallOrJump(rmode)) { + JumpHelper(static_cast(GetCodeTargetIndex(code)), rmode, cond); + } else { + Jump(code.address(), rmode, cond); + } } void TurboAssembler::Call(Register target) { @@ -1892,20 +2017,6 @@ void TurboAssembler::Call(Register target) { #endif } -void TurboAssembler::Call(Label* target) { - BlockPoolsScope scope(this); -#ifdef DEBUG - Label start_call; - Bind(&start_call); -#endif - - Bl(target); - -#ifdef DEBUG - AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target)); -#endif -} - // TurboAssembler::CallSize is sensitive to changes in this function, as it // requires to know how many instructions are used to branch to the target. 
void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) { @@ -1915,33 +2026,40 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) { Bind(&start_call); #endif - UseScratchRegisterScope temps(this); - Register temp = temps.AcquireX(); - - if (RelocInfo::IsNone(rmode)) { - // Addresses are 48 bits so we never need to load the upper 16 bits. - uint64_t imm = reinterpret_cast(target); - // If we don't use ARM tagged addresses, the 16 higher bits must be 0. - DCHECK_EQ((imm >> 48) & 0xFFFF, 0); - movz(temp, (imm >> 0) & 0xFFFF, 0); - movk(temp, (imm >> 16) & 0xFFFF, 16); - movk(temp, (imm >> 32) & 0xFFFF, 32); + if (CanUseNearCallOrJump(rmode)) { + int64_t offset = CalculateTargetOffset(target, rmode, pc_); + DCHECK(IsNearCallOffset(offset)); + near_call(static_cast(offset), rmode); } else { - Ldr(temp, Immediate(reinterpret_cast(target), rmode)); + IndirectCall(target, rmode); } - Blr(temp); #ifdef DEBUG AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode)); #endif } void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode) { + BlockPoolsScope scope(this); #ifdef DEBUG Label start_call; Bind(&start_call); #endif - Call(code.address(), rmode); +#ifdef V8_EMBEDDED_BUILTINS + if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) { + UseScratchRegisterScope temps(this); + Register scratch = temps.AcquireX(); + LookupConstant(scratch, code); + Add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag)); + Call(scratch); + return; + } +#endif // V8_EMBEDDED_BUILTINS + if (CanUseNearCallOrJump(rmode)) { + near_call(GetCodeTargetIndex(code), rmode); + } else { + IndirectCall(code.address(), rmode); + } #ifdef DEBUG // Check the size of the code generated. @@ -1954,10 +2072,21 @@ void TurboAssembler::Call(ExternalReference target) { Register temp = temps.AcquireX(); // Immediate is in charge of setting the relocation mode to // EXTERNAL_REFERENCE. 
- Ldr(temp, Immediate(target)); + Mov(temp, Immediate(target)); Call(temp); } +void TurboAssembler::IndirectCall(Address target, RelocInfo::Mode rmode) { + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireX(); + Mov(temp, Immediate(target, rmode)); + Blr(temp); +} + +bool TurboAssembler::IsNearCallOffset(int64_t offset) { + return is_int26(offset); +} + void TurboAssembler::CallForDeoptimization(Address target, RelocInfo::Mode rmode) { DCHECK_EQ(rmode, RelocInfo::RUNTIME_ENTRY); @@ -1972,12 +2101,20 @@ void TurboAssembler::CallForDeoptimization(Address target, // Deoptimisation table entries require the call address to be in x16, in // order to compute the entry id. + // TODO(all): Put the entry id back in the table now that we are using + // a direct branch for the call and do not need to set up x16. DCHECK(temp.Is(x16)); - Ldr(temp, Immediate(reinterpret_cast(target), rmode)); - Blr(temp); + Mov(temp, Immediate(target, rmode)); + + int64_t offset = static_cast(target) - + static_cast(isolate_data().code_range_start_); + DCHECK_EQ(offset % kInstructionSize, 0); + offset = offset / static_cast(kInstructionSize); + DCHECK(IsNearCallOffset(offset)); + near_call(static_cast(offset), rmode); #ifdef DEBUG - AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode)); + AssertSizeOfCodeGeneratedSince(&start_call, kNearCallSize + kInstructionSize); #endif } @@ -1986,23 +2123,14 @@ int TurboAssembler::CallSize(Register target) { return kInstructionSize; } -int TurboAssembler::CallSize(Label* target) { - USE(target); - return kInstructionSize; -} - int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode) { USE(target); - - return RelocInfo::IsNone(rmode) ? kCallSizeWithoutRelocation - : kCallSizeWithRelocation; + return CanUseNearCallOrJump(rmode) ? kNearCallSize : kFarCallSize; } int TurboAssembler::CallSize(Handle code, RelocInfo::Mode rmode) { USE(code); - - return RelocInfo::IsNone(rmode) ? 
kCallSizeWithoutRelocation - : kCallSizeWithRelocation; + return CanUseNearCallOrJump(rmode) ? kNearCallSize : kFarCallSize; } void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, VRegister value, @@ -2161,13 +2289,16 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target, const ParameterCount& actual) { Label skip_hook; - ExternalReference debug_hook_active = - ExternalReference::debug_hook_on_function_call_address(isolate()); - Mov(x4, Operand(debug_hook_active)); + Mov(x4, ExternalReference::debug_hook_on_function_call_address(isolate())); Ldrsb(x4, MemOperand(x4)); Cbz(x4, &skip_hook); { + // Load receiver to pass it later to DebugOnFunctionCall hook. + Operand actual_op = actual.is_immediate() ? Operand(actual.immediate()) + : Operand(actual.reg()); + Mov(x4, actual_op); + Ldr(x4, MemOperand(sp, x4, LSL, kPointerSizeLog2)); FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL); @@ -2181,8 +2312,7 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target, SmiTag(expected_reg); SmiTag(actual_reg); Push(expected_reg, actual_reg, new_target, fun); - - PushArgument(fun); + Push(fun, x4); CallRuntime(Runtime::kDebugOnFunctionCall); // Restore values from stack. @@ -2304,8 +2434,9 @@ void TurboAssembler::TryConvertDoubleToInt64(Register result, B(vc, done); } -void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result, - DoubleRegister double_input) { +void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, + Register result, + DoubleRegister double_input) { Label done; // Try to convert the double to an int64. If successful, the bottom 32 bits @@ -2315,9 +2446,9 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result, // If we fell through then inline version didn't succeed - call stub instead. Push(lr, double_input); - auto stub = new (zone) DoubleToIStub(nullptr, result); - // DoubleToIStub preserves any registers it needs to clobber. 
- CallStubDelayed(stub); + // DoubleToI preserves any registers it needs to clobber. + Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET); + Ldr(result, MemOperand(sp, 0)); DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes()); Pop(xzr, lr); // xzr to drop the double input on the stack. @@ -2334,12 +2465,12 @@ void TurboAssembler::Prologue() { void TurboAssembler::EnterFrame(StackFrame::Type type) { UseScratchRegisterScope temps(this); - Register type_reg = temps.AcquireX(); - Register code_reg = temps.AcquireX(); if (type == StackFrame::INTERNAL) { + Register code_reg = temps.AcquireX(); + Move(code_reg, CodeObject()); + Register type_reg = temps.AcquireX(); Mov(type_reg, StackFrame::TypeToMarker(type)); - Mov(code_reg, Operand(CodeObject())); Push(lr, fp, type_reg, code_reg); Add(fp, sp, InternalFrameConstants::kFixedFrameSizeFromFp); // sp[4] : lr @@ -2347,6 +2478,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) { // sp[1] : type // sp[0] : [code object] } else if (type == StackFrame::WASM_COMPILED) { + Register type_reg = temps.AcquireX(); Mov(type_reg, StackFrame::TypeToMarker(type)); Push(lr, fp); Mov(fp, sp); @@ -2357,6 +2489,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) { // sp[0] : for alignment } else { DCHECK_EQ(type, StackFrame::CONSTRUCT); + Register type_reg = temps.AcquireX(); Mov(type_reg, StackFrame::TypeToMarker(type)); // Users of this frame type push a context pointer after the type field, @@ -2418,7 +2551,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch, Mov(fp, sp); Mov(scratch, StackFrame::TypeToMarker(frame_type)); Push(scratch, xzr); - Mov(scratch, Operand(CodeObject())); + Move(scratch, CodeObject()); Push(scratch, padreg); // fp[8]: CallerPC (lr) // fp -> fp[0]: CallerFP (old fp) @@ -2434,11 +2567,11 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch, STATIC_ASSERT((-4 * kPointerSize) == ExitFrameConstants::kPaddingOffset); // Save 
the frame pointer and context pointer in the top frame. - Mov(scratch, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress, - isolate()))); + Mov(scratch, + ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate())); Str(fp, MemOperand(scratch)); Mov(scratch, - Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate()))); + ExternalReference::Create(IsolateAddressId::kContextAddress, isolate())); Str(cp, MemOperand(scratch)); STATIC_ASSERT((-4 * kPointerSize) == ExitFrameConstants::kLastExitFrameField); @@ -2481,19 +2614,19 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles, // Restore the context pointer from the top frame. Mov(scratch, - Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate()))); + ExternalReference::Create(IsolateAddressId::kContextAddress, isolate())); Ldr(cp, MemOperand(scratch)); if (emit_debug_code()) { // Also emit debug code to clear the cp in the top frame. Mov(scratch2, Operand(Context::kInvalidContext)); - Mov(scratch, Operand(ExternalReference(IsolateAddressId::kContextAddress, - isolate()))); + Mov(scratch, ExternalReference::Create(IsolateAddressId::kContextAddress, + isolate())); Str(scratch2, MemOperand(scratch)); } // Clear the frame pointer from the top frame. - Mov(scratch, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress, - isolate()))); + Mov(scratch, + ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate())); Str(xzr, MemOperand(scratch)); // Pop the exit frame. 
@@ -2515,7 +2648,7 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, Register scratch1, Register scratch2) { DCHECK_NE(value, 0); if (FLAG_native_code_counters && counter->Enabled()) { - Mov(scratch2, ExternalReference(counter)); + Mov(scratch2, ExternalReference::Create(counter)); Ldr(scratch1.W(), MemOperand(scratch2)); Add(scratch1.W(), scratch1.W(), value); Str(scratch1.W(), MemOperand(scratch2)); @@ -2530,9 +2663,7 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, void MacroAssembler::MaybeDropFrames() { // Check whether we need to drop frames to restart a function on the stack. - ExternalReference restart_fp = - ExternalReference::debug_restart_fp_address(isolate()); - Mov(x1, Operand(restart_fp)); + Mov(x1, ExternalReference::debug_restart_fp_address(isolate())); Ldr(x1, MemOperand(x1)); Tst(x1, x1); Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET, @@ -2890,6 +3021,10 @@ void TurboAssembler::Assert(Condition cond, AbortReason reason) { } } +void TurboAssembler::AssertUnreachable(AbortReason reason) { + if (emit_debug_code()) Abort(reason); +} + void MacroAssembler::AssertRegisterIsRoot(Register reg, Heap::RootListIndex index, AbortReason reason) { @@ -3115,7 +3250,7 @@ void TurboAssembler::CallPrintf(int arg_count, const CPURegister* args) { dc32(arg_pattern_list); // kPrintfArgPatternListOffset } #else - Call(ExternalReference::printf_function(isolate())); + Call(ExternalReference::printf_function()); #endif } diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h index 6b1b8957cb5dcf..16aa006b2f4660 100644 --- a/deps/v8/src/arm64/macro-assembler-arm64.h +++ b/deps/v8/src/arm64/macro-assembler-arm64.h @@ -58,6 +58,7 @@ namespace internal { #define kOffHeapTrampolineRegister ip0 #define kRuntimeCallFunctionRegister x1 #define kRuntimeCallArgCountRegister x0 +#define kWasmInstanceRegister x7 #define LS_MACRO_LIST(V) \ V(Ldrb, Register&, 
rt, LDRB_w) \ @@ -182,10 +183,10 @@ class TurboAssembler : public Assembler { CodeObjectRequired create_code_object); // The Abort method should call a V8 runtime function, but the CallRuntime - // mechanism depends on CEntryStub. If use_real_aborts is false, Abort will - // use a simpler abort mechanism that doesn't depend on CEntryStub. + // mechanism depends on CEntry. If use_real_aborts is false, Abort will + // use a simpler abort mechanism that doesn't depend on CEntry. // - // The purpose of this is to allow Aborts to be compiled whilst CEntryStub is + // The purpose of this is to allow Aborts to be compiled whilst CEntry is // being generated. bool use_real_aborts() const { return use_real_aborts_; } @@ -219,6 +220,14 @@ class TurboAssembler : public Assembler { bool allow_macro_instructions() const { return allow_macro_instructions_; } #endif + // We should not use near calls or jumps for JS->WASM calls and calls to + // external references, since the code spaces are not guaranteed to be close + // to each other. + bool CanUseNearCallOrJump(RelocInfo::Mode rmode) { + return rmode != RelocInfo::JS_TO_WASM_CALL && + rmode != RelocInfo::EXTERNAL_REFERENCE; + } + // Activation support. void EnterFrame(StackFrame::Type type); void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) { @@ -231,6 +240,7 @@ class TurboAssembler : public Assembler { void Mov(const Register& rd, const Operand& operand, DiscardMoveMode discard_mode = kDontDiscardForSameWReg); + void Mov(const Register& rd, ExternalReference reference); void Mov(const Register& rd, uint64_t imm); inline void Mov(const Register& rd, const Register& rm); void Mov(const VRegister& vd, int vd_index, const VRegister& vn, @@ -563,6 +573,7 @@ class TurboAssembler : public Assembler { bool AllowThisStubCall(CodeStub* stub); void CallStubDelayed(CodeStub* stub); + // TODO(jgruber): Remove in favor of MacroAssembler::CallRuntime. 
void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid, SaveFPRegsMode save_doubles = kDontSaveFPRegs); @@ -582,6 +593,10 @@ class TurboAssembler : public Assembler { // Use --debug_code to enable. void Assert(Condition cond, AbortReason reason); + // Like Assert(), but without condition. + // Use --debug_code to enable. + void AssertUnreachable(AbortReason reason); + void AssertSmi(Register object, AbortReason reason = AbortReason::kOperandIsNotASmi); @@ -867,26 +882,32 @@ class TurboAssembler : public Assembler { int shift_amount = 0); void Movi(const VRegister& vd, uint64_t hi, uint64_t lo); - void Jump(Register target); +#ifdef V8_EMBEDDED_BUILTINS + void LookupConstant(Register destination, Handle object); + void LookupExternalReference(Register destination, + ExternalReference reference); +#endif // V8_EMBEDDED_BUILTINS + + void Jump(Register target, Condition cond = al); void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al); void Jump(Handle code, RelocInfo::Mode rmode, Condition cond = al); - void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al); void Call(Register target); - void Call(Label* target); void Call(Address target, RelocInfo::Mode rmode); void Call(Handle code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET); void Call(ExternalReference target); + // Generate an indirect call (for when a direct call's range is not adequate). + void IndirectCall(Address target, RelocInfo::Mode rmode); + void CallForDeoptimization(Address target, RelocInfo::Mode rmode); // For every Call variant, there is a matching CallSize function that returns // the size (in bytes) of the call sequence. 
static int CallSize(Register target); - static int CallSize(Label* target); - static int CallSize(Address target, RelocInfo::Mode rmode); - static int CallSize(Handle code, - RelocInfo::Mode rmode = RelocInfo::CODE_TARGET); + int CallSize(Address target, RelocInfo::Mode rmode); + int CallSize(Handle code, + RelocInfo::Mode rmode = RelocInfo::CODE_TARGET); // Calls a C function. // The called function is not allowed to trigger a @@ -902,8 +923,8 @@ class TurboAssembler : public Assembler { // Performs a truncating conversion of a floating point number as used by // the JS bitwise operations. See ECMA-262 9.5: ToInt32. // Exits with 'result' holding the answer. - void TruncateDoubleToIDelayed(Zone* zone, Register result, - DoubleRegister double_input); + void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result, + DoubleRegister double_input); inline void Mul(const Register& rd, const Register& rn, const Register& rm); @@ -984,6 +1005,10 @@ class TurboAssembler : public Assembler { // be 16 byte aligned. void Poke(const CPURegister& src, const Operand& offset); + // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes. + // The stack pointer must be aligned to 16 bytes. + void Peek(const CPURegister& dst, const Operand& offset); + // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent // with 'src2' at a higher address than 'src1'. The offset is in bytes. The // stack pointer must be 16 byte aligned. @@ -1206,6 +1231,9 @@ class TurboAssembler : public Assembler { void ResetSpeculationPoisonRegister(); + bool root_array_available() const { return root_array_available_; } + void set_root_array_available(bool v) { root_array_available_ = v; } + protected: // The actual Push and Pop implementations. These don't generate any code // other than that required for the push or pop. This allows @@ -1238,8 +1266,12 @@ class TurboAssembler : public Assembler { // have mixed types. 
The format string (x0) should not be included. void CallPrintf(int arg_count = 0, const CPURegister* args = nullptr); + // This handle will be patched with the code object on installation. + Handle code_object_; + private: bool has_frame_ = false; + bool root_array_available_ = true; Isolate* const isolate_; #if DEBUG // Tell whether any of the macro instruction can be used. When false the @@ -1247,8 +1279,7 @@ class TurboAssembler : public Assembler { // of instructions is called. bool allow_macro_instructions_; #endif - // This handle will be patched with the code object on installation. - Handle code_object_; + // Scratch registers available for use by the MacroAssembler. CPURegList tmp_list_; @@ -1276,6 +1307,9 @@ class TurboAssembler : public Assembler { void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2, const MemOperand& addr, LoadStorePairOp op); + + static bool IsNearCallOffset(int64_t offset); + void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al); }; class MacroAssembler : public TurboAssembler { @@ -1606,10 +1640,6 @@ class MacroAssembler : public TurboAssembler { std::vector queued_; }; - // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes. - // The stack pointer must be aligned to 16 bytes. - void Peek(const CPURegister& dst, const Operand& offset); - // Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The // values peeked will be adjacent, with the value in 'dst2' being from a // higher address than 'dst1'. The offset is in bytes. 
The stack pointer must diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc index 290be13cd6a86e..839c4edda6f0f0 100644 --- a/deps/v8/src/arm64/simulator-arm64.cc +++ b/deps/v8/src/arm64/simulator-arm64.cc @@ -117,7 +117,7 @@ Simulator* Simulator::current(Isolate* isolate) { return sim; } -void Simulator::CallImpl(byte* entry, CallArgument* args) { +void Simulator::CallImpl(Address entry, CallArgument* args) { int index_x = 0; int index_d = 0; diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h index 8cd1e02b6f8723..4bd9294c2f27de 100644 --- a/deps/v8/src/arm64/simulator-arm64.h +++ b/deps/v8/src/arm64/simulator-arm64.h @@ -719,7 +719,7 @@ class Simulator : public DecoderVisitor, public SimulatorBase { // Call an arbitrary function taking an arbitrary number of arguments. template - Return Call(byte* entry, Args... args) { + Return Call(Address entry, Args... args) { // Convert all arguments to CallArgument. CallArgument call_args[] = {CallArgument(args)..., CallArgument::End()}; CallImpl(entry, call_args); @@ -2279,7 +2279,7 @@ class Simulator : public DecoderVisitor, public SimulatorBase { private: void Init(FILE* stream); - V8_EXPORT_PRIVATE void CallImpl(byte* entry, CallArgument* args); + V8_EXPORT_PRIVATE void CallImpl(Address entry, CallArgument* args); // Read floating point return values. template diff --git a/deps/v8/src/asan.h b/deps/v8/src/asan.h new file mode 100644 index 00000000000000..fc0add016e1ca9 --- /dev/null +++ b/deps/v8/src/asan.h @@ -0,0 +1,30 @@ +// Copyright 2018 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// AddressSanitizer support. 
+ +#ifndef V8_ASAN_H_ +#define V8_ASAN_H_ + +#include "src/base/macros.h" +#include "src/globals.h" + +#ifdef V8_USE_ADDRESS_SANITIZER + +#include + +#else // !V8_USE_ADDRESS_SANITIZER + +#define ASAN_POISON_MEMORY_REGION(start, size) \ + static_assert( \ + (std::is_pointer::value || \ + std::is_same::value) && \ + std::is_convertible::value, \ + "static type violation") +#define ASAN_UNPOISON_MEMORY_REGION(start, size) \ + ASAN_POISON_MEMORY_REGION(start, size) + +#endif // V8_USE_ADDRESS_SANITIZER + +#endif // V8_ASAN_H_ diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc index f210b42a62e0d0..1fca56b0fc8480 100644 --- a/deps/v8/src/asmjs/asm-parser.cc +++ b/deps/v8/src/asmjs/asm-parser.cc @@ -1345,7 +1345,8 @@ void AsmJsParser::ValidateCase() { FAIL("Numeric literal out of range"); } int32_t value = static_cast(uvalue); - if (negate) { + DCHECK_IMPLIES(negate && uvalue == 0x80000000, value == kMinInt); + if (negate && value != kMinInt) { value = -value; } EXPECT_TOKEN(':'); @@ -1406,7 +1407,6 @@ AsmType* AsmJsParser::NumericLiteral() { current_function_builder_->EmitI32Const(static_cast(uvalue)); return AsmType::FixNum(); } else { - DCHECK_LE(uvalue, 0xFFFFFFFF); current_function_builder_->EmitI32Const(static_cast(uvalue)); return AsmType::Unsigned(); } @@ -2501,18 +2501,16 @@ void AsmJsParser::GatherCases(ZoneVector* cases) { } } else if (depth == 1 && Peek(TOK(case))) { scanner_.Next(); - int32_t value; uint32_t uvalue; - if (Check('-')) { - if (!CheckForUnsigned(&uvalue)) { - break; - } - value = -static_cast(uvalue); - } else { - if (!CheckForUnsigned(&uvalue)) { - break; - } - value = static_cast(uvalue); + bool negate = false; + if (Check('-')) negate = true; + if (!CheckForUnsigned(&uvalue)) { + break; + } + int32_t value = static_cast(uvalue); + DCHECK_IMPLIES(negate && uvalue == 0x80000000, value == kMinInt); + if (negate && value != kMinInt) { + value = -value; } cases->push_back(value); } else if 
(Peek(AsmJsScanner::kEndOfInput) || diff --git a/deps/v8/src/assembler-arch.h b/deps/v8/src/assembler-arch.h new file mode 100644 index 00000000000000..5858907537c415 --- /dev/null +++ b/deps/v8/src/assembler-arch.h @@ -0,0 +1,30 @@ +// Copyright 2018 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_ASSEMBLER_ARCH_H_ +#define V8_ASSEMBLER_ARCH_H_ + +#include "src/assembler.h" + +#if V8_TARGET_ARCH_IA32 +#include "src/ia32/assembler-ia32.h" +#elif V8_TARGET_ARCH_X64 +#include "src/x64/assembler-x64.h" +#elif V8_TARGET_ARCH_ARM64 +#include "src/arm64/assembler-arm64.h" +#elif V8_TARGET_ARCH_ARM +#include "src/arm/assembler-arm.h" +#elif V8_TARGET_ARCH_PPC +#include "src/ppc/assembler-ppc.h" +#elif V8_TARGET_ARCH_MIPS +#include "src/mips/assembler-mips.h" +#elif V8_TARGET_ARCH_MIPS64 +#include "src/mips64/assembler-mips64.h" +#elif V8_TARGET_ARCH_S390 +#include "src/s390/assembler-s390.h" +#else +#error Unknown architecture. 
+#endif + +#endif // V8_ASSEMBLER_ARCH_H_ diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc index 48d10418c0ac18..799f08a4d893a5 100644 --- a/deps/v8/src/assembler.cc +++ b/deps/v8/src/assembler.cc @@ -54,7 +54,7 @@ const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING"; AssemblerBase::IsolateData::IsolateData(Isolate* isolate) : serializer_enabled_(isolate->serializer_enabled()) -#if V8_TARGET_ARCH_X64 +#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 , code_range_start_( isolate->heap()->memory_allocator()->code_range()->start()) @@ -96,7 +96,7 @@ void AssemblerBase::FlushICache(void* start, size_t size) { void AssemblerBase::Print(Isolate* isolate) { OFStream os(stdout); - v8::internal::Disassembler::Decode(isolate, &os, buffer_, pc_, nullptr); + v8::internal::Disassembler::Decode(isolate, &os, buffer_, pc_); } // ----------------------------------------------------------------------------- @@ -313,9 +313,10 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) { byte* begin_pos = pos_; #endif DCHECK(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES); - DCHECK_GE(rinfo->pc() - last_pc_, 0); + DCHECK_GE(rinfo->pc() - reinterpret_cast
(last_pc_), 0); // Use unsigned delta-encoding for pc. - uint32_t pc_delta = static_cast(rinfo->pc() - last_pc_); + uint32_t pc_delta = + static_cast(rinfo->pc() - reinterpret_cast
(last_pc_)); // The two most common modes are given small tags, and usually fit in a byte. if (rmode == RelocInfo::EMBEDDED_OBJECT) { @@ -337,7 +338,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) { WriteIntData(static_cast(rinfo->data())); } } - last_pc_ = rinfo->pc(); + last_pc_ = reinterpret_cast(rinfo->pc()); #ifdef DEBUG DCHECK_LE(begin_pos - pos_, kMaxSize); #endif @@ -450,38 +451,39 @@ void RelocIterator::next() { } RelocIterator::RelocIterator(Code* code, int mode_mask) - : mode_mask_(mode_mask) { - rinfo_.host_ = code; - rinfo_.pc_ = code->raw_instruction_start(); - rinfo_.data_ = 0; - rinfo_.constant_pool_ = code->constant_pool(); - // Relocation info is read backwards. - pos_ = code->relocation_start() + code->relocation_size(); - end_ = code->relocation_start(); - if (mode_mask_ == 0) pos_ = end_; - next(); -} + : RelocIterator(code, code->raw_instruction_start(), code->constant_pool(), + code->relocation_end(), code->relocation_start(), + mode_mask) {} + +RelocIterator::RelocIterator(const CodeReference code_reference, int mode_mask) + : RelocIterator(nullptr, code_reference.instruction_start(), + code_reference.constant_pool(), + code_reference.relocation_end(), + code_reference.relocation_start(), mode_mask) {} RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) - : mode_mask_(mode_mask) { - rinfo_.pc_ = desc.buffer; - // Relocation info is read backwards. - pos_ = desc.buffer + desc.buffer_size; - end_ = pos_ - desc.reloc_size; - if (mode_mask_ == 0) pos_ = end_; - next(); -} + : RelocIterator(nullptr, reinterpret_cast
(desc.buffer), 0, + desc.buffer + desc.buffer_size, + desc.buffer + desc.buffer_size - desc.reloc_size, + mode_mask) {} RelocIterator::RelocIterator(Vector instructions, Vector reloc_info, Address const_pool, int mode_mask) - : mode_mask_(mode_mask) { - rinfo_.pc_ = instructions.start(); - rinfo_.constant_pool_ = const_pool; + : RelocIterator(nullptr, reinterpret_cast
(instructions.start()), + const_pool, reloc_info.start() + reloc_info.size(), + reloc_info.start(), mode_mask) { rinfo_.flags_ = RelocInfo::kInNativeWasmCode; +} + +RelocIterator::RelocIterator(Code* host, Address pc, Address constant_pool, + const byte* pos, const byte* end, int mode_mask) + : pos_(pos), end_(end), mode_mask_(mode_mask) { // Relocation info is read backwards. - pos_ = reloc_info.start() + reloc_info.size(); - end_ = reloc_info.start(); + DCHECK_GE(pos_, end_); + rinfo_.host_ = host; + rinfo_.pc_ = pc; + rinfo_.constant_pool_ = constant_pool; if (mode_mask_ == 0) pos_ = end_; next(); } @@ -551,7 +553,7 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) { } void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT - os << static_cast(pc_) << " " << RelocModeName(rmode_); + os << reinterpret_cast(pc_) << " " << RelocModeName(rmode_); if (IsComment(rmode_)) { os << " (" << reinterpret_cast(data_) << ")"; } else if (rmode_ == DEOPT_SCRIPT_OFFSET || rmode_ == DEOPT_INLINING_ID) { @@ -565,7 +567,7 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT ExternalReferenceEncoder ref_encoder(isolate); os << " (" << ref_encoder.NameOfAddress(isolate, target_external_reference()) - << ") (" << static_cast(target_external_reference()) + << ") (" << reinterpret_cast(target_external_reference()) << ")"; } else if (IsCodeTarget(rmode_)) { const Address code_target = target_address(); @@ -582,7 +584,7 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT } os << ") "; } - os << " (" << static_cast(target_address()) << ")"; + os << " (" << reinterpret_cast(target_address()) << ")"; } else if (IsRuntimeEntry(rmode_) && isolate->deoptimizer_data() != nullptr) { // Depotimization bailouts are stored as runtime entries. 
int id = Deoptimizer::GetDeoptimizationId( @@ -607,7 +609,7 @@ void RelocInfo::Verify(Isolate* isolate) { case CODE_TARGET: { // convert inline target address to code object Address addr = target_address(); - CHECK_NOT_NULL(addr); + CHECK_NE(addr, kNullAddress); // Check that we can find the right code object. Code* code = Code::GetCodeFromTargetAddress(addr); Object* found = isolate->FindCodeObject(addr); @@ -626,7 +628,7 @@ void RelocInfo::Verify(Isolate* isolate) { } case OFF_HEAP_TARGET: { Address addr = target_off_heap_target(); - CHECK_NOT_NULL(addr); + CHECK_NE(addr, kNullAddress); CHECK_NOT_NULL(InstructionStream::TryLookupCode(isolate, addr)); break; } diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index e79f4cc8693332..35f6147053a58e 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -40,6 +40,7 @@ #include #include "src/allocation.h" +#include "src/code-reference.h" #include "src/contexts.h" #include "src/deoptimize-reason.h" #include "src/double.h" @@ -98,7 +99,7 @@ class AssemblerBase: public Malloced { IsolateData(const IsolateData&) = default; bool serializer_enabled_; -#if V8_TARGET_ARCH_X64 +#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 Address code_range_start_; #endif }; @@ -162,6 +163,9 @@ class AssemblerBase: public Malloced { static const int kMinimalBufferSize = 4*KB; static void FlushICache(void* start, size_t size); + static void FlushICache(Address start, size_t size) { + return FlushICache(reinterpret_cast(start), size); + } protected: // The buffer into which code and relocation info are generated. It could @@ -180,6 +184,7 @@ class AssemblerBase: public Malloced { } // The program counter, which points into the buffer above and moves forward. + // TODO(jkummerow): This should probably have type {Address}. 
byte* pc_; private: @@ -315,11 +320,6 @@ class CpuFeatures : public AllStatic { DISALLOW_COPY_AND_ASSIGN(CpuFeatures); }; - -enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs }; - -enum ArgvMode { kArgvOnStack, kArgvInRegister }; - // Specifies whether to perform icache flush operations on RelocInfo updates. // If FLUSH_ICACHE_IF_NEEDED, the icache will always be flushed if an // instruction was modified. If SKIP_ICACHE_FLUSH the flush will always be @@ -414,7 +414,7 @@ class RelocInfo { RelocInfo() = default; - RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host) + RelocInfo(Address pc, Mode rmode, intptr_t data, Code* host) : pc_(pc), rmode_(rmode), data_(data), host_(host) {} static inline bool IsRealRelocMode(Mode mode) { @@ -476,8 +476,7 @@ class RelocInfo { static constexpr int ModeMask(Mode mode) { return 1 << mode; } // Accessors - byte* pc() const { return pc_; } - void set_pc(byte* pc) { pc_ = pc; } + Address pc() const { return pc_; } Mode rmode() const { return rmode_; } intptr_t data() const { return data_; } Code* host() const { return host_; } @@ -613,15 +612,13 @@ class RelocInfo { uint32_t embedded_size() const; Address embedded_address() const; - // On ARM, note that pc_ is the address of the constant pool entry - // to be relocated and not the address of the instruction - // referencing the constant pool entry (except when rmode_ == - // comment). - byte* pc_; + // On ARM/ARM64, note that pc_ is the address of the instruction referencing + // the constant pool and not the address of the constant pool entry. + Address pc_; Mode rmode_; intptr_t data_ = 0; Code* host_; - Address constant_pool_ = nullptr; + Address constant_pool_ = kNullAddress; Flags flags_; friend class RelocIterator; }; @@ -683,6 +680,8 @@ class RelocIterator: public Malloced { // iteration iff bit k of mode_mask is set. 
explicit RelocIterator(Code* code, int mode_mask = -1); explicit RelocIterator(const CodeDesc& desc, int mode_mask = -1); + explicit RelocIterator(const CodeReference code_reference, + int mode_mask = -1); explicit RelocIterator(Vector instructions, Vector reloc_info, Address const_pool, int mode_mask = -1); @@ -700,6 +699,9 @@ class RelocIterator: public Malloced { } private: + RelocIterator(Code* host, Address pc, Address constant_pool, const byte* pos, + const byte* end, int mode_mask); + // Advance* moves the position before/after reading. // *Read* reads from current byte(s) into rinfo_. // *Get* just reads and returns info on current byte. @@ -958,8 +960,8 @@ class RegisterBase { } template - static constexpr int bit() { - return 1 << code(); + static constexpr RegList bit() { + return RegList{1} << code(); } static SubType from_code(int code) { @@ -968,11 +970,18 @@ class RegisterBase { return SubType{code}; } + // Constexpr version (pass registers as template parameters). template static constexpr RegList ListOf() { return CombineRegLists(RegisterBase::bit()...); } + // Non-constexpr version (pass registers as method parameters). + template + static RegList ListOf(Register... 
regs) { + return CombineRegLists(regs.bit()...); + } + bool is_valid() const { return reg_code_ != kCode_no_reg; } int code() const { @@ -980,7 +989,7 @@ class RegisterBase { return reg_code_; } - int bit() const { return 1 << code(); } + RegList bit() const { return RegList{1} << code(); } inline constexpr bool operator==(SubType other) const { return reg_code_ == other.reg_code_; diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc index 458afb8bc1d6c4..5efecc5375a33d 100644 --- a/deps/v8/src/ast/ast-value-factory.cc +++ b/deps/v8/src/ast/ast-value-factory.cc @@ -223,7 +223,6 @@ AstRawString* AstValueFactory::GetOneByteStringInternal( return GetString(hash_field, true, literal); } - AstRawString* AstValueFactory::GetTwoByteStringInternal( Vector literal) { uint32_t hash_field = StringHasher::HashSequentialString( @@ -231,7 +230,6 @@ AstRawString* AstValueFactory::GetTwoByteStringInternal( return GetString(hash_field, false, Vector::cast(literal)); } - const AstRawString* AstValueFactory::GetString(Handle literal) { AstRawString* result = nullptr; DisallowHeapAllocation no_gc; @@ -280,7 +278,6 @@ void AstValueFactory::Internalize(Isolate* isolate) { ResetStrings(); } - AstRawString* AstValueFactory::GetString(uint32_t hash_field, bool is_one_byte, Vector literal_bytes) { // literal_bytes here points to whatever the user passed, and this is OK diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc index 392af8a5013afb..15b8bff61b697f 100644 --- a/deps/v8/src/ast/ast.cc +++ b/deps/v8/src/ast/ast.cc @@ -276,7 +276,9 @@ std::unique_ptr FunctionLiteral::GetDebugName() const { AllowHandleDereference allow_deref; return inferred_name_->ToCString(); } else { - return std::unique_ptr(new char{'\0'}); + char* empty_str = new char[1]; + empty_str[0] = 0; + return std::unique_ptr(empty_str); } // TODO(rmcilroy): Deal with two-character strings. 
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h index b95d54abb9bd93..35dede266b37e3 100644 --- a/deps/v8/src/ast/ast.h +++ b/deps/v8/src/ast/ast.h @@ -1429,6 +1429,8 @@ class ArrayLiteral final : public AggregateLiteral { ZoneList* values() const { return values_; } + int first_spread_index() const { return first_spread_index_; } + bool is_empty() const; // Populate the depth field and flags, returns the depth. @@ -1453,16 +1455,6 @@ class ArrayLiteral final : public AggregateLiteral { return AggregateLiteral::ComputeFlags(disable_mementos); } - // Provide a mechanism for iterating through values to rewrite spreads. - ZoneList::iterator FirstSpreadOrEndValue() const { - return (first_spread_index_ >= 0) ? values_->begin() + first_spread_index_ - : values_->end(); - } - ZoneList::iterator BeginValue() const { - return values_->begin(); - } - ZoneList::iterator EndValue() const { return values_->end(); } - private: friend class AstNodeFactory; diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc index 2ca75e3c31e8ea..4f9029810a044a 100644 --- a/deps/v8/src/ast/prettyprinter.cc +++ b/deps/v8/src/ast/prettyprinter.cc @@ -1325,7 +1325,7 @@ void AstPrinter::VisitCompareOperation(CompareOperation* node) { void AstPrinter::VisitSpread(Spread* node) { - IndentedScope indent(this, "...", node->position()); + IndentedScope indent(this, "SPREAD", node->position()); Visit(node->expression()); } diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc index 2c1355ead1416b..42affeea2c9dd5 100644 --- a/deps/v8/src/ast/scopes.cc +++ b/deps/v8/src/ast/scopes.cc @@ -2234,6 +2234,8 @@ void Scope::AllocateNonParameterLocal(Variable* var) { if (var->IsUnallocated() && MustAllocate(var)) { if (MustAllocateInContext(var)) { AllocateHeapSlot(var); + DCHECK_IMPLIES(is_catch_scope(), + var->index() == Context::THROWN_OBJECT_INDEX); } else { AllocateStackSlot(var); } diff --git a/deps/v8/src/base/adapters.h b/deps/v8/src/base/adapters.h index 
f04391e2b22cad..6eeaed140bf0dd 100644 --- a/deps/v8/src/base/adapters.h +++ b/deps/v8/src/base/adapters.h @@ -8,6 +8,8 @@ #ifndef V8_BASE_ADAPTERS_H_ #define V8_BASE_ADAPTERS_H_ +#include + #include "src/base/macros.h" namespace v8 { @@ -17,13 +19,15 @@ namespace base { template class ReversedAdapter { public: - typedef decltype(static_cast(nullptr)->rbegin()) Iterator; + using Iterator = + std::reverse_iterator()))>; explicit ReversedAdapter(T& t) : t_(t) {} - ReversedAdapter(const ReversedAdapter& ra) : t_(ra.t_) {} + ReversedAdapter(const ReversedAdapter& ra) = default; - Iterator begin() const { return t_.rbegin(); } - Iterator end() const { return t_.rend(); } + // TODO(clemensh): Use std::rbegin/std::rend once we have C++14 support. + Iterator begin() const { return Iterator(std::end(t_)); } + Iterator end() const { return Iterator(std::begin(t_)); } private: T& t_; diff --git a/deps/v8/src/base/atomic-utils.h b/deps/v8/src/base/atomic-utils.h index 5ba1ad424606c1..7787e4ff5279d3 100644 --- a/deps/v8/src/base/atomic-utils.h +++ b/deps/v8/src/base/atomic-utils.h @@ -14,6 +14,7 @@ namespace v8 { namespace base { +// Deprecated. Use std::atomic for new code. template class AtomicNumber { public: diff --git a/deps/v8/src/base/ieee754.cc b/deps/v8/src/base/ieee754.cc index 95b84cf328021b..7a1cc175cb733e 100644 --- a/deps/v8/src/base/ieee754.cc +++ b/deps/v8/src/base/ieee754.cc @@ -51,6 +51,7 @@ namespace { /* * A union which permits us to convert between a double and two 32 bit * ints. + * TODO(jkummerow): This is undefined behavior. Use bit_cast instead. 
*/ #if V8_TARGET_LITTLE_ENDIAN diff --git a/deps/v8/src/base/logging.cc b/deps/v8/src/base/logging.cc index e58fdba09f54af..64f7fed413e3c1 100644 --- a/deps/v8/src/base/logging.cc +++ b/deps/v8/src/base/logging.cc @@ -135,7 +135,7 @@ class FailureMessage { static const uintptr_t kStartMarker = 0xdecade10; static const uintptr_t kEndMarker = 0xdecade11; - static const int kMessageBufferSize = 1024; + static const int kMessageBufferSize = 512; uintptr_t start_marker_ = kStartMarker; char message_[kMessageBufferSize]; @@ -154,6 +154,7 @@ void V8_Fatal(const char* file, int line, const char* format, ...) { fflush(stdout); fflush(stderr); + // Print the formatted message to stdout without cropping the output. v8::base::OS::PrintError("\n\n#\n# Fatal error in %s, line %d\n# ", file, line); diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h index db2f19459168f0..3437309bc7ee2a 100644 --- a/deps/v8/src/base/macros.h +++ b/deps/v8/src/base/macros.h @@ -43,7 +43,6 @@ template char (&ArraySizeHelper(const T (&array)[N]))[N]; #endif - // bit_cast is a template function that implements the // equivalent of "*reinterpret_cast(&source)". We need this in // very low-level functions like the protobuf library and fast math @@ -150,20 +149,27 @@ V8_INLINE Dest bit_cast(Source const& source) { #define INLINE(declarator) V8_INLINE declarator #define NO_INLINE(declarator) V8_NOINLINE declarator -// Define V8_USE_ADDRESS_SANITIZER macros. +// Define V8_USE_ADDRESS_SANITIZER macro. #if defined(__has_feature) #if __has_feature(address_sanitizer) #define V8_USE_ADDRESS_SANITIZER 1 #endif #endif -// Define DISABLE_ASAN macros. +// Define DISABLE_ASAN macro. #ifdef V8_USE_ADDRESS_SANITIZER #define DISABLE_ASAN __attribute__((no_sanitize_address)) #else #define DISABLE_ASAN #endif +// Define V8_USE_MEMORY_SANITIZER macro. 
+#if defined(__has_feature) +#if __has_feature(memory_sanitizer) +#define V8_USE_MEMORY_SANITIZER 1 +#endif +#endif + // Helper macro to define no_sanitize attributes only with clang. #if defined(__clang__) && defined(__has_attribute) #if __has_attribute(no_sanitize) @@ -271,6 +277,14 @@ struct Use { } // namespace base } // namespace v8 +// implicit_cast(x) triggers an implicit cast from {x} to type {A}. This is +// useful in situations where static_cast(x) would do too much. +// Only use this for cheap-to-copy types, or use move semantics explicitly. +template +V8_INLINE A implicit_cast(A x) { + return x; +} + // Define our own macros for writing 64-bit constants. This is less fragile // than defining __STDC_CONSTANT_MACROS before including , and it // works on compilers that don't have it (like MSVC). @@ -296,6 +310,14 @@ struct Use { #define V8PRIdPTR V8_PTR_PREFIX "d" #define V8PRIuPTR V8_PTR_PREFIX "u" +#ifdef V8_TARGET_ARCH_64_BIT +#define V8_PTR_HEX_DIGITS 12 +#define V8PRIxPTR_FMT "0x%012" V8PRIxPTR +#else +#define V8_PTR_HEX_DIGITS 8 +#define V8PRIxPTR_FMT "0x%08" V8PRIxPTR +#endif + // ptrdiff_t is 't' according to the standard, but MSVC uses 'I'. #if V8_CC_MSVC #define V8PRIxPTRDIFF "Ix" diff --git a/deps/v8/src/base/platform/mutex.h b/deps/v8/src/base/platform/mutex.h index 59b653d6cd9060..6b4158f079e3e8 100644 --- a/deps/v8/src/base/platform/mutex.h +++ b/deps/v8/src/base/platform/mutex.h @@ -203,11 +203,21 @@ typedef LazyStaticInstance +// Controls whether a LockGuard always requires a valid Mutex or will just +// ignore it if it's nullptr. 
+enum class NullBehavior { kRequireNotNull, kIgnoreIfNull }; + +template class LockGuard final { public: - explicit LockGuard(Mutex* mutex) : mutex_(mutex) { mutex_->Lock(); } - ~LockGuard() { mutex_->Unlock(); } + explicit LockGuard(Mutex* mutex) : mutex_(mutex) { + if (Behavior == NullBehavior::kRequireNotNull || mutex_ != nullptr) { + mutex_->Lock(); + } + } + ~LockGuard() { + if (mutex_ != nullptr) mutex_->Unlock(); + } private: Mutex* mutex_; diff --git a/deps/v8/src/base/platform/platform-cygwin.cc b/deps/v8/src/base/platform/platform-cygwin.cc index ddcdc1a2d38078..c5d94fc6ba55b4 100644 --- a/deps/v8/src/base/platform/platform-cygwin.cc +++ b/deps/v8/src/base/platform/platform-cygwin.cc @@ -34,6 +34,8 @@ DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) { switch (access) { case OS::MemoryPermission::kNoAccess: return PAGE_NOACCESS; + case OS::MemoryPermission::kRead: + return PAGE_READONLY; case OS::MemoryPermission::kReadWrite: return PAGE_READWRITE; case OS::MemoryPermission::kReadWriteExecute: diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc index bba3f1baba953c..3a9d65a12d393d 100644 --- a/deps/v8/src/base/platform/platform-fuchsia.cc +++ b/deps/v8/src/base/platform/platform-fuchsia.cc @@ -19,6 +19,8 @@ uint32_t GetProtectionFromMemoryPermission(OS::MemoryPermission access) { switch (access) { case OS::MemoryPermission::kNoAccess: return 0; // no permissions + case OS::MemoryPermission::kRead: + return ZX_VM_FLAG_PERM_READ; case OS::MemoryPermission::kReadWrite: return ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE; case OS::MemoryPermission::kReadWriteExecute: diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc index fee67589b6b45b..d21107d6f785f6 100644 --- a/deps/v8/src/base/platform/platform-posix.cc +++ b/deps/v8/src/base/platform/platform-posix.cc @@ -112,6 +112,8 @@ int GetProtectionFromMemoryPermission(OS::MemoryPermission 
access) { switch (access) { case OS::MemoryPermission::kNoAccess: return PROT_NONE; + case OS::MemoryPermission::kRead: + return PROT_READ; case OS::MemoryPermission::kReadWrite: return PROT_READ | PROT_WRITE; case OS::MemoryPermission::kReadWriteExecute: @@ -352,6 +354,18 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) { if (ret == 0 && access == OS::MemoryPermission::kNoAccess) { ret = ReclaimInaccessibleMemory(address, size); } + +// For accounting purposes, we want to call MADV_FREE_REUSE on macOS after +// changing permissions away from OS::MemoryPermission::kNoAccess. Since this +// state is not kept at this layer, we always call this if access != kNoAccess. +// The cost is a syscall that effectively no-ops. +// TODO(erikchen): Fix this to only call MADV_FREE_REUSE when necessary. +// https://crbug.com/823915 +#if defined(OS_MACOSX) + if (access != OS::MemoryPermission::kNoAccess) + madvise(address, size, MADV_FREE_REUSE); +#endif + return ret == 0; } diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc index d4aa44f8a7d96f..f618c65fb6458e 100644 --- a/deps/v8/src/base/platform/platform-win32.cc +++ b/deps/v8/src/base/platform/platform-win32.cc @@ -758,6 +758,8 @@ DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) { switch (access) { case OS::MemoryPermission::kNoAccess: return PAGE_NOACCESS; + case OS::MemoryPermission::kRead: + return PAGE_READONLY; case OS::MemoryPermission::kReadWrite: return PAGE_READWRITE; case OS::MemoryPermission::kReadWriteExecute: diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h index 4fbc87c4aaa58c..5d015eeeac3798 100644 --- a/deps/v8/src/base/platform/platform.h +++ b/deps/v8/src/base/platform/platform.h @@ -159,6 +159,7 @@ class V8_BASE_EXPORT OS { // v8::PageAllocator. enum class MemoryPermission { kNoAccess, + kRead, kReadWrite, // TODO(hpayer): Remove this flag. Memory should never be rwx. 
kReadWriteExecute, diff --git a/deps/v8/src/base/template-utils.h b/deps/v8/src/base/template-utils.h index 07356346ec80eb..cbbe7e3cbf2d7e 100644 --- a/deps/v8/src/base/template-utils.h +++ b/deps/v8/src/base/template-utils.h @@ -56,13 +56,6 @@ std::unique_ptr make_unique(Args&&... args) { return std::unique_ptr(new T(std::forward(args)...)); } -// implicit_cast(x) triggers an implicit cast from {x} to type {A}. This is -// useful in situations where static_cast(x) would do too much. -template -A implicit_cast(A x) { - return x; -} - // Helper to determine how to pass values: Pass scalars and arrays by value, // others by const reference (even if it was a non-const ref before; this is // disallowed by the style guide anyway). diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index bbb374918a966a..ed8fd72c91527d 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -19,15 +19,18 @@ #include "src/extensions/trigger-failure-extension.h" #include "src/heap/heap.h" #include "src/isolate-inl.h" +#include "src/objects/api-callbacks.h" +#ifdef V8_INTL_SUPPORT +#include "src/objects/intl-objects.h" +#include "src/objects/js-locale.h" +#endif // V8_INTL_SUPPORT +#include "src/objects/hash-table-inl.h" #include "src/objects/js-regexp.h" +#include "src/objects/templates.h" #include "src/snapshot/natives.h" #include "src/snapshot/snapshot.h" #include "src/wasm/wasm-js.h" -#if V8_INTL_SUPPORT -#include "src/objects/intl-objects.h" -#endif // V8_INTL_SUPPORT - namespace v8 { namespace internal { @@ -161,7 +164,7 @@ class Genesis BASE_EMBEDDED { // Creates some basic objects. Used for creating a context from scratch. void CreateRoots(); // Creates the empty function. Used for creating a context from scratch. - Handle CreateEmptyFunction(Isolate* isolate); + Handle CreateEmptyFunction(); // Returns the %ThrowTypeError% intrinsic function. // See ES#sec-%throwtypeerror% for details. 
Handle GetThrowTypeErrorIntrinsic(); @@ -357,10 +360,11 @@ void Bootstrapper::DetachGlobal(Handle env) { namespace { V8_NOINLINE Handle SimpleCreateSharedFunctionInfo( - Isolate* isolate, Builtins::Name builtin_id, Handle name, int len) { + Isolate* isolate, Builtins::Name builtin_id, Handle name, int len, + FunctionKind kind = FunctionKind::kNormalFunction) { Handle shared = isolate->factory()->NewSharedFunctionInfoForBuiltin(name, builtin_id, - kNormalFunction); + kind); shared->set_internal_formal_parameter_count(len); shared->set_length(len); return shared; @@ -589,29 +593,33 @@ V8_NOINLINE void InstallSpeciesGetter(Handle constructor) { } // namespace -Handle Genesis::CreateEmptyFunction(Isolate* isolate) { - Factory* factory = isolate->factory(); - +Handle Genesis::CreateEmptyFunction() { // Allocate the function map first and then patch the prototype later. - Handle empty_function_map = factory->CreateSloppyFunctionMap( + Handle empty_function_map = factory()->CreateSloppyFunctionMap( FUNCTION_WITHOUT_PROTOTYPE, MaybeHandle()); empty_function_map->set_is_prototype_map(true); DCHECK(!empty_function_map->is_dictionary_map()); + // Allocate ScopeInfo for the empty function. 
+ Handle scope_info = ScopeInfo::CreateForEmptyFunction(isolate()); + // Allocate the empty function as the prototype for function according to // ES#sec-properties-of-the-function-prototype-object NewFunctionArgs args = NewFunctionArgs::ForBuiltin( - factory->empty_string(), empty_function_map, Builtins::kEmptyFunction); - Handle empty_function = factory->NewFunction(args); + factory()->empty_string(), empty_function_map, Builtins::kEmptyFunction); + Handle empty_function = factory()->NewFunction(args); + native_context()->set_empty_function(*empty_function); // --- E m p t y --- - Handle source = factory->NewStringFromStaticChars("() {}"); - Handle - - + + - diff --git a/deps/v8/tools/turbolizer/node.js b/deps/v8/tools/turbolizer/node.js index b718cdc4dfee34..237b4d2b2d20f3 100644 --- a/deps/v8/tools/turbolizer/node.js +++ b/deps/v8/tools/turbolizer/node.js @@ -57,7 +57,11 @@ var Node = { } else { propsString = "[" + this.properties + "]"; } - return this.title + "\n" + propsString + "\n" + this.opinfo; + let title = this.title + "\n" + propsString + "\n" + this.opinfo; + if (this.origin) { + title += `\nOrigin: #${this.origin.nodeId} in phase ${this.origin.phase}/${this.origin.reducer}`; + } + return title; }, getDisplayLabel: function() { var result = this.id + ":" + this.label; diff --git a/deps/v8/tools/turbolizer/schedule-view.js b/deps/v8/tools/turbolizer/schedule-view.js index ef4789211d3c48..0864fceea7362b 100644 --- a/deps/v8/tools/turbolizer/schedule-view.js +++ b/deps/v8/tools/turbolizer/schedule-view.js @@ -5,124 +5,159 @@ "use strict"; class ScheduleView extends TextView { - constructor(id, broker) { - super(id, broker, null, false); - let view = this; - let BLOCK_STYLE = { - css: 'tag' - }; - const BLOCK_HEADER_STYLE = { - css: 'com', - block_id: -1, - location: function(text) { - let matches = /\d+/.exec(text); - if (!matches) return undefined; - BLOCK_HEADER_STYLE.block_id = Number(matches[0]); - return { - block_id: BLOCK_HEADER_STYLE.block_id - 
}; - }, - }; - const BLOCK_LINK_STYLE = { - css: 'tag', - link: function(text) { - let id = Number(text.substr(1)); - view.select(function(location) { return location.block_id == id; }, true, true); - } - }; - const ID_STYLE = { - css: 'tag', - location: function(text) { - let matches = /\d+/.exec(text); - return { - node_id: Number(matches[0]), - block_id: BLOCK_HEADER_STYLE.block_id - }; - }, - }; - const ID_LINK_STYLE = { - css: 'tag', - link: function(text) { - let id = Number(text); - view.select(function(location) { return location.node_id == id; }, true, true); + + createViewElement() { + const pane = document.createElement('div'); + pane.setAttribute('id', "schedule"); + pane.innerHTML = + `
+       
    +
+
`; + return pane; + } + + constructor(parentId, broker) { + super(parentId, broker, null, false); + } + + attachSelection(s) { + const view = this; + if (!(s instanceof Set)) return; + view.selectionHandler.clear(); + view.blockSelectionHandler.clear(); + view.sourcePositionSelectionHandler.clear(); + const selected = new Array(); + for (const key of s) selected.push(key); + view.selectionHandler.select(selected, true); + } + + createElementFromString(htmlString) { + var div = document.createElement('div'); + div.innerHTML = htmlString.trim(); + return div.firstChild; + } + + + elementForBlock(block) { + const view = this; + function createElement(tag, cls, content) { + const el = document.createElement(tag); + if (isIterable(cls)) { + for (const c of cls) el.classList.add(c); + } else { + el.classList.add(cls); } - }; - const NODE_STYLE = { css: 'kwd' }; - const GOTO_STYLE = { css: 'kwd', - goto_id: -2, - location: function(text) { - return { - node_id: GOTO_STYLE.goto_id--, - block_id: BLOCK_HEADER_STYLE.block_id - }; + if (content != undefined) el.innerHTML = content; + return el; + } + + function mkNodeLinkHandler(nodeId) { + return function (e) { + e.stopPropagation(); + if (!e.shiftKey) { + view.selectionHandler.clear(); + } + view.selectionHandler.select([nodeId], true); + }; + } + + function createElementForNode(node) { + const nodeEl = createElement("div", "node"); + const node_id = createElement("div", ["node-id", "tag", "clickable"], node.id); + node_id.onclick = mkNodeLinkHandler(node.id); + view.addHtmlElementForNodeId(node.id, node_id); + nodeEl.appendChild(node_id); + const node_label = createElement("div", "node-label", node.label); + nodeEl.appendChild(node_label); + if (node.inputs.length > 0) { + const node_parameters = createElement("div", ["parameter-list", "comma-sep-list"]); + for (const param of node.inputs) { + const paramEl = createElement("div", ["parameter", "tag", "clickable"], param); + node_parameters.appendChild(paramEl); + 
paramEl.onclick = mkNodeLinkHandler(param); + view.addHtmlElementForNodeId(param, paramEl); + } + nodeEl.appendChild(node_parameters); } + return nodeEl; } - const ARROW_STYLE = { css: 'kwd' }; - let patterns = [ - [ - [/^--- BLOCK B\d+/, BLOCK_HEADER_STYLE, 1], - [/^\s+\d+: /, ID_STYLE, 2], - [/^\s+Goto/, GOTO_STYLE, 6], - [/^.*/, null, -1] - ], - [ - [/^ +/, null], - [/^\(deferred\)/, BLOCK_HEADER_STYLE], - [/^B\d+/, BLOCK_LINK_STYLE], - [/^<-/, ARROW_STYLE], - [/^->/, ARROW_STYLE], - [/^,/, null], - [/^---/, BLOCK_HEADER_STYLE, -1] - ], - // Parse opcode including [] - [ - [/^[A-Za-z0-9_]+(\[.*\])?$/, NODE_STYLE, -1], - [/^[A-Za-z0-9_]+(\[(\[.*?\]|.)*?\])?/, NODE_STYLE, 3] - ], - // Parse optional parameters - [ - [/^ /, null, 4], - [/^\(/, null], - [/^\d+/, ID_LINK_STYLE], - [/^, /, null], - [/^\)$/, null, -1], - [/^\)/, null, 4], - ], - [ - [/^ -> /, ARROW_STYLE, 5], - [/^.*/, null, -1] - ], - [ - [/^B\d+$/, BLOCK_LINK_STYLE, -1], - [/^B\d+/, BLOCK_LINK_STYLE], - [/^, /, null] - ], - [ - [/^ -> /, ARROW_STYLE], - [/^B\d+$/, BLOCK_LINK_STYLE, -1] - ] - ]; - this.setPatterns(patterns); + + function mkBlockLinkHandler(blockId) { + return function (e) { + e.stopPropagation(); + if (!e.shiftKey) { + view.blockSelectionHandler.clear(); + } + view.blockSelectionHandler.select(["" + blockId], true); + }; + } + + const schedule_block = createElement("div", "schedule-block"); + const block_id = createElement("div", ["block-id", "com", "clickable"], block.id); + block_id.onclick = mkBlockLinkHandler(block.id); + schedule_block.appendChild(block_id); + const block_pred = createElement("div", ["predecessor-list", "block-list", "comma-sep-list"]); + for (const pred of block.pred) { + const predEl = createElement("div", ["block-id", "com", "clickable"], pred); + predEl.onclick = mkBlockLinkHandler(pred); + block_pred.appendChild(predEl); + } + if (block.pred.length) schedule_block.appendChild(block_pred); + const nodes = createElement("div", "nodes"); + for (const node of 
block.nodes) { + nodes.appendChild(createElementForNode(node, block.id)); + } + schedule_block.appendChild(nodes); + const block_succ = createElement("div", ["successor-list", "block-list", "comma-sep-list"]); + for (const succ of block.succ) { + const succEl = createElement("div", ["block-id", "com", "clickable"], succ); + succEl.onclick = mkBlockLinkHandler(succ); + block_succ.appendChild(succEl); + } + if (block.succ.length) schedule_block.appendChild(block_succ); + this.addHtmlElementForBlockId(block.id, schedule_block); + return schedule_block; } - initializeContent(data, rememberedSelection) { - super.initializeContent(data, rememberedSelection); - var graph = this; - var locations = []; - for (var id of rememberedSelection) { - locations.push({ node_id : id }); + addBlocks(blocks) { + for (const block of blocks) { + const blockEl = this.elementForBlock(block); + this.divNode.appendChild(blockEl); } - this.selectLocations(locations, true, true); + } + + initializeContent(data, rememberedSelection) { + this.clearText(); + this.schedule = data.schedule + this.addBlocks(data.schedule.blocks); + this.attachSelection(rememberedSelection); } detachSelection() { - var selection = this.selection.detachSelection(); - var s = new Set(); - for (var i of selection) { - if (i.location.node_id != undefined && i.location.node_id > 0) { - s.add(i.location.node_id); + this.blockSelection.clear(); + this.sourcePositionSelection.clear(); + return this.selection.detachSelection(); + } + + lineString(node) { + return `${node.id}: ${node.label}(${node.inputs.join(", ")})` + } + + searchInputAction(view, searchBar) { + d3.event.stopPropagation(); + this.selectionHandler.clear(); + const query = searchBar.value; + if (query.length == 0) return; + const select = []; + window.sessionStorage.setItem("lastSearch", query); + const reg = new RegExp(query); + for (const node of this.schedule.nodes) { + if (node === undefined) continue; + if (reg.exec(this.lineString(node)) != null) { + 
select.push(node.id) } - }; - return s; + } + this.selectionHandler.select(select, true); } } diff --git a/deps/v8/tools/turbolizer/selection-broker.js b/deps/v8/tools/turbolizer/selection-broker.js index 822cf1ce1f62fd..0ae006aa0151bd 100644 --- a/deps/v8/tools/turbolizer/selection-broker.js +++ b/deps/v8/tools/turbolizer/selection-broker.js @@ -2,98 +2,72 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -var SelectionBroker = function() { - this.brokers = []; - this.dispatching = false; - this.lastDispatchingHandler = null; - this.nodePositionMap = []; - this.sortedPositionList = []; - this.positionNodeMap = []; -}; +class SelectionBroker { + constructor(sourceResolver) { + this.sourcePositionHandlers = []; + this.nodeHandlers = []; + this.blockHandlers = []; + this.sourceResolver = sourceResolver; + }; -SelectionBroker.prototype.addSelectionHandler = function(handler) { - this.brokers.push(handler); -} + addSourcePositionHandler(handler) { + this.sourcePositionHandlers.push(handler); + } -SelectionBroker.prototype.setNodePositionMap = function(map) { - let broker = this; - if (!map) return; - broker.nodePositionMap = map; - broker.positionNodeMap = []; - broker.sortedPositionList = []; - let next = 0; - for (let i in broker.nodePositionMap) { - broker.sortedPositionList[next] = Number(broker.nodePositionMap[i]); - broker.positionNodeMap[next++] = i; + addNodeHandler(handler) { + this.nodeHandlers.push(handler); } - broker.sortedPositionList = sortUnique(broker.sortedPositionList, - function(a,b) { return a - b; }); - this.positionNodeMap.sort(function(a,b) { - let result = broker.nodePositionMap[a] - broker.nodePositionMap[b]; - if (result != 0) return result; - return a - b; - }); -} -SelectionBroker.prototype.select = function(from, locations, selected) { - let broker = this; - if (!broker.dispatching) { - broker.lastDispatchingHandler = from; - try { - broker.dispatching = true; - let enrichLocations = 
function(locations) { - result = []; - for (let location of locations) { - let newLocation = {}; - if (location.pos_start != undefined) { - newLocation.pos_start = location.pos_start; - } - if (location.pos_end != undefined) { - newLocation.pos_end = location.pos_end; - } - if (location.node_id != undefined) { - newLocation.node_id = location.node_id; - } - if (location.block_id != undefined) { - newLocation.block_id = location.block_id; - } - if (newLocation.pos_start == undefined && - newLocation.pos_end == undefined && - newLocation.node_id != undefined) { - if (broker.nodePositionMap && broker.nodePositionMap[location.node_id]) { - newLocation.pos_start = broker.nodePositionMap[location.node_id]; - newLocation.pos_end = location.pos_start + 1; - } - } - result.push(newLocation); - } - return result; - } - locations = enrichLocations(locations); - for (var b of this.brokers) { - if (b != from) { - b.brokeredSelect(locations, selected); - } + addBlockHandler(handler) { + this.blockHandlers.push(handler); + } + + broadcastSourcePositionSelect(from, sourcePositions, selected) { + let broker = this; + sourcePositions = sourcePositions.filter((l) => { + if (typeof l.scriptOffset == 'undefined' + || typeof l.inliningId == 'undefined') { + console.log("Warning: invalid source position"); + return false; } + return true; + }); + for (var b of this.sourcePositionHandlers) { + if (b != from) b.brokeredSourcePositionSelect(sourcePositions, selected); } - finally { - broker.dispatching = false; + const nodes = this.sourceResolver.sourcePositionsToNodeIds(sourcePositions); + for (var b of this.nodeHandlers) { + if (b != from) b.brokeredNodeSelect(nodes, selected); } } -} -SelectionBroker.prototype.clear = function(from) { - this.lastDispatchingHandler = null; - if (!this.dispatching) { - try { - this.dispatching = true; - this.brokers.forEach(function(b) { - if (b != from) { - b.brokeredClear(); - } - }); - } finally { - this.dispatching = false; + broadcastNodeSelect(from, 
nodes, selected) { + let broker = this; + for (var b of this.nodeHandlers) { + if (b != from) b.brokeredNodeSelect(nodes, selected); } + const sourcePositions = this.sourceResolver.nodeIdsToSourcePositions(nodes); + for (var b of this.sourcePositionHandlers) { + if (b != from) b.brokeredSourcePositionSelect(sourcePositions, selected); + } + } + + broadcastBlockSelect(from, blocks, selected) { + let broker = this; + for (var b of this.blockHandlers) { + if (b != from) b.brokeredBlockSelect(blocks, selected); + } + } + + broadcastClear(from) { + this.sourcePositionHandlers.forEach(function (b) { + if (b != from) b.brokeredClear(); + }); + this.nodeHandlers.forEach(function (b) { + if (b != from) b.brokeredClear(); + }); + this.blockHandlers.forEach(function (b) { + if (b != from) b.brokeredClear(); + }); } } diff --git a/deps/v8/tools/turbolizer/selection.js b/deps/v8/tools/turbolizer/selection.js index 26f1bde1972606..9bd937c84a5916 100644 --- a/deps/v8/tools/turbolizer/selection.js +++ b/deps/v8/tools/turbolizer/selection.js @@ -2,107 +2,59 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
-var Selection = function(handler) { - this.handler = handler; - this.selectionBase = null; - this.lastSelection = null; - this.selection = new Set(); -} - - -Selection.prototype.isEmpty = function() { - return this.selection.size == 0; -} - - -Selection.prototype.clear = function() { - var handler = this.handler; - this.selectionBase = null; - this.lastSelection = null; - handler.select(this.selection, false); - handler.clear(); - this.selection = new Set(); -} +class Selection { + constructor(stringKeyFnc) { + this.selection = new Map(); + this.stringKey = stringKeyFnc; + } + isEmpty() { + return this.selection.size == 0; + } -count = 0; + clear() { + this.selection = new Map(); + } -Selection.prototype.select = function(s, isSelected) { - var handler = this.handler; - if (!(Symbol.iterator in Object(s))) { s = [s]; } - if (isSelected) { - let first = true; - for (let i of s) { - if (first) { - this.selectionBase = i; - this.lastSelection = i; - first = false; + select(s, isSelected) { + if (!isIterable(s)) { s = [s]; } + for (const i of s) { + if (!i) continue; + if (isSelected == undefined) { + isSelected = !this.selection.has(this.stringKey(i)); } - this.selection.add(i); - } - handler.select(this.selection, true); - } else { - let unselectSet = new Set(); - for (let i of s) { - if (this.selection.has(i)) { - unselectSet.add(i); - this.selection.delete(i); + if (isSelected) { + this.selection.set(this.stringKey(i), i); + } else { + this.selection.delete(this.stringKey(i)); } } - handler.select(unselectSet, false); } -} + isSelected(i) { + return this.selection.has(this.stringKey(i)); + } -Selection.prototype.extendTo = function(pos) { - if (pos == this.lastSelection || this.lastSelection === null) return; + isKeySelected(key) { + return this.selection.has(key); + } - var handler = this.handler; - var pos_diff = handler.selectionDifference(pos, true, this.lastSelection, false); - var unselect_diff = []; - if (pos_diff.length == 0) { - pos_diff = 
handler.selectionDifference(this.selectionBase, false, pos, true); - if (pos_diff.length != 0) { - unselect_diff = handler.selectionDifference(this.lastSelection, true, this.selectionBase, false); - this.selection = new Set(); - this.selection.add(this.selectionBase); - for (var d of pos_diff) { - this.selection.add(d); - } - } else { - unselect_diff = handler.selectionDifference(this.lastSelection, true, pos, false); - for (var d of unselect_diff) { - this.selection.delete(d); - } - } - } else { - unselect_diff = handler.selectionDifference(this.selectionBase, false, this.lastSelection, true); - if (unselect_diff != 0) { - pos_diff = handler.selectionDifference(pos, true, this.selectionBase, false); - if (pos_diff.length == 0) { - unselect_diff = handler.selectionDifference(pos, false, this.lastSelection, true); - } - for (var d of unselect_diff) { - this.selection.delete(d); - } - } - if (pos_diff.length != 0) { - for (var d of pos_diff) { - this.selection.add(d); - } + selectedKeys() { + var result = new Set(); + for (var i of this.selection.keys()) { + result.add(i); } + return result; } - handler.select(unselect_diff, false); - handler.select(pos_diff, true); - this.lastSelection = pos; -} - -Selection.prototype.detachSelection = function() { - var result = new Set(); - for (var i of this.selection) { - result.add(i); + detachSelection() { + var result = new Set(); + for (var i of this.selection.keys()) { + result.add(i); + } + this.clear(); + return result; } - this.clear(); - return result; + + [Symbol.iterator]() { return this.selection.values() } } diff --git a/deps/v8/tools/turbolizer/source-resolver.js b/deps/v8/tools/turbolizer/source-resolver.js new file mode 100644 index 00000000000000..dd3732ad569524 --- /dev/null +++ b/deps/v8/tools/turbolizer/source-resolver.js @@ -0,0 +1,326 @@ +// Copyright 2018 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +function sourcePositionLe(a, b) { + if (a.inliningId == b.inliningId) { + return a.scriptOffset - b.scriptOffset; + } + return a.inliningId - b.inliningId; +} + +function sourcePositionEq(a, b) { + return a.inliningId == b.inliningId && + a.scriptOffset == b.scriptOffset; +} + +function sourcePositionToStringKey(sourcePosition) { + if (!sourcePosition) return "undefined"; + return "" + sourcePosition.inliningId + ":" + sourcePosition.scriptOffset; +} + +class SourceResolver { + constructor() { + // Maps node ids to source positions. + this.nodePositionMap = []; + // Maps source ids to source objects. + this.sources = []; + // Maps inlining ids to inlining objects. + this.inlinings = []; + // Maps source position keys to inlinings. + this.inliningsMap = new Map(); + // Maps source position keys to node ids. + this.positionToNodes = new Map(); + // Maps phase ids to phases. + this.phases = []; + // Maps phase names to phaseIds. + this.phaseNames = new Map(); + // The disassembly phase is stored separately. + this.disassemblyPhase = undefined; + } + + setSources(sources, mainBackup) { + if (sources) { + for (let [sourceId, source] of Object.entries(sources)) { + this.sources[sourceId] = source; + this.sources[sourceId].sourcePositions = []; + } + } + // This is a fallback if the JSON is incomplete (e.g. due to compiler crash). + if (!this.sources[-1]) { + this.sources[-1] = mainBackup; + this.sources[-1].sourcePositions = []; + } + } + + setInlinings(inlinings) { + if (inlinings) { + for (const [inliningId, inlining] of Object.entries(inlinings)) { + this.inlinings[inliningId] = inlining; + this.inliningsMap.set(sourcePositionToStringKey(inlining.inliningPosition), inlining); + } + } + // This is a default entry for the script itself that helps + // keep other code more uniform. 
+    this.inlinings[-1] = { sourceId: -1 };
+  }
+
+  setNodePositionMap(map) {
+    if (!map) return;
+    if (typeof map[0] != 'object') {
+      const alternativeMap = {};
+      for (const [nodeId, scriptOffset] of Object.entries(map)) {
+        alternativeMap[nodeId] = { scriptOffset: scriptOffset, inliningId: -1 };
+      }
+      map = alternativeMap;
+    };
+
+    for (const [nodeId, sourcePosition] of Object.entries(map)) {
+      if (sourcePosition == undefined) {
+        console.log("Warning: undefined source position ", sourcePosition, " for nodeId ", nodeId); continue;
+      }
+      const inliningId = sourcePosition.inliningId;
+      const inlining = this.inlinings[inliningId];
+      if (inlining) {
+        const sourceId = inlining.sourceId;
+        this.sources[sourceId].sourcePositions.push(sourcePosition);
+      }
+      this.nodePositionMap[nodeId] = sourcePosition;
+      let key = sourcePositionToStringKey(sourcePosition);
+      if (!this.positionToNodes.has(key)) {
+        this.positionToNodes.set(key, []);
+      }
+      this.positionToNodes.get(key).push(nodeId);
+    }
+    for (const [sourceId, source] of Object.entries(this.sources)) {
+      source.sourcePositions = sortUnique(source.sourcePositions,
+        sourcePositionLe, sourcePositionEq);
+    }
+  }
+
+  sourcePositionsToNodeIds(sourcePositions) {
+    const nodeIds = new Set();
+    for (const sp of sourcePositions) {
+      let key = sourcePositionToStringKey(sp);
+      let nodeIdsForPosition = this.positionToNodes.get(key);
+      if (!nodeIdsForPosition) continue;
+      for (const nodeId of nodeIdsForPosition) {
+        nodeIds.add(nodeId);
+      }
+    }
+    return nodeIds;
+  }
+
+  nodeIdsToSourcePositions(nodeIds) {
+    const sourcePositions = new Map();
+    for (const nodeId of nodeIds) {
+      let sp = this.nodePositionMap[nodeId];
+      let key = sourcePositionToStringKey(sp);
+      sourcePositions.set(key, sp);
+    }
+    const sourcePositionArray = [];
+    for (const sp of sourcePositions.values()) {
+      sourcePositionArray.push(sp);
+    }
+    return sourcePositionArray;
+  }
+
+  forEachSource(f) {
+    this.sources.forEach(f);
+  }
+
+  translateToSourceId(sourceId, location) {
+    for 
(const position of this.getInlineStack(location)) { + let inlining = this.inlinings[position.inliningId]; + if (!inlining) continue; + if (inlining.sourceId == sourceId) { + return position; + } + } + return location; + } + + addInliningPositions(sourcePosition, locations) { + let inlining = this.inliningsMap.get(sourcePositionToStringKey(sourcePosition)); + if (!inlining) return; + let sourceId = inlining.sourceId + const source = this.sources[sourceId]; + for (const sp of source.sourcePositions) { + locations.push(sp); + this.addInliningPositions(sp, locations); + } + } + + getInliningForPosition(sourcePosition) { + return this.inliningsMap.get(sourcePositionToStringKey(sourcePosition)); + } + + getSource(sourceId) { + return this.sources[sourceId]; + } + + getSourceName(sourceId) { + const source = this.sources[sourceId]; + return `${source.sourceName}:${source.functionName}`; + } + + sourcePositionFor(sourceId, scriptOffset) { + if (!this.sources[sourceId]) { + return null; + } + const list = this.sources[sourceId].sourcePositions; + for (let i = 0; i < list.length; i++) { + const sourcePosition = list[i] + const position = sourcePosition.scriptOffset; + const nextPosition = list[Math.min(i + 1, list.length - 1)].scriptOffset; + if ((position <= scriptOffset && scriptOffset < nextPosition)) { + return sourcePosition; + } + } + return null; + } + + sourcePositionsInRange(sourceId, start, end) { + if (!this.sources[sourceId]) return []; + const res = []; + const list = this.sources[sourceId].sourcePositions; + for (let i = 0; i < list.length; i++) { + const sourcePosition = list[i] + if (start <= sourcePosition.scriptOffset && sourcePosition.scriptOffset < end) { + res.push(sourcePosition); + } + } + return res; + } + + getInlineStack(sourcePosition) { + if (!sourcePosition) { + return []; + } + let inliningStack = []; + let cur = sourcePosition; + while (cur && cur.inliningId != -1) { + inliningStack.push(cur); + let inlining = this.inlinings[cur.inliningId]; + 
if (!inlining) {
+        break;
+      }
+      cur = inlining.inliningPosition;
+    }
+    if (cur && cur.inliningId == -1) {
+      inliningStack.push(cur);
+    }
+    return inliningStack;
+  }
+
+  parsePhases(phases) {
+    for (const [phaseId, phase] of Object.entries(phases)) {
+      if (phase.type == 'disassembly') {
+        this.disassemblyPhase = phase;
+      } else if (phase.type == 'schedule') {
+        this.phaseNames.set(phase.name, this.phases.length);
+        this.phases.push(this.parseSchedule(phase))
+      } else {
+        this.phaseNames.set(phase.name, this.phases.length);
+        this.phases.push(phase);
+      }
+    }
+  }
+
+  repairPhaseId(anyPhaseId) {
+    return Math.max(0, Math.min(anyPhaseId, this.phases.length - 1))
+  }
+
+  getPhase(phaseId) {
+    return this.phases[phaseId];
+  }
+
+  getPhaseIdByName(phaseName) {
+    return this.phaseNames.get(phaseName);
+  }
+
+  forEachPhase(f) {
+    this.phases.forEach(f);
+  }
+
+  parseSchedule(phase) {
+    function createNode(state, match) {
+      let inputs = [];
+      if (match.groups.args) {
+        const nodeIdsString = match.groups.args.replace(/\s/g, '');
+        const nodeIdStrings = nodeIdsString.split(',');
+        inputs = nodeIdStrings.map((n) => Number.parseInt(n, 10));
+      }
+      const node = {id: Number.parseInt(match.groups.id, 10),
+                    label: match.groups.label,
+                    inputs: inputs};
+      if (match.groups.blocks) {
+        const nodeIdsString = match.groups.blocks.replace(/\s/g, '').replace(/B/g,'');
+        const nodeIdStrings = nodeIdsString.split(',');
+        const successors = nodeIdStrings.map((n) => Number.parseInt(n, 10));
+        state.currentBlock.succ = successors;
+      }
+      state.nodes[node.id] = node;
+      state.currentBlock.nodes.push(node);
+    }
+    function createBlock(state, match) {
+      let predecessors = [];
+      if (match.groups.in) {
+        const blockIdsString = match.groups.in.replace(/\s/g, '').replace(/B/g, '');
+        const blockIdStrings = blockIdsString.split(',');
+        predecessors = blockIdStrings.map((n) => Number.parseInt(n, 10));
+      }
+      const block = {id: Number.parseInt(match.groups.id, 10),
+                     isDeferred: match.groups.deferred != 
undefined, + pred: predecessors.sort(), + succ: [], + nodes: []}; + state.blocks[block.id] = block; + state.currentBlock = block; + } + function setGotoSuccessor(state, match) { + state.currentBlock.succ = [Number.parseInt(match.groups.successor.replace(/\s/g, ''), 10)]; + } + const rules = [ + { + lineRegexps: + [ /^\s*(?\d+):\ (?