diff --git a/common.gypi b/common.gypi index 0ce5c5226d9571..42a76de6ca3562 100644 --- a/common.gypi +++ b/common.gypi @@ -36,7 +36,7 @@ # Reset this number to 0 on major V8 upgrades. # Increment by one for each non-official patch applied to deps/v8. - 'v8_embedder_string': '-node.9', + 'v8_embedder_string': '-node.4', ##### V8 defaults for Node.js ##### diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore index f1bd2f33fe771a..34011600a05cd9 100644 --- a/deps/v8/.gitignore +++ b/deps/v8/.gitignore @@ -67,6 +67,7 @@ /third_party/jsoncpp/source !/third_party/colorama /third_party/colorama/src +!/third_party/glibc !/third_party/googletest /third_party/googletest/src/* !/third_party/googletest/src/googletest diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index 9696fc61d54d8f..7a3bf83ea4dd39 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -44,6 +44,7 @@ CodeWeavers, Inc. <*@codeweavers.com> Alibaba, Inc. <*@alibaba-inc.com> SiFive, Inc. <*@sifive.com> +Aapo Alasuutari Aaron Bieber Aaron O'Mullan Abdulla Kamar @@ -285,3 +286,4 @@ Zheng Liu Zhongping Wang 柳荣一 Yang Xiang +Kotaro Ohsugi diff --git a/deps/v8/BUILD.bazel b/deps/v8/BUILD.bazel index 3be7da8f3bb11f..9730021294563b 100644 --- a/deps/v8/BUILD.bazel +++ b/deps/v8/BUILD.bazel @@ -132,6 +132,8 @@ v8_flag(name = "v8_enable_snapshot_code_comments") v8_flag(name = "v8_enable_snapshot_native_code_counters") +v8_flag(name = "v8_enable_static_roots") + v8_flag(name = "v8_enable_trace_maps") v8_flag(name = "v8_enable_v8_checks") @@ -316,6 +318,7 @@ v8_config( "v8_enable_slow_dchecks": "ENABLE_SLOW_DCHECKS", "v8_enable_runtime_call_stats": "V8_RUNTIME_CALL_STATS", "v8_enable_snapshot_native_code_counters": "V8_SNAPSHOT_NATIVE_CODE_COUNTERS", + "v8_enable_static_roots": "V8_STATIC_ROOTS", "v8_enable_trace_maps": "V8_TRACE_MAPS", "v8_enable_v8_checks": "V8_ENABLE_CHECKS", "v8_enable_verify_csa": "ENABLE_VERIFY_CSA", @@ -325,7 +328,6 @@ v8_config( }, defines = [ "GOOGLE3", - "ENABLE_DEBUGGER_SUPPORT", 
"V8_ADVANCED_BIGINT_ALGORITHMS", "V8_CONCURRENT_MARKING", ] + select({ @@ -963,6 +965,7 @@ filegroup( "src/objects/templates.tq", "src/objects/torque-defined-classes.tq", "src/objects/turbofan-types.tq", + "src/objects/turboshaft-types.tq", "test/torque/test-torque.tq", "third_party/v8/builtins/array-sort.tq", ] + select({ @@ -1245,6 +1248,7 @@ filegroup( "src/common/message-template.h", "src/common/operation.h", "src/common/ptr-compr-inl.h", + "src/common/ptr-compr.cc", "src/common/ptr-compr.h", "src/compiler-dispatcher/lazy-compile-dispatcher.cc", "src/compiler-dispatcher/lazy-compile-dispatcher.h", @@ -1429,6 +1433,8 @@ filegroup( "src/heap/cppgc-js/cpp-marking-state-inl.h", "src/heap/cppgc-js/cpp-snapshot.cc", "src/heap/cppgc-js/cpp-snapshot.h", + "src/heap/cppgc-js/cross-heap-remembered-set.cc", + "src/heap/cppgc-js/cross-heap-remembered-set.h", "src/heap/cppgc-js/unified-heap-marking-state.cc", "src/heap/cppgc-js/unified-heap-marking-state.h", "src/heap/cppgc-js/unified-heap-marking-state-inl.h", @@ -1459,8 +1465,6 @@ filegroup( "src/heap/gc-tracer.cc", "src/heap/gc-tracer-inl.h", "src/heap/gc-tracer.h", - "src/heap/global-handle-marking-visitor.cc", - "src/heap/global-handle-marking-visitor.h", "src/heap/heap-allocator-inl.h", "src/heap/heap-allocator.cc", "src/heap/heap-allocator.h", @@ -1567,6 +1571,8 @@ filegroup( "src/heap/stress-scavenge-observer.h", "src/heap/sweeper.cc", "src/heap/sweeper.h", + "src/heap/traced-handles-marking-visitor.cc", + "src/heap/traced-handles-marking-visitor.h", "src/heap/weak-object-worklists.cc", "src/heap/weak-object-worklists.h", "src/ic/call-optimization.cc", @@ -1915,6 +1921,8 @@ filegroup( "src/objects/transitions.h", "src/objects/turbofan-types-inl.h", "src/objects/turbofan-types.h", + "src/objects/turboshaft-types-inl.h", + "src/objects/turboshaft-types.h", "src/objects/type-hints.cc", "src/objects/type-hints.h", "src/objects/value-serializer.cc", @@ -2031,6 +2039,7 @@ filegroup( "src/roots/roots-inl.h", 
"src/roots/roots.cc", "src/roots/roots.h", + "src/roots/static-roots.h", "src/runtime/runtime-array.cc", "src/runtime/runtime-atomics.cc", "src/runtime/runtime-bigint.cc", @@ -2882,7 +2891,10 @@ filegroup( "src/compiler/state-values-utils.h", "src/compiler/store-store-elimination.cc", "src/compiler/store-store-elimination.h", + "src/compiler/turboshaft/assembler.cc", "src/compiler/turboshaft/assembler.h", + "src/compiler/turboshaft/assert-types-reducer.h", + "src/compiler/turboshaft/branch-elimination-reducer.h", "src/compiler/turboshaft/decompression-optimization.cc", "src/compiler/turboshaft/decompression-optimization.h", "src/compiler/turboshaft/deopt-data.h", @@ -2891,9 +2903,15 @@ filegroup( "src/compiler/turboshaft/graph-builder.h", "src/compiler/turboshaft/graph.cc", "src/compiler/turboshaft/graph.h", + "src/compiler/turboshaft/index.h", "src/compiler/turboshaft/graph-visualizer.cc", "src/compiler/turboshaft/graph-visualizer.h", + "src/compiler/turboshaft/late-escape-analysis-reducer.h", + "src/compiler/turboshaft/late-escape-analysis-reducer.cc", + "src/compiler/turboshaft/layered-hash-map.h", "src/compiler/turboshaft/machine-optimization-reducer.h", + "src/compiler/turboshaft/memory-optimization.cc", + "src/compiler/turboshaft/memory-optimization.h", "src/compiler/turboshaft/operations.cc", "src/compiler/turboshaft/operations.h", "src/compiler/turboshaft/operation-matching.h", @@ -2908,9 +2926,13 @@ filegroup( "src/compiler/turboshaft/simplify-tf-loops.cc", "src/compiler/turboshaft/simplify-tf-loops.h", "src/compiler/turboshaft/snapshot-table.h", + "src/compiler/turboshaft/type-inference-reducer.h", + "src/compiler/turboshaft/types.cc", + "src/compiler/turboshaft/types.h", "src/compiler/turboshaft/utils.cc", "src/compiler/turboshaft/utils.h", "src/compiler/turboshaft/value-numbering-reducer.h", + "src/compiler/turboshaft/variable-reducer.h", "src/compiler/type-cache.cc", "src/compiler/type-cache.h", "src/compiler/type-narrowing-reducer.cc", @@ -2990,6 
+3012,7 @@ filegroup( "src/builtins/builtins-microtask-queue-gen.cc", "src/builtins/builtins-number-gen.cc", "src/builtins/builtins-object-gen.cc", + "src/builtins/builtins-object-gen.h", "src/builtins/builtins-promise-gen.cc", "src/builtins/builtins-promise-gen.h", "src/builtins/builtins-proxy-gen.cc", @@ -3215,6 +3238,8 @@ filegroup( "src/snapshot/embedded/platform-embedded-file-writer-mac.h", "src/snapshot/embedded/platform-embedded-file-writer-win.cc", "src/snapshot/embedded/platform-embedded-file-writer-win.h", + "src/snapshot/static-roots-gen.cc", + "src/snapshot/static-roots-gen.h", "src/snapshot/mksnapshot.cc", "src/snapshot/snapshot-empty.cc", ], @@ -3249,6 +3274,8 @@ filegroup( "src/inspector/v8-debugger.h", "src/inspector/v8-debugger-agent-impl.cc", "src/inspector/v8-debugger-agent-impl.h", + "src/inspector/v8-debugger-barrier.cc", + "src/inspector/v8-debugger-barrier.h", "src/inspector/v8-debugger-id.cc", "src/inspector/v8-debugger-id.h", "src/inspector/v8-debugger-script.cc", @@ -3549,12 +3576,14 @@ v8_mksnapshot( cc_library( name = "icu/generated_torque_headers", hdrs = [":icu/generated_torque_files"], + copts = ["-Wno-implicit-fallthrough"], strip_include_prefix = "icu", ) cc_library( name = "noicu/generated_torque_headers", hdrs = [":noicu/generated_torque_files"], + copts = ["-Wno-implicit-fallthrough"], strip_include_prefix = "noicu", ) @@ -3564,6 +3593,7 @@ v8_library( ":v8_libbase_files", ":v8_shared_internal_headers", ], + copts = ["-Wno-implicit-fallthrough"], ) cc_library( @@ -3572,6 +3602,7 @@ cc_library( "src/torque/kythe-data.h", "src/torque/torque-compiler.h", ], + copts = ["-Wno-implicit-fallthrough"], include_prefix = "third_party/v8", includes = ["."], ) @@ -3581,7 +3612,7 @@ cc_library( srcs = [ ":torque_base_files", ], - copts = select({ + copts = ["-Wno-implicit-fallthrough"] + select({ "@v8//bazel/config:is_posix": ["-fexceptions"], "//conditions:default": [], }), @@ -3598,6 +3629,7 @@ v8_library( ":v8_base_without_compiler_files", 
":v8_common_libshared_files", ], + copts = ["-Wno-implicit-fallthrough"], icu_deps = [ ":icu/generated_torque_headers", "//external:icu", @@ -3623,6 +3655,7 @@ v8_library( name = "v8", srcs = [":v8_inspector_files"], hdrs = [":public_header_files"], + copts = ["-Wno-implicit-fallthrough"], icu_deps = [":icu/v8_libshared"], icu_srcs = [":icu/snapshot_files"], noicu_deps = [":noicu/v8_libshared"], @@ -3635,6 +3668,7 @@ v8_library( name = "wee8", srcs = [":wee8_files"], hdrs = [":public_wasm_c_api_header_files"], + copts = ["-Wno-implicit-fallthrough"], strip_include_prefix = "third_party", visibility = ["//visibility:public"], deps = [":noicu/v8"], @@ -3664,6 +3698,7 @@ v8_binary( "src/interpreter/bytecodes.cc", "src/interpreter/bytecodes.h", ], + copts = ["-Wno-implicit-fallthrough"], deps = ["v8_libbase"], ) @@ -3675,6 +3710,7 @@ v8_binary( ":v8_libbase_files", ":v8_shared_internal_headers", ], + copts = ["-Wno-implicit-fallthrough"], defines = [ "V8_INTL_SUPPORT", "ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC", @@ -3693,7 +3729,7 @@ v8_binary( "src/torque/torque.cc", ":torque_base_files", ], - copts = select({ + copts = ["-Wno-implicit-fallthrough"] + select({ "@v8//bazel/config:is_posix": ["-fexceptions"], "//conditions:default": [], }), @@ -3708,6 +3744,7 @@ v8_binary( v8_binary( name = "mksnapshot", srcs = [":mksnapshot_files"], + copts = ["-Wno-implicit-fallthrough"], icu_deps = [":icu/v8_libshared"], linkopts = select({ "@v8//bazel/config:is_android": ["-llog"], @@ -3719,6 +3756,7 @@ v8_binary( v8_binary( name = "d8", srcs = [":d8_files"], + copts = ["-Wno-implicit-fallthrough"], icu_deps = [":icu/v8"], noicu_deps = [":noicu/v8"], ) diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn index 78fd8cd2d38790..c9fec31475d6c7 100644 --- a/deps/v8/BUILD.gn +++ b/deps/v8/BUILD.gn @@ -120,6 +120,9 @@ declare_args() { # as per the --native-code-counters flag. v8_enable_snapshot_native_code_counters = "" + # Use pre-generated static root pointer values from static-roots.h. 
+ v8_enable_static_roots = false + # Enable code-generation-time checking of types in the CodeStubAssembler. v8_enable_verify_csa = false @@ -310,9 +313,6 @@ declare_args() { # Sets -DV8_ENABLE_SANDBOX. v8_enable_sandbox = "" - # Enable all available sandbox features. Implies v8_enable_sandbox. - v8_enable_sandbox_future = false - # Expose the memory corruption API to JavaScript. Useful for testing the sandbox. # WARNING This will expose builtins that (by design) cause memory corruption. # Sets -DV8_EXPOSE_MEMORY_CORRUPTION_API @@ -376,6 +376,9 @@ declare_args() { # Compile V8 using zlib as dependency. # Sets -DV8_USE_ZLIB v8_use_zlib = true + + # Make ValueDeserializer crash if the data to deserialize is invalid. + v8_value_deserializer_hard_fail = false } # Derived defaults. @@ -463,13 +466,26 @@ if (v8_enable_maglev == "") { } if (v8_builtins_profiling_log_file == "default") { v8_builtins_profiling_log_file = "" - if (is_debug == false) { - if (v8_current_cpu == "x64") { + + # Don't use existing profile when + # * generating a new one (i.e. v8_enable_builtins_profiling), + # * is_debug or dcheck_always_on because they add more checks to the + # builtins control flow which we don't want to generate, + # * v8_enable_webassembly because it changes the set of opcodes which affects + # graphs hashes, + # * !is_clang because it might affect argument evaluation order, which + # makes node IDs not predictable for subgraphs like Op1(Op2(), Op3()) and + # as a result different graph hash. + if (!v8_enable_builtins_profiling && is_clang && !is_debug && + !dcheck_always_on && v8_enable_webassembly) { + if ((v8_current_cpu == "x64" || v8_current_cpu == "arm64") && + v8_enable_pointer_compression && v8_enable_external_code_space) { + # Note, currently x64 profile can be applied to arm64 but not the other + # way round. 
v8_builtins_profiling_log_file = "tools/builtins-pgo/x64.profile" - } else if (v8_current_cpu == "arm64") { - v8_builtins_profiling_log_file = "tools/builtins-pgo/arm64.profile" - } else if (v8_current_cpu == "arm") { - v8_builtins_profiling_log_file = "tools/builtins-pgo/arm.profile" + } else if (v8_current_cpu == "x86" || v8_current_cpu == "arm") { + # Note, x86 profile can be applied to arm but not the other way round. + v8_builtins_profiling_log_file = "tools/builtins-pgo/x86.profile" } } } @@ -522,11 +538,22 @@ if (v8_enable_sandbox == "") { v8_enable_external_code_space && target_os != "fuchsia" } -# Enable all available sandbox features if sandbox future is enabled. -if (v8_enable_sandbox_future) { - v8_enable_sandbox = true +if (v8_enable_static_roots == "") { + # Static roots are only valid for builds with pointer compression and a + # shared ro heap. Also, non-wasm and non-i18n builds have fewer read-only + # roots. + v8_enable_static_roots = + v8_enable_pointer_compression && v8_enable_shared_ro_heap && + v8_enable_pointer_compression_shared_cage && v8_enable_webassembly && + v8_enable_i18n_support } +assert(!v8_enable_static_roots || + (v8_enable_pointer_compression && v8_enable_shared_ro_heap && + v8_enable_pointer_compression_shared_cage && + v8_enable_webassembly && v8_enable_i18n_support), + "Trying to enable static roots in a configuration that is not supported") + assert(!v8_disable_write_barriers || v8_enable_single_generation, "Disabling write barriers works only with single generation") @@ -1052,9 +1079,18 @@ config("features") { if (v8_enable_pointer_compression_8gb) { defines += [ "V8_COMPRESS_POINTERS_8GB" ] } + if (v8_enable_static_roots) { + defines += [ "V8_STATIC_ROOTS" ] + } if (v8_use_zlib) { defines += [ "V8_USE_ZLIB" ] } + if (v8_use_libm_trig_functions) { + defines += [ "V8_USE_LIBM_TRIG_FUNCTIONS" ] + } + if (v8_value_deserializer_hard_fail) { + defines += [ "V8_VALUE_DESERIALIZER_HARD_FAIL" ] + } } config("toolchain") { @@ 
-1571,93 +1607,95 @@ if (is_android && enable_java_templates) { } } -action("postmortem-metadata") { - # Only targets in this file and the top-level visibility target can - # depend on this. - visibility = [ - ":*", - "//:gn_visibility", - ] - - script = "tools/gen-postmortem-metadata.py" - - # NOSORT - sources = [ - "$target_gen_dir/torque-generated/instance-types.h", - "src/objects/allocation-site.h", - "src/objects/allocation-site-inl.h", - "src/objects/cell.h", - "src/objects/cell-inl.h", - "src/objects/code.h", - "src/objects/code-inl.h", - "src/objects/data-handler.h", - "src/objects/data-handler-inl.h", - "src/objects/descriptor-array.h", - "src/objects/descriptor-array-inl.h", - "src/objects/feedback-cell.h", - "src/objects/feedback-cell-inl.h", - "src/objects/fixed-array.h", - "src/objects/fixed-array-inl.h", - "src/objects/heap-number.h", - "src/objects/heap-number-inl.h", - "src/objects/heap-object.h", - "src/objects/heap-object-inl.h", - "src/objects/instance-type.h", - "src/objects/js-array-buffer.h", - "src/objects/js-array-buffer-inl.h", - "src/objects/js-array.h", - "src/objects/js-array-inl.h", - "src/objects/js-function-inl.h", - "src/objects/js-function.cc", - "src/objects/js-function.h", - "src/objects/js-objects.cc", - "src/objects/js-objects.h", - "src/objects/js-objects-inl.h", - "src/objects/js-promise.h", - "src/objects/js-promise-inl.h", - "src/objects/js-raw-json.cc", - "src/objects/js-raw-json.h", - "src/objects/js-raw-json-inl.h", - "src/objects/js-regexp.cc", - "src/objects/js-regexp.h", - "src/objects/js-regexp-inl.h", - "src/objects/js-regexp-string-iterator.h", - "src/objects/js-regexp-string-iterator-inl.h", - "src/objects/map.cc", - "src/objects/map.h", - "src/objects/map-inl.h", - "src/objects/megadom-handler.h", - "src/objects/megadom-handler-inl.h", - "src/objects/name.h", - "src/objects/name-inl.h", - "src/objects/objects.h", - "src/objects/objects-inl.h", - "src/objects/oddball.h", - "src/objects/oddball-inl.h", - 
"src/objects/primitive-heap-object.h", - "src/objects/primitive-heap-object-inl.h", - "src/objects/scope-info.h", - "src/objects/scope-info-inl.h", - "src/objects/script.h", - "src/objects/script-inl.h", - "src/objects/shared-function-info.cc", - "src/objects/shared-function-info.h", - "src/objects/shared-function-info-inl.h", - "src/objects/string.cc", - "src/objects/string-comparator.cc", - "src/objects/string-comparator.h", - "src/objects/string.h", - "src/objects/string-inl.h", - "src/objects/struct.h", - "src/objects/struct-inl.h", - ] - - outputs = [ "$target_gen_dir/debug-support.cc" ] +if (v8_postmortem_support) { + action("postmortem-metadata") { + # Only targets in this file and the top-level visibility target can + # depend on this. + visibility = [ + ":*", + "//:gn_visibility", + ] - args = rebase_path(outputs, root_build_dir) + - rebase_path(sources, root_build_dir) + script = "tools/gen-postmortem-metadata.py" - deps = [ ":run_torque" ] + # NOSORT + sources = [ + "$target_gen_dir/torque-generated/instance-types.h", + "src/objects/allocation-site.h", + "src/objects/allocation-site-inl.h", + "src/objects/cell.h", + "src/objects/cell-inl.h", + "src/objects/code.h", + "src/objects/code-inl.h", + "src/objects/data-handler.h", + "src/objects/data-handler-inl.h", + "src/objects/descriptor-array.h", + "src/objects/descriptor-array-inl.h", + "src/objects/feedback-cell.h", + "src/objects/feedback-cell-inl.h", + "src/objects/fixed-array.h", + "src/objects/fixed-array-inl.h", + "src/objects/heap-number.h", + "src/objects/heap-number-inl.h", + "src/objects/heap-object.h", + "src/objects/heap-object-inl.h", + "src/objects/instance-type.h", + "src/objects/js-array-buffer.h", + "src/objects/js-array-buffer-inl.h", + "src/objects/js-array.h", + "src/objects/js-array-inl.h", + "src/objects/js-function-inl.h", + "src/objects/js-function.cc", + "src/objects/js-function.h", + "src/objects/js-objects.cc", + "src/objects/js-objects.h", + "src/objects/js-objects-inl.h", + 
"src/objects/js-promise.h", + "src/objects/js-promise-inl.h", + "src/objects/js-raw-json.cc", + "src/objects/js-raw-json.h", + "src/objects/js-raw-json-inl.h", + "src/objects/js-regexp.cc", + "src/objects/js-regexp.h", + "src/objects/js-regexp-inl.h", + "src/objects/js-regexp-string-iterator.h", + "src/objects/js-regexp-string-iterator-inl.h", + "src/objects/map.cc", + "src/objects/map.h", + "src/objects/map-inl.h", + "src/objects/megadom-handler.h", + "src/objects/megadom-handler-inl.h", + "src/objects/name.h", + "src/objects/name-inl.h", + "src/objects/objects.h", + "src/objects/objects-inl.h", + "src/objects/oddball.h", + "src/objects/oddball-inl.h", + "src/objects/primitive-heap-object.h", + "src/objects/primitive-heap-object-inl.h", + "src/objects/scope-info.h", + "src/objects/scope-info-inl.h", + "src/objects/script.h", + "src/objects/script-inl.h", + "src/objects/shared-function-info.cc", + "src/objects/shared-function-info.h", + "src/objects/shared-function-info-inl.h", + "src/objects/string.cc", + "src/objects/string-comparator.cc", + "src/objects/string-comparator.h", + "src/objects/string.h", + "src/objects/string-inl.h", + "src/objects/struct.h", + "src/objects/struct-inl.h", + ] + + outputs = [ "$target_gen_dir/debug-support.cc" ] + + args = rebase_path(outputs, root_build_dir) + + rebase_path(sources, root_build_dir) + + deps = [ ":run_torque" ] + } } torque_files = [ @@ -1860,6 +1898,7 @@ torque_files = [ "src/objects/templates.tq", "src/objects/torque-defined-classes.tq", "src/objects/turbofan-types.tq", + "src/objects/turboshaft-types.tq", "test/torque/test-torque.tq", "third_party/v8/builtins/array-sort.tq", ] @@ -2159,6 +2198,12 @@ template("run_mksnapshot") { "--turbo-profiling-input", rebase_path(v8_builtins_profiling_log_file, root_build_dir), ] + + # Replace this with --warn-about-builtin-profile-data to see the full + # list of builtins with incompatible profiles. 
+ # TODO(crbug.com/v8/13647): Do not fail for invalid profiles + # args += [ "--abort-on-bad-builtin-profile-data" ] + args += [ "--warn-about-builtin-profile-data" ] } # This is needed to distinguish between generating code for the simulator @@ -2291,6 +2336,7 @@ action("v8_dump_build_config") { "$v8_enable_pointer_compression_shared_cage", "v8_enable_sandbox=$v8_enable_sandbox", "v8_enable_shared_ro_heap=$v8_enable_shared_ro_heap", + "v8_disable_write_barriers=$v8_disable_write_barriers", "v8_enable_third_party_heap=$v8_enable_third_party_heap", "v8_enable_webassembly=$v8_enable_webassembly", "v8_dict_property_const_tracking=$v8_dict_property_const_tracking", @@ -2404,6 +2450,7 @@ v8_source_set("v8_initializers") { "src/builtins/builtins-microtask-queue-gen.cc", "src/builtins/builtins-number-gen.cc", "src/builtins/builtins-object-gen.cc", + "src/builtins/builtins-object-gen.h", "src/builtins/builtins-promise-gen.cc", "src/builtins/builtins-promise-gen.h", "src/builtins/builtins-proxy-gen.cc", @@ -2454,57 +2501,57 @@ v8_source_set("v8_initializers") { if (v8_current_cpu == "x86") { sources += [ - ### gcmole(arch:ia32) ### + ### gcmole(ia32) ### "src/builtins/ia32/builtins-ia32.cc", ] } else if (v8_current_cpu == "x64") { sources += [ - ### gcmole(arch:x64) ### + ### gcmole(x64) ### "src/builtins/x64/builtins-x64.cc", ] } else if (v8_current_cpu == "arm") { sources += [ - ### gcmole(arch:arm) ### + ### gcmole(arm) ### "src/builtins/arm/builtins-arm.cc", ] } else if (v8_current_cpu == "arm64") { sources += [ - ### gcmole(arch:arm64) ### + ### gcmole(arm64) ### "src/builtins/arm64/builtins-arm64.cc", ] } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") { sources += [ - ### gcmole(arch:mips64el) ### + ### gcmole(mips64el) ### "src/builtins/mips64/builtins-mips64.cc", ] } else if (v8_current_cpu == "loong64") { sources += [ - ### gcmole(arch:loong64) ### + ### gcmole(loong64) ### "src/builtins/loong64/builtins-loong64.cc", ] } else if 
(v8_current_cpu == "ppc") { sources += [ - ### gcmole(arch:ppc) ### + ### gcmole(ppc) ### "src/builtins/ppc/builtins-ppc.cc", ] } else if (v8_current_cpu == "ppc64") { sources += [ - ### gcmole(arch:ppc64) ### + ### gcmole(ppc64) ### "src/builtins/ppc/builtins-ppc.cc", ] } else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") { sources += [ - ### gcmole(arch:s390) ### + ### gcmole(s390) ### "src/builtins/s390/builtins-s390.cc", ] } else if (v8_current_cpu == "riscv64") { sources += [ - ### gcmole(arch:riscv64) ### + ### gcmole(riscv64) ### "src/builtins/riscv/builtins-riscv.cc", ] } else if (v8_current_cpu == "riscv32") { sources += [ - ### gcmole(arch:riscv32) ### + ### gcmole(riscv32) ### "src/builtins/riscv/builtins-riscv.cc", ] } @@ -2929,13 +2976,19 @@ v8_header_set("v8_internal_headers") { "src/compiler/state-values-utils.h", "src/compiler/store-store-elimination.h", "src/compiler/turboshaft/assembler.h", + "src/compiler/turboshaft/assert-types-reducer.h", + "src/compiler/turboshaft/branch-elimination-reducer.h", "src/compiler/turboshaft/decompression-optimization.h", "src/compiler/turboshaft/deopt-data.h", "src/compiler/turboshaft/fast-hash.h", "src/compiler/turboshaft/graph-builder.h", "src/compiler/turboshaft/graph-visualizer.h", "src/compiler/turboshaft/graph.h", + "src/compiler/turboshaft/index.h", + "src/compiler/turboshaft/late-escape-analysis-reducer.h", + "src/compiler/turboshaft/layered-hash-map.h", "src/compiler/turboshaft/machine-optimization-reducer.h", + "src/compiler/turboshaft/memory-optimization.h", "src/compiler/turboshaft/operation-matching.h", "src/compiler/turboshaft/operations.h", "src/compiler/turboshaft/optimization-phase.h", @@ -2945,8 +2998,11 @@ v8_header_set("v8_internal_headers") { "src/compiler/turboshaft/sidetable.h", "src/compiler/turboshaft/simplify-tf-loops.h", "src/compiler/turboshaft/snapshot-table.h", + "src/compiler/turboshaft/type-inference-reducer.h", + "src/compiler/turboshaft/types.h", 
"src/compiler/turboshaft/utils.h", "src/compiler/turboshaft/value-numbering-reducer.h", + "src/compiler/turboshaft/variable-reducer.h", "src/compiler/type-cache.h", "src/compiler/type-narrowing-reducer.h", "src/compiler/typed-optimization.h", @@ -3057,6 +3113,7 @@ v8_header_set("v8_internal_headers") { "src/heap/cppgc-js/cpp-marking-state-inl.h", "src/heap/cppgc-js/cpp-marking-state.h", "src/heap/cppgc-js/cpp-snapshot.h", + "src/heap/cppgc-js/cross-heap-remembered-set.h", "src/heap/cppgc-js/unified-heap-marking-state-inl.h", "src/heap/cppgc-js/unified-heap-marking-state.h", "src/heap/cppgc-js/unified-heap-marking-verifier.h", @@ -3078,7 +3135,6 @@ v8_header_set("v8_internal_headers") { "src/heap/gc-idle-time-handler.h", "src/heap/gc-tracer-inl.h", "src/heap/gc-tracer.h", - "src/heap/global-handle-marking-visitor.h", "src/heap/heap-allocator-inl.h", "src/heap/heap-allocator.h", "src/heap/heap-controller.h", @@ -3145,6 +3201,7 @@ v8_header_set("v8_internal_headers") { "src/heap/stress-marking-observer.h", "src/heap/stress-scavenge-observer.h", "src/heap/sweeper.h", + "src/heap/traced-handles-marking-visitor.h", "src/heap/weak-object-worklists.h", "src/ic/call-optimization.h", "src/ic/handler-configuration-inl.h", @@ -3406,6 +3463,8 @@ v8_header_set("v8_internal_headers") { "src/objects/transitions.h", "src/objects/turbofan-types-inl.h", "src/objects/turbofan-types.h", + "src/objects/turboshaft-types-inl.h", + "src/objects/turboshaft-types.h", "src/objects/type-hints.h", "src/objects/value-serializer.h", "src/objects/visitors-inl.h", @@ -3473,6 +3532,7 @@ v8_header_set("v8_internal_headers") { "src/regexp/special-case.h", "src/roots/roots-inl.h", "src/roots/roots.h", + "src/roots/static-roots.h", "src/runtime/runtime-utils.h", "src/runtime/runtime.h", "src/sandbox/bounded-size-inl.h", @@ -3574,7 +3634,6 @@ v8_header_set("v8_internal_headers") { if (v8_enable_maglev) { sources += [ - "src/maglev/maglev-assembler-inl.h", "src/maglev/maglev-assembler.h", 
"src/maglev/maglev-basic-block.h", "src/maglev/maglev-code-gen-state.h", @@ -3595,9 +3654,13 @@ v8_header_set("v8_internal_headers") { "src/maglev/maglev-regalloc-data.h", "src/maglev/maglev-regalloc.h", "src/maglev/maglev-register-frame-array.h", - "src/maglev/maglev-vreg-allocator.h", "src/maglev/maglev.h", ] + if (v8_current_cpu == "arm64") { + sources += [ "src/maglev/arm64/maglev-assembler-arm64-inl.h" ] + } else if (v8_current_cpu == "x64") { + sources += [ "src/maglev/x64/maglev-assembler-x64-inl.h" ] + } } if (v8_enable_webassembly) { @@ -3758,7 +3821,8 @@ v8_header_set("v8_internal_headers") { } if (v8_current_cpu == "x86") { - sources += [ ### gcmole(arch:ia32) ### + sources += [ + ### gcmole(ia32) ### "src/baseline/ia32/baseline-assembler-ia32-inl.h", "src/baseline/ia32/baseline-compiler-ia32-inl.h", "src/codegen/ia32/assembler-ia32-inl.h", @@ -3776,7 +3840,8 @@ v8_header_set("v8_internal_headers") { "src/wasm/baseline/ia32/liftoff-assembler-ia32.h", ] } else if (v8_current_cpu == "x64") { - sources += [ ### gcmole(arch:x64) ### + sources += [ + ### gcmole(x64) ### "src/baseline/x64/baseline-assembler-x64-inl.h", "src/baseline/x64/baseline-compiler-x64-inl.h", "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h", @@ -3812,7 +3877,8 @@ v8_header_set("v8_internal_headers") { } } } else if (v8_current_cpu == "arm") { - sources += [ ### gcmole(arch:arm) ### + sources += [ + ### gcmole(arm) ### "src/baseline/arm/baseline-assembler-arm-inl.h", "src/baseline/arm/baseline-compiler-arm-inl.h", "src/codegen/arm/assembler-arm-inl.h", @@ -3830,7 +3896,8 @@ v8_header_set("v8_internal_headers") { "src/wasm/baseline/arm/liftoff-assembler-arm.h", ] } else if (v8_current_cpu == "arm64") { - sources += [ ### gcmole(arch:arm64) ### + sources += [ + ### gcmole(arm64) ### "src/baseline/arm64/baseline-assembler-arm64-inl.h", "src/baseline/arm64/baseline-compiler-arm64-inl.h", "src/codegen/arm64/assembler-arm64-inl.h", @@ -3872,7 +3939,8 @@ 
v8_header_set("v8_internal_headers") { sources += [ "src/diagnostics/unwinding-info-win64.h" ] } } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") { - sources += [ ### gcmole(arch:mips64el) ### + sources += [ + ### gcmole(mips64el) ### "src/baseline/mips64/baseline-assembler-mips64-inl.h", "src/baseline/mips64/baseline-compiler-mips64-inl.h", "src/codegen/mips64/assembler-mips64-inl.h", @@ -3888,7 +3956,8 @@ v8_header_set("v8_internal_headers") { "src/wasm/baseline/mips64/liftoff-assembler-mips64.h", ] } else if (v8_current_cpu == "loong64") { - sources += [ ### gcmole(arch:loong64) ### + sources += [ + ### gcmole(loong64) ### "src/baseline/loong64/baseline-assembler-loong64-inl.h", "src/baseline/loong64/baseline-compiler-loong64-inl.h", "src/codegen/loong64/assembler-loong64-inl.h", @@ -3904,7 +3973,8 @@ v8_header_set("v8_internal_headers") { "src/wasm/baseline/loong64/liftoff-assembler-loong64.h", ] } else if (v8_current_cpu == "ppc") { - sources += [ ### gcmole(arch:ppc) ### + sources += [ + ### gcmole(ppc) ### "src/codegen/ppc/assembler-ppc-inl.h", "src/codegen/ppc/assembler-ppc.h", "src/codegen/ppc/constants-ppc.h", @@ -3920,7 +3990,8 @@ v8_header_set("v8_internal_headers") { "src/wasm/baseline/ppc/liftoff-assembler-ppc.h", ] } else if (v8_current_cpu == "ppc64") { - sources += [ ### gcmole(arch:ppc64) ### + sources += [ + ### gcmole(ppc64) ### "src/baseline/ppc/baseline-assembler-ppc-inl.h", "src/baseline/ppc/baseline-compiler-ppc-inl.h", "src/codegen/ppc/assembler-ppc-inl.h", @@ -3938,7 +4009,8 @@ v8_header_set("v8_internal_headers") { "src/wasm/baseline/ppc/liftoff-assembler-ppc.h", ] } else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") { - sources += [ ### gcmole(arch:s390) ### + sources += [ + ### gcmole(s390) ### "src/baseline/s390/baseline-assembler-s390-inl.h", "src/baseline/s390/baseline-compiler-s390-inl.h", "src/codegen/s390/assembler-s390-inl.h", @@ -3956,7 +4028,8 @@ v8_header_set("v8_internal_headers") { 
"src/wasm/baseline/s390/liftoff-assembler-s390.h", ] } else if (v8_current_cpu == "riscv64") { - sources += [ ### gcmole(arch:riscv64) ### + sources += [ + ### gcmole(riscv64) ### "src/baseline/riscv/baseline-assembler-riscv-inl.h", "src/baseline/riscv/baseline-compiler-riscv-inl.h", "src/codegen/riscv/assembler-riscv-inl.h", @@ -3995,7 +4068,8 @@ v8_header_set("v8_internal_headers") { "src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h", ] } else if (v8_current_cpu == "riscv32") { - sources += [ ### gcmole(arch:riscv32) ### + sources += [ + ### gcmole(riscv32) ### "src/baseline/riscv/baseline-assembler-riscv-inl.h", "src/baseline/riscv/baseline-compiler-riscv-inl.h", "src/codegen/riscv/assembler-riscv.h", @@ -4247,15 +4321,19 @@ v8_source_set("v8_turboshaft") { visibility = [ ":*" ] # Only targets in this file can depend on this. sources = [ + "src/compiler/turboshaft/assembler.cc", "src/compiler/turboshaft/decompression-optimization.cc", "src/compiler/turboshaft/graph-builder.cc", "src/compiler/turboshaft/graph-visualizer.cc", "src/compiler/turboshaft/graph.cc", + "src/compiler/turboshaft/late-escape-analysis-reducer.cc", + "src/compiler/turboshaft/memory-optimization.cc", "src/compiler/turboshaft/operations.cc", "src/compiler/turboshaft/optimization-phase.cc", "src/compiler/turboshaft/recreate-schedule.cc", "src/compiler/turboshaft/representations.cc", "src/compiler/turboshaft/simplify-tf-loops.cc", + "src/compiler/turboshaft/types.cc", "src/compiler/turboshaft/utils.cc", ] @@ -4385,6 +4463,7 @@ v8_source_set("v8_base_without_compiler") { "src/codegen/unoptimized-compilation-info.cc", "src/common/assert-scope.cc", "src/common/code-memory-access.cc", + "src/common/ptr-compr.cc", "src/compiler-dispatcher/lazy-compile-dispatcher.cc", "src/compiler-dispatcher/optimizing-compile-dispatcher.cc", "src/date/date.cc", @@ -4460,6 +4539,7 @@ v8_source_set("v8_base_without_compiler") { "src/heap/concurrent-marking.cc", "src/heap/cppgc-js/cpp-heap.cc", 
"src/heap/cppgc-js/cpp-snapshot.cc", + "src/heap/cppgc-js/cross-heap-remembered-set.cc", "src/heap/cppgc-js/unified-heap-marking-state.cc", "src/heap/cppgc-js/unified-heap-marking-verifier.cc", "src/heap/cppgc-js/unified-heap-marking-visitor.cc", @@ -4471,7 +4551,6 @@ v8_source_set("v8_base_without_compiler") { "src/heap/free-list.cc", "src/heap/gc-idle-time-handler.cc", "src/heap/gc-tracer.cc", - "src/heap/global-handle-marking-visitor.cc", "src/heap/heap-allocator.cc", "src/heap/heap-controller.cc", "src/heap/heap-layout-tracer.cc", @@ -4509,6 +4588,7 @@ v8_source_set("v8_base_without_compiler") { "src/heap/stress-marking-observer.cc", "src/heap/stress-scavenge-observer.cc", "src/heap/sweeper.cc", + "src/heap/traced-handles-marking-visitor.cc", "src/heap/weak-object-worklists.cc", "src/ic/call-optimization.cc", "src/ic/handler-configuration.cc", @@ -4755,7 +4835,6 @@ v8_source_set("v8_base_without_compiler") { if (v8_enable_maglev) { sources += [ - "src/maglev/maglev-assembler.cc", "src/maglev/maglev-code-generator.cc", "src/maglev/maglev-compilation-info.cc", "src/maglev/maglev-compilation-unit.cc", @@ -4768,10 +4847,22 @@ v8_source_set("v8_base_without_compiler") { "src/maglev/maglev-regalloc.cc", "src/maglev/maglev.cc", ] + if (v8_current_cpu == "arm64") { + sources += [ + "src/maglev/arm64/maglev-assembler-arm64.cc", + "src/maglev/arm64/maglev-ir-arm64.cc", + ] + } else if (v8_current_cpu == "x64") { + sources += [ + "src/maglev/x64/maglev-assembler-x64.cc", + "src/maglev/x64/maglev-ir-x64.cc", + ] + } } if (v8_enable_webassembly) { - sources += [ ### gcmole(all) ### + sources += [ + ### gcmole(all) ### "src/asmjs/asm-js.cc", "src/asmjs/asm-parser.cc", "src/asmjs/asm-scanner.cc", @@ -4853,7 +4944,8 @@ v8_source_set("v8_base_without_compiler") { } if (v8_current_cpu == "x86") { - sources += [ ### gcmole(arch:ia32) ### + sources += [ + ### gcmole(ia32) ### "src/codegen/ia32/assembler-ia32.cc", "src/codegen/ia32/cpu-ia32.cc", 
"src/codegen/ia32/macro-assembler-ia32.cc", @@ -4868,7 +4960,8 @@ v8_source_set("v8_base_without_compiler") { "src/regexp/ia32/regexp-macro-assembler-ia32.cc", ] } else if (v8_current_cpu == "x64") { - sources += [ ### gcmole(arch:x64) ### + sources += [ + ### gcmole(x64) ### "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc", "src/codegen/x64/assembler-x64.cc", "src/codegen/x64/cpu-x64.cc", @@ -4906,7 +4999,8 @@ v8_source_set("v8_base_without_compiler") { } } } else if (v8_current_cpu == "arm") { - sources += [ ### gcmole(arch:arm) ### + sources += [ + ### gcmole(arm) ### "src/codegen/arm/assembler-arm.cc", "src/codegen/arm/constants-arm.cc", "src/codegen/arm/cpu-arm.cc", @@ -4924,7 +5018,8 @@ v8_source_set("v8_base_without_compiler") { "src/regexp/arm/regexp-macro-assembler-arm.cc", ] } else if (v8_current_cpu == "arm64") { - sources += [ ### gcmole(arch:arm64) ### + sources += [ + ### gcmole(arm64) ### "src/codegen/arm64/assembler-arm64.cc", "src/codegen/arm64/cpu-arm64.cc", "src/codegen/arm64/decoder-arm64.cc", @@ -4971,7 +5066,8 @@ v8_source_set("v8_base_without_compiler") { sources += [ "src/diagnostics/unwinding-info-win64.cc" ] } } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") { - sources += [ ### gcmole(arch:mips64el) ### + sources += [ + ### gcmole(mips64el) ### "src/codegen/mips64/assembler-mips64.cc", "src/codegen/mips64/constants-mips64.cc", "src/codegen/mips64/cpu-mips64.cc", @@ -4988,7 +5084,8 @@ v8_source_set("v8_base_without_compiler") { "src/regexp/mips64/regexp-macro-assembler-mips64.cc", ] } else if (v8_current_cpu == "loong64") { - sources += [ ### gcmole(arch:loong64) ### + sources += [ + ### gcmole(loong64) ### "src/codegen/loong64/assembler-loong64.cc", "src/codegen/loong64/constants-loong64.cc", "src/codegen/loong64/cpu-loong64.cc", @@ -5005,7 +5102,8 @@ v8_source_set("v8_base_without_compiler") { "src/regexp/loong64/regexp-macro-assembler-loong64.cc", ] } else if (v8_current_cpu == "ppc") { - sources += 
[ ### gcmole(arch:ppc) ### + sources += [ + ### gcmole(ppc) ### "src/codegen/ppc/assembler-ppc.cc", "src/codegen/ppc/constants-ppc.cc", "src/codegen/ppc/cpu-ppc.cc", @@ -5023,7 +5121,8 @@ v8_source_set("v8_base_without_compiler") { "src/regexp/ppc/regexp-macro-assembler-ppc.cc", ] } else if (v8_current_cpu == "ppc64") { - sources += [ ### gcmole(arch:ppc64) ### + sources += [ + ### gcmole(ppc64) ### "src/codegen/ppc/assembler-ppc.cc", "src/codegen/ppc/constants-ppc.cc", "src/codegen/ppc/cpu-ppc.cc", @@ -5041,7 +5140,8 @@ v8_source_set("v8_base_without_compiler") { "src/regexp/ppc/regexp-macro-assembler-ppc.cc", ] } else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") { - sources += [ ### gcmole(arch:s390) ### + sources += [ + ### gcmole(s390) ### "src/codegen/s390/assembler-s390.cc", "src/codegen/s390/constants-s390.cc", "src/codegen/s390/cpu-s390.cc", @@ -5059,7 +5159,8 @@ v8_source_set("v8_base_without_compiler") { "src/regexp/s390/regexp-macro-assembler-s390.cc", ] } else if (v8_current_cpu == "riscv64") { - sources += [ ### gcmole(arch:riscv64) ### + sources += [ + ### gcmole(riscv64) ### "src/codegen/riscv/assembler-riscv.cc", "src/codegen/riscv/base-assembler-riscv.cc", "src/codegen/riscv/base-constants-riscv.cc", @@ -5085,7 +5186,8 @@ v8_source_set("v8_base_without_compiler") { "src/regexp/riscv/regexp-macro-assembler-riscv.cc", ] } else if (v8_current_cpu == "riscv32") { - sources += [ ### gcmole(arch:riscv32) ### + sources += [ + ### gcmole(riscv32) ### "src/codegen/riscv/assembler-riscv.cc", "src/codegen/riscv/base-assembler-riscv.cc", "src/codegen/riscv/base-constants-riscv.cc", @@ -5612,9 +5714,35 @@ v8_component("v8_libbase") { [ "//build/config/clang:llvm-symbolizer_data($host_toolchain)" ] } + if (v8_use_libm_trig_functions) { + deps += [ ":libm" ] + } + # TODO(infra): Add support for qnx, freebsd, openbsd, netbsd, and solaris. 
} +if (v8_use_libm_trig_functions) { + source_set("libm") { + sources = [ + "third_party/glibc/src/sysdeps/ieee754/dbl-64/branred.c", + "third_party/glibc/src/sysdeps/ieee754/dbl-64/branred.h", + "third_party/glibc/src/sysdeps/ieee754/dbl-64/dla.h", + "third_party/glibc/src/sysdeps/ieee754/dbl-64/endian.h", + "third_party/glibc/src/sysdeps/ieee754/dbl-64/mydefs.h", + "third_party/glibc/src/sysdeps/ieee754/dbl-64/s_sin.c", + "third_party/glibc/src/sysdeps/ieee754/dbl-64/sincostab.c", + "third_party/glibc/src/sysdeps/ieee754/dbl-64/trig.h", + "third_party/glibc/src/sysdeps/ieee754/dbl-64/usncs.h", + ] + configs += [ "//build/config/compiler:no_chromium_code" ] + configs -= [ "//build/config/compiler:chromium_code" ] + if (!is_debug) { + # Build code using -O3, see: crbug.com/1084371. + configs += [ "//build/config/compiler:optimize_speed" ] + } + } +} + v8_component("v8_libplatform") { sources = [ "//base/trace_event/common/trace_event_common.h", @@ -6124,6 +6252,8 @@ if (current_toolchain == v8_snapshot_toolchain) { "src/snapshot/embedded/platform-embedded-file-writer-win.h", "src/snapshot/mksnapshot.cc", "src/snapshot/snapshot-empty.cc", + "src/snapshot/static-roots-gen.cc", + "src/snapshot/static-roots-gen.h", ] if (v8_control_flow_integrity) { diff --git a/deps/v8/DEPS b/deps/v8/DEPS index ab0a9bf5431112..aaf6abbde2e3ae 100644 --- a/deps/v8/DEPS +++ b/deps/v8/DEPS @@ -30,8 +30,15 @@ vars = { 'checkout_instrumented_libraries': False, 'checkout_ittapi': False, + + # Fetch the prebuilt binaries for llvm-cov and llvm-profdata. Needed to + # process the raw profiles produced by instrumented targets (built with + # the gn arg 'use_clang_coverage'). + 'checkout_clang_coverage_tools': False, + # Fetch clang-tidy into the same bin/ directory as our clang binary. 
'checkout_clang_tidy': False, + 'chromium_url': 'https://chromium.googlesource.com', 'android_url': 'https://android.googlesource.com', 'download_gcmole': False, @@ -43,22 +50,22 @@ vars = { 'fuchsia_sdk_cipd_prefix': 'fuchsia/sdk/gn/', # reclient CIPD package version - 'reclient_version': 're_client_version:0.83.0.da55f4f-gomaip', + 'reclient_version': 're_client_version:0.87.0.b6908b3-gomaip', # GN CIPD package version. - 'gn_version': 'git_revision:a4d67be044b42963de801001e7146f9657c7fad4', + 'gn_version': 'git_revision:70d6c60823c0233a0f35eccc25b2b640d2980bdc', # ninja CIPD package version # https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja 'ninja_version': 'version:2@1.8.2.chromium.3', # luci-go CIPD package version. - 'luci_go': 'git_revision:f8f64a8c560d2bf68a3ad1137979d17cffb36d30', + 'luci_go': 'git_revision:bac571b5399502fa16ac48a1d3820e1117505085', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. - 'fuchsia_version': 'version:10.20221109.1.1', + 'fuchsia_version': 'version:11.20221209.0.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -91,16 +98,16 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_tools-lint_version # and whatever else without interference from each other. 
- 'android_sdk_cmdline-tools_version': 'IPzAG-uU5zVMxohpg9-7-N0tQC1TCSW1VbrBFw7Ld04C', + 'android_sdk_cmdline-tools_version': 'oWlET2yQhaPKQ66tYNuSPaueU78Z9VlxpyxOoUjwRuIC', } deps = { 'base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '521ac34ebd795939c7e16b37d9d3ddb40e8ed556', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '875cb19167f2e0d7b1eca89a4d5b5693421424c6', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '3d4b0c1e773d659da18710fc4984b8195f6d5aea', 'buildtools': - Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '49ac7cf34ab2e59a10629a7a722cfb94348c4996', + Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '202b660eb577da482fdec18173379df77147a394', 'buildtools/clang_format/script': Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + '8b525d2747f2584fc35d8c7e612e66f377858df7', 'buildtools/linux64': { @@ -124,11 +131,11 @@ deps = { 'condition': 'host_os == "mac"', }, 'buildtools/third_party/libc++/trunk': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '4218f3525ad438b22b0e173d963515a09d143398', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '52399655fdafdd14ade17ab12ddc9e955423aa5a', 'buildtools/third_party/libc++abi/trunk': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '1a32724f721e1c3b6c590a07fe4a954344f15e48', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '25a3d07096374aeeffa3dab8b582143dde5a9ca9', 'buildtools/third_party/libunwind/trunk': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'a318d6a4c283a9d342d2a1e20292c1496fe12997', + Var('chromium_url') + 
'/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '09a1f53060bc601b5ee821d7ab52071eed096fda', 'buildtools/win': { 'packages': [ { @@ -154,7 +161,7 @@ deps = { 'test/mozilla/data': Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be', 'test/test262/data': - Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'ade328d530525333751e8a3b58f02e18624da085', + Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'e6c6460a5b94e32e01ce9a9d236f3148d4648ce5', 'third_party/android_ndk': { 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '8388a2be5421311dc75c5f937aae13d821a27f3d', 'condition': 'checkout_android', @@ -202,7 +209,7 @@ deps = { 'dep_type': 'cipd', }, 'third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + 'f0b11967c94cba8f7cca91d2da20c98d4420fc25', + 'url': Var('chromium_url') + '/catapult.git' + '@' + '2f1cf6121c17b31d4607afbaec37f33b0526cfc4', 'condition': 'checkout_android', }, 'third_party/colorama/src': { @@ -210,7 +217,7 @@ deps = { 'condition': 'checkout_android', }, 'third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'ae1a70891738fb14f64fbb884e00b87ac663aa15', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'a964ca1296b9238d0797aa5f25597efa7b897515', 'third_party/fuchsia-sdk/sdk': { 'packages': [ { @@ -227,9 +234,9 @@ deps = { 'third_party/googletest/src': Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'af29db7ec28d6df1c7f0f745186884091e602e07', 'third_party/icu': - Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'da07448619763d1cde255b361324242646f5b268', + Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '1b7d391f0528fb3a4976b7541b387ee04f915f83', 'third_party/instrumented_libraries': - Var('chromium_url') + 
'/chromium/src/third_party/instrumented_libraries.git' + '@' + '7bb87a375ffc3effd17a50f690099dcfb9ee280b', + Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '09ba70cfb2c0d01c60684660e357ae200caf2968', 'third_party/ittapi': { # Force checkout ittapi libraries to pass v8 header includes check on # bots that has check_v8_header_includes enabled. @@ -263,9 +270,9 @@ deps = { 'condition': 'checkout_android', }, 'third_party/zlib': - Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '8bbd6c3129b5146489f2321f054e855c347857f4', + Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '18d27fa10b237fdfcbd8f0c65c19fe009981a3bc', 'tools/clang': - Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'd3df9cc5362e0af4cda798b0612dde39783b3dc0', + Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '3344dd8997f422862a1c5477b490b3611be31351', 'tools/luci-go': { 'packages': [ { @@ -499,7 +506,7 @@ hooks = [ '--arch=x64'], }, { - 'name': 'msan_chained_origins', + 'name': 'msan_chained_origins_xenial', 'pattern': '.', 'condition': 'checkout_instrumented_libraries', 'action': [ 'python3', @@ -507,11 +514,11 @@ hooks = [ '--no_resume', '--no_auth', '--bucket', 'chromium-instrumented-libraries', - '-s', 'third_party/instrumented_libraries/binaries/msan-chained-origins.tgz.sha1', + '-s', 'third_party/instrumented_libraries/binaries/msan-chained-origins-xenial.tgz.sha1', ], }, { - 'name': 'msan_no_origins', + 'name': 'msan_no_origins_xenial', 'pattern': '.', 'condition': 'checkout_instrumented_libraries', 'action': [ 'python3', @@ -519,7 +526,7 @@ hooks = [ '--no_resume', '--no_auth', '--bucket', 'chromium-instrumented-libraries', - '-s', 'third_party/instrumented_libraries/binaries/msan-no-origins.tgz.sha1', + '-s', 'third_party/instrumented_libraries/binaries/msan-no-origins-xenial.tgz.sha1', ], }, { @@ -557,6 +564,14 @@ hooks = [ 'condition': 'host_os != "aix"', 'action': ['python3', 
'tools/clang/scripts/update.py'], }, + { + # This is supposed to support the same set of platforms as 'clang' above. + 'name': 'clang_coverage', + 'pattern': '.', + 'condition': 'checkout_clang_coverage_tools', + 'action': ['python3', 'tools/clang/scripts/update.py', + '--package=coverage_tools'], + }, { 'name': 'clang_tidy', 'pattern': '.', diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py index 03db4c126f346d..5c31d4dfabd776 100644 --- a/deps/v8/PRESUBMIT.py +++ b/deps/v8/PRESUBMIT.py @@ -85,6 +85,7 @@ def _V8PresubmitChecks(input_api, output_api): sys.path.append(input_api.os_path.join( input_api.PresubmitLocalPath(), 'tools')) from v8_presubmit import CppLintProcessor + from v8_presubmit import GCMoleProcessor from v8_presubmit import JSLintProcessor from v8_presubmit import TorqueLintProcessor from v8_presubmit import SourceProcessor @@ -126,6 +127,9 @@ def FilterJSFile(affected_file): if not StatusFilesProcessor().RunOnFiles( input_api.AffectedFiles(include_deletes=True)): results.append(output_api.PresubmitError("Status file check failed")) + if not GCMoleProcessor().RunOnFiles( + input_api.AffectedFiles(include_deletes=False)): + results.append(output_api.PresubmitError("GCMole pattern check failed")) results.extend(input_api.canned_checks.CheckAuthorizedAuthor( input_api, output_api, bot_allowlist=[ 'v8-ci-autoroll-builder@chops-service-accounts.iam.gserviceaccount.com' @@ -257,8 +261,9 @@ def FilterFile(affected_file): files_to_check=(file_inclusion_pattern, ), files_to_skip=files_to_skip) - leading_src_pattern = input_api.re.compile(r'^src/') - dash_dot_slash_pattern = input_api.re.compile(r'[-./]') + leading_src_pattern = input_api.re.compile(r'^src[\\\/]') + dash_dot_slash_pattern = input_api.re.compile(r'[-.\\\/]') + def PathToGuardMacro(path): """Guards should be of the form V8_PATH_TO_FILE_WITHOUT_SRC_H_.""" x = input_api.re.sub(leading_src_pattern, 'v8_', path) diff --git a/deps/v8/bazel/config/BUILD.bazel b/deps/v8/bazel/config/BUILD.bazel 
index 448260de88465d..7af7ccb6067883 100644 --- a/deps/v8/bazel/config/BUILD.bazel +++ b/deps/v8/bazel/config/BUILD.bazel @@ -22,6 +22,13 @@ config_setting( }, ) +config_setting( + name = "is_opt", + values = { + "compilation_mode": "opt", + }, +) + config_setting( name = "is_debug", values = { diff --git a/deps/v8/bazel/defs.bzl b/deps/v8/bazel/defs.bzl index 77e86ef6f2e646..8c3e3b9d1d9a37 100644 --- a/deps/v8/bazel/defs.bzl +++ b/deps/v8/bazel/defs.bzl @@ -152,6 +152,14 @@ def _default_args(): ], "//conditions:default": [], }), + cxxopts = select({ + "//third_party/v8/HEAD/google3/config:is_opt": [ + "-fvisibility=hidden", + "-fvisibility-inlines-hidden", + ], + "//conditions:default": [ + ], + }), includes = ["include"], linkopts = select({ "@v8//bazel/config:is_windows": [ diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni index 3f093597faccdc..f297eb379bc230 100644 --- a/deps/v8/gni/v8.gni +++ b/deps/v8/gni/v8.gni @@ -107,6 +107,9 @@ declare_args() { # Enable advanced BigInt algorithms, costing about 10-30 KB binary size # depending on platform. Disabled on Android to save binary size. v8_advanced_bigint_algorithms = !is_android + + # TODO: macros for determining endian type are clang specific. 
+ v8_use_libm_trig_functions = is_clang } if (v8_use_external_startup_data == "") { diff --git a/deps/v8/include/libplatform/v8-tracing.h b/deps/v8/include/libplatform/v8-tracing.h index 12489327c54210..6039a9c520b6a3 100644 --- a/deps/v8/include/libplatform/v8-tracing.h +++ b/deps/v8/include/libplatform/v8-tracing.h @@ -282,12 +282,12 @@ class V8_PLATFORM_EXPORT TracingController const char* name, uint64_t handle) override; static const char* GetCategoryGroupName(const uint8_t* category_enabled_flag); -#endif // !defined(V8_USE_PERFETTO) void AddTraceStateObserver( v8::TracingController::TraceStateObserver* observer) override; void RemoveTraceStateObserver( v8::TracingController::TraceStateObserver* observer) override; +#endif // !defined(V8_USE_PERFETTO) void StartTracing(TraceConfig* trace_config); void StopTracing(); @@ -307,7 +307,6 @@ class V8_PLATFORM_EXPORT TracingController std::unique_ptr mutex_; std::unique_ptr trace_config_; std::atomic_bool recording_{false}; - std::unordered_set observers_; #if defined(V8_USE_PERFETTO) std::ostream* output_stream_ = nullptr; @@ -316,6 +315,7 @@ class V8_PLATFORM_EXPORT TracingController TraceEventListener* listener_for_testing_ = nullptr; std::unique_ptr tracing_session_; #else // !defined(V8_USE_PERFETTO) + std::unordered_set observers_; std::unique_ptr trace_buffer_; #endif // !defined(V8_USE_PERFETTO) diff --git a/deps/v8/include/v8-array-buffer.h b/deps/v8/include/v8-array-buffer.h index 841bd02a793333..804fc42c4b56dd 100644 --- a/deps/v8/include/v8-array-buffer.h +++ b/deps/v8/include/v8-array-buffer.h @@ -53,12 +53,28 @@ class V8_EXPORT BackingStore : public v8::internal::BackingStoreBase { */ size_t ByteLength() const; + /** + * The maximum length (in bytes) that this backing store may grow to. + * + * If this backing store was created for a resizable ArrayBuffer or a growable + * SharedArrayBuffer, it is >= ByteLength(). Otherwise it is == + * ByteLength(). 
+ */ + size_t MaxByteLength() const; + /** * Indicates whether the backing store was created for an ArrayBuffer or * a SharedArrayBuffer. */ bool IsShared() const; + /** + * Indicates whether the backing store was created for a resizable ArrayBuffer + * or a growable SharedArrayBuffer, and thus may be resized by user JavaScript + * code. + */ + bool IsResizableByUserJavaScript() const; + /** * Prevent implicit instantiation of operator delete with size_t argument. * The size_t argument would be incorrect because ptr points to the @@ -189,6 +205,11 @@ class V8_EXPORT ArrayBuffer : public Object { */ size_t ByteLength() const; + /** + * Maximum length in bytes. + */ + size_t MaxByteLength() const; + /** * Create a new ArrayBuffer. Allocate |byte_length| bytes. * Allocated memory will be owned by a created ArrayBuffer and @@ -235,6 +256,21 @@ class V8_EXPORT ArrayBuffer : public Object { void* data, size_t byte_length, v8::BackingStore::DeleterCallback deleter, void* deleter_data); + /** + * Returns a new resizable standalone BackingStore that is allocated using the + * array buffer allocator of the isolate. The result can be later passed to + * ArrayBuffer::New. + * + * |byte_length| must be <= |max_byte_length|. + * + * This function is usable without an isolate. Unlike |NewBackingStore| calls + * with an isolate, GCs cannot be triggered, and there are no + * retries. Allocation failure will cause the function to crash with an + * out-of-memory error. + */ + static std::unique_ptr NewResizableBackingStore( + size_t byte_length, size_t max_byte_length); + /** * Returns true if this ArrayBuffer may be detached. */ @@ -392,6 +428,11 @@ class V8_EXPORT SharedArrayBuffer : public Object { */ size_t ByteLength() const; + /** + * Maximum length in bytes. + */ + size_t MaxByteLength() const; + /** * Create a new SharedArrayBuffer. Allocate |byte_length| bytes. 
* Allocated memory will be owned by a created SharedArrayBuffer and diff --git a/deps/v8/include/v8-cppgc.h b/deps/v8/include/v8-cppgc.h index 3a6582cd258496..4a457027c9f76b 100644 --- a/deps/v8/include/v8-cppgc.h +++ b/deps/v8/include/v8-cppgc.h @@ -77,6 +77,12 @@ struct WrapperDescriptor final { }; struct V8_EXPORT CppHeapCreateParams { + CppHeapCreateParams( + std::vector> custom_spaces, + WrapperDescriptor wrapper_descriptor) + : custom_spaces(std::move(custom_spaces)), + wrapper_descriptor(wrapper_descriptor) {} + CppHeapCreateParams(const CppHeapCreateParams&) = delete; CppHeapCreateParams& operator=(const CppHeapCreateParams&) = delete; diff --git a/deps/v8/include/v8-fast-api-calls.h b/deps/v8/include/v8-fast-api-calls.h index 9ea43fe2535397..0fe7cd2489b05e 100644 --- a/deps/v8/include/v8-fast-api-calls.h +++ b/deps/v8/include/v8-fast-api-calls.h @@ -247,6 +247,7 @@ class CTypeInfo { kUint64, kFloat32, kFloat64, + kPointer, kV8Value, kSeqOneByteString, kApiObject, // This will be deprecated once all users have @@ -435,6 +436,7 @@ struct AnyCType { uint64_t uint64_value; float float_value; double double_value; + void* pointer_value; Local object_value; Local sequence_value; const FastApiTypedArray* uint8_ta_value; @@ -620,6 +622,7 @@ class CFunctionInfoImpl : public CFunctionInfo { kReturnType == CTypeInfo::Type::kUint32 || kReturnType == CTypeInfo::Type::kFloat32 || kReturnType == CTypeInfo::Type::kFloat64 || + kReturnType == CTypeInfo::Type::kPointer || kReturnType == CTypeInfo::Type::kAny, "64-bit int, string and api object values are not currently " "supported return types."); @@ -658,13 +661,14 @@ struct CTypeInfoTraits {}; #define PRIMITIVE_C_TYPES(V) \ V(bool, kBool) \ + V(uint8_t, kUint8) \ V(int32_t, kInt32) \ V(uint32_t, kUint32) \ V(int64_t, kInt64) \ V(uint64_t, kUint64) \ V(float, kFloat32) \ V(double, kFloat64) \ - V(uint8_t, kUint8) + V(void*, kPointer) // Same as above, but includes deprecated types for compatibility. 
#define ALL_C_TYPES(V) \ @@ -698,13 +702,13 @@ PRIMITIVE_C_TYPES(DEFINE_TYPE_INFO_TRAITS) }; #define TYPED_ARRAY_C_TYPES(V) \ + V(uint8_t, kUint8) \ V(int32_t, kInt32) \ V(uint32_t, kUint32) \ V(int64_t, kInt64) \ V(uint64_t, kUint64) \ V(float, kFloat32) \ - V(double, kFloat64) \ - V(uint8_t, kUint8) + V(double, kFloat64) TYPED_ARRAY_C_TYPES(SPECIALIZE_GET_TYPE_INFO_HELPER_FOR_TA) diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h index aa5a044afb61c4..563ad196d6e607 100644 --- a/deps/v8/include/v8-inspector.h +++ b/deps/v8/include/v8-inspector.h @@ -32,19 +32,19 @@ namespace Debugger { namespace API { class SearchMatch; } -} +} // namespace Debugger namespace Runtime { namespace API { class RemoteObject; class StackTrace; class StackTraceId; -} -} +} // namespace API +} // namespace Runtime namespace Schema { namespace API { class Domain; } -} +} // namespace Schema } // namespace protocol class V8_EXPORT StringView { @@ -134,6 +134,13 @@ class V8_EXPORT V8DebuggerId { int64_t m_second = 0; }; +struct V8_EXPORT V8StackFrame { + StringView sourceURL; + StringView functionName; + int lineNumber; + int columnNumber; +}; + class V8_EXPORT V8StackTrace { public: virtual StringView firstNonEmptySourceURL() const = 0; @@ -151,6 +158,8 @@ class V8_EXPORT V8StackTrace { // Safe to pass between threads, drops async chain. virtual std::unique_ptr clone() = 0; + + virtual std::vector frames() const = 0; }; class V8_EXPORT V8InspectorSession { @@ -203,6 +212,9 @@ class V8_EXPORT V8InspectorSession { std::unique_ptr* objectGroup) = 0; virtual void releaseObjectGroup(StringView) = 0; virtual void triggerPreciseCoverageDeltaUpdate(StringView occasion) = 0; + + // Prepare for shutdown (disables debugger pausing, etc.). 
+ virtual void stop() = 0; }; class V8_EXPORT WebDriverValue { @@ -365,9 +377,12 @@ class V8_EXPORT V8Inspector { virtual void flushProtocolNotifications() = 0; }; enum ClientTrustLevel { kUntrusted, kFullyTrusted }; + enum SessionPauseState { kWaitingForDebugger, kNotWaitingForDebugger }; + // TODO(chromium:1352175): remove default value once downstream change lands. virtual std::unique_ptr connect( int contextGroupId, Channel*, StringView state, - ClientTrustLevel client_trust_level) { + ClientTrustLevel client_trust_level, + SessionPauseState = kNotWaitingForDebugger) { return nullptr; } diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h index a4b8ee0824d81d..c56130300f4d17 100644 --- a/deps/v8/include/v8-internal.h +++ b/deps/v8/include/v8-internal.h @@ -345,6 +345,14 @@ using ExternalPointer_t = Address; // that the Embedder is not using this byte (really only this one bit) for any // other purpose. This bit also does not collide with the memory tagging // extension (MTE) which would use bits [56, 60). +// +// External pointer tables are also available even when the sandbox is off but +// pointer compression is on. In that case, the mechanism can be used to easy +// alignment requirements as it turns unaligned 64-bit raw pointers into +// aligned 32-bit indices. To "opt-in" to the external pointer table mechanism +// for this purpose, instead of using the ExternalPointer accessors one needs to +// use ExternalPointerHandles directly and use them to access the pointers in an +// ExternalPointerTable. 
constexpr uint64_t kExternalPointerMarkBit = 1ULL << 62; constexpr uint64_t kExternalPointerTagMask = 0x40ff000000000000; constexpr uint64_t kExternalPointerTagShift = 48; @@ -367,71 +375,58 @@ constexpr uint64_t kAllExternalPointerTypeTags[] = { 0b11001100, 0b11010001, 0b11010010, 0b11010100, 0b11011000, 0b11100001, 0b11100010, 0b11100100, 0b11101000, 0b11110000}; +#define TAG(i) \ + ((kAllExternalPointerTypeTags[i] << kExternalPointerTagShift) | \ + kExternalPointerMarkBit) + // clang-format off -// New entries should be added with state "sandboxed". + // When adding new tags, please ensure that the code using these tags is // "substitution-safe", i.e. still operate safely if external pointers of the // same type are swapped by an attacker. See comment above for more details. -#define TAG(i) (kAllExternalPointerTypeTags[i]) // Shared external pointers are owned by the shared Isolate and stored in the // shared external pointer table associated with that Isolate, where they can // be accessed from multiple threads at the same time. The objects referenced // in this way must therefore always be thread-safe. -#define SHARED_EXTERNAL_POINTER_TAGS(V) \ - V(kFirstSharedTag, sandboxed, TAG(0)) \ - V(kWaiterQueueNodeTag, sandboxed, TAG(0)) \ - V(kExternalStringResourceTag, sandboxed, TAG(1)) \ - V(kExternalStringResourceDataTag, sandboxed, TAG(2)) \ - V(kLastSharedTag, sandboxed, TAG(2)) +#define SHARED_EXTERNAL_POINTER_TAGS(V) \ + V(kFirstSharedTag, TAG(0)) \ + V(kWaiterQueueNodeTag, TAG(0)) \ + V(kExternalStringResourceTag, TAG(1)) \ + V(kExternalStringResourceDataTag, TAG(2)) \ + V(kLastSharedTag, TAG(2)) // External pointers using these tags are kept in a per-Isolate external // pointer table and can only be accessed when this Isolate is active. 
-#define PER_ISOLATE_EXTERNAL_POINTER_TAGS(V) \ - V(kForeignForeignAddressTag, sandboxed, TAG(10)) \ - V(kNativeContextMicrotaskQueueTag, sandboxed, TAG(11)) \ - V(kEmbedderDataSlotPayloadTag, sandboxed, TAG(12)) \ - V(kExternalObjectValueTag, sandboxed, TAG(13)) \ - V(kCallHandlerInfoCallbackTag, sandboxed, TAG(14)) \ - V(kAccessorInfoGetterTag, sandboxed, TAG(15)) \ - V(kAccessorInfoSetterTag, sandboxed, TAG(16)) \ - V(kWasmInternalFunctionCallTargetTag, sandboxed, TAG(17)) \ - V(kWasmTypeInfoNativeTypeTag, sandboxed, TAG(18)) \ - V(kWasmExportedFunctionDataSignatureTag, sandboxed, TAG(19)) \ - V(kWasmContinuationJmpbufTag, sandboxed, TAG(20)) \ - V(kArrayBufferExtensionTag, sandboxed, TAG(21)) +#define PER_ISOLATE_EXTERNAL_POINTER_TAGS(V) \ + V(kForeignForeignAddressTag, TAG(10)) \ + V(kNativeContextMicrotaskQueueTag, TAG(11)) \ + V(kEmbedderDataSlotPayloadTag, TAG(12)) \ +/* This tag essentially stands for a `void*` pointer in the V8 API, and */ \ +/* it is the Embedder's responsibility to ensure type safety (against */ \ +/* substitution) and lifetime validity of these objects. */ \ + V(kExternalObjectValueTag, TAG(13)) \ + V(kCallHandlerInfoCallbackTag, TAG(14)) \ + V(kAccessorInfoGetterTag, TAG(15)) \ + V(kAccessorInfoSetterTag, TAG(16)) \ + V(kWasmInternalFunctionCallTargetTag, TAG(17)) \ + V(kWasmTypeInfoNativeTypeTag, TAG(18)) \ + V(kWasmExportedFunctionDataSignatureTag, TAG(19)) \ + V(kWasmContinuationJmpbufTag, TAG(20)) \ + V(kArrayBufferExtensionTag, TAG(21)) // All external pointer tags. #define ALL_EXTERNAL_POINTER_TAGS(V) \ SHARED_EXTERNAL_POINTER_TAGS(V) \ PER_ISOLATE_EXTERNAL_POINTER_TAGS(V) -// When the sandbox is enabled, external pointers marked as "sandboxed" above -// use the external pointer table (i.e. are sandboxed). This allows a gradual -// rollout of external pointer sandboxing. If the sandbox is off, no external -// pointers are sandboxed. 
-// -// Sandboxed external pointer tags are available when compressing pointers even -// when the sandbox is off. Some tags (e.g. kWaiterQueueNodeTag) are used -// manually with the external pointer table even when the sandbox is off to ease -// alignment requirements. -#define sandboxed(X) (X << kExternalPointerTagShift) | kExternalPointerMarkBit -#define unsandboxed(X) kUnsandboxedExternalPointerTag -#if defined(V8_COMPRESS_POINTERS) -#define EXTERNAL_POINTER_TAG_ENUM(Name, State, Bits) Name = State(Bits), -#else -#define EXTERNAL_POINTER_TAG_ENUM(Name, State, Bits) Name = unsandboxed(Bits), -#endif - +#define EXTERNAL_POINTER_TAG_ENUM(Name, Tag) Name = Tag, #define MAKE_TAG(HasMarkBit, TypeTag) \ ((static_cast(TypeTag) << kExternalPointerTagShift) | \ (HasMarkBit ? kExternalPointerMarkBit : 0)) enum ExternalPointerTag : uint64_t { // Empty tag value. Mostly used as placeholder. kExternalPointerNullTag = MAKE_TAG(0, 0b00000000), - // Tag to use for unsandboxed external pointers, which are still stored as - // raw pointers on the heap. - kUnsandboxedExternalPointerTag = MAKE_TAG(0, 0b00000000), // External pointer tag that will match any external pointer. Use with care! kAnyExternalPointerTag = MAKE_TAG(1, 0b11111111), // The free entry tag has all type bits set so every type check with a @@ -445,20 +440,11 @@ enum ExternalPointerTag : uint64_t { }; #undef MAKE_TAG -#undef unsandboxed -#undef sandboxed #undef TAG #undef EXTERNAL_POINTER_TAG_ENUM // clang-format on -// True if the external pointer is sandboxed and so must be referenced through -// an external pointer table. -V8_INLINE static constexpr bool IsSandboxedExternalPointerType( - ExternalPointerTag tag) { - return tag != kUnsandboxedExternalPointerTag; -} - // True if the external pointer must be accessed from the shared isolate's // external pointer table. 
V8_INLINE static constexpr bool IsSharedExternalPointerType( @@ -467,12 +453,10 @@ V8_INLINE static constexpr bool IsSharedExternalPointerType( } // Sanity checks. -#define CHECK_SHARED_EXTERNAL_POINTER_TAGS(Tag, ...) \ - static_assert(!IsSandboxedExternalPointerType(Tag) || \ - IsSharedExternalPointerType(Tag)); +#define CHECK_SHARED_EXTERNAL_POINTER_TAGS(Tag, ...) \ + static_assert(IsSharedExternalPointerType(Tag)); #define CHECK_NON_SHARED_EXTERNAL_POINTER_TAGS(Tag, ...) \ - static_assert(!IsSandboxedExternalPointerType(Tag) || \ - !IsSharedExternalPointerType(Tag)); + static_assert(!IsSharedExternalPointerType(Tag)); SHARED_EXTERNAL_POINTER_TAGS(CHECK_SHARED_EXTERNAL_POINTER_TAGS) PER_ISOLATE_EXTERNAL_POINTER_TAGS(CHECK_NON_SHARED_EXTERNAL_POINTER_TAGS) @@ -545,7 +529,7 @@ class Internals { static const int kVariousBooleanFlagsOffset = kIsolateStackGuardOffset + kStackGuardSize; static const int kBuiltinTier0EntryTableOffset = - kVariousBooleanFlagsOffset + kApiSystemPointerSize; + kVariousBooleanFlagsOffset + 8; static const int kBuiltinTier0TableOffset = kBuiltinTier0EntryTableOffset + kBuiltinTier0EntryTableSize; static const int kIsolateEmbedderDataOffset = @@ -793,24 +777,23 @@ class Internals { V8_INLINE static internal::Address ReadExternalPointerField( v8::Isolate* isolate, internal::Address heap_object_ptr, int offset) { #ifdef V8_ENABLE_SANDBOX - if (IsSandboxedExternalPointerType(tag)) { - // See src/sandbox/external-pointer-table-inl.h. Logic duplicated here so - // it can be inlined and doesn't require an additional call. - internal::Address* table = - IsSharedExternalPointerType(tag) - ? 
GetSharedExternalPointerTableBase(isolate) - : GetExternalPointerTableBase(isolate); - internal::ExternalPointerHandle handle = - ReadRawField(heap_object_ptr, offset); - uint32_t index = handle >> kExternalPointerIndexShift; - std::atomic* ptr = - reinterpret_cast*>(&table[index]); - internal::Address entry = - std::atomic_load_explicit(ptr, std::memory_order_relaxed); - return entry & ~tag; - } -#endif + static_assert(tag != kExternalPointerNullTag); + // See src/sandbox/external-pointer-table-inl.h. Logic duplicated here so + // it can be inlined and doesn't require an additional call. + internal::Address* table = IsSharedExternalPointerType(tag) + ? GetSharedExternalPointerTableBase(isolate) + : GetExternalPointerTableBase(isolate); + internal::ExternalPointerHandle handle = + ReadRawField(heap_object_ptr, offset); + uint32_t index = handle >> kExternalPointerIndexShift; + std::atomic* ptr = + reinterpret_cast*>(&table[index]); + internal::Address entry = + std::atomic_load_explicit(ptr, std::memory_order_relaxed); + return entry & ~tag; +#else return ReadRawField
(heap_object_ptr, offset); +#endif // V8_ENABLE_SANDBOX } #ifdef V8_COMPRESS_POINTERS diff --git a/deps/v8/include/v8-isolate.h b/deps/v8/include/v8-isolate.h index e9f531973b2eef..9659300751ee11 100644 --- a/deps/v8/include/v8-isolate.h +++ b/deps/v8/include/v8-isolate.h @@ -233,7 +233,7 @@ class V8_EXPORT Isolate { * Explicitly specify a startup snapshot blob. The embedder owns the blob. * The embedder *must* ensure that the snapshot is from a trusted source. */ - StartupData* snapshot_blob = nullptr; + const StartupData* snapshot_blob = nullptr; /** * Enables the host application to provide a mechanism for recording diff --git a/deps/v8/include/v8-metrics.h b/deps/v8/include/v8-metrics.h index 887012ac8c3a0b..fc21239f0eca52 100644 --- a/deps/v8/include/v8-metrics.h +++ b/deps/v8/include/v8-metrics.h @@ -12,6 +12,7 @@ #include "v8-internal.h" // NOLINT(build/include_directory) #include "v8-local-handle.h" // NOLINT(build/include_directory) +#include "v8config.h" // NOLINT(build/include_directory) namespace v8 { @@ -96,16 +97,88 @@ struct GarbageCollectionYoungCycle { }; struct WasmModuleDecoded { + WasmModuleDecoded() = default; + WasmModuleDecoded(bool async, bool streamed, bool success, + size_t module_size_in_bytes, size_t function_count, + int64_t wall_clock_duration_in_us) + : async(async), + streamed(streamed), + success(success), + module_size_in_bytes(module_size_in_bytes), + function_count(function_count), + wall_clock_duration_in_us(wall_clock_duration_in_us) {} + + V8_DEPRECATE_SOON("Use the version without cpu_duration_in_us") + WasmModuleDecoded(bool async, bool streamed, bool success, + size_t module_size_in_bytes, size_t function_count, + int64_t wall_clock_duration_in_us, + int64_t cpu_duration_in_us) + : async(async), + streamed(streamed), + success(success), + module_size_in_bytes(module_size_in_bytes), + function_count(function_count), + wall_clock_duration_in_us(wall_clock_duration_in_us), + cpu_duration_in_us(cpu_duration_in_us) {} + + 
START_ALLOW_USE_DEPRECATED() + // Copy constructor and copy assignment operator are allowed to copy the + // {cpu_duration_in_us} field. + WasmModuleDecoded(const WasmModuleDecoded&) = default; + WasmModuleDecoded& operator=(const WasmModuleDecoded&) = default; + END_ALLOW_USE_DEPRECATED() + bool async = false; bool streamed = false; bool success = false; size_t module_size_in_bytes = 0; size_t function_count = 0; int64_t wall_clock_duration_in_us = -1; + V8_DEPRECATE_SOON("We do not collect cpu times any more") int64_t cpu_duration_in_us = -1; }; struct WasmModuleCompiled { + WasmModuleCompiled() = default; + + WasmModuleCompiled(bool async, bool streamed, bool cached, bool deserialized, + bool lazy, bool success, size_t code_size_in_bytes, + size_t liftoff_bailout_count, + int64_t wall_clock_duration_in_us) + : async(async), + streamed(streamed), + cached(cached), + deserialized(deserialized), + lazy(lazy), + success(success), + code_size_in_bytes(code_size_in_bytes), + liftoff_bailout_count(liftoff_bailout_count), + wall_clock_duration_in_us(wall_clock_duration_in_us) {} + + V8_DEPRECATE_SOON("Use the version without cpu_duration_in_us") + WasmModuleCompiled(bool async, bool streamed, bool cached, bool deserialized, + bool lazy, bool success, size_t code_size_in_bytes, + size_t liftoff_bailout_count, + int64_t wall_clock_duration_in_us, + int64_t cpu_duration_in_us) + : async(async), + streamed(streamed), + cached(cached), + deserialized(deserialized), + lazy(lazy), + success(success), + code_size_in_bytes(code_size_in_bytes), + liftoff_bailout_count(liftoff_bailout_count), + wall_clock_duration_in_us(wall_clock_duration_in_us), + cpu_duration_in_us(cpu_duration_in_us) {} + + START_ALLOW_USE_DEPRECATED() + // Copy constructor and copy assignment operator are allowed to copy the + // {cpu_duration_in_us} field. 
+ WasmModuleCompiled(const WasmModuleCompiled&) = default; + WasmModuleCompiled& operator=(const WasmModuleCompiled&) = default; + END_ALLOW_USE_DEPRECATED() + bool async = false; bool streamed = false; bool cached = false; @@ -115,6 +188,7 @@ struct WasmModuleCompiled { size_t code_size_in_bytes = 0; size_t liftoff_bailout_count = 0; int64_t wall_clock_duration_in_us = -1; + V8_DEPRECATE_SOON("We do not collect cpu times any more") int64_t cpu_duration_in_us = -1; }; diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h index 32a82f881e7f93..32898e7ef92079 100644 --- a/deps/v8/include/v8-platform.h +++ b/deps/v8/include/v8-platform.h @@ -285,6 +285,8 @@ class ConvertableToTraceFormat { * V8 Tracing controller. * * Can be implemented by an embedder to record trace events from V8. + * + * Will become obsolete in Perfetto SDK build (v8_use_perfetto = true). */ class TracingController { public: @@ -348,10 +350,16 @@ class TracingController { virtual void OnTraceDisabled() = 0; }; - /** Adds tracing state change observer. */ + /** + * Adds tracing state change observer. + * Does nothing in Perfetto SDK build (v8_use_perfetto = true). + */ virtual void AddTraceStateObserver(TraceStateObserver*) {} - /** Removes tracing state change observer. */ + /** + * Removes tracing state change observer. + * Does nothing in Perfetto SDK build (v8_use_perfetto = true). + */ virtual void RemoveTraceStateObserver(TraceStateObserver*) {} }; diff --git a/deps/v8/include/v8-snapshot.h b/deps/v8/include/v8-snapshot.h index 2400357cf6e069..b15a2b19220f10 100644 --- a/deps/v8/include/v8-snapshot.h +++ b/deps/v8/include/v8-snapshot.h @@ -91,7 +91,7 @@ class V8_EXPORT SnapshotCreator { */ SnapshotCreator(Isolate* isolate, const intptr_t* external_references = nullptr, - StartupData* existing_blob = nullptr); + const StartupData* existing_blob = nullptr); /** * Create and enter an isolate, and set it up for serialization. 
@@ -102,7 +102,7 @@ class V8_EXPORT SnapshotCreator { * that must be equivalent to CreateParams::external_references. */ SnapshotCreator(const intptr_t* external_references = nullptr, - StartupData* existing_blob = nullptr); + const StartupData* existing_blob = nullptr); /** * Destroy the snapshot creator, and exit and dispose of the Isolate diff --git a/deps/v8/include/v8-template.h b/deps/v8/include/v8-template.h index 669012a9814465..11296cd48896dd 100644 --- a/deps/v8/include/v8-template.h +++ b/deps/v8/include/v8-template.h @@ -30,7 +30,9 @@ class Signature; F(AsyncIteratorPrototype, initial_async_iterator_prototype) \ F(ErrorPrototype, initial_error_prototype) \ F(IteratorPrototype, initial_iterator_prototype) \ - F(ObjProto_valueOf, object_value_of_function) + F(MapIteratorPrototype, initial_map_iterator_prototype) \ + F(ObjProto_valueOf, object_value_of_function) \ + F(SetIteratorPrototype, initial_set_iterator_prototype) enum Intrinsic { #define V8_DECL_INTRINSIC(name, iname) k##name, diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h index a9d6f92aff4599..cc8af8edb3d539 100644 --- a/deps/v8/include/v8-version.h +++ b/deps/v8/include/v8-version.h @@ -8,10 +8,10 @@ // These macros define the version number for the current version. // NOTE these macros are used by some of the tool scripts and the build // system so their names cannot be changed without changing the scripts. -#define V8_MAJOR_VERSION 10 -#define V8_MINOR_VERSION 9 -#define V8_BUILD_NUMBER 194 -#define V8_PATCH_LEVEL 9 +#define V8_MAJOR_VERSION 11 +#define V8_MINOR_VERSION 0 +#define V8_BUILD_NUMBER 226 +#define V8_PATCH_LEVEL 12 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h index 2ac27b36b47ee3..c79cd04e0f16bc 100644 --- a/deps/v8/include/v8config.h +++ b/deps/v8/include/v8config.h @@ -308,6 +308,9 @@ path. 
Add it with -I to the command line // V8_HAS_BUILTIN_EXPECT - __builtin_expect() supported // V8_HAS_BUILTIN_FRAME_ADDRESS - __builtin_frame_address() supported // V8_HAS_BUILTIN_POPCOUNT - __builtin_popcount() supported +// V8_HAS_BUILTIN_ADD_OVERFLOW - __builtin_add_overflow() supported +// V8_HAS_BUILTIN_SUB_OVERFLOW - __builtin_sub_overflow() supported +// V8_HAS_BUILTIN_MUL_OVERFLOW - __builtin_mul_overflow() supported // V8_HAS_BUILTIN_SADD_OVERFLOW - __builtin_sadd_overflow() supported // V8_HAS_BUILTIN_SSUB_OVERFLOW - __builtin_ssub_overflow() supported // V8_HAS_BUILTIN_UADD_OVERFLOW - __builtin_uadd_overflow() supported @@ -339,6 +342,7 @@ path. Add it with -I to the command line # define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(always_inline)) # define V8_HAS_ATTRIBUTE_CONSTINIT \ (__has_attribute(require_constant_initialization)) +# define V8_HAS_ATTRIBUTE_CONST (__has_attribute(const)) # define V8_HAS_ATTRIBUTE_NONNULL (__has_attribute(nonnull)) # define V8_HAS_ATTRIBUTE_NOINLINE (__has_attribute(noinline)) # define V8_HAS_ATTRIBUTE_UNUSED (__has_attribute(unused)) @@ -360,6 +364,9 @@ path. Add it with -I to the command line # define V8_HAS_BUILTIN_EXPECT (__has_builtin(__builtin_expect)) # define V8_HAS_BUILTIN_FRAME_ADDRESS (__has_builtin(__builtin_frame_address)) # define V8_HAS_BUILTIN_POPCOUNT (__has_builtin(__builtin_popcount)) +# define V8_HAS_BUILTIN_ADD_OVERFLOW (__has_builtin(__builtin_add_overflow)) +# define V8_HAS_BUILTIN_SUB_OVERFLOW (__has_builtin(__builtin_sub_overflow)) +# define V8_HAS_BUILTIN_MUL_OVERFLOW (__has_builtin(__builtin_mul_overflow)) # define V8_HAS_BUILTIN_SADD_OVERFLOW (__has_builtin(__builtin_sadd_overflow)) # define V8_HAS_BUILTIN_SSUB_OVERFLOW (__has_builtin(__builtin_ssub_overflow)) # define V8_HAS_BUILTIN_UADD_OVERFLOW (__has_builtin(__builtin_uadd_overflow)) @@ -455,6 +462,16 @@ path. Add it with -I to the command line #endif +// A macro to mark functions whose values don't change (e.g. 
across calls) +// and thereby compiler is free to hoist and fold multiple calls together. +// Use like: +// V8_CONST int foo() { ... } +#if V8_HAS_ATTRIBUTE_CONST +# define V8_CONST __attribute__((const)) +#else +# define V8_CONST +#endif + // A macro to mark a declaration as requiring constant initialization. // Use like: // int* foo V8_CONSTINIT; diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl index b5d6231600488a..c494280b72909b 100644 --- a/deps/v8/infra/mb/mb_config.pyl +++ b/deps/v8/infra/mb/mb_config.pyl @@ -66,7 +66,6 @@ 'V8 Linux64 - debug builder': 'debug_x64', 'V8 Linux64 - external code space - debug - builder': 'debug_x64_external_code_space', 'V8 Linux64 - custom snapshot - debug builder': 'debug_x64_custom', - 'V8 Linux64 - heap sandbox - debug - builder': 'debug_x64_heap_sandbox', 'V8 Linux64 - internal snapshot - builder': 'release_x64_internal', 'V8 Linux64 - debug - header includes - builder': 'debug_x64_header_includes', 'V8 Linux64 - no sandbox - debug builder': 'debug_x64_no_sandbox', @@ -108,9 +107,9 @@ 'V8 Linux - arm64 - sim - MSAN - builder': 'release_simulate_arm64_msan', # FYI. 
'V8 iOS - sim - builder': 'release_x64_ios_simulator', - 'V8 Linux64 - arm64 - sim - heap sandbox - debug - builder': 'debug_x64_heap_sandbox_arm64_sim', 'V8 Linux64 - arm64 - sim - no pointer compression - builder': 'release_simulate_arm64_no_pointer_compression', + 'V8 Linux64 - coverage': 'release_x64_coverage', 'V8 Linux64 - cppgc-non-default - debug - builder': 'debug_x64_non_default_cppgc', 'V8 Linux64 - debug - perfetto - builder': 'debug_x64_perfetto', 'V8 Linux64 - disable runtime call stats - builder': 'release_x64_disable_runtime_call_stats', @@ -168,7 +167,7 @@ 'V8 Clusterfuzz Linux64 UBSan - release builder': 'release_x64_ubsan_recover', 'V8 Clusterfuzz Linux64 ASAN sandbox testing - release builder': - 'release_x64_asan_sandbox_testing', + 'release_x64_asan_symbolized_expose_memory_corruption', }, 'client.v8.perf' : { 'V8 Arm - builder - perf': 'official_arm', @@ -225,6 +224,7 @@ 'release_simulate_arm64_no_pointer_compression', 'v8_linux64_cppgc_non_default_compile_dbg': 'debug_x64_non_default_cppgc', 'v8_linux64_compile_dbg': 'debug_x64_trybot', + 'v8_linux64_coverage': 'release_x64_coverage', 'v8_linux64_no_sandbox_compile_dbg': 'debug_x64_no_sandbox', 'v8_linux64_dict_tracking_compile_dbg': 'debug_x64_dict_tracking_trybot', 'v8_linux64_disable_runtime_call_stats_compile_rel': 'release_x64_disable_runtime_call_stats', @@ -237,9 +237,7 @@ 'v8_linux64_gcc_compile_rel': 'release_x64_gcc', 'v8_linux64_gcov_coverage': 'release_x64_gcc_coverage', 'v8_linux64_header_includes_dbg': 'debug_x64_header_includes', - 'v8_linux64_heap_sandbox_compile_dbg': 'debug_x64_heap_sandbox', 'v8_linux64_minor_mc_compile_dbg': 'debug_x64_trybot', - 'v8_linux_arm64_sim_heap_sandbox_compile_dbg': 'debug_x64_heap_sandbox_arm64_sim', 'v8_linux64_fyi_compile_rel': 'release_x64_test_features_trybot', 'v8_linux64_nodcheck_compile_rel': 'release_x64', 'v8_linux64_perfetto_compile_dbg': 'debug_x64_perfetto', @@ -284,7 +282,7 @@ 'v8_mac64_compile_rel': 'release_x64_trybot', 
'v8_mac64_dbg': 'debug_x64', 'v8_mac64_compile_dbg': 'debug_x64', - 'v8_mac64_compile_full_compile_dbg': 'full_debug_x64', + 'v8_mac64_noopt_compile_dbg': 'full_debug_x64', 'v8_mac64_asan_compile_rel': 'release_x64_asan_no_lsan', 'v8_linux_arm_compile_rel': 'release_simulate_arm_trybot', 'v8_linux_arm_lite_compile_dbg': 'debug_simulate_arm_lite', @@ -412,7 +410,7 @@ 'release_simulate_arm64_cfi': [ 'release_bot', 'simulate_arm64', 'v8_control_flow_integrity'], 'release_simulate_arm64_no_pointer_compression': [ - 'release_bot', 'simulate_arm64_no_sandbox', 'dcheck_always_on', + 'release_bot', 'simulate_arm64', 'no_sandbox', 'dcheck_always_on', 'v8_enable_slow_dchecks', 'v8_disable_pointer_compression'], 'release_simulate_arm64_msan': [ 'release_bot', 'simulate_arm64', 'msan'], @@ -445,7 +443,7 @@ 'debug_arm64': [ 'debug_bot', 'arm64'], 'debug_arm64_no_pointer_compression': [ - 'debug_bot', 'arm64_no_sandbox', 'dcheck_always_on', 'v8_enable_slow_dchecks', 'v8_enable_javascript_promise_hooks', + 'debug_bot', 'arm64', 'no_sandbox', 'dcheck_always_on', 'v8_enable_slow_dchecks', 'v8_enable_javascript_promise_hooks', 'v8_disable_pointer_compression'], 'full_debug_arm64': [ 'debug_bot', 'arm64', 'v8_full_debug'], @@ -492,6 +490,9 @@ 'release_x64_asan_no_lsan_verify_heap_dchecks': [ 'release_bot', 'x64', 'asan', 'dcheck_always_on', 'v8_enable_slow_dchecks', 'v8_verify_heap'], + 'release_x64_asan_symbolized_expose_memory_corruption': [ + 'release_bot', 'x64', 'asan', 'symbolized', + 'v8_expose_memory_corruption_api'], 'release_x64_asan_symbolized_verify_heap': [ 'release_bot', 'x64', 'asan', 'lsan', 'symbolized', 'v8_verify_heap'], @@ -499,6 +500,8 @@ 'release_bot', 'x64', 'cfi'], 'release_x64_cfi_clusterfuzz': [ 'release_bot', 'x64', 'cfi_clusterfuzz'], + 'release_x64_coverage': [ + 'release_bot', 'x64', 'clang_coverage'], 'release_x64_fuzzilli': [ 'release_bot', 'x64', 'dcheck_always_on', 'v8_enable_slow_dchecks', 'v8_verify_heap', 'v8_verify_csa', 'fuzzilli'], @@ -526,12 
+529,12 @@ 'release_x64_minimal_symbols_reclient': [ 'release_bot_reclient', 'x64', 'minimal_symbols'], 'release_x64_no_pointer_compression': [ - 'release_bot', 'x64_no_sandbox', 'dcheck_always_on', 'v8_enable_slow_dchecks', 'v8_enable_javascript_promise_hooks', - 'v8_disable_pointer_compression'], + 'release_bot', 'x64', 'no_sandbox', 'dcheck_always_on', 'v8_enable_slow_dchecks', + 'v8_enable_javascript_promise_hooks', 'v8_disable_pointer_compression'], 'release_x64_reclient': [ 'release_bot_reclient', 'x64'], 'release_x64_no_sandbox': [ - 'release_bot', 'x64_no_sandbox'], + 'release_bot', 'x64', 'no_sandbox'], 'release_x64_trybot': [ 'release_trybot', 'x64'], 'release_x64_test_features_trybot': [ @@ -555,9 +558,6 @@ 'v8_enable_slow_dchecks', 'v8_verify_csa'], 'release_x64_webassembly_disabled': [ 'release_bot', 'x64', 'webassembly_disabled'], - 'release_x64_asan_sandbox_testing': [ - 'release_bot', 'x64', 'asan', 'symbolized', 'v8_enable_sandbox_future', - 'v8_expose_memory_corruption_api'], # Official configs for x64. 
'official_x64': [ @@ -583,10 +583,6 @@ 'debug_bot_no_goma', 'x64', 'gcc', 'lld', 'no_custom_libcxx'], 'debug_x64_header_includes': [ 'debug_bot', 'x64', 'v8_check_header_includes'], - 'debug_x64_heap_sandbox': [ - 'debug_bot', 'x64', 'v8_enable_sandbox_future', 'v8_expose_memory_corruption_api'], - 'debug_x64_heap_sandbox_arm64_sim': [ - 'debug_bot', 'simulate_arm64', 'v8_enable_sandbox_future', 'v8_expose_memory_corruption_api'], 'debug_x64_minimal_symbols': [ 'debug_bot', 'x64', 'minimal_symbols'], 'debug_x64_non_default_cppgc': [ @@ -594,7 +590,7 @@ 'debug_x64_perfetto': [ 'debug_bot', 'x64', 'perfetto'], 'debug_x64_no_sandbox': [ - 'debug_bot', 'x64_no_sandbox'], + 'debug_bot', 'x64', 'no_sandbox'], 'debug_x64_single_generation': [ 'debug_bot', 'x64', 'v8_enable_single_generation'], 'debug_x64_trybot': [ @@ -671,11 +667,7 @@ }, 'arm64': { - 'gn_args': 'target_cpu="arm64" v8_enable_sandbox=true', - }, - - 'arm64_no_sandbox': { - 'gn_args': 'target_cpu="arm64" v8_enable_sandbox=false', + 'gn_args': 'target_cpu="arm64"', }, 'asan': { @@ -699,6 +691,10 @@ 'gn_args': 'is_clang=true', }, + 'clang_coverage': { + 'gn_args': 'use_clang_coverage=true', + }, + 'conservative_stack_scanning': { 'gn_args': 'v8_enable_conservative_stack_scanning=true ' 'v8_enable_inner_pointer_resolution_mb=true', @@ -793,12 +789,12 @@ 'msan': { 'mixins': ['v8_enable_test_features'], - 'gn_args': 'is_msan=true msan_track_origins=2', + 'gn_args': 'is_msan=true msan_track_origins=2 instrumented_libraries_release="xenial"', }, 'msan_no_origins': { 'mixins': ['v8_enable_test_features'], - 'gn_args': 'is_msan=true msan_track_origins=0', + 'gn_args': 'is_msan=true msan_track_origins=0 instrumented_libraries_release="xenial"', }, 'msvc': { @@ -813,6 +809,10 @@ 'gn_args': 'use_goma=false', }, + 'no_sandbox': { + 'gn_args': 'v8_enable_sandbox=false', + }, + 'no_sysroot': { 'gn_args': 'use_sysroot=false', }, @@ -862,11 +862,7 @@ }, 'simulate_arm64': { - 'gn_args': 'target_cpu="x64" 
v8_target_cpu="arm64" v8_enable_sandbox=true', - }, - - 'simulate_arm64_no_sandbox': { - 'gn_args': 'target_cpu="x64" v8_target_cpu="arm64" v8_enable_sandbox=false', + 'gn_args': 'target_cpu="x64" v8_target_cpu="arm64"', }, 'simulate_loong64': { @@ -938,10 +934,6 @@ 'gn_args': 'v8_enable_runtime_call_stats=false', }, - 'v8_enable_sandbox_future': { - 'gn_args': 'v8_enable_sandbox_future=true', - }, - 'v8_expose_memory_corruption_api': { 'gn_args': 'v8_expose_memory_corruption_api=true', }, @@ -1031,11 +1023,7 @@ }, 'x64': { - 'gn_args': 'target_cpu="x64" v8_enable_sandbox=true', - }, - - 'x64_no_sandbox': { - 'gn_args': 'target_cpu="x64" v8_enable_sandbox=false', + 'gn_args': 'target_cpu="x64"', }, 'x86': { diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl index 19d5e1845249ec..df8a90a1bf5960 100644 --- a/deps/v8/infra/testing/builders.pyl +++ b/deps/v8/infra/testing/builders.pyl @@ -89,7 +89,8 @@ 'name': 'test262', 'suffix': 'noavx', 'variant': 'default', - 'test_args': ['--extra-flags', '--noenable-avx'] + 'test_args': ['--extra-flags', '--noenable-avx'], + 'shards': 2 }, { 'name': 'v8testing', @@ -149,7 +150,7 @@ }, 'tests': [ {'name': 'mozilla', 'variant': 'default'}, - {'name': 'test262', 'variant': 'default'}, + {'name': 'test262', 'variant': 'default', 'shards': 2}, {'name': 'v8testing', 'variant': 'default', 'shards': 2}, ], }, @@ -179,7 +180,27 @@ ], 'shards': 4, }, - {'name': 'gcmole'}, + {'name': 'gcmole_v2', 'variant': 'ia32'}, + {'name': 'gcmole_v2', 'variant': 'x64'}, + {'name': 'gcmole_v2', 'variant': 'arm'}, + {'name': 'gcmole_v2', 'variant': 'arm64'}, + # TODO(https://crbug.com/v8/12660): Remove v2 above after testing. 
+ {'name': 'gcmole_v3', 'variant': 'ia32', 'shards': 4}, + {'name': 'gcmole_v3', 'variant': 'x64', 'shards': 4}, + {'name': 'gcmole_v3', 'variant': 'arm', 'shards': 4}, + {'name': 'gcmole_v3', 'variant': 'arm64', 'shards': 4}, + { + 'name': 'gcmole_v2', + 'variant': 'x64', + 'suffix': 'test single host', + 'test_args': ['--test-run'], + }, + { + 'name': 'gcmole_v3', + 'variant': 'x64', + 'suffix': 'test multi host', + 'test_args': ['--test-run'], + }, ], }, 'v8_linux_optional_rel': { @@ -210,6 +231,7 @@ '--extra-flags', '--noenable-sse3 --noenable-ssse3 --noenable-sse4-1 --noenable-avx', ], + 'shards': 2, }, { 'name': 'v8testing', @@ -237,6 +259,7 @@ '--extra-flags', '--noenable-ssse3 --noenable-sse4-1 --noenable-avx', ], + 'shards': 2, }, { 'name': 'v8testing', @@ -258,6 +281,7 @@ 'suffix': 'nosse4', 'variant': 'default', 'test_args': ['--extra-flags', '--noenable-sse4-1 --noenable-avx'], + 'shards': 2, }, { 'name': 'v8testing', @@ -275,7 +299,8 @@ 'name': 'test262', 'suffix': 'noavx', 'variant': 'default', - 'test_args': ['--extra-flags', '--noenable-avx'] + 'test_args': ['--extra-flags', '--noenable-avx'], + 'shards': 2, }, { 'name': 'v8testing', @@ -348,10 +373,22 @@ {'name': 'benchmarks'}, {'name': 'mozilla'}, {'name': 'optimize_for_size'}, - {'name': 'test262', 'variant': 'default'}, + {'name': 'test262', 'variant': 'default', 'shards': 2}, {'name': 'v8testing', 'shards': 3}, ], }, + 'v8_linux64_coverage': { + 'swarming_dimensions' : { + 'os': 'Ubuntu-18.04', + }, + 'tests': [ + # TODO(https://crbug.com/1265931): Speed things up for now. Later replace + # mjsunit and unittests with full v8testing. 
+ # {'name': 'v8testing'}, + {'name': 'mjsunit', 'variant': 'default'}, + {'name': 'unittests', 'variant': 'default'}, + ], + }, 'v8_linux64_cppgc_non_default_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', @@ -361,6 +398,14 @@ {'name': 'v8testing', 'shards': 3}, ], }, + 'v8_linux64_css_dbg': { + 'swarming_dimensions' : { + 'os': 'Ubuntu-18.04', + }, + 'tests': [ + {'name': 'v8testing', 'shards': 5}, + ], + }, 'v8_linux64_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', @@ -489,14 +534,6 @@ {'name': 'v8testing'}, ], }, - 'v8_linux64_heap_sandbox_dbg': { - 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', - }, - 'tests': [ - {'name': 'v8testing', 'shards': 4}, - ], - }, 'v8_linux64_minor_mc_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', @@ -699,14 +736,6 @@ {'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 12}, ], }, - 'v8_linux_arm64_sim_heap_sandbox_dbg': { - 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', - }, - 'tests': [ - {'name': 'v8testing', 'shards': 14}, - ], - }, 'v8_linux_arm64_rel': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', @@ -809,7 +838,7 @@ # Win64 'v8_win64_asan_rel': { 'swarming_dimensions' : { - 'os': 'Windows-10-19042', + 'os': 'Windows-10-19045', }, 'tests': [ {'name': 'v8testing', 'shards': 5}, @@ -818,7 +847,7 @@ 'v8_win64_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64', - 'os': 'Windows-10-19042', + 'os': 'Windows-10-19045', }, 'tests': [ {'name': 'mozilla'}, @@ -830,7 +859,7 @@ 'v8_win64_msvc_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64', - 'os': 'Windows-10-19042', + 'os': 'Windows-10-19045', }, 'tests': [ {'name': 'mozilla'}, @@ -841,7 +870,7 @@ 'v8_win64_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64', - 'os': 'Windows-10-19042', + 'os': 'Windows-10-19045', }, 'tests': [ {'name': 'mozilla'}, @@ -855,7 +884,7 @@ 'v8_mac64_asan_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64', - 'os': 'Mac-10.15', + 'os': 'Mac-12', }, 'tests': [ {'name': 'v8testing', 'shards': 8}, @@ -864,7 +893,7 @@ 
'v8_mac64_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64', - 'os': 'Mac-10.15', + 'os': 'Mac-12', }, 'tests': [ {'name': 'mozilla'}, @@ -876,16 +905,25 @@ 'v8_mac64_gc_stress_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64', - 'os': 'Mac-10.15', + 'os': 'Mac-12', }, 'tests': [ {'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 6}, ], }, + 'v8_mac64_noopt_dbg': { + 'swarming_dimensions' : { + 'cpu': 'x86-64', + 'os': 'Mac-12', + }, + 'tests': [ + {'name': 'v8testing', 'shards': 6}, + ], + }, 'v8_mac64_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64', - 'os': 'Mac-10.15', + 'os': 'Mac-12', }, 'tests': [ {'name': 'mozilla'}, @@ -937,7 +975,7 @@ 'v8_mac_arm64_sim_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64', - 'os': 'Mac-10.15', + 'os': 'Mac-12', }, 'tests': [ {'name': 'v8testing', 'shards': 8}, @@ -947,7 +985,7 @@ 'v8_mac_arm64_sim_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64', - 'os': 'Mac-10.15', + 'os': 'Mac-12', }, 'tests': [ {'name': 'v8testing', 'shards': 8}, @@ -957,7 +995,7 @@ 'v8_mac_arm64_sim_nodcheck_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64', - 'os': 'Mac-10.15', + 'os': 'Mac-12', }, 'tests': [ {'name': 'v8testing', 'shards': 8}, @@ -1042,7 +1080,8 @@ 'name': 'test262', 'suffix': 'noavx', 'variant': 'default', - 'test_args': ['--extra-flags', '--noenable-avx'] + 'test_args': ['--extra-flags', '--noenable-avx'], + 'shards': 2 }, { 'name': 'v8testing', @@ -1050,7 +1089,27 @@ 'test_args': ['--extra-flags', '--noenable-avx'], 'shards': 2 }, - {'name': 'gcmole'}, + {'name': 'gcmole_v2', 'variant': 'ia32'}, + {'name': 'gcmole_v2', 'variant': 'x64'}, + {'name': 'gcmole_v2', 'variant': 'arm'}, + {'name': 'gcmole_v2', 'variant': 'arm64'}, + # TODO(https://crbug.com/v8/12660): Remove v2 above after testing. 
+ {'name': 'gcmole_v3', 'variant': 'ia32', 'shards': 4}, + {'name': 'gcmole_v3', 'variant': 'x64', 'shards': 4}, + {'name': 'gcmole_v3', 'variant': 'arm', 'shards': 4}, + {'name': 'gcmole_v3', 'variant': 'arm64', 'shards': 4}, + { + 'name': 'gcmole_v2', + 'variant': 'x64', + 'suffix': 'test single host', + 'test_args': ['--test-run'], + }, + { + 'name': 'gcmole_v3', + 'variant': 'x64', + 'suffix': 'test multi host', + 'test_args': ['--test-run'], + }, ], }, 'V8 Linux - arm64 - sim - CFI': { @@ -1086,9 +1145,9 @@ {'name': 'mozilla', 'variant': 'code_serializer', 'shards': 1}, {'name': 'mozilla', 'variant': 'extra'}, {'name': 'optimize_for_size'}, - {'name': 'test262', 'shards': 6}, + {'name': 'test262', 'shards': 12}, {'name': 'test262', 'variant': 'code_serializer', 'shards': 2}, - {'name': 'test262', 'variant': 'extra', 'shards': 5}, + {'name': 'test262', 'variant': 'extra', 'shards': 10}, {'name': 'v8testing', 'shards': 3}, { 'name': 'v8testing', @@ -1096,7 +1155,7 @@ 'test_args': ['--isolates'], 'shards': 4 }, - {'name': 'v8testing', 'variant': 'extra', 'shards': 2}, + {'name': 'v8testing', 'variant': 'extra', 'shards': 4}, # Nosse3. 
{ 'name': 'mozilla', @@ -1107,7 +1166,8 @@ 'name': 'test262', 'suffix': 'nosse3', 'variant': 'default', - 'test_args': ['--extra-flags', '--noenable-sse3 --noenable-ssse3 --noenable-sse4-1 --noenable-avx'] + 'test_args': ['--extra-flags', '--noenable-sse3 --noenable-ssse3 --noenable-sse4-1 --noenable-avx'], + 'shards': 2 }, { 'name': 'v8testing', @@ -1125,7 +1185,8 @@ 'name': 'test262', 'suffix': 'nossse3', 'variant': 'default', - 'test_args': ['--extra-flags', '--noenable-ssse3 --noenable-sse4-1 --noenable-avx'] + 'test_args': ['--extra-flags', '--noenable-ssse3 --noenable-sse4-1 --noenable-avx'], + 'shards': 2 }, { 'name': 'v8testing', @@ -1143,7 +1204,8 @@ 'name': 'test262', 'suffix': 'nosse4', 'variant': 'default', - 'test_args': ['--extra-flags', '--noenable-sse4-1 --noenable-avx'] + 'test_args': ['--extra-flags', '--noenable-sse4-1 --noenable-avx'], + 'shards': 2 }, { 'name': 'v8testing', @@ -1161,7 +1223,8 @@ 'name': 'test262', 'suffix': 'noavx', 'variant': 'default', - 'test_args': ['--extra-flags', '--noenable-avx'] + 'test_args': ['--extra-flags', '--noenable-avx'], + 'shards': 2 }, { 'name': 'v8testing', @@ -1203,7 +1266,7 @@ }, 'tests': [ {'name': 'mozilla', 'variant': 'default'}, - {'name': 'test262', 'variant': 'default'}, + {'name': 'test262', 'variant': 'default', 'shards': 2}, {'name': 'v8testing', 'variant': 'default'}, ], }, @@ -1224,7 +1287,7 @@ }, 'tests': [ {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, + {'name': 'test262', 'variant': 'default', 'shards': 2}, {'name': 'v8testing'}, ], }, @@ -1253,7 +1316,7 @@ {'name': 'optimize_for_size'}, {'name': 'perf_integration'}, {'name': 'test262', 'shards': 2}, - {'name': 'test262', 'variant': 'assert_types'}, + {'name': 'test262', 'variant': 'assert_types', 'shards': 2}, {'name': 'test262', 'variant': 'extra', 'shards': 2}, {'name': 'v8initializers'}, {'name': 'v8testing'}, @@ -1274,7 +1337,8 @@ 'name': 'test262', 'suffix': 'noavx', 'variant': 'default', - 'test_args': 
['--extra-flags', '--noenable-avx'] + 'test_args': ['--extra-flags', '--noenable-avx'], + 'shards': 2 }, { 'name': 'v8testing', @@ -1291,10 +1355,22 @@ {'name': 'benchmarks'}, {'name': 'mozilla'}, {'name': 'optimize_for_size'}, - {'name': 'test262', 'variant': 'default'}, + {'name': 'test262', 'variant': 'default', 'shards': 2}, {'name': 'v8testing', 'shards': 2}, ], }, + 'V8 Linux64 - coverage': { + 'swarming_dimensions' : { + 'os': 'Ubuntu-18.04', + }, + 'tests': [ + # TODO(https://crbug.com/1265931): Speed things up for now. Later replace + # mjsunit and unittests with full v8testing. + # {'name': 'v8testing'}, + {'name': 'mjsunit', 'variant': 'default'}, + {'name': 'unittests', 'variant': 'default'}, + ], + }, 'V8 Linux64 - custom snapshot - debug': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', @@ -1318,13 +1394,13 @@ {'name': 'test262', 'shards': 7}, {'name': 'test262', 'variant': 'extra', 'shards': 5}, {'name': 'v8testing', 'shards': 2}, - {'name': 'v8testing', 'variant': 'extra', 'shards': 2}, + {'name': 'v8testing', 'variant': 'extra', 'shards': 4}, {'name': 'v8testing', 'variant': 'minor_mc'}, {'name': 'v8testing', 'variant': 'no_lfa'}, {'name': 'v8testing', 'variant': 'slow_path'}, {'name': 'v8testing', 'variant': 'stress_instruction_scheduling'}, {'name': 'v8testing', 'variant': 'stress_concurrent_allocation'}, - {'name': 'v8testing', 'variant': 'stress_concurrent_inlining'}, + {'name': 'v8testing', 'variant': 'stress_concurrent_inlining', 'shards': 2}, # Maglev -- move to extra once more architectures are supported. {'name': 'mjsunit', 'variant': 'maglev'}, # Noavx. 
@@ -1337,7 +1413,8 @@ 'name': 'test262', 'suffix': 'noavx', 'variant': 'default', - 'test_args': ['--extra-flags', '--noenable-avx'] + 'test_args': ['--extra-flags', '--noenable-avx'], + 'shards': 2 }, { 'name': 'v8testing', @@ -1497,14 +1574,6 @@ {'name': 'v8testing'}, ], }, - 'V8 Linux64 - heap sandbox - debug': { - 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', - }, - 'tests': [ - {'name': 'v8testing', 'shards': 2}, - ], - }, 'V8 Linux64 - internal snapshot': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', @@ -1545,7 +1614,7 @@ }, 'tests': [ {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, + {'name': 'test262', 'variant': 'default', 'shards': 2}, {'name': 'v8testing'}, ], }, @@ -1568,6 +1637,14 @@ {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1}, ], }, + 'V8 Linux64 css - debug': { + 'swarming_dimensions' : { + 'os': 'Ubuntu-18.04', + }, + 'tests': [ + {'name': 'v8testing', 'shards': 5}, + ], + }, 'V8 Linux64 GC Stress - custom snapshot': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', @@ -1667,7 +1744,7 @@ 'V8 Mac64': { 'swarming_dimensions': { 'cpu': 'x86-64', - 'os': 'Mac-10.15', + 'os': 'Mac-12', }, 'tests': [ {'name': 'mozilla'}, @@ -1679,7 +1756,7 @@ 'V8 Mac64 - debug': { 'swarming_dimensions': { 'cpu': 'x86-64', - 'os': 'Mac-10.15', + 'os': 'Mac-12', }, 'tests': [ {'name': 'mozilla'}, @@ -1691,7 +1768,7 @@ 'V8 Mac64 ASAN': { 'swarming_dimensions': { 'cpu': 'x86-64', - 'os': 'Mac-10.15', + 'os': 'Mac-12', }, 'tests': [ {'name': 'v8testing', 'shards': 10}, @@ -1700,7 +1777,7 @@ 'V8 Mac64 GC Stress': { 'swarming_dimensions': { 'cpu': 'x86-64', - 'os': 'Mac-10.15', + 'os': 'Mac-12', }, 'tests': [ {'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 6}, @@ -1751,7 +1828,7 @@ 'V8 Mac - arm64 - sim - debug': { 'swarming_dimensions' : { 'cpu': 'x86-64', - 'os': 'Mac-10.15', + 'os': 'Mac-12', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1766,7 +1843,7 @@ 'V8 Mac - arm64 - sim - release': { 'swarming_dimensions' : 
{ 'cpu': 'x86-64', - 'os': 'Mac-10.15', + 'os': 'Mac-12', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1785,7 +1862,7 @@ }, 'tests': [ {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, + {'name': 'test262', 'variant': 'default', 'shards': 2}, {'name': 'v8testing'}, ], }, @@ -1796,24 +1873,24 @@ }, 'tests': [ {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, + {'name': 'test262', 'variant': 'default', 'shards': 2}, {'name': 'v8testing', 'shards': 5}, ], }, 'V8 Win64': { 'swarming_dimensions': { - 'os': 'Windows-10-19042', + 'os': 'Windows-10-19045', }, 'tests': [ {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, + {'name': 'test262', 'variant': 'default', 'shards': 2}, {'name': 'v8testing', 'shards': 2}, {'name': 'v8testing', 'variant': 'extra', 'shards': 2}, ], }, 'V8 Win64 - debug': { 'swarming_dimensions': { - 'os': 'Windows-10-19042', + 'os': 'Windows-10-19045', }, 'tests': [ {'name': 'mozilla'}, @@ -1824,17 +1901,17 @@ }, 'V8 Win64 - msvc': { 'swarming_dimensions': { - 'os': 'Windows-10-19042', + 'os': 'Windows-10-19045', }, 'tests': [ {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, + {'name': 'test262', 'variant': 'default', 'shards': 2}, {'name': 'v8testing', 'shards': 2}, ], }, 'V8 Win64 ASAN': { 'swarming_dimensions': { - 'os': 'Windows-10-19042', + 'os': 'Windows-10-19045', }, 'tests': [ {'name': 'v8testing', 'shards': 5}, @@ -1951,7 +2028,8 @@ 'name': 'test262', 'suffix': 'armv8-a', 'variant': 'default', - 'test_args': ['--extra-flags', '--enable-armv8'] + 'test_args': ['--extra-flags', '--enable-armv8'], + 'shards': 2 }, { 'name': 'v8testing', @@ -1970,6 +2048,7 @@ 'suffix': 'novfp3', 'variant': 'default', 'test_args': ['--novfp3'], + 'shards': 2 }, { 'name': 'v8testing', @@ -2055,7 +2134,7 @@ 'tests': [ {'name': 'mjsunit_sp_frame_access'}, {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, + {'name': 'test262', 'variant': 'default', 'shards': 2}, {'name': 
'v8testing', 'shards': 3}, {'name': 'v8testing', 'variant': 'extra', 'shards': 2}, ], @@ -2093,19 +2172,6 @@ }, ], }, - 'V8 Linux64 - arm64 - sim - heap sandbox - debug': { - 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', - }, - 'swarming_task_attrs': { - 'expiration': 14400, - 'hard_timeout': 7200, - 'priority': 35, - }, - 'tests': [ - {'name': 'v8testing', 'shards': 14}, - ], - }, 'V8 Linux - loong64 - sim': { 'swarming_dimensions': { 'os': 'Ubuntu-18.04', @@ -2129,7 +2195,7 @@ 'priority': 35, }, 'tests': [ - {'name': 'test262', 'variant': 'default'}, + {'name': 'test262', 'variant': 'default', 'shards': 2}, {'name': 'v8testing', 'shards': 4}, ], }, diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc index da75db119baf10..b425d61ce31d54 100644 --- a/deps/v8/src/api/api.cc +++ b/deps/v8/src/api/api.cc @@ -358,19 +358,12 @@ void V8::SetSnapshotDataBlob(StartupData* snapshot_blob) { namespace { #ifdef V8_ENABLE_SANDBOX -// ArrayBufferAllocator to use when sandboxed pointers are used in which case -// all ArrayBuffer backing stores need to be allocated inside the sandbox. -// Note, the current implementation is extremely inefficient as it uses the -// BoundedPageAllocator. In the future, we'll need a proper allocator -// implementation. +// ArrayBufferAllocator to use when the sandbox is enabled in which case all +// ArrayBuffer backing stores need to be allocated inside the sandbox. 
class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator { public: - ArrayBufferAllocator() { CHECK(page_allocator_); } - void* Allocate(size_t length) override { - return page_allocator_->AllocatePages(nullptr, RoundUp(length, page_size_), - page_size_, - PageAllocator::kReadWrite); + return allocator_->Allocate(length); } void* AllocateUninitialized(size_t length) override { @@ -378,12 +371,136 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator { } void Free(void* data, size_t length) override { - page_allocator_->FreePages(data, RoundUp(length, page_size_)); + return allocator_->Free(data); } private: - PageAllocator* page_allocator_ = internal::GetArrayBufferPageAllocator(); - const size_t page_size_ = page_allocator_->AllocatePageSize(); + // Backend allocator shared by all ArrayBufferAllocator instances. This way, + // there is a single region of virtual addres space reserved inside the + // sandbox from which all ArrayBufferAllocators allocate their memory, + // instead of each allocator creating their own region, which may cause + // address space exhaustion inside the sandbox. + // TODO(chromium:1340224): replace this with a more efficient allocator. 
+ class BackendAllocator { + public: + BackendAllocator() { + CHECK(i::GetProcessWideSandbox()->is_initialized()); + VirtualAddressSpace* vas = i::GetProcessWideSandbox()->address_space(); + constexpr size_t max_backing_memory_size = 8ULL * i::GB; + constexpr size_t min_backing_memory_size = 1ULL * i::GB; + size_t backing_memory_size = max_backing_memory_size; + i::Address backing_memory_base = 0; + while (!backing_memory_base && + backing_memory_size >= min_backing_memory_size) { + backing_memory_base = vas->AllocatePages( + VirtualAddressSpace::kNoHint, backing_memory_size, kChunkSize, + PagePermissions::kNoAccess); + if (!backing_memory_base) { + backing_memory_size /= 2; + } + } + if (!backing_memory_base) { + i::V8::FatalProcessOutOfMemory( + nullptr, + "Could not reserve backing memory for ArrayBufferAllocators"); + } + DCHECK(IsAligned(backing_memory_base, kChunkSize)); + + region_alloc_ = std::make_unique( + backing_memory_base, backing_memory_size, kAllocationGranularity); + end_of_accessible_region_ = region_alloc_->begin(); + + // Install a on-merge callback to discard or decommit unused pages. + region_alloc_->set_on_merge_callback([this](i::Address start, + size_t size) { + mutex_.AssertHeld(); + VirtualAddressSpace* vas = i::GetProcessWideSandbox()->address_space(); + i::Address end = start + size; + if (end == region_alloc_->end() && + start <= end_of_accessible_region_ - kChunkSize) { + // Can shrink the accessible region. + i::Address new_end_of_accessible_region = RoundUp(start, kChunkSize); + size_t size = + end_of_accessible_region_ - new_end_of_accessible_region; + CHECK(vas->DecommitPages(new_end_of_accessible_region, size)); + end_of_accessible_region_ = new_end_of_accessible_region; + } else if (size >= 2 * kChunkSize) { + // Can discard pages. The pages stay accessible, so the size of the + // accessible region doesn't change. 
+ i::Address chunk_start = RoundUp(start, kChunkSize); + i::Address chunk_end = RoundDown(start + size, kChunkSize); + CHECK(vas->DiscardSystemPages(chunk_start, chunk_end - chunk_start)); + } + }); + } + + ~BackendAllocator() { + // The sandbox may already have been torn down, in which case there's no + // need to free any memory. + if (i::GetProcessWideSandbox()->is_initialized()) { + VirtualAddressSpace* vas = i::GetProcessWideSandbox()->address_space(); + vas->FreePages(region_alloc_->begin(), region_alloc_->size()); + } + } + + BackendAllocator(const BackendAllocator&) = delete; + BackendAllocator& operator=(const BackendAllocator&) = delete; + + void* Allocate(size_t length) { + base::MutexGuard guard(&mutex_); + + length = RoundUp(length, kAllocationGranularity); + i::Address region = region_alloc_->AllocateRegion(length); + if (region == base::RegionAllocator::kAllocationFailure) return nullptr; + + // Check if the memory is inside the accessible region. If not, grow it. + i::Address end = region + length; + size_t length_to_memset = length; + if (end > end_of_accessible_region_) { + VirtualAddressSpace* vas = i::GetProcessWideSandbox()->address_space(); + i::Address new_end_of_accessible_region = RoundUp(end, kChunkSize); + size_t size = new_end_of_accessible_region - end_of_accessible_region_; + if (!vas->SetPagePermissions(end_of_accessible_region_, size, + PagePermissions::kReadWrite)) { + CHECK(region_alloc_->FreeRegion(region)); + return nullptr; + } + + // The pages that were inaccessible are guaranteed to be zeroed, so only + // memset until the previous end of the accessible region. 
+ length_to_memset = end_of_accessible_region_ - region; + end_of_accessible_region_ = new_end_of_accessible_region; + } + + void* mem = reinterpret_cast(region); + memset(mem, 0, length_to_memset); + return mem; + } + + void Free(void* data) { + base::MutexGuard guard(&mutex_); + region_alloc_->FreeRegion(reinterpret_cast(data)); + } + + static BackendAllocator* SharedInstance() { + static base::LeakyObject instance; + return instance.get(); + } + + private: + // Use a region allocator with a "page size" of 128 bytes as a reasonable + // compromise between the number of regions it has to manage and the amount + // of memory wasted due to rounding allocation sizes up to the page size. + static constexpr size_t kAllocationGranularity = 128; + // The backing memory's accessible region is grown in chunks of this size. + static constexpr size_t kChunkSize = 1 * i::MB; + + std::unique_ptr region_alloc_; + size_t end_of_accessible_region_; + base::Mutex mutex_; + }; + + BackendAllocator* allocator_ = BackendAllocator::SharedInstance(); }; #else @@ -429,7 +546,7 @@ struct SnapshotCreatorData { SnapshotCreator::SnapshotCreator(Isolate* v8_isolate, const intptr_t* external_references, - StartupData* existing_snapshot) { + const StartupData* existing_snapshot) { SnapshotCreatorData* data = new SnapshotCreatorData(v8_isolate); i::Isolate* i_isolate = reinterpret_cast(v8_isolate); i_isolate->set_array_buffer_allocator(&data->allocator_); @@ -451,7 +568,7 @@ SnapshotCreator::SnapshotCreator(Isolate* v8_isolate, } SnapshotCreator::SnapshotCreator(const intptr_t* external_references, - StartupData* existing_snapshot) + const StartupData* existing_snapshot) : SnapshotCreator(Isolate::Allocate(), external_references, existing_snapshot) {} @@ -4074,10 +4191,18 @@ size_t v8::BackingStore::ByteLength() const { return reinterpret_cast(this)->byte_length(); } +size_t v8::BackingStore::MaxByteLength() const { + return reinterpret_cast(this)->max_byte_length(); +} + bool 
v8::BackingStore::IsShared() const { return reinterpret_cast(this)->is_shared(); } +bool v8::BackingStore::IsResizableByUserJavaScript() const { + return reinterpret_cast(this)->is_resizable_by_js(); +} + // static std::unique_ptr v8::BackingStore::Reallocate( v8::Isolate* v8_isolate, std::unique_ptr backing_store, @@ -6144,7 +6269,8 @@ void v8::Object::SetAlignedPointerInInternalField(int index, void* value) { .store_aligned_pointer(obj->GetIsolate(), value), location, "Unaligned pointer"); DCHECK_EQ(value, GetAlignedPointerFromInternalField(index)); - internal::WriteBarrier::MarkingFromInternalFields(i::JSObject::cast(*obj)); + internal::WriteBarrier::CombinedBarrierFromInternalFields( + i::JSObject::cast(*obj), value); } void v8::Object::SetAlignedPointerInInternalFields(int argc, int indices[], @@ -6167,7 +6293,8 @@ void v8::Object::SetAlignedPointerInInternalFields(int argc, int indices[], location, "Unaligned pointer"); DCHECK_EQ(value, GetAlignedPointerFromInternalField(index)); } - internal::WriteBarrier::MarkingFromInternalFields(js_obj); + internal::WriteBarrier::CombinedBarrierFromInternalFields(js_obj, argc, + values); } // --- E n v i r o n m e n t --- @@ -7122,7 +7249,7 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) { // TODO(v8:12007): Consider adding // MakeExternal(Isolate*, ExternalStringResource*). i::Isolate* i_isolate; - if (obj.IsShared()) { + if (obj.InSharedWritableHeap()) { i_isolate = i::Isolate::Current(); } else { // It is safe to call GetIsolateFromWritableHeapObject because @@ -7155,7 +7282,7 @@ bool v8::String::MakeExternal( // TODO(v8:12007): Consider adding // MakeExternal(Isolate*, ExternalOneByteStringResource*). 
i::Isolate* i_isolate; - if (obj.IsShared()) { + if (obj.InSharedWritableHeap()) { i_isolate = i::Isolate::Current(); } else { // It is safe to call GetIsolateFromWritableHeapObject because @@ -8141,7 +8268,12 @@ void v8::ArrayBuffer::SetDetachKey(v8::Local key) { size_t v8::ArrayBuffer::ByteLength() const { i::Handle obj = Utils::OpenHandle(this); - return obj->byte_length(); + return obj->GetByteLength(); +} + +size_t v8::ArrayBuffer::MaxByteLength() const { + i::Handle obj = Utils::OpenHandle(this); + return obj->max_byte_length(); } Local v8::ArrayBuffer::New(Isolate* v8_isolate, @@ -8218,6 +8350,41 @@ std::unique_ptr v8::ArrayBuffer::NewBackingStore( static_cast(backing_store.release())); } +// static +std::unique_ptr v8::ArrayBuffer::NewResizableBackingStore( + size_t byte_length, size_t max_byte_length) { + Utils::ApiCheck(i::v8_flags.harmony_rab_gsab, + "v8::ArrayBuffer::NewResizableBackingStore", + "Constructing resizable ArrayBuffers is not supported"); + Utils::ApiCheck(byte_length <= max_byte_length, + "v8::ArrayBuffer::NewResizableBackingStore", + "Cannot construct resizable ArrayBuffer, byte_length must be " + "<= max_byte_length"); + Utils::ApiCheck( + byte_length <= i::JSArrayBuffer::kMaxByteLength, + "v8::ArrayBuffer::NewResizableBackingStore", + "Cannot construct resizable ArrayBuffer, requested length is too big"); + + size_t page_size, initial_pages, max_pages; + if (i::JSArrayBuffer::GetResizableBackingStorePageConfiguration( + nullptr, byte_length, max_byte_length, i::kDontThrow, &page_size, + &initial_pages, &max_pages) + .IsNothing()) { + i::V8::FatalProcessOutOfMemory(nullptr, + "v8::ArrayBuffer::NewResizableBackingStore"); + } + std::unique_ptr backing_store = + i::BackingStore::TryAllocateAndPartiallyCommitMemory( + nullptr, byte_length, max_byte_length, page_size, initial_pages, + max_pages, i::WasmMemoryFlag::kNotWasm, i::SharedFlag::kNotShared); + if (!backing_store) { + i::V8::FatalProcessOutOfMemory(nullptr, + 
"v8::ArrayBuffer::NewResizableBackingStore"); + } + return std::unique_ptr( + static_cast(backing_store.release())); +} + Local v8::ArrayBufferView::Buffer() { i::Handle obj = Utils::OpenHandle(this); i::Handle buffer; @@ -8267,13 +8434,21 @@ size_t v8::ArrayBufferView::ByteOffset() { } size_t v8::ArrayBufferView::ByteLength() { - i::Handle obj = Utils::OpenHandle(this); - return obj->WasDetached() ? 0 : obj->byte_length(); + i::DisallowGarbageCollection no_gc; + i::JSArrayBufferView obj = *Utils::OpenHandle(this); + if (obj.WasDetached()) { + return 0; + } + if (obj.IsJSTypedArray()) { + return i::JSTypedArray::cast(obj).GetByteLength(); + } + return i::JSDataView::cast(obj).GetByteLength(); } size_t v8::TypedArray::Length() { - i::Handle obj = Utils::OpenHandle(this); - return obj->WasDetached() ? 0 : obj->length(); + i::DisallowGarbageCollection no_gc; + i::JSTypedArray obj = *Utils::OpenHandle(this); + return obj.WasDetached() ? 0 : obj.GetLength(); } static_assert( @@ -8322,6 +8497,7 @@ static_assert( TYPED_ARRAYS(TYPED_ARRAY_NEW) #undef TYPED_ARRAY_NEW +// TODO(v8:11111): Support creating length tracking DataViews via the API. 
Local DataView::New(Local array_buffer, size_t byte_offset, size_t byte_length) { i::Handle buffer = Utils::OpenHandle(*array_buffer); @@ -8347,7 +8523,12 @@ Local DataView::New(Local shared_array_buffer, size_t v8::SharedArrayBuffer::ByteLength() const { i::Handle obj = Utils::OpenHandle(this); - return obj->byte_length(); + return obj->GetByteLength(); +} + +size_t v8::SharedArrayBuffer::MaxByteLength() const { + i::Handle obj = Utils::OpenHandle(this); + return obj->max_byte_length(); } Local v8::SharedArrayBuffer::New(Isolate* v8_isolate, diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc index a66e4ea93f91a6..404822d1ab0f69 100644 --- a/deps/v8/src/ast/scopes.cc +++ b/deps/v8/src/ast/scopes.cc @@ -1263,8 +1263,9 @@ Declaration* DeclarationScope::CheckConflictingVarDeclarations( if (decl->IsVariableDeclaration() && decl->AsVariableDeclaration()->AsNested() != nullptr) { Scope* current = decl->AsVariableDeclaration()->AsNested()->scope(); - DCHECK(decl->var()->mode() == VariableMode::kVar || - decl->var()->mode() == VariableMode::kDynamic); + if (decl->var()->mode() != VariableMode::kVar && + decl->var()->mode() != VariableMode::kDynamic) + continue; // Iterate through all scopes until the declaration scope. do { // There is a conflict if there exists a non-VAR binding. diff --git a/deps/v8/src/base/DEPS b/deps/v8/src/base/DEPS index a9c31c20d6dc3e..3cead70516d71c 100644 --- a/deps/v8/src/base/DEPS +++ b/deps/v8/src/base/DEPS @@ -5,3 +5,9 @@ include_rules = [ "-src", "+src/base", ] + +specific_include_rules = { + "ieee754.h": [ + "+third_party/glibc/src/sysdeps/ieee754/dbl-64/trig.h" + ], +} diff --git a/deps/v8/src/base/bit-field.h b/deps/v8/src/base/bit-field.h index ccfc23a065d1a5..06db44e3b2e0df 100644 --- a/deps/v8/src/base/bit-field.h +++ b/deps/v8/src/base/bit-field.h @@ -62,7 +62,7 @@ class BitField final { } // Returns a type U with the bit field value updated. 
- static constexpr U update(U previous, T value) { + V8_NODISCARD static constexpr U update(U previous, T value) { return (previous & ~kMask) | encode(value); } diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h index 0cb22a9a904aea..2d618782745cf9 100644 --- a/deps/v8/src/base/bits.h +++ b/deps/v8/src/base/bits.h @@ -310,9 +310,13 @@ inline bool SignedMulOverflow32(int32_t lhs, int32_t rhs, int32_t* val) { // |rhs| and stores the result into the variable pointed to by |val| and // returns true if the signed summation resulted in an overflow. inline bool SignedAddOverflow64(int64_t lhs, int64_t rhs, int64_t* val) { +#if V8_HAS_BUILTIN_ADD_OVERFLOW + return __builtin_add_overflow(lhs, rhs, val); +#else uint64_t res = static_cast(lhs) + static_cast(rhs); *val = base::bit_cast(res); return ((res ^ lhs) & (res ^ rhs) & (1ULL << 63)) != 0; +#endif } @@ -320,9 +324,34 @@ inline bool SignedAddOverflow64(int64_t lhs, int64_t rhs, int64_t* val) { // |rhs| and stores the result into the variable pointed to by |val| and // returns true if the signed subtraction resulted in an overflow. inline bool SignedSubOverflow64(int64_t lhs, int64_t rhs, int64_t* val) { +#if V8_HAS_BUILTIN_SUB_OVERFLOW + return __builtin_sub_overflow(lhs, rhs, val); +#else uint64_t res = static_cast(lhs) - static_cast(rhs); *val = base::bit_cast(res); return ((res ^ lhs) & (res ^ ~rhs) & (1ULL << 63)) != 0; +#endif +} + +// SignedMulOverflow64(lhs,rhs,val) performs a signed multiplication of |lhs| +// and |rhs| and stores the result into the variable pointed to by |val| and +// returns true if the signed multiplication resulted in an overflow. +inline bool SignedMulOverflow64(int64_t lhs, int64_t rhs, int64_t* val) { +#if V8_HAS_BUILTIN_MUL_OVERFLOW + return __builtin_mul_overflow(lhs, rhs, val); +#else + int64_t res = base::bit_cast(static_cast(lhs) * + static_cast(rhs)); + *val = res; + + // Check for INT64_MIN / -1 as it's undefined behaviour and could cause + // hardware exceptions. 
+ if ((res == INT64_MIN && lhs == -1)) { + return true; + } + + return lhs != 0 && (res / lhs) != rhs; +#endif } // SignedMulHigh32(lhs, rhs) multiplies two signed 32-bit values |lhs| and diff --git a/deps/v8/src/base/container-utils.h b/deps/v8/src/base/container-utils.h index 66a94bbd433966..bf6826d5a86b3c 100644 --- a/deps/v8/src/base/container-utils.h +++ b/deps/v8/src/base/container-utils.h @@ -6,6 +6,7 @@ #define V8_BASE_CONTAINER_UTILS_H_ #include +#include #include #include @@ -14,16 +15,16 @@ namespace v8::base { // Returns true iff the {element} is found in the {container}. template bool contains(const C& container, const T& element) { - const auto e = end(container); - return std::find(begin(container), e, element) != e; + const auto e = std::end(container); + return std::find(std::begin(container), e, element) != e; } // Returns the first index of {element} in {container}. Returns std::nullopt if // {container} does not contain {element}. template std::optional index_of(const C& container, const T& element) { - const auto b = begin(container); - const auto e = end(container); + const auto b = std::begin(container); + const auto e = std::end(container); if (auto it = std::find(b, e, element); it != e) { return {std::distance(b, it)}; } @@ -34,8 +35,8 @@ std::optional index_of(const C& container, const T& element) { // {predicate}. Returns std::nullopt if no element satisfies {predicate}. template std::optional index_of_if(const C& container, const P& predicate) { - const auto b = begin(container); - const auto e = end(container); + const auto b = std::begin(container); + const auto e = std::end(container); if (auto it = std::find_if(b, e, predicate); it != e) { return {std::distance(b, it)}; } @@ -48,9 +49,9 @@ std::optional index_of_if(const C& container, const P& predicate) { template inline size_t erase_at(C& container, size_t index, size_t count = 1) { // TODO(C++20): Replace with std::erase. 
- if (size(container) <= index) return 0; - auto start = begin(container) + index; - count = std::min(count, std::distance(start, end(container))); + if (std::size(container) <= index) return 0; + auto start = std::begin(container) + index; + count = std::min(count, std::distance(start, std::end(container))); container.erase(start, start + count); return count; } @@ -60,43 +61,48 @@ inline size_t erase_at(C& container, size_t index, size_t count = 1) { // TODO(C++20): Replace with std::erase_if. template inline size_t erase_if(C& container, const P& predicate) { - size_t count = 0; - auto e = end(container); - for (auto it = begin(container); it != e;) { - it = std::find_if(it, e, predicate); - if (it == e) break; - it = container.erase(it); - e = end(container); - ++count; - } + auto it = + std::remove_if(std::begin(container), std::end(container), predicate); + auto count = std::distance(it, std::end(container)); + container.erase(it, std::end(container)); return count; } // Helper for std::count_if. template inline size_t count_if(const C& container, const P& predicate) { - return std::count_if(begin(container), end(container), predicate); + return std::count_if(std::begin(container), std::end(container), predicate); } // Helper for std::all_of. template inline bool all_of(const C& container, const P& predicate) { - return std::all_of(begin(container), end(container), predicate); + return std::all_of(std::begin(container), std::end(container), predicate); } // Helper for std::none_of. template inline bool none_of(const C& container, const P& predicate) { - return std::none_of(begin(container), end(container), predicate); + return std::none_of(std::begin(container), std::end(container), predicate); +} + +// Helper for std::sort. 
+template +inline void sort(C& container) { + std::sort(std::begin(container), std::end(container)); +} +template +inline void sort(C& container, Comp comp) { + std::sort(std::begin(container), std::end(container), comp); } // Returns true iff all elements of {container} compare equal using operator==. template inline bool all_equal(const C& container) { - if (size(container) <= 1) return true; - auto b = begin(container); + if (std::size(container) <= 1) return true; + auto b = std::begin(container); const auto& value = *b; - return std::all_of(++b, end(container), + return std::all_of(++b, std::end(container), [&](const auto& v) { return v == value; }); } @@ -104,15 +110,15 @@ inline bool all_equal(const C& container) { // operator==. template inline bool all_equal(const C& container, const T& value) { - return std::all_of(begin(container), end(container), + return std::all_of(std::begin(container), std::end(container), [&](const auto& v) { return v == value; }); } -// Appends to vector {v} all the elements in the range {begin(container)} and -// {end(container)}. -template -inline void vector_append(std::vector& v, const C& container) { - v.insert(end(v), begin(container), end(container)); +// Appends to vector {v} all the elements in the range {std::begin(container)} +// and {std::end(container)}. 
+template +inline void vector_append(V& v, const C& container) { + v.insert(std::end(v), std::begin(container), std::end(container)); } } // namespace v8::base diff --git a/deps/v8/src/base/ieee754.cc b/deps/v8/src/base/ieee754.cc index 73672001cf1996..f03044611a3b0d 100644 --- a/deps/v8/src/base/ieee754.cc +++ b/deps/v8/src/base/ieee754.cc @@ -105,10 +105,12 @@ namespace { } while (false) int32_t __ieee754_rem_pio2(double x, double* y) V8_WARN_UNUSED_RESULT; -double __kernel_cos(double x, double y) V8_WARN_UNUSED_RESULT; int __kernel_rem_pio2(double* x, double* y, int e0, int nx, int prec, const int32_t* ipio2) V8_WARN_UNUSED_RESULT; +#if !defined(V8_USE_LIBM_TRIG_FUNCTIONS) +double __kernel_cos(double x, double y) V8_WARN_UNUSED_RESULT; double __kernel_sin(double x, double y, int iy) V8_WARN_UNUSED_RESULT; +#endif /* __ieee754_rem_pio2(x,y) * @@ -269,6 +271,7 @@ int32_t __ieee754_rem_pio2(double x, double *y) { return n; } +#if !defined(V8_USE_LIBM_TRIG_FUNCTIONS) /* __kernel_cos( x, y ) * kernel cos function on [-pi/4, pi/4], pi/4 ~ 0.785398164 * Input x is assumed to be bounded by ~pi/4 in magnitude. @@ -334,6 +337,7 @@ V8_INLINE double __kernel_cos(double x, double y) { return a - (iz - (z * r - x * y)); } } +#endif /* __kernel_rem_pio2(x,y,e0,nx,prec,ipio2) * double x[],y[]; int e0,nx,prec; int ipio2[]; @@ -643,6 +647,7 @@ int __kernel_rem_pio2(double *x, double *y, int e0, int nx, int prec, return n & 7; } +#if !defined(V8_USE_LIBM_TRIG_FUNCTIONS) /* __kernel_sin( x, y, iy) * kernel sin function on [-pi/4, pi/4], pi/4 ~ 0.7854 * Input x is assumed to be bounded by ~pi/4 in magnitude. @@ -696,6 +701,7 @@ V8_INLINE double __kernel_sin(double x, double y, int iy) { return x - ((z * (half * y - v * r) - y) - v * S1); } } +#endif /* __kernel_tan( x, y, k ) * kernel tan function on [-pi/4, pi/4], pi/4 ~ 0.7854 @@ -1318,6 +1324,7 @@ double atan2(double y, double x) { } } +#if !defined(V8_USE_LIBM_TRIG_FUNCTIONS) /* cos(x) * Return cosine function of x. 
* @@ -1377,6 +1384,7 @@ double cos(double x) { } } } +#endif /* exp(x) * Returns the exponential of x. @@ -2410,6 +2418,7 @@ double cbrt(double x) { return (t); } +#if !defined(V8_USE_LIBM_TRIG_FUNCTIONS) /* sin(x) * Return sine function of x. * @@ -2469,6 +2478,7 @@ double sin(double x) { } } } +#endif /* tan(x) * Return tangent function of x. @@ -3015,6 +3025,15 @@ double tanh(double x) { #undef SET_HIGH_WORD #undef SET_LOW_WORD +#if defined(V8_USE_LIBM_TRIG_FUNCTIONS) && defined(BUILDING_V8_BASE_SHARED) +double sin(double x) { + return glibc_sin(x); +} +double cos(double x) { + return glibc_cos(x); +} +#endif + } // namespace ieee754 } // namespace base } // namespace v8 diff --git a/deps/v8/src/base/ieee754.h b/deps/v8/src/base/ieee754.h index f2b3a3eb5808c5..53417179e6af1f 100644 --- a/deps/v8/src/base/ieee754.h +++ b/deps/v8/src/base/ieee754.h @@ -7,6 +7,10 @@ #include "src/base/base-export.h" +#if defined(V8_USE_LIBM_TRIG_FUNCTIONS) +#include "third_party/glibc/src/sysdeps/ieee754/dbl-64/trig.h" // nogncheck +#endif + namespace v8 { namespace base { namespace ieee754 { @@ -34,7 +38,15 @@ V8_BASE_EXPORT double atan(double x); V8_BASE_EXPORT double atan2(double y, double x); // Returns the cosine of |x|, where |x| is given in radians. +#if defined(V8_USE_LIBM_TRIG_FUNCTIONS) && \ + !defined(BUILDING_V8_BASE_SHARED) && \ + !defined(USING_V8_BASE_SHARED) +inline double cos(double x) { + return glibc_cos(x); +} +#else V8_BASE_EXPORT double cos(double x); +#endif // Returns the base-e exponential of |x|. V8_BASE_EXPORT double exp(double x); @@ -68,8 +80,16 @@ V8_BASE_EXPORT double expm1(double x); // behaviour is preserved for compatibility reasons. V8_BASE_EXPORT double pow(double x, double y); +#if defined(V8_USE_LIBM_TRIG_FUNCTIONS) && \ + !defined(BUILDING_V8_BASE_SHARED) && \ + !defined(USING_V8_BASE_SHARED) +inline double sin(double x) { + return glibc_sin(x); +} +#else // Returns the sine of |x|, where |x| is given in radians. 
V8_BASE_EXPORT double sin(double x); +#endif // Returns the tangent of |x|, where |x| is given in radians. V8_BASE_EXPORT double tan(double x); diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h index a8ae4305999f56..e333aefd0cadaf 100644 --- a/deps/v8/src/base/logging.h +++ b/deps/v8/src/base/logging.h @@ -46,8 +46,14 @@ V8_BASE_EXPORT V8_NOINLINE void V8_Dcheck(const char* file, int line, #endif // !defined(OFFICIAL_BUILD) #endif // DEBUG -#define UNIMPLEMENTED() FATAL("unimplemented code") -#define UNREACHABLE() FATAL("unreachable code") +namespace v8::base { +// These string constants are pattern-matched by fuzzers. +constexpr const char* kUnimplementedCodeMessage = "unimplemented code"; +constexpr const char* kUnreachableCodeMessage = "unreachable code"; +} // namespace v8::base + +#define UNIMPLEMENTED() FATAL(::v8::base::kUnimplementedCodeMessage) +#define UNREACHABLE() FATAL(::v8::base::kUnreachableCodeMessage) // g++ versions <= 8 cannot use UNREACHABLE() in a constexpr function. // TODO(miladfarca): Remove once all compilers handle this properly. #if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ <= 8) diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h index cd5b91f745a453..c267b8f1e9f305 100644 --- a/deps/v8/src/base/macros.h +++ b/deps/v8/src/base/macros.h @@ -389,9 +389,9 @@ bool is_inbounds(float_t v) { // Setup for Windows shared library export. #ifdef BUILDING_V8_SHARED -#define V8_EXPORT_PRIVATE __declspec(dllexport) +#define V8_EXPORT_PRIVATE #elif USING_V8_SHARED -#define V8_EXPORT_PRIVATE __declspec(dllimport) +#define V8_EXPORT_PRIVATE #else #define V8_EXPORT_PRIVATE #endif // BUILDING_V8_SHARED @@ -401,7 +401,7 @@ bool is_inbounds(float_t v) { // Setup for Linux shared library export. 
#if V8_HAS_ATTRIBUTE_VISIBILITY #ifdef BUILDING_V8_SHARED -#define V8_EXPORT_PRIVATE __attribute__((visibility("default"))) +#define V8_EXPORT_PRIVATE #else #define V8_EXPORT_PRIVATE #endif diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc index 2659336b3b954f..885bffa340462b 100644 --- a/deps/v8/src/base/platform/platform-fuchsia.cc +++ b/deps/v8/src/base/platform/platform-fuchsia.cc @@ -293,9 +293,7 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) { } void OS::SetDataReadOnly(void* address, size_t size) { - // TODO(v8:13194): Figure out which API to use on fuchsia. {vmar.protect} - // fails. - // CHECK(OS::SetPermissions(address, size, MemoryPermission::kRead)); + CHECK(OS::SetPermissions(address, size, MemoryPermission::kRead)); } // static diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc index ac44c70e2c55b2..0eedaba0e512ee 100644 --- a/deps/v8/src/base/platform/platform-win32.cc +++ b/deps/v8/src/base/platform/platform-win32.cc @@ -1022,7 +1022,7 @@ void OS::SetDataReadOnly(void* address, size_t size) { unsigned long old_protection; CHECK(VirtualProtect(address, size, PAGE_READONLY, &old_protection)); - CHECK_EQ(PAGE_READWRITE, old_protection); + CHECK(old_protection == PAGE_READWRITE || old_protection == PAGE_WRITECOPY); } // static diff --git a/deps/v8/src/base/small-vector.h b/deps/v8/src/base/small-vector.h index eb65f8c930b71a..1614afbce373f5 100644 --- a/deps/v8/src/base/small-vector.h +++ b/deps/v8/src/base/small-vector.h @@ -110,6 +110,15 @@ class SmallVector { bool empty() const { return end_ == begin_; } size_t capacity() const { return end_of_storage_ - begin_; } + T& front() { + DCHECK_NE(0, size()); + return begin_[0]; + } + const T& front() const { + DCHECK_NE(0, size()); + return begin_[0]; + } + T& back() { DCHECK_NE(0, size()); return end_[-1]; @@ -146,6 +155,30 @@ class SmallVector { end_ -= count; } + 
T* insert(T* pos, const T& value) { return insert(pos, 1, value); } + T* insert(T* pos, size_t count, const T& value) { + DCHECK_LE(pos, end_); + size_t offset = pos - begin_; + size_t elements_to_move = end_ - pos; + resize_no_init(size() + count); + pos = begin_ + offset; + std::memmove(pos + count, pos, elements_to_move); + std::fill_n(pos, count, value); + return pos; + } + template + T* insert(T* pos, It begin, It end) { + DCHECK_LE(pos, end_); + size_t offset = pos - begin_; + size_t count = std::distance(begin, end); + size_t elements_to_move = end_ - pos; + resize_no_init(size() + count); + pos = begin_ + offset; + std::memmove(pos + count, pos, elements_to_move); + std::copy(begin, end, pos); + return pos; + } + void resize_no_init(size_t new_size) { // Resizing without initialization is safe if T is trivially copyable. ASSERT_TRIVIALLY_COPYABLE(T); diff --git a/deps/v8/src/base/sys-info.cc b/deps/v8/src/base/sys-info.cc index 143aa4ae892743..3d45c8571c337c 100644 --- a/deps/v8/src/base/sys-info.cc +++ b/deps/v8/src/base/sys-info.cc @@ -127,5 +127,21 @@ int64_t SysInfo::AmountOfVirtualMemory() { #endif } +// static +uintptr_t SysInfo::AddressSpaceEnd() { +#if V8_OS_WIN + SYSTEM_INFO info; + GetSystemInfo(&info); + uintptr_t max_address = + reinterpret_cast(info.lpMaximumApplicationAddress); + return max_address + 1; +#else + // We don't query POSIX rlimits here (e.g. RLIMIT_AS) as they limit the size + // of memory mappings, but not the address space (e.g. even with a small + // RLIMIT_AS, a process can still map pages at high addresses). + return std::numeric_limits::max(); +#endif +} + } // namespace base } // namespace v8 diff --git a/deps/v8/src/base/sys-info.h b/deps/v8/src/base/sys-info.h index 772f44336f0d84..6865207cb32bcc 100644 --- a/deps/v8/src/base/sys-info.h +++ b/deps/v8/src/base/sys-info.h @@ -24,6 +24,12 @@ class V8_BASE_EXPORT SysInfo final { // Returns the number of bytes of virtual memory of this process. 
A return // value of zero means that there is no limit on the available virtual memory. static int64_t AmountOfVirtualMemory(); + + // Returns the end of the virtual address space available to this process. + // Memory mappings at or above this address cannot be addressed by this + // process, so all pointer values will be below this value. + // If the virtual address space is not limited, this will return -1. + static uintptr_t AddressSpaceEnd(); }; } // namespace base diff --git a/deps/v8/src/base/template-utils.h b/deps/v8/src/base/template-utils.h index 3394b60f7afae9..4722a8fdab9bde 100644 --- a/deps/v8/src/base/template-utils.h +++ b/deps/v8/src/base/template-utils.h @@ -60,6 +60,11 @@ struct has_output_operator< T, TStream, decltype(void(std::declval() << std::declval()))> : std::true_type {}; +// turn std::tuple into std::tuple. +template +using append_tuple_type = decltype(std::tuple_cat( + std::declval(), std::declval>())); + } // namespace base } // namespace v8 diff --git a/deps/v8/src/base/utils/random-number-generator.h b/deps/v8/src/base/utils/random-number-generator.h index 9a488480058a80..4811a9f06cfb96 100644 --- a/deps/v8/src/base/utils/random-number-generator.h +++ b/deps/v8/src/base/utils/random-number-generator.h @@ -129,6 +129,14 @@ class V8_BASE_EXPORT RandomNumberGenerator final { static uint64_t MurmurHash3(uint64_t); + // Implement the UniformRandomBitGenerator interface. 
+ using result_type = unsigned; + result_type operator()() { return NextInt(); } + static constexpr result_type min() { return 0; } + static constexpr result_type max() { + return std::numeric_limits::max(); + } + private: static const int64_t kMultiplier = 0x5'deec'e66d; static const int64_t kAddend = 0xb; diff --git a/deps/v8/src/base/vector.h b/deps/v8/src/base/vector.h index 1f5e103d4cd8e2..2e4fd64c6f47bd 100644 --- a/deps/v8/src/base/vector.h +++ b/deps/v8/src/base/vector.h @@ -142,8 +142,8 @@ class Vector { static Vector cast(Vector input) { // Casting is potentially dangerous, so be really restrictive here. This // might be lifted once we have use cases for that. - static_assert(std::is_pod::value); - static_assert(std::is_pod::value); + static_assert(std::is_trivial_v && std::is_standard_layout_v); + static_assert(std::is_trivial_v && std::is_standard_layout_v); DCHECK_EQ(0, (input.size() * sizeof(S)) % sizeof(T)); DCHECK_EQ(0, reinterpret_cast(input.begin()) % alignof(T)); return Vector(reinterpret_cast(input.begin()), @@ -193,22 +193,40 @@ class V8_NODISCARD ScopedVector : public Vector { template class OwnedVector { public: - MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(OwnedVector); + OwnedVector() = default; + OwnedVector(std::unique_ptr data, size_t length) : data_(std::move(data)), length_(length) { DCHECK_IMPLIES(length_ > 0, data_ != nullptr); } - // Implicit conversion from {OwnedVector} to {OwnedVector}, instantiable - // if {std::unique_ptr} can be converted to {std::unique_ptr}. - // Can be used to convert {OwnedVector} to {OwnedVector}. + // Disallow copying. + OwnedVector(const OwnedVector&) = delete; + OwnedVector& operator=(const OwnedVector&) = delete; + + // Move construction and move assignment from {OwnedVector} to + // {OwnedVector}, instantiable if {std::unique_ptr} can be converted to + // {std::unique_ptr}. Can also be used to convert {OwnedVector} to + // {OwnedVector}. 
+ // These also function as the standard move construction/assignment operator. + // {other} is left as an empty vector. + template , std::unique_ptr>::value>::type> + OwnedVector(OwnedVector&& other) V8_NOEXCEPT { + *this = std::move(other); + } + template , std::unique_ptr>::value>::type> - OwnedVector(OwnedVector&& other) - : data_(std::move(other.data_)), length_(other.length_) { + OwnedVector& operator=(OwnedVector&& other) V8_NOEXCEPT { static_assert(sizeof(U) == sizeof(T)); + data_ = std::move(other.data_); + length_ = other.length_; + DCHECK_NULL(other.data_); other.length_ = 0; + return *this; } // Returns the length of the vector as a size_t. @@ -217,14 +235,12 @@ class OwnedVector { // Returns whether or not the vector is empty. constexpr bool empty() const { return length_ == 0; } - // Returns the pointer to the start of the data in the vector. - T* start() const { + constexpr T* begin() const { DCHECK_IMPLIES(length_ > 0, data_ != nullptr); return data_.get(); } - constexpr T* begin() const { return start(); } - constexpr T* end() const { return start() + size(); } + constexpr T* end() const { return begin() + length_; } // Access individual vector elements - checks bounds in debug mode. T& operator[](size_t index) const { @@ -233,7 +249,7 @@ class OwnedVector { } // Returns a {Vector} view of the data in this vector. - Vector as_vector() const { return Vector(start(), size()); } + Vector as_vector() const { return {begin(), size()}; } // Releases the backing data from this vector and transfers ownership to the // caller. This vector will be empty afterwards. 
@@ -269,7 +285,7 @@ class OwnedVector { using non_const_t = typename std::remove_const::type; auto vec = OwnedVector::NewForOverwrite(std::distance(begin, end)); - std::copy(begin, end, vec.start()); + std::copy(begin, end, vec.begin()); return vec; } diff --git a/deps/v8/src/base/vlq.h b/deps/v8/src/base/vlq.h index 25dba27bfb2fe6..f17652bb0c0085 100644 --- a/deps/v8/src/base/vlq.h +++ b/deps/v8/src/base/vlq.h @@ -39,6 +39,16 @@ VLQEncodeUnsigned(Function&& process_byte, uint32_t value) { } while (value > kDataMask); } +inline uint32_t VLQConvertToUnsigned(int32_t value) { + // This wouldn't handle kMinInt correctly if it ever encountered it. + DCHECK_NE(value, std::numeric_limits::min()); + bool is_negative = value < 0; + // Encode sign in least significant bit. + uint32_t bits = static_cast((is_negative ? -value : value) << 1) | + static_cast(is_negative); + return bits; +} + // Encodes value using variable-length encoding and stores it using the passed // process_byte function. template @@ -46,12 +56,7 @@ inline typename std::enable_if< std::is_same()(0)), byte*>::value, void>::type VLQEncode(Function&& process_byte, int32_t value) { - // This wouldn't handle kMinInt correctly if it ever encountered it. - DCHECK_NE(value, std::numeric_limits::min()); - bool is_negative = value < 0; - // Encode sign in least significant bit. - uint32_t bits = static_cast((is_negative ? 
-value : value) << 1) | - static_cast(is_negative); + uint32_t bits = VLQConvertToUnsigned(value); VLQEncodeUnsigned(std::forward(process_byte), bits); } diff --git a/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h b/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h index 90a06006bb3593..654f98e9a9a901 100644 --- a/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h +++ b/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h @@ -608,14 +608,8 @@ void BaselineAssembler::LdaContextSlot(Register context, uint32_t index, void BaselineAssembler::StaContextSlot(Register context, Register value, uint32_t index, uint32_t depth) { ASM_CODE_COMMENT(masm_); - if (depth > 0) { - for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); - } - if (COMPRESS_POINTERS_BOOL) { - // Decompress tagged pointer. - __ AddS64(context, context, kPtrComprCageBaseRegister); - } + for (; depth > 0; --depth) { + LoadTaggedPointerField(context, context, Context::kPreviousOffset); } StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index), value); diff --git a/deps/v8/src/builtins/accessors.cc b/deps/v8/src/builtins/accessors.cc index 6c843de4f216d3..7b1a1446fde73f 100644 --- a/deps/v8/src/builtins/accessors.cc +++ b/deps/v8/src/builtins/accessors.cc @@ -17,6 +17,7 @@ #include "src/objects/contexts.h" #include "src/objects/field-index-inl.h" #include "src/objects/js-array-inl.h" +#include "src/objects/js-shared-array-inl.h" #include "src/objects/module-inl.h" #include "src/objects/property-details.h" #include "src/objects/prototype.h" @@ -229,27 +230,6 @@ Handle Accessors::MakeArrayLengthInfo(Isolate* isolate) { &ArrayLengthGetter, &ArrayLengthSetter); } -// -// Accessors::SharedArrayLength -// - -void Accessors::SharedArrayLengthGetter( - v8::Local name, const v8::PropertyCallbackInfo& info) { - i::Isolate* isolate = reinterpret_cast(info.GetIsolate()); - DisallowGarbageCollection no_gc; - HandleScope scope(isolate); - - 
Object value = *Utils::OpenHandle(*v8::Local(info.This())); - - Object result = Smi::FromInt(JSObject::cast(value).elements().length()); - info.GetReturnValue().Set(Utils::ToLocal(Handle(result, isolate))); -} - -Handle Accessors::MakeSharedArrayLengthInfo(Isolate* isolate) { - return MakeAccessor(isolate, isolate->factory()->length_string(), - &SharedArrayLengthGetter, nullptr); -} - // // Accessors::ModuleNamespaceEntry // diff --git a/deps/v8/src/builtins/accessors.h b/deps/v8/src/builtins/accessors.h index 8a8ea66b1f9d45..f2edcd8978d8c3 100644 --- a/deps/v8/src/builtins/accessors.h +++ b/deps/v8/src/builtins/accessors.h @@ -44,8 +44,6 @@ class JavaScriptFrame; kHasSideEffectToReceiver) \ V(_, function_prototype, FunctionPrototype, kHasNoSideEffect, \ kHasSideEffectToReceiver) \ - V(_, shared_array_length, SharedArrayLength, kHasNoSideEffect, \ - kHasSideEffectToReceiver) \ V(_, string_length, StringLength, kHasNoSideEffect, \ kHasSideEffectToReceiver) \ V(_, value_unavailable, ValueUnavailable, kHasNoSideEffect, \ diff --git a/deps/v8/src/builtins/array-from.tq b/deps/v8/src/builtins/array-from.tq index d442f5026ca0f9..103892d740074f 100644 --- a/deps/v8/src/builtins/array-from.tq +++ b/deps/v8/src/builtins/array-from.tq @@ -140,14 +140,27 @@ ArrayFrom(js-implicit context: NativeContext, receiver: JSAny)(...arguments): let a: JSReceiver; // 9. If IsConstructor(C) is true, then - typeswitch (c) { - case (c: Constructor): { - // a. Let A be ? Construct(C, « len »). - a = Construct(c, len); - } - case (JSAny): { - // a. Let A be ? ArrayCreate(len). - a = ArrayCreate(len); + try { + // Allocate an array with PACKED elements kind for fast-path rather than + // calling the constructor which creates an array with HOLEY kind. 
+ if (c != GetArrayFunction()) goto CreateWithConstructor; + if (len > kMaxFastArrayLength) goto CreateWithConstructor; + const smiLen: Smi = 0; + const capacity: intptr = Convert(len); + const map: Map = GetFastPackedSmiElementsJSArrayMap(); + a = AllocateJSArray( + ElementsKind::PACKED_SMI_ELEMENTS, map, capacity, smiLen, + AllocationFlag::kAllowLargeObjectAllocation); + } label CreateWithConstructor { + typeswitch (c) { + case (c: Constructor): { + // a. Let A be ? Construct(C, « len »). + a = Construct(c, len); + } + case (JSAny): { + // a. Let A be ? ArrayCreate(len). + a = ArrayCreate(len); + } } } diff --git a/deps/v8/src/builtins/array-of.tq b/deps/v8/src/builtins/array-of.tq index 44c2cdca27665f..b469ec5f5c4590 100644 --- a/deps/v8/src/builtins/array-of.tq +++ b/deps/v8/src/builtins/array-of.tq @@ -19,14 +19,21 @@ ArrayOf( let a: JSReceiver; // 4. If IsConstructor(C) is true, then - typeswitch (c) { - case (c: Constructor): { - // a. Let A be ? Construct(C, « len »). - a = Construct(c, len); - } - case (JSAny): { - // a. Let A be ? ArrayCreate(len). - a = ArrayCreate(len); + try { + // Allocate an array with PACKED elements kind for fast-path rather than + // calling the constructor which creates an array with HOLEY kind. + if (c != GetArrayFunction()) goto CreateWithConstructor; + a = NewJSArrayFilledWithZero(SmiUntag(len)) otherwise CreateWithConstructor; + } label CreateWithConstructor { + typeswitch (c) { + case (c: Constructor): { + // a. Let A be ? Construct(C, « len »). + a = Construct(c, len); + } + case (JSAny): { + // a. Let A be ? ArrayCreate(len). + a = ArrayCreate(len); + } } } diff --git a/deps/v8/src/builtins/array-reverse.tq b/deps/v8/src/builtins/array-reverse.tq index 69a678a51311f4..b5835f8be967ce 100644 --- a/deps/v8/src/builtins/array-reverse.tq +++ b/deps/v8/src/builtins/array-reverse.tq @@ -3,65 +3,50 @@ // found in the LICENSE file. 
namespace array { -macro LoadElement( +macro LoadElement( elements: FixedArrayBase, index: Smi): T; -LoadElement(implicit context: Context)( - elements: FixedArrayBase, index: Smi): Smi { +LoadElement(implicit context: Context)( + elements: FixedArrayBase, index: Smi): Object { const elements: FixedArray = UnsafeCast(elements); - return UnsafeCast(elements.objects[index]); + return elements.objects[index]; } -LoadElement(implicit context: Context)( - elements: FixedArrayBase, index: Smi): JSAny { - const elements: FixedArray = UnsafeCast(elements); - return UnsafeCast(elements.objects[index]); -} - -LoadElement( - implicit context: Context)(elements: FixedArrayBase, index: Smi): float64 { +LoadElement(implicit context: Context)( + elements: FixedArrayBase, index: Smi): float64_or_hole { const elements: FixedDoubleArray = UnsafeCast(elements); - // This macro is only used for PACKED_DOUBLE, loading the hole should - // be impossible. - return elements.floats[index].Value() otherwise unreachable; + return elements.floats[index]; } -macro StoreElement( +macro StoreElement( implicit context: Context)( elements: FixedArrayBase, index: Smi, value: T): void; -StoreElement(implicit context: Context)( - elements: FixedArrayBase, index: Smi, value: Smi): void { - const elems: FixedArray = UnsafeCast(elements); - StoreFixedArrayElement(elems, index, value); -} - -StoreElement(implicit context: Context)( - elements: FixedArrayBase, index: Smi, value: JSAny): void { +StoreElement(implicit context: Context)( + elements: FixedArrayBase, index: Smi, value: Object): void { const elements: FixedArray = UnsafeCast(elements); elements.objects[index] = value; } -StoreElement( - implicit context: Context)( - elements: FixedArrayBase, index: Smi, value: float64): void { +StoreElement(implicit context: Context)( + elements: FixedArrayBase, index: Smi, value: float64_or_hole): void { const elems: FixedDoubleArray = UnsafeCast(elements); - StoreFixedDoubleArrayElement(elems, index, value); 
+ elems.floats[index] = value; } // Fast-path for all PACKED_* elements kinds. These do not need to check // whether a property is present, so we can simply swap them using fast // FixedArray loads/stores. -macro FastPackedArrayReverse( +macro FastArrayReverse( implicit context: Context)(elements: FixedArrayBase, length: Smi): void { let lower: Smi = 0; let upper: Smi = length - 1; while (lower < upper) { - const lowerValue: T = LoadElement(elements, lower); - const upperValue: T = LoadElement(elements, upper); - StoreElement(elements, lower, upperValue); - StoreElement(elements, upper, lowerValue); + const lowerValue: T = LoadElement(elements, lower); + const upperValue: T = LoadElement(elements, upper); + StoreElement(elements, lower, upperValue); + StoreElement(elements, upper, lowerValue); ++lower; --upper; } @@ -144,19 +129,27 @@ macro TryFastPackedArrayReverse(implicit context: Context)(receiver: JSAny): const array: FastJSArray = Cast(receiver) otherwise Slow; const kind: ElementsKind = array.map.elements_kind; - if (kind == ElementsKind::PACKED_SMI_ELEMENTS) { - array::EnsureWriteableFastElements(array); - FastPackedArrayReverse( - array.elements, array.length); - } else if (kind == ElementsKind::PACKED_ELEMENTS) { + if (kind == ElementsKind::PACKED_SMI_ELEMENTS || + kind == ElementsKind::PACKED_ELEMENTS) { array::EnsureWriteableFastElements(array); - FastPackedArrayReverse( - array.elements, array.length); + FastArrayReverse(array.elements, array.length); } else if (kind == ElementsKind::PACKED_DOUBLE_ELEMENTS) { - FastPackedArrayReverse( + FastArrayReverse( array.elements, array.length); } else { - goto Slow; + if (!IsPrototypeInitialArrayPrototype(array.map)) goto Slow; + if (IsNoElementsProtectorCellInvalid()) goto Slow; + + if (kind == ElementsKind::HOLEY_SMI_ELEMENTS || + kind == ElementsKind::HOLEY_ELEMENTS) { + array::EnsureWriteableFastElements(array); + FastArrayReverse(array.elements, array.length); + } else if (kind == 
ElementsKind::HOLEY_DOUBLE_ELEMENTS) { + FastArrayReverse( + array.elements, array.length); + } else { + goto Slow; + } } } diff --git a/deps/v8/src/builtins/array-to-reversed.tq b/deps/v8/src/builtins/array-to-reversed.tq index 5d97d6546ea1fe..cb098e1a4555ae 100644 --- a/deps/v8/src/builtins/array-to-reversed.tq +++ b/deps/v8/src/builtins/array-to-reversed.tq @@ -3,14 +3,57 @@ // found in the LICENSE file. namespace array { -macro FastPackedArrayToReversed( + +macro FastPackedDoubleArrayToReversed(implicit context: Context)( + elements: FixedDoubleArray, length: Smi): JSArray { + // 3. Let A be ? ArrayCreate(𝔽(len)). + const copy: FixedDoubleArray = + UnsafeCast(AllocateFixedArray( + ElementsKind::PACKED_DOUBLE_ELEMENTS, SmiUntag(length), + AllocationFlag::kAllowLargeObjectAllocation)); + + // 4. Let k be 0. + let k: Smi = 0; + + // 5. Repeat, while k < len, + while (k < length) { + // a. Let from be ! ToString(𝔽(len - k - 1)). + // b. Let Pk be ! ToString(𝔽(k)). + const from = length - k - 1; + + // c. Let fromValue be ? Get(O, from). + const fromValue: float64 = + elements.floats[from].Value() otherwise unreachable; + + // d. Perform ! CreateDataPropertyOrThrow(A, Pk, fromValue). + StoreFixedDoubleArrayElement(copy, k, fromValue); + + // e. Set k to k + 1. + ++k; + } + + // 6. Return A. + const map: Map = LoadJSArrayElementsMap( + ElementsKind::PACKED_DOUBLE_ELEMENTS, LoadNativeContext(context)); + return NewJSArray(map, copy); +} + +macro FastArrayToReversed( implicit context: Context)( - kind: constexpr ElementsKind, elements: FixedArrayBase, - length: Smi): JSArray { + kind: constexpr ElementsKind, elements: FromElements, length: Smi, + initializeArray: constexpr bool): JSArray { // 3. Let A be ? ArrayCreate(𝔽(len)). const copy: FixedArrayBase = AllocateFixedArray( kind, SmiUntag(length), AllocationFlag::kAllowLargeObjectAllocation); + // Reversing HOLEY_DOUBLE_ELEMENTS array may allocate heap numbers. 
+ // We need to initialize the array to avoid running GC with garbage values. + if (initializeArray) { + dcheck(Is(copy)); + FillFixedArrayWithSmiZero( + kind, UnsafeCast(copy), 0, SmiUntag(length)); + } + // 4. Let k be 0. let k: Smi = 0; @@ -21,10 +64,10 @@ macro FastPackedArrayToReversed( const from = length - k - 1; // c. Let fromValue be ? Get(O, from). - const fromValue: T = LoadElement(elements, from); + const fromValue: Object = LoadElementOrUndefined(elements, from); // d. Perform ! CreateDataPropertyOrThrow(A, Pk, fromValue). - StoreElement(copy, k, fromValue); + StoreElement(copy, k, fromValue); // e. Set k to k + 1. ++k; @@ -35,7 +78,7 @@ macro FastPackedArrayToReversed( return NewJSArray(map, copy); } -macro TryFastPackedArrayToReversed(implicit context: Context)(receiver: JSAny): +macro TryFastArrayToReversed(implicit context: Context)(receiver: JSAny): JSArray labels Slow { const array: FastJSArray = Cast(receiver) otherwise Slow; @@ -43,19 +86,33 @@ macro TryFastPackedArrayToReversed(implicit context: Context)(receiver: JSAny): const kind: ElementsKind = array.map.elements_kind; if (kind == ElementsKind::PACKED_SMI_ELEMENTS) { - return FastPackedArrayToReversed( - ElementsKind::PACKED_SMI_ELEMENTS, array.elements, array.length); + return FastArrayToReversed( + ElementsKind::PACKED_SMI_ELEMENTS, + UnsafeCast(array.elements), array.length, false); + } else if (kind == ElementsKind::PACKED_ELEMENTS) { + return FastArrayToReversed( + ElementsKind::PACKED_ELEMENTS, UnsafeCast(array.elements), + array.length, false); + } else if (kind == ElementsKind::PACKED_DOUBLE_ELEMENTS) { + return FastPackedDoubleArrayToReversed( + UnsafeCast(array.elements), array.length); + } else { + if (!IsPrototypeInitialArrayPrototype(array.map)) goto Slow; + if (IsNoElementsProtectorCellInvalid()) goto Slow; + + if (kind == ElementsKind::HOLEY_SMI_ELEMENTS || + kind == ElementsKind::HOLEY_ELEMENTS) { + return FastArrayToReversed( + ElementsKind::PACKED_ELEMENTS, 
UnsafeCast(array.elements), + array.length, false); + } else if (kind == ElementsKind::HOLEY_DOUBLE_ELEMENTS) { + return FastArrayToReversed( + ElementsKind::PACKED_ELEMENTS, + UnsafeCast(array.elements), array.length, true); + } + + goto Slow; } - if (kind == ElementsKind::PACKED_ELEMENTS) { - return FastPackedArrayToReversed( - ElementsKind::PACKED_ELEMENTS, array.elements, array.length); - } - if (kind == ElementsKind::PACKED_DOUBLE_ELEMENTS) { - return FastPackedArrayToReversed( - ElementsKind::PACKED_DOUBLE_ELEMENTS, array.elements, array.length); - } - - goto Slow; } transitioning builtin GenericArrayToReversed(implicit context: Context)( @@ -96,7 +153,7 @@ transitioning builtin GenericArrayToReversed(implicit context: Context)( transitioning javascript builtin ArrayPrototypeToReversed( js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny { try { - return TryFastPackedArrayToReversed(receiver) otherwise Slow; + return TryFastArrayToReversed(receiver) otherwise Slow; } label Slow { return GenericArrayToReversed(receiver); } diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq index e1de5dee655d7a..b5de00e6b76eda 100644 --- a/deps/v8/src/builtins/base.tq +++ b/deps/v8/src/builtins/base.tq @@ -358,7 +358,12 @@ constexpr 'CodeStubAssembler::ExtractFixedArrayFlag' { ... } +const kBigIntMaxLengthBits: + constexpr uintptr generates 'BigInt::kMaxLengthBits'; const kBigIntMaxLength: constexpr intptr generates 'BigInt::kMaxLength'; +const kBigIntDigitSize: constexpr intptr generates 'kSystemPointerSize'; +const kBitsPerByte: constexpr intptr generates 'kBitsPerByte'; +const kBigIntDigitBits: intptr = kBigIntDigitSize * kBitsPerByte; extern enum MessageTemplate { kAllPromisesRejected, @@ -442,6 +447,9 @@ extern enum MessageTemplate { kInvalidWeakRefsRegisterTarget, kInvalidWeakRefsUnregisterToken, kInvalidWeakRefsWeakRefConstructorTarget, + kObjectGetterCallable, + kObjectSetterCallable, + kPropertyDescObject, ... 
} @@ -777,6 +785,8 @@ extern transitioning runtime NormalizeElements(Context, JSObject): void; extern transitioning runtime TransitionElementsKindWithKind( Context, JSObject, Smi): void; +extern macro LoadObjectField(HeapObject, constexpr int32): Object; + extern macro LoadBufferObject(RawPtr, constexpr int32): Object; extern macro LoadBufferPointer(RawPtr, constexpr int32): RawPtr; extern macro LoadBufferSmi(RawPtr, constexpr int32): Smi; @@ -960,6 +970,8 @@ extern operator '*' macro IntPtrMul(intptr, intptr): intptr; extern operator '*' macro Int64Mul(int64, int64): int64; extern operator '/' macro IntPtrDiv(intptr, intptr): intptr; extern operator '/' macro Int64Div(int64, int64): int64; +extern operator '%' macro IntPtrMod(intptr, intptr): intptr; +extern operator '%' macro Int64Mod(int64, int64): int64; extern operator '<<' macro WordShl(intptr, intptr): intptr; extern operator '>>' macro WordSar(intptr, intptr): intptr; extern operator '&' macro WordAnd(intptr, intptr): intptr; @@ -1475,6 +1487,23 @@ extern macro BranchIfSameValue(JSAny, JSAny): never labels Taken, NotTaken; macro SameValue(a: JSAny, b: JSAny): bool { BranchIfSameValue(a, b) otherwise return true, return false; } +macro SameValue(a: (JSAny|TheHole), b: (JSAny|TheHole)): bool { + typeswitch (a) { + case (a: TheHole): { + return UnsafeCast(b) == a; + } + case (a: JSAny): { + typeswitch (b) { + case (TheHole): { + return false; + } + case (b: JSAny): { + return SameValue(a, b); + } + } + } + } +} // Does "if (index1 + index2 > limit) goto IfOverflow" in an uintptr overflow // friendly way where index1 and index2 are in [0, kMaxSafeInteger] range. 
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc index bfb248e0b24bf9..f72d3793370c5a 100644 --- a/deps/v8/src/builtins/builtins-array-gen.cc +++ b/deps/v8/src/builtins/builtins-array-gen.cc @@ -241,7 +241,7 @@ void ArrayBuiltinsAssembler::VisitAllTypedArrayElements( a_ = processor(this, value.value(), index); } }, - incr, advance_mode); + incr, LoopUnrollingMode::kNo, advance_mode); } TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) { diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc index 49fe48d6987a46..eee5ba5337ec69 100644 --- a/deps/v8/src/builtins/builtins-array.cc +++ b/deps/v8/src/builtins/builtins-array.cc @@ -1104,7 +1104,7 @@ bool IterateElements(Isolate* isolate, Handle receiver, !HasOnlySimpleElements(isolate, *receiver)) { return IterateElementsSlow(isolate, receiver, length, visitor); } - Handle array = Handle::cast(receiver); + Handle array = Handle::cast(receiver); switch (array->GetElementsKind()) { case PACKED_SMI_ELEMENTS: @@ -1228,17 +1228,14 @@ bool IterateElements(Isolate* isolate, Handle receiver, UNIMPLEMENTED(); case NO_ELEMENTS: break; + // JSArrays cannot have the following elements kinds: #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS: TYPED_ARRAYS(TYPED_ARRAY_CASE) - return IterateElementsSlow(isolate, receiver, length, visitor); RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE) - // TODO(v8:11111): Support RAB / GSAB. - UNREACHABLE(); #undef TYPED_ARRAY_CASE case FAST_STRING_WRAPPER_ELEMENTS: case SLOW_STRING_WRAPPER_ELEMENTS: case SHARED_ARRAY_ELEMENTS: - // |array| is guaranteed to be an array or typed array. 
UNREACHABLE(); } visitor->increase_index_offset(length); diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc index 26dcabe6c3b320..517d45274488c1 100644 --- a/deps/v8/src/builtins/builtins-async-generator-gen.cc +++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc @@ -598,8 +598,9 @@ TF_BUILTIN(AsyncGeneratorReject, AsyncGeneratorBuiltinsAssembler) { TakeFirstAsyncGeneratorRequestFromQueue(generator); TNode promise = LoadPromiseFromAsyncGeneratorRequest(next); + // No debug event needed, there was already a debug event that got us here. Return(CallBuiltin(Builtin::kRejectPromise, context, promise, value, - TrueConstant())); + FalseConstant())); } TF_BUILTIN(AsyncGeneratorYieldWithAwait, AsyncGeneratorBuiltinsAssembler) { diff --git a/deps/v8/src/builtins/builtins-bigint-gen.h b/deps/v8/src/builtins/builtins-bigint-gen.h index c6f5888b9d9505..0c292104e0a806 100644 --- a/deps/v8/src/builtins/builtins-bigint-gen.h +++ b/deps/v8/src/builtins/builtins-bigint-gen.h @@ -147,6 +147,125 @@ class BigIntBuiltinsAssembler : public CodeStubAssembler { std::make_pair(MachineType::AnyTagged(), y)); } + void CppBitwiseOrPosPosAndCanonicalize(TNode result, TNode x, + TNode y) { + TNode + mutable_big_int_bitwise_or_pos_pos_and_canonicalize = ExternalConstant( + ExternalReference:: + mutable_big_int_bitwise_or_pp_and_canonicalize_function()); + CallCFunction(mutable_big_int_bitwise_or_pos_pos_and_canonicalize, + MachineType::AnyTagged(), + std::make_pair(MachineType::AnyTagged(), result), + std::make_pair(MachineType::AnyTagged(), x), + std::make_pair(MachineType::AnyTagged(), y)); + } + + void CppBitwiseOrNegNegAndCanonicalize(TNode result, TNode x, + TNode y) { + TNode + mutable_big_int_bitwise_or_neg_neg_and_canonicalize = ExternalConstant( + ExternalReference:: + mutable_big_int_bitwise_or_nn_and_canonicalize_function()); + CallCFunction(mutable_big_int_bitwise_or_neg_neg_and_canonicalize, + 
MachineType::AnyTagged(), + std::make_pair(MachineType::AnyTagged(), result), + std::make_pair(MachineType::AnyTagged(), x), + std::make_pair(MachineType::AnyTagged(), y)); + } + + void CppBitwiseOrPosNegAndCanonicalize(TNode result, TNode x, + TNode y) { + TNode + mutable_big_int_bitwise_or_pos_neg_and_canonicalize = ExternalConstant( + ExternalReference:: + mutable_big_int_bitwise_or_pn_and_canonicalize_function()); + CallCFunction(mutable_big_int_bitwise_or_pos_neg_and_canonicalize, + MachineType::AnyTagged(), + std::make_pair(MachineType::AnyTagged(), result), + std::make_pair(MachineType::AnyTagged(), x), + std::make_pair(MachineType::AnyTagged(), y)); + } + + void CppBitwiseXorPosPosAndCanonicalize(TNode result, TNode x, + TNode y) { + TNode + mutable_big_int_bitwise_xor_pos_pos_and_canonicalize = ExternalConstant( + ExternalReference:: + mutable_big_int_bitwise_xor_pp_and_canonicalize_function()); + CallCFunction(mutable_big_int_bitwise_xor_pos_pos_and_canonicalize, + MachineType::AnyTagged(), + std::make_pair(MachineType::AnyTagged(), result), + std::make_pair(MachineType::AnyTagged(), x), + std::make_pair(MachineType::AnyTagged(), y)); + } + + void CppBitwiseXorNegNegAndCanonicalize(TNode result, TNode x, + TNode y) { + TNode + mutable_big_int_bitwise_xor_neg_neg_and_canonicalize = ExternalConstant( + ExternalReference:: + mutable_big_int_bitwise_xor_nn_and_canonicalize_function()); + CallCFunction(mutable_big_int_bitwise_xor_neg_neg_and_canonicalize, + MachineType::AnyTagged(), + std::make_pair(MachineType::AnyTagged(), result), + std::make_pair(MachineType::AnyTagged(), x), + std::make_pair(MachineType::AnyTagged(), y)); + } + + void CppBitwiseXorPosNegAndCanonicalize(TNode result, TNode x, + TNode y) { + TNode + mutable_big_int_bitwise_xor_pos_neg_and_canonicalize = ExternalConstant( + ExternalReference:: + mutable_big_int_bitwise_xor_pn_and_canonicalize_function()); + CallCFunction(mutable_big_int_bitwise_xor_pos_neg_and_canonicalize, + 
MachineType::AnyTagged(), + std::make_pair(MachineType::AnyTagged(), result), + std::make_pair(MachineType::AnyTagged(), x), + std::make_pair(MachineType::AnyTagged(), y)); + } + + void CppLeftShiftAndCanonicalize(TNode result, TNode x, + TNode shift) { + TNode mutable_big_int_left_shift_and_canonicalize = + ExternalConstant( + ExternalReference:: + mutable_big_int_left_shift_and_canonicalize_function()); + CallCFunction(mutable_big_int_left_shift_and_canonicalize, + MachineType::AnyTagged(), + std::make_pair(MachineType::AnyTagged(), result), + std::make_pair(MachineType::AnyTagged(), x), + std::make_pair(MachineType::IntPtr(), shift)); + } + + TNode CppRightShiftResultLength(TNode x, + TNode x_sign, + TNode shift) { + TNode big_int_right_shift_result_length = + ExternalConstant( + ExternalReference::big_int_right_shift_result_length_function()); + return UncheckedCast( + CallCFunction(big_int_right_shift_result_length, MachineType::Uint32(), + std::make_pair(MachineType::AnyTagged(), x), + std::make_pair(MachineType::Uint32(), x_sign), + std::make_pair(MachineType::IntPtr(), shift))); + } + + void CppRightShiftAndCanonicalize(TNode result, TNode x, + TNode shift, + TNode must_round_down) { + TNode mutable_big_int_right_shift_and_canonicalize = + ExternalConstant( + ExternalReference:: + mutable_big_int_right_shift_and_canonicalize_function()); + CallCFunction(mutable_big_int_right_shift_and_canonicalize, + MachineType::AnyTagged(), + std::make_pair(MachineType::AnyTagged(), result), + std::make_pair(MachineType::AnyTagged(), x), + std::make_pair(MachineType::IntPtr(), shift), + std::make_pair(MachineType::Uint32(), must_round_down)); + } + TNode CppAbsoluteCompare(TNode x, TNode y) { TNode mutable_big_int_absolute_compare = ExternalConstant( diff --git a/deps/v8/src/builtins/builtins-bigint.tq b/deps/v8/src/builtins/builtins-bigint.tq index feacfccdf98b58..ade83dc965e501 100644 --- a/deps/v8/src/builtins/builtins-bigint.tq +++ 
b/deps/v8/src/builtins/builtins-bigint.tq @@ -9,6 +9,8 @@ namespace bigint { const kPositiveSign: uint32 = 0; const kNegativeSign: uint32 = 1; +const kMustRoundDownBitShift: uint32 = 30; + extern macro BigIntBuiltinsAssembler::CppAbsoluteAddAndCanonicalize( MutableBigInt, BigIntBase, BigIntBase): void; extern macro BigIntBuiltinsAssembler::CppAbsoluteSubAndCanonicalize( @@ -25,6 +27,24 @@ extern macro BigIntBuiltinsAssembler::CppBitwiseAndNegNegAndCanonicalize( MutableBigInt, BigIntBase, BigIntBase): void; extern macro BigIntBuiltinsAssembler::CppBitwiseAndPosNegAndCanonicalize( MutableBigInt, BigIntBase, BigIntBase): void; +extern macro BigIntBuiltinsAssembler::CppBitwiseOrPosPosAndCanonicalize( + MutableBigInt, BigIntBase, BigIntBase): void; +extern macro BigIntBuiltinsAssembler::CppBitwiseOrNegNegAndCanonicalize( + MutableBigInt, BigIntBase, BigIntBase): void; +extern macro BigIntBuiltinsAssembler::CppBitwiseOrPosNegAndCanonicalize( + MutableBigInt, BigIntBase, BigIntBase): void; +extern macro BigIntBuiltinsAssembler::CppBitwiseXorPosPosAndCanonicalize( + MutableBigInt, BigIntBase, BigIntBase): void; +extern macro BigIntBuiltinsAssembler::CppBitwiseXorNegNegAndCanonicalize( + MutableBigInt, BigIntBase, BigIntBase): void; +extern macro BigIntBuiltinsAssembler::CppBitwiseXorPosNegAndCanonicalize( + MutableBigInt, BigIntBase, BigIntBase): void; +extern macro BigIntBuiltinsAssembler::CppLeftShiftAndCanonicalize( + MutableBigInt, BigIntBase, intptr): void; +extern macro BigIntBuiltinsAssembler::CppRightShiftResultLength( + BigIntBase, uint32, intptr): uint32; +extern macro BigIntBuiltinsAssembler::CppRightShiftAndCanonicalize( + MutableBigInt, BigIntBase, intptr, uint32): void; extern macro BigIntBuiltinsAssembler::CppAbsoluteCompare( BigIntBase, BigIntBase): int32; @@ -474,6 +494,310 @@ builtin BigIntBitwiseAnd(implicit context: Context)( } } +macro BigIntBitwiseOrImpl(implicit context: Context)( + x: BigInt, y: BigInt): BigInt { + const xlength = 
ReadBigIntLength(x); + const ylength = ReadBigIntLength(y); + + // case: 0n | y + if (xlength == 0) { + return y; + } + + // case: x | 0n + if (ylength == 0) { + return x; + } + + const xsign = ReadBigIntSign(x); + const ysign = ReadBigIntSign(y); + const resultLength = (xlength > ylength) ? xlength : ylength; + + if (xsign == kPositiveSign && ysign == kPositiveSign) { + const result = AllocateEmptyBigIntNoThrow(kPositiveSign, resultLength) + otherwise unreachable; + CppBitwiseOrPosPosAndCanonicalize(result, x, y); + return Convert(result); + } else if (xsign == kNegativeSign && ysign == kNegativeSign) { + const result = AllocateEmptyBigIntNoThrow(kNegativeSign, resultLength) + otherwise unreachable; + CppBitwiseOrNegNegAndCanonicalize(result, x, y); + return Convert(result); + } else if (xsign == kPositiveSign && ysign == kNegativeSign) { + const result = AllocateEmptyBigIntNoThrow(kNegativeSign, resultLength) + otherwise unreachable; + CppBitwiseOrPosNegAndCanonicalize(result, x, y); + return Convert(result); + } else { + const result = AllocateEmptyBigIntNoThrow(kNegativeSign, resultLength) + otherwise unreachable; + CppBitwiseOrPosNegAndCanonicalize(result, y, x); + return Convert(result); + } +} + +builtin BigIntBitwiseOrNoThrow(implicit context: Context)( + x: BigInt, y: BigInt): Numeric { + return BigIntBitwiseOrImpl(x, y); +} + +builtin BigIntBitwiseOr(implicit context: Context)( + xNum: Numeric, yNum: Numeric): BigInt { + try { + const x = Cast(xNum) otherwise MixedTypes; + const y = Cast(yNum) otherwise MixedTypes; + + return BigIntBitwiseOrImpl(x, y); + } label MixedTypes { + ThrowTypeError(MessageTemplate::kBigIntMixedTypes); + } +} + +macro BigIntBitwiseXorImpl(implicit context: Context)( + x: BigInt, y: BigInt): BigInt labels BigIntTooBig { + const xlength = ReadBigIntLength(x); + const ylength = ReadBigIntLength(y); + + // case: 0n ^ y + if (xlength == 0) { + return y; + } + + // case: x ^ 0n + if (ylength == 0) { + return x; + } + + const xsign = 
ReadBigIntSign(x); + const ysign = ReadBigIntSign(y); + + if (xsign == kPositiveSign && ysign == kPositiveSign) { + const resultLength = (xlength > ylength) ? xlength : ylength; + const result = AllocateEmptyBigIntNoThrow(kPositiveSign, resultLength) + otherwise unreachable; + CppBitwiseXorPosPosAndCanonicalize(result, x, y); + return Convert(result); + } else if (xsign == kNegativeSign && ysign == kNegativeSign) { + const resultLength = (xlength > ylength) ? xlength : ylength; + const result = AllocateEmptyBigIntNoThrow(kPositiveSign, resultLength) + otherwise unreachable; + CppBitwiseXorNegNegAndCanonicalize(result, x, y); + return Convert(result); + } else if (xsign == kPositiveSign && ysign == kNegativeSign) { + const resultLength = ((xlength > ylength) ? xlength : ylength) + 1; + const result = AllocateEmptyBigIntNoThrow(kNegativeSign, resultLength) + otherwise BigIntTooBig; + CppBitwiseXorPosNegAndCanonicalize(result, x, y); + return Convert(result); + } else { + const resultLength = ((xlength > ylength) ? xlength : ylength) + 1; + const result = AllocateEmptyBigIntNoThrow(kNegativeSign, resultLength) + otherwise BigIntTooBig; + CppBitwiseXorPosNegAndCanonicalize(result, y, x); + return Convert(result); + } +} + +builtin BigIntBitwiseXorNoThrow(implicit context: Context)( + x: BigInt, y: BigInt): Numeric { + try { + return BigIntBitwiseXorImpl(x, y) otherwise BigIntTooBig; + } label BigIntTooBig { + // Smi sentinel 0 is used to signal BigIntTooBig exception. 
+ return Convert(0); + } +} + +builtin BigIntBitwiseXor(implicit context: Context)( + xNum: Numeric, yNum: Numeric): BigInt { + try { + const x = Cast(xNum) otherwise MixedTypes; + const y = Cast(yNum) otherwise MixedTypes; + + return BigIntBitwiseXorImpl(x, y) otherwise BigIntTooBig; + } label MixedTypes { + ThrowTypeError(MessageTemplate::kBigIntMixedTypes); + } label BigIntTooBig { + ThrowRangeError(MessageTemplate::kBigIntTooBig); + } +} + +macro MutableBigIntLeftShiftByAbsolute(implicit context: Context)( + x: BigInt, y: BigInt): BigInt labels BigIntTooBig { + const xlength = ReadBigIntLength(x); + const ylength = ReadBigIntLength(y); + + // case: 0n << y + if (xlength == 0) { + return x; + } + + // case: x << 0n + if (ylength == 0) { + return x; + } + + if (ylength > 1) { + // Depends on kBigIntMaxLengthBits <= (1 << kBigIntDigitSize). + goto BigIntTooBig; + } + const shiftAbs = LoadBigIntDigit(y, 0); + if (shiftAbs > kBigIntMaxLengthBits) { + goto BigIntTooBig; + } + + // {shift} is positive. 
+ const shift = Convert(shiftAbs); + let resultLength = xlength + shift / kBigIntDigitBits; + const bitsShift = shift % kBigIntDigitBits; + const xmsd = LoadBigIntDigit(x, xlength - 1); + if (bitsShift != 0 && + xmsd >>> Convert(kBigIntDigitBits - bitsShift) != 0) { + resultLength++; + } + const result = AllocateEmptyBigIntNoThrow(ReadBigIntSign(x), resultLength) + otherwise BigIntTooBig; + CppLeftShiftAndCanonicalize(result, x, shift); + return Convert(result); +} + +macro RightShiftByMaximum(implicit context: Context)(sign: uint32): BigInt { + if (sign == kNegativeSign) { + const minusOne = AllocateEmptyBigInt(kNegativeSign, 1); + StoreBigIntDigit(minusOne, 0, 1); + return Convert(minusOne); + } else { + return Convert(AllocateEmptyBigInt(kPositiveSign, 0)); + } +} + +macro MutableBigIntRightShiftByAbsolute(implicit context: Context)( + x: BigInt, y: BigInt): BigInt { + const xlength = ReadBigIntLength(x); + const ylength = ReadBigIntLength(y); + + // case: 0n >> y + if (xlength == 0) { + return x; + } + + // case: x >> 0n + if (ylength == 0) { + return x; + } + + const sign = ReadBigIntSign(x); + if (ylength > 1) { + // Depends on kBigIntMaxLengthBits <= (1 << kBigIntDigitSize). + return RightShiftByMaximum(sign); + } + const shiftAbs = LoadBigIntDigit(y, 0); + if (shiftAbs > kBigIntMaxLengthBits) { + return RightShiftByMaximum(sign); + } + + // {shift} is positive. 
+ const shift = Convert(shiftAbs); + const returnVal = CppRightShiftResultLength(x, sign, shift); + const mustRoundDown = returnVal >>> kMustRoundDownBitShift; + const lengthMask = (1 << kMustRoundDownBitShift) - 1; + const resultLength = Convert(returnVal & lengthMask); + if (resultLength == 0) { + return RightShiftByMaximum(sign); + } + + const result = AllocateEmptyBigIntNoThrow(sign, resultLength) + otherwise unreachable; + CppRightShiftAndCanonicalize(result, x, shift, mustRoundDown); + return Convert(result); +} + +macro BigIntShiftLeftImpl(implicit context: Context)( + x: BigInt, y: BigInt): BigInt labels BigIntTooBig { + if (ReadBigIntSign(y) == kNegativeSign) { + return MutableBigIntRightShiftByAbsolute(x, y); + } else { + return MutableBigIntLeftShiftByAbsolute(x, y) otherwise BigIntTooBig; + } +} + +macro BigIntShiftRightImpl(implicit context: Context)( + x: BigInt, y: BigInt): BigInt labels BigIntTooBig { + if (ReadBigIntSign(y) == kNegativeSign) { + return MutableBigIntLeftShiftByAbsolute(x, y) otherwise BigIntTooBig; + } else { + return MutableBigIntRightShiftByAbsolute(x, y); + } +} + +builtin BigIntShiftLeftNoThrow(implicit context: Context)( + x: BigInt, y: BigInt): Numeric { + try { + return BigIntShiftLeftImpl(x, y) otherwise BigIntTooBig; + } label BigIntTooBig { + // Smi sentinel 0 is used to signal BigIntTooBig exception. 
+ return Convert(0); + } +} + +builtin BigIntShiftLeft(implicit context: Context)( + xNum: Numeric, yNum: Numeric): BigInt { + try { + const x = Cast(xNum) otherwise MixedTypes; + const y = Cast(yNum) otherwise MixedTypes; + + return BigIntShiftLeftImpl(x, y) otherwise BigIntTooBig; + } label MixedTypes { + ThrowTypeError(MessageTemplate::kBigIntMixedTypes); + } label BigIntTooBig { + ThrowRangeError(MessageTemplate::kBigIntTooBig); + } +} + +builtin BigIntShiftRightNoThrow(implicit context: Context)( + x: BigInt, y: BigInt): Numeric { + try { + return BigIntShiftRightImpl(x, y) otherwise BigIntTooBig; + } label BigIntTooBig { + // Smi sentinel 0 is used to signal BigIntTooBig exception. + return Convert(0); + } +} + +builtin BigIntShiftRight(implicit context: Context)( + xNum: Numeric, yNum: Numeric): BigInt { + try { + const x = Cast(xNum) otherwise MixedTypes; + const y = Cast(yNum) otherwise MixedTypes; + + return BigIntShiftRightImpl(x, y) otherwise BigIntTooBig; + } label MixedTypes { + ThrowTypeError(MessageTemplate::kBigIntMixedTypes); + } label BigIntTooBig { + ThrowRangeError(MessageTemplate::kBigIntTooBig); + } +} + +builtin BigIntEqual(implicit context: Context)(x: BigInt, y: BigInt): Boolean { + if (ReadBigIntSign(x) != ReadBigIntSign(y)) { + return False; + } + + const xlength = ReadBigIntLength(x); + const ylength = ReadBigIntLength(y); + if (xlength != ylength) { + return False; + } + + for (let i: intptr = 0; i < xlength; ++i) { + if (LoadBigIntDigit(x, i) != LoadBigIntDigit(y, i)) { + return False; + } + } + + return True; +} + builtin BigIntUnaryMinus(implicit context: Context)(bigint: BigInt): BigInt { const length = ReadBigIntLength(bigint); diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc index ba70f973680155..896875cada149a 100644 --- a/deps/v8/src/builtins/builtins-call-gen.cc +++ b/deps/v8/src/builtins/builtins-call-gen.cc @@ -737,7 +737,8 @@ void 
CallOrConstructBuiltinsAssembler::CallFunctionTemplate( TNode call_data = LoadObjectField(call_handler_info, CallHandlerInfo::kDataOffset); TailCallStub(CodeFactory::CallApiCallback(isolate()), context, callback, - args.GetLengthWithoutReceiver(), call_data, holder); + TruncateIntPtrToInt32(args.GetLengthWithoutReceiver()), + call_data, holder); } TF_BUILTIN(CallFunctionTemplate_CheckAccess, CallOrConstructBuiltinsAssembler) { diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc index bf8d735a0c7230..9e78c49adc84bc 100644 --- a/deps/v8/src/builtins/builtins-collections-gen.cc +++ b/deps/v8/src/builtins/builtins-collections-gen.cc @@ -178,7 +178,7 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray( // to the collection does not call user code that could mutate the elements // or collection. BuildFastLoop(IntPtrConstant(0), length, set_entry, 1, - IndexAdvanceMode::kPost); + LoopUnrollingMode::kNo, IndexAdvanceMode::kPost); Goto(&exit); } BIND(&if_doubles); @@ -199,7 +199,7 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray( AddConstructorEntry(variant, context, collection, add_func, entry); }; BuildFastLoop(IntPtrConstant(0), length, set_entry, 1, - IndexAdvanceMode::kPost); + LoopUnrollingMode::kNo, IndexAdvanceMode::kPost); Goto(&exit); } } diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc index cb2b79bef7ce72..c90bbddc9ea1f8 100644 --- a/deps/v8/src/builtins/builtins-constructor-gen.cc +++ b/deps/v8/src/builtins/builtins-constructor-gen.cc @@ -382,7 +382,7 @@ TNode ConstructorBuiltinsAssembler::FastNewFunctionContext( [=](TNode offset) { StoreObjectFieldNoWriteBarrier(function_context, offset, undefined); }, - kTaggedSize, IndexAdvanceMode::kPost); + kTaggedSize, LoopUnrollingMode::kYes, IndexAdvanceMode::kPost); return function_context; } @@ -664,7 +664,7 @@ TNode 
ConstructorBuiltinsAssembler::CreateShallowObjectLiteral( TNode field = LoadObjectField(boilerplate, offset); StoreObjectFieldNoWriteBarrier(copy, offset, field); }, - kTaggedSize, IndexAdvanceMode::kPost); + kTaggedSize, LoopUnrollingMode::kYes, IndexAdvanceMode::kPost); CopyMutableHeapNumbersInObject(copy, offset.value(), instance_size); Goto(&done_init); } @@ -714,7 +714,7 @@ void ConstructorBuiltinsAssembler::CopyMutableHeapNumbersInObject( } BIND(&continue_loop); }, - kTaggedSize, IndexAdvanceMode::kPost); + kTaggedSize, LoopUnrollingMode::kNo, IndexAdvanceMode::kPost); } } // namespace internal diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc index 093b5e978ae919..b38e9f381ae702 100644 --- a/deps/v8/src/builtins/builtins-conversion-gen.cc +++ b/deps/v8/src/builtins/builtins-conversion-gen.cc @@ -21,6 +21,13 @@ TF_BUILTIN(ToNumber, CodeStubAssembler) { Return(ToNumber(context, input)); } +TF_BUILTIN(ToBigInt, CodeStubAssembler) { + auto context = Parameter(Descriptor::kContext); + auto input = Parameter(Descriptor::kArgument); + + Return(ToBigInt(context, input)); +} + TF_BUILTIN(ToNumber_Baseline, CodeStubAssembler) { auto input = Parameter(Descriptor::kArgument); auto slot = UncheckedParameter(Descriptor::kSlot); @@ -63,6 +70,13 @@ TF_BUILTIN(ToNumberConvertBigInt, CodeStubAssembler) { Return(ToNumber(context, input, BigIntHandling::kConvertToNumber)); } +TF_BUILTIN(ToBigIntConvertNumber, CodeStubAssembler) { + auto context = Parameter(Descriptor::kContext); + auto input = Parameter(Descriptor::kArgument); + + Return(ToBigIntConvertNumber(context, input)); +} + // ES6 section 7.1.2 ToBoolean ( argument ) // Requires parameter on stack so that it can be used as a continuation from a // LAZY deopt. 
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h index 859b5cee9a6731..65d57b1ea0f4ef 100644 --- a/deps/v8/src/builtins/builtins-definitions.h +++ b/deps/v8/src/builtins/builtins-definitions.h @@ -257,10 +257,12 @@ namespace internal { \ /* Type conversions */ \ TFC(ToNumber, TypeConversion) \ + TFC(ToBigInt, TypeConversion) \ TFC(ToNumber_Baseline, TypeConversion_Baseline) \ TFC(ToNumeric_Baseline, TypeConversion_Baseline) \ TFC(PlainPrimitiveToNumber, TypeConversionNoContext) \ TFC(ToNumberConvertBigInt, TypeConversion) \ + TFC(ToBigIntConvertNumber, TypeConversion) \ TFC(Typeof, Typeof) \ TFC(BigIntToI64, BigIntToI64) \ TFC(BigIntToI32Pair, BigIntToI32Pair) \ @@ -834,7 +836,6 @@ namespace internal { ASM(ReflectApply, JSTrampoline) \ ASM(ReflectConstruct, JSTrampoline) \ CPP(ReflectDefineProperty) \ - CPP(ReflectGetOwnPropertyDescriptor) \ CPP(ReflectOwnKeys) \ CPP(ReflectSet) \ \ @@ -1094,10 +1095,12 @@ namespace internal { TFS(GetPropertyWithReceiver, kObject, kKey, kReceiver, kOnNonExistent) \ TFS(SetProperty, kReceiver, kKey, kValue) \ TFS(CreateDataProperty, kReceiver, kKey, kValue) \ + TFS(GetOwnPropertyDescriptor, kReceiver, kKey) \ ASM(MemCopyUint8Uint8, CCall) \ ASM(MemMove, CCall) \ TFC(FindNonDefaultConstructorOrConstruct, \ FindNonDefaultConstructorOrConstruct) \ + TFS(OrdinaryGetOwnPropertyDescriptor, kReceiver, kKey) \ \ /* Trace */ \ CPP(IsTraceCategoryEnabled) \ diff --git a/deps/v8/src/builtins/builtins-generator-gen.cc b/deps/v8/src/builtins/builtins-generator-gen.cc index 09e7f2e4ec12c7..751003076d1b95 100644 --- a/deps/v8/src/builtins/builtins-generator-gen.cc +++ b/deps/v8/src/builtins/builtins-generator-gen.cc @@ -243,7 +243,7 @@ TF_BUILTIN(SuspendGeneratorBaseline, GeneratorBuiltinsAssembler) { TimesSystemPointerSize(reg_index)); UnsafeStoreFixedArrayElement(parameters_and_registers, index, value); }, - 1, IndexAdvanceMode::kPost); + 1, LoopUnrollingMode::kNo, IndexAdvanceMode::kPost); 
// Iterate over register file and write values into array. // The mapping of register to array index must match that used in @@ -262,7 +262,7 @@ TF_BUILTIN(SuspendGeneratorBaseline, GeneratorBuiltinsAssembler) { TimesSystemPointerSize(reg_index)); UnsafeStoreFixedArrayElement(parameters_and_registers, index, value); }, - 1, IndexAdvanceMode::kPost); + 1, LoopUnrollingMode::kNo, IndexAdvanceMode::kPost); // The return value is unused, defaulting to undefined. Return(UndefinedConstant()); @@ -304,7 +304,7 @@ TF_BUILTIN(ResumeGeneratorBaseline, GeneratorBuiltinsAssembler) { StaleRegisterConstant(), SKIP_WRITE_BARRIER); }, - 1, IndexAdvanceMode::kPost); + 1, LoopUnrollingMode::kNo, IndexAdvanceMode::kPost); Return(LoadJSGeneratorObjectInputOrDebugPos(generator)); } diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc index 907b41e8da641d..ced3e9f85cca91 100644 --- a/deps/v8/src/builtins/builtins-internal-gen.cc +++ b/deps/v8/src/builtins/builtins-internal-gen.cc @@ -126,6 +126,21 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler { Int32Constant(0)); } + TNode IsSharedSpaceIsolate() { + TNode is_shared_space_isolate_addr = ExternalConstant( + ExternalReference::is_shared_space_isolate_flag_address( + this->isolate())); + return Word32NotEqual(Load(is_shared_space_isolate_addr), + Int32Constant(0)); + } + + TNode UsesSharedHeap() { + TNode uses_shared_heap_addr = ExternalConstant( + ExternalReference::uses_shared_heap_flag_address(this->isolate())); + return Word32NotEqual(Load(uses_shared_heap_addr), + Int32Constant(0)); + } + TNode IsPageFlagSet(TNode object, int mask) { TNode page = PageFromAddress(object); TNode flags = UncheckedCast( @@ -314,8 +329,7 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler { fp_mode); BIND(&incremental_barrier); - TNode value = BitcastTaggedToWord(Load(slot)); - IncrementalWriteBarrier(slot, value, fp_mode); + IncrementalWriteBarrier(slot, fp_mode); 
Goto(next); } @@ -389,51 +403,123 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler { void IncrementalWriteBarrierMajor(TNode slot, TNode value, SaveFPRegsMode fp_mode, Label* next) { - Label call_incremental_wb(this); + Label marking_cpp_slow_path(this); - // There are two cases we need to call incremental write barrier. - // 1) value_is_white - GotoIf(IsWhite(value), &call_incremental_wb); + IsValueUnmarkedOrRecordSlot(value, &marking_cpp_slow_path, next); - // 2) is_compacting && value_in_EC && obj_isnt_skip - // is_compacting = true when is_marking = true + BIND(&marking_cpp_slow_path); + { + TNode function = ExternalConstant( + ExternalReference::write_barrier_marking_from_code_function()); + TNode object = BitcastTaggedToWord( + UncheckedParameter(WriteBarrierDescriptor::kObject)); + CallCFunctionWithCallerSavedRegisters( + function, MachineTypeOf::value, fp_mode, + std::make_pair(MachineTypeOf::value, object), + std::make_pair(MachineTypeOf::value, slot)); + Goto(next); + } + } + + void IsValueUnmarkedOrRecordSlot(TNode value, Label* true_label, + Label* false_label) { + // This code implements the following condition: + // IsWhite(value) || + // OnEvacuationCandidate(value) && + // !SkipEvacuationCandidateRecording(value) + + // 1) IsWhite(value) || .... 
+ GotoIf(IsWhite(value), true_label); + + // 2) OnEvacuationCandidate(value) && + // !SkipEvacuationCandidateRecording(value) GotoIfNot(IsPageFlagSet(value, MemoryChunk::kEvacuationCandidateMask), - next); + false_label); { TNode object = BitcastTaggedToWord( UncheckedParameter(WriteBarrierDescriptor::kObject)); Branch( IsPageFlagSet(object, MemoryChunk::kSkipEvacuationSlotsRecordingMask), - next, &call_incremental_wb); + false_label, true_label); } - BIND(&call_incremental_wb); + } + + void IncrementalWriteBarrier(TNode slot, SaveFPRegsMode fp_mode) { + Label next(this), write_into_shared_object(this), + write_into_local_object(this), local_object_and_value(this); + + TNode object = BitcastTaggedToWord( + UncheckedParameter(WriteBarrierDescriptor::kObject)); + TNode value = BitcastTaggedToWord(Load(slot)); + + // Without a shared heap, all objects are local. This is the fast path + // always used when no shared heap exists. + GotoIfNot(UsesSharedHeap(), &local_object_and_value); + + // From the point-of-view of the shared space isolate (= the main isolate) + // shared heap objects are just local objects. + GotoIf(IsSharedSpaceIsolate(), &local_object_and_value); + + // These checks here are now only reached by client isolates (= worker + // isolates). Now first check whether incremental marking is activated for + // that particular object's space. Incrementally marking might only be + // enabled for either local or shared objects on client isolates. + GotoIfNot(IsPageFlagSet(object, MemoryChunk::kIncrementalMarking), &next); + + // We now know that incremental marking is enabled for the given object. + // Decide whether to run the shared or local incremental marking barrier. + InSharedHeap(object, &write_into_shared_object, &write_into_local_object); + + BIND(&write_into_shared_object); + + // Run the shared incremental marking barrier. 
+ IncrementalWriteBarrierShared(object, slot, value, fp_mode, &next); + + BIND(&write_into_local_object); + + // When writing into a local object we can ignore stores of shared object + // values since for those no slot recording or marking is required. + InSharedHeap(value, &next, &local_object_and_value); + + // Both object and value are now guaranteed to be local objects, run the + // local incremental marking barrier. + BIND(&local_object_and_value); + IncrementalWriteBarrierLocal(slot, value, fp_mode, &next); + + BIND(&next); + } + + void IncrementalWriteBarrierShared(TNode object, TNode slot, + TNode value, + SaveFPRegsMode fp_mode, Label* next) { + Label shared_marking_cpp_slow_path(this); + + IsValueUnmarkedOrRecordSlot(value, &shared_marking_cpp_slow_path, next); + + BIND(&shared_marking_cpp_slow_path); { TNode function = ExternalConstant( - ExternalReference::write_barrier_marking_from_code_function()); - TNode object = BitcastTaggedToWord( - UncheckedParameter(WriteBarrierDescriptor::kObject)); + ExternalReference::write_barrier_shared_marking_from_code_function()); CallCFunctionWithCallerSavedRegisters( function, MachineTypeOf::value, fp_mode, std::make_pair(MachineTypeOf::value, object), std::make_pair(MachineTypeOf::value, slot)); + Goto(next); } } - void IncrementalWriteBarrier(TNode slot, TNode value, - SaveFPRegsMode fp_mode) { - Label call_incremental_wb(this), is_minor(this), is_major(this), next(this); - + void IncrementalWriteBarrierLocal(TNode slot, TNode value, + SaveFPRegsMode fp_mode, Label* next) { + Label is_minor(this), is_major(this); Branch(IsMinorMarking(), &is_minor, &is_major); BIND(&is_minor); - IncrementalWriteBarrierMinor(slot, value, fp_mode, &next); + IncrementalWriteBarrierMinor(slot, value, fp_mode, next); BIND(&is_major); - IncrementalWriteBarrierMajor(slot, value, fp_mode, &next); - - BIND(&next); + IncrementalWriteBarrierMajor(slot, value, fp_mode, next); } void GenerateRecordWrite(SaveFPRegsMode fp_mode) { @@ -908,7 
+994,7 @@ class SetOrCopyDataPropertiesAssembler : public CodeStubAssembler { BranchIfSameValue(key, property, &skip, &continue_label); Bind(&continue_label); }, - 1, IndexAdvanceMode::kPost); + 1, LoopUnrollingMode::kNo, IndexAdvanceMode::kPost); } CallBuiltin(Builtin::kCreateDataProperty, context, target, key, @@ -1537,5 +1623,27 @@ TF_BUILTIN(FindNonDefaultConstructorOrConstruct, CodeStubAssembler) { } } +// Dispatcher for different implementations of the [[GetOwnProperty]] internal +// method, returning a PropertyDescriptorObject (a Struct representation of the +// spec PropertyDescriptor concept) +TF_BUILTIN(GetOwnPropertyDescriptor, CodeStubAssembler) { + auto context = Parameter(Descriptor::kContext); + auto receiver = Parameter(Descriptor::kReceiver); + auto key = Parameter(Descriptor::kKey); + + Label call_runtime(this); + + TNode map = LoadMap(receiver); + TNode instance_type = LoadMapInstanceType(map); + + GotoIf(IsSpecialReceiverInstanceType(instance_type), &call_runtime); + TailCallBuiltin(Builtin::kOrdinaryGetOwnPropertyDescriptor, context, receiver, + key); + + BIND(&call_runtime); + TailCallRuntime(Runtime::kGetOwnPropertyDescriptorObject, context, receiver, + key); +} + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-intl-gen.cc b/deps/v8/src/builtins/builtins-intl-gen.cc index 26be109106a056..8902662407f1df 100644 --- a/deps/v8/src/builtins/builtins-intl-gen.cc +++ b/deps/v8/src/builtins/builtins-intl-gen.cc @@ -113,10 +113,6 @@ void IntlBuiltinsAssembler::ToLowerCaseImpl( ToLowerCaseKind kind, std::function)> ReturnFct) { Label call_c(this), return_string(this), runtime(this, Label::kDeferred); - // Early exit on empty strings. - const TNode length = LoadStringLengthAsWord32(string); - GotoIf(Word32Equal(length, Uint32Constant(0)), &return_string); - // Unpack strings if possible, and bail to runtime unless we get a one-byte // flat string. 
ToDirectStringAssembler to_direct( @@ -153,6 +149,10 @@ void IntlBuiltinsAssembler::ToLowerCaseImpl( Bind(&fast); } + // Early exit on empty string. + const TNode length = LoadStringLengthAsWord32(string); + GotoIf(Word32Equal(length, Uint32Constant(0)), &return_string); + const TNode instance_type = to_direct.instance_type(); CSA_DCHECK(this, Word32BinaryNot(IsIndirectStringInstanceType(instance_type))); @@ -196,7 +196,7 @@ void IntlBuiltinsAssembler::ToLowerCaseImpl( Increment(&var_cursor); }, - kCharSize, IndexAdvanceMode::kPost); + kCharSize, LoopUnrollingMode::kNo, IndexAdvanceMode::kPost); // Return the original string if it remained unchanged in order to preserve // e.g. internalization and private symbols (such as the preserved object diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc index ed9f72cae178e8..8ba828568251d0 100644 --- a/deps/v8/src/builtins/builtins-object-gen.cc +++ b/deps/v8/src/builtins/builtins-object-gen.cc @@ -2,9 +2,10 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +#include "src/builtins/builtins-object-gen.h" + #include "src/builtins/builtins-utils-gen.h" #include "src/builtins/builtins.h" -#include "src/codegen/code-stub-assembler.h" #include "src/common/globals.h" #include "src/heap/factory-inl.h" #include "src/ic/accessor-assembler.h" @@ -18,43 +19,6 @@ namespace v8 { namespace internal { -// ----------------------------------------------------------------------------- -// ES6 section 19.1 Object Objects - -class ObjectBuiltinsAssembler : public CodeStubAssembler { - public: - explicit ObjectBuiltinsAssembler(compiler::CodeAssemblerState* state) - : CodeStubAssembler(state) {} - - protected: - void ReturnToStringFormat(TNode context, TNode string); - - // TODO(v8:11167) remove |context| and |object| once OrderedNameDictionary - // supported. 
- void AddToDictionaryIf(TNode condition, TNode context, - TNode object, - TNode name_dictionary, Handle name, - TNode value, Label* bailout); - TNode FromPropertyDescriptor(TNode context, - TNode desc); - TNode FromPropertyDetails(TNode context, - TNode raw_value, - TNode details, - Label* if_bailout); - TNode ConstructAccessorDescriptor(TNode context, - TNode getter, - TNode setter, - TNode enumerable, - TNode configurable); - TNode ConstructDataDescriptor(TNode context, - TNode value, - TNode writable, - TNode enumerable, - TNode configurable); - TNode GetAccessorOrUndefined(TNode accessor, - Label* if_bailout); -}; - class ObjectEntriesValuesBuiltinsAssembler : public ObjectBuiltinsAssembler { public: explicit ObjectEntriesValuesBuiltinsAssembler( @@ -1319,6 +1283,67 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) { } } +TF_BUILTIN(OrdinaryGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) { + auto context = Parameter(Descriptor::kContext); + auto object = Parameter(Descriptor::kReceiver); + auto name = Parameter(Descriptor::kKey); + CSA_DCHECK(this, Word32BinaryNot(IsSpecialReceiverInstanceType( + LoadMapInstanceType(LoadMap(object))))); + + Label if_notunique_name(this), if_iskeyunique(this), done(this), + if_keyisindex(this), call_runtime(this); + + TVARIABLE(IntPtrT, var_index, IntPtrConstant(0)); + TVARIABLE(Name, var_name, name); + TVARIABLE(HeapObject, result, UndefinedConstant()); + + TryToName(name, &if_keyisindex, &var_index, &if_iskeyunique, &var_name, + &call_runtime, &if_notunique_name); + + BIND(&if_notunique_name); + { + Label not_in_string_table(this); + // If the string was not found in the string table, then no regular + // object can have a property with that name, so return |undefined|. 
+ TryInternalizeString(CAST(name), &if_keyisindex, &var_index, + &if_iskeyunique, &var_name, &done, &call_runtime); + } + + BIND(&if_iskeyunique); + { + Label if_found_value(this), if_not_found(this); + + TVARIABLE(Object, var_value); + TVARIABLE(Uint32T, var_details); + TVARIABLE(Object, var_raw_value); + TNode map = LoadMap(object); + TNode instance_type = LoadMapInstanceType(map); + + TryGetOwnProperty(context, object, object, map, instance_type, + var_name.value(), &if_found_value, &var_value, + &var_details, &var_raw_value, &done, &call_runtime, + kReturnAccessorPair); + + BIND(&if_found_value); + + // 4. Return FromPropertyDetails(desc). + result = AllocatePropertyDescriptorObject(context); + InitializePropertyDescriptorObject(CAST(result.value()), var_value.value(), + var_details.value(), &call_runtime); + Goto(&done); + } + + BIND(&done); + Return(result.value()); + + BIND(&if_keyisindex); + Goto(&call_runtime); + + BIND(&call_runtime); + TailCallRuntime(Runtime::kGetOwnPropertyDescriptorObject, context, object, + var_name.value()); +} + // ES6 section 19.1.2.7 Object.getOwnPropertyDescriptor ( O, P ) TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) { auto argc = UncheckedParameter(Descriptor::kJSActualArgumentsCount); @@ -1336,81 +1361,13 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) { key = CallBuiltin(Builtin::kToName, context, key); // 3. Let desc be ? obj.[[GetOwnProperty]](key). 
- Label if_keyisindex(this), if_iskeyunique(this), - call_runtime(this, Label::kDeferred), - return_undefined(this, Label::kDeferred), if_notunique_name(this); + TNode desc = + CallBuiltin(Builtin::kGetOwnPropertyDescriptor, context, object, key); - TNode map = LoadMap(object); - TNode instance_type = LoadMapInstanceType(map); - GotoIf(IsSpecialReceiverInstanceType(instance_type), &call_runtime); - { - TVARIABLE(IntPtrT, var_index, IntPtrConstant(0)); - TVARIABLE(Name, var_name); - - TryToName(key, &if_keyisindex, &var_index, &if_iskeyunique, &var_name, - &call_runtime, &if_notunique_name); - - BIND(&if_notunique_name); - { - Label not_in_string_table(this); - TryInternalizeString(CAST(key), &if_keyisindex, &var_index, - &if_iskeyunique, &var_name, ¬_in_string_table, - &call_runtime); + // 4. Return FromPropertyDescriptor(desc). + TNode result = FromPropertyDescriptor(context, desc); - BIND(¬_in_string_table); - { - // If the string was not found in the string table, then no regular - // object can have a property with that name, so return |undefined|. - Goto(&return_undefined); - } - } - - BIND(&if_iskeyunique); - { - Label if_found_value(this), return_empty(this), if_not_found(this); - - TVARIABLE(Object, var_value); - TVARIABLE(Uint32T, var_details); - TVARIABLE(Object, var_raw_value); - - TryGetOwnProperty(context, object, object, map, instance_type, - var_name.value(), &if_found_value, &var_value, - &var_details, &var_raw_value, &return_empty, - &if_not_found, kReturnAccessorPair); - - BIND(&if_found_value); - // 4. Return FromPropertyDetails(desc). 
- TNode js_desc = FromPropertyDetails( - context, var_value.value(), var_details.value(), &call_runtime); - args.PopAndReturn(js_desc); - - BIND(&return_empty); - var_value = UndefinedConstant(); - args.PopAndReturn(UndefinedConstant()); - - BIND(&if_not_found); - Goto(&call_runtime); - } - } - - BIND(&if_keyisindex); - Goto(&call_runtime); - - BIND(&call_runtime); - { - TNode desc = - CallRuntime(Runtime::kGetOwnPropertyDescriptor, context, object, key); - - GotoIf(IsUndefined(desc), &return_undefined); - - TNode desc_object = CAST(desc); - - // 4. Return FromPropertyDescriptor(desc). - TNode js_desc = FromPropertyDescriptor(context, desc_object); - args.PopAndReturn(js_desc); - } - BIND(&return_undefined); - args.PopAndReturn(UndefinedConstant()); + args.PopAndReturn(result); } // TODO(v8:11167) remove remove |context| and |object| parameters once @@ -1533,6 +1490,24 @@ TNode ObjectBuiltinsAssembler::FromPropertyDescriptor( return js_descriptor.value(); } +TNode ObjectBuiltinsAssembler::FromPropertyDescriptor( + TNode context, TNode desc) { + CSA_DCHECK(this, TaggedIsNotSmi(desc)); + + if (IsUndefinedConstant(desc)) return UndefinedConstant(); + + Label done(this); + TVARIABLE(HeapObject, result, UndefinedConstant()); + GotoIf(IsUndefined(desc), &done); + + TNode property_descriptor = CAST(desc); + result = FromPropertyDescriptor(context, property_descriptor); + Goto(&done); + + BIND(&done); + return result.value(); +} + TNode ObjectBuiltinsAssembler::FromPropertyDetails( TNode context, TNode raw_value, TNode details, Label* if_bailout) { diff --git a/deps/v8/src/builtins/builtins-object-gen.h b/deps/v8/src/builtins/builtins-object-gen.h new file mode 100644 index 00000000000000..4c7bbe12c4fa0d --- /dev/null +++ b/deps/v8/src/builtins/builtins-object-gen.h @@ -0,0 +1,58 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_BUILTINS_BUILTINS_OBJECT_GEN_H_ +#define V8_BUILTINS_BUILTINS_OBJECT_GEN_H_ + +#include "src/codegen/code-stub-assembler.h" + +namespace v8 { +namespace internal { + +// ----------------------------------------------------------------------------- +// ES6 section 19.1 Object Objects + +class ObjectBuiltinsAssembler : public CodeStubAssembler { + public: + explicit ObjectBuiltinsAssembler(compiler::CodeAssemblerState* state) + : CodeStubAssembler(state) {} + + TNode FromPropertyDescriptor(TNode, TNode desc); + + protected: + void ReturnToStringFormat(TNode context, TNode string); + + // TODO(v8:11167) remove |context| and |object| once OrderedNameDictionary + // supported. + void AddToDictionaryIf(TNode condition, TNode context, + TNode object, + TNode name_dictionary, Handle name, + TNode value, Label* bailout); + TNode FromPropertyDescriptor(TNode context, + TNode desc); + TNode FromPropertyDetails(TNode context, + TNode raw_value, + TNode details, + Label* if_bailout); + TNode DescriptorFromPropertyDetails( + TNode context, TNode raw_value, TNode details, + Label* if_bailout); + TNode ConstructAccessorDescriptor(TNode context, + TNode getter, + TNode setter, + TNode enumerable, + TNode configurable); + TNode ConstructDataDescriptor(TNode context, + TNode value, + TNode writable, + TNode enumerable, + TNode configurable); + TNode GetAccessorOrUndefined(TNode accessor, + Label* if_bailout); +}; + +} // namespace internal +} // namespace v8 + +#endif // V8_BUILTINS_BUILTINS_OBJECT_GEN_H_ diff --git a/deps/v8/src/builtins/builtins-reflect.cc b/deps/v8/src/builtins/builtins-reflect.cc index 5ce624cab981fc..7f5b2095bfc762 100644 --- a/deps/v8/src/builtins/builtins-reflect.cc +++ b/deps/v8/src/builtins/builtins-reflect.cc @@ -46,32 +46,6 @@ BUILTIN(ReflectDefineProperty) { return *isolate->factory()->ToBoolean(result.FromJust()); } -// ES6 section 26.1.7 Reflect.getOwnPropertyDescriptor -BUILTIN(ReflectGetOwnPropertyDescriptor) { - HandleScope 
scope(isolate); - DCHECK_LE(3, args.length()); - Handle target = args.at(1); - Handle key = args.at(2); - - if (!target->IsJSReceiver()) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kCalledOnNonObject, - isolate->factory()->NewStringFromAsciiChecked( - "Reflect.getOwnPropertyDescriptor"))); - } - - Handle name; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name, - Object::ToName(isolate, key)); - - PropertyDescriptor desc; - Maybe found = JSReceiver::GetOwnPropertyDescriptor( - isolate, Handle::cast(target), name, &desc); - MAYBE_RETURN(found, ReadOnlyRoots(isolate).exception()); - if (!found.FromJust()) return ReadOnlyRoots(isolate).undefined_value(); - return *desc.ToObject(isolate); -} - // ES6 section 26.1.11 Reflect.ownKeys BUILTIN(ReflectOwnKeys) { HandleScope scope(isolate); diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc index c40e21b1a1c13d..c30c43792b0c5d 100644 --- a/deps/v8/src/builtins/builtins-regexp-gen.cc +++ b/deps/v8/src/builtins/builtins-regexp-gen.cc @@ -712,7 +712,7 @@ TNode RegExpBuiltinsAssembler::RegExpExecInternal( var_to_offset.value(), smi_value); Increment(&var_to_offset, kTaggedSize); }, - kInt32Size, IndexAdvanceMode::kPost); + kInt32Size, LoopUnrollingMode::kYes, IndexAdvanceMode::kPost); } var_result = match_info; @@ -779,10 +779,8 @@ TNode RegExpBuiltinsAssembler::IsFastRegExpNoPrototype( Label out(this); TVARIABLE(BoolT, var_result); -#ifdef V8_ENABLE_FORCE_SLOW_PATH var_result = Int32FalseConstant(); GotoIfForceSlowPath(&out); -#endif const TNode native_context = LoadNativeContext(context); const TNode regexp_fun = @@ -1132,7 +1130,6 @@ TNode RegExpBuiltinsAssembler::FlagsGetter(TNode context, #undef CASE_FOR_FLAG if (is_fastpath) { -#ifdef V8_ENABLE_FORCE_SLOW_PATH result = string; Goto(&done); @@ -1145,9 +1142,6 @@ TNode RegExpBuiltinsAssembler::FlagsGetter(TNode context, BIND(&done); return result.value(); -#else - return string; -#endif } 
else { return string; } diff --git a/deps/v8/src/builtins/builtins-shadow-realm-gen.cc b/deps/v8/src/builtins/builtins-shadow-realm-gen.cc index 7c2223bbc29ac8..11705c2b1fc83f 100644 --- a/deps/v8/src/builtins/builtins-shadow-realm-gen.cc +++ b/deps/v8/src/builtins/builtins-shadow-realm-gen.cc @@ -259,7 +259,7 @@ TF_BUILTIN(CallWrappedFunction, ShadowRealmBuiltinsAssembler) { StoreFixedArrayElement( wrapped_args, IntPtrAdd(index, IntPtrConstant(1)), wrapped_value); }, - 1, IndexAdvanceMode::kPost); + 1, LoopUnrollingMode::kNo, IndexAdvanceMode::kPost); TVARIABLE(Object, var_exception); TNode result; diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc index 37f78793576baf..e5937e5f3d44ad 100644 --- a/deps/v8/src/builtins/builtins-string-gen.cc +++ b/deps/v8/src/builtins/builtins-string-gen.cc @@ -1289,7 +1289,7 @@ TNode StringBuiltinsAssembler::StringToArray( StoreFixedArrayElement(elements, index, entry); }, - 1, IndexAdvanceMode::kPost); + 1, LoopUnrollingMode::kNo, IndexAdvanceMode::kPost); TNode array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, context); result_array = AllocateJSArray(array_map, elements, length_smi); @@ -1573,7 +1573,7 @@ void StringBuiltinsAssembler::CopyStringCharacters( Increment(¤t_to_offset, to_increment); } }, - from_increment, IndexAdvanceMode::kPost); + from_increment, LoopUnrollingMode::kYes, IndexAdvanceMode::kPost); } // A wrapper around CopyStringCharacters which determines the correct string diff --git a/deps/v8/src/builtins/builtins-struct.cc b/deps/v8/src/builtins/builtins-struct.cc index cc2e1278ec1020..4f6b432b4a480a 100644 --- a/deps/v8/src/builtins/builtins-struct.cc +++ b/deps/v8/src/builtins/builtins-struct.cc @@ -60,37 +60,47 @@ BUILTIN(SharedStructTypeConstructor) { } int num_properties = static_cast(num_properties_double); - Handle descriptors = factory->NewDescriptorArray( - num_properties, 0, AllocationType::kSharedOld); - - // Build up the descriptor array. 
- UniqueNameHandleSet all_field_names; - for (int i = 0; i < num_properties; ++i) { - Handle raw_field_name; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, raw_field_name, - JSReceiver::GetElement(isolate, field_names_arg, i)); - Handle field_name; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, field_name, - Object::ToName(isolate, raw_field_name)); - field_name = factory->InternalizeName(field_name); - - // Check that there are no duplicates. - const bool is_duplicate = !all_field_names.insert(field_name).second; - if (is_duplicate) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kDuplicateTemplateProperty, - field_name)); + Handle maybe_descriptors; + if (num_properties != 0) { + maybe_descriptors = factory->NewDescriptorArray(num_properties, 0, + AllocationType::kSharedOld); + + // Build up the descriptor array. + UniqueNameHandleSet all_field_names; + for (int i = 0; i < num_properties; ++i) { + Handle raw_field_name; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, raw_field_name, + JSReceiver::GetElement(isolate, field_names_arg, i)); + Handle field_name; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, field_name, Object::ToName(isolate, raw_field_name)); + field_name = factory->InternalizeName(field_name); + + // TODO(v8:12547): Support Symbols? + if (field_name->IsSymbol()) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewTypeError(MessageTemplate::kSymbolToString)); + } + + // Check that there are no duplicates. + const bool is_duplicate = !all_field_names.insert(field_name).second; + if (is_duplicate) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewTypeError(MessageTemplate::kDuplicateTemplateProperty, + field_name)); + } + + // Shared structs' fields need to be aligned, so make it all tagged.
+ PropertyDetails details( + PropertyKind::kData, SEALED, PropertyLocation::kField, + PropertyConstness::kMutable, Representation::Tagged(), i); + maybe_descriptors->Set(InternalIndex(i), *field_name, + MaybeObject::FromObject(FieldType::Any()), + details); } - - // Shared structs' fields need to be aligned, so make it all tagged. - PropertyDetails details( - PropertyKind::kData, SEALED, PropertyLocation::kField, - PropertyConstness::kMutable, Representation::Tagged(), i); - descriptors->Set(InternalIndex(i), *field_name, - MaybeObject::FromObject(FieldType::Any()), details); + maybe_descriptors->Sort(); } - descriptors->Sort(); Handle info = isolate->factory()->NewSharedFunctionInfoForBuiltin( @@ -101,7 +111,7 @@ BUILTIN(SharedStructTypeConstructor) { Handle constructor = Factory::JSFunctionBuilder{isolate, info, isolate->native_context()} - .set_map(isolate->strict_function_map()) + .set_map(isolate->strict_function_with_readonly_prototype_map()) .Build(); int instance_size; @@ -113,7 +123,6 @@ BUILTIN(SharedStructTypeConstructor) { JS_SHARED_STRUCT_TYPE, instance_size, TERMINAL_FAST_ELEMENTS_KIND, in_object_properties, AllocationType::kSharedMap); - instance_map->InitializeDescriptors(isolate, *descriptors); // Structs have fixed layout ahead of time, so there's no slack. int out_of_object_properties = num_properties - in_object_properties; if (out_of_object_properties == 0) { @@ -123,17 +132,14 @@ BUILTIN(SharedStructTypeConstructor) { } instance_map->set_is_extensible(false); JSFunction::SetInitialMap(isolate, constructor, instance_map, - factory->null_value()); - - // The constructor is not a shared object, so the shared map should not point - // to it. - instance_map->set_constructor_or_back_pointer(*factory->null_value()); + factory->null_value(), factory->null_value()); // Pre-create the enum cache in the shared space, as otherwise for-in // enumeration will incorrectly create an enum cache in the per-thread heap. 
if (num_properties == 0) { instance_map->SetEnumLength(0); } else { + instance_map->InitializeDescriptors(isolate, *maybe_descriptors); FastKeyAccumulator::InitializeFastPropertyEnumCache( isolate, instance_map, num_properties, AllocationType::kSharedOld); DCHECK_EQ(num_properties, instance_map->EnumLength()); diff --git a/deps/v8/src/builtins/builtins-typed-array.cc b/deps/v8/src/builtins/builtins-typed-array.cc index 55666ff76104a9..eb4eea831f0788 100644 --- a/deps/v8/src/builtins/builtins-typed-array.cc +++ b/deps/v8/src/builtins/builtins-typed-array.cc @@ -189,6 +189,7 @@ BUILTIN(TypedArrayPrototypeFill) { isolate->factory()->NewStringFromAsciiChecked(method_name); THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewTypeError(message, operation)); } + end = std::min(end, static_cast(array->GetLength())); } int64_t count = end - start; diff --git a/deps/v8/src/builtins/convert.tq b/deps/v8/src/builtins/convert.tq index 23995e50e3dc5e..9e56866ca5eaeb 100644 --- a/deps/v8/src/builtins/convert.tq +++ b/deps/v8/src/builtins/convert.tq @@ -263,6 +263,9 @@ Convert(i: intptr): int32 { Convert(i: int64): int32 { return TruncateInt64ToInt32(i); } +Convert(i: uint64): uint32 { + return Unsigned(TruncateInt64ToInt32(Signed(i))); +} Convert(n: Number): int32 { typeswitch (n) { case (s: Smi): { diff --git a/deps/v8/src/builtins/number.tq b/deps/v8/src/builtins/number.tq index 37dfea20beb06d..f88466bc9fe8e2 100644 --- a/deps/v8/src/builtins/number.tq +++ b/deps/v8/src/builtins/number.tq @@ -761,13 +761,25 @@ builtin BitwiseAnd(implicit context: Context)( } builtin BitwiseOr(implicit context: Context)( - left: JSAny, right: JSAny): Object { - return Generate_BitwiseBinaryOp(Operation::kBitwiseOr, left, right, context); + left: JSAny, right: JSAny): Numeric { + try { + BinaryOp1(left, right) otherwise Number, AtLeastOneBigInt; + } label Number(left: Number, right: Number) { + tail BitwiseOr(left, right); + } label AtLeastOneBigInt(left: Numeric, right: Numeric) { + tail 
bigint::BigIntBitwiseOr(left, right); + } } builtin BitwiseXor(implicit context: Context)( - left: JSAny, right: JSAny): Object { - return Generate_BitwiseBinaryOp(Operation::kBitwiseXor, left, right, context); + left: JSAny, right: JSAny): Numeric { + try { + BinaryOp1(left, right) otherwise Number, AtLeastOneBigInt; + } label Number(left: Number, right: Number) { + tail BitwiseXor(left, right); + } label AtLeastOneBigInt(left: Numeric, right: Numeric) { + tail bigint::BigIntBitwiseXor(left, right); + } } // Relational builtins. diff --git a/deps/v8/src/builtins/object.tq b/deps/v8/src/builtins/object.tq index 0eb82061156d66..664b76830b9a88 100644 --- a/deps/v8/src/builtins/object.tq +++ b/deps/v8/src/builtins/object.tq @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +#include 'src/builtins/builtins-object-gen.h' + namespace runtime { extern transitioning runtime ObjectIsExtensible(implicit context: Context)(JSAny): JSAny; @@ -201,4 +203,149 @@ transitioning javascript builtin ObjectPrototypeToLocaleString( const method = GetProperty(receiver, 'toString'); return Call(context, method, receiver); } + +// JSDataPropertyDescriptor constants +const kJSDataPropertyDescriptorWritableOffset: constexpr int31 + generates 'JSDataPropertyDescriptor::kWritableOffset'; +const kJSDataPropertyDescriptorEnumerableOffset: constexpr int31 + generates 'JSDataPropertyDescriptor::kEnumerableOffset'; +const kJSDataPropertyDescriptorConfigurableOffset: constexpr int31 + generates 'JSDataPropertyDescriptor::kConfigurableOffset'; +const kJSDataPropertyDescriptorValueOffset: constexpr int31 + generates 'JSDataPropertyDescriptor::kValueOffset'; + +// JSAccessorPropertyDescriptor constants +const kJSAccessorPropertyDescriptorEnumerableOffset: constexpr int31 + generates 'JSAccessorPropertyDescriptor::kEnumerableOffset'; +const kJSAccessorPropertyDescriptorConfigurableOffset: constexpr int31 + generates 
'JSAccessorPropertyDescriptor::kConfigurableOffset'; +const kJSAccessorPropertyDescriptorGetOffset: constexpr int31 + generates 'JSAccessorPropertyDescriptor::kGetOffset'; +const kJSAccessorPropertyDescriptorSetOffset: constexpr int31 + generates 'JSAccessorPropertyDescriptor::kSetOffset'; + +// ToPropertyDescriptor (https://tc39.es/ecma262/#sec-topropertydescriptor) +transitioning macro ToPropertyDescriptor(implicit context: Context)( + object: JSReceiver): PropertyDescriptorObject { + const result: PropertyDescriptorObject = AllocatePropertyDescriptorObject(); + + if (object.map == *NativeContextSlot( + context, ContextSlot::DATA_PROPERTY_DESCRIPTOR_MAP_INDEX)) { + const writable = UnsafeCast( + LoadObjectField(object, kJSDataPropertyDescriptorWritableOffset)); + result.flags.has_writable = true; + result.flags.is_writable = ToBoolean(writable); + + const enumerable = UnsafeCast( + LoadObjectField(object, kJSDataPropertyDescriptorEnumerableOffset)); + result.flags.has_enumerable = true; + result.flags.is_enumerable = ToBoolean(enumerable); + + const configurable = UnsafeCast( + LoadObjectField(object, kJSDataPropertyDescriptorConfigurableOffset)); + result.flags.has_configurable = true; + result.flags.is_configurable = ToBoolean(configurable); + + result.flags.has_value = true; + result.value = UnsafeCast( + LoadObjectField(object, kJSDataPropertyDescriptorValueOffset)); + } else if ( + object.map == *NativeContextSlot( + context, ContextSlot::ACCESSOR_PROPERTY_DESCRIPTOR_MAP_INDEX)) { + const enumerable = UnsafeCast( + LoadObjectField(object, kJSAccessorPropertyDescriptorEnumerableOffset)); + result.flags.has_enumerable = true; + result.flags.is_enumerable = ToBoolean(enumerable); + + const configurable = UnsafeCast(LoadObjectField( + object, kJSAccessorPropertyDescriptorConfigurableOffset)); + result.flags.has_configurable = true; + result.flags.is_configurable = ToBoolean(configurable); + + result.flags.has_get = true; + result.get = UnsafeCast( + 
LoadObjectField(object, kJSAccessorPropertyDescriptorGetOffset)); + + result.flags.has_set = true; + result.set = UnsafeCast( + LoadObjectField(object, kJSAccessorPropertyDescriptorSetOffset)); + } else { + const hasEnumerable = HasProperty(object, 'enumerable'); + if (hasEnumerable == True) { + const enumerable = ToBoolean(GetProperty(object, 'enumerable')); + result.flags.has_enumerable = true; + result.flags.is_enumerable = enumerable; + } + + const hasConfigurable = HasProperty(object, 'configurable'); + if (hasConfigurable == True) { + const configurable = ToBoolean(GetProperty(object, 'configurable')); + result.flags.has_configurable = true; + result.flags.is_configurable = configurable; + } + + const hasValue = HasProperty(object, 'value'); + if (hasValue == True) { + const value = GetProperty(object, 'value'); + result.flags.has_value = true; + result.value = value; + } + + const hasWritable = HasProperty(object, 'writable'); + if (hasWritable == True) { + const writable = ToBoolean(GetProperty(object, 'writable')); + result.flags.has_writable = true; + result.flags.is_writable = writable; + } + + const hasGet = HasProperty(object, 'get'); + if (hasGet == True) { + let getter = GetProperty(object, 'get'); + if (!Is(getter) && !Is(getter)) { + getter = Cast(getter) otherwise ThrowTypeError( + MessageTemplate::kObjectGetterCallable, getter); + } + result.flags.has_get = true; + result.get = getter; + } + + const hasSet = HasProperty(object, 'set'); + if (hasSet == True) { + let setter = GetProperty(object, 'set'); + if (!Is(setter) && !Is(setter)) { + setter = Cast(setter) otherwise ThrowTypeError( + MessageTemplate::kObjectSetterCallable, setter); + } + result.flags.has_set = true; + result.set = setter; + } + } + return result; +} + +@export +transitioning macro ToPropertyDescriptor(implicit context: Context)( + object: JSAny): PropertyDescriptorObject|Undefined { + typeswitch (object) { + case (Undefined): { + return Undefined; + } + case (receiver: 
JSReceiver): { + return ToPropertyDescriptor(receiver); + } + case (JSAny): { + ThrowTypeError(MessageTemplate::kPropertyDescObject, object); + } + } +} + +extern transitioning macro ObjectBuiltinsAssembler::FromPropertyDescriptor( + Context, JSAny): JSAny; + +@export +transitioning macro FromPropertyDescriptor(implicit context: Context)( + object: JSAny): JSAny { + return FromPropertyDescriptor(context, object); +} + } // namespace object diff --git a/deps/v8/src/builtins/promise-resolve.tq b/deps/v8/src/builtins/promise-resolve.tq index c5ad5eefd5c42b..8a5a90bd7fc543 100644 --- a/deps/v8/src/builtins/promise-resolve.tq +++ b/deps/v8/src/builtins/promise-resolve.tq @@ -162,6 +162,8 @@ ResolvePromise(implicit context: Context)( goto Slow; } label Slow deferred { // Skip "then" lookup for Wasm objects as they are opaque. + // TODO(v8:13523): Drop this special case after changing what [[Get]] + // does in general. @if(V8_ENABLE_WEBASSEMBLY) if (Is(resolution)) { return FulfillPromise(promise, resolution); diff --git a/deps/v8/src/builtins/reflect.tq b/deps/v8/src/builtins/reflect.tq index c0591e7f6c92c2..5ed70c81c5a100 100644 --- a/deps/v8/src/builtins/reflect.tq +++ b/deps/v8/src/builtins/reflect.tq @@ -94,4 +94,20 @@ ReflectHas(js-implicit context: NativeContext)( otherwise ThrowTypeError(MessageTemplate::kCalledOnNonObject, 'Reflect.has'); return HasProperty(objectJSReceiver, key); } + +extern transitioning builtin GetOwnPropertyDescriptor( + implicit context: Context)(JSAny, Name): JSAny; + +// ES6 section 26.1.7 Reflect.getOwnPropertyDescriptor +transitioning javascript builtin +ReflectGetOwnPropertyDescriptor(js-implicit context: NativeContext)( + target: JSAny, propertyKey: JSAny): JSAny { + const targetReceiver = Cast(target) + otherwise ThrowTypeError( + MessageTemplate::kCalledOnNonObject, 'Reflect.getOwnPropertyDescriptor'); + const name = ToName(propertyKey); + + const desc = GetOwnPropertyDescriptor(targetReceiver, name); + return 
object::FromPropertyDescriptor(desc); +} } // namespace reflect diff --git a/deps/v8/src/builtins/torque-internal.tq b/deps/v8/src/builtins/torque-internal.tq index b36741e1dd5de2..fcd0d4a702fc0b 100644 --- a/deps/v8/src/builtins/torque-internal.tq +++ b/deps/v8/src/builtins/torque-internal.tq @@ -119,6 +119,11 @@ struct Slice { return this.TryAtIndex(i) otherwise unreachable; } + macro AtIndex(index: uint32): Reference { + const i: intptr = Convert(index); + return this.TryAtIndex(i) otherwise unreachable; + } + macro Iterator(): SliceIterator { const end = this.offset + TimesSizeOf(this.length); return SliceIterator{ diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq index a8d253312314eb..7f594cc1b817ba 100644 --- a/deps/v8/src/builtins/typed-array-createtypedarray.tq +++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq @@ -59,13 +59,14 @@ transitioning macro AllocateTypedArray(implicit context: Context)( typedArray.byte_offset = byteOffset; if (isLengthTracking) { dcheck(IsResizableArrayBuffer(buffer)); - // Make the byte_length of length-tracking TAs zero, so that we won't - // accidentally use it and access invalid data. + // Set the byte_length and length fields of length-tracking TAs to zero, so + // that we won't accidentally use them and access invalid data. 
typedArray.byte_length = 0; + typedArray.length = 0; } else { typedArray.byte_length = byteLength; + typedArray.length = length; } - typedArray.length = length; typedArray.bit_field.is_length_tracking = isLengthTracking; typedArray.bit_field.is_backed_by_rab = IsResizableArrayBuffer(buffer) && !IsSharedArrayBuffer(buffer); @@ -477,9 +478,16 @@ transitioning macro TypedArraySpeciesCreateByLength(implicit context: Context)( const typedArray: JSTypedArray = TypedArraySpeciesCreate( methodName, numArgs, exemplar, Convert(length), Undefined, Undefined); - if (typedArray.length < length) deferred { - ThrowTypeError(MessageTemplate::kTypedArrayTooShort); - } + try { + const createdArrayLength = + LoadJSTypedArrayLengthAndCheckDetached(typedArray) + otherwise DetachedOrOutOfBounds; + if (createdArrayLength < length) deferred { + ThrowTypeError(MessageTemplate::kTypedArrayTooShort); + } + } label DetachedOrOutOfBounds { + ThrowTypeError(MessageTemplate::kTypedArrayTooShort); + } return typedArray; } diff --git a/deps/v8/src/builtins/wasm.tq b/deps/v8/src/builtins/wasm.tq index 5916c234ec0854..a56fc22efc4f90 100644 --- a/deps/v8/src/builtins/wasm.tq +++ b/deps/v8/src/builtins/wasm.tq @@ -59,6 +59,7 @@ extern runtime WasmStringViewWtf8Encode( Context, WasmInstanceObject, Smi, ByteArray, Number, Number, Number): JSAny; extern runtime WasmStringViewWtf8Slice( Context, ByteArray, Number, Number): String; +extern runtime WasmJSToWasmObject(Context, JSAny, Smi): JSAny; } namespace unsafe { @@ -67,10 +68,8 @@ extern macro Allocate(intptr, constexpr AllocationFlag): HeapObject; } namespace wasm { -const kAnyTableType: constexpr int31 +const kAnyType: constexpr int31 generates 'wasm::kWasmAnyRef.raw_bit_field()'; -const kAnyNonNullTableType: constexpr int31 - generates 'wasm::kWasmAnyNonNullableRef.raw_bit_field()'; const kMaxPolymorphism: constexpr int31 generates 'wasm::kMaxPolymorphism'; @@ -289,10 +288,11 @@ builtin WasmRefFunc(index: uint32): Object { const instance: 
WasmInstanceObject = LoadInstanceFromFrame(); try { const table: FixedArray = LoadInternalFunctionsFromInstance(instance); - if (table == Undefined) goto CallRuntime; const functionIndex: intptr = Signed(ChangeUint32ToWord(index)); const result: Object = LoadFixedArrayElement(table, functionIndex); - if (result == Undefined) goto CallRuntime; + // {result} is either a funcref or nullptr. A Smi check is the fastest + // way to distinguish these two cases. + if (TaggedIsSmi(result)) goto CallRuntime; return result; } label CallRuntime deferred { tail runtime::WasmRefFunc( @@ -831,20 +831,112 @@ builtin WasmStringNewWtf16( LoadContextFromInstance(instance), instance, SmiFromUint32(memory), WasmUint32ToNumber(offset), WasmUint32ToNumber(size)); } + +struct TwoByteToOneByteIterator { + macro Next(): char8 labels NoMore { + if (this.offset == this.end_offset) goto NoMore; + const raw: char16 = *torque_internal::unsafe::NewReference( + this.array, this.offset); + const result: char8 = %RawDownCast(raw & 0xFF); + this.offset += 2; + return result; + } + + array: WasmArray; + offset: intptr; + end_offset: intptr; +} + builtin WasmStringNewWtf16Array( array: WasmArray, start: uint32, end: uint32): String { - const context = LoadContextFromFrame(); try { if (array.length < end) goto OffsetOutOfRange; if (end < start) goto OffsetOutOfRange; - tail runtime::WasmStringNewWtf16Array( - context, array, SmiFromUint32(start), SmiFromUint32(end)); + const length: uint32 = end - start; + if (length == 0) return kEmptyString; + // Calling into the runtime has overhead, but once we're there it's faster, + // so it pays off for long strings. The threshold has been determined + // experimentally. 
+ if (length >= 32) goto Runtime; + const intptrLength = Convert(length); + const arrayContent = torque_internal::unsafe::NewConstSlice( + array, kWasmArrayHeaderSize, Convert(array.length)); + const substring = + Subslice(arrayContent, Convert(start), intptrLength) + otherwise goto OffsetOutOfRange; + + // Ideas for additional future improvements: + // (1) We could add a fast path for very short strings, e.g. <= 8 chars, + // and just allocate two-byte strings for them. That would save time + // here, and would only waste a couple of bytes at most. A concern is + // that such strings couldn't take one-byte fast paths later on, e.g. + // in toLower/toUpper case conversions. + // (2) We could load more than one array element at a time, e.g. using + // intptr-wide loads, or possibly even wider SIMD instructions. We'd + // have to make sure that non-aligned start offsets are handled, + // and the implementation would become more platform-specific. + // (3) We could shift the problem around by allocating two-byte strings + // here and checking whether they're one-byte-compatible later, e.g. + // when promoting them from new to old space. Drawback: rewriting + // strings to different maps isn't great for optimized code that's + // based on collected type feedback, or that wants to elide duplicate + // map checks within the function. + // (4) We could allocate space for a two-byte string, then optimistically + // start writing one-byte characters into it, and then either restart + // in two-byte mode if needed, or return the over-allocated bytes to + // the allocator in the end. + // (5) We could standardize a `string.new_ascii_array` instruction, which + // could safely produce one-byte strings without checking characters. + // See https://github.com/WebAssembly/stringref/issues/53. + + try { + // To reduce the amount of branching, check 8 code units at a time. 
The + // tradeoff for choosing 8 is that we want to check for early termination + // of the loop often (to avoid unnecessary work) but not too often + // (because each check has a cost). + let i: intptr = 0; + const eightElementLoopEnd = intptrLength - 8; + while (i <= eightElementLoopEnd) { + const bits = Convert(*substring.UncheckedAtIndex(i)) | + Convert(*substring.UncheckedAtIndex(i + 1)) | + Convert(*substring.UncheckedAtIndex(i + 2)) | + Convert(*substring.UncheckedAtIndex(i + 3)) | + Convert(*substring.UncheckedAtIndex(i + 4)) | + Convert(*substring.UncheckedAtIndex(i + 5)) | + Convert(*substring.UncheckedAtIndex(i + 6)) | + Convert(*substring.UncheckedAtIndex(i + 7)); + if (bits > 0xFF) goto TwoByte; + i += 8; + } + let bits: uint32 = 0; + while (i < intptrLength) { + bits |= Convert(*substring.UncheckedAtIndex(i)); + i += 1; + } + if (bits > 0xFF) goto TwoByte; + } label TwoByte { + return AllocateSeqTwoByteString(length, substring.Iterator()); + } + + return AllocateNonEmptySeqOneByteString(length, TwoByteToOneByteIterator{ + array: array, + offset: kWasmArrayHeaderSize + + torque_internal::TimesSizeOf(Convert(start)), + end_offset: kWasmArrayHeaderSize + + torque_internal::TimesSizeOf(Convert(end)) + }); } label OffsetOutOfRange deferred { + const context = LoadContextFromFrame(); const error = MessageTemplate::kWasmTrapArrayOutOfBounds; runtime::ThrowWasmError(context, SmiConstant(error)); unreachable; + } label Runtime deferred { + const context = LoadContextFromFrame(); + tail runtime::WasmStringNewWtf16Array( + context, array, SmiFromUint32(start), SmiFromUint32(end)); } } + builtin WasmStringConst(index: uint32): String { const instance = LoadInstanceFromFrame(); tail runtime::WasmStringConst( @@ -1151,28 +1243,11 @@ builtin WasmStringViewIterSlice( string::SubString(string, Convert(start), Convert(end)); } -transitioning builtin WasmExternInternalize(implicit context: Context)( - externObject: JSAny): JSAny { - const innerObject = - 
WasmGetOwnProperty(externObject, WasmWrappedObjectSymbolConstant()); - if (innerObject == Undefined) { - return externObject; - } - return innerObject; -} +builtin WasmExternInternalize(externObject: JSAny): JSAny { + const instance = LoadInstanceFromFrame(); + const context = LoadContextFromInstance(instance); -transitioning builtin WasmExternExternalize(implicit context: Context)( - anyObject: JSAny): JSAny { - typeswitch (anyObject) { - case (wasmArray: WasmArray): { - return WasmAllocateObjectWrapper(wasmArray); - } - case (wasmStruct: WasmStruct): { - return WasmAllocateObjectWrapper(wasmStruct); - } - case (JSAny): { - return anyObject; - } - } + tail runtime::WasmJSToWasmObject( + context, externObject, SmiConstant(kAnyType)); } } diff --git a/deps/v8/src/codegen/OWNERS b/deps/v8/src/codegen/OWNERS index 97a7b2b5638960..d476577f06dffa 100644 --- a/deps/v8/src/codegen/OWNERS +++ b/deps/v8/src/codegen/OWNERS @@ -8,5 +8,6 @@ leszeks@chromium.org mslekova@chromium.org nicohartmann@chromium.org tebbi@chromium.org +victorgomes@chromium.org per-file compiler.*=marja@chromium.org diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc index dc06c743a02d30..e3ca3f9535c23a 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc @@ -374,7 +374,7 @@ void Assembler::AllocateAndInstallRequestedHeapNumbers(Isolate* isolate) { } void Assembler::GetCode(Isolate* isolate, CodeDesc* desc, - SafepointTableBuilder* safepoint_table_builder, + SafepointTableBuilderBase* safepoint_table_builder, int handler_table_offset) { // As a crutch to avoid having to add manual Align calls wherever we use a // raw workflow to create Code objects (mostly in tests), add another Align diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h index 68f773a92436c3..1377e68ec9dda7 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64.h +++ 
b/deps/v8/src/codegen/arm64/assembler-arm64.h @@ -190,9 +190,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // GetCode emits any pending (non-emitted) code and fills the descriptor desc. static constexpr int kNoHandlerTable = 0; - static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr; + static constexpr SafepointTableBuilderBase* kNoSafepointTable = nullptr; void GetCode(Isolate* isolate, CodeDesc* desc, - SafepointTableBuilder* safepoint_table_builder, + SafepointTableBuilderBase* safepoint_table_builder, int handler_table_offset); // Convenience wrapper for code without safepoint or handler tables. diff --git a/deps/v8/src/codegen/arm64/constants-arm64.h b/deps/v8/src/codegen/arm64/constants-arm64.h index 7a054863d17c90..4a276aaad47f17 100644 --- a/deps/v8/src/codegen/arm64/constants-arm64.h +++ b/deps/v8/src/codegen/arm64/constants-arm64.h @@ -300,25 +300,25 @@ SYSTEM_REGISTER_FIELDS_LIST(DECLARE_FIELDS_OFFSETS, NOTHING) constexpr int ImmPCRel_mask = ImmPCRelLo_mask | ImmPCRelHi_mask; // Condition codes. -enum Condition { - eq = 0, - ne = 1, - hs = 2, - cs = hs, - lo = 3, - cc = lo, - mi = 4, - pl = 5, - vs = 6, - vc = 7, - hi = 8, - ls = 9, - ge = 10, - lt = 11, - gt = 12, - le = 13, - al = 14, - nv = 15 // Behaves as always/al. +enum Condition : uint8_t { + eq = 0, // Equal + ne = 1, // Not equal + hs = 2, // Unsigned higher or same (or carry set) + cs = hs, // -- + lo = 3, // Unsigned lower (or carry clear) + cc = lo, // -- + mi = 4, // Negative + pl = 5, // Positive or zero + vs = 6, // Signed overflow + vc = 7, // No signed overflow + hi = 8, // Unsigned higher + ls = 9, // Unsigned lower or same + ge = 10, // Signed greater than or equal + lt = 11, // Signed less than + gt = 12, // Signed greater than + le = 13, // Signed less than or equal + al = 14, // Always executed + nv = 15 // Behaves as always/al. 
}; inline Condition NegateCondition(Condition cond) { diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h index 0c7e7357534fc9..22ff843bb74f58 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h @@ -1106,16 +1106,18 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) { void TurboAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); } -void TurboAssembler::SmiToInt32(Register smi) { - DCHECK(smi.Is64Bits()); +void TurboAssembler::SmiToInt32(Register smi) { SmiToInt32(smi, smi); } + +void TurboAssembler::SmiToInt32(Register dst, Register smi) { + DCHECK(dst.Is64Bits()); if (v8_flags.enable_slow_asserts) { AssertSmi(smi); } DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits()); if (COMPRESS_POINTERS_BOOL) { - Asr(smi.W(), smi.W(), kSmiShift); + Asr(dst.W(), smi.W(), kSmiShift); } else { - Lsr(smi, smi, kSmiShift); + Lsr(dst, smi, kSmiShift); } } diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc index 25970c1cc3f704..0c03d34ba4d91f 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc @@ -1503,6 +1503,12 @@ void TurboAssembler::AssertFPCRState(Register fpcr) { Bind(&done); } +Condition TurboAssembler::CheckSmi(Register object) { + static_assert(kSmiTag == 0); + Tst(object, kSmiTagMask); + return eq; +} + void TurboAssembler::AssertSmi(Register object, AbortReason reason) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); @@ -2447,6 +2453,28 @@ bool TurboAssembler::IsNearCallOffset(int64_t offset) { return is_int26(offset); } +// Check if the code object is marked for deoptimization. If it is, then it +// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need +// to: +// 1. 
read from memory the word that contains that bit, which can be found in +// the flags in the referenced {CodeDataContainer} object; +// 2. test kMarkedForDeoptimizationBit in those flags; and +// 3. if it is not zero then it jumps to the builtin. +void TurboAssembler::BailoutIfDeoptimized() { + UseScratchRegisterScope temps(this); + Register scratch = temps.AcquireX(); + int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize; + LoadTaggedPointerField(scratch, + MemOperand(kJavaScriptCallCodeStartRegister, offset)); + Ldr(scratch.W(), + FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset)); + Label not_deoptimized; + Tbz(scratch.W(), Code::kMarkedForDeoptimizationBit, ¬_deoptimized); + Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode), + RelocInfo::CODE_TARGET); + Bind(¬_deoptimized); +} + void TurboAssembler::CallForDeoptimization( Builtin target, int deopt_id, Label* exit, DeoptimizeKind kind, Label* ret, Label* jump_deoptimization_entry_label) { @@ -3324,15 +3352,14 @@ void TurboAssembler::LoadExternalPointerField(Register destination, DCHECK(!AreAliased(destination, isolate_root)); ASM_CODE_COMMENT(this); #ifdef V8_ENABLE_SANDBOX - if (IsSandboxedExternalPointerType(tag)) { - DCHECK_NE(kExternalPointerNullTag, tag); - DCHECK(!IsSharedExternalPointerType(tag)); - UseScratchRegisterScope temps(this); - Register external_table = temps.AcquireX(); - if (isolate_root == no_reg) { - DCHECK(root_array_available_); - isolate_root = kRootRegister; - } + DCHECK_NE(tag, kExternalPointerNullTag); + DCHECK(!IsSharedExternalPointerType(tag)); + UseScratchRegisterScope temps(this); + Register external_table = temps.AcquireX(); + if (isolate_root == no_reg) { + DCHECK(root_array_available_); + isolate_root = kRootRegister; + } Ldr(external_table, MemOperand(isolate_root, IsolateData::external_pointer_table_offset() + @@ -3345,10 +3372,9 @@ void TurboAssembler::LoadExternalPointerField(Register destination, Mov(destination, Operand(destination, 
LSR, shift_amount)); Ldr(destination, MemOperand(external_table, destination)); And(destination, destination, Immediate(~tag)); - return; - } -#endif // V8_ENABLE_SANDBOX +#else Ldr(destination, field_operand); +#endif // V8_ENABLE_SANDBOX } void TurboAssembler::MaybeSaveRegisters(RegList registers) { diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h index 99121e3f4b9de3..beac8778107613 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h @@ -561,6 +561,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { inline void SmiTag(Register smi); inline void SmiToInt32(Register smi); + inline void SmiToInt32(Register dst, Register smi); // Calls Abort(msg) if the condition cond is not satisfied. // Use --debug_code to enable. @@ -577,6 +578,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // Like Assert(), but always enabled. void Check(Condition cond, AbortReason reason); + // Functions performing a check on a known or potential smi. Returns + // a condition that is satisfied if the check is successful. + Condition CheckSmi(Register src); + inline void Debug(const char* message, uint32_t code, Instr params = BREAK); void Trap(); @@ -1003,6 +1008,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // The return address on the stack is used by frame iteration. 
void StoreReturnAddressAndCall(Register target); + void BailoutIfDeoptimized(); void CallForDeoptimization(Builtin target, int deopt_id, Label* exit, DeoptimizeKind kind, Label* ret, Label* jump_deoptimization_entry_label); @@ -1796,6 +1802,37 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { PopSizeRegList(regs, kSRegSizeInBits); } + inline void PushAll(RegList registers) { + if (registers.Count() % 2 != 0) { + DCHECK(!registers.has(xzr)); + registers.set(xzr); + } + PushXRegList(registers); + } + inline void PopAll(RegList registers) { + if (registers.Count() % 2 != 0) { + DCHECK(!registers.has(xzr)); + registers.set(xzr); + } + PopXRegList(registers); + } + inline void PushAll(DoubleRegList registers, + int stack_slot_size = kDoubleSize) { + if (registers.Count() % 2 != 0) { + DCHECK(!registers.has(fp_zero)); + registers.set(fp_zero); + } + PushDRegList(registers); + } + inline void PopAll(DoubleRegList registers, + int stack_slot_size = kDoubleSize) { + if (registers.Count() % 2 != 0) { + DCHECK(!registers.has(fp_zero)); + registers.set(fp_zero); + } + PopDRegList(registers); + } + // Push the specified register 'count' times. void PushMultipleTimes(CPURegister src, Register count); @@ -1963,6 +2000,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // other registers. void CompareObjectType(Register heap_object, Register map, Register type_reg, InstanceType type); + // A version of CompareObjectType which does not set the {type_reg} and has + // the same signatureas the x64 version of CmpObjectType. + void CmpObjectType(Register heap_object, InstanceType type, Register map) { + CompareObjectType(heap_object, map, xzr, type); + } // Compare object type for heap object, and branch if equal (or not.) 
// heap_object contains a non-Smi whose object type should be compared with diff --git a/deps/v8/src/codegen/assembler.cc b/deps/v8/src/codegen/assembler.cc index 961eb74e4d838e..05165f1bfd4d0b 100644 --- a/deps/v8/src/codegen/assembler.cc +++ b/deps/v8/src/codegen/assembler.cc @@ -102,11 +102,11 @@ class DefaultAssemblerBuffer : public AssemblerBuffer { : buffer_(base::OwnedVector::NewForOverwrite( std::max(AssemblerBase::kMinimalBufferSize, size))) { #ifdef DEBUG - ZapCode(reinterpret_cast
(buffer_.start()), buffer_.size()); + ZapCode(reinterpret_cast
(buffer_.begin()), buffer_.size()); #endif } - byte* start() const override { return buffer_.start(); } + byte* start() const override { return buffer_.begin(); } int size() const override { return static_cast(buffer_.size()); } diff --git a/deps/v8/src/codegen/background-merge-task.h b/deps/v8/src/codegen/background-merge-task.h index c80624cd59b517..033e26b2e88dc6 100644 --- a/deps/v8/src/codegen/background-merge-task.h +++ b/deps/v8/src/codegen/background-merge-task.h @@ -33,6 +33,11 @@ class V8_EXPORT_PRIVATE BackgroundMergeTask { const ScriptDetails& script_details, LanguageMode language_mode); + // Alternative step 1: on the main thread, if the caller has already looked up + // the script in the Isolate compilation cache, set up the necessary + // persistent data for the background merge. + void SetUpOnMainThread(Isolate* isolate, Handle