Skip to content

Commit

Permalink
[vm] Enable multiple entry-points on ARM64.
Browse files Browse the repository at this point in the history
Addresses #34162

Change-Id: I7126f8c9b470041aaa260255293327f67d64d1bc
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/128063
Commit-Queue: Samir Jindel <[email protected]>
Reviewed-by: Martin Kustermann <[email protected]>
  • Loading branch information
sjindel-google authored and [email protected] committed Dec 19, 2019
1 parent 302184f commit f134164
Show file tree
Hide file tree
Showing 7 changed files with 99 additions and 49 deletions.
2 changes: 1 addition & 1 deletion runtime/tests/vm/vm.status
Original file line number Diff line number Diff line change
Expand Up @@ -225,7 +225,7 @@ cc/Service_Profile: SkipByDesign
[ $compiler == dartkb ]
dart/generic_field_invocation_test: SkipByDesign # KBC interpreters do not support --no_lazy_dispatchers

[ $builder_tag == bytecode_interpreter || $hot_reload || $hot_reload_rollback || $arch != arm && $arch != simarm && $arch != x64 || $compiler != dartk && $compiler != dartkp && $compiler != dartkb ]
[ $builder_tag == bytecode_interpreter || $hot_reload || $hot_reload_rollback || $arch != arm && $arch != simarm && $arch != x64 && $arch != simarm64 && $arch != arm64 || $compiler != dartk && $compiler != dartkp && $compiler != dartkb ]
dart/entrypoints/*: SkipByDesign # These tests are for compiler optimizations and very sensitive to when functions are optimized, so they are disabled on hotreload and optcounter bots.

[ $builder_tag == crossword || $builder_tag == crossword_ast || $compiler != dartkp || $system != linux && $system != macos && $system != windows ]
Expand Down
25 changes: 13 additions & 12 deletions runtime/vm/compiler/assembler/assembler_arm64.cc
Original file line number Diff line number Diff line change
Expand Up @@ -619,16 +619,14 @@ void Assembler::Branch(const Code& target,
br(TMP);
}

// Emits a branch to `code` via PP, registering the target in the object
// pool as kPatchable so the call site's target can be swapped later.
void Assembler::BranchPatchable(const Code& code) {
Branch(code, PP, ObjectPoolBuilderEntry::kPatchable);
}

void Assembler::BranchLink(const Code& target,
ObjectPoolBuilderEntry::Patchability patchable) {
ObjectPoolBuilderEntry::Patchability patchable,
CodeEntryKind entry_kind) {
const int32_t offset = target::ObjectPool::element_offset(
object_pool_builder().FindObject(ToObject(target), patchable));
LoadWordFromPoolOffset(CODE_REG, offset);
ldr(TMP, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
ldr(TMP,
FieldAddress(CODE_REG, target::Code::entry_point_offset(entry_kind)));
blr(TMP);
}

Expand All @@ -638,11 +636,13 @@ void Assembler::BranchLinkToRuntime() {
}

void Assembler::BranchLinkWithEquivalence(const Code& target,
const Object& equivalence) {
const Object& equivalence,
CodeEntryKind entry_kind) {
const int32_t offset = target::ObjectPool::element_offset(
object_pool_builder().FindObject(ToObject(target), equivalence));
LoadWordFromPoolOffset(CODE_REG, offset);
ldr(TMP, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
ldr(TMP,
FieldAddress(CODE_REG, target::Code::entry_point_offset(entry_kind)));
blr(TMP);
}

Expand Down Expand Up @@ -1514,18 +1514,19 @@ void Assembler::LeaveStubFrame() {
// R0 receiver, R5 ICData entries array
// Preserve R4 (ARGS_DESC_REG), not required today, but maybe later.
void Assembler::MonomorphicCheckedEntryJIT() {
ASSERT(has_single_entry_point_);
has_single_entry_point_ = false;
const bool saved_use_far_branches = use_far_branches();
set_use_far_branches(false);
const intptr_t start = CodeSize();

Label immediate, miss;
Bind(&miss);
ldr(IP0, Address(THR, target::Thread::monomorphic_miss_entry_offset()));
br(IP0);

Comment("MonomorphicCheckedEntry");
ASSERT(CodeSize() == target::Instructions::kMonomorphicEntryOffsetJIT);
ASSERT(CodeSize() - start ==
target::Instructions::kMonomorphicEntryOffsetJIT);

const intptr_t cid_offset = target::Array::element_offset(0);
const intptr_t count_offset = target::Array::element_offset(1);
Expand All @@ -1541,15 +1542,15 @@ void Assembler::MonomorphicCheckedEntryJIT() {
LoadImmediate(R4, 0); // GC-safe for OptimizeInvokedFunction.

// Fall through to unchecked entry.
ASSERT(CodeSize() == target::Instructions::kPolymorphicEntryOffsetJIT);
ASSERT(CodeSize() - start ==
target::Instructions::kPolymorphicEntryOffsetJIT);

set_use_far_branches(saved_use_far_branches);
}

// R0 receiver, R5 guarded cid as Smi.
// Preserve R4 (ARGS_DESC_REG), not required today, but maybe later.
void Assembler::MonomorphicCheckedEntryAOT() {
ASSERT(has_single_entry_point_);
has_single_entry_point_ = false;
bool saved_use_far_branches = use_far_branches();
set_use_far_branches(false);
Expand Down
20 changes: 12 additions & 8 deletions runtime/vm/compiler/assembler/assembler_arm64.h
Original file line number Diff line number Diff line change
Expand Up @@ -1371,14 +1371,15 @@ class Assembler : public AssemblerBase {
Register pp,
ObjectPoolBuilderEntry::Patchability patchable =
ObjectPoolBuilderEntry::kNotPatchable);
void BranchPatchable(const Code& code);

void BranchLink(const Code& code,
ObjectPoolBuilderEntry::Patchability patchable =
ObjectPoolBuilderEntry::kNotPatchable);
ObjectPoolBuilderEntry::kNotPatchable,
CodeEntryKind entry_kind = CodeEntryKind::kNormal);

void BranchLinkPatchable(const Code& code) {
BranchLink(code, ObjectPoolBuilderEntry::kPatchable);
void BranchLinkPatchable(const Code& code,
CodeEntryKind entry_kind = CodeEntryKind::kNormal) {
BranchLink(code, ObjectPoolBuilderEntry::kPatchable, entry_kind);
}
void BranchLinkToRuntime();

Expand All @@ -1388,7 +1389,10 @@ class Assembler : public AssemblerBase {

// Emit a call that shares its object pool entries with other calls
// that have the same equivalence marker.
void BranchLinkWithEquivalence(const Code& code, const Object& equivalence);
void BranchLinkWithEquivalence(
const Code& code,
const Object& equivalence,
CodeEntryKind entry_kind = CodeEntryKind::kNormal);

void AddImmediate(Register dest, int64_t imm) {
AddImmediate(dest, dest, imm);
Expand Down Expand Up @@ -1675,9 +1679,9 @@ class Assembler : public AssemblerBase {
// Returns object data offset for address calculation; for heap objects also
// accounts for the tag.
static int32_t HeapDataOffset(bool is_external, intptr_t cid) {
return is_external ?
0 :
(target::Instance::DataOffsetFor(cid) - kHeapObjectTag);
return is_external
? 0
: (target::Instance::DataOffsetFor(cid) - kHeapObjectTag);
}

static int32_t EncodeImm26BranchOffset(int64_t imm, int32_t instr) {
Expand Down
27 changes: 13 additions & 14 deletions runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc
Original file line number Diff line number Diff line change
Expand Up @@ -967,8 +967,7 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
RawPcDescriptors::Kind kind,
LocationSummary* locs,
Code::EntryKind entry_kind) {
// TODO(sjindel/entrypoints): Support multiple entrypoints on ARM64.
__ BranchLinkPatchable(stub);
__ BranchLinkPatchable(stub, entry_kind);
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
}

Expand All @@ -978,7 +977,6 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
LocationSummary* locs,
const Function& target,
Code::EntryKind entry_kind) {
// TODO(sjindel/entrypoints): Support multiple entrypoints on ARM64.
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
AddPcRelativeCallTarget(target, entry_kind);
__ GenerateUnRelocatedPcRelativeCall();
Expand All @@ -990,7 +988,7 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
// instead.
ASSERT(is_optimizing());
const auto& stub = StubCode::CallStaticFunction();
__ BranchLinkWithEquivalence(stub, target);
__ BranchLinkWithEquivalence(stub, target, entry_kind);
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
AddStaticCallTarget(target, entry_kind);
}
Expand Down Expand Up @@ -1026,7 +1024,6 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(const Code& stub,
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
// TODO(sjindel/entrypoints): Support multiple entrypoints on ARM64.
ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
// Each ICData propagated from unoptimized to optimized code contains the
// function that corresponds to the Dart function of that IC call. Due
Expand All @@ -1038,7 +1035,8 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(const Code& stub,
__ LoadObject(R6, parsed_function().function());
__ LoadFromOffset(R0, SP, (ic_data.CountWithoutTypeArgs() - 1) * kWordSize);
__ LoadUniqueObject(R5, ic_data);
GenerateDartCall(deopt_id, token_pos, stub, RawPcDescriptors::kIcCall, locs);
GenerateDartCall(deopt_id, token_pos, stub, RawPcDescriptors::kIcCall, locs,
entry_kind);
__ Drop(ic_data.CountWithTypeArgs());
}

Expand Down Expand Up @@ -1122,7 +1120,6 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
// TODO(34162): Support multiple entry-points on ARM64.
ASSERT(ic_data.NumArgsTested() == 1);
const Code& initial_stub = StubCode::UnlinkedCall();
const UnlinkedCall& data =
Expand All @@ -1147,9 +1144,13 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
} else {
__ LoadDoubleWordFromPoolOffset(R5, CODE_REG,
ObjectPool::element_offset(data_index));
__ ldr(LR, compiler::FieldAddress(
CODE_REG,
Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
const intptr_t entry_point_offset =
entry_kind == Code::EntryKind::kNormal
? compiler::target::Code::entry_point_offset(
Code::EntryKind::kMonomorphic)
: compiler::target::Code::entry_point_offset(
Code::EntryKind::kMonomorphicUnchecked);
__ ldr(LR, compiler::FieldAddress(CODE_REG, entry_point_offset));
}
__ blr(LR);

Expand All @@ -1164,12 +1165,11 @@ void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t count_with_type_args,
LocationSummary* locs,
const ICData& ic_data,
Code::EntryKind entry_kind) {
// TODO(34162): Support multiple entry-points on ARM64.
const Code& stub =
StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
__ LoadObject(R5, ic_data);
GenerateDartCall(deopt_id, token_pos, stub,
RawPcDescriptors::kUnoptStaticCall, locs);
RawPcDescriptors::kUnoptStaticCall, locs, entry_kind);
__ Drop(count_with_type_args);
}

Expand All @@ -1181,7 +1181,6 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
// TODO(sjindel/entrypoints): Support multiple entrypoints on ARM64.
ASSERT(!function.IsClosureFunction());
if (function.HasOptionalParameters() || function.IsGeneric()) {
__ LoadObject(R4, arguments_descriptor);
Expand All @@ -1193,7 +1192,7 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
// Do not use the code from the function, but let the code be patched so that
// we can record the outgoing edges to other code.
GenerateStaticDartCall(deopt_id, token_pos, RawPcDescriptors::kOther, locs,
function);
function, entry_kind);
__ Drop(count_with_type_args);
}

Expand Down
3 changes: 2 additions & 1 deletion runtime/vm/compiler/backend/il_arm64.cc
Original file line number Diff line number Diff line change
Expand Up @@ -265,7 +265,8 @@ void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// R0: Function.
ASSERT(locs()->in(0).reg() == R0);
__ LoadFieldFromOffset(CODE_REG, R0, Function::code_offset());
__ LoadFieldFromOffset(R2, R0, Function::entry_point_offset());
__ LoadFieldFromOffset(
R2, CODE_REG, compiler::target::Code::entry_point_offset(entry_kind()));

// R2: instructions.
// R5: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value).
Expand Down
Loading

0 comments on commit f134164

Please sign in to comment.