// Get the entry point of the method's OAT (quick) code; a real code pointer
// here means the method has already been compiled to machine code.
// Install entry point from interpreter.
const void* quick_code = method->GetEntryPointFromQuickCompiledCode();
// Decide whether the method must be interpreted instead, e.g. because it has
// not been compiled or the runtime forces interpretation.
bool enter_interpreter = class_linker->ShouldUseInterpreterEntrypoint(method, quick_code);
if (!method->IsInvokable()) {
EnsureThrowsInvocationError(class_linker, method);
return;
}
// If this is a static method and not a constructor, set the code entry to a stub.
// The stub is shared, because every static method has to be resolved when its
// class is initialized. So the method first points at a generic trampoline;
// when some caller reaches it, the trampoline triggers the class's initialization.
// Only during that initialization are the trampolines replaced with the real
// entry points: ClassLinker::InitializeClass -> ClassLinker::FixupStaticTrampolines.
if (method->IsStatic() && !method->IsConstructor()) {
// For static methods excluding the class initializer, install the trampoline.
// It will be replaced by the proper entry point by ClassLinker::FixupStaticTrampolines
// after initializing class (see ClassLinker::InitializeClass method).
method->SetEntryPointFromQuickCompiledCode(GetQuickResolutionStub());
}
// If this is a JNI method with no compiled code, install the generic JNI trampoline.
else if (quick_code == nullptr && method->IsNative()) {
method->SetEntryPointFromQuickCompiledCode(GetQuickGenericJniStub());
}
// If the method must be interpreted, install the quick-to-interpreter bridge.
else if (enter_interpreter) {
// Set entry point from compiled code if there's no code or in interpreter only mode.
method->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
}
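This has a practical consequence for hooking: reading a static method's entry point before its class is initialized yields the shared resolution stub, not real code. A minimal sketch of the check a hook framework needs (GetEntryPointOf and EnsureInitialized are hypothetical helpers; GetQuickResolutionStub is the stub installed above):
const void* entry = GetEntryPointOf(static_method);  // hypothetical helper
if (entry == GetQuickResolutionStub()) {
  // Class not initialized yet: the entry point is still the shared trampoline.
  // Force initialization and re-read instead of patching the stub, which
  // would affect every still-unresolved static method in the process.
  EnsureInitialized(static_method);                  // hypothetical helper
  entry = GetEntryPointOf(static_method);
}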
// Determines the location of the code pointer.
enum class CodePtrLocation {
// As the name suggests, a recursive call to the method itself; the ArtMethod
// does not need to be reloaded, just branch straight back to the method entry.
kCallSelf,
// Branch (B) directly to a PC-relative offset; common for calling nearby methods.
kCallPCRelative,
// The compiled entry point is known ahead of time, so the ArtMethod->CodeEntry
// lookup can be skipped and the code can `blx entry` directly; common for calls
// into system methods, whose addresses are absolute and need no relocation.
kCallDirect,
// The method's position in memory is only known when the OAT file is linked,
// so the entry needs a linker fixup, but the ArtMethod lookup is still skipped.
kCallDirectWithFixup,
// The entry point is only known at runtime, so the generated code must load
// ArtMethod->CodeEntry on every call. It follows that this is the only case in
// which an entry-point-replacement hook can take effect.
kCallArtMethod,
};
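Since kCallArtMethod reloads the entry point on every invocation, replacing entry_point_from_quick_compiled_code_ in the ArtMethod is enough to divert such call sites. A minimal sketch, assuming a hypothetical FakeArtMethod layout (real field offsets vary across ART versions and must be probed at runtime):
#include <cstdint>

// Hypothetical mirror of ArtMethod, for illustration only.
struct FakeArtMethod {
  uint32_t declaring_class_;
  uint32_t access_flags_;
  // ... other fields elided ...
  const void* entry_point_from_quick_compiled_code_;
};

const void* HookEntryPoint(FakeArtMethod* target, const void* new_entry) {
  const void* old_entry = target->entry_point_from_quick_compiled_code_;
  // A real implementation should swap this while threads are suspended.
  target->entry_point_from_quick_compiled_code_ = new_entry;
  return old_entry;  // saved so the hook can call through to the original code
}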
void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
// Load / materialize the ArtMethod (elided).
...........
// Emit the call itself.
switch (invoke->GetCodePtrLocation()) {
case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
__ Bl(&frame_entry_label_);
break;
case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative: {
relative_call_patches_.emplace_back(invoke->GetTargetMethod());
vixl::Label* label = &relative_call_patches_.back().label;
vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(label);
__ bl(0); // Branch and link to itself. This will be overridden at link time.
break;
}
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
// LR prepared above for better instruction scheduling.
DCHECK(direct_code_loaded);
// lr()
__ Blr(lr);
break;
case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
// LR = callee_method->entry_point_from_quick_compiled_code_;
__ Ldr(lr, MemOperand(
XRegisterFrom(callee_method),
ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize).Int32Value()));
// lr()
__ Blr(lr);
break;
}
}
_functionxxx:
...
...
// kCallSelf: branch-and-link straight back to the method's own entry.
bl _functionxxx
// kCallDirect / kCallDirectWithFixup: the entry is already in lr.
blr lr
// kCallArtMethod: load the entry from the ArtMethod (in X0), then call it.
ldr lr, [RegMethod(X0), #CodeEntryOffset]
blr lr
// Determines the location of the code pointer.
enum class CodePtrLocation {
// Recursive call, use local PC-relative call instruction.
kCallSelf,
// Use code pointer from the ArtMethod*.
// Used when we don't know the target code. This is also the last-resort-kind used when
// other kinds are unimplemented or impractical (i.e. slow) on a particular architecture.
kCallArtMethod,
};
switch (invoke->GetCodePtrLocation()) {
case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
{
// Use a scope to help guarantee that `RecordPcInfo()` records the correct pc.
ExactAssemblyScope eas(GetVIXLAssembler(),
kInstructionSize,
CodeBufferCheckScope::kExactSize);
__ bl(&frame_entry_label_);
RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
}
break;
case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
// LR = callee_method->entry_point_from_quick_compiled_code_;
__ Ldr(lr, MemOperand(
XRegisterFrom(callee_method),
ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize).Int32Value()));
{
// Use a scope to help guarantee that `RecordPcInfo()` records the correct pc.
ExactAssemblyScope eas(GetVIXLAssembler(),
kInstructionSize,
CodeBufferCheckScope::kExactSize);
// lr()
__ blr(lr);
RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
}
break;
}
{
// Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
// /* HeapReference<Class> */ temp = receiver->klass_
__ Ldr(temp.W(), HeapOperandFrom(LocationFrom(receiver), class_offset));
MaybeRecordImplicitNullCheck(invoke);
}
// Instead of simply (possibly) unpoisoning `temp` here, we should
// emit a read barrier for the previous class reference load.
// However this is not required in practice, as this is an
// intermediate/temporary reference and because the current
// concurrent copying collector keeps the from-space memory
// intact/accessible until the end of the marking phase (the
// concurrent copying collector may not in the future).
GetAssembler()->MaybeUnpoisonHeapReference(temp.W());
// temp = temp->GetMethodAt(method_offset);
__ Ldr(temp, MemOperand(temp, method_offset));
// lr = temp->GetEntryPoint();
__ Ldr(lr, MemOperand(temp, entry_point.SizeValue()));
{
// Use a scope to help guarantee that `RecordPcInfo()` records the correct pc.
ExactAssemblyScope eas(GetVIXLAssembler(), kInstructionSize, CodeBufferCheckScope::kExactSize);
// lr();
__ blr(lr);
RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
}
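Put schematically, the ARM64 sequence above performs the following pointer chase; here is a C++ rendering with hypothetical Fake* types (an illustration, not ART source):
#include <cstddef>

struct FakeMethod { void* entry_point; };      // hypothetical ArtMethod stand-in
struct FakeClass  { FakeMethod* vtable[1]; };  // hypothetical Class stand-in
struct FakeObject { FakeClass* klass; };       // hypothetical Object stand-in

using QuickEntry = void (*)(FakeMethod* callee /*, args... */);

void DispatchVirtual(FakeObject* receiver, size_t vtable_index) {
  FakeClass* klass = receiver->klass;                // temp = receiver->klass_
  FakeMethod* callee = klass->vtable[vtable_index];  // temp = temp->GetMethodAt(method_offset)
  QuickEntry entry =
      reinterpret_cast<QuickEntry>(callee->entry_point);  // lr = temp->GetEntryPoint()
  entry(callee);                                     // blr lr
}
Because the entry point is loaded from the ArtMethod on every virtual call, virtual dispatch stays hookable by entry replacement for the same reason kCallArtMethod is.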
// Set by the class linker for a method that has only one implementation for a
// virtual call.
static constexpr uint32_t kAccSingleImplementation = 0x08000000; // method (runtime)
ArtMethod* single_impl = interface_method->GetSingleImplementation(pointer_size);
if (single_impl == nullptr) {
// implementation_method becomes the first implementation for
// interface_method.
interface_method->SetSingleImplementation(implementation_method, pointer_size);
// Keep interface_method's single-implementation status.
return;
}
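For completeness, when a second, different implementation shows up, the assumption must be dropped; a hedged sketch of that branch (the setter names follow the AOSP pattern but are assumptions here):
if (single_impl != implementation_method) {
  // A second implementation exists: interface_method can no longer be
  // devirtualized, and compiled code relying on this CHA assumption has to
  // be invalidated/deoptimized by the runtime.
  interface_method->SetHasSingleImplementation(false);             // assumed setter
  interface_method->SetSingleImplementation(nullptr, pointer_size);
}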
ArtMethod {
// Depending on the method type, the data is
// - native method: pointer to the JNI function registered to this method
// or a function to resolve the JNI function,
// - conflict method: ImtConflictTable,
// - abstract/interface method: the single-implementation if any,
// - proxy method: the original interface method or constructor,
// - other methods: the profiling data.
void* data_;
}
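Because data_ holds the registered JNI function for a native method, the supported way to divert a native method is simply to re-register it; a sketch where nativeFoo, its signature, and the replacement are hypothetical placeholders:
#include <jni.h>

// Replacement body; save the original pointer beforehand if call-through is needed.
static void my_replacement_foo(JNIEnv* env, jobject thiz) { /* ... */ }

void DivertNativeMethod(JNIEnv* env, jclass target_class) {
  JNINativeMethod methods[] = {
      {"nativeFoo", "()V", reinterpret_cast<void*>(my_replacement_foo)},
  };
  // Re-registering an already-bound native method replaces the pointer in data_.
  env->RegisterNatives(target_class, methods, 1);
}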
// Try CHA-based devirtualization to change virtual method calls into
// direct calls.
// Returns the actual method that resolved_method can be devirtualized to.
ArtMethod* TryCHADevirtualization(ArtMethod* resolved_method)
REQUIRES_SHARED(Locks::mutator_lock_);
interface IDog { void dosth(); }
class DogImpl implements IDog {
    @Override public void dosth() { }
}
private void test() {
    IDog dog = new DogImpl();
    dog.dosth(); // DogImpl is the sole implementation, so CHA can devirtualize this call
}
void Dbg::SuspendVM() {
// Avoid a deadlock between GC and debugger where GC gets suspended during GC. b/25800335.
gc::ScopedGCCriticalSection gcs(Thread::Current(),
gc::kGcCauseDebugger,
gc::kCollectorTypeDebugger);
Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
}
void Dbg::ResumeVM() {
Runtime::Current()->GetThreadList()->ResumeAllForDebugger();
}
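A hook framework needs the same safety: before patching entry points it should stop the world, much as the debugger does above. A hedged sketch using ART's internal RAII helper (declared in thread_list.h; a standalone framework would have to resolve the symbol from libart.so at runtime, which is an assumption about the deployment):
{
  art::ScopedSuspendAll ssa("entry point patch");
  // ... patch ArtMethod entry points while all Java threads are suspended ...
}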
if (!m->IsStatic()) {
// Replace calls to String.<init> with equivalent StringFactory call.
if (declaring_class->IsStringClass() && m->IsConstructor()) {
m = WellKnownClasses::StringInitToStringFactory(m);
CHECK(javaReceiver == nullptr);
} else {
// Check that the receiver is non-null and an instance of the field's declaring class.
receiver = soa.Decode<mirror::Object>(javaReceiver);
if (!VerifyObjectIsClass(receiver, declaring_class)) {
return nullptr;
}
// Find the actual implementation of the virtual method.
m = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(m, kRuntimePointerSize);
}
}
inline ArtMethod* Class::FindVirtualMethodForVirtualOrInterface(ArtMethod* method,
PointerSize pointer_size) {
if (method->IsDirect()) {
return method;
}
if (method->GetDeclaringClass()->IsInterface() && !method->IsCopied()) {
return FindVirtualMethodForInterface(method, pointer_size);
}
return FindVirtualMethodForVirtual(method, pointer_size);
}
bool ClassLinker::ShouldUseInterpreterEntrypoint(ArtMethod* method, const void* quick_code) {
if (UNLIKELY(method->IsNative() || method->IsProxyMethod())) {
return false;
}
if (quick_code == nullptr) {
return true;
}
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
if (instr->InterpretOnly()) {
return true;
}
if (runtime->GetClassLinker()->IsQuickToInterpreterBridge(quick_code)) {
// Doing this check avoids doing compiled/interpreter transitions.
return true;
}
if (Dbg::IsForcedInterpreterNeededForCalling(Thread::Current(), method)) {
// Force the use of interpreter when it is required by the debugger.
return true;
}
if (Thread::Current()->IsAsyncExceptionPending()) {
// Force use of interpreter to handle async-exceptions
return true;
}
if (runtime->IsJavaDebuggable()) {
// For simplicity, we ignore precompiled code and go to the interpreter
// assuming we don't already have jitted code.
// We could look at the oat file where `quick_code` is being defined,
// and check whether it's been compiled debuggable, but we decided to
// only rely on the JIT for debuggable apps.
jit::Jit* jit = Runtime::Current()->GetJit();
return (jit == nullptr) || !jit->GetCodeCache()->ContainsPc(quick_code);
}
if (runtime->IsNativeDebuggable()) {
DCHECK(runtime->UseJitCompilation() && runtime->GetJit()->JitAtFirstUse());
// If we are doing native debugging, ignore application's AOT code,
// since we want to JIT it (at first use) with extra stackmaps for native
// debugging. We keep however all AOT code from the boot image,
// since the JIT-at-first-use is blocking and would result in non-negligible
// startup performance impact.
return !runtime->GetHeap()->IsInBootImageOatFile(quick_code);
}
return false;
}
inline void PerformCall(Thread* self,
const CodeItemDataAccessor& accessor,
ArtMethod* caller_method,
const size_t first_dest_reg,
ShadowFrame* callee_frame,
JValue* result,
bool use_interpreter_entrypoint)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (LIKELY(Runtime::Current()->IsStarted())) {
if (use_interpreter_entrypoint) {
interpreter::ArtInterpreterToInterpreterBridge(self, accessor, callee_frame, result);
} else {
interpreter::ArtInterpreterToCompiledCodeBridge(
self, caller_method, callee_frame, first_dest_reg, result);
}
} else {
interpreter::UnstartedRuntime::Invoke(self, accessor, callee_frame, result, first_dest_reg);
}
}
uint32_t vmap_table_offset_ = 0u;
uint32_t method_info_offset_ = 0u;
QuickMethodFrameInfo frame_info_;
// The code size in bytes. The highest bit is used to signify whether the
// compiled code behind this method header has the should_deoptimize flag.
uint32_t code_size_ = 0u;
// The actual code.
uint8_t code_[0];
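Since the top bit of code_size_ doubles as the should_deoptimize flag, the actual size must be masked out before use. A small sketch (the mask value follows from the comment above; the names are assumptions, though ART defines similar constants):
#include <cstdint>

// Highest bit of code_size_ marks code compiled with the should_deoptimize flag.
static constexpr uint32_t kShouldDeoptimizeMask = 0x80000000u;

uint32_t GetCodeSize(uint32_t code_size_field) {
  return code_size_field & ~kShouldDeoptimizeMask;
}

bool HasShouldDeoptimizeFlag(uint32_t code_size_field) {
  return (code_size_field & kShouldDeoptimizeMask) != 0u;
}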
public void setCloseOnTouchOutsideIfNotSet(boolean close) {
if (!mSetCloseOnTouchOutside) {
mCloseOnTouchOutside = close;
mSetCloseOnTouchOutside = true;
}
}
// Stub for methods taking 3 arguments, at stub index 13.
public static long stub_hook_13(long a0, long a1, long a2) throws Throwable {
    return hookBridge(getMethodId(3, 13), null, a0, a1, a2);
}
public static Object addressToObject64(Class objectType, long address) {
if (objectType == null)
return null;
if (objectType.isPrimitive()) {
if (objectType == int.class) {
return (int)address;
} else if (objectType == long.class) {
return address;
} else if (objectType == short.class) {
return (short)address;
} else if (objectType == byte.class) {
return (byte)address;
} else if (objectType == char.class) {
return (char)address;
} else if (objectType == boolean.class) {
return address != 0;
} else {
throw new RuntimeException("unknown type: " + objectType.toString());
}
} else {
return SandHook.getObject(address);
}
}
const CompilerOptions& compiler_options = compiler_driver_->GetCompilerOptions();
bool should_inline = (compiler_options.GetInlineDepthLimit() > 0)
    && (compiler_options.GetInlineMaxCodeUnits() > 0);
if (!should_inline) {
  // Inlining is disabled when either limit is zero.
  return;
}
size_t inline_max_code_units = compiler_driver_->GetCompilerOptions().GetInlineMaxCodeUnits();
if (code_item->insns_size_in_code_units_ > inline_max_code_units) {
VLOG(compiler) << "Method " << PrettyMethod(method)
<< " is too big to inline: "
<< code_item->insns_size_in_code_units_
<< " > "
<< inline_max_code_units;
return false;
}