There are usually two ways to dump il2cpp: one is to load the binary with Il2CppDumper and dump it directly; the other is to inject Zygisk-Il2CppDumper into the target process. In this post I describe another approach that I use regularly.
Unicorn is a CPU emulation framework, and Qiling is a system emulation framework built on top of it. With Qiling we can emulate a process's execution environment: dump the target process's entire memory space, load that memory image into Qiling, compile our dumper into a payload and map it into the same emulated address space, then simply run the payload to perform the dump.
The tools involved:

- mypower: a memory scanning tool, used here mainly to dump a process's complete memory.
- Qiling: a system emulation framework.
First, dump the target process with mypower. Start mypower and run the following commands to obtain a memory image of the process:
```
attach -p pid
snapshot data
```

This produces two files: data.memory, which holds the raw memory contents, and data.json, which describes each memory region.
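For orientation, data.json can be inspected like this. The field names (begin, end, prot, file, desc, saved_offset, saved_size) are the ones the loader script below consumes; the concrete values in the comment are made up:

```python
import json

memory_info = json.load(open("data.json"))
# Each entry in memory_info["regions"] looks roughly like:
#   {"begin": 0x7a10000000, "end": 0x7a10004000, "prot": 5,
#    "file": "/data/app/.../lib/arm64/libil2cpp.so", "desc": "",
#    "saved_offset": 0, "saved_size": 0x4000}
for region in memory_info["regions"][:5]:
    print(f"{region['begin']:x}-{region['end']:x} {region['file']}")
```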
Next, prepare the profile for the Qiling framework, dump.ql:
```ini
[OS64]
load_address = 0x8FFF000000000000
stack_address = 0x8FFF800000000000
stack_size = 0x8000000
mmap_address = 0
```

According to the region list in data.json, nothing is mapped near 0x8FFF000000000000, so it is safe to load the payload there.
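A quick way to confirm that choice is to check the dumped regions against the reserved range. This little check is my own addition, not part of the original scripts:

```python
import json

memory_info = json.load(open("data.json"))
PAYLOAD_BASE = 0x8FFF000000000000
PAYLOAD_END  = 0x9000000000000000  # generous upper bound for payload + stack

# No dumped region may overlap the address range reserved for the payload.
assert all(r["end"] <= PAYLOAD_BASE or r["begin"] >= PAYLOAD_END
           for r in memory_info["regions"]), "payload range is occupied"
```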
The following script loads the process memory dumped by mypower into the Qiling framework:

```python
import json
from qiling import Qiling
from qiling.const import QL_VERBOSE

memory_info = json.load(open('data.json'))   # region descriptions
memory_data = open('data.memory', 'rb')      # raw memory contents

def load_memory(mu):
    idx = 0
    for region in memory_info["regions"]:
        file = region["file"]
        # Skip GPU buffers, inaccessible regions and thread stacks.
        if file.startswith("/dev/kgsl") or region["prot"] == 0 \
                or region["desc"].endswith("stack]"):
            continue
        # Skip Android runtime and vendor mappings the payload never touches.
        if 'aidl' in file or 'hidl' in file or 'vndk' in file \
                or 'android.hardware' in file \
                or file.endswith("dex") or file.endswith("jar") \
                or file.endswith("apk") or file.endswith("art") \
                or file.endswith("oat") or 'dalvik' in file \
                or 'dalvik' in region['desc'] \
                or file.startswith('/vendor') or 'hardware' in file:
            continue
        size = region["end"] - region["begin"]
        mu.mem_map(region["begin"], size)
        memory_data.seek(region["saved_offset"], 0)
        mem = memory_data.read(region["saved_size"])
        mu.mem_write(region["begin"], mem)
        del mem
        print(f"Load {idx}/{len(memory_info['regions'])} "
              f"{region['begin']:x}-{region['end']:x} {size} "
              f"{region['file']} {region['desc']}")
        idx += 1

ql = Qiling(["dump.elf"], rootfs='./rootfs', verbose=QL_VERBOSE.OFF,
            profile='./dump.ql', ostype="Linux", archtype="ARM64")
load_memory(ql.uc)
```

What this involves is mainly the Unicorn API: parse data.json, read the matching bytes out of data.memory, then write them into the emulated address space with Unicorn's mem_write. The dump.elf passed to Qiling is our dumper; its source derives from https://github.com/Perfare/Il2CppDumper and is covered in the next section.
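As a sanity check (my own addition, not part of the original script), you can verify that a mapped region round-trips through the emulator:

```python
# Pick a region the loader actually mapped (libil2cpp.so passes the filters)
# and confirm its bytes read back from Unicorn exactly as they were captured.
region = next(r for r in memory_info["regions"]
              if r["file"].endswith("libil2cpp.so") and r["prot"] != 0)
memory_data.seek(region["saved_offset"], 0)
expected = memory_data.read(16)
assert bytes(ql.uc.mem_read(region["begin"], 16)) == expected
```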
Before running any code, we still need to set up the runtime environment, mainly a valid TLS area for the process, the TCB (thread control block), and a thread stack:

```python
import struct
import ctypes
from unicorn.arm64_const import UC_ARM64_REG_TPIDR_EL0

END_ADDRESS = 0x55aa55aa55aa55aa   # sentinel address that ends emulation
STACK_ADDRESS = 0x8FFF800000000000
STACK_SIZE = 0x8000000
TLS_ADDRESS = STACK_ADDRESS + 0x1000
TCB_ADDRESS = TLS_ADDRESS + 0x1000
BIONIC_TLS_ADDRESS = TCB_ADDRESS + 0x1000

# Minimal TLS block: bionic TLS pointer at offset 0, TCB pointer at
# offset 16; TPIDR_EL0 below points into this block.
tls = struct.pack('<QQQQQQQQQ',
                  BIONIC_TLS_ADDRESS, 0, TCB_ADDRESS, 0, 0, 0, 0, 0, 0)
ql.uc.mem_write(TLS_ADDRESS, tls)

class pthread_attr_t(ctypes.Structure):
    _fields_ = [
        ("flags", ctypes.c_uint32),
        ("stack_base", ctypes.c_void_p),
        ("stack_size", ctypes.c_size_t),
    ]

class pthread_internal_t(ctypes.Structure):
    _fields_ = [
        ("next", ctypes.c_void_p),
        ("prev", ctypes.c_void_p),
        ("tid", ctypes.c_int),
        ("cache_pid_and_vforked", ctypes.c_uint32),
        ("attr", pthread_attr_t),
    ]

thread_attr = pthread_attr_t(0, STACK_ADDRESS, STACK_SIZE)
thread = pthread_internal_t(0, 0, 0, 0, thread_attr)
ql.uc.mem_write(TCB_ADDRESS, bytes(thread))

# bionic reads the thread pointer from TPIDR_EL0.
ql.uc.reg_write(UC_ARM64_REG_TPIDR_EL0, TLS_ADDRESS + 8)
```

Note that this post targets ARM64 Android, so the TCB and TLS structures given here do not apply to other platforms or architectures; keep that in mind.
The final piece of preparation is the heap, plus a few parameters the payload needs. Because we execute under an emulator and cannot call into the original system, the process's heap allocation machinery does not work, so we have to provide a heap of our own:

```python
from unicorn import UC_HOOK_CODE
from unicorn.arm64_const import UC_ARM64_REG_X0, UC_ARM64_REG_X1

HEAP_ADDRESS = STACK_ADDRESS + 0x8000

# Bump allocator: hand out 16-byte-aligned chunks, never reclaim them.
def malloc(*args):
    global HEAP_ADDRESS
    sz = ql.uc.reg_read(UC_ARM64_REG_X0)
    ql.uc.reg_write(UC_ARM64_REG_X0, HEAP_ADDRESS)
    HEAP_ADDRESS += (sz + 15) & ~15

def free(*args):
    pass

def calloc(*args):
    global HEAP_ADDRESS
    n = ql.uc.reg_read(UC_ARM64_REG_X0)
    sz = ql.uc.reg_read(UC_ARM64_REG_X1) * n
    ql.uc.reg_write(UC_ARM64_REG_X0, HEAP_ADDRESS)
    HEAP_ADDRESS += (sz + 15) & ~15

def realloc(*args):
    global HEAP_ADDRESS
    ptr = ql.uc.reg_read(UC_ARM64_REG_X0)
    sz = ql.uc.reg_read(UC_ARM64_REG_X1)
    if ptr != 0:
        if sz == 0:
            return
        # Copy the old contents into the newly allocated chunk.
        data = ql.uc.mem_read(ptr, sz)
        ql.uc.mem_write(HEAP_ADDRESS, bytes(data))
    ql.uc.reg_write(UC_ARM64_REG_X0, HEAP_ADDRESS)
    HEAP_ADDRESS += (sz + 15) & ~15

# Locate the libc allocators and libil2cpp.so inside the dumped image.
MALLOC_ADDR, FREE_ADDR, CALLOC_ADDR, REALLOC_ADDR = libc.get_funcs(
    memory_info, memory_data, 'libc.so',
    ['malloc', 'free', 'calloc', 'realloc'])
IL2CPP_BASE_DATA, IL2CPP_BASE_ADDR, IL2CPP_BASE_END = libc.read_so(
    memory_info, memory_data, "libil2cpp.so")
print(f'malloc 0x{MALLOC_ADDR:x}')
print(f'free 0x{FREE_ADDR:x}')
print(f'calloc 0x{CALLOC_ADDR:x}')
print(f'dlsym 0x{DLSYM_ADDR:x}')  # DLSYM_ADDR: see the dlsym patch below

# Overwrite each allocator's first instruction with RET (bytes of 0xD65F03C0)
# and hook that instruction so the Python implementations above run instead.
ql.uc.mem_write(MALLOC_ADDR, b'\xC0\x03\x5F\xD6')
ql.uc.mem_write(FREE_ADDR, b'\xC0\x03\x5F\xD6')
ql.uc.mem_write(CALLOC_ADDR, b'\xC0\x03\x5F\xD6')
ql.uc.mem_write(REALLOC_ADDR, b'\xC0\x03\x5F\xD6')
ql.uc.hook_add(UC_HOOK_CODE, malloc, None, MALLOC_ADDR, MALLOC_ADDR + 4)
ql.uc.hook_add(UC_HOOK_CODE, free, None, FREE_ADDR, FREE_ADDR + 4)
ql.uc.hook_add(UC_HOOK_CODE, calloc, None, CALLOC_ADDR, CALLOC_ADDR + 4)
ql.uc.hook_add(UC_HOOK_CODE, realloc, None, REALLOC_ADDR, REALLOC_ADDR + 4)
```

The principle is to reserve a large-enough block of memory, hook the handful of heap functions, and implement the allocators in Python. The ones shown here are deliberately simple: they only allocate, never release.

To run, our dumper also needs dlsym to look up the functions il2cpp exports. To get past Android's linker namespace mechanism, we also have to patch dlsym so it can actually find the il2cpp functions. Android uses the return address of whoever calls dlsym to decide which namespace to search, so the patch makes dlsym take the caller address from a third parameter instead of from LR. Our payload passes the address of libil2cpp.so as that third argument when calling dlsym, and only then do the il2cpp functions resolve.
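The patch itself lives in the repository; the following is only my sketch of the idea. It assumes dlsym is exported by libdl.so in the dump and that, as in bionic on ARM64, dlsym begins with mov x2, x30 (capturing the caller address from LR) before tail-calling into the loader:

```python
# Sketch only -- the actual patch is in the qiling-il2cpp-dump repository.
# Assumption: dlsym lives in libdl.so and starts with `mov x2, x30`.
DLSYM_ADDR, = libc.get_funcs(memory_info, memory_data, 'libdl.so', ['dlsym'])

# Replace `mov x2, x30` with NOP so that X2 -- the third argument, which the
# payload sets to libil2cpp.so's address -- reaches the loader as the caller
# address instead of the real return address in LR.
ql.uc.mem_write(DLSYM_ADDR, b'\x1F\x20\x03\xD5')  # AArch64 NOP
```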
With all of that in place, we can start the virtual machine and execute our payload.
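The exact startup code is in the repository; below is only a minimal sketch of the register setup, where using ql.loader.elf_entry as the entry point and passing libil2cpp.so's base address in X0 are my assumptions:

```python
from unicorn.arm64_const import UC_ARM64_REG_SP, UC_ARM64_REG_LR

ql.uc.reg_write(UC_ARM64_REG_SP, STACK_ADDRESS + STACK_SIZE)  # stack top
ql.uc.reg_write(UC_ARM64_REG_LR, END_ADDRESS)  # returning stops emulation
ql.uc.reg_write(UC_ARM64_REG_X0, IL2CPP_BASE_ADDR)
ql.uc.emu_start(ql.loader.elf_entry, END_ADDRESS)
```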
Our payload is essentially a modified Il2CppDumper. The main changes are making it accept the patched dlsym and swapping in a string-formatting output helper with fewer dependencies. Its behavior is roughly: open a file named dump.cs, then call Il2CppDumper's dump API and write everything it produces into that file.

The complete code is on GitHub: qiling-il2cpp-dump (now with faster extraction, and Unity 2018 is supported).

Besides dumping il2cpp, this setup can also be used for some debugging, as long as you compile a matching payload. Its drawback is speed: dumping a certain 5v5 game takes about 30 minutes.

I'm too old to keep playing, so I'm sharing these ideas here. Do not use the techniques described in this post for anything illegal; you alone bear the consequences if you do.