[Original] monitor memory read operation code snippet

Posted: 2022-6-28 16:27
Monitor memory read operations on a page by remapping it with VM_PROT_EXECUTE_ONLY: the page is backed up, its read permission is dropped so every data load faults, and the faulting load is then emulated from the backup copy inside a Mach exception handler. A minimal usage sketch follows the listing.

#include "executeonly_monitor.h"
 
#include <mach/vm_param.h>
#include <mach/mach.h>
 
#include <dlfcn.h>
#include <unistd.h>
#include <sys/mman.h>
#include <pthread.h>
#include <errno.h>
#include <string.h> // memcpy / strerror
 
#include <vector>
#include <unordered_map>
 
#include "logging/logging.h"
 
#define ALIGN_FLOOR(address, range) ((addr_t)address & ~((addr_t)range - 1))
 
typedef int32_t arm64_insn_t;
 
#define LOG_TAG "executeonly monitor"
 
#define submask(x) ((1L << ((x) + 1)) - 1)
#define bits(obj, st, fn) (((obj) >> (st)) & submask((fn) - (st)))
#define bit(obj, st) (((obj) >> (st)) & 1)
#define sbits(obj, st, fn) ((long)(bits(obj, st, fn) | ((long)bit(obj, fn) * ~submask(fn - st))))
 
std::unordered_map<addr_t, addr_t> *backup_pages = nullptr;
 
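// Fix up a thread that faulted while reading a monitored (execute-only) page: decode the
// faulting A64 load at pc, satisfy it from the backup copy of the page, then step pc past it.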
void handle_fault_with_execute_only_supported(arm_thread_state64_t *ts, addr_t ts_pc, addr_t fault_addr,
                                              addr_t fault_page_addr) {
  arm64_insn_t insn;
  insn = *(arm64_insn_t *)ts_pc;
 
#if 0
  // read fault insn
  addr_t ts_pc_page = ALIGN_FLOOR(ts_pc, PAGE_SIZE);
  auto   iter       = g_crc_page_map->find(ts_pc_page);
  if (iter == g_crc_page_map->end()) {
    insn = *(arm64_insn_t *)ts_pc;
  } else {
    insn = *(arm64_insn_t *)(iter->second.bakcup_page_addr + (ts_pc - ts_pc_page));
  }
#endif
 
  /* C4.1 A64 instruction set encoding */
  /* C4.1.4 Loads and Stores */
  int rn_ndx = -1;
  int rt_ndx = -1;
  // loads and stores: op0 (bits[28:25]) == x1x0
  if ((insn & 0x0a000000) == 0x08000000) {
    uint32_t op0 = (insn & 0xf0000000) >> 28;
    uint32_t op1 = (insn & 0x04000000) >> 26; // bit 26 (V)
    uint32_t op2 = (insn & 0x01800000) >> 23;
    uint32_t op4 = (insn & 0x00000c00) >> 10;
    // load register (literal) is PC-relative and has no base register
    if (((op0 & 0b0011) == 0b01) && ((op2 & 0b10) == 0b00)) {
      rn_ndx = -1;
    } else {
      rn_ndx = ((insn & 0x1e0) >> 5);
    }
 
    // load/store register (register offset): op0 == xx11, op2 == 0x, op4 == 10
    if (((op0 & 0b0011) == 0b11) && ((op2 & 0b10) == 0b00) && (op4 == 0b10)) {
      uint32_t size = (insn & 0xc0000000) >> 30;
      uint32_t V = (insn & 0x04000000) >> 26;
      uint32_t opc = (insn & 0x00c00000) >> 22;
      // ldrsw (register)
      if (size == 0b10 && V == 0 && opc == 0b10) {
        rt_ndx = insn & 0x1f;
        if (backup_pages->count(fault_page_addr) == 0) {
          return;
        }
        auto backup_page = (*backup_pages)[fault_page_addr];
        auto fault_backup_addr = backup_page + (fault_addr - fault_page_addr);
        // sign-extend the 32-bit value from the backup copy into Xt
        ts->__x[rt_ndx] = (int64_t) * (int32_t *)fault_backup_addr;
        LOG(1, "set rt register: %p", fault_backup_addr);
        // register-offset addressing has no writeback; step pc and skip the generic path below
        arm_thread_state64_set_pc_fptr(*ts, ts_pc + 4);
        return;
      }
    }
  }
 
  // generic load/store register path: Rt in bits[4:0], size in bits[31:30]
  rt_ndx = bits(insn, 0, 4);
  int size_flag = bits(insn, 30, 31);
  int opc = bits(insn, 22, 23);
  int post_pre_flag = bits(insn, 10, 11);
  LOG(1, "fault: post_pre: %d, size:%d, opc: %d, rn: %d, rt: %d", post_pre_flag, size_flag, opc, rn_ndx, rt_ndx);

  if (rn_ndx >= 0) {
    if (backup_pages->count(fault_page_addr) == 0) {
      return;
    }
    auto backup_page = (*backup_pages)[fault_page_addr];
    auto rn = ts->__x[rn_ndx];
    // the offset into the backup page is taken from Rn, so only accesses where Rn equals the
    // faulting address (zero-offset / post-indexed loops) are reconstructed faithfully;
    // signed-load variants (opc) are not distinguished, and Rn is always advanced as if the
    // access were post-indexed
    auto new_rn = backup_page + (rn - fault_page_addr);
    if (size_flag == 0b00) {
      ts->__x[rt_ndx] = *(uint8_t *)new_rn; // LDRB: zero-extend into Xt
      ts->__x[rn_ndx] += 1;
    } else if (size_flag == 0b01) {
      ts->__x[rt_ndx] = *(uint16_t *)new_rn; // LDRH
      ts->__x[rn_ndx] += 2;
    } else if (size_flag == 0b10) {
      ts->__x[rt_ndx] = *(uint32_t *)new_rn; // 32-bit LDR
      ts->__x[rn_ndx] += 4;
    } else {
      ts->__x[rt_ndx] = *(uint64_t *)new_rn; // 64-bit LDR
      ts->__x[rn_ndx] += 8;
    }
  }

  if (rt_ndx >= 0) {
    // step over the emulated instruction
    arm_thread_state64_set_pc_fptr(*ts, ts_pc + 4);
  }
}
 
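// Remap the page containing addr as execute-only, so data reads (but not instruction fetches)
// raise EXC_BAD_ACCESS.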
void set_page_execute_only(void *addr) {
  size_t page_size = sysconf(_SC_PAGESIZE);
  addr_t page = ALIGN_FLOOR(addr, page_size);
#if 0
  kern_return_t kr;
  kr = vm_protect(mach_task_self(), (mach_vm_address_t) page, (mach_vm_size_t) page_size, false, VM_PROT_EXECUTE_ONLY);
  if (kr != KERN_SUCCESS) {
    ERROR_LOG("failed: %s", mach_error_string(kr));
  }
#else
  int ret = mprotect((void *)page, page_size, VM_PROT_EXECUTE_ONLY);
  if (ret) {
    LOG(1, "mprotect failed: %s", strerror(errno));
  }
#endif
}
 
static mach_port_t exception_port = MACH_PORT_NULL;
 
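// Detect whether the same address keeps faulting, a sign the handler is stuck in a loop.
// Note: not called from exception_handler in this snippet.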
static bool check_if_fall_loop(arm_thread_state64_t *ts, arm_exception_state64_t *es, addr_t fault_addr) {
  // stack backtrace
  addr_t ts_fp = __darwin_arm_thread_state64_get_fp(*ts);
  uint64_t fp_frame[2] = {0};
 
  // fault at same address multi-times
  bool is_multi_same_fault = false;
 
  static int fault_stack_count = 0;
  static addr_t fault_stack[16] = {0};
  fault_stack[fault_stack_count++ % 16] = fault_addr;
 
  int count = 0;
  for (int i = 0; i < 16; i++) {
    if (fault_stack[i] == fault_addr)
      count += 1;
  }
  if (count >= 13) {
    is_multi_same_fault = true;
  }
  return is_multi_same_fault;
}
 
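// Heuristic: faults outside the typical arm64 userland range are treated as real bad accesses
// and handed back to the default exception handling.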
static bool check_if_invalid_access(addr_t fault_addr) {
  bool is_invalid_access = false;
  if (fault_addr < 0x100000000 || fault_addr > 0x800000000) {
    is_invalid_access = true;
  }
  return is_invalid_access;
}
 
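// Exception server loop: receive mach_exception_raise messages on exception_port, patch the
// faulting thread's state, and send the MIG reply so the thread resumes.
// Request (presumably a typedef of __Request__mach_exception_raise_t) and
// __Reply__mach_exception_raise_t come from the MIG-generated mach_exc.h, not shown here.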
static void *exception_handler(void *ctx) {
  Request In0P;
  mach_msg_header_t *InHeadP = &In0P.Head;
  for (;;) {
    kern_return_t kr;
    kr = mach_msg(&In0P.Head, MACH_RCV_MSG | MACH_MSG_TIMEOUT_NONE, 0, sizeof(Request), exception_port,
                  MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    if (kr != KERN_SUCCESS) {
      LOG(1, "failed: %s", mach_error_string(kr));
      return NULL;
    }
 
    mach_port_t thread_port = In0P.thread.name;
    mach_port_t task_port = In0P.task.name;
 
    arm_thread_state64_t ts = {0};
    mach_msg_type_number_t ts_cnt = ARM_THREAD_STATE64_COUNT;
    kr = thread_get_state(thread_port, ARM_THREAD_STATE64, (thread_state_t)&ts, &ts_cnt);
    if (kr != KERN_SUCCESS) {
      LOG(1, "failed: %s", mach_error_string(kr));
      return NULL;
    }
 
    arm_exception_state64_t es = {0};
    mach_msg_type_number_t es_cnt = ARM_EXCEPTION_STATE64_COUNT;
    kr = thread_get_state(thread_port, ARM_EXCEPTION_STATE64, (thread_state_t)&es, &es_cnt);
    if (kr != KERN_SUCCESS) {
      LOG(1, "failed: %s", mach_error_string(kr));
      return NULL;
    }
 
    addr_t ts_pc = __darwin_arm_thread_state64_get_pc(ts);
    addr_t fault_addr = es.__far;
    addr_t fault_page_addr = ALIGN_FLOOR(fault_addr, PAGE_SIZE);
    LOG(1, "fault: at %p, pc %p", fault_addr, ts_pc);
 
    if (!check_if_invalid_access(fault_addr)) {
      handle_fault_with_execute_only_supported(&ts, ts_pc, fault_addr, fault_page_addr);
    }
 
    kr = thread_set_state(thread_port, ARM_THREAD_STATE64, (thread_state_t)&ts, ARM_THREAD_STATE64_COUNT);
    if (kr != KERN_SUCCESS) {
      LOG(1, "failed: %s", mach_error_string(kr));
      return NULL;
    }
 
    // build the MIG reply from the request header
    typedef __Reply__mach_exception_raise_t Reply __attribute__((unused));
    Reply OutP;
    OutP.Head.msgh_bits = MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(InHeadP->msgh_bits), 0);
    OutP.Head.msgh_remote_port = InHeadP->msgh_remote_port;
    OutP.Head.msgh_local_port = MACH_PORT_NULL;
    OutP.Head.msgh_size = (mach_msg_size_t)sizeof(Reply);
    OutP.Head.msgh_id = InHeadP->msgh_id + 100;
    OutP.NDR = NDR_record;
    OutP.RetCode = KERN_SUCCESS;

    // report failure for faults we did not cause, so default crash handling can proceed
    if (check_if_invalid_access(fault_addr)) {
      OutP.RetCode = KERN_FAILURE;
    }

    // send the reply so the faulting thread resumes
    kr = mach_msg(&OutP.Head, MACH_SEND_MSG | MACH_MSG_TIMEOUT_NONE, sizeof(Reply), 0, MACH_PORT_NULL,
                  MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    if (kr != KERN_SUCCESS) {
      LOG(1, "reply failed: %s", mach_error_string(kr));
      return NULL;
    }
  }
  return NULL;
}
 
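// Allocate a receive right, register it as the task-wide exception port
// (EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES), and spawn a thread to serve it.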
void install_memory_read_exception_callback() {
  static bool initialized = false;
  if (initialized)
    return;
  initialized = true;
 
  kern_return_t kr = KERN_SUCCESS;
  kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &exception_port);
  if (kr != KERN_SUCCESS) {
    LOG(1, "failed: %s", mach_error_string(kr));
    return;
  }
 
  kr = mach_port_insert_right(mach_task_self(), exception_port, exception_port, MACH_MSG_TYPE_MAKE_SEND);
  if (kr != KERN_SUCCESS) {
    LOG(1, "failed: %s", mach_error_string(kr));
    return;
  }
 
  // set exception handler
  kr = task_set_exception_ports(mach_task_self(), EXC_MASK_ALL, exception_port,
                                EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES, ARM_THREAD_STATE64);
  if (kr != KERN_SUCCESS) {
    LOG(1, "failed: %s", mach_error_string(kr));
    return;
  }
 
  // spawn a dedicated thread to service the exception messages
  pthread_t exception_handler_thread;
  pthread_create(&exception_handler_thread, NULL, exception_handler, NULL);
 
  LOG(1, "install memory read exception(port is %p) callback done", exception_port);
}
 
void executeonly_monitor_init() {
  install_memory_read_exception_callback();
}
 
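// Allocate one anonymous read/write page for the backup copy. Passing VM_MAKE_TAG(255) in the
// fd argument tags the anonymous region so it is easy to spot in vmmap output.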
addr_t allocate_page() {
  auto page = (addr_t)mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, VM_MAKE_TAG(255), 0);
  if ((void *)page == MAP_FAILED) {
    LOG(1, "mmap failed");
    return 0;
  }
  return page;
}
 
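// Copy the page containing addr into a fresh backup page, overwrite the bytes at addr's offset
// with the caller-supplied buffer, and remember the page -> backup mapping.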
void allocate_backup_page(void *addr, char *buffer, int buffer_size) {
  addr_t page_addr = ALIGN_FLOOR(addr, PAGE_SIZE);
  auto backup_page_addr = allocate_page();
  if (backup_page_addr == 0) {
    return;
  }
  memcpy((void *)backup_page_addr, (void *)page_addr, PAGE_SIZE);
 
  uint32_t page_offset = (addr_t)addr - page_addr;
  memcpy((void *)(backup_page_addr + page_offset), buffer, buffer_size);
 
  backup_pages->insert(std::make_pair(page_addr, backup_page_addr));
  LOG(1, "allocate backup page: %p --> %p, %p, %p", page_addr, backup_page_addr, *(uint64_t *)page_addr,
      *(uint64_t *)backup_page_addr);
}
 
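// Public entry point: back up the page containing addr (patched with buffer), then make the
// real page execute-only so subsequent reads trap into the exception handler.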
int executeonly_monitor(void *addr, char *buffer, int buffer_size) {
  if (backup_pages == nullptr) {
    backup_pages = new std::unordered_map<addr_t, addr_t>();
  }
 
  addr_t page_addr = ALIGN_FLOOR(addr, PAGE_SIZE);
  LOG(1, "start monitor %p page", page_addr);
 
  allocate_backup_page(addr, buffer, buffer_size);
  set_page_execute_only(addr);
  return 0;
}
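
A minimal usage sketch, assuming executeonly_monitor.h declares the executeonly_monitor_init() / executeonly_monitor() entry points above and that the platform actually accepts the execute-only protection for the chosen mapping (main, page and decoy below are illustrative, not part of the snippet):

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <sys/mman.h>
#include <mach/vm_param.h>

#include "executeonly_monitor.h"

int main() {
  // start the Mach exception handler thread
  executeonly_monitor_init();

  // stand-in target: one anonymous rw page; in practice this would be the data to read-trace
  void *page = mmap(nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  memset(page, 0x41, PAGE_SIZE);

  // readers will observe these bytes, because faulting loads are served from the backup copy
  char decoy[8] = {0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x00};
  executeonly_monitor(page, decoy, sizeof(decoy));

  // this load faults (the page is now execute-only) and is emulated by the exception handler
  volatile uint32_t *p = (volatile uint32_t *)page;
  uint32_t value = *p;
  printf("read back: 0x%08x\n", value); // 0x44332211 if the emulated load went through the backup
  return 0;
}

Only plain scalar loads with the base register pointing at the faulting address are decoded, so the read in the sketch is kept to a simple dereference; memcpy or SIMD loads on the monitored page would hit paths the handler does not emulate.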
