Advertisement
cruisek

WARNING in kvm_mmu_uninit_tdp_mmu c

Jan 9th, 2022
56
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 41.31 KB | None | 0 0
  1. // autogenerated by syzkaller (https://github.com/google/syzkaller)
  2.  
  3. #define _GNU_SOURCE
  4.  
  5. #include <dirent.h>
  6. #include <endian.h>
  7. #include <errno.h>
  8. #include <fcntl.h>
  9. #include <pthread.h>
  10. #include <signal.h>
  11. #include <stdarg.h>
  12. #include <stdbool.h>
  13. #include <stddef.h>
  14. #include <stdint.h>
  15. #include <stdio.h>
  16. #include <stdlib.h>
  17. #include <string.h>
  18. #include <sys/ioctl.h>
  19. #include <sys/prctl.h>
  20. #include <sys/stat.h>
  21. #include <sys/syscall.h>
  22. #include <sys/types.h>
  23. #include <sys/wait.h>
  24. #include <time.h>
  25. #include <unistd.h>
  26.  
  27. #include <linux/futex.h>
  28. #include <linux/kvm.h>
  29.  
/* NOTE(review): per-worker process index used by syzkaller harnesses;
 * not referenced in the visible portion of this file. */
static unsigned long long procid;
  31.  
  32. static void sleep_ms(uint64_t ms)
  33. {
  34. usleep(ms * 1000);
  35. }
  36.  
  37. static uint64_t current_time_ms(void)
  38. {
  39. struct timespec ts;
  40. if (clock_gettime(CLOCK_MONOTONIC, &ts))
  41. exit(1);
  42. return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
  43. }
  44.  
  45. static void thread_start(void* (*fn)(void*), void* arg)
  46. {
  47. pthread_t th;
  48. pthread_attr_t attr;
  49. pthread_attr_init(&attr);
  50. pthread_attr_setstacksize(&attr, 128 << 10);
  51. int i = 0;
  52. for (; i < 100; i++) {
  53. if (pthread_create(&th, &attr, fn, arg) == 0) {
  54. pthread_attr_destroy(&attr);
  55. return;
  56. }
  57. if (errno == EAGAIN) {
  58. usleep(50);
  59. continue;
  60. }
  61. break;
  62. }
  63. exit(1);
  64. }
  65.  
/* Minimal one-shot event built on a futex word: 0 = unsignaled, 1 = signaled. */
typedef struct {
int state;
} event_t;
  69.  
  70. static void event_init(event_t* ev)
  71. {
  72. ev->state = 0;
  73. }
  74.  
  75. static void event_reset(event_t* ev)
  76. {
  77. ev->state = 0;
  78. }
  79.  
  80. static void event_set(event_t* ev)
  81. {
  82. if (ev->state)
  83. exit(1);
  84. __atomic_store_n(&ev->state, 1, __ATOMIC_RELEASE);
  85. syscall(SYS_futex, &ev->state, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1000000);
  86. }
  87.  
  88. static void event_wait(event_t* ev)
  89. {
  90. while (!__atomic_load_n(&ev->state, __ATOMIC_ACQUIRE))
  91. syscall(SYS_futex, &ev->state, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0, 0);
  92. }
  93.  
  94. static int event_isset(event_t* ev)
  95. {
  96. return __atomic_load_n(&ev->state, __ATOMIC_ACQUIRE);
  97. }
  98.  
  99. static int event_timedwait(event_t* ev, uint64_t timeout)
  100. {
  101. uint64_t start = current_time_ms();
  102. uint64_t now = start;
  103. for (;;) {
  104. uint64_t remain = timeout - (now - start);
  105. struct timespec ts;
  106. ts.tv_sec = remain / 1000;
  107. ts.tv_nsec = (remain % 1000) * 1000 * 1000;
  108. syscall(SYS_futex, &ev->state, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0, &ts);
  109. if (__atomic_load_n(&ev->state, __ATOMIC_ACQUIRE))
  110. return 1;
  111. now = current_time_ms();
  112. if (now - start > timeout)
  113. return 0;
  114. }
  115. }
  116.  
  117. static bool write_file(const char* file, const char* what, ...)
  118. {
  119. char buf[1024];
  120. va_list args;
  121. va_start(args, what);
  122. vsnprintf(buf, sizeof(buf), what, args);
  123. va_end(args);
  124. buf[sizeof(buf) - 1] = 0;
  125. int len = strlen(buf);
  126. int fd = open(file, O_WRONLY | O_CLOEXEC);
  127. if (fd == -1)
  128. return false;
  129. if (write(fd, buf, len) != len) {
  130. int err = errno;
  131. close(fd);
  132. errno = err;
  133. return false;
  134. }
  135. close(fd);
  136. return true;
  137. }
  138.  
/* Fixed guest-physical memory layout used by syz_kvm_setup_cpu(): guest
 * code, GDT/LDT, page tables, stack and scratch variables all live in the
 * low pages of guest memory.  Values are guest-physical offsets. */
#define ADDR_TEXT 0x0000
#define ADDR_GDT 0x1000
#define ADDR_LDT 0x1800
#define ADDR_PML4 0x2000
#define ADDR_PDP 0x3000
#define ADDR_PD 0x4000
#define ADDR_STACK0 0x0f80
#define ADDR_VAR_HLT 0x2800
#define ADDR_VAR_SYSRET 0x2808
#define ADDR_VAR_SYSEXIT 0x2810
#define ADDR_VAR_IDT 0x3800
#define ADDR_VAR_TSS64 0x3a00
#define ADDR_VAR_TSS64_CPL3 0x3c00
#define ADDR_VAR_TSS16 0x3d00
#define ADDR_VAR_TSS16_2 0x3e00
#define ADDR_VAR_TSS16_CPL3 0x3f00
#define ADDR_VAR_TSS32 0x4800
#define ADDR_VAR_TSS32_2 0x4a00
#define ADDR_VAR_TSS32_CPL3 0x4c00
#define ADDR_VAR_TSS32_VM86 0x4e00
#define ADDR_VAR_VMXON_PTR 0x5f00
#define ADDR_VAR_VMCS_PTR 0x5f08
#define ADDR_VAR_VMEXIT_PTR 0x5f10
#define ADDR_VAR_VMWRITE_FLD 0x5f18
#define ADDR_VAR_VMWRITE_VAL 0x5f20
#define ADDR_VAR_VMXON 0x6000
#define ADDR_VAR_VMCS 0x7000
#define ADDR_VAR_VMEXIT_CODE 0x9000
#define ADDR_VAR_USER_CODE 0x9100
#define ADDR_VAR_USER_CODE2 0x9120
  169.  
/* GDT segment selectors: descriptor index << 3; "+ 3" sets RPL = 3 for
 * the user-mode (CPL3) variants.  The *_HI entries name the second
 * 8-byte half of 16-byte 64-bit system descriptors. */
#define SEL_LDT (1 << 3)
#define SEL_CS16 (2 << 3)
#define SEL_DS16 (3 << 3)
#define SEL_CS16_CPL3 ((4 << 3) + 3)
#define SEL_DS16_CPL3 ((5 << 3) + 3)
#define SEL_CS32 (6 << 3)
#define SEL_DS32 (7 << 3)
#define SEL_CS32_CPL3 ((8 << 3) + 3)
#define SEL_DS32_CPL3 ((9 << 3) + 3)
#define SEL_CS64 (10 << 3)
#define SEL_DS64 (11 << 3)
#define SEL_CS64_CPL3 ((12 << 3) + 3)
#define SEL_DS64_CPL3 ((13 << 3) + 3)
#define SEL_CGATE16 (14 << 3)
#define SEL_TGATE16 (15 << 3)
#define SEL_CGATE32 (16 << 3)
#define SEL_TGATE32 (17 << 3)
#define SEL_CGATE64 (18 << 3)
#define SEL_CGATE64_HI (19 << 3)
#define SEL_TSS16 (20 << 3)
#define SEL_TSS16_2 (21 << 3)
#define SEL_TSS16_CPL3 ((22 << 3) + 3)
#define SEL_TSS32 (23 << 3)
#define SEL_TSS32_2 (24 << 3)
#define SEL_TSS32_CPL3 ((25 << 3) + 3)
#define SEL_TSS32_VM86 (26 << 3)
#define SEL_TSS64 (27 << 3)
#define SEL_TSS64_HI (28 << 3)
#define SEL_TSS64_CPL3 ((29 << 3) + 3)
#define SEL_TSS64_CPL3_HI (30 << 3)
  200.  
/* Architectural x86 MSR numbers programmed/read by the guest setup code. */
#define MSR_IA32_FEATURE_CONTROL 0x3a
#define MSR_IA32_VMX_BASIC 0x480
#define MSR_IA32_SMBASE 0x9e
#define MSR_IA32_SYSENTER_CS 0x174
#define MSR_IA32_SYSENTER_ESP 0x175
#define MSR_IA32_SYSENTER_EIP 0x176
#define MSR_IA32_STAR 0xC0000081
#define MSR_IA32_LSTAR 0xC0000082
#define MSR_IA32_VMX_PROCBASED_CTLS2 0x48B

/* NOTE(review): NEXT_INSN/PREFIX_SIZE appear to relate to the guest text
 * layout; neither is referenced in the visible portion of this file. */
#define NEXT_INSN $0xbadc0de
#define PREFIX_SIZE 0xba1d
/* Pre-assembled x86 machine-code blobs that are copied into guest memory
 * and executed by the virtual CPU (emitted by syzkaller's KVM executor).
 * The names indicate the mode/privilege transition each blob performs;
 * the byte sequences are opaque opcode data and must not be edited.
 * NOTE(review): blob contents are not disassembled/verified here, except
 * where noted. */
const char kvm_asm16_cpl3[] = "\x0f\x20\xc0\x66\x83\xc8\x01\x0f\x22\xc0\xb8\xa0\x00\x0f\x00\xd8\xb8\x2b\x00\x8e\xd8\x8e\xc0\x8e\xe0\x8e\xe8\xbc\x00\x01\xc7\x06\x00\x01\x1d\xba\xc7\x06\x02\x01\x23\x00\xc7\x06\x04\x01\x00\x01\xc7\x06\x06\x01\x2b\x00\xcb";
/* mov eax,cr0; or eax,0x80000000; mov cr0,eax — sets CR0.PG (paging). */
const char kvm_asm32_paged[] = "\x0f\x20\xc0\x0d\x00\x00\x00\x80\x0f\x22\xc0";
const char kvm_asm32_vm86[] = "\x66\xb8\xb8\x00\x0f\x00\xd8\xea\x00\x00\x00\x00\xd0\x00";
const char kvm_asm32_paged_vm86[] = "\x0f\x20\xc0\x0d\x00\x00\x00\x80\x0f\x22\xc0\x66\xb8\xb8\x00\x0f\x00\xd8\xea\x00\x00\x00\x00\xd0\x00";
const char kvm_asm64_enable_long[] = "\x0f\x20\xc0\x0d\x00\x00\x00\x80\x0f\x22\xc0\xea\xde\xc0\xad\x0b\x50\x00\x48\xc7\xc0\xd8\x00\x00\x00\x0f\x00\xd8";
/* Long-mode code that configures and launches a nested VMX guest
 * (single source line; rejoined from a line-wrapped paste). */
const char kvm_asm64_init_vm[] = "\x0f\x20\xc0\x0d\x00\x00\x00\x80\x0f\x22\xc0\xea\xde\xc0\xad\x0b\x50\x00\x48\xc7\xc0\xd8\x00\x00\x00\x0f\x00\xd8\x48\xc7\xc1\x3a\x00\x00\x00\x0f\x32\x48\x83\xc8\x05\x0f\x30\x0f\x20\xe0\x48\x0d\x00\x20\x00\x00\x0f\x22\xe0\x48\xc7\xc1\x80\x04\x00\x00\x0f\x32\x48\xc7\xc2\x00\x60\x00\x00\x89\x02\x48\xc7\xc2\x00\x70\x00\x00\x89\x02\x48\xc7\xc0\x00\x5f\x00\x00\xf3\x0f\xc7\x30\x48\xc7\xc0\x08\x5f\x00\x00\x66\x0f\xc7\x30\x0f\xc7\x30\x48\xc7\xc1\x81\x04\x00\x00\x0f\x32\x48\x83\xc8\x00\x48\x21\xd0\x48\xc7\xc2\x00\x40\x00\x00\x0f\x79\xd0\x48\xc7\xc1\x82\x04\x00\x00\x0f\x32\x48\x83\xc8\x00\x48\x21\xd0\x48\xc7\xc2\x02\x40\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x1e\x40\x00\x00\x48\xc7\xc0\x81\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc1\x83\x04\x00\x00\x0f\x32\x48\x0d\xff\x6f\x03\x00\x48\x21\xd0\x48\xc7\xc2\x0c\x40\x00\x00\x0f\x79\xd0\x48\xc7\xc1\x84\x04\x00\x00\x0f\x32\x48\x0d\xff\x17\x00\x00\x48\x21\xd0\x48\xc7\xc2\x12\x40\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x04\x2c\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x00\x28\x00\x00\x48\xc7\xc0\xff\xff\xff\xff\x0f\x79\xd0\x48\xc7\xc2\x02\x0c\x00\x00\x48\xc7\xc0\x50\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc0\x58\x00\x00\x00\x48\xc7\xc2\x00\x0c\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x04\x0c\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x06\x0c\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x08\x0c\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x0a\x0c\x00\x00\x0f\x79\xd0\x48\xc7\xc0\xd8\x00\x00\x00\x48\xc7\xc2\x0c\x0c\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x02\x2c\x00\x00\x48\xc7\xc0\x00\x05\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x00\x4c\x00\x00\x48\xc7\xc0\x50\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x10\x6c\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x12\x6c\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x0f\x20\xc0\x48\xc7\xc2\x00\x6c\x00\x00\x48\x89\xc0\x0f\x79\xd0\x0f\x20\xd8\x48\xc7\xc2\x02\x6c\x00\x00\x48\x89\xc0\x0f\x79\xd0\x0f\x20\xe0\x48\xc7\xc2\x04\x6c\x00\x00\x48\x89\xc0\x0f\x79\xd0\x48\xc7\xc2\x06\x6c\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x08\x6c\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x0a\x6c\x00\x00\x48\xc7\xc0\x00\x3a\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x0c\x6c\x00\x00\x48\xc7\xc0\x00\x10\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x0e\x6c\x00\x00\x48\xc7\xc0\x00\x38\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x14\x6c\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x16\x6c\x00\x00\x48\x8b\x04\x25\x10\x5f\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x00\x00\x00\x00\x48\xc7\xc0\x01\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x02\x00\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x00\x20\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x02\x20\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x04\x20\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x06\x20\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc1\x77\x02\x00\x00\x0f\x32\x48\xc1\xe2\x20\x48\x09\xd0\x48\xc7\xc2\x00\x2c\x00\x00\x48\x89\xc0\x0f\x79\xd0\x48\xc7\xc2\x04\x40\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x0a\x40\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x0e\x40\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x10\x40\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x16\x40\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x14\x40\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x00\x60\x00\x00\x48\xc7\xc0\xff\xff\xff\xff\x0f\x79\xd0\x48\xc7\xc2\x02\x60\x00\x00\x48\xc7\xc0\xff\xff\xff\xff\x0f\x79\xd0\x48\xc7\xc2\x1c\x20\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x1e\x20\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x20\x20\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x22\x20\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x00\x08\x00\x00\x48\xc7\xc0\x58\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x02\x08\x00\x00\x48\xc7\xc0\x50\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x04\x08\x00\x00\x48\xc7\xc0\x58\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x06\x08\x00\x00\x48\xc7\xc0\x58\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x08\x08\x00\x00\x48\xc7\xc0\x58\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x0a\x08\x00\x00\x48\xc7\xc0\x58\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x0c\x08\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x0e\x08\x00\x00\x48\xc7\xc0\xd8\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x12\x68\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x14\x68\x00\x00\x48\xc7\xc0\x00\x3a\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x16\x68\x00\x00\x48\xc7\xc0\x00\x10\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x18\x68\x00\x00\x48\xc7\xc0\x00\x38\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x00\x48\x00\x00\x48\xc7\xc0\xff\xff\x0f\x00\x0f\x79\xd0\x48\xc7\xc2\x02\x48\x00\x00\x48\xc7\xc0\xff\xff\x0f\x00\x0f\x79\xd0\x48\xc7\xc2\x04\x48\x00\x00\x48\xc7\xc0\xff\xff\x0f\x00\x0f\x79\xd0\x48\xc7\xc2\x06\x48\x00\x00\x48\xc7\xc0\xff\xff\x0f\x00\x0f\x79\xd0\x48\xc7\xc2\x08\x48\x00\x00\x48\xc7\xc0\xff\xff\x0f\x00\x0f\x79\xd0\x48\xc7\xc2\x0a\x48\x00\x00\x48\xc7\xc0\xff\xff\x0f\x00\x0f\x79\xd0\x48\xc7\xc2\x0c\x48\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x0e\x48\x00\x00\x48\xc7\xc0\xff\x1f\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x10\x48\x00\x00\x48\xc7\xc0\xff\x1f\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x12\x48\x00\x00\x48\xc7\xc0\xff\x1f\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x14\x48\x00\x00\x48\xc7\xc0\x93\x40\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x16\x48\x00\x00\x48\xc7\xc0\x9b\x20\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x18\x48\x00\x00\x48\xc7\xc0\x93\x40\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x1a\x48\x00\x00\x48\xc7\xc0\x93\x40\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x1c\x48\x00\x00\x48\xc7\xc0\x93\x40\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x1e\x48\x00\x00\x48\xc7\xc0\x93\x40\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x20\x48\x00\x00\x48\xc7\xc0\x82\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x22\x48\x00\x00\x48\xc7\xc0\x8b\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x1c\x68\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x1e\x68\x00\x00\x48\xc7\xc0\x00\x91\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x20\x68\x00\x00\x48\xc7\xc0\x02\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x06\x28\x00\x00\x48\xc7\xc0\x00\x05\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x0a\x28\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x0c\x28\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x0e\x28\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x10\x28\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x0f\x20\xc0\x48\xc7\xc2\x00\x68\x00\x00\x48\x89\xc0\x0f\x79\xd0\x0f\x20\xd8\x48\xc7\xc2\x02\x68\x00\x00\x48\x89\xc0\x0f\x79\xd0\x0f\x20\xe0\x48\xc7\xc2\x04\x68\x00\x00\x48\x89\xc0\x0f\x79\xd0\x48\xc7\xc0\x18\x5f\x00\x00\x48\x8b\x10\x48\xc7\xc0\x20\x5f\x00\x00\x48\x8b\x08\x48\x31\xc0\x0f\x78\xd0\x48\x31\xc8\x0f\x79\xd0\x0f\x01\xc2\x48\xc7\xc2\x00\x44\x00\x00\x0f\x78\xd0\xf4";
const char kvm_asm64_vm_exit[] = "\x48\xc7\xc3\x00\x44\x00\x00\x0f\x78\xda\x48\xc7\xc3\x02\x44\x00\x00\x0f\x78\xd9\x48\xc7\xc0\x00\x64\x00\x00\x0f\x78\xc0\x48\xc7\xc3\x1e\x68\x00\x00\x0f\x78\xdb\xf4";
const char kvm_asm64_cpl3[] = "\x0f\x20\xc0\x0d\x00\x00\x00\x80\x0f\x22\xc0\xea\xde\xc0\xad\x0b\x50\x00\x48\xc7\xc0\xd8\x00\x00\x00\x0f\x00\xd8\x48\xc7\xc0\x6b\x00\x00\x00\x8e\xd8\x8e\xc0\x8e\xe0\x8e\xe8\x48\xc7\xc4\x80\x0f\x00\x00\x48\xc7\x04\x24\x1d\xba\x00\x00\x48\xc7\x44\x24\x04\x63\x00\x00\x00\x48\xc7\x44\x24\x08\x80\x0f\x00\x00\x48\xc7\x44\x24\x0c\x6b\x00\x00\x00\xcb";
  221.  
  222. #define KVM_SMI _IO(KVMIO, 0xb7)
  223.  
  224. #define CR0_PE 1
  225. #define CR0_MP (1 << 1)
  226. #define CR0_EM (1 << 2)
  227. #define CR0_TS (1 << 3)
  228. #define CR0_ET (1 << 4)
  229. #define CR0_NE (1 << 5)
  230. #define CR0_WP (1 << 16)
  231. #define CR0_AM (1 << 18)
  232. #define CR0_NW (1 << 29)
  233. #define CR0_CD (1 << 30)
  234. #define CR0_PG (1 << 31)
  235.  
  236. #define CR4_VME 1
  237. #define CR4_PVI (1 << 1)
  238. #define CR4_TSD (1 << 2)
  239. #define CR4_DE (1 << 3)
  240. #define CR4_PSE (1 << 4)
  241. #define CR4_PAE (1 << 5)
  242. #define CR4_MCE (1 << 6)
  243. #define CR4_PGE (1 << 7)
  244. #define CR4_PCE (1 << 8)
  245. #define CR4_OSFXSR (1 << 8)
  246. #define CR4_OSXMMEXCPT (1 << 10)
  247. #define CR4_UMIP (1 << 11)
  248. #define CR4_VMXE (1 << 13)
  249. #define CR4_SMXE (1 << 14)
  250. #define CR4_FSGSBASE (1 << 16)
  251. #define CR4_PCIDE (1 << 17)
  252. #define CR4_OSXSAVE (1 << 18)
  253. #define CR4_SMEP (1 << 20)
  254. #define CR4_SMAP (1 << 21)
  255. #define CR4_PKE (1 << 22)
  256.  
  257. #define EFER_SCE 1
  258. #define EFER_LME (1 << 8)
  259. #define EFER_LMA (1 << 10)
  260. #define EFER_NXE (1 << 11)
  261. #define EFER_SVME (1 << 12)
  262. #define EFER_LMSLE (1 << 13)
  263. #define EFER_FFXSR (1 << 14)
  264. #define EFER_TCE (1 << 15)
  265. #define PDE32_PRESENT 1
  266. #define PDE32_RW (1 << 1)
  267. #define PDE32_USER (1 << 2)
  268. #define PDE32_PS (1 << 7)
  269. #define PDE64_PRESENT 1
  270. #define PDE64_RW (1 << 1)
  271. #define PDE64_USER (1 << 2)
  272. #define PDE64_ACCESSED (1 << 5)
  273. #define PDE64_DIRTY (1 << 6)
  274. #define PDE64_PS (1 << 7)
  275. #define PDE64_G (1 << 8)
  276.  
/* Hardware layout of a 16-bit x86 Task State Segment; packed so it can
 * be copied into guest memory verbatim. */
struct tss16 {
uint16_t prev;
uint16_t sp0;
uint16_t ss0;
uint16_t sp1;
uint16_t ss1;
uint16_t sp2;
uint16_t ss2;
uint16_t ip;
uint16_t flags;
uint16_t ax;
uint16_t cx;
uint16_t dx;
uint16_t bx;
uint16_t sp;
uint16_t bp;
uint16_t si;
uint16_t di;
uint16_t es;
uint16_t cs;
uint16_t ss;
uint16_t ds;
uint16_t ldt;
} __attribute__((packed));
  301.  
/* Hardware layout of a 32-bit x86 Task State Segment.  The *h fields are
 * the reserved high halves of 16-bit selector slots; packed to match the
 * architectural format. */
struct tss32 {
uint16_t prev, prevh;
uint32_t sp0;
uint16_t ss0, ss0h;
uint32_t sp1;
uint16_t ss1, ss1h;
uint32_t sp2;
uint16_t ss2, ss2h;
uint32_t cr3;
uint32_t ip;
uint32_t flags;
uint32_t ax;
uint32_t cx;
uint32_t dx;
uint32_t bx;
uint32_t sp;
uint32_t bp;
uint32_t si;
uint32_t di;
uint16_t es, esh;
uint16_t cs, csh;
uint16_t ss, ssh;
uint16_t ds, dsh;
uint16_t fs, fsh;
uint16_t gs, gsh;
uint16_t ldt, ldth;
uint16_t trace;
uint16_t io_bitmap;
} __attribute__((packed));
  331.  
/* Hardware layout of a 64-bit x86 Task State Segment (stack pointers and
 * interrupt stack table); packed to match the architectural format. */
struct tss64 {
uint32_t reserved0;
uint64_t rsp[3];
uint64_t reserved1;
uint64_t ist[7];
uint64_t reserved2;
uint32_t reserved3;
uint32_t io_bitmap;
} __attribute__((packed));
  341.  
  342. static void fill_segment_descriptor(uint64_t* dt, uint64_t* lt, struct kvm_segment* seg)
  343. {
  344. uint16_t index = seg->selector >> 3;
  345. uint64_t limit = seg->g ? seg->limit >> 12 : seg->limit;
  346. uint64_t sd = (limit & 0xffff) | (seg->base & 0xffffff) << 16 | (uint64_t)seg->type << 40 | (uint64_t)seg->s << 44 | (uint64_t)seg->dpl << 45 | (uint64_t)seg->present << 47 | (limit & 0xf0000ULL) << 48 | (uint64_t)seg->avl << 52 | (uint64_t)seg->l << 53 | (uint64_t)seg->db << 54 | (uint64_t)seg->g << 55 | (seg->base & 0xff000000ULL) << 56;
  347. dt[index] = sd;
  348. lt[index] = sd;
  349. }
  350.  
  351. static void fill_segment_descriptor_dword(uint64_t* dt, uint64_t* lt, struct kvm_segment* seg)
  352. {
  353. fill_segment_descriptor(dt, lt, seg);
  354. uint16_t index = seg->selector >> 3;
  355. dt[index + 1] = 0;
  356. lt[index + 1] = 0;
  357. }
  358.  
  359. static void setup_syscall_msrs(int cpufd, uint16_t sel_cs, uint16_t sel_cs_cpl3)
  360. {
  361. char buf[sizeof(struct kvm_msrs) + 5 * sizeof(struct kvm_msr_entry)];
  362. memset(buf, 0, sizeof(buf));
  363. struct kvm_msrs* msrs = (struct kvm_msrs*)buf;
  364. struct kvm_msr_entry* entries = msrs->entries;
  365. msrs->nmsrs = 5;
  366. entries[0].index = MSR_IA32_SYSENTER_CS;
  367. entries[0].data = sel_cs;
  368. entries[1].index = MSR_IA32_SYSENTER_ESP;
  369. entries[1].data = ADDR_STACK0;
  370. entries[2].index = MSR_IA32_SYSENTER_EIP;
  371. entries[2].data = ADDR_VAR_SYSEXIT;
  372. entries[3].index = MSR_IA32_STAR;
  373. entries[3].data = ((uint64_t)sel_cs << 32) | ((uint64_t)sel_cs_cpl3 << 48);
  374. entries[4].index = MSR_IA32_LSTAR;
  375. entries[4].data = ADDR_VAR_SYSRET;
  376. ioctl(cpufd, KVM_SET_MSRS, msrs);
  377. }
  378.  
  379. static void setup_32bit_idt(struct kvm_sregs* sregs, char* host_mem, uintptr_t guest_mem)
  380. {
  381. sregs->idt.base = guest_mem + ADDR_VAR_IDT;
  382. sregs->idt.limit = 0x1ff;
  383. uint64_t* idt = (uint64_t*)(host_mem + sregs->idt.base);
  384. for (int i = 0; i < 32; i++) {
  385. struct kvm_segment gate;
  386. gate.selector = i << 3;
  387. switch (i % 6) {
  388. case 0:
  389. gate.type = 6;
  390. gate.base = SEL_CS16;
  391. break;
  392. case 1:
  393. gate.type = 7;
  394. gate.base = SEL_CS16;
  395. break;
  396. case 2:
  397. gate.type = 3;
  398. gate.base = SEL_TGATE16;
  399. break;
  400. case 3:
  401. gate.type = 14;
  402. gate.base = SEL_CS32;
  403. break;
  404. case 4:
  405. gate.type = 15;
  406. gate.base = SEL_CS32;
  407. break;
  408. case 5:
  409. gate.type = 11;
  410. gate.base = SEL_TGATE32;
  411. break;
  412. }
  413. gate.limit = guest_mem + ADDR_VAR_USER_CODE2;
  414. gate.present = 1;
  415. gate.dpl = 0;
  416. gate.s = 0;
  417. gate.g = 0;
  418. gate.db = 0;
  419. gate.l = 0;
  420. gate.avl = 0;
  421. fill_segment_descriptor(idt, idt, &gate);
  422. }
  423. }
  424.  
  425. static void setup_64bit_idt(struct kvm_sregs* sregs, char* host_mem, uintptr_t guest_mem)
  426. {
  427. sregs->idt.base = guest_mem + ADDR_VAR_IDT;
  428. sregs->idt.limit = 0x1ff;
  429. uint64_t* idt = (uint64_t*)(host_mem + sregs->idt.base);
  430. for (int i = 0; i < 32; i++) {
  431. struct kvm_segment gate;
  432. gate.selector = (i * 2) << 3;
  433. gate.type = (i & 1) ? 14 : 15;
  434. gate.base = SEL_CS64;
  435. gate.limit = guest_mem + ADDR_VAR_USER_CODE2;
  436. gate.present = 1;
  437. gate.dpl = 0;
  438. gate.s = 0;
  439. gate.g = 0;
  440. gate.db = 0;
  441. gate.l = 0;
  442. gate.avl = 0;
  443. fill_segment_descriptor_dword(idt, idt, &gate);
  444. }
  445. }
  446.  
/* One chunk of guest code to load; `typ` selects the CPU mode the text
 * runs in (8/16/32/64 — dispatched on in syz_kvm_setup_cpu). */
struct kvm_text {
uintptr_t typ;
const void* text;
uintptr_t size;
};

/* Generic (typ, val) option pair passed to syz_kvm_setup_cpu. */
struct kvm_opt {
uint64_t typ;
uint64_t val;
};

/* Flag bits for syz_kvm_setup_cpu()'s `flags` argument. */
#define KVM_SETUP_PAGING (1 << 0)
#define KVM_SETUP_PAE (1 << 1)
#define KVM_SETUP_PROTECTED (1 << 2)
#define KVM_SETUP_CPL3 (1 << 3)
#define KVM_SETUP_VIRT86 (1 << 4)
#define KVM_SETUP_SMM (1 << 5)
#define KVM_SETUP_VM (1 << 6)
  465. static volatile long syz_kvm_setup_cpu(volatile long a0, volatile long a1, volatile long a2, volatile long a3, volatile long a4, volatile long a5, volatile long a6, volatile long a7)
  466. {
  467. const int vmfd = a0;
  468. const int cpufd = a1;
  469. char* const host_mem = (char*)a2;
  470. const struct kvm_text* const text_array_ptr = (struct kvm_text*)a3;
  471. const uintptr_t text_count = a4;
  472. const uintptr_t flags = a5;
  473. const struct kvm_opt* const opt_array_ptr = (struct kvm_opt*)a6;
  474. uintptr_t opt_count = a7;
  475. const uintptr_t page_size = 4 << 10;
  476. const uintptr_t ioapic_page = 10;
  477. const uintptr_t guest_mem_size = 24 * page_size;
  478. const uintptr_t guest_mem = 0;
  479. (void)text_count;
  480. int text_type = text_array_ptr[0].typ;
  481. const void* text = text_array_ptr[0].text;
  482. uintptr_t text_size = text_array_ptr[0].size;
  483. for (uintptr_t i = 0; i < guest_mem_size / page_size; i++) {
  484. struct kvm_userspace_memory_region memreg;
  485. memreg.slot = i;
  486. memreg.flags = 0;
  487. memreg.guest_phys_addr = guest_mem + i * page_size;
  488. if (i == ioapic_page)
  489. memreg.guest_phys_addr = 0xfec00000;
  490. memreg.memory_size = page_size;
  491. memreg.userspace_addr = (uintptr_t)host_mem + i * page_size;
  492. ioctl(vmfd, KVM_SET_USER_MEMORY_REGION, &memreg);
  493. }
  494. struct kvm_userspace_memory_region memreg;
  495. memreg.slot = 1 + (1 << 16);
  496. memreg.flags = 0;
  497. memreg.guest_phys_addr = 0x30000;
  498. memreg.memory_size = 64 << 10;
  499. memreg.userspace_addr = (uintptr_t)host_mem;
  500. ioctl(vmfd, KVM_SET_USER_MEMORY_REGION, &memreg);
  501. struct kvm_sregs sregs;
  502. if (ioctl(cpufd, KVM_GET_SREGS, &sregs))
  503. return -1;
  504. struct kvm_regs regs;
  505. memset(&regs, 0, sizeof(regs));
  506. regs.rip = guest_mem + ADDR_TEXT;
  507. regs.rsp = ADDR_STACK0;
  508. sregs.gdt.base = guest_mem + ADDR_GDT;
  509. sregs.gdt.limit = 256 * sizeof(uint64_t) - 1;
  510. uint64_t* gdt = (uint64_t*)(host_mem + sregs.gdt.base);
  511. struct kvm_segment seg_ldt;
  512. seg_ldt.selector = SEL_LDT;
  513. seg_ldt.type = 2;
  514. seg_ldt.base = guest_mem + ADDR_LDT;
  515. seg_ldt.limit = 256 * sizeof(uint64_t) - 1;
  516. seg_ldt.present = 1;
  517. seg_ldt.dpl = 0;
  518. seg_ldt.s = 0;
  519. seg_ldt.g = 0;
  520. seg_ldt.db = 1;
  521. seg_ldt.l = 0;
  522. sregs.ldt = seg_ldt;
  523. uint64_t* ldt = (uint64_t*)(host_mem + sregs.ldt.base);
  524. struct kvm_segment seg_cs16;
  525. seg_cs16.selector = SEL_CS16;
  526. seg_cs16.type = 11;
  527. seg_cs16.base = 0;
  528. seg_cs16.limit = 0xfffff;
  529. seg_cs16.present = 1;
  530. seg_cs16.dpl = 0;
  531. seg_cs16.s = 1;
  532. seg_cs16.g = 0;
  533. seg_cs16.db = 0;
  534. seg_cs16.l = 0;
  535. struct kvm_segment seg_ds16 = seg_cs16;
  536. seg_ds16.selector = SEL_DS16;
  537. seg_ds16.type = 3;
  538. struct kvm_segment seg_cs16_cpl3 = seg_cs16;
  539. seg_cs16_cpl3.selector = SEL_CS16_CPL3;
  540. seg_cs16_cpl3.dpl = 3;
  541. struct kvm_segment seg_ds16_cpl3 = seg_ds16;
  542. seg_ds16_cpl3.selector = SEL_DS16_CPL3;
  543. seg_ds16_cpl3.dpl = 3;
  544. struct kvm_segment seg_cs32 = seg_cs16;
  545. seg_cs32.selector = SEL_CS32;
  546. seg_cs32.db = 1;
  547. struct kvm_segment seg_ds32 = seg_ds16;
  548. seg_ds32.selector = SEL_DS32;
  549. seg_ds32.db = 1;
  550. struct kvm_segment seg_cs32_cpl3 = seg_cs32;
  551. seg_cs32_cpl3.selector = SEL_CS32_CPL3;
  552. seg_cs32_cpl3.dpl = 3;
  553. struct kvm_segment seg_ds32_cpl3 = seg_ds32;
  554. seg_ds32_cpl3.selector = SEL_DS32_CPL3;
  555. seg_ds32_cpl3.dpl = 3;
  556. struct kvm_segment seg_cs64 = seg_cs16;
  557. seg_cs64.selector = SEL_CS64;
  558. seg_cs64.l = 1;
  559. struct kvm_segment seg_ds64 = seg_ds32;
  560. seg_ds64.selector = SEL_DS64;
  561. struct kvm_segment seg_cs64_cpl3 = seg_cs64;
  562. seg_cs64_cpl3.selector = SEL_CS64_CPL3;
  563. seg_cs64_cpl3.dpl = 3;
  564. struct kvm_segment seg_ds64_cpl3 = seg_ds64;
  565. seg_ds64_cpl3.selector = SEL_DS64_CPL3;
  566. seg_ds64_cpl3.dpl = 3;
  567. struct kvm_segment seg_tss32;
  568. seg_tss32.selector = SEL_TSS32;
  569. seg_tss32.type = 9;
  570. seg_tss32.base = ADDR_VAR_TSS32;
  571. seg_tss32.limit = 0x1ff;
  572. seg_tss32.present = 1;
  573. seg_tss32.dpl = 0;
  574. seg_tss32.s = 0;
  575. seg_tss32.g = 0;
  576. seg_tss32.db = 0;
  577. seg_tss32.l = 0;
  578. struct kvm_segment seg_tss32_2 = seg_tss32;
  579. seg_tss32_2.selector = SEL_TSS32_2;
  580. seg_tss32_2.base = ADDR_VAR_TSS32_2;
  581. struct kvm_segment seg_tss32_cpl3 = seg_tss32;
  582. seg_tss32_cpl3.selector = SEL_TSS32_CPL3;
  583. seg_tss32_cpl3.base = ADDR_VAR_TSS32_CPL3;
  584. struct kvm_segment seg_tss32_vm86 = seg_tss32;
  585. seg_tss32_vm86.selector = SEL_TSS32_VM86;
  586. seg_tss32_vm86.base = ADDR_VAR_TSS32_VM86;
  587. struct kvm_segment seg_tss16 = seg_tss32;
  588. seg_tss16.selector = SEL_TSS16;
  589. seg_tss16.base = ADDR_VAR_TSS16;
  590. seg_tss16.limit = 0xff;
  591. seg_tss16.type = 1;
  592. struct kvm_segment seg_tss16_2 = seg_tss16;
  593. seg_tss16_2.selector = SEL_TSS16_2;
  594. seg_tss16_2.base = ADDR_VAR_TSS16_2;
  595. seg_tss16_2.dpl = 0;
  596. struct kvm_segment seg_tss16_cpl3 = seg_tss16;
  597. seg_tss16_cpl3.selector = SEL_TSS16_CPL3;
  598. seg_tss16_cpl3.base = ADDR_VAR_TSS16_CPL3;
  599. seg_tss16_cpl3.dpl = 3;
  600. struct kvm_segment seg_tss64 = seg_tss32;
  601. seg_tss64.selector = SEL_TSS64;
  602. seg_tss64.base = ADDR_VAR_TSS64;
  603. seg_tss64.limit = 0x1ff;
  604. struct kvm_segment seg_tss64_cpl3 = seg_tss64;
  605. seg_tss64_cpl3.selector = SEL_TSS64_CPL3;
  606. seg_tss64_cpl3.base = ADDR_VAR_TSS64_CPL3;
  607. seg_tss64_cpl3.dpl = 3;
  608. struct kvm_segment seg_cgate16;
  609. seg_cgate16.selector = SEL_CGATE16;
  610. seg_cgate16.type = 4;
  611. seg_cgate16.base = SEL_CS16 | (2 << 16);
  612. seg_cgate16.limit = ADDR_VAR_USER_CODE2;
  613. seg_cgate16.present = 1;
  614. seg_cgate16.dpl = 0;
  615. seg_cgate16.s = 0;
  616. seg_cgate16.g = 0;
  617. seg_cgate16.db = 0;
  618. seg_cgate16.l = 0;
  619. seg_cgate16.avl = 0;
  620. struct kvm_segment seg_tgate16 = seg_cgate16;
  621. seg_tgate16.selector = SEL_TGATE16;
  622. seg_tgate16.type = 3;
  623. seg_cgate16.base = SEL_TSS16_2;
  624. seg_tgate16.limit = 0;
  625. struct kvm_segment seg_cgate32 = seg_cgate16;
  626. seg_cgate32.selector = SEL_CGATE32;
  627. seg_cgate32.type = 12;
  628. seg_cgate32.base = SEL_CS32 | (2 << 16);
  629. struct kvm_segment seg_tgate32 = seg_cgate32;
  630. seg_tgate32.selector = SEL_TGATE32;
  631. seg_tgate32.type = 11;
  632. seg_tgate32.base = SEL_TSS32_2;
  633. seg_tgate32.limit = 0;
  634. struct kvm_segment seg_cgate64 = seg_cgate16;
  635. seg_cgate64.selector = SEL_CGATE64;
  636. seg_cgate64.type = 12;
  637. seg_cgate64.base = SEL_CS64;
  638. int kvmfd = open("/dev/kvm", O_RDWR);
  639. char buf[sizeof(struct kvm_cpuid2) + 128 * sizeof(struct kvm_cpuid_entry2)];
  640. memset(buf, 0, sizeof(buf));
  641. struct kvm_cpuid2* cpuid = (struct kvm_cpuid2*)buf;
  642. cpuid->nent = 128;
  643. ioctl(kvmfd, KVM_GET_SUPPORTED_CPUID, cpuid);
  644. ioctl(cpufd, KVM_SET_CPUID2, cpuid);
  645. close(kvmfd);
  646. const char* text_prefix = 0;
  647. int text_prefix_size = 0;
  648. char* host_text = host_mem + ADDR_TEXT;
  649. if (text_type == 8) {
  650. if (flags & KVM_SETUP_SMM) {
  651. if (flags & KVM_SETUP_PROTECTED) {
  652. sregs.cs = seg_cs16;
  653. sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds16;
  654. sregs.cr0 |= CR0_PE;
  655. } else {
  656. sregs.cs.selector = 0;
  657. sregs.cs.base = 0;
  658. }
  659. *(host_mem + ADDR_TEXT) = 0xf4;
  660. host_text = host_mem + 0x8000;
  661. ioctl(cpufd, KVM_SMI, 0);
  662. } else if (flags & KVM_SETUP_VIRT86) {
  663. sregs.cs = seg_cs32;
  664. sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds32;
  665. sregs.cr0 |= CR0_PE;
  666. sregs.efer |= EFER_SCE;
  667. setup_syscall_msrs(cpufd, SEL_CS32, SEL_CS32_CPL3);
  668. setup_32bit_idt(&sregs, host_mem, guest_mem);
  669. if (flags & KVM_SETUP_PAGING) {
  670. uint64_t pd_addr = guest_mem + ADDR_PD;
  671. uint64_t* pd = (uint64_t*)(host_mem + ADDR_PD);
  672. pd[0] = PDE32_PRESENT | PDE32_RW | PDE32_USER | PDE32_PS;
  673. sregs.cr3 = pd_addr;
  674. sregs.cr4 |= CR4_PSE;
  675. text_prefix = kvm_asm32_paged_vm86;
  676. text_prefix_size = sizeof(kvm_asm32_paged_vm86) - 1;
  677. } else {
  678. text_prefix = kvm_asm32_vm86;
  679. text_prefix_size = sizeof(kvm_asm32_vm86) - 1;
  680. }
  681. } else {
  682. sregs.cs.selector = 0;
  683. sregs.cs.base = 0;
  684. }
  685. } else if (text_type == 16) {
  686. if (flags & KVM_SETUP_CPL3) {
  687. sregs.cs = seg_cs16;
  688. sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds16;
  689. text_prefix = kvm_asm16_cpl3;
  690. text_prefix_size = sizeof(kvm_asm16_cpl3) - 1;
  691. } else {
  692. sregs.cr0 |= CR0_PE;
  693. sregs.cs = seg_cs16;
  694. sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds16;
  695. }
  696. } else if (text_type == 32) {
  697. sregs.cr0 |= CR0_PE;
  698. sregs.efer |= EFER_SCE;
  699. setup_syscall_msrs(cpufd, SEL_CS32, SEL_CS32_CPL3);
  700. setup_32bit_idt(&sregs, host_mem, guest_mem);
  701. if (flags & KVM_SETUP_SMM) {
  702. sregs.cs = seg_cs32;
  703. sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds32;
  704. *(host_mem + ADDR_TEXT) = 0xf4;
  705. host_text = host_mem + 0x8000;
  706. ioctl(cpufd, KVM_SMI, 0);
  707. } else if (flags & KVM_SETUP_PAGING) {
  708. sregs.cs = seg_cs32;
  709. sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds32;
  710. uint64_t pd_addr = guest_mem + ADDR_PD;
  711. uint64_t* pd = (uint64_t*)(host_mem + ADDR_PD);
  712. pd[0] = PDE32_PRESENT | PDE32_RW | PDE32_USER | PDE32_PS;
  713. sregs.cr3 = pd_addr;
  714. sregs.cr4 |= CR4_PSE;
  715. text_prefix = kvm_asm32_paged;
  716. text_prefix_size = sizeof(kvm_asm32_paged) - 1;
  717. } else if (flags & KVM_SETUP_CPL3) {
  718. sregs.cs = seg_cs32_cpl3;
  719. sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds32_cpl3;
  720. } else {
  721. sregs.cs = seg_cs32;
  722. sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds32;
  723. }
  724. } else {
  725. sregs.efer |= EFER_LME | EFER_SCE;
  726. sregs.cr0 |= CR0_PE;
  727. setup_syscall_msrs(cpufd, SEL_CS64, SEL_CS64_CPL3);
  728. setup_64bit_idt(&sregs, host_mem, guest_mem);
  729. sregs.cs = seg_cs32;
  730. sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds32;
  731. uint64_t pml4_addr = guest_mem + ADDR_PML4;
  732. uint64_t* pml4 = (uint64_t*)(host_mem + ADDR_PML4);
  733. uint64_t pdpt_addr = guest_mem + ADDR_PDP;
  734. uint64_t* pdpt = (uint64_t*)(host_mem + ADDR_PDP);
  735. uint64_t pd_addr = guest_mem + ADDR_PD;
  736. uint64_t* pd = (uint64_t*)(host_mem + ADDR_PD);
  737. pml4[0] = PDE64_PRESENT | PDE64_RW | PDE64_USER | pdpt_addr;
  738. pdpt[0] = PDE64_PRESENT | PDE64_RW | PDE64_USER | pd_addr;
  739. pd[0] = PDE64_PRESENT | PDE64_RW | PDE64_USER | PDE64_PS;
  740. sregs.cr3 = pml4_addr;
  741. sregs.cr4 |= CR4_PAE;
  742. if (flags & KVM_SETUP_VM) {
  743. sregs.cr0 |= CR0_NE;
  744. *((uint64_t*)(host_mem + ADDR_VAR_VMXON_PTR)) = ADDR_VAR_VMXON;
  745. *((uint64_t*)(host_mem + ADDR_VAR_VMCS_PTR)) = ADDR_VAR_VMCS;
  746. memcpy(host_mem + ADDR_VAR_VMEXIT_CODE, kvm_asm64_vm_exit, sizeof(kvm_asm64_vm_exit) - 1);
  747. *((uint64_t*)(host_mem + ADDR_VAR_VMEXIT_PTR)) = ADDR_VAR_VMEXIT_CODE;
  748. text_prefix = kvm_asm64_init_vm;
  749. text_prefix_size = sizeof(kvm_asm64_init_vm) - 1;
  750. } else if (flags & KVM_SETUP_CPL3) {
  751. text_prefix = kvm_asm64_cpl3;
  752. text_prefix_size = sizeof(kvm_asm64_cpl3) - 1;
  753. } else {
  754. text_prefix = kvm_asm64_enable_long;
  755. text_prefix_size = sizeof(kvm_asm64_enable_long) - 1;
  756. }
  757. }
  758. struct tss16 tss16;
  759. memset(&tss16, 0, sizeof(tss16));
  760. tss16.ss0 = tss16.ss1 = tss16.ss2 = SEL_DS16;
  761. tss16.sp0 = tss16.sp1 = tss16.sp2 = ADDR_STACK0;
  762. tss16.ip = ADDR_VAR_USER_CODE2;
  763. tss16.flags = (1 << 1);
  764. tss16.cs = SEL_CS16;
  765. tss16.es = tss16.ds = tss16.ss = SEL_DS16;
  766. tss16.ldt = SEL_LDT;
  767. struct tss16* tss16_addr = (struct tss16*)(host_mem + seg_tss16_2.base);
  768. memcpy(tss16_addr, &tss16, sizeof(tss16));
  769. memset(&tss16, 0, sizeof(tss16));
  770. tss16.ss0 = tss16.ss1 = tss16.ss2 = SEL_DS16;
  771. tss16.sp0 = tss16.sp1 = tss16.sp2 = ADDR_STACK0;
  772. tss16.ip = ADDR_VAR_USER_CODE2;
  773. tss16.flags = (1 << 1);
  774. tss16.cs = SEL_CS16_CPL3;
  775. tss16.es = tss16.ds = tss16.ss = SEL_DS16_CPL3;
  776. tss16.ldt = SEL_LDT;
  777. struct tss16* tss16_cpl3_addr = (struct tss16*)(host_mem + seg_tss16_cpl3.base);
  778. memcpy(tss16_cpl3_addr, &tss16, sizeof(tss16));
  779. struct tss32 tss32;
  780. memset(&tss32, 0, sizeof(tss32));
  781. tss32.ss0 = tss32.ss1 = tss32.ss2 = SEL_DS32;
  782. tss32.sp0 = tss32.sp1 = tss32.sp2 = ADDR_STACK0;
  783. tss32.ip = ADDR_VAR_USER_CODE;
  784. tss32.flags = (1 << 1) | (1 << 17);
  785. tss32.ldt = SEL_LDT;
  786. tss32.cr3 = sregs.cr3;
  787. tss32.io_bitmap = offsetof(struct tss32, io_bitmap);
  788. struct tss32* tss32_addr = (struct tss32*)(host_mem + seg_tss32_vm86.base);
  789. memcpy(tss32_addr, &tss32, sizeof(tss32));
  790. memset(&tss32, 0, sizeof(tss32));
  791. tss32.ss0 = tss32.ss1 = tss32.ss2 = SEL_DS32;
  792. tss32.sp0 = tss32.sp1 = tss32.sp2 = ADDR_STACK0;
  793. tss32.ip = ADDR_VAR_USER_CODE;
  794. tss32.flags = (1 << 1);
  795. tss32.cr3 = sregs.cr3;
  796. tss32.es = tss32.ds = tss32.ss = tss32.gs = tss32.fs = SEL_DS32;
  797. tss32.cs = SEL_CS32;
  798. tss32.ldt = SEL_LDT;
  799. tss32.cr3 = sregs.cr3;
  800. tss32.io_bitmap = offsetof(struct tss32, io_bitmap);
  801. struct tss32* tss32_cpl3_addr = (struct tss32*)(host_mem + seg_tss32_2.base);
  802. memcpy(tss32_cpl3_addr, &tss32, sizeof(tss32));
  803. struct tss64 tss64;
  804. memset(&tss64, 0, sizeof(tss64));
  805. tss64.rsp[0] = ADDR_STACK0;
  806. tss64.rsp[1] = ADDR_STACK0;
  807. tss64.rsp[2] = ADDR_STACK0;
  808. tss64.io_bitmap = offsetof(struct tss64, io_bitmap);
  809. struct tss64* tss64_addr = (struct tss64*)(host_mem + seg_tss64.base);
  810. memcpy(tss64_addr, &tss64, sizeof(tss64));
  811. memset(&tss64, 0, sizeof(tss64));
  812. tss64.rsp[0] = ADDR_STACK0;
  813. tss64.rsp[1] = ADDR_STACK0;
  814. tss64.rsp[2] = ADDR_STACK0;
  815. tss64.io_bitmap = offsetof(struct tss64, io_bitmap);
  816. struct tss64* tss64_cpl3_addr = (struct tss64*)(host_mem + seg_tss64_cpl3.base);
  817. memcpy(tss64_cpl3_addr, &tss64, sizeof(tss64));
  818. if (text_size > 1000)
  819. text_size = 1000;
  820. if (text_prefix) {
  821. memcpy(host_text, text_prefix, text_prefix_size);
  822. void* patch = memmem(host_text, text_prefix_size, "\xde\xc0\xad\x0b", 4);
  823. if (patch)
  824. *((uint32_t*)patch) = guest_mem + ADDR_TEXT + ((char*)patch - host_text) + 6;
  825. uint16_t magic = PREFIX_SIZE;
  826. patch = memmem(host_text, text_prefix_size, &magic, sizeof(magic));
  827. if (patch)
  828. *((uint16_t*)patch) = guest_mem + ADDR_TEXT + text_prefix_size;
  829. }
  830. memcpy((void*)(host_text + text_prefix_size), text, text_size);
  831. *(host_text + text_prefix_size + text_size) = 0xf4;
  832. memcpy(host_mem + ADDR_VAR_USER_CODE, text, text_size);
  833. *(host_mem + ADDR_VAR_USER_CODE + text_size) = 0xf4;
  834. *(host_mem + ADDR_VAR_HLT) = 0xf4;
  835. memcpy(host_mem + ADDR_VAR_SYSRET, "\x0f\x07\xf4", 3);
  836. memcpy(host_mem + ADDR_VAR_SYSEXIT, "\x0f\x35\xf4", 3);
  837. *(uint64_t*)(host_mem + ADDR_VAR_VMWRITE_FLD) = 0;
  838. *(uint64_t*)(host_mem + ADDR_VAR_VMWRITE_VAL) = 0;
  839. if (opt_count > 2)
  840. opt_count = 2;
  841. for (uintptr_t i = 0; i < opt_count; i++) {
  842. uint64_t typ = opt_array_ptr[i].typ;
  843. uint64_t val = opt_array_ptr[i].val;
  844. switch (typ % 9) {
  845. case 0:
  846. sregs.cr0 ^= val & (CR0_MP | CR0_EM | CR0_ET | CR0_NE | CR0_WP | CR0_AM | CR0_NW | CR0_CD);
  847. break;
  848. case 1:
  849. sregs.cr4 ^= val & (CR4_VME | CR4_PVI | CR4_TSD | CR4_DE | CR4_MCE | CR4_PGE | CR4_PCE |
  850. CR4_OSFXSR | CR4_OSXMMEXCPT | CR4_UMIP | CR4_VMXE | CR4_SMXE | CR4_FSGSBASE | CR4_PCIDE |
  851. CR4_OSXSAVE | CR4_SMEP | CR4_SMAP | CR4_PKE);
  852. break;
  853. case 2:
  854. sregs.efer ^= val & (EFER_SCE | EFER_NXE | EFER_SVME | EFER_LMSLE | EFER_FFXSR | EFER_TCE);
  855. break;
  856. case 3:
  857. val &= ((1 << 8) | (1 << 9) | (1 << 10) | (1 << 12) | (1 << 13) | (1 << 14) |
  858. (1 << 15) | (1 << 18) | (1 << 19) | (1 << 20) | (1 << 21));
  859. regs.rflags ^= val;
  860. tss16_addr->flags ^= val;
  861. tss16_cpl3_addr->flags ^= val;
  862. tss32_addr->flags ^= val;
  863. tss32_cpl3_addr->flags ^= val;
  864. break;
  865. case 4:
  866. seg_cs16.type = val & 0xf;
  867. seg_cs32.type = val & 0xf;
  868. seg_cs64.type = val & 0xf;
  869. break;
  870. case 5:
  871. seg_cs16_cpl3.type = val & 0xf;
  872. seg_cs32_cpl3.type = val & 0xf;
  873. seg_cs64_cpl3.type = val & 0xf;
  874. break;
  875. case 6:
  876. seg_ds16.type = val & 0xf;
  877. seg_ds32.type = val & 0xf;
  878. seg_ds64.type = val & 0xf;
  879. break;
  880. case 7:
  881. seg_ds16_cpl3.type = val & 0xf;
  882. seg_ds32_cpl3.type = val & 0xf;
  883. seg_ds64_cpl3.type = val & 0xf;
  884. break;
  885. case 8:
  886. *(uint64_t*)(host_mem + ADDR_VAR_VMWRITE_FLD) = (val & 0xffff);
  887. *(uint64_t*)(host_mem + ADDR_VAR_VMWRITE_VAL) = (val >> 16);
  888. break;
  889. default:
  890. exit(1);
  891. }
  892. }
  893. regs.rflags |= 2;
  894. fill_segment_descriptor(gdt, ldt, &seg_ldt);
  895. fill_segment_descriptor(gdt, ldt, &seg_cs16);
  896. fill_segment_descriptor(gdt, ldt, &seg_ds16);
  897. fill_segment_descriptor(gdt, ldt, &seg_cs16_cpl3);
  898. fill_segment_descriptor(gdt, ldt, &seg_ds16_cpl3);
  899. fill_segment_descriptor(gdt, ldt, &seg_cs32);
  900. fill_segment_descriptor(gdt, ldt, &seg_ds32);
  901. fill_segment_descriptor(gdt, ldt, &seg_cs32_cpl3);
  902. fill_segment_descriptor(gdt, ldt, &seg_ds32_cpl3);
  903. fill_segment_descriptor(gdt, ldt, &seg_cs64);
  904. fill_segment_descriptor(gdt, ldt, &seg_ds64);
  905. fill_segment_descriptor(gdt, ldt, &seg_cs64_cpl3);
  906. fill_segment_descriptor(gdt, ldt, &seg_ds64_cpl3);
  907. fill_segment_descriptor(gdt, ldt, &seg_tss32);
  908. fill_segment_descriptor(gdt, ldt, &seg_tss32_2);
  909. fill_segment_descriptor(gdt, ldt, &seg_tss32_cpl3);
  910. fill_segment_descriptor(gdt, ldt, &seg_tss32_vm86);
  911. fill_segment_descriptor(gdt, ldt, &seg_tss16);
  912. fill_segment_descriptor(gdt, ldt, &seg_tss16_2);
  913. fill_segment_descriptor(gdt, ldt, &seg_tss16_cpl3);
  914. fill_segment_descriptor_dword(gdt, ldt, &seg_tss64);
  915. fill_segment_descriptor_dword(gdt, ldt, &seg_tss64_cpl3);
  916. fill_segment_descriptor(gdt, ldt, &seg_cgate16);
  917. fill_segment_descriptor(gdt, ldt, &seg_tgate16);
  918. fill_segment_descriptor(gdt, ldt, &seg_cgate32);
  919. fill_segment_descriptor(gdt, ldt, &seg_tgate32);
  920. fill_segment_descriptor_dword(gdt, ldt, &seg_cgate64);
  921. if (ioctl(cpufd, KVM_SET_SREGS, &sregs))
  922. return -1;
  923. if (ioctl(cpufd, KVM_SET_REGS, &regs))
  924. return -1;
  925. return 0;
  926. }
  927.  
  928. static void kill_and_wait(int pid, int* status)
  929. {
  930. kill(-pid, SIGKILL);
  931. kill(pid, SIGKILL);
  932. for (int i = 0; i < 100; i++) {
  933. if (waitpid(-1, status, WNOHANG | __WALL) == pid)
  934. return;
  935. usleep(1000);
  936. }
  937. DIR* dir = opendir("/sys/fs/fuse/connections");
  938. if (dir) {
  939. for (;;) {
  940. struct dirent* ent = readdir(dir);
  941. if (!ent)
  942. break;
  943. if (strcmp(ent->d_name, ".") == 0 || strcmp(ent->d_name, "..") == 0)
  944. continue;
  945. char abort[300];
  946. snprintf(abort, sizeof(abort), "/sys/fs/fuse/connections/%s/abort", ent->d_name);
  947. int fd = open(abort, O_WRONLY);
  948. if (fd == -1) {
  949. continue;
  950. }
  951. if (write(fd, abort, 1) < 0) {
  952. }
  953. close(fd);
  954. }
  955. closedir(dir);
  956. } else {
  957. }
  958. while (waitpid(-1, status, __WALL) != pid) {
  959. }
  960. }
  961.  
// Per-child-process setup run immediately after fork, before executing
// the test program.
static void setup_test()
{
	// Die with SIGKILL if the parent exits, so children never outlive it.
	prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
	// Own process group, so kill_and_wait(-pid) can nuke all descendants.
	setpgrp();
	// Make this process the OOM killer's preferred victim.
	write_file("/proc/self/oom_score_adj", "1000");
}
  968.  
// Per-worker-thread state. 'created' is set once the pthread has been
// started; 'call' is the index of the call the thread should run next.
struct thread_t {
	int created, call;
	// ready: signaled by the dispatcher when work is posted;
	// done: signaled by the worker when the call has finished.
	event_t ready, done;
};

static struct thread_t threads[16];
static void execute_call(int call);
// Count of worker threads currently executing a call (atomic counter).
static int running;
  977.  
  978. static void* thr(void* arg)
  979. {
  980. struct thread_t* th = (struct thread_t*)arg;
  981. for (;;) {
  982. event_wait(&th->ready);
  983. event_reset(&th->ready);
  984. execute_call(th->call);
  985. __atomic_fetch_sub(&running, 1, __ATOMIC_RELAXED);
  986. event_set(&th->done);
  987. }
  988. return 0;
  989. }
  990.  
  991. static void execute_one(void)
  992. {
  993. int i, call, thread;
  994. for (call = 0; call < 14; call++) {
  995. for (thread = 0; thread < (int)(sizeof(threads) / sizeof(threads[0])); thread++) {
  996. struct thread_t* th = &threads[thread];
  997. if (!th->created) {
  998. th->created = 1;
  999. event_init(&th->ready);
  1000. event_init(&th->done);
  1001. event_set(&th->done);
  1002. thread_start(thr, th);
  1003. }
  1004. if (!event_isset(&th->done))
  1005. continue;
  1006. event_reset(&th->done);
  1007. th->call = call;
  1008. __atomic_fetch_add(&running, 1, __ATOMIC_RELAXED);
  1009. event_set(&th->ready);
  1010. if (call == 10)
  1011. break;
  1012. event_timedwait(&th->done, 50);
  1013. break;
  1014. }
  1015. }
  1016. for (i = 0; i < 100 && __atomic_load_n(&running, __ATOMIC_RELAXED); i++)
  1017. sleep_ms(1);
  1018. }
  1019.  
  1020. static void execute_one(void);
  1021.  
  1022. #define WAIT_FLAGS __WALL
  1023.  
  1024. static void loop(void)
  1025. {
  1026. int iter = 0;
  1027. for (;; iter++) {
  1028. int pid = fork();
  1029. if (pid < 0)
  1030. exit(1);
  1031. if (pid == 0) {
  1032. setup_test();
  1033. execute_one();
  1034. exit(0);
  1035. }
  1036. int status = 0;
  1037. uint64_t start = current_time_ms();
  1038. for (;;) {
  1039. if (waitpid(-1, &status, WNOHANG | WAIT_FLAGS) == pid)
  1040. break;
  1041. sleep_ms(1);
  1042. if (current_time_ms() - start < 5000)
  1043. continue;
  1044. kill_and_wait(pid, &status);
  1045. break;
  1046. }
  1047. }
  1048. }
  1049.  
// Resources threaded between calls: r[0] = /dev/kvm fd, r[1] = VM fd,
// r[2] = vCPU fd. 0xffffffffffffffff means "not created yet".
uint64_t r[3] = {0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff};

// Execute one call of the reproducer program, selected by index.
// Magic ioctl numbers below are KVM ioctls (see linux/kvm.h); decoded
// values noted per call site.
void execute_call(int call)
{
	intptr_t res = 0;
	switch (call) {
	case 0:
		// openat(AT_FDCWD, "/dev/kvm", 0) -> r[0]
		memcpy((void*)0x20000000, "/dev/kvm\000", 9);
		res = syscall(__NR_openat, 0xffffffffffffff9cul, 0x20000000ul, 0ul, 0ul);
		if (res != -1)
			r[0] = res;
		break;
	case 1:
		// 0xae01 = KVM_CREATE_VM -> r[1]
		res = syscall(__NR_ioctl, r[0], 0xae01, 0ul);
		if (res != -1)
			r[1] = res;
		break;
	case 2:
		// Build a struct kvm_userspace_memory_region at 0x20000040:
		// slot=0, flags=0, guest_phys_addr=0, size=0x20002000,
		// userspace_addr=0x20000000, then 0x4020ae46 =
		// KVM_SET_USER_MEMORY_REGION on the VM fd.
		*(uint32_t*)0x20000040 = 0;
		*(uint32_t*)0x20000044 = 0;
		*(uint64_t*)0x20000048 = 0;
		*(uint64_t*)0x20000050 = 0x20002000;
		*(uint64_t*)0x20000058 = 0x20000000;
		syscall(__NR_ioctl, r[1], 0x4020ae46, 0x20000040ul);
		break;
	case 3:
		// 0xae41 = KVM_CREATE_VCPU (vcpu id 0) -> r[2]
		res = syscall(__NR_ioctl, r[1], 0xae41, 0ul);
		if (res != -1)
			r[2] = res;
		break;
	case 4:
		// Install a guest text blob (raw x86 machine code) and set up
		// the vCPU via the syz_kvm_setup_cpu pseudo-syscall defined
		// earlier in this file.
		*(uint64_t*)0x20000080 = 0x40;
		*(uint64_t*)0x20000088 = 0x20000140;
		memcpy((void*)0x20000140, "\x26\x45\x0f\x01\xd1\x47\x0f\x01\xc5\x43\x0f\x1c\x4d\xbb\x0f\x78\x96\x00\x00\x00\x00\x40\x0f\x22\x63\x45\xd9\xf1\x26\x66\x46\x0f\xc7\xb2\x5e\xa4\x0f\xb8\x36\x2e\x65\x46\x0f\x01\xd1\xb9\x62\x0b\x00\x00\x0f\x32\xc7\x44\x24\x00\x00\x01\x00\x00\xc7\x44\x24\x02\x0d\x00\x00\x00\xff\x2c\x24", 71);
		*(uint64_t*)0x20000090 = 0x47;
		syz_kvm_setup_cpu(-1, r[2], 0x20000000, 0x20000080, 1, 0, 0, 0);
		break;
	case 5:
		// 0xae80 = KVM_RUN
		syscall(__NR_ioctl, r[2], 0xae80, 0ul);
		break;
	case 6:
		// 0xae9a = KVM_NMI
		syscall(__NR_ioctl, r[2], 0xae9a, 0);
		break;
	case 7:
		// 0x4008ae89 looks like KVM_SET_MSRS (payload at 0x200000c0),
		// issued on fd -1 so it is expected to fail — TODO confirm
		// the intent is only to exercise argument copying.
		memcpy((void*)0x200000c0, "\x02\x00\x00\x00\x00\x00\x00\x00\x53\x0a\x00\x00\x00\x00\x00\x00\x01\x4c\xb8\x64\xd8\x98\x1b\x95\x78\x09\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00", 40);
		syscall(__NR_ioctl, -1, 0x4008ae89, 0x200000c0ul);
		break;
	case 8:
		// KVM_RUN again
		syscall(__NR_ioctl, r[2], 0xae80, 0ul);
		break;
	case 9:
		// KVM_SET_USER_MEMORY_REGION with a NULL argument pointer
		syscall(__NR_ioctl, r[1], 0x4020ae46, 0ul);
		break;
	case 10:
		// Same vCPU setup as call 4; this call is dispatched without
		// waiting for completion (see execute_one), racing with the
		// following calls.
		*(uint64_t*)0x20000080 = 0x40;
		*(uint64_t*)0x20000088 = 0x20000140;
		memcpy((void*)0x20000140, "\x26\x45\x0f\x01\xd1\x47\x0f\x01\xc5\x43\x0f\x1c\x4d\xbb\x0f\x78\x96\x00\x00\x00\x00\x40\x0f\x22\x63\x45\xd9\xf1\x26\x66\x46\x0f\xc7\xb2\x5e\xa4\x0f\xb8\x36\x2e\x65\x46\x0f\x01\xd1\xb9\x62\x0b\x00\x00\x0f\x32\xc7\x44\x24\x00\x00\x01\x00\x00\xc7\x44\x24\x02\x0d\x00\x00\x00\xff\x2c\x24", 71);
		*(uint64_t*)0x20000090 = 0x47;
		syz_kvm_setup_cpu(-1, r[2], 0x20000000, 0x20000080, 1, 0, 0, 0);
		break;
	case 11:
		// KVM_RUN again
		syscall(__NR_ioctl, r[2], 0xae80, 0ul);
		break;
	case 12:
		// Same as call 7
		memcpy((void*)0x200000c0, "\x02\x00\x00\x00\x00\x00\x00\x00\x53\x0a\x00\x00\x00\x00\x00\x00\x01\x4c\xb8\x64\xd8\x98\x1b\x95\x78\x09\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00", 40);
		syscall(__NR_ioctl, -1, 0x4008ae89, 0x200000c0ul);
		break;
	case 13:
		// KVM_RUN again
		syscall(__NR_ioctl, r[2], 0xae80, 0ul);
		break;
	}

}
  1123. int main(void)
  1124. {
  1125. syscall(__NR_mmap, 0x1ffff000ul, 0x1000ul, 0ul, 0x32ul, -1, 0ul);
  1126. syscall(__NR_mmap, 0x20000000ul, 0x1000000ul, 7ul, 0x32ul, -1, 0ul);
  1127. syscall(__NR_mmap, 0x21000000ul, 0x1000ul, 0ul, 0x32ul, -1, 0ul);
  1128. for (procid = 0; procid < 8; procid++) {
  1129. if (fork() == 0) {
  1130. loop();
  1131. }
  1132. }
  1133. sleep(1000000);
  1134. return 0;
  1135. }
  1136.  
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement