azurema (a guest, Jun 19th, 2011)

diff -Npru kernel/include/linux/azure.h /home/paul/kvm-68/kernel/include/linux/azure.h
--- kernel/include/linux/azure.h    1970-01-01 00:00:00.000000000 +0000
+++ /home/paul/kvm-68/kernel/include/linux/azure.h  2008-08-04 14:13:52.914039176 +0000
@@ -0,0 +1,28 @@
+#ifndef __AZURE_H
+#define __AZURE_H
+
+#include <linux/types.h>
+
+#define TARGET_NAME "sample.exe" /* image name of the process to trace */
+
+#define PUSHF_OPCODE 0x9C
+#define PUSHF_SIZE 1
+#define POPF_OPCODE 0x9D
+#define POPF_SIZE 1
+
+enum {MEM_OP_READ, MEM_OP_WRITE};
+
+struct azure{
+  u8 enabled;                  /* armed by the first intercepted WRMSR */
+  u32 target_cr3;              /* CR3 of the traced process */
+  u32 target_entrypoint;       /* image entry point, read from the PEB */
+  u32 target_base;             /* image base, read from the PEB */
+  u32 target_size;             /* SizeOfImage, read from the PEB */
+  u8 set_tf;                   /* request single-stepping via EFLAGS.TF */
+  u8 prv_set_tf;               /* TF armed and not yet torn down */
+  unsigned long post_pushf_ip; /* IP just after a guest PUSHF */
+  unsigned long post_popf_ip;  /* IP just after a guest POPF */
+  u8 popf_fwd_tf;              /* guest set TF itself; forward the #DB */
+};
+
+#endif
diff -Npru kernel/include/linux/kvm_host.h /home/paul/kvm-68/kernel/include/linux/kvm_host.h
--- kernel/include/linux/kvm_host.h 2008-05-04 15:28:38.000000000 +0000
+++ /home/paul/kvm-68/kernel/include/linux/kvm_host.h   2008-08-04 14:13:52.914039176 +0000
@@ -6,6 +6,8 @@
  * the COPYING file in the top-level directory.
  */
 
+#include "azure.h"
+
 #include <linux/types.h>
 #include <linux/hardirq.h>
 #include <linux/list.h>
@@ -84,6 +86,7 @@ struct kvm_vcpu {
 #endif
 
    struct kvm_vcpu_arch arch;
+   struct azure azure;
 };
 
 struct kvm_memory_slot {
diff -Npru kernel/kvm_main.c /home/paul/kvm-68/kernel/kvm_main.c
--- kernel/kvm_main.c   2008-05-04 15:28:39.000000000 +0000
+++ /home/paul/kvm-68/kernel/kvm_main.c 2008-08-04 14:13:52.918039140 +0000
@@ -165,6 +165,8 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu,
    r = kvm_arch_vcpu_init(vcpu);
    if (r < 0)
        goto fail_free_run;
+
+   memset(&vcpu->azure, 0, sizeof(vcpu->azure));
    return 0;
 
 fail_free_run:
@@ -738,6 +740,7 @@ int kvm_write_guest(struct kvm *kvm, gpa
    }
    return 0;
 }
+EXPORT_SYMBOL_GPL(kvm_write_guest); /* used by the vmx.c additions below */
 
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
 {
diff -Npru kernel/msw.h /home/paul/kvm-68/kernel/msw.h
--- kernel/msw.h    1970-01-01 00:00:00.000000000 +0000
+++ /home/paul/kvm-68/kernel/msw.h  2008-08-04 14:13:52.918039140 +0000
@@ -0,0 +1,50 @@
+#ifndef __MSW_H
+#define __MSW_H
+
+#include <linux/types.h>
+
+#define FS_ETHREAD_OFFSET 0x124       /* KPCR: current ETHREAD (XP-era) */
+#define ETHREAD_EPROCESS_OFFSET 0x44  /* ETHREAD: owning EPROCESS */
+
+#define EPROCESS_IMAGE_NAME_OFFSET 0x174 /* EPROCESS: ImageFileName[16] */
+#define EPROCESS_PEB_OFFSET 0x1b0        /* EPROCESS: Peb */
+
+#define KERNEL_START 0x80000000 /* user/kernel split on 32-bit Windows */
+
+#define IMAGE_NAME_LEN 16
+
+struct msw_peb{
+  u8 inherited_address_space;
+  u8 read_image_file_exec_options;
+  u8 being_debugged;
+  u8 spare;
+  u32 mutant;
+  u32 image_base_address;
+  u32 loader_data;           /* PEB.Ldr: points to msw_peb_ldr_data */
+  u32 process_parameters;
+};
+
+struct list_entry{
+  u32 flink;
+  u32 blink;
+};
+
+struct msw_peb_ldr_data{
+  u32 length;
+  u32 initialized;
+  u32 sshandle;
+  struct list_entry in_ld_order_mod_list;
+  struct list_entry in_mem_order_mod_list;
+  struct list_entry in_init_order_mod_list;
+};
+
+struct msw_ldr_mod{
+  struct list_entry in_ld_order_mod_list;
+  struct list_entry in_mem_order_mod_list;
+  struct list_entry in_init_order_mod_list;
+  u32 base_address;
+  u32 entrypoint;
+  u32 size_of_image;
+};
+
+#endif
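
For orientation, a minimal sketch of the pointer chain these definitions describe, the same walk the vmx.c helpers below perform with read_guest_mem(). The function name is hypothetical and error handling is omitted; the offsets are the XP-era values defined above and differ across Windows builds:

static u32 find_target_entrypoint_sketch(struct kvm_vcpu *vcpu)
{
  u32 ethread, eprocess, peb_gva;
  struct msw_peb peb;
  struct msw_peb_ldr_data ldr;
  struct msw_ldr_mod mod;

  /* FS base holds the KPCR; the current ETHREAD is at FS:[0x124] */
  read_guest_mem(vcpu, vmcs_readl(GUEST_FS_BASE) + FS_ETHREAD_OFFSET,
                 &ethread, sizeof(ethread));
  read_guest_mem(vcpu, ethread + ETHREAD_EPROCESS_OFFSET,
                 &eprocess, sizeof(eprocess));
  read_guest_mem(vcpu, eprocess + EPROCESS_PEB_OFFSET,
                 &peb_gva, sizeof(peb_gva));
  read_guest_mem(vcpu, peb_gva, &peb, sizeof(peb));
  read_guest_mem(vcpu, peb.loader_data, &ldr, sizeof(ldr));
  /* the first in-load-order module is the process image itself */
  read_guest_mem(vcpu, ldr.in_ld_order_mod_list.flink, &mod, sizeof(mod));
  return mod.entrypoint;
}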
diff -Npru kernel/vmx.c /home/paul/kvm-68/kernel/vmx.c
--- kernel/vmx.c    2008-05-04 15:28:39.000000000 +0000
+++ /home/paul/kvm-68/kernel/vmx.c  2008-08-04 14:14:34.825661675 +0000
@@ -19,6 +19,9 @@
 #include "vmx.h"
 #include "mmu.h"
 
+#include "msw.h"
+#include "include/linux/azure.h"
+
 #include <linux/kvm_host.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -464,6 +467,10 @@ static void update_exception_bitmap(stru
        eb = ~0;
    if (vm_need_ept())
        eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
+
+   if(vcpu->azure.set_tf == 1)
+     eb |= 1u << 1; /* vector 1: debug exception (#DB) */
+
    vmcs_write32(EXCEPTION_BITMAP, eb);
 }
 
@@ -1084,7 +1091,7 @@ static __init int setup_vmcs_config(stru
          CPU_BASED_CR3_LOAD_EXITING |
          CPU_BASED_CR3_STORE_EXITING |
          CPU_BASED_USE_IO_BITMAPS |
-         CPU_BASED_MOV_DR_EXITING |
+         /* CPU_BASED_MOV_DR_EXITING | */
          CPU_BASED_USE_TSC_OFFSETING;
    opt = CPU_BASED_TPR_SHADOW |
          CPU_BASED_USE_MSR_BITMAPS |
@@ -2206,6 +2213,135 @@ static int handle_rmode_exception(struct
    return 0;
 }
 
+static int mem_op(struct kvm_vcpu *vcpu, gva_t gva,
+         void *data, unsigned int len, u8 op)
+{
+  gpa_t gpa;
+  int toproc;
+  int ret;
+
+  while(len){
+    gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
+    if(gpa == UNMAPPED_GVA)
+      return -1;
+
+    toproc = min((unsigned long) len, (PAGE_SIZE - offset_in_page(gpa))); /* clamp to page end */
+    switch(op){
+      case MEM_OP_READ:
+        ret = kvm_read_guest(vcpu->kvm, gpa, data, toproc);
+        break;
+      case MEM_OP_WRITE:
+        ret = kvm_write_guest(vcpu->kvm, gpa, data, toproc);
+        break;
+      default:
+        return -1;
+    }
+
+    if(ret < 0)
+      return -1;
+
+    len -= toproc;
+    data += toproc;
+    gva += toproc;
+  }
+
+  return 0;
+}
+
+static int read_guest_mem(struct kvm_vcpu *vcpu, gva_t gva,
+             void *data, unsigned int len)
+{
+  return mem_op(vcpu, gva, data, len, MEM_OP_READ);
+}
+
+static int write_guest_mem(struct kvm_vcpu *vcpu, gva_t gva,
+              void *data, unsigned int len)
+{
+  return mem_op(vcpu, gva, data, len, MEM_OP_WRITE);
+}
+
+static u8 read_byte_opcode(struct kvm_vcpu *vcpu)
+{
+  unsigned long rip;
+  u8 opcode;
+  int ret;
+
+  rip = vmcs_readl(GUEST_RIP);
+  ret = read_guest_mem(vcpu, rip, &opcode, sizeof(opcode));
+  if(ret < 0)
+    return 0;
+
+  return opcode;
+}
+
+static u32 read_stack_top(struct kvm_vcpu *vcpu)
+{
+  unsigned long rsp;
+  u32 stack_top;
+  int ret;
+
+  rsp = vmcs_readl(GUEST_RSP);
+  ret = read_guest_mem(vcpu, rsp, &stack_top, sizeof(stack_top));
+  if(ret < 0)
+    return 0;
+
+  return stack_top;
+}
+
+static void write_stack_top(struct kvm_vcpu *vcpu, u32 stack_top)
+{
+  unsigned long rsp;
+
+  rsp = vmcs_readl(GUEST_RSP);
+  write_guest_mem(vcpu, rsp, &stack_top, sizeof(stack_top));
+}
+
+static void process_op_pushf(struct kvm_vcpu *vcpu)
+{
+  unsigned long rip;
+  u32 stack_top;
+
+  rip = vmcs_readl(GUEST_RIP);
+  if(rip >= KERNEL_START) /* only scrub user-mode code */
+    return;
+
+  if(vcpu->azure.post_pushf_ip == rip){ /* a PUSHF just retired */
+    stack_top = read_stack_top(vcpu) & ~(X86_EFLAGS_TF); /* hide our TF */
+    if(stack_top != 0)
+      write_stack_top(vcpu, stack_top);
+    vcpu->azure.post_pushf_ip = 0;
+  }
+
+  if(read_byte_opcode(vcpu) == PUSHF_OPCODE)
+    vcpu->azure.post_pushf_ip = rip + PUSHF_SIZE;
+}
+
+static void process_op_popf(struct kvm_vcpu *vcpu){
+  unsigned long rip;
+
+  rip = vmcs_readl(GUEST_RIP);
+  if(rip >= KERNEL_START)
+    return;
+
+  if(vcpu->azure.post_popf_ip == rip) /* #DB right after a POPF: the guest may have set TF itself */
+    vcpu->azure.popf_fwd_tf = 1;
+
+  if(read_byte_opcode(vcpu) == POPF_OPCODE)
+    vcpu->azure.post_popf_ip = rip + POPF_SIZE;
+}
+
+static void handle_debug_exception(struct kvm_vcpu *vcpu)
+{
+  unsigned long rip;
+
+  rip = vmcs_readl(GUEST_RIP);
+  if(rip == vcpu->azure.target_entrypoint)
+    printk("azure: target entrypoint executed utc %lu\n", get_seconds());
+
+  /* additional malware analysis code here */
+
+}
+
 static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
    struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -2269,8 +2405,17 @@ static int handle_exception(struct kvm_v
 
    if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) ==
        (INTR_TYPE_EXCEPTION | 1)) {
-       kvm_run->exit_reason = KVM_EXIT_DEBUG;
-       return 0;
+       u8 popf_fwd_tf = vcpu->azure.popf_fwd_tf;
+       vcpu->azure.popf_fwd_tf = 0;
+       process_op_pushf(vcpu);
+       process_op_popf(vcpu);
+       handle_debug_exception(vcpu);
+
+       kvm_run->exit_reason = KVM_EXIT_DEBUG;
+       if(popf_fwd_tf)
+         return 0; /* guest-armed TF: report the debug exit to userspace */
+       else
+         return 1; /* our own single-step trap: swallow it and resume */
    }
    kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
    kvm_run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK;
@@ -2329,12 +2474,185 @@ vmx_patch_hypercall(struct kvm_vcpu *vcp
    hypercall[2] = 0xc1;
 }
 
+static gva_t read_msw_ethread_gva(struct kvm_vcpu *vcpu)
+{
+  unsigned long fs_base;
+  u32 msw_ethread_gva;
+  int ret;
+
+  fs_base = vmcs_readl(GUEST_FS_BASE);
+  ret = read_guest_mem(vcpu, fs_base + FS_ETHREAD_OFFSET,
+              &msw_ethread_gva, sizeof(msw_ethread_gva));
+  if(ret < 0)
+    return 0;
+
+  return msw_ethread_gva;
+}
+
+static gva_t read_msw_eprocess_gva(struct kvm_vcpu *vcpu)
+{
+  gva_t msw_ethread_gva;
+  u32 msw_eprocess_gva;
+  int ret;
+
+  msw_ethread_gva = read_msw_ethread_gva(vcpu);
+  if(msw_ethread_gva == 0)
+    return 0;
+
+  ret = read_guest_mem(vcpu, msw_ethread_gva + ETHREAD_EPROCESS_OFFSET,
+              &msw_eprocess_gva, sizeof(msw_eprocess_gva));
+  if(ret < 0)
+    return 0;
+
+  return msw_eprocess_gva;
+}
+
+static gva_t read_msw_peb_gva(struct kvm_vcpu *vcpu)
+{
+  gva_t msw_eprocess_gva;
+  u32 msw_peb_gva;
+  int ret;
+
+  msw_eprocess_gva = read_msw_eprocess_gva(vcpu);
+  if(msw_eprocess_gva == 0)
+    return 0;
+
+  ret = read_guest_mem(vcpu, msw_eprocess_gva + EPROCESS_PEB_OFFSET,
+              &msw_peb_gva, sizeof(msw_peb_gva));
+  if(ret < 0)
+    return 0;
+
+  return msw_peb_gva;
+}
+
+static int read_msw_peb(struct kvm_vcpu *vcpu, struct msw_peb *msw_peb_p)
+{
+  gva_t msw_peb_gva;
+  int ret;
+
+  msw_peb_gva = read_msw_peb_gva(vcpu);
+  if(msw_peb_gva == 0)
+    return -1;
+
+  ret = read_guest_mem(vcpu, msw_peb_gva, msw_peb_p, sizeof(*msw_peb_p));
+  if(ret < 0)
+    return -1;
+
+  return 0;
+}
+
+static int read_msw_peb_ldr_data(struct kvm_vcpu *vcpu,
+                gva_t msw_peb_ldr_data_gva,
+                struct msw_peb_ldr_data *msw_peb_ldr_data_p)
+{
+  int ret;
+
+  if(msw_peb_ldr_data_gva == 0)
+    return -1;
+
+  ret = read_guest_mem(vcpu, msw_peb_ldr_data_gva,
+              msw_peb_ldr_data_p, sizeof(*msw_peb_ldr_data_p));
+  if(ret < 0)
+    return -1;
+
+  return 0;
+}
+
+static int read_msw_ldr_mod(struct kvm_vcpu *vcpu, gva_t msw_ldr_mod_gva,
+                            struct msw_ldr_mod *msw_ldr_mod_p)
+{
+  int ret;
+
+  if(msw_ldr_mod_gva == 0)
+    return -1;
+
+  ret = read_guest_mem(vcpu, msw_ldr_mod_gva,
+              msw_ldr_mod_p, sizeof(*msw_ldr_mod_p));
+  if(ret < 0)
+    return -1;
+
+  return 0;
+}
+
+static void read_target_info(struct kvm_vcpu *vcpu, u32 cr3)
+{
+  struct msw_peb msw_peb;
+  struct msw_peb_ldr_data msw_peb_ldr_data;
+  struct msw_ldr_mod msw_ldr_mod;
+  int ret;
+
+  if(vcpu->azure.target_cr3 == 0) /* first sighting: remember the CR3 */
+    vcpu->azure.target_cr3 = cr3;
+
+  if(vcpu->azure.target_entrypoint != 0) /* already resolved */
+    return;
+
+  ret = read_msw_peb(vcpu, &msw_peb);
+  if(ret < 0)
+    return;
+  ret = read_msw_peb_ldr_data(vcpu, msw_peb.loader_data, &msw_peb_ldr_data);
+  if(ret < 0)
+    return;
+  ret = read_msw_ldr_mod(vcpu, msw_peb_ldr_data.in_ld_order_mod_list.flink,
+                         &msw_ldr_mod); /* first in load order: the image */
+  if(ret < 0)
+    return;
+
+  if(msw_ldr_mod.base_address == 0)
+    return;
+
+  vcpu->azure.target_entrypoint = msw_ldr_mod.entrypoint;
+  vcpu->azure.target_base = msw_ldr_mod.base_address;
+  vcpu->azure.target_size = msw_ldr_mod.size_of_image;
+
+  printk("azure: peb info available utc %lu\n", get_seconds());
+  printk("azure: imagebase (msw_ldr_mod) 0x%x\n", msw_ldr_mod.base_address);
+  printk("azure: imagesize (msw_ldr_mod) 0x%x\n", msw_ldr_mod.size_of_image);
+  printk("azure: entrypoint (msw_ldr_mod) 0x%x\n", msw_ldr_mod.entrypoint);
+}
+
+static void read_msw_image_name(struct kvm_vcpu *vcpu, char *image_name_p)
+{
+  gva_t msw_eprocess_gva;
+
+  memset(image_name_p, 0, IMAGE_NAME_LEN);
+
+  msw_eprocess_gva = read_msw_eprocess_gva(vcpu);
+  if(msw_eprocess_gva == 0)
+    return;
+
+  read_guest_mem(vcpu, msw_eprocess_gva + EPROCESS_IMAGE_NAME_OFFSET,
+        image_name_p, IMAGE_NAME_LEN - 1);
+}
+
+static u8 is_target_process(struct kvm_vcpu *vcpu, u32 cr3)
+{
+  char image_name[IMAGE_NAME_LEN];
+
+  if(vcpu->azure.target_cr3 != 0){ /* target already identified */
+    if(vcpu->azure.target_cr3 == cr3)
+      return 1;
+    else
+      return 0;
+  }
+
+  read_msw_image_name(vcpu, image_name);
+  if(strcmp(image_name, TARGET_NAME) == 0){
+    printk("azure: target located utc %lu\n", get_seconds());
+    printk("azure: target cr3 0x%x\n", cr3);
+    return 1;
+  }
+  else{
+    return 0;
+  }
+}
+
 static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
    unsigned long exit_qualification;
    int cr;
    int reg;
 
    exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
    cr = exit_qualification & 15;
    reg = (exit_qualification >> 8) & 15;
@@ -2352,6 +2670,16 @@ static int handle_cr(struct kvm_vc
            vcpu_load_rsp_rip(vcpu);
            kvm_set_cr3(vcpu, vcpu->arch.regs[reg]);
            skip_emulated_instruction(vcpu);
+           if(vcpu->azure.enabled == 1){
+             if(is_target_process(vcpu, vcpu->arch.regs[reg])){
+               read_target_info(vcpu, vcpu->arch.regs[reg]);
+               vcpu->azure.set_tf = 1;  /* start single-stepping */
+               vcpu->azure.prv_set_tf = 1;
+             }
+             else{
+               vcpu->azure.set_tf = 0;  /* another process was scheduled */
+             }
+           }
            return 1;
        case 4:
            vcpu_load_rsp_rip(vcpu);
@@ -2490,6 +2818,12 @@ static int handle_wrmsr(struct kvm_vcpu
    }
 
    skip_emulated_instruction(vcpu);
+
+   if(vcpu->azure.enabled == 0){ /* the first WRMSR exit arms the tracer */
+     vcpu->azure.enabled = 1;
+     printk("azure: enabled\n");
+   }
+
    return 1;
 }
 
@@ -2816,11 +3150,46 @@ static void fixup_rmode_irq(struct vcpu_
        | vmx->rmode.irq.vector;
 }
 
+static void unset_hlt_movss_intrblt(void){
+  u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
+  if(interruptibility & 3){ /* clear blocking-by-STI / blocking-by-MOV-SS */
+    interruptibility &= ~(3);
+    vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
+  }
+}
+
+static void set_guest_tf(struct kvm_vcpu *vcpu){
+  unsigned long flags;
+
+  flags = vmcs_readl(GUEST_RFLAGS);
+  flags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); /* trap after each instruction */
+  vmcs_writel(GUEST_RFLAGS, flags);
+  unset_hlt_movss_intrblt();
+  update_exception_bitmap(vcpu);
+}
+
+static void unset_guest_tf(struct kvm_vcpu *vcpu){
+  unsigned long flags;
+
+  flags = vmcs_readl(GUEST_RFLAGS);
+  flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
+  vmcs_writel(GUEST_RFLAGS, flags);
+  update_exception_bitmap(vcpu);
+}
+
 static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
    struct vcpu_vmx *vmx = to_vmx(vcpu);
    u32 intr_info;
 
+   if(vcpu->azure.set_tf == 1) /* re-arm TF before every entry */
+     set_guest_tf(vcpu);
+
+   if(vcpu->azure.set_tf == 0 && vcpu->azure.prv_set_tf == 1){
+     unset_guest_tf(vcpu); /* target descheduled: stop stepping */
+     vcpu->azure.prv_set_tf = 0;
+   }
+
    /*
     * Loading guest fpu may have cleared host cr0.ts
     */
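
Taken together: the first intercepted WRMSR arms the tracer, each CR3 load is checked against TARGET_NAME (and later the cached CR3), and the target is then single-stepped by setting EFLAGS.TF before every VM entry while #DB is intercepted. The PUSHF fix-up exists to defeat guest probes of the following shape; a hypothetical guest-side check, not part of the patch:

/* Returns nonzero if single-stepping is visible to the guest. With the
 * PUSHF handling above, TF (bit 8) is scrubbed from the EFLAGS image on
 * the stack before the pop can read it. */
static int trace_flag_visible(void)
{
  unsigned long eflags;
  asm volatile("pushf; pop %0" : "=r"(eflags));
  return (eflags & 0x100) != 0; /* 0x100 = X86_EFLAGS_TF */
}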