debug_register.patch

When not running in guest-debug mode (i.e. the guest controls the debug
registers), having to take an exit for each DR access is a waste of time.
If the guest gets into a state where each context switch causes DR to be
saved and restored, this can take away as much as 40% of the execution
time from the guest.

If the guest is running with vcpu->arch.db == vcpu->arch.eff_db, we
can let it write freely to the debug registers and reload them on the
next exit.  We still need to exit on the first access, so that the
KVM_DEBUGREG_WONT_EXIT flag is set in switch_db_regs; after that, further
accesses to the debug registers will not cause a vmexit.
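
For a sense of the hot path being targeted, here is a rough guest-side sketch
(the struct and field names are hypothetical, not part of the patches below):
a context switch that reloads hardware breakpoint state issues several
MOV-to-DR instructions back to back, and without this series every one of
them traps to the host.

/* Hypothetical guest-side bookkeeping for a task's breakpoint state. */
struct debug_state {
	unsigned long db[4];
	unsigned long dr7;
};

/*
 * Hypothetical guest context-switch path with active hardware breakpoints.
 * Each set_debugreg() is a single MOV-to-DR, so this costs roughly six
 * DR vmexits per switch unless the intercepts are turned off.
 */
static void switch_debug_regs(struct debug_state *next)
{
	set_debugreg(next->db[0], 0);
	set_debugreg(next->db[1], 1);
	set_debugreg(next->db[2], 2);
	set_debugreg(next->db[3], 3);
	set_debugreg(0UL, 6);		/* clear stale status bits in DR6 */
	set_debugreg(next->dr7, 7);	/* re-arm breakpoints last */
}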


---
Unlike other intercepts, debug register intercepts will be modified
in hot paths if the guest OS is buggy or otherwise gets tricked into
doing so.

Avoid calling recalc_intercepts 16 times for debug registers.
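
For context, recalc_intercepts() rebuilds the active VMCB intercept masks by
merging KVM's own intercepts with those requested by a nested (L1) hypervisor;
simplified from svm.c of this era, it looks roughly like this:

static void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct nested_state *g;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;		/* active VMCB */
	h = &svm->nested.hsave->control;	/* KVM's own intercepts */
	g = &svm->nested;			/* intercepts requested by L1 */

	c->intercept_cr = h->intercept_cr | g->intercept_cr;
	c->intercept_dr = h->intercept_dr | g->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
	c->intercept = h->intercept | g->intercept;
}

Batching the sixteen DR bits into set_dr_intercepts()/clr_dr_intercepts()
below means this merge runs once per update instead of once per bit.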


---
When not running in guest-debug mode (i.e. the guest controls the debug
registers), having to take an exit for each DR access is a waste of time.
If the guest gets into a state where each context switch causes DR to be
saved and restored, this can take away as much as 40% of the execution
time from the guest.

If the guest is running with vcpu->arch.db == vcpu->arch.eff_db, we
can let it write freely to the debug registers and reload them on the
next exit.  We still need to exit on the first access, so that the
KVM_DEBUGREG_WONT_EXIT flag is set in switch_db_regs; after that, further
accesses to the debug registers will not cause a vmexit.


---
When preparing the vmcs02, the CPU-based execution controls are computed
by vmx_exec_control.  Turn off DR access exits there, too, if the
KVM_DEBUGREG_WONT_EXIT bit is set in switch_db_regs.


---
When not running in guest-debug mode, the guest controls the debug
registers and having to take an exit for each DR access is a waste
of time.  If the guest gets into a state where each context switch
causes DR to be saved and restored, this can take away as much as 40%
of the execution time from the guest.

After this patch, VMX- and SVM-specific code can set a flag in
switch_db_regs, telling vcpu_enter_guest that on the next exit the debug
registers might be dirty and need to be reloaded (syncing will be taken
care of by a new callback in kvm_x86_ops).  This flag can be set on the
first access to a debug register, so that multiple accesses to the
debug registers only cause one vmexit.

Note that since the guest will be able to read debug registers and
enable breakpoints in DR7, we need to ensure that they are synchronized
on entry to the guest, including DR6, which was not synced before.
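
Condensed, the round trip these hooks implement looks roughly like the sketch
below (a paraphrase of the hunks that follow; handle_first_dr_access() and
after_vmexit() are placeholder names, and disable_mov_dr_intercepts() stands
in for the vendor-specific part):

/* First MOV-to-DR exit while vcpu->guest_debug == 0: stop intercepting. */
static int handle_first_dr_access(struct kvm_vcpu *vcpu)
{
	/* VMX: clear CPU_BASED_MOV_DR_EXITING; SVM: clr_dr_intercepts() */
	disable_mov_dr_intercepts(vcpu);
	vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
	return 1;	/* re-enter and execute the MOV DR natively */
}

/* On the next vmexit, before the host's debug registers are restored. */
static void after_vmexit(struct kvm_vcpu *vcpu)
{
	int i;

	if (!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
		return;

	/* Reads DR0-DR3/DR6/DR7 back and re-enables the intercepts. */
	kvm_x86_ops->sync_dirty_debug_regs(vcpu);
	for (i = 0; i < KVM_NR_DB_REGS; i++)
		vcpu->arch.eff_db[i] = vcpu->arch.db[i];
}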


---
Currently, this works even if the bit is not in "min", because the bit is always
set in MSR_IA32_VMX_ENTRY_CTLS.  Mention it for the sake of documentation, and
to avoid surprises if we later switch to MSR_IA32_VMX_TRUE_ENTRY_CTLS.
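
For reference, the reason the "min" versus "opt" placement matters is that
setup_vmcs_config() runs the requested controls through adjust_vmx_controls(),
which refuses to load if a required bit is not allowed by the capability MSR.
Roughly, from vmx.c of this era:

static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
				      u32 msr, u32 *result)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 ctl = ctl_min | ctl_opt;

	rdmsr(msr, vmx_msr_low, vmx_msr_high);

	ctl &= vmx_msr_high;	/* bit == 0 in high word ==> must be zero */
	ctl |= vmx_msr_low;	/* bit == 1 in low word  ==> must be one  */

	/* Ensure minimum (required) set of control bits are supported. */
	if (ctl_min & ~ctl)
		return -EIO;

	*result = ctl;
	return 0;
}

With VM_ENTRY_LOAD_DEBUG_CONTROLS in "min", a later switch to
MSR_IA32_VMX_TRUE_ENTRY_CTLS cannot silently drop the bit; hardware that
cannot load debug controls on entry would fail setup instead.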

---
--- a/arch/x86/include/asm/kvm_host.h   2014-03-31 03:40:15.000000000 +0000
+++ b/arch/x86/include/asm/kvm_host.h   2014-06-03 23:12:45.827059438 +0000
@@ -337,6 +337,11 @@
    u64 reprogram_pmi;
 };
 
+enum {
+   KVM_DEBUGREG_BP_ENABLED = 1,
+   KVM_DEBUGREG_WONT_EXIT = 2,
+};
+
 struct kvm_vcpu_arch {
    /*
     * rip and regs accesses must go through
@@ -464,7 +469,7 @@
    struct mtrr_state_type mtrr_state;
    u32 pat;
 
-   int switch_db_regs;
+   unsigned switch_db_regs;
    unsigned long db[KVM_NR_DB_REGS];
    unsigned long dr6;
    unsigned long dr7;
@@ -702,6 +707,7 @@
    void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
    u64 (*get_dr6)(struct kvm_vcpu *vcpu);
    void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
+   void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
    void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
    void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
    unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
 

--- a/arch/x86/kvm/svm.c    2014-03-31 03:40:15.000000000 +0000
+++ b/arch/x86/kvm/svm.c    2014-06-03 23:03:43.500052410 +0000
@@ -303,23 +303,38 @@
    return vmcb->control.intercept_cr & (1U << bit);
 }
 
-static inline void set_dr_intercept(struct vcpu_svm *svm, int bit)
-{
-   struct vmcb *vmcb = get_host_vmcb(svm);
-
-   vmcb->control.intercept_dr |= (1U << bit);
-
-   recalc_intercepts(svm);
-}
-
-static inline void clr_dr_intercept(struct vcpu_svm *svm, int bit)
-{
-   struct vmcb *vmcb = get_host_vmcb(svm);
-
-   vmcb->control.intercept_dr &= ~(1U << bit);
-
-   recalc_intercepts(svm);
-}
+static inline void set_dr_intercepts(struct vcpu_svm *svm)
+{
+   struct vmcb *vmcb = get_host_vmcb(svm);
+
+   vmcb->control.intercept_dr = (1 << INTERCEPT_DR0_READ)
+       | (1 << INTERCEPT_DR1_READ)
+       | (1 << INTERCEPT_DR2_READ)
+       | (1 << INTERCEPT_DR3_READ)
+       | (1 << INTERCEPT_DR4_READ)
+       | (1 << INTERCEPT_DR5_READ)
+       | (1 << INTERCEPT_DR6_READ)
+       | (1 << INTERCEPT_DR7_READ)
+       | (1 << INTERCEPT_DR0_WRITE)
+       | (1 << INTERCEPT_DR1_WRITE)
+       | (1 << INTERCEPT_DR2_WRITE)
+       | (1 << INTERCEPT_DR3_WRITE)
+       | (1 << INTERCEPT_DR4_WRITE)
+       | (1 << INTERCEPT_DR5_WRITE)
+       | (1 << INTERCEPT_DR6_WRITE)
+       | (1 << INTERCEPT_DR7_WRITE);
+
+   recalc_intercepts(svm);
+}
+
+static inline void clr_dr_intercepts(struct vcpu_svm *svm)
+{
+   struct vmcb *vmcb = get_host_vmcb(svm);
+
+   vmcb->control.intercept_dr = 0;
+
+   recalc_intercepts(svm);
+}
 
 static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
 {
@@ -1080,23 +1095,7 @@
    set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
    set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
 
-   set_dr_intercept(svm, INTERCEPT_DR0_READ);
-   set_dr_intercept(svm, INTERCEPT_DR1_READ);
-   set_dr_intercept(svm, INTERCEPT_DR2_READ);
-   set_dr_intercept(svm, INTERCEPT_DR3_READ);
-   set_dr_intercept(svm, INTERCEPT_DR4_READ);
-   set_dr_intercept(svm, INTERCEPT_DR5_READ);
-   set_dr_intercept(svm, INTERCEPT_DR6_READ);
-   set_dr_intercept(svm, INTERCEPT_DR7_READ);
-
-   set_dr_intercept(svm, INTERCEPT_DR0_WRITE);
-   set_dr_intercept(svm, INTERCEPT_DR1_WRITE);
-   set_dr_intercept(svm, INTERCEPT_DR2_WRITE);
-   set_dr_intercept(svm, INTERCEPT_DR3_WRITE);
-   set_dr_intercept(svm, INTERCEPT_DR4_WRITE);
-   set_dr_intercept(svm, INTERCEPT_DR5_WRITE);
-   set_dr_intercept(svm, INTERCEPT_DR6_WRITE);
-   set_dr_intercept(svm, INTERCEPT_DR7_WRITE);
+   set_dr_intercepts(svm);
 
    set_exception_intercept(svm, PF_VECTOR);
    set_exception_intercept(svm, UD_VECTOR);
@@ -1684,6 +1683,21 @@
    mark_dirty(svm->vmcb, VMCB_DR);
 }
 
+static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
+{
+   struct vcpu_svm *svm = to_svm(vcpu);
+
+   get_debugreg(vcpu->arch.db[0], 0);
+   get_debugreg(vcpu->arch.db[1], 1);
+   get_debugreg(vcpu->arch.db[2], 2);
+   get_debugreg(vcpu->arch.db[3], 3);
+   vcpu->arch.dr6 = svm_get_dr6(vcpu);
+   vcpu->arch.dr7 = svm->vmcb->save.dr7;
+
+   vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
+   set_dr_intercepts(svm);
+}
+
 static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
 {
    struct vcpu_svm *svm = to_svm(vcpu);
@@ -2973,6 +2987,16 @@
    int reg, dr;
    unsigned long val;
    int err;
+   if (svm->vcpu.guest_debug == 0) {
+       /*
+        * No more DR vmexits; force a reload of the debug registers
+        * and reenter on this instruction.  The next vmexit will
+        * retrieve the full state of the debug registers.
+        */
+       clr_dr_intercepts(svm);
+       svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
+       return 1;
+   }
 
    if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
        return emulate_on_interception(svm);
@@ -4302,6 +4326,7 @@
    .get_dr6 = svm_get_dr6,
    .set_dr6 = svm_set_dr6,
    .set_dr7 = svm_set_dr7,
+   .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
    .cache_reg = svm_cache_reg,
    .get_rflags = svm_get_rflags,
    .set_rflags = svm_set_rflags,
 

--- a/arch/x86/kvm/vmx.c   2014-03-31 03:40:15.000000000 +0000
+++ b/arch/x86/kvm/vmx.c    2014-06-03 22:54:01.928044874 +0000
@@ -2832,7 +2832,7 @@
              vmx_capability.ept, vmx_capability.vpid);
    }
 
-   min = 0;
+   min = VM_EXIT_SAVE_DEBUG_CONTROLS;
 #ifdef CONFIG_X86_64
    min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
 #endif
@@ -2853,7 +2853,7 @@
        !(_vmexit_control & VM_EXIT_ACK_INTR_ON_EXIT))
        _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
 
-   min = 0;
+   min = VM_ENTRY_LOAD_DEBUG_CONTROLS;
    opt = VM_ENTRY_LOAD_IA32_PAT;
    if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
                &_vmentry_control) < 0)
@@ -4223,6 +4223,10 @@
 static u32 vmx_exec_control(struct vcpu_vmx *vmx)
 {
    u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
+
+   if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
+       exec_control &= ~CPU_BASED_MOV_DR_EXITING;
+
    if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
        exec_control &= ~CPU_BASED_TPR_SHADOW;
 #ifdef CONFIG_X86_64
@@ -5101,6 +5105,21 @@
            return 1;
        }
    }
+   if (vcpu->guest_debug == 0) {
+       u32 cpu_based_vm_exec_control;
+
+       cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+       cpu_based_vm_exec_control &= ~CPU_BASED_MOV_DR_EXITING;
+       vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
+
+       /*
+        * No more DR vmexits; force a reload of the debug registers
+        * and reenter on this instruction.  The next vmexit will
+        * retrieve the full state of the debug registers.
+        */
+       vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
+       return 1;
+   }
 
    exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
    dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
@@ -5128,6 +5147,24 @@
 {
 }
 
+static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
+{
+   u32 cpu_based_vm_exec_control;
+
+   get_debugreg(vcpu->arch.db[0], 0);
+   get_debugreg(vcpu->arch.db[1], 1);
+   get_debugreg(vcpu->arch.db[2], 2);
+   get_debugreg(vcpu->arch.db[3], 3);
+   get_debugreg(vcpu->arch.dr6, 6);
+   vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);
+
+   vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
+
+   cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+   cpu_based_vm_exec_control |= CPU_BASED_MOV_DR_EXITING;
+   vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
+}
+
 static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
 {
    vmcs_writel(GUEST_DR7, val);
@@ -8573,6 +8610,7 @@
    .get_dr6 = vmx_get_dr6,
    .set_dr6 = vmx_set_dr6,
    .set_dr7 = vmx_set_dr7,
+   .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
    .cache_reg = vmx_cache_reg,
    .get_rflags = vmx_get_rflags,
    .set_rflags = vmx_set_rflags,
 

--- a/arch/x86/kvm/x86.c   2014-03-31 03:40:15.000000000 +0000
+++ b/arch/x86/kvm/x86.c    2014-06-03 23:10:14.892057482 +0000
@@ -753,7 +753,9 @@
    else
        dr7 = vcpu->arch.dr7;
    kvm_x86_ops->set_dr7(vcpu, dr7);
-   vcpu->arch.switch_db_regs = (dr7 & DR7_BP_EN_MASK);
+   vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
+   if (dr7 & DR7_BP_EN_MASK)
+       vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
 }
 
 static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
@@ -5992,12 +5994,28 @@
        set_debugreg(vcpu->arch.eff_db[1], 1);
        set_debugreg(vcpu->arch.eff_db[2], 2);
        set_debugreg(vcpu->arch.eff_db[3], 3);
+       set_debugreg(vcpu->arch.dr6, 6);
    }
 
    trace_kvm_entry(vcpu->vcpu_id);
    kvm_x86_ops->run(vcpu);
 
    /*
+    * Do this here before restoring debug registers on the host.  And
+    * since we do this before handling the vmexit, a DR access vmexit
+    * can (a) read the correct value of the debug registers, (b) set
+    * KVM_DEBUGREG_WONT_EXIT again.
+    */
+   if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
+       int i;
+
+       WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
+       kvm_x86_ops->sync_dirty_debug_regs(vcpu);
+       for (i = 0; i < KVM_NR_DB_REGS; i++)
+           vcpu->arch.eff_db[i] = vcpu->arch.db[i];
+   }
+
+   /*
     * If the guest has used debug registers, at least dr7
     * will be disabled while returning to the host.
     * If we don't have active breakpoints in the host, we don't