jintack

Untitled

Jun 13th, 2016
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 81f18ed..b9ce476 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -143,6 +143,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
    /* Mark the initial VMID generation invalid */
    kvm->arch.mmu.vmid.vmid_gen = 0;
    kvm->arch.mmu.el2_vmid.vmid_gen = 0;
+   INIT_LIST_HEAD(&kvm->arch.nested_mmu_list);
 
    /* The maximum number of VCPUs is limited by the host's GIC model */
    kvm->arch.max_vcpus = vgic_present ?
@@ -460,6 +461,8 @@ static void update_vttbr(struct kvm *kvm, struct kvm_s2_vmid *vmid)
    /* update vttbr to be used with the new vmid */
 
    spin_unlock(&kvm_vmid_lock);
+
+   printk("new vmid: %d\n", vmid->vmid);
 }
 
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
@@ -472,6 +475,7 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 
    vcpu->arch.has_run_once = true;
 
+   vcpu->arch.nested_mmu = NULL;
    /*
     * Map the VGIC hardware resources before running a vcpu the first
     * time on this VM.
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 0d3cb0e..9719517 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -443,7 +443,8 @@ static void stage2_flush_vcpu(struct kvm_vcpu *vcpu)
 
 #ifdef CONFIG_KVM_ARM_NESTED_HYP
    /* TODO: Do something more clever */
-   kvm_stage2_flush_range(&vcpu->arch.nested_mmu,
+   if (vcpu->arch.nested_mmu)
+       kvm_stage2_flush_range(vcpu->arch.nested_mmu,
                   0, KVM_PHYS_SIZE);
 #endif
 
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index a5bfadf..9ba46eb 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -371,13 +371,27 @@ static inline bool nested_s2_mmu_enabled(struct kvm_vcpu *vcpu)
    return vcpu->arch.ctxt.el2_regs[HCR_EL2] & HCR_VM;
 }
 
+
+static inline struct kvm_s2_mmu *get_nested_mmu(struct kvm_vcpu *vcpu)
+{
+   struct kvm_nested_s2_mmu *mmu;
+   u64 shadow_vttbr = vcpu_el2_reg(vcpu, VTTBR_EL2);
+   printk("%s target mmu vttbr_ba: %llx\n", __func__, shadow_vttbr & VTTBR_BADDR_MASK);
+   list_for_each_entry_rcu(mmu, &vcpu->kvm->arch.nested_mmu_list, list) {
+       printk("%llx %s nested_mmu in list: vttbr_ba: %llx\n", (u64)mmu, __func__, (u64)mmu->vttbr_ba);
+       if (mmu->vttbr_ba == (shadow_vttbr & VTTBR_BADDR_MASK))
+           return (struct kvm_s2_mmu*)mmu;
+   }
+   return NULL;
+}
+
 static inline struct kvm_s2_mmu *vcpu_get_active_s2_mmu(struct kvm_vcpu *vcpu)
 {
    if (unlikely(vcpu_mode_el2(vcpu)))
        return &vcpu->kvm->arch.mmu;
 
    if (nested_s2_mmu_enabled(vcpu))
-       return &vcpu->arch.nested_mmu;
+       return get_nested_mmu(vcpu);
    else
        return &vcpu->kvm->arch.mmu;
 }
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index aeb4862..af45d4c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -66,6 +66,14 @@ struct kvm_s2_mmu {
    u64    vttbr;
 };
 
+struct kvm_nested_s2_mmu {
+   struct kvm_s2_mmu hw_mmu;
+
+   struct list_head list;
+
+   u64    vttbr_ba;
+};
+
 struct kvm_arch {
    /* Stage 2 paging state for the VM */
    struct kvm_s2_mmu mmu;
@@ -78,6 +86,9 @@ struct kvm_arch {
 
    /* Timer */
    struct arch_timer_kvm   timer;
+
+   /* Stage 2 shadow paging contexts for nested L2 VM */
+   struct list_head nested_mmu_list;
 };
 
 #define KVM_NR_MEM_OBJS     40
@@ -234,8 +245,8 @@ struct kvm_vcpu_arch {
    /* Detect first run of a vcpu */
    bool has_run_once;
 
-   /* Stage 2 shadow paging state for nested L2 VM */
-   struct kvm_s2_mmu nested_mmu;
+   /* Stage 2 paging context currently used for this vcpu */
+   struct kvm_s2_mmu *nested_mmu;
 
    /* Stage 2 paging state used by the hardware on next switch */
    struct kvm_s2_mmu *hw_mmu;
diff --git a/arch/arm64/kvm/emulate.c b/arch/arm64/kvm/emulate.c
index 7b3b183..69edb5f 100644
--- a/arch/arm64/kvm/emulate.c
+++ b/arch/arm64/kvm/emulate.c
@@ -220,6 +220,13 @@ static void setup_s2_mmu(struct kvm_vcpu *vcpu)
 
    mmu->vttbr = kvm_get_vttbr(vmid, mmu);
    vcpu->arch.hw_mmu = mmu;
+
+   /* should check condition? should it be here? */
+   vcpu->arch.nested_mmu = mmu;
+
+   if ((!vcpu_mode_el2(vcpu)) && nested_s2_mmu_enabled(vcpu)) {
+       printk("%s setup current_mmu : %llx, vmid: %d vttbr: %llx\n", __func__, (u64)mmu, vmid->vmid, mmu->vttbr);
+   }
 }
 
 /*
diff --git a/arch/arm64/kvm/handle_exit_nested.c b/arch/arm64/kvm/handle_exit_nested.c
index d99fae5..a629bb1 100644
--- a/arch/arm64/kvm/handle_exit_nested.c
+++ b/arch/arm64/kvm/handle_exit_nested.c
@@ -115,11 +115,17 @@ static int handle_msr_pv(struct kvm_vcpu *vcpu)
     * unmap the shadow page table only when vttbr_el2 is
     * changed to different non-zero value
     */
-   if (sysregp == &vcpu->arch.ctxt.el2_regs[VTTBR_EL2])
+   if (sysregp == &vcpu->arch.ctxt.el2_regs[VTTBR_EL2]) {
        if (val && (val != prev_vttbr)) {
+           struct kvm_nested_s2_mmu *nested_mmu;
            kvm_nested_s2_unmap(vcpu);
            prev_vttbr = val;
+           nested_mmu = (struct kvm_nested_s2_mmu *)get_nested_mmu(vcpu);
+           nested_mmu->vttbr_ba = val & VTTBR_BADDR_MASK;
+           printk("%llx mmu vttbr is written. ba: %llx\n", (u64)nested_mmu, nested_mmu->vttbr_ba);
        }
+   }
+
 
    *sysregp = val;
 
diff --git a/arch/arm64/kvm/mmu-nested.c b/arch/arm64/kvm/mmu-nested.c
index c395800..6edadd2 100644
--- a/arch/arm64/kvm/mmu-nested.c
+++ b/arch/arm64/kvm/mmu-nested.c
@@ -309,7 +309,7 @@ void kvm_nested_s2_all_vcpus_wp(struct kvm *kvm)
    kvm_for_each_vcpu(i, vcpu, kvm) {
        if (need_resched() || spin_needbreak(&kvm->mmu_lock))
            cond_resched_lock(&kvm->mmu_lock);
-       kvm_stage2_wp_range(kvm, &vcpu->arch.nested_mmu,
+       kvm_stage2_wp_range(kvm, vcpu->arch.nested_mmu,
                    0, KVM_PHYS_SIZE);
    }
 }
@@ -323,7 +323,8 @@ void kvm_nested_s2_all_vcpus_unmap(struct kvm *kvm)
    kvm_for_each_vcpu(i, vcpu, kvm) {
        if (need_resched() || spin_needbreak(&kvm->mmu_lock))
            cond_resched_lock(&kvm->mmu_lock);
-       kvm_unmap_stage2_range(&vcpu->arch.nested_mmu,
+       if (vcpu->arch.nested_mmu)
+           kvm_unmap_stage2_range(vcpu->arch.nested_mmu,
                       0, KVM_PHYS_SIZE);
    }
 }
@@ -336,25 +337,40 @@ void kvm_nested_s2_all_vcpus_flush(struct kvm *kvm)
    kvm_for_each_vcpu(i, vcpu, kvm) {
        if (need_resched() || spin_needbreak(&kvm->mmu_lock))
            cond_resched_lock(&kvm->mmu_lock);
-       kvm_stage2_flush_range(&vcpu->arch.nested_mmu,
+       if (vcpu->arch.nested_mmu)
+           kvm_stage2_flush_range(vcpu->arch.nested_mmu,
                       0, KVM_PHYS_SIZE);
    }
 }
 
 void kvm_nested_s2_unmap(struct kvm_vcpu *vcpu)
 {
-   kvm_unmap_stage2_range(&vcpu->arch.nested_mmu, 0, KVM_PHYS_SIZE);
+   if (vcpu->arch.nested_mmu)
+       kvm_unmap_stage2_range(vcpu->arch.nested_mmu, 0, KVM_PHYS_SIZE);
 }
 
 int kvm_nested_s2_init(struct kvm_vcpu *vcpu)
 {
-   vcpu->arch.nested_mmu.vmid.vmid_gen = 0;
-   return __kvm_alloc_stage2_pgd(&vcpu->arch.nested_mmu);
+   /* should we check if s2 is already existing */
+   int ret;
+   struct kvm_nested_s2_mmu *nested_s2_mmu;
+
+   nested_s2_mmu = kzalloc(sizeof(struct kvm_nested_s2_mmu), GFP_KERNEL);
+   nested_s2_mmu->hw_mmu.vmid.vmid_gen = 0;
+   ret = __kvm_alloc_stage2_pgd(&nested_s2_mmu->hw_mmu);
+   if (!ret) {
+       list_add_rcu(&nested_s2_mmu->list, &vcpu->kvm->arch.nested_mmu_list);
+       printk("%llx %s added pgd: %llx\n", (u64)nested_s2_mmu, __func__, (u64)nested_s2_mmu->hw_mmu.pgd);
+   }
+
+   return ret;
 }
 
 void kvm_nested_s2_teardown(struct kvm_vcpu *vcpu)
 {
-   __kvm_free_stage2_pgd(&vcpu->arch.nested_mmu);
+   /* TODO: remove mmu from list */
+   if (vcpu->arch.nested_mmu)
+       __kvm_free_stage2_pgd(vcpu->arch.nested_mmu);
 }
 
 /* Ideally, this value should come from QEMU ioctl */
@@ -366,7 +382,7 @@ int kvm_nested_mmio_ondemand(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
    phys_addr_t vcpu_base = vgic_vcpu_base();
 
    if (ipa == NESTED_VCPU_IF_ADDR)  {
-       ret = __kvm_phys_addr_ioremap(vcpu->kvm, &vcpu->arch.nested_mmu,
+       ret = __kvm_phys_addr_ioremap(vcpu->kvm, vcpu->arch.nested_mmu,
                fault_ipa, vcpu_base, KVM_VGIC_V2_CPU_SIZE, true);
        if (!ret)
            ret = 1;
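
For reference, below is a minimal standalone sketch of the idea this patch introduces: shadow stage-2 contexts are kept on a per-VM list and matched against the guest hypervisor's VTTBR_EL2 base address (the lookup get_nested_mmu() performs, plus the allocation done in kvm_nested_s2_init()). The struct names, the mask value, and the lookup_or_alloc() helper here are illustrative stand-ins compiled as plain userspace C, not the kernel code; the patch itself walks an RCU list under KVM's locking rules.

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

/* Illustrative placeholder for VTTBR_BADDR_MASK, not the kernel's definition. */
#define VTTBR_BADDR_MASK   0x0000fffffffffffeULL

/* Simplified stand-ins for kvm_s2_mmu / kvm_nested_s2_mmu. */
struct s2_mmu {
    uint64_t vttbr;            /* value that would be programmed into hardware */
};

struct nested_s2_mmu {
    struct s2_mmu hw_mmu;      /* shadow stage-2 context backing this entry */
    uint64_t vttbr_ba;         /* guest hypervisor's VTTBR_EL2 base address (lookup key) */
    struct nested_s2_mmu *next;
};

/*
 * Walk the per-VM list and return the shadow MMU whose key matches the
 * guest hypervisor's current VTTBR_EL2 base address, allocating a new
 * entry on a miss. Locking and RCU are omitted for clarity.
 */
static struct nested_s2_mmu *lookup_or_alloc(struct nested_s2_mmu **head,
                                             uint64_t guest_vttbr_el2)
{
    uint64_t ba = guest_vttbr_el2 & VTTBR_BADDR_MASK;
    struct nested_s2_mmu *mmu;

    for (mmu = *head; mmu; mmu = mmu->next)
        if (mmu->vttbr_ba == ba)
            return mmu;

    mmu = calloc(1, sizeof(*mmu));
    if (!mmu)
        return NULL;
    mmu->vttbr_ba = ba;
    mmu->next = *head;
    *head = mmu;
    return mmu;
}

int main(void)
{
    struct nested_s2_mmu *list = NULL;

    /* Two different L1 stage-2 page-table bases get two shadow contexts... */
    struct nested_s2_mmu *a = lookup_or_alloc(&list, 0x81230000ULL);
    struct nested_s2_mmu *b = lookup_or_alloc(&list, 0x9abc0000ULL);
    /* ...and a repeated base finds the existing entry instead of allocating. */
    struct nested_s2_mmu *c = lookup_or_alloc(&list, 0x81230000ULL);

    printf("a=%p b=%p c=%p (a == c: %d)\n",
           (void *)a, (void *)b, (void *)c, a == c);
    return 0;
}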