Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
- index 81f18ed..b9ce476 100644
- --- a/arch/arm/kvm/arm.c
- +++ b/arch/arm/kvm/arm.c
- @@ -143,6 +143,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
- /* Mark the initial VMID generation invalid */
- kvm->arch.mmu.vmid.vmid_gen = 0;
- kvm->arch.mmu.el2_vmid.vmid_gen = 0;
- + INIT_LIST_HEAD(&kvm->arch.nested_mmu_list);
- /* The maximum number of VCPUs is limited by the host's GIC model */
- kvm->arch.max_vcpus = vgic_present ?
- @@ -460,6 +461,8 @@ static void update_vttbr(struct kvm *kvm, struct kvm_s2_vmid *vmid)
- /* update vttbr to be used with the new vmid */
- spin_unlock(&kvm_vmid_lock);
- +
- + printk("new vmid: %d\n", vmid->vmid);
- }
- static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
- @@ -472,6 +475,7 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
- vcpu->arch.has_run_once = true;
- + vcpu->arch.nested_mmu = NULL;
- /*
- * Map the VGIC hardware resources before running a vcpu the first
- * time on this VM.
- diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
- index 0d3cb0e..9719517 100644
- --- a/arch/arm/kvm/mmu.c
- +++ b/arch/arm/kvm/mmu.c
- @@ -443,7 +443,8 @@ static void stage2_flush_vcpu(struct kvm_vcpu *vcpu)
- #ifdef CONFIG_KVM_ARM_NESTED_HYP
- /* TODO: Do something more clever */
- - kvm_stage2_flush_range(&vcpu->arch.nested_mmu,
- + if (vcpu->arch.nested_mmu)
- + kvm_stage2_flush_range(vcpu->arch.nested_mmu,
- 0, KVM_PHYS_SIZE);
- #endif
- diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
- index a5bfadf..9ba46eb 100644
- --- a/arch/arm64/include/asm/kvm_emulate.h
- +++ b/arch/arm64/include/asm/kvm_emulate.h
- @@ -371,13 +371,27 @@ static inline bool nested_s2_mmu_enabled(struct kvm_vcpu *vcpu)
- return vcpu->arch.ctxt.el2_regs[HCR_EL2] & HCR_VM;
- }
- +
- +static inline struct kvm_s2_mmu *get_nested_mmu(struct kvm_vcpu *vcpu)
- +{
- + struct kvm_nested_s2_mmu *mmu;
- + u64 shadow_vttbr = vcpu_el2_reg(vcpu, VTTBR_EL2);
- + printk("%s target mmu vttbr_ba: %llx\n", __func__, shadow_vttbr & VTTBR_BADDR_MASK);
- + list_for_each_entry_rcu(mmu, &vcpu->kvm->arch.nested_mmu_list, list) {
- + printk("%llx %s nested_mmu in list: vttbr_ba: %llx\n", (u64)mmu, __func__, (u64)mmu->vttbr_ba);
- + if (mmu->vttbr_ba == (shadow_vttbr & VTTBR_BADDR_MASK))
- + return (struct kvm_s2_mmu*)mmu;
- + }
- + return NULL;
- +}
- +
- static inline struct kvm_s2_mmu *vcpu_get_active_s2_mmu(struct kvm_vcpu *vcpu)
- {
- if (unlikely(vcpu_mode_el2(vcpu)))
- return &vcpu->kvm->arch.mmu;
- if (nested_s2_mmu_enabled(vcpu))
- - return &vcpu->arch.nested_mmu;
- + return get_nested_mmu(vcpu);
- else
- return &vcpu->kvm->arch.mmu;
- }
- diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
- index aeb4862..af45d4c 100644
- --- a/arch/arm64/include/asm/kvm_host.h
- +++ b/arch/arm64/include/asm/kvm_host.h
- @@ -66,6 +66,14 @@ struct kvm_s2_mmu {
- u64 vttbr;
- };
/*
 * Shadow stage-2 MMU context backing one stage-2 table of the guest
 * hypervisor.  Instances are linked on kvm->arch.nested_mmu_list and
 * looked up by the guest's VTTBR_EL2 base address.
 */
struct kvm_nested_s2_mmu {
	/*
	 * Hardware stage-2 context.  Must remain the first member: callers
	 * cast between struct kvm_s2_mmu * and struct kvm_nested_s2_mmu *.
	 */
	struct kvm_s2_mmu hw_mmu;

	/* Entry in kvm->arch.nested_mmu_list (traversed with RCU). */
	struct list_head list;

	/* Guest VTTBR_EL2 base address (VTTBR_BADDR_MASK bits) this
	 * shadow context corresponds to. */
	u64 vttbr_ba;
};
- +
- struct kvm_arch {
- /* Stage 2 paging state for the VM */
- struct kvm_s2_mmu mmu;
- @@ -78,6 +86,9 @@ struct kvm_arch {
- /* Timer */
- struct arch_timer_kvm timer;
- +
- + /* Stage 2 shadow paging contexts for nested L2 VM */
- + struct list_head nested_mmu_list;
- };
- #define KVM_NR_MEM_OBJS 40
- @@ -234,8 +245,8 @@ struct kvm_vcpu_arch {
- /* Detect first run of a vcpu */
- bool has_run_once;
- - /* Stage 2 shadow paging state for nested L2 VM */
- - struct kvm_s2_mmu nested_mmu;
- + /* Stage 2 paging context currently used for this vcpu */
- + struct kvm_s2_mmu *nested_mmu;
- /* Stage 2 paging state used by the hardware on next switch */
- struct kvm_s2_mmu *hw_mmu;
- diff --git a/arch/arm64/kvm/emulate.c b/arch/arm64/kvm/emulate.c
- index 7b3b183..69edb5f 100644
- --- a/arch/arm64/kvm/emulate.c
- +++ b/arch/arm64/kvm/emulate.c
- @@ -220,6 +220,13 @@ static void setup_s2_mmu(struct kvm_vcpu *vcpu)
- mmu->vttbr = kvm_get_vttbr(vmid, mmu);
- vcpu->arch.hw_mmu = mmu;
- +
- + /* should check condition? should it be here? */
- + vcpu->arch.nested_mmu = mmu;
- +
- + if ((!vcpu_mode_el2(vcpu)) && nested_s2_mmu_enabled(vcpu)) {
- + printk("%s setup current_mmu : %llx, vmid: %d vttbr: %llx\n", __func__, (u64)mmu, vmid->vmid, mmu->vttbr);
- + }
- }
- /*
- diff --git a/arch/arm64/kvm/handle_exit_nested.c b/arch/arm64/kvm/handle_exit_nested.c
- index d99fae5..a629bb1 100644
- --- a/arch/arm64/kvm/handle_exit_nested.c
- +++ b/arch/arm64/kvm/handle_exit_nested.c
- @@ -115,11 +115,17 @@ static int handle_msr_pv(struct kvm_vcpu *vcpu)
- * unmap the shadow page table only when vttbr_el2 is
- * changed to different non-zero value
- */
- - if (sysregp == &vcpu->arch.ctxt.el2_regs[VTTBR_EL2])
- + if (sysregp == &vcpu->arch.ctxt.el2_regs[VTTBR_EL2]) {
- if (val && (val != prev_vttbr)) {
- + struct kvm_nested_s2_mmu *nested_mmu;
- kvm_nested_s2_unmap(vcpu);
- prev_vttbr = val;
- + nested_mmu = (struct kvm_nested_s2_mmu *)get_nested_mmu(vcpu);
- + nested_mmu->vttbr_ba = val & VTTBR_BADDR_MASK;
- + printk("%llx mmu vttbr is written. ba: %llx\n", (u64)nested_mmu, nested_mmu->vttbr_ba);
- }
- + }
- +
- *sysregp = val;
- diff --git a/arch/arm64/kvm/mmu-nested.c b/arch/arm64/kvm/mmu-nested.c
- index c395800..6edadd2 100644
- --- a/arch/arm64/kvm/mmu-nested.c
- +++ b/arch/arm64/kvm/mmu-nested.c
- @@ -309,7 +309,7 @@ void kvm_nested_s2_all_vcpus_wp(struct kvm *kvm)
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (need_resched() || spin_needbreak(&kvm->mmu_lock))
- cond_resched_lock(&kvm->mmu_lock);
- - kvm_stage2_wp_range(kvm, &vcpu->arch.nested_mmu,
- + kvm_stage2_wp_range(kvm, vcpu->arch.nested_mmu,
- 0, KVM_PHYS_SIZE);
- }
- }
- @@ -323,7 +323,8 @@ void kvm_nested_s2_all_vcpus_unmap(struct kvm *kvm)
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (need_resched() || spin_needbreak(&kvm->mmu_lock))
- cond_resched_lock(&kvm->mmu_lock);
- - kvm_unmap_stage2_range(&vcpu->arch.nested_mmu,
- + if (vcpu->arch.nested_mmu)
- + kvm_unmap_stage2_range(vcpu->arch.nested_mmu,
- 0, KVM_PHYS_SIZE);
- }
- }
- @@ -336,25 +337,40 @@ void kvm_nested_s2_all_vcpus_flush(struct kvm *kvm)
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (need_resched() || spin_needbreak(&kvm->mmu_lock))
- cond_resched_lock(&kvm->mmu_lock);
- - kvm_stage2_flush_range(&vcpu->arch.nested_mmu,
- + if (vcpu->arch.nested_mmu)
- + kvm_stage2_flush_range(vcpu->arch.nested_mmu,
- 0, KVM_PHYS_SIZE);
- }
- }
- void kvm_nested_s2_unmap(struct kvm_vcpu *vcpu)
- {
- - kvm_unmap_stage2_range(&vcpu->arch.nested_mmu, 0, KVM_PHYS_SIZE);
- + if (vcpu->arch.nested_mmu)
- + kvm_unmap_stage2_range(vcpu->arch.nested_mmu, 0, KVM_PHYS_SIZE);
- }
- int kvm_nested_s2_init(struct kvm_vcpu *vcpu)
- {
- - vcpu->arch.nested_mmu.vmid.vmid_gen = 0;
- - return __kvm_alloc_stage2_pgd(&vcpu->arch.nested_mmu);
- + /* should we check if s2 is already existing */
- + int ret;
- + struct kvm_nested_s2_mmu *nested_s2_mmu;
- +
- + nested_s2_mmu = kzalloc(sizeof(struct kvm_nested_s2_mmu), GFP_KERNEL);
- + nested_s2_mmu->hw_mmu.vmid.vmid_gen = 0;
- + ret = __kvm_alloc_stage2_pgd(&nested_s2_mmu->hw_mmu);
- + if (!ret) {
- + list_add_rcu(&nested_s2_mmu->list, &vcpu->kvm->arch.nested_mmu_list);
- + printk("%llx %s added pgd: %llx\n", (u64)nested_s2_mmu, __func__, (u64)nested_s2_mmu->hw_mmu.pgd);
- + }
- +
- + return ret;
- }
/*
 * Release the stage-2 page tables of the vcpu's current shadow context.
 *
 * NOTE(review): this only frees the pgd of the context this vcpu last
 * used.  The kvm_nested_s2_mmu containers are never list_del'd from
 * kvm->arch.nested_mmu_list nor kfree'd (see TODO below), so every other
 * context -- and every container -- leaks at VM destruction.  Also, if
 * two vcpus end up pointing at the same context, the pgd may be freed
 * twice.  Confirm whether teardown should instead walk nested_mmu_list
 * once per VM, unlinking and freeing each entry.
 */
void kvm_nested_s2_teardown(struct kvm_vcpu *vcpu)
{
	/* TODO: remove mmu from list */
	if (vcpu->arch.nested_mmu)
		__kvm_free_stage2_pgd(vcpu->arch.nested_mmu);
}
- /* Ideally, this value should come from QEMU ioctl */
- @@ -366,7 +382,7 @@ int kvm_nested_mmio_ondemand(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
- phys_addr_t vcpu_base = vgic_vcpu_base();
- if (ipa == NESTED_VCPU_IF_ADDR) {
- - ret = __kvm_phys_addr_ioremap(vcpu->kvm, &vcpu->arch.nested_mmu,
- + ret = __kvm_phys_addr_ioremap(vcpu->kvm, vcpu->arch.nested_mmu,
- fault_ipa, vcpu_base, KVM_VGIC_V2_CPU_SIZE, true);
- if (!ret)
- ret = 1;
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement