diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 2d92957..21ee46c 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -20,11 +20,75 @@
 #include <asm/hvm/emulate.h>
 #include <asm/hvm/hvm.h>
 #include <asm/hvm/ioreq.h>
+#include <asm/hvm/nestedhvm.h>
 #include <asm/hvm/trace.h>
 #include <asm/hvm/support.h>
 #include <asm/hvm/svm/svm.h>
 #include <asm/vm_event.h>
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+#define __raw_cmpxchg(ptr, old, new, size, lock)                \
+({                                                              \
+    __typeof__(*(ptr)) __ret;                                   \
+    __typeof__(*(ptr)) __old = (old);                           \
+    __typeof__(*(ptr)) __new = (new);                           \
+    switch (size) {                                             \
+    case 1:                                                     \
+    {                                                           \
+        volatile u8 *__ptr = (volatile u8 *)(ptr);              \
+        asm volatile(lock "cmpxchgb %2,%1"                      \
+                     : "=a" (__ret), "+m" (*__ptr)              \
+                     : "q" (__new), "0" (__old)                 \
+                     : "memory");                               \
+        break;                                                  \
+    }                                                           \
+    case 2:                                                     \
+    {                                                           \
+        volatile u16 *__ptr = (volatile u16 *)(ptr);            \
+        asm volatile(lock "cmpxchgw %2,%1"                      \
+                     : "=a" (__ret), "+m" (*__ptr)              \
+                     : "r" (__new), "0" (__old)                 \
+                     : "memory");                               \
+        break;                                                  \
+    }                                                           \
+    case 4:                                                     \
+    {                                                           \
+        volatile u32 *__ptr = (volatile u32 *)(ptr);            \
+        asm volatile(lock "cmpxchgl %2,%1"                      \
+                     : "=a" (__ret), "+m" (*__ptr)              \
+                     : "r" (__new), "0" (__old)                 \
+                     : "memory");                               \
+        break;                                                  \
+    }                                                           \
+    case 8:                                                     \
+    {                                                           \
+        volatile u64 *__ptr = (volatile u64 *)(ptr);            \
+        asm volatile(lock "cmpxchgq %2,%1"                      \
+                     : "=a" (__ret), "+m" (*__ptr)              \
+                     : "r" (__new), "0" (__old)                 \
+                     : "memory");                               \
+        break;                                                  \
+    }                                                           \
+    }                                                           \
+    __ret;                                                      \
+})
+
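+/*
+ * Wrappers in the style of Linux's <asm/cmpxchg.h>: sync_cmpxchg()
+ * always emits the LOCK prefix and is safe against concurrent (v)CPUs,
+ * while cmpxchg_local() omits it for accesses known not to be contended.
+ */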
+#define __sync_cmpxchg(ptr, old, new, size)                     \
+    __raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
+
+#define __cmpxchg_local(ptr, old, new, size)                    \
+    __raw_cmpxchg((ptr), (old), (new), (size), "")
+
+#define sync_cmpxchg(ptr, old, new)                             \
+    __sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))
+
+#define cmpxchg_local(ptr, old, new)                            \
+    __cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
+
 static void hvmtrace_io_assist(const ioreq_t *p)
 {
     unsigned int size, event;
@@ -992,6 +1056,7 @@ static int hvmemul_cmpxchg_discard(
     void *p_old,
     void *p_new,
     unsigned int bytes,
+    bool lock,
     struct x86_emulate_ctxt *ctxt)
 {
     return X86EMUL_OKAY;
@@ -1035,10 +1100,137 @@ static int hvmemul_cmpxchg(
     void *p_old,
     void *p_new,
     unsigned int bytes,
+    bool lock,
     struct x86_emulate_ctxt *ctxt)
 {
+    unsigned long addr, gfn, reps = 1;
+    struct hvm_emulate_ctxt *hvmemul_ctxt =
+        container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
+    uint32_t pfec = PFEC_page_present | PFEC_write_access;
+    paddr_t gpa;
+    struct page_info *page;
+    struct vcpu *curr = current;
+    p2m_type_t p2mt;
+    char *p;
+    int rc = X86EMUL_OKAY;
+    bool ret = true;
+
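+    /*
+     * Implicit supervisor accesses (e.g. descriptor table reads) must not
+     * have user mode folded in; everything else inherits it from SS.DPL.
+     */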
+    if ( is_x86_system_segment(seg) )
+        pfec |= PFEC_implicit;
+    else if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
+        pfec |= PFEC_user_mode;
+
+    rc = hvmemul_virtual_to_linear(
+        seg, offset, bytes, &reps, hvm_access_write, hvmemul_ctxt, &addr);
+    if ( rc != X86EMUL_OKAY || !bytes )
+        return rc;
+
+    gfn = paging_gva_to_gfn(curr, addr, &pfec);
+    if ( gfn == gfn_x(INVALID_GFN) )
+    {
+        if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
+            return X86EMUL_RETRY;
+
+        x86_emul_pagefault(pfec, addr, &hvmemul_ctxt->ctxt);
+        return X86EMUL_EXCEPTION;
+    }
+
+    /* Only compute the physical address once the translation is known. */
+    gpa = ((paddr_t)gfn << PAGE_SHIFT) | (addr & ~PAGE_MASK);
+
+    /*
+     * No need to do the P2M lookup for internally handled MMIO, benefiting
+     * - 32-bit WinXP (& older Windows) on AMD CPUs for LAPIC accesses,
+     * - newer Windows (like Server 2012) for HPET accesses.
+     */
+    if ( !nestedhvm_vcpu_in_guestmode(curr) && hvm_mmio_internal(gpa) )
+        return X86EMUL_UNHANDLEABLE;
+
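+    /* Grab a reference to the backing page, attempting to unshare it. */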
+    page = get_page_from_gfn(curr->domain, gfn, &p2mt, P2M_UNSHARE);
+
+    if ( !page )
+        return X86EMUL_UNHANDLEABLE;
+
+    if ( p2m_is_paging(p2mt) )
+    {
+        put_page(page);
+        p2m_mem_paging_populate(curr->domain, gfn);
+        return X86EMUL_RETRY;
+    }
+    if ( p2m_is_shared(p2mt) )
+    {
+        put_page(page);
+        return X86EMUL_RETRY;
+    }
+    if ( p2m_is_grant(p2mt) )
+    {
+        put_page(page);
+        return X86EMUL_UNHANDLEABLE;
+    }
+
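+    /* Map the page and point at the in-page offset of the operand. */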
+    p = (char *)__map_domain_page(page) + (addr & ~PAGE_MASK);
+
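+    /*
+     * Perform the CMPXCHG on the mapped page directly, honouring the
+     * instruction's LOCK prefix so the access is atomic with respect to
+     * other (v)CPUs. cmpxchg returns the initial value in MEM; success
+     * means that value matched 'old'.
+     */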
+    switch ( bytes )
+    {
+    case 1:
+    {
+        uint8_t val, old = *(uint8_t *)p_old, new = *(uint8_t *)p_new;
+        uint8_t *mem = (uint8_t *)p;
+
+        val = lock ? sync_cmpxchg(mem, old, new)
+                   : cmpxchg_local(mem, old, new);
+        ret = (val == old);
+        break;
+    }
+    case 2:
+    {
+        uint16_t val, old = *(uint16_t *)p_old, new = *(uint16_t *)p_new;
+        uint16_t *mem = (uint16_t *)p;
+
+        val = lock ? sync_cmpxchg(mem, old, new)
+                   : cmpxchg_local(mem, old, new);
+        ret = (val == old);
+        break;
+    }
+    case 4:
+    {
+        uint32_t val, old = *(uint32_t *)p_old, new = *(uint32_t *)p_new;
+        uint32_t *mem = (uint32_t *)p;
+
+        val = lock ? sync_cmpxchg(mem, old, new)
+                   : cmpxchg_local(mem, old, new);
+        ret = (val == old);
+        break;
+    }
+    case 8:
+    {
+        uint64_t val, old = *(uint64_t *)p_old, new = *(uint64_t *)p_new;
+        uint64_t *mem = (uint64_t *)p;
+
+        val = lock ? sync_cmpxchg(mem, old, new)
+                   : cmpxchg_local(mem, old, new);
+        ret = (val == old);
+        break;
+    }
+    default:
+        rc = X86EMUL_UNHANDLEABLE;
+    }
+
+    /* The compare failed: have the caller retry the whole instruction. */
+    if ( !ret )
+        rc = X86EMUL_RETRY;
+
-    /* Fix this in case the guest is really relying on r-m-w atomicity. */
-    return hvmemul_write(seg, offset, p_new, bytes, ctxt);
+    unmap_domain_page(p);
+    put_page(page);
+
+    return rc;
 }
 static int hvmemul_validate(
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index a6b2649..b6d8be6 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5336,6 +5336,7 @@ static int ptwr_emulated_cmpxchg(
     void *p_old,
     void *p_new,
     unsigned int bytes,
+    bool lock,
     struct x86_emulate_ctxt *ctxt)
 {
     paddr_t old = 0, new = 0;
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index d93f2ab..4c269b8 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -280,6 +280,7 @@ hvm_emulate_cmpxchg(enum x86_segment seg,
                     void *p_old,
                     void *p_new,
                     unsigned int bytes,
+                    bool lock,
                     struct x86_emulate_ctxt *ctxt)
 {
     struct sh_emulate_ctxt *sh_ctxt =
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index bb67be6..d5d78e6 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -1850,7 +1850,7 @@ protmode_load_seg(
         fail_if(!ops->cmpxchg);
         switch ( (rc = ops->cmpxchg(sel_seg, (sel & 0xfff8) + 4, &desc.b,
-                                    &new_desc_b, sizeof(desc.b), ctxt)) )
+                                    &new_desc_b, sizeof(desc.b), false, ctxt)) )
         {
         case X86EMUL_OKAY:
             break;
@@ -7017,7 +7017,7 @@ x86_emulate(
         }
         if ( (rc = ops->cmpxchg(ea.mem.seg, ea.mem.off, old, aux,
-                                op_bytes, ctxt)) != X86EMUL_OKAY )
+                                op_bytes, lock_prefix, ctxt)) != X86EMUL_OKAY )
             goto done;
         _regs.eflags |= X86_EFLAGS_ZF;
     }
@@ -7941,7 +7941,7 @@ x86_emulate(
         fail_if(!ops->cmpxchg);
         rc = ops->cmpxchg(
             dst.mem.seg, dst.mem.off, &dst.orig_val,
-            &dst.val, dst.bytes, ctxt);
+            &dst.val, dst.bytes, lock_prefix, ctxt);
     }
     else
     {
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.h b/xen/arch/x86/x86_emulate/x86_emulate.h
index 9c5fcde..dc46e1f 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -259,6 +259,7 @@ struct x86_emulate_ops
         void *p_old,
         void *p_new,
         unsigned int bytes,
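+        /* True if the emulated instruction carried a LOCK prefix. */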
+        bool lock,
         struct x86_emulate_ctxt *ctxt);
     /*