Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- diff -Npru kernel/include/linux/azure.h /home/paul/kvm-68/kernel/include/linux/azure.h
- --- kernel/include/linux/azure.h 1970-01-01 00:00:00.000000000 +0000
- +++ /home/paul/kvm-68/kernel/include/linux/azure.h 2008-08-04 14:13:52.914039176 +0000
- @@ -0,0 +1,28 @@
#ifndef __AZURE_H
#define __AZURE_H

#include <linux/types.h>

/* Image file name (EPROCESS.ImageFileName) of the guest process to trace. */
#define TARGET_NAME "sample.exe"

/* Opcode and encoded length of PUSHF/POPF; these are tracked during
 * single-stepping so the injected trap flag stays invisible to the
 * traced code (see process_op_pushf/process_op_popf in vmx.c). */
#define PUSHF_OPCODE 0x9C
#define PUSHF_SIZE 1
#define POPF_OPCODE 0x9D
#define POPF_SIZE 1

/* Transfer direction selector for mem_op() in vmx.c. */
enum {MEM_OP_READ, MEM_OP_WRITE};

/* Per-vcpu single-step tracing state, embedded in struct kvm_vcpu and
 * zeroed in kvm_vcpu_init(). */
struct azure{
	u8 enabled;		/* armed on the first WRMSR exit (handle_wrmsr) */
	u32 target_cr3;		/* page directory base of the target; 0 if unknown */
	u32 target_entrypoint;	/* module entrypoint gva; 0 until PEB is readable */
	u32 target_base;	/* module image base gva */
	u32 target_size;	/* module image size in bytes */
	u8 set_tf;		/* request TF single-stepping on next vmentry */
	u8 prv_set_tf;		/* previous set_tf, to detect the 1->0 edge */
	unsigned long post_pushf_ip;	/* rip right after a pending user-mode PUSHF */
	unsigned long post_popf_ip;	/* rip right after a pending user-mode POPF */
	u8 popf_fwd_tf;		/* #DB followed a POPF: forward the exit to userspace */
};

#endif
- diff -Npru kernel/include/linux/kvm_host.h /home/paul/kvm-68/kernel/include/linux/kvm_host.h
- --- kernel/include/linux/kvm_host.h 2008-05-04 15:28:38.000000000 +0000
- +++ /home/paul/kvm-68/kernel/include/linux/kvm_host.h 2008-08-04 14:13:52.914039176 +0000
- @@ -6,6 +6,8 @@
- * the COPYING file in the top-level directory.
- */
- +#include "azure.h"
- +
- #include <linux/types.h>
- #include <linux/hardirq.h>
- #include <linux/list.h>
- @@ -84,6 +86,7 @@ struct kvm_vcpu {
- #endif
- struct kvm_vcpu_arch arch;
- + struct azure azure;
- };
- struct kvm_memory_slot {
- diff -Npru kernel/kvm_main.c /home/paul/kvm-68/kernel/kvm_main.c
- --- kernel/kvm_main.c 2008-05-04 15:28:39.000000000 +0000
- +++ /home/paul/kvm-68/kernel/kvm_main.c 2008-08-04 14:13:52.918039140 +0000
- @@ -165,6 +165,8 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu,
- r = kvm_arch_vcpu_init(vcpu);
- if (r < 0)
- goto fail_free_run;
- +
- + memset(&vcpu->azure, 0, sizeof(vcpu->azure));
- return 0;
- fail_free_run:
- @@ -738,6 +740,7 @@ int kvm_write_guest(struct kvm *kvm, gpa
- }
- return 0;
- }
- +EXPORT_SYMBOL_GPL(kvm_write_guest);
- int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
- {
- diff -Npru kernel/msw.h /home/paul/kvm-68/kernel/msw.h
- --- kernel/msw.h 1970-01-01 00:00:00.000000000 +0000
- +++ /home/paul/kvm-68/kernel/msw.h 2008-08-04 14:13:52.918039140 +0000
- @@ -0,0 +1,50 @@
#ifndef __MSW_H
#define __MSW_H

#include <linux/types.h>

/*
 * Minimal mirrors of the 32-bit Microsoft Windows in-guest structures
 * walked by vmx.c to identify the target process and its main module.
 * NOTE(review): these offsets are hard-coded for one Windows build —
 * verify them against the analysed guest's exact version.
 */

/* FS:[0x124] -> current ETHREAD — assumed KPCR layout, TODO confirm. */
#define FS_ETHREAD_OFFSET 0x124
/* ETHREAD + 0x44 -> owning EPROCESS. */
#define ETHREAD_EPROCESS_OFFSET 0x44

/* EPROCESS + 0x174 -> ImageFileName (IMAGE_NAME_LEN bytes). */
#define EPROCESS_IMAGE_NAME_OFFSET 0x174
/* EPROCESS + 0x1b0 -> PEB pointer. */
#define EPROCESS_PEB_OFFSET 0x1b0

/* Guest-virtual addresses at or above this are treated as kernel space
 * (assumes the default 2GB/2GB split — TODO confirm no /3GB switch). */
#define KERNEL_START 0x80000000

/* Size of the image name buffer, including the terminating NUL. */
#define IMAGE_NAME_LEN 16

/* Leading fields of the Windows PEB; vmx.c only consumes
 * image_base_address and loader_data. */
struct msw_peb{
	u8 inherited_address_space;
	u8 read_image_file_exec_options;
	u8 being_debugged;
	u8 spare;
	u32 mutant;
	u32 image_base_address;
	u32 loader_data;	/* gva of a struct msw_peb_ldr_data */
	u32 process_parameters;
};

/* 32-bit doubly linked LIST_ENTRY; both links are guest-virtual. */
struct list_entry{
	u32 flink;	/* forward link */
	u32 blink;	/* backward link */
};

/* Leading fields of PEB_LDR_DATA: the three module-list heads. */
struct msw_peb_ldr_data{
	u32 length;
	u32 initialized;
	u32 sshandle;
	struct list_entry in_ld_order_mod_list;
	struct list_entry in_mem_order_mod_list;
	struct list_entry in_init_order_mod_list;
};

/* Leading fields of a loader module entry for one loaded image. */
struct msw_ldr_mod{
	struct list_entry in_ld_order_mod_list;
	struct list_entry in_mem_order_mod_list;
	struct list_entry in_init_order_mod_list;
	u32 base_address;
	u32 entrypoint;
	u32 size_of_image;
};

#endif
- diff -Npru kernel/vmx.c /home/paul/kvm-68/kernel/vmx.c
- --- kernel/vmx.c 2008-05-04 15:28:39.000000000 +0000
- +++ /home/paul/kvm-68/kernel/vmx.c 2008-08-04 14:14:34.825661675 +0000
- @@ -19,6 +19,9 @@
- #include "vmx.h"
- #include "mmu.h"
- +#include "msw.h"
- +#include "include/linux/azure.h"
- +
- #include <linux/kvm_host.h>
- #include <linux/module.h>
- #include <linux/kernel.h>
- @@ -464,6 +467,10 @@ static void update_exception_bitmap(stru
- eb = ~0;
- if (vm_need_ept())
- eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
- +
- + if(vcpu->azure.set_tf == 1)
- + eb |= 1u << 1;
- +
- vmcs_write32(EXCEPTION_BITMAP, eb);
- }
- @@ -1084,7 +1091,7 @@ static __init int setup_vmcs_config(stru
- CPU_BASED_CR3_LOAD_EXITING |
- CPU_BASED_CR3_STORE_EXITING |
- CPU_BASED_USE_IO_BITMAPS |
- - CPU_BASED_MOV_DR_EXITING |
- + // CPU_BASED_MOV_DR_EXITING |
- CPU_BASED_USE_TSC_OFFSETING;
- opt = CPU_BASED_TPR_SHADOW |
- CPU_BASED_USE_MSR_BITMAPS |
- @@ -2206,6 +2213,135 @@ static int handle_rmode_exception(struct
- return 0;
- }
- +static int mem_op(struct kvm_vcpu *vcpu, gva_t gva,
- + void *data, unsigned int len, u8 op)
- +{
- + gpa_t gpa;
- + int toproc;
- + int ret;
- +
- + while(len){
- + gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
- + if(gpa == UNMAPPED_GVA)
- + return -1;
- +
- + toproc = min((unsigned long) len, (PAGE_SIZE - offset_in_page(gpa)));
- + switch(op){
- + case MEM_OP_READ:
- + ret = kvm_read_guest(vcpu->kvm, gpa, data, toproc);
- + break;
- + case MEM_OP_WRITE:
- + ret = kvm_write_guest(vcpu->kvm, gpa, data, toproc);
- + break;
- + default:
- + return -1;
- + }
- +
- + if(ret < 0)
- + return -1;
- +
- + len -= toproc;
- + data += toproc;
- + gva += toproc;
- + }
- +
- + return len;
- +}
- +
/* Read len bytes from guest-virtual address gva into data.
 * Returns 0 on success, -1 on failure (see mem_op). */
static int read_guest_mem(struct kvm_vcpu *vcpu, gva_t gva,
	void *data, unsigned int len)
{
	return mem_op(vcpu, gva, data, len, MEM_OP_READ);
}
- +
/* Write len bytes from data to guest-virtual address gva.
 * Returns 0 on success, -1 on failure (see mem_op). */
static int write_guest_mem(struct kvm_vcpu *vcpu, gva_t gva,
	void *data, unsigned int len)
{
	return mem_op(vcpu, gva, data, len, MEM_OP_WRITE);
}
- +
- +static u8 read_byte_opcode(struct kvm_vcpu *vcpu)
- +{
- + unsigned long rip;
- + u8 opcode;
- + int ret;
- +
- + rip = vmcs_readl(GUEST_RIP);
- + ret = read_guest_mem(vcpu, rip, &opcode, sizeof(opcode));
- + if(ret < 0)
- + return 0;
- +
- + return opcode;
- +}
- +
- +static u32 read_stack_top(struct kvm_vcpu *vcpu)
- +{
- + unsigned long rsp;
- + u32 stack_top;
- + int ret;
- +
- + rsp = vmcs_readl(GUEST_RSP);
- + ret = read_guest_mem(vcpu, rsp, &stack_top, sizeof(stack_top));
- + if(ret < 0)
- + return 0;
- +
- + return stack_top;
- +}
- +
- +static void write_stack_top(struct kvm_vcpu *vcpu, u32 stack_top)
- +{
- + unsigned long rsp;
- +
- + rsp = vmcs_readl(GUEST_RSP);
- + write_guest_mem(vcpu, rsp, &stack_top, sizeof(stack_top));
- +}
- +
- +static void process_op_pushf(struct kvm_vcpu *vcpu)
- +{
- + unsigned long rip;
- + u32 stack_top;
- +
- + rip = vmcs_readl(GUEST_RIP);
- + if(rip >= KERNEL_START)
- + return;
- +
- + if(vcpu->azure.post_pushf_ip == rip){
- + stack_top = read_stack_top(vcpu) & ~(X86_EFLAGS_TF);
- + if(stack_top != 0)
- + write_stack_top(vcpu, stack_top);
- + vcpu->azure.post_pushf_ip = 0;
- + }
- +
- + if(read_byte_opcode(vcpu) == PUSHF_OPCODE)
- + vcpu->azure.post_pushf_ip = rip + PUSHF_SIZE;
- +}
- +
- +static void process_op_popf(struct kvm_vcpu *vcpu){
- + unsigned long rip;
- +
- + rip = vmcs_readl(GUEST_RIP);
- + if(rip >= KERNEL_START)
- + return;
- +
- + if(vcpu->azure.post_popf_ip == rip)
- + vcpu->azure.popf_fwd_tf = 1;
- +
- + if(read_byte_opcode(vcpu) == POPF_OPCODE)
- + vcpu->azure.post_popf_ip = rip + POPF_SIZE;
- +}
- +
- +static void handle_debug_exception(struct kvm_vcpu *vcpu)
- +{
- + unsigned long rip;
- +
- + rip = vmcs_readl(GUEST_RIP);
- + if(rip == vcpu->azure.target_entrypoint)
- + printk("azure: target entrypoint executed utc %ld\n", get_seconds());
- +
- + /* additional malware analysis code here */
- +
- +}
- +
- static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
- {
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- @@ -2269,8 +2405,17 @@ static int handle_exception(struct kvm_v
- if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) ==
- (INTR_TYPE_EXCEPTION | 1)) {
- - kvm_run->exit_reason = KVM_EXIT_DEBUG;
- - return 0;
- + u8 popf_fwd_tf = vcpu->azure.popf_fwd_tf;
- + vcpu->azure.popf_fwd_tf = 0;
- + process_op_pushf(vcpu);
- + process_op_popf(vcpu);
- + handle_debug_exception(vcpu);
- +
- + kvm_run->exit_reason = KVM_EXIT_DEBUG;
- + if(popf_fwd_tf)
- + return 0;
- + else
- + return 1;
- }
- kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
- kvm_run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK;
- @@ -2329,12 +2474,185 @@ vmx_patch_hypercall(struct kvm_vcpu *vcp
- hypercall[2] = 0xc1;
- }
- +static gva_t read_msw_ethread_gva(struct kvm_vcpu *vcpu)
- +{
- + unsigned long fs_base;
- + u32 msw_ethread_gva;
- + int ret;
- +
- + fs_base = vmcs_readl(GUEST_FS_BASE);
- + ret = read_guest_mem(vcpu, fs_base + FS_ETHREAD_OFFSET,
- + &msw_ethread_gva, sizeof(msw_ethread_gva));
- + if(ret < 0)
- + return 0;
- +
- + return msw_ethread_gva;
- +}
- +
- +static gva_t read_msw_eprocess_gva(struct kvm_vcpu *vcpu)
- +{
- + gva_t msw_ethread_gva;
- + u32 msw_eprocess_gva;
- + int ret;
- +
- + msw_ethread_gva = read_msw_ethread_gva(vcpu);
- + if(msw_ethread_gva == 0)
- + return 0;
- +
- + ret = read_guest_mem(vcpu, msw_ethread_gva + ETHREAD_EPROCESS_OFFSET,
- + &msw_eprocess_gva, sizeof(msw_eprocess_gva));
- + if(ret < 0)
- + return 0;
- +
- + return msw_eprocess_gva;
- +}
- +
- +static gva_t read_msw_peb_gva(struct kvm_vcpu *vcpu)
- +{
- + gva_t msw_eprocess_gva;
- + u32 msw_peb_gva;
- + int ret;
- +
- + msw_eprocess_gva = read_msw_eprocess_gva(vcpu);
- + if(msw_eprocess_gva == 0)
- + return 0;
- +
- + ret = read_guest_mem(vcpu, msw_eprocess_gva + EPROCESS_PEB_OFFSET,
- + &msw_peb_gva, sizeof(msw_peb_gva));
- + if(ret < 0)
- + return 0;
- +
- + return msw_peb_gva;
- +}
- +
- +static int read_msw_peb(struct kvm_vcpu *vcpu, struct msw_peb *msw_peb_p)
- +{
- + gva_t msw_peb_gva;
- + int ret;
- +
- + msw_peb_gva = read_msw_peb_gva(vcpu);
- + if(msw_peb_gva == 0)
- + return -1;
- +
- + ret = read_guest_mem(vcpu, msw_peb_gva, msw_peb_p, sizeof(*msw_peb_p));
- + if(ret < 0)
- + return -1;
- +
- + return 0;
- +}
- +
- +static int read_msw_peb_ldr_data(struct kvm_vcpu *vcpu,
- + gva_t msw_peb_ldr_data_gva,
- + struct msw_peb_ldr_data *msw_peb_ldr_data_p)
- +{
- + int ret;
- +
- + if(msw_peb_ldr_data_gva == 0)
- + return -1;
- +
- + ret = read_guest_mem(vcpu, msw_peb_ldr_data_gva,
- + msw_peb_ldr_data_p, sizeof(*msw_peb_ldr_data_p));
- + if(ret < 0)
- + return -1;
- +
- + return 0;
- +}
- +
- +static int read_msw_ldr_mod(struct kvm_vcpu *vcpu, gva_t msw_ldr_mod_gva,
- + struct msw_ldr_mod *msw_ldr_mod_p)
- +{
- + int ret;
- +
- + if(msw_ldr_mod_gva == 0)
- + return -1;
- +
- + ret = read_guest_mem(vcpu, msw_ldr_mod_gva,
- + msw_ldr_mod_p, sizeof(*msw_ldr_mod_p));
- + if(ret < 0)
- + return -1;
- +
- + return 0;
- +}
- +
- +static void read_target_info(struct kvm_vcpu *vcpu, u32 cr3)
- +{
- + struct msw_peb msw_peb;
- + struct msw_peb_ldr_data msw_peb_ldr_data;
- + struct msw_ldr_mod msw_ldr_mod;
- + int ret;
- +
- + if(vcpu->azure.target_cr3 == 0)
- + vcpu->azure.target_cr3 = cr3;
- +
- + if(vcpu->azure.target_entrypoint != 0)
- + return;
- +
- + ret = read_msw_peb(vcpu, &msw_peb);
- + if(ret < 0)
- + return;
- + ret = read_msw_peb_ldr_data(vcpu, msw_peb.loader_data, &msw_peb_ldr_data);
- + if(ret < 0)
- + return;
- + ret = read_msw_ldr_mod(vcpu, msw_peb_ldr_data.in_ld_order_mod_list.flink,
- + &msw_ldr_mod);
- + if(ret < 0)
- + return;
- +
- + if(msw_ldr_mod.base_address == 0)
- + return;
- +
- + vcpu->azure.target_entrypoint = msw_ldr_mod.entrypoint;
- + vcpu->azure.target_base = msw_ldr_mod.base_address;
- + vcpu->azure.target_size = msw_ldr_mod.size_of_image;
- +
- + printk("azure: peb info available utc %ld\n", get_seconds());
- + printk("azure: imagebase (msw_ldr_mod) 0x%x\n", msw_ldr_mod.base_address);
- + printk("azure: imagesize (msw_ldr_mod) 0x%x\n", msw_ldr_mod.size_of_image);
- + printk("azure: entrypoint (msw_ldr_mod) 0x%x\n", msw_ldr_mod.entrypoint);
- +}
- +
- +static void read_msw_image_name(struct kvm_vcpu *vcpu, char *image_name_p)
- +{
- + gva_t msw_eprocess_gva;
- +
- + memset(image_name_p, 0, IMAGE_NAME_LEN);
- +
- + msw_eprocess_gva = read_msw_eprocess_gva(vcpu);
- + if(msw_eprocess_gva == 0)
- + return;
- +
- + read_guest_mem(vcpu, msw_eprocess_gva + EPROCESS_IMAGE_NAME_OFFSET,
- + image_name_p, IMAGE_NAME_LEN - 1);
- +}
- +
- +static uint8_t is_target_process(struct kvm_vcpu *vcpu, u32 cr3)
- +{
- + char image_name[IMAGE_NAME_LEN];
- +
- + if(vcpu->azure.target_cr3 != 0){
- + if(vcpu->azure.target_cr3 == cr3)
- + return 1;
- + else
- + return 0;
- + }
- +
- + read_msw_image_name(vcpu, image_name);
- + if(strcmp(image_name, TARGET_NAME) == 0){
- + printk("azure: target located utc %ld\n", get_seconds());
- + printk("azure: target cr3 0x%x\n", cr3);
- + return 1;
- + }
- + else{
- + return 0;
- + }
- +}
- +
- static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
- {
- unsigned long exit_qualification;
- int cr;
- int reg;
- -
- +
- exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
- cr = exit_qualification & 15;
- reg = (exit_qualification >> 8) & 15;
- @@ -2352,6 +2670,16 @@ static int handle_cr(struct kvm_vcpu *vc
- vcpu_load_rsp_rip(vcpu);
- kvm_set_cr3(vcpu, vcpu->arch.regs[reg]);
- skip_emulated_instruction(vcpu);
- + if(vcpu->azure.enabled == 1){
- + if(is_target_process(vcpu, vcpu->arch.regs[reg])){
- + read_target_info(vcpu, vcpu->arch.regs[reg]);
- + vcpu->azure.set_tf = 1;
- + vcpu->azure.prv_set_tf = 1;
- + }
- + else{
- + vcpu->azure.set_tf = 0;
- + }
- + }
- return 1;
- case 4:
- vcpu_load_rsp_rip(vcpu);
- @@ -2490,6 +2818,12 @@ static int handle_wrmsr(struct kvm_vcpu
- }
- skip_emulated_instruction(vcpu);
- +
- + if(vcpu->azure.enabled == 0){
- + vcpu->azure.enabled = 1;
- + printk("azure: enabled\n");
- + }
- +
- return 1;
- }
- @@ -2816,11 +3150,46 @@ static void fixup_rmode_irq(struct vcpu_
- | vmx->rmode.irq.vector;
- }
- +static void unset_hlt_movss_intrblt(void){
- + u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
- + if(interruptibility & 3){
- + interruptibility &= ~(3);
- + vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
- + }
- +}
- +
- +static void set_guest_tf(struct kvm_vcpu *vcpu){
- + unsigned long flags;
- +
- + flags = vmcs_readl(GUEST_RFLAGS);
- + flags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
- + vmcs_writel(GUEST_RFLAGS, flags);
- + unset_hlt_movss_intrblt();
- + update_exception_bitmap(vcpu);
- +}
- +
- +static void unset_guest_tf(struct kvm_vcpu *vcpu){
- + unsigned long flags;
- +
- + flags = vmcs_readl(GUEST_RFLAGS);
- + flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
- + vmcs_writel(GUEST_RFLAGS, flags);
- + update_exception_bitmap(vcpu);
- +}
- +
- static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
- {
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- u32 intr_info;
- + if(vcpu->azure.set_tf == 1)
- + set_guest_tf(vcpu);
- +
- + if(vcpu->azure.set_tf == 0 && vcpu->azure.prv_set_tf == 1){
- + unset_guest_tf(vcpu);
- + vcpu->azure.prv_set_tf = 0;
- + }
- +
- /*
- * Loading guest fpu may have cleared host cr0.ts
- */
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement