From: Alexander Graf
To: QEMU Developers
Cc: Peter Maydell, Eduardo Habkost, Philippe Mathieu-Daudé,
    Richard Henderson, Cameron Esfahani, Alex Bennée, Roman Bolshakov,
    qemu-arm, Frank Yang, Paolo Bonzini, Peter Collingbourne
Subject: [PATCH v8 11/19] hvf: Introduce hvf vcpu struct
Date: Wed, 19 May 2021 22:22:45 +0200
Message-Id: <20210519202253.76782-12-agraf@csgraf.de>
In-Reply-To: <20210519202253.76782-1-agraf@csgraf.de>
References: <20210519202253.76782-1-agraf@csgraf.de>
We will need more than a single field for hvf going forward. To keep the
global vcpu struct uncluttered, let's allocate a special hvf vcpu struct,
similar to how hax does it.

Signed-off-by: Alexander Graf
Reviewed-by: Roman Bolshakov
Tested-by: Roman Bolshakov
Reviewed-by: Alex Bennée
Reviewed-by: Sergio Lopez

---
v4 -> v5:
  - Use g_free() on destroy

---
 accel/hvf/hvf-accel-ops.c   |   8 +-
 include/hw/core/cpu.h       |   3 +-
 include/sysemu/hvf_int.h    |   4 +
 target/i386/hvf/hvf.c       | 104 +++++++++---------
 target/i386/hvf/vmx.h       |  24 +++--
 target/i386/hvf/x86.c       |  28 ++---
 target/i386/hvf/x86_descr.c |  26 ++---
 target/i386/hvf/x86_emu.c   |  62 +++++------
 target/i386/hvf/x86_mmu.c   |   4 +-
 target/i386/hvf/x86_task.c  |  12 +--
 target/i386/hvf/x86hvf.c    | 210 ++++++++++++++++++------------------
 11 files changed, 248 insertions(+), 237 deletions(-)

diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
index 14fc49791e..ded918c443 100644
--- a/accel/hvf/hvf-accel-ops.c
+++ b/accel/hvf/hvf-accel-ops.c
@@ -363,16 +363,20 @@ type_init(hvf_type_init);
 
 static void hvf_vcpu_destroy(CPUState *cpu)
 {
-    hv_return_t ret = hv_vcpu_destroy(cpu->hvf_fd);
+    hv_return_t ret = hv_vcpu_destroy(cpu->hvf->fd);
     assert_hvf_ok(ret);
 
     hvf_arch_vcpu_destroy(cpu);
+    g_free(cpu->hvf);
+    cpu->hvf = NULL;
 }
 
 static int hvf_init_vcpu(CPUState *cpu)
 {
     int r;
 
+    cpu->hvf = g_malloc0(sizeof(*cpu->hvf));
+
     /* init cpu signals */
     sigset_t set;
     struct sigaction sigact;
@@ -384,7 +388,7 @@ static int hvf_init_vcpu(CPUState *cpu)
     pthread_sigmask(SIG_BLOCK, NULL, &set);
     sigdelset(&set, SIG_IPI);
 
-    r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf_fd, HV_VCPU_DEFAULT);
+    r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf->fd, HV_VCPU_DEFAULT);
     cpu->vcpu_dirty = 1;
     assert_hvf_ok(r);
 
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index d45f78290e..47d82a5aa9 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -251,6 +251,7 @@ struct KVMState;
 struct kvm_run;
 
 struct hax_vcpu_state;
+struct hvf_vcpu_state;
 
 #define TB_JMP_CACHE_BITS 12
 #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
@@ -436,7 +437,7 @@ struct CPUState {
 
     struct hax_vcpu_state *hax_vcpu;
 
-    int hvf_fd;
+    struct hvf_vcpu_state *hvf;
 
     /* track IOMMUs whose translations we've cached in the TCG TLB */
     GArray *iommu_notifiers;
diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h
index fd1dcaf26e..8b66a4e7d0 100644
--- a/include/sysemu/hvf_int.h
+++ b/include/sysemu/hvf_int.h
@@ -43,6 +43,10 @@ struct HVFState {
 };
 extern HVFState *hvf_state;
 
+struct hvf_vcpu_state {
+    int fd;
+};
+
 void assert_hvf_ok(hv_return_t ret);
 int hvf_arch_init_vcpu(CPUState *cpu);
 void hvf_arch_vcpu_destroy(CPUState *cpu);
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
index 02f7be6cfd..346dbcc26f 100644
--- a/target/i386/hvf/hvf.c
+++ b/target/i386/hvf/hvf.c
@@ -80,11 +80,11 @@ void vmx_update_tpr(CPUState *cpu)
     int tpr = cpu_get_apic_tpr(x86_cpu->apic_state) << 4;
     int irr = apic_get_highest_priority_irr(x86_cpu->apic_state);
 
-    wreg(cpu->hvf_fd, HV_X86_TPR, tpr);
+    wreg(cpu->hvf->fd, HV_X86_TPR, tpr);
     if (irr == -1) {
-        wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);
+        wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, 0);
     } else {
-        wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? tpr >> 4 :
+        wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, (irr > tpr) ?
tpr >> 4 : irr >> 4); } } @@ -92,7 +92,7 @@ void vmx_update_tpr(CPUState *cpu) static void update_apic_tpr(CPUState *cpu) { X86CPU *x86_cpu =3D X86_CPU(cpu); - int tpr =3D rreg(cpu->hvf_fd, HV_X86_TPR) >> 4; + int tpr =3D rreg(cpu->hvf->fd, HV_X86_TPR) >> 4; cpu_set_apic_tpr(x86_cpu->apic_state, tpr); } =20 @@ -244,43 +244,43 @@ int hvf_arch_init_vcpu(CPUState *cpu) } =20 /* set VMCS control fields */ - wvmcs(cpu->hvf_fd, VMCS_PIN_BASED_CTLS, + wvmcs(cpu->hvf->fd, VMCS_PIN_BASED_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx_cap_pinbased, VMCS_PIN_BASED_CTLS_EXTINT | VMCS_PIN_BASED_CTLS_NMI | VMCS_PIN_BASED_CTLS_VNMI)); - wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, + wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased, VMCS_PRI_PROC_BASED_CTLS_HLT | VMCS_PRI_PROC_BASED_CTLS_MWAIT | VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET | VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW) | VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL); - wvmcs(cpu->hvf_fd, VMCS_SEC_PROC_BASED_CTLS, + wvmcs(cpu->hvf->fd, VMCS_SEC_PROC_BASED_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased2, VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES)); =20 - wvmcs(cpu->hvf_fd, VMCS_ENTRY_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx_= cap_entry, + wvmcs(cpu->hvf->fd, VMCS_ENTRY_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx= _cap_entry, 0)); - wvmcs(cpu->hvf_fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */ + wvmcs(cpu->hvf->fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */ =20 - wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0); + wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, 0); =20 x86cpu =3D X86_CPU(cpu); x86cpu->env.xsave_buf =3D qemu_memalign(4096, 4096); =20 - hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_STAR, 1); - hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_LSTAR, 1); - hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_CSTAR, 1); - hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FMASK, 1); - hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FSBASE, 1); - hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_GSBASE, 1); - hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_KERNELGSBASE, 1); - hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_TSC_AUX, 1); - hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_TSC, 1); - hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_CS, 1); - hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_EIP, 1); - hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_ESP, 1); + hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_STAR, 1); + hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_LSTAR, 1); + hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_CSTAR, 1); + hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_FMASK, 1); + hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_FSBASE, 1); + hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_GSBASE, 1); + hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_KERNELGSBASE, 1); + hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_TSC_AUX, 1); + hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_TSC, 1); + hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_CS, 1); + hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_EIP, 1); + hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_ESP, 1); =20 return 0; } @@ -321,16 +321,16 @@ static void hvf_store_events(CPUState *cpu, uint32_t = ins_len, uint64_t idtvec_in } if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { env->has_error_code =3D true; - env->error_code =3D rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_ERRO= R); + env->error_code =3D rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_ERR= OR); } } - if ((rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) & + if ((rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY) & 
VMCS_INTERRUPTIBILITY_NMI_BLOCKING)) { env->hflags2 |=3D HF2_NMI_MASK; } else { env->hflags2 &=3D ~HF2_NMI_MASK; } - if (rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) & + if (rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY) & (VMCS_INTERRUPTIBILITY_STI_BLOCKING | VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) { env->hflags |=3D HF_INHIBIT_IRQ_MASK; @@ -409,20 +409,20 @@ int hvf_vcpu_exec(CPUState *cpu) return EXCP_HLT; } =20 - hv_return_t r =3D hv_vcpu_run(cpu->hvf_fd); + hv_return_t r =3D hv_vcpu_run(cpu->hvf->fd); assert_hvf_ok(r); =20 /* handle VMEXIT */ - uint64_t exit_reason =3D rvmcs(cpu->hvf_fd, VMCS_EXIT_REASON); - uint64_t exit_qual =3D rvmcs(cpu->hvf_fd, VMCS_EXIT_QUALIFICATION); - uint32_t ins_len =3D (uint32_t)rvmcs(cpu->hvf_fd, + uint64_t exit_reason =3D rvmcs(cpu->hvf->fd, VMCS_EXIT_REASON); + uint64_t exit_qual =3D rvmcs(cpu->hvf->fd, VMCS_EXIT_QUALIFICATION= ); + uint32_t ins_len =3D (uint32_t)rvmcs(cpu->hvf->fd, VMCS_EXIT_INSTRUCTION_LENGTH); =20 - uint64_t idtvec_info =3D rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INF= O); + uint64_t idtvec_info =3D rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_IN= FO); =20 hvf_store_events(cpu, ins_len, idtvec_info); - rip =3D rreg(cpu->hvf_fd, HV_X86_RIP); - env->eflags =3D rreg(cpu->hvf_fd, HV_X86_RFLAGS); + rip =3D rreg(cpu->hvf->fd, HV_X86_RIP); + env->eflags =3D rreg(cpu->hvf->fd, HV_X86_RFLAGS); =20 qemu_mutex_lock_iothread(); =20 @@ -452,7 +452,7 @@ int hvf_vcpu_exec(CPUState *cpu) case EXIT_REASON_EPT_FAULT: { hvf_slot *slot; - uint64_t gpa =3D rvmcs(cpu->hvf_fd, VMCS_GUEST_PHYSICAL_ADDRES= S); + uint64_t gpa =3D rvmcs(cpu->hvf->fd, VMCS_GUEST_PHYSICAL_ADDRE= SS); =20 if (((idtvec_info & VMCS_IDT_VEC_VALID) =3D=3D 0) && ((exit_qual & EXIT_QUAL_NMIUDTI) !=3D 0)) { @@ -497,7 +497,7 @@ int hvf_vcpu_exec(CPUState *cpu) store_regs(cpu); break; } else if (!string && !in) { - RAX(env) =3D rreg(cpu->hvf_fd, HV_X86_RAX); + RAX(env) =3D rreg(cpu->hvf->fd, HV_X86_RAX); hvf_handle_io(env, port, &RAX(env), 1, size, 1); macvm_set_rip(cpu, rip + ins_len); break; @@ -513,21 +513,21 @@ int hvf_vcpu_exec(CPUState *cpu) break; } case EXIT_REASON_CPUID: { - uint32_t rax =3D (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX); - uint32_t rbx =3D (uint32_t)rreg(cpu->hvf_fd, HV_X86_RBX); - uint32_t rcx =3D (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX); - uint32_t rdx =3D (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX); + uint32_t rax =3D (uint32_t)rreg(cpu->hvf->fd, HV_X86_RAX); + uint32_t rbx =3D (uint32_t)rreg(cpu->hvf->fd, HV_X86_RBX); + uint32_t rcx =3D (uint32_t)rreg(cpu->hvf->fd, HV_X86_RCX); + uint32_t rdx =3D (uint32_t)rreg(cpu->hvf->fd, HV_X86_RDX); =20 if (rax =3D=3D 1) { /* CPUID1.ecx.OSXSAVE needs to know CR4 */ - env->cr[4] =3D rvmcs(cpu->hvf_fd, VMCS_GUEST_CR4); + env->cr[4] =3D rvmcs(cpu->hvf->fd, VMCS_GUEST_CR4); } hvf_cpu_x86_cpuid(env, rax, rcx, &rax, &rbx, &rcx, &rdx); =20 - wreg(cpu->hvf_fd, HV_X86_RAX, rax); - wreg(cpu->hvf_fd, HV_X86_RBX, rbx); - wreg(cpu->hvf_fd, HV_X86_RCX, rcx); - wreg(cpu->hvf_fd, HV_X86_RDX, rdx); + wreg(cpu->hvf->fd, HV_X86_RAX, rax); + wreg(cpu->hvf->fd, HV_X86_RBX, rbx); + wreg(cpu->hvf->fd, HV_X86_RCX, rcx); + wreg(cpu->hvf->fd, HV_X86_RDX, rdx); =20 macvm_set_rip(cpu, rip + ins_len); break; @@ -535,16 +535,16 @@ int hvf_vcpu_exec(CPUState *cpu) case EXIT_REASON_XSETBV: { X86CPU *x86_cpu =3D X86_CPU(cpu); CPUX86State *env =3D &x86_cpu->env; - uint32_t eax =3D (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX); - uint32_t ecx =3D (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX); - uint32_t edx =3D (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX); + uint32_t eax =3D 
(uint32_t)rreg(cpu->hvf->fd, HV_X86_RAX); + uint32_t ecx =3D (uint32_t)rreg(cpu->hvf->fd, HV_X86_RCX); + uint32_t edx =3D (uint32_t)rreg(cpu->hvf->fd, HV_X86_RDX); =20 if (ecx) { macvm_set_rip(cpu, rip + ins_len); break; } env->xcr0 =3D ((uint64_t)edx << 32) | eax; - wreg(cpu->hvf_fd, HV_X86_XCR0, env->xcr0 | 1); + wreg(cpu->hvf->fd, HV_X86_XCR0, env->xcr0 | 1); macvm_set_rip(cpu, rip + ins_len); break; } @@ -583,11 +583,11 @@ int hvf_vcpu_exec(CPUState *cpu) =20 switch (cr) { case 0x0: { - macvm_set_cr0(cpu->hvf_fd, RRX(env, reg)); + macvm_set_cr0(cpu->hvf->fd, RRX(env, reg)); break; } case 4: { - macvm_set_cr4(cpu->hvf_fd, RRX(env, reg)); + macvm_set_cr4(cpu->hvf->fd, RRX(env, reg)); break; } case 8: { @@ -623,7 +623,7 @@ int hvf_vcpu_exec(CPUState *cpu) break; } case EXIT_REASON_TASK_SWITCH: { - uint64_t vinfo =3D rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO); + uint64_t vinfo =3D rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_INFO= ); x68_segment_selector sel =3D {.sel =3D exit_qual & 0xffff}; vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3, vinfo & VMCS_INTR_VALID, vinfo & VECTORING_INFO_VECTOR_MASK, = vinfo @@ -636,8 +636,8 @@ int hvf_vcpu_exec(CPUState *cpu) break; } case EXIT_REASON_RDPMC: - wreg(cpu->hvf_fd, HV_X86_RAX, 0); - wreg(cpu->hvf_fd, HV_X86_RDX, 0); + wreg(cpu->hvf->fd, HV_X86_RAX, 0); + wreg(cpu->hvf->fd, HV_X86_RDX, 0); macvm_set_rip(cpu, rip + ins_len); break; case VMX_REASON_VMCALL: diff --git a/target/i386/hvf/vmx.h b/target/i386/hvf/vmx.h index 24c4cdf0be..6df87116f6 100644 --- a/target/i386/hvf/vmx.h +++ b/target/i386/hvf/vmx.h @@ -30,6 +30,8 @@ #include "vmcs.h" #include "cpu.h" #include "x86.h" +#include "sysemu/hvf.h" +#include "sysemu/hvf_int.h" =20 #include "exec/address-spaces.h" =20 @@ -179,15 +181,15 @@ static inline void macvm_set_rip(CPUState *cpu, uint6= 4_t rip) uint64_t val; =20 /* BUG, should take considering overlap.. 
*/ - wreg(cpu->hvf_fd, HV_X86_RIP, rip); + wreg(cpu->hvf->fd, HV_X86_RIP, rip); env->eip =3D rip; =20 /* after moving forward in rip, we need to clean INTERRUPTABILITY */ - val =3D rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY); + val =3D rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY); if (val & (VMCS_INTERRUPTIBILITY_STI_BLOCKING | VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) { env->hflags &=3D ~HF_INHIBIT_IRQ_MASK; - wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, + wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY, val & ~(VMCS_INTERRUPTIBILITY_STI_BLOCKING | VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)); } @@ -199,9 +201,9 @@ static inline void vmx_clear_nmi_blocking(CPUState *cpu) CPUX86State *env =3D &x86_cpu->env; =20 env->hflags2 &=3D ~HF2_NMI_MASK; - uint32_t gi =3D (uint32_t) rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBIL= ITY); + uint32_t gi =3D (uint32_t) rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBI= LITY); gi &=3D ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING; - wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi); + wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY, gi); } =20 static inline void vmx_set_nmi_blocking(CPUState *cpu) @@ -210,16 +212,16 @@ static inline void vmx_set_nmi_blocking(CPUState *cpu) CPUX86State *env =3D &x86_cpu->env; =20 env->hflags2 |=3D HF2_NMI_MASK; - uint32_t gi =3D (uint32_t)rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILI= TY); + uint32_t gi =3D (uint32_t)rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBIL= ITY); gi |=3D VMCS_INTERRUPTIBILITY_NMI_BLOCKING; - wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi); + wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY, gi); } =20 static inline void vmx_set_nmi_window_exiting(CPUState *cpu) { uint64_t val; - val =3D rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS); - wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val | + val =3D rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS); + wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val | VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING); =20 } @@ -228,8 +230,8 @@ static inline void vmx_clear_nmi_window_exiting(CPUStat= e *cpu) { =20 uint64_t val; - val =3D rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS); - wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val & + val =3D rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS); + wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val & ~VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING); } =20 diff --git a/target/i386/hvf/x86.c b/target/i386/hvf/x86.c index cd045183a8..2898bb70a8 100644 --- a/target/i386/hvf/x86.c +++ b/target/i386/hvf/x86.c @@ -62,11 +62,11 @@ bool x86_read_segment_descriptor(struct CPUState *cpu, } =20 if (GDT_SEL =3D=3D sel.ti) { - base =3D rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE); - limit =3D rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT); + base =3D rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_BASE); + limit =3D rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_LIMIT); } else { - base =3D rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE); - limit =3D rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT); + base =3D rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_BASE); + limit =3D rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_LIMIT); } =20 if (sel.index * 8 >=3D limit) { @@ -85,11 +85,11 @@ bool x86_write_segment_descriptor(struct CPUState *cpu, uint32_t limit; =20 if (GDT_SEL =3D=3D sel.ti) { - base =3D rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE); - limit =3D rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT); + base =3D rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_BASE); + limit =3D rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_LIMIT); } else { - base =3D rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE); - limit =3D rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT); + base =3D 
rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_BASE); + limit =3D rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_LIMIT); } =20 if (sel.index * 8 >=3D limit) { @@ -103,8 +103,8 @@ bool x86_write_segment_descriptor(struct CPUState *cpu, bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_de= sc, int gate) { - target_ulong base =3D rvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_BASE); - uint32_t limit =3D rvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_LIMIT); + target_ulong base =3D rvmcs(cpu->hvf->fd, VMCS_GUEST_IDTR_BASE); + uint32_t limit =3D rvmcs(cpu->hvf->fd, VMCS_GUEST_IDTR_LIMIT); =20 memset(idt_desc, 0, sizeof(*idt_desc)); if (gate * 8 >=3D limit) { @@ -118,7 +118,7 @@ bool x86_read_call_gate(struct CPUState *cpu, struct x8= 6_call_gate *idt_desc, =20 bool x86_is_protected(struct CPUState *cpu) { - uint64_t cr0 =3D rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0); + uint64_t cr0 =3D rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0); return cr0 & CR0_PE; } =20 @@ -136,7 +136,7 @@ bool x86_is_v8086(struct CPUState *cpu) =20 bool x86_is_long_mode(struct CPUState *cpu) { - return rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER) & MSR_EFER_LMA; + return rvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER) & MSR_EFER_LMA; } =20 bool x86_is_long64_mode(struct CPUState *cpu) @@ -149,13 +149,13 @@ bool x86_is_long64_mode(struct CPUState *cpu) =20 bool x86_is_paging_mode(struct CPUState *cpu) { - uint64_t cr0 =3D rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0); + uint64_t cr0 =3D rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0); return cr0 & CR0_PG; } =20 bool x86_is_pae_enabled(struct CPUState *cpu) { - uint64_t cr4 =3D rvmcs(cpu->hvf_fd, VMCS_GUEST_CR4); + uint64_t cr4 =3D rvmcs(cpu->hvf->fd, VMCS_GUEST_CR4); return cr4 & CR4_PAE; } =20 diff --git a/target/i386/hvf/x86_descr.c b/target/i386/hvf/x86_descr.c index 9f539e73f6..af15c06ac5 100644 --- a/target/i386/hvf/x86_descr.c +++ b/target/i386/hvf/x86_descr.c @@ -48,47 +48,47 @@ static const struct vmx_segment_field { =20 uint32_t vmx_read_segment_limit(CPUState *cpu, X86Seg seg) { - return (uint32_t)rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].limit); + return (uint32_t)rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].limit); } =20 uint32_t vmx_read_segment_ar(CPUState *cpu, X86Seg seg) { - return (uint32_t)rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].ar_bytes); + return (uint32_t)rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].ar_bytes); } =20 uint64_t vmx_read_segment_base(CPUState *cpu, X86Seg seg) { - return rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].base); + return rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].base); } =20 x68_segment_selector vmx_read_segment_selector(CPUState *cpu, X86Seg seg) { x68_segment_selector sel; - sel.sel =3D rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector); + sel.sel =3D rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].selector); return sel; } =20 void vmx_write_segment_selector(struct CPUState *cpu, x68_segment_selector= selector, X86Seg seg) { - wvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector, selector.sel); + wvmcs(cpu->hvf->fd, vmx_segment_fields[seg].selector, selector.sel); } =20 void vmx_read_segment_descriptor(struct CPUState *cpu, struct vmx_segment = *desc, X86Seg seg) { - desc->sel =3D rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector); - desc->base =3D rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].base); - desc->limit =3D rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].limit); - desc->ar =3D rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].ar_bytes); + desc->sel =3D rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].selector); + desc->base =3D rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].base); + desc->limit =3D 
rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].limit); + desc->ar =3D rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].ar_bytes); } =20 void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc,= X86Seg seg) { const struct vmx_segment_field *sf =3D &vmx_segment_fields[seg]; =20 - wvmcs(cpu->hvf_fd, sf->base, desc->base); - wvmcs(cpu->hvf_fd, sf->limit, desc->limit); - wvmcs(cpu->hvf_fd, sf->selector, desc->sel); - wvmcs(cpu->hvf_fd, sf->ar_bytes, desc->ar); + wvmcs(cpu->hvf->fd, sf->base, desc->base); + wvmcs(cpu->hvf->fd, sf->limit, desc->limit); + wvmcs(cpu->hvf->fd, sf->selector, desc->sel); + wvmcs(cpu->hvf->fd, sf->ar_bytes, desc->ar); } =20 void x86_segment_descriptor_to_vmx(struct CPUState *cpu, x68_segment_selec= tor selector, struct x86_segment_descriptor *desc, struct vmx_segment *vmx_= desc) diff --git a/target/i386/hvf/x86_emu.c b/target/i386/hvf/x86_emu.c index e52c39ddb1..7c8203b21f 100644 --- a/target/i386/hvf/x86_emu.c +++ b/target/i386/hvf/x86_emu.c @@ -674,7 +674,7 @@ void simulate_rdmsr(struct CPUState *cpu) =20 switch (msr) { case MSR_IA32_TSC: - val =3D rdtscp() + rvmcs(cpu->hvf_fd, VMCS_TSC_OFFSET); + val =3D rdtscp() + rvmcs(cpu->hvf->fd, VMCS_TSC_OFFSET); break; case MSR_IA32_APICBASE: val =3D cpu_get_apic_base(X86_CPU(cpu)->apic_state); @@ -683,16 +683,16 @@ void simulate_rdmsr(struct CPUState *cpu) val =3D x86_cpu->ucode_rev; break; case MSR_EFER: - val =3D rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER); + val =3D rvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER); break; case MSR_FSBASE: - val =3D rvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE); + val =3D rvmcs(cpu->hvf->fd, VMCS_GUEST_FS_BASE); break; case MSR_GSBASE: - val =3D rvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE); + val =3D rvmcs(cpu->hvf->fd, VMCS_GUEST_GS_BASE); break; case MSR_KERNELGSBASE: - val =3D rvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE); + val =3D rvmcs(cpu->hvf->fd, VMCS_HOST_FS_BASE); break; case MSR_STAR: abort(); @@ -780,13 +780,13 @@ void simulate_wrmsr(struct CPUState *cpu) cpu_set_apic_base(X86_CPU(cpu)->apic_state, data); break; case MSR_FSBASE: - wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE, data); + wvmcs(cpu->hvf->fd, VMCS_GUEST_FS_BASE, data); break; case MSR_GSBASE: - wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE, data); + wvmcs(cpu->hvf->fd, VMCS_GUEST_GS_BASE, data); break; case MSR_KERNELGSBASE: - wvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE, data); + wvmcs(cpu->hvf->fd, VMCS_HOST_FS_BASE, data); break; case MSR_STAR: abort(); @@ -799,9 +799,9 @@ void simulate_wrmsr(struct CPUState *cpu) break; case MSR_EFER: /*printf("new efer %llx\n", EFER(cpu));*/ - wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, data); + wvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER, data); if (data & MSR_EFER_NXE) { - hv_vcpu_invalidate_tlb(cpu->hvf_fd); + hv_vcpu_invalidate_tlb(cpu->hvf->fd); } break; case MSR_MTRRphysBase(0): @@ -1425,21 +1425,21 @@ void load_regs(struct CPUState *cpu) CPUX86State *env =3D &x86_cpu->env; =20 int i =3D 0; - RRX(env, R_EAX) =3D rreg(cpu->hvf_fd, HV_X86_RAX); - RRX(env, R_EBX) =3D rreg(cpu->hvf_fd, HV_X86_RBX); - RRX(env, R_ECX) =3D rreg(cpu->hvf_fd, HV_X86_RCX); - RRX(env, R_EDX) =3D rreg(cpu->hvf_fd, HV_X86_RDX); - RRX(env, R_ESI) =3D rreg(cpu->hvf_fd, HV_X86_RSI); - RRX(env, R_EDI) =3D rreg(cpu->hvf_fd, HV_X86_RDI); - RRX(env, R_ESP) =3D rreg(cpu->hvf_fd, HV_X86_RSP); - RRX(env, R_EBP) =3D rreg(cpu->hvf_fd, HV_X86_RBP); + RRX(env, R_EAX) =3D rreg(cpu->hvf->fd, HV_X86_RAX); + RRX(env, R_EBX) =3D rreg(cpu->hvf->fd, HV_X86_RBX); + RRX(env, R_ECX) =3D rreg(cpu->hvf->fd, HV_X86_RCX); + RRX(env, R_EDX) =3D rreg(cpu->hvf->fd, HV_X86_RDX); 
+ RRX(env, R_ESI) =3D rreg(cpu->hvf->fd, HV_X86_RSI); + RRX(env, R_EDI) =3D rreg(cpu->hvf->fd, HV_X86_RDI); + RRX(env, R_ESP) =3D rreg(cpu->hvf->fd, HV_X86_RSP); + RRX(env, R_EBP) =3D rreg(cpu->hvf->fd, HV_X86_RBP); for (i =3D 8; i < 16; i++) { - RRX(env, i) =3D rreg(cpu->hvf_fd, HV_X86_RAX + i); + RRX(env, i) =3D rreg(cpu->hvf->fd, HV_X86_RAX + i); } =20 - env->eflags =3D rreg(cpu->hvf_fd, HV_X86_RFLAGS); + env->eflags =3D rreg(cpu->hvf->fd, HV_X86_RFLAGS); rflags_to_lflags(env); - env->eip =3D rreg(cpu->hvf_fd, HV_X86_RIP); + env->eip =3D rreg(cpu->hvf->fd, HV_X86_RIP); } =20 void store_regs(struct CPUState *cpu) @@ -1448,20 +1448,20 @@ void store_regs(struct CPUState *cpu) CPUX86State *env =3D &x86_cpu->env; =20 int i =3D 0; - wreg(cpu->hvf_fd, HV_X86_RAX, RAX(env)); - wreg(cpu->hvf_fd, HV_X86_RBX, RBX(env)); - wreg(cpu->hvf_fd, HV_X86_RCX, RCX(env)); - wreg(cpu->hvf_fd, HV_X86_RDX, RDX(env)); - wreg(cpu->hvf_fd, HV_X86_RSI, RSI(env)); - wreg(cpu->hvf_fd, HV_X86_RDI, RDI(env)); - wreg(cpu->hvf_fd, HV_X86_RBP, RBP(env)); - wreg(cpu->hvf_fd, HV_X86_RSP, RSP(env)); + wreg(cpu->hvf->fd, HV_X86_RAX, RAX(env)); + wreg(cpu->hvf->fd, HV_X86_RBX, RBX(env)); + wreg(cpu->hvf->fd, HV_X86_RCX, RCX(env)); + wreg(cpu->hvf->fd, HV_X86_RDX, RDX(env)); + wreg(cpu->hvf->fd, HV_X86_RSI, RSI(env)); + wreg(cpu->hvf->fd, HV_X86_RDI, RDI(env)); + wreg(cpu->hvf->fd, HV_X86_RBP, RBP(env)); + wreg(cpu->hvf->fd, HV_X86_RSP, RSP(env)); for (i =3D 8; i < 16; i++) { - wreg(cpu->hvf_fd, HV_X86_RAX + i, RRX(env, i)); + wreg(cpu->hvf->fd, HV_X86_RAX + i, RRX(env, i)); } =20 lflags_to_rflags(env); - wreg(cpu->hvf_fd, HV_X86_RFLAGS, env->eflags); + wreg(cpu->hvf->fd, HV_X86_RFLAGS, env->eflags); macvm_set_rip(cpu, env->eip); } =20 diff --git a/target/i386/hvf/x86_mmu.c b/target/i386/hvf/x86_mmu.c index 78fff04684..e9ed0f5aa1 100644 --- a/target/i386/hvf/x86_mmu.c +++ b/target/i386/hvf/x86_mmu.c @@ -127,7 +127,7 @@ static bool test_pt_entry(struct CPUState *cpu, struct = gpt_translation *pt, pt->err_code |=3D MMU_PAGE_PT; } =20 - uint32_t cr0 =3D rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0); + uint32_t cr0 =3D rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0); /* check protection */ if (cr0 & CR0_WP) { if (pt->write_access && !pte_write_access(pte)) { @@ -172,7 +172,7 @@ static bool walk_gpt(struct CPUState *cpu, target_ulong= addr, int err_code, { int top_level, level; bool is_large =3D false; - target_ulong cr3 =3D rvmcs(cpu->hvf_fd, VMCS_GUEST_CR3); + target_ulong cr3 =3D rvmcs(cpu->hvf->fd, VMCS_GUEST_CR3); uint64_t page_mask =3D pae ? 
PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK; =20 memset(pt, 0, sizeof(*pt)); diff --git a/target/i386/hvf/x86_task.c b/target/i386/hvf/x86_task.c index d66dfd7669..422156128b 100644 --- a/target/i386/hvf/x86_task.c +++ b/target/i386/hvf/x86_task.c @@ -62,7 +62,7 @@ static void load_state_from_tss32(CPUState *cpu, struct x= 86_tss_segment32 *tss) X86CPU *x86_cpu =3D X86_CPU(cpu); CPUX86State *env =3D &x86_cpu->env; =20 - wvmcs(cpu->hvf_fd, VMCS_GUEST_CR3, tss->cr3); + wvmcs(cpu->hvf->fd, VMCS_GUEST_CR3, tss->cr3); =20 env->eip =3D tss->eip; env->eflags =3D tss->eflags | 2; @@ -111,11 +111,11 @@ static int task_switch_32(CPUState *cpu, x68_segment_= selector tss_sel, x68_segme =20 void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, i= nt reason, bool gate_valid, uint8_t gate, uint64_t gate_type) { - uint64_t rip =3D rreg(cpu->hvf_fd, HV_X86_RIP); + uint64_t rip =3D rreg(cpu->hvf->fd, HV_X86_RIP); if (!gate_valid || (gate_type !=3D VMCS_INTR_T_HWEXCEPTION && gate_type !=3D VMCS_INTR_T_HWINTR && gate_type !=3D VMCS_INTR_T_NMI)) { - int ins_len =3D rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH); + int ins_len =3D rvmcs(cpu->hvf->fd, VMCS_EXIT_INSTRUCTION_LENGTH); macvm_set_rip(cpu, rip + ins_len); return; } @@ -174,12 +174,12 @@ void vmx_handle_task_switch(CPUState *cpu, x68_segmen= t_selector tss_sel, int rea //ret =3D task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, = &next_tss_desc); VM_PANIC("task_switch_16"); =20 - macvm_set_cr0(cpu->hvf_fd, rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0) | CR0_TS= ); + macvm_set_cr0(cpu->hvf->fd, rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0) | CR0_= TS); x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg); vmx_write_segment_descriptor(cpu, &vmx_seg, R_TR); =20 store_regs(cpu); =20 - hv_vcpu_invalidate_tlb(cpu->hvf_fd); - hv_vcpu_flush(cpu->hvf_fd); + hv_vcpu_invalidate_tlb(cpu->hvf->fd); + hv_vcpu_flush(cpu->hvf->fd); } diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c index cc381307ab..28cfee4f60 100644 --- a/target/i386/hvf/x86hvf.c +++ b/target/i386/hvf/x86hvf.c @@ -80,7 +80,7 @@ void hvf_put_xsave(CPUState *cpu_state) =20 x86_cpu_xsave_all_areas(X86_CPU(cpu_state), xsave); =20 - if (hv_vcpu_write_fpstate(cpu_state->hvf_fd, (void*)xsave, 4096)) { + if (hv_vcpu_write_fpstate(cpu_state->hvf->fd, (void*)xsave, 4096)) { abort(); } } @@ -90,19 +90,19 @@ void hvf_put_segments(CPUState *cpu_state) CPUX86State *env =3D &X86_CPU(cpu_state)->env; struct vmx_segment seg; =20 - wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit); - wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE, env->idt.base); + wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit); + wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_BASE, env->idt.base); =20 - wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit); - wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE, env->gdt.base); + wvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit); + wvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_BASE, env->gdt.base); =20 - /* wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR2, env->cr[2]); */ - wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3, env->cr[3]); + /* wvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR2, env->cr[2]); */ + wvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR3, env->cr[3]); vmx_update_tpr(cpu_state); - wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER, env->efer); + wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IA32_EFER, env->efer); =20 - macvm_set_cr4(cpu_state->hvf_fd, env->cr[4]); - macvm_set_cr0(cpu_state->hvf_fd, env->cr[0]); + macvm_set_cr4(cpu_state->hvf->fd, 
env->cr[4]); + macvm_set_cr0(cpu_state->hvf->fd, env->cr[0]); =20 hvf_set_segment(cpu_state, &seg, &env->segs[R_CS], false); vmx_write_segment_descriptor(cpu_state, &seg, R_CS); @@ -128,31 +128,31 @@ void hvf_put_segments(CPUState *cpu_state) hvf_set_segment(cpu_state, &seg, &env->ldt, false); vmx_write_segment_descriptor(cpu_state, &seg, R_LDTR); =20 - hv_vcpu_flush(cpu_state->hvf_fd); + hv_vcpu_flush(cpu_state->hvf->fd); } =20 void hvf_put_msrs(CPUState *cpu_state) { CPUX86State *env =3D &X86_CPU(cpu_state)->env; =20 - hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS, + hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_CS, env->sysenter_cs); - hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP, + hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_ESP, env->sysenter_esp); - hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP, + hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_EIP, env->sysenter_eip); =20 - hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_STAR, env->star); + hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_STAR, env->star); =20 #ifdef TARGET_X86_64 - hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_CSTAR, env->cstar); - hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, env->kernelgsba= se); - hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FMASK, env->fmask); - hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_LSTAR, env->lstar); + hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_CSTAR, env->cstar); + hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_KERNELGSBASE, env->kernelgsb= ase); + hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_FMASK, env->fmask); + hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_LSTAR, env->lstar); #endif =20 - hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_GSBASE, env->segs[R_GS].base); - hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FSBASE, env->segs[R_FS].base); + hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_GSBASE, env->segs[R_GS].base= ); + hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_FSBASE, env->segs[R_FS].base= ); } =20 =20 @@ -162,7 +162,7 @@ void hvf_get_xsave(CPUState *cpu_state) =20 xsave =3D X86_CPU(cpu_state)->env.xsave_buf; =20 - if (hv_vcpu_read_fpstate(cpu_state->hvf_fd, (void*)xsave, 4096)) { + if (hv_vcpu_read_fpstate(cpu_state->hvf->fd, (void*)xsave, 4096)) { abort(); } =20 @@ -201,17 +201,17 @@ void hvf_get_segments(CPUState *cpu_state) vmx_read_segment_descriptor(cpu_state, &seg, R_LDTR); hvf_get_segment(&env->ldt, &seg); =20 - env->idt.limit =3D rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT); - env->idt.base =3D rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE); - env->gdt.limit =3D rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT); - env->gdt.base =3D rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE); + env->idt.limit =3D rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_LIMIT); + env->idt.base =3D rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_BASE); + env->gdt.limit =3D rvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_LIMIT); + env->gdt.base =3D rvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_BASE); =20 - env->cr[0] =3D rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR0); + env->cr[0] =3D rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR0); env->cr[2] =3D 0; - env->cr[3] =3D rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3); - env->cr[4] =3D rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR4); + env->cr[3] =3D rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR3); + env->cr[4] =3D rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR4); =20 - env->efer =3D rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER); + env->efer =3D rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IA32_EFER); } =20 void hvf_get_msrs(CPUState *cpu_state) @@ -219,27 +219,27 @@ void 
hvf_get_msrs(CPUState *cpu_state) CPUX86State *env =3D &X86_CPU(cpu_state)->env; uint64_t tmp; =20 - hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS, &tmp); + hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_CS, &tmp); env->sysenter_cs =3D tmp; =20 - hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP, &tmp); + hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_ESP, &tmp); env->sysenter_esp =3D tmp; =20 - hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP, &tmp); + hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_EIP, &tmp); env->sysenter_eip =3D tmp; =20 - hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_STAR, &env->star); + hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_STAR, &env->star); =20 #ifdef TARGET_X86_64 - hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_CSTAR, &env->cstar); - hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, &env->kernelgsba= se); - hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_FMASK, &env->fmask); - hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_LSTAR, &env->lstar); + hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_CSTAR, &env->cstar); + hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_KERNELGSBASE, &env->kernelgsb= ase); + hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_FMASK, &env->fmask); + hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_LSTAR, &env->lstar); #endif =20 - hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_APICBASE, &tmp); + hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_APICBASE, &tmp); =20 - env->tsc =3D rdtscp() + rvmcs(cpu_state->hvf_fd, VMCS_TSC_OFFSET); + env->tsc =3D rdtscp() + rvmcs(cpu_state->hvf->fd, VMCS_TSC_OFFSET); } =20 int hvf_put_registers(CPUState *cpu_state) @@ -247,26 +247,26 @@ int hvf_put_registers(CPUState *cpu_state) X86CPU *x86cpu =3D X86_CPU(cpu_state); CPUX86State *env =3D &x86cpu->env; =20 - wreg(cpu_state->hvf_fd, HV_X86_RAX, env->regs[R_EAX]); - wreg(cpu_state->hvf_fd, HV_X86_RBX, env->regs[R_EBX]); - wreg(cpu_state->hvf_fd, HV_X86_RCX, env->regs[R_ECX]); - wreg(cpu_state->hvf_fd, HV_X86_RDX, env->regs[R_EDX]); - wreg(cpu_state->hvf_fd, HV_X86_RBP, env->regs[R_EBP]); - wreg(cpu_state->hvf_fd, HV_X86_RSP, env->regs[R_ESP]); - wreg(cpu_state->hvf_fd, HV_X86_RSI, env->regs[R_ESI]); - wreg(cpu_state->hvf_fd, HV_X86_RDI, env->regs[R_EDI]); - wreg(cpu_state->hvf_fd, HV_X86_R8, env->regs[8]); - wreg(cpu_state->hvf_fd, HV_X86_R9, env->regs[9]); - wreg(cpu_state->hvf_fd, HV_X86_R10, env->regs[10]); - wreg(cpu_state->hvf_fd, HV_X86_R11, env->regs[11]); - wreg(cpu_state->hvf_fd, HV_X86_R12, env->regs[12]); - wreg(cpu_state->hvf_fd, HV_X86_R13, env->regs[13]); - wreg(cpu_state->hvf_fd, HV_X86_R14, env->regs[14]); - wreg(cpu_state->hvf_fd, HV_X86_R15, env->regs[15]); - wreg(cpu_state->hvf_fd, HV_X86_RFLAGS, env->eflags); - wreg(cpu_state->hvf_fd, HV_X86_RIP, env->eip); + wreg(cpu_state->hvf->fd, HV_X86_RAX, env->regs[R_EAX]); + wreg(cpu_state->hvf->fd, HV_X86_RBX, env->regs[R_EBX]); + wreg(cpu_state->hvf->fd, HV_X86_RCX, env->regs[R_ECX]); + wreg(cpu_state->hvf->fd, HV_X86_RDX, env->regs[R_EDX]); + wreg(cpu_state->hvf->fd, HV_X86_RBP, env->regs[R_EBP]); + wreg(cpu_state->hvf->fd, HV_X86_RSP, env->regs[R_ESP]); + wreg(cpu_state->hvf->fd, HV_X86_RSI, env->regs[R_ESI]); + wreg(cpu_state->hvf->fd, HV_X86_RDI, env->regs[R_EDI]); + wreg(cpu_state->hvf->fd, HV_X86_R8, env->regs[8]); + wreg(cpu_state->hvf->fd, HV_X86_R9, env->regs[9]); + wreg(cpu_state->hvf->fd, HV_X86_R10, env->regs[10]); + wreg(cpu_state->hvf->fd, HV_X86_R11, env->regs[11]); + wreg(cpu_state->hvf->fd, HV_X86_R12, env->regs[12]); + wreg(cpu_state->hvf->fd, HV_X86_R13, env->regs[13]); + 
wreg(cpu_state->hvf->fd, HV_X86_R14, env->regs[14]); + wreg(cpu_state->hvf->fd, HV_X86_R15, env->regs[15]); + wreg(cpu_state->hvf->fd, HV_X86_RFLAGS, env->eflags); + wreg(cpu_state->hvf->fd, HV_X86_RIP, env->eip); =20 - wreg(cpu_state->hvf_fd, HV_X86_XCR0, env->xcr0); + wreg(cpu_state->hvf->fd, HV_X86_XCR0, env->xcr0); =20 hvf_put_xsave(cpu_state); =20 @@ -274,14 +274,14 @@ int hvf_put_registers(CPUState *cpu_state) =20 hvf_put_msrs(cpu_state); =20 - wreg(cpu_state->hvf_fd, HV_X86_DR0, env->dr[0]); - wreg(cpu_state->hvf_fd, HV_X86_DR1, env->dr[1]); - wreg(cpu_state->hvf_fd, HV_X86_DR2, env->dr[2]); - wreg(cpu_state->hvf_fd, HV_X86_DR3, env->dr[3]); - wreg(cpu_state->hvf_fd, HV_X86_DR4, env->dr[4]); - wreg(cpu_state->hvf_fd, HV_X86_DR5, env->dr[5]); - wreg(cpu_state->hvf_fd, HV_X86_DR6, env->dr[6]); - wreg(cpu_state->hvf_fd, HV_X86_DR7, env->dr[7]); + wreg(cpu_state->hvf->fd, HV_X86_DR0, env->dr[0]); + wreg(cpu_state->hvf->fd, HV_X86_DR1, env->dr[1]); + wreg(cpu_state->hvf->fd, HV_X86_DR2, env->dr[2]); + wreg(cpu_state->hvf->fd, HV_X86_DR3, env->dr[3]); + wreg(cpu_state->hvf->fd, HV_X86_DR4, env->dr[4]); + wreg(cpu_state->hvf->fd, HV_X86_DR5, env->dr[5]); + wreg(cpu_state->hvf->fd, HV_X86_DR6, env->dr[6]); + wreg(cpu_state->hvf->fd, HV_X86_DR7, env->dr[7]); =20 return 0; } @@ -291,40 +291,40 @@ int hvf_get_registers(CPUState *cpu_state) X86CPU *x86cpu =3D X86_CPU(cpu_state); CPUX86State *env =3D &x86cpu->env; =20 - env->regs[R_EAX] =3D rreg(cpu_state->hvf_fd, HV_X86_RAX); - env->regs[R_EBX] =3D rreg(cpu_state->hvf_fd, HV_X86_RBX); - env->regs[R_ECX] =3D rreg(cpu_state->hvf_fd, HV_X86_RCX); - env->regs[R_EDX] =3D rreg(cpu_state->hvf_fd, HV_X86_RDX); - env->regs[R_EBP] =3D rreg(cpu_state->hvf_fd, HV_X86_RBP); - env->regs[R_ESP] =3D rreg(cpu_state->hvf_fd, HV_X86_RSP); - env->regs[R_ESI] =3D rreg(cpu_state->hvf_fd, HV_X86_RSI); - env->regs[R_EDI] =3D rreg(cpu_state->hvf_fd, HV_X86_RDI); - env->regs[8] =3D rreg(cpu_state->hvf_fd, HV_X86_R8); - env->regs[9] =3D rreg(cpu_state->hvf_fd, HV_X86_R9); - env->regs[10] =3D rreg(cpu_state->hvf_fd, HV_X86_R10); - env->regs[11] =3D rreg(cpu_state->hvf_fd, HV_X86_R11); - env->regs[12] =3D rreg(cpu_state->hvf_fd, HV_X86_R12); - env->regs[13] =3D rreg(cpu_state->hvf_fd, HV_X86_R13); - env->regs[14] =3D rreg(cpu_state->hvf_fd, HV_X86_R14); - env->regs[15] =3D rreg(cpu_state->hvf_fd, HV_X86_R15); + env->regs[R_EAX] =3D rreg(cpu_state->hvf->fd, HV_X86_RAX); + env->regs[R_EBX] =3D rreg(cpu_state->hvf->fd, HV_X86_RBX); + env->regs[R_ECX] =3D rreg(cpu_state->hvf->fd, HV_X86_RCX); + env->regs[R_EDX] =3D rreg(cpu_state->hvf->fd, HV_X86_RDX); + env->regs[R_EBP] =3D rreg(cpu_state->hvf->fd, HV_X86_RBP); + env->regs[R_ESP] =3D rreg(cpu_state->hvf->fd, HV_X86_RSP); + env->regs[R_ESI] =3D rreg(cpu_state->hvf->fd, HV_X86_RSI); + env->regs[R_EDI] =3D rreg(cpu_state->hvf->fd, HV_X86_RDI); + env->regs[8] =3D rreg(cpu_state->hvf->fd, HV_X86_R8); + env->regs[9] =3D rreg(cpu_state->hvf->fd, HV_X86_R9); + env->regs[10] =3D rreg(cpu_state->hvf->fd, HV_X86_R10); + env->regs[11] =3D rreg(cpu_state->hvf->fd, HV_X86_R11); + env->regs[12] =3D rreg(cpu_state->hvf->fd, HV_X86_R12); + env->regs[13] =3D rreg(cpu_state->hvf->fd, HV_X86_R13); + env->regs[14] =3D rreg(cpu_state->hvf->fd, HV_X86_R14); + env->regs[15] =3D rreg(cpu_state->hvf->fd, HV_X86_R15); =20 - env->eflags =3D rreg(cpu_state->hvf_fd, HV_X86_RFLAGS); - env->eip =3D rreg(cpu_state->hvf_fd, HV_X86_RIP); + env->eflags =3D rreg(cpu_state->hvf->fd, HV_X86_RFLAGS); + env->eip =3D rreg(cpu_state->hvf->fd, HV_X86_RIP); =20 
hvf_get_xsave(cpu_state); - env->xcr0 =3D rreg(cpu_state->hvf_fd, HV_X86_XCR0); + env->xcr0 =3D rreg(cpu_state->hvf->fd, HV_X86_XCR0); =20 hvf_get_segments(cpu_state); hvf_get_msrs(cpu_state); =20 - env->dr[0] =3D rreg(cpu_state->hvf_fd, HV_X86_DR0); - env->dr[1] =3D rreg(cpu_state->hvf_fd, HV_X86_DR1); - env->dr[2] =3D rreg(cpu_state->hvf_fd, HV_X86_DR2); - env->dr[3] =3D rreg(cpu_state->hvf_fd, HV_X86_DR3); - env->dr[4] =3D rreg(cpu_state->hvf_fd, HV_X86_DR4); - env->dr[5] =3D rreg(cpu_state->hvf_fd, HV_X86_DR5); - env->dr[6] =3D rreg(cpu_state->hvf_fd, HV_X86_DR6); - env->dr[7] =3D rreg(cpu_state->hvf_fd, HV_X86_DR7); + env->dr[0] =3D rreg(cpu_state->hvf->fd, HV_X86_DR0); + env->dr[1] =3D rreg(cpu_state->hvf->fd, HV_X86_DR1); + env->dr[2] =3D rreg(cpu_state->hvf->fd, HV_X86_DR2); + env->dr[3] =3D rreg(cpu_state->hvf->fd, HV_X86_DR3); + env->dr[4] =3D rreg(cpu_state->hvf->fd, HV_X86_DR4); + env->dr[5] =3D rreg(cpu_state->hvf->fd, HV_X86_DR5); + env->dr[6] =3D rreg(cpu_state->hvf->fd, HV_X86_DR6); + env->dr[7] =3D rreg(cpu_state->hvf->fd, HV_X86_DR7); =20 x86_update_hflags(env); return 0; @@ -333,16 +333,16 @@ int hvf_get_registers(CPUState *cpu_state) static void vmx_set_int_window_exiting(CPUState *cpu) { uint64_t val; - val =3D rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS); - wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val | + val =3D rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS); + wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val | VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING); } =20 void vmx_clear_int_window_exiting(CPUState *cpu) { uint64_t val; - val =3D rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS); - wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val & + val =3D rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS); + wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val & ~VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING); } =20 @@ -378,7 +378,7 @@ bool hvf_inject_interrupts(CPUState *cpu_state) uint64_t info =3D 0; if (have_event) { info =3D vector | intr_type | VMCS_INTR_VALID; - uint64_t reason =3D rvmcs(cpu_state->hvf_fd, VMCS_EXIT_REASON); + uint64_t reason =3D rvmcs(cpu_state->hvf->fd, VMCS_EXIT_REASON); if (env->nmi_injected && reason !=3D EXIT_REASON_TASK_SWITCH) { vmx_clear_nmi_blocking(cpu_state); } @@ -387,17 +387,17 @@ bool hvf_inject_interrupts(CPUState *cpu_state) info &=3D ~(1 << 12); /* clear undefined bit */ if (intr_type =3D=3D VMCS_INTR_T_SWINTR || intr_type =3D=3D VMCS_INTR_T_SWEXCEPTION) { - wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INST_LENGTH, env->ins_= len); + wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INST_LENGTH, env->ins= _len); } =20 if (env->has_error_code) { - wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_EXCEPTION_ERROR, + wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_EXCEPTION_ERROR, env->error_code); /* Indicate that VMCS_ENTRY_EXCEPTION_ERROR is valid */ info |=3D VMCS_INTR_DEL_ERRCODE; } /*printf("reinject %lx err %d\n", info, err);*/ - wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info); + wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, info); }; } =20 @@ -405,7 +405,7 @@ bool hvf_inject_interrupts(CPUState *cpu_state) if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) { cpu_state->interrupt_request &=3D ~CPU_INTERRUPT_NMI; info =3D VMCS_INTR_VALID | VMCS_INTR_T_NMI | EXCP02_NMI; - wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info); + wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, info); } else { vmx_set_nmi_window_exiting(cpu_state); } @@ -417,7 +417,7 @@ bool hvf_inject_interrupts(CPUState *cpu_state) int line =3D cpu_get_pic_interrupt(&x86cpu->env); 
         cpu_state->interrupt_request &= ~CPU_INTERRUPT_HARD;
         if (line >= 0) {
-            wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, line |
+            wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, line |
                   VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);
         }
     }
@@ -433,7 +433,7 @@ int hvf_process_events(CPUState *cpu_state)
     X86CPU *cpu = X86_CPU(cpu_state);
     CPUX86State *env = &cpu->env;
 
-    env->eflags = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);
+    env->eflags = rreg(cpu_state->hvf->fd, HV_X86_RFLAGS);
 
     if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) {
         cpu_synchronize_state(cpu_state);
-- 
2.30.1 (Apple Git-130)
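
As a minimal illustration of the ownership pattern the patch introduces — a
heap-allocated per-vcpu accelerator struct hanging off CPUState, mirroring the
existing hax_vcpu pointer — the sketch below shows the allocate/use/free cycle
in isolation. It is not code from the patch: the demo_* names and plain
calloc()/free() are hypothetical stand-ins for hvf_init_vcpu() /
hvf_vcpu_destroy() and QEMU's g_malloc0()/g_free(), and in the real code the
fd member is filled in by hv_vcpu_create().

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the QEMU types touched by this patch. */
struct hvf_vcpu_state {
    int fd;                        /* vcpu handle; more HVF-only fields can follow */
};

struct CPUState {
    struct hvf_vcpu_state *hvf;    /* was: int hvf_fd */
};

/* Shape of hvf_init_vcpu(): allocate the per-vcpu struct before first use. */
static void demo_init_vcpu(struct CPUState *cpu)
{
    cpu->hvf = calloc(1, sizeof(*cpu->hvf));   /* g_malloc0() in the patch */
    if (!cpu->hvf) {
        abort();                               /* g_malloc0() aborts on OOM */
    }
    cpu->hvf->fd = 1;                          /* hv_vcpu_create() fills this in */
}

/* Shape of hvf_vcpu_destroy(): free the struct and clear the pointer. */
static void demo_destroy_vcpu(struct CPUState *cpu)
{
    free(cpu->hvf);                            /* g_free() in the patch */
    cpu->hvf = NULL;
}

int main(void)
{
    struct CPUState cpu = { 0 };

    demo_init_vcpu(&cpu);
    printf("vcpu fd: %d\n", cpu.hvf->fd);      /* callers now read cpu->hvf->fd */
    demo_destroy_vcpu(&cpu);
    return 0;
}

Keeping accelerator-private state behind a single pointer means future
HVF-only fields land in struct hvf_vcpu_state (sysemu/hvf_int.h) instead of
growing the common CPUState in include/hw/core/cpu.h.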