From: Tamas K Lengyel
To: xen-devel@lists.xenproject.org
Date: Wed, 8 Jan 2020 09:13:58 -0800
Subject: [Xen-devel] [PATCH v4 01/18] x86/hvm: introduce hvm_copy_context_and_params
Cc: Andrew Cooper, Tamas K Lengyel, Wei Liu, Jan Beulich, Roger Pau Monné

Currently the hvm parameters are only accessible via the HVMOP hypercalls. In
this patch we introduce a new function that can copy both the hvm context and
parameters directly into a target domain.

Signed-off-by: Tamas K Lengyel
---
 xen/arch/x86/hvm/hvm.c        | 241 +++++++++++++++++++++-------------
 xen/include/asm-x86/hvm/hvm.h |   2 +
 2 files changed, 152 insertions(+), 91 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 4723f5d09c..24f08d7043 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4067,16 +4067,17 @@ static int hvmop_set_evtchn_upcall_vector(
 }
 
 static int hvm_allow_set_param(struct domain *d,
-                               const struct xen_hvm_param *a)
+                               uint32_t index,
+                               uint64_t new_value)
 {
-    uint64_t value = d->arch.hvm.params[a->index];
+    uint64_t value = d->arch.hvm.params[index];
     int rc;
 
     rc = xsm_hvm_param(XSM_TARGET, d, HVMOP_set_param);
     if ( rc )
         return rc;
 
-    switch ( a->index )
+    switch ( index )
     {
     /* The following parameters can be set by the guest. */
     case HVM_PARAM_CALLBACK_IRQ:
@@ -4109,7 +4110,7 @@ static int hvm_allow_set_param(struct domain *d,
     if ( rc )
         return rc;
 
-    switch ( a->index )
+    switch ( index )
     {
     /* The following parameters should only be changed once. */
     case HVM_PARAM_VIRIDIAN:
@@ -4119,7 +4120,7 @@ static int hvm_allow_set_param(struct domain *d,
     case HVM_PARAM_NR_IOREQ_SERVER_PAGES:
     case HVM_PARAM_ALTP2M:
     case HVM_PARAM_MCA_CAP:
-        if ( value != 0 && a->value != value )
+        if ( value != 0 && new_value != value )
             rc = -EEXIST;
         break;
     default:
@@ -4129,49 +4130,32 @@ static int hvm_allow_set_param(struct domain *d,
     return rc;
 }
 
-static int hvmop_set_param(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_param_t) arg)
+static int hvm_set_param(struct domain *d, uint32_t index, uint64_t value)
 {
     struct domain *curr_d = current->domain;
-    struct xen_hvm_param a;
-    struct domain *d;
-    struct vcpu *v;
     int rc;
+    struct vcpu *v;
 
-    if ( copy_from_guest(&a, arg, 1) )
-        return -EFAULT;
-
-    if ( a.index >= HVM_NR_PARAMS )
+    if ( index >= HVM_NR_PARAMS )
         return -EINVAL;
 
-    /* Make sure the above bound check is not bypassed during speculation. */
-    block_speculation();
-
-    d = rcu_lock_domain_by_any_id(a.domid);
-    if ( d == NULL )
-        return -ESRCH;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = hvm_allow_set_param(d, &a);
+    rc = hvm_allow_set_param(d, index, value);
     if ( rc )
         goto out;
 
-    switch ( a.index )
+    switch ( index )
     {
     case HVM_PARAM_CALLBACK_IRQ:
-        hvm_set_callback_via(d, a.value);
+        hvm_set_callback_via(d, value);
         hvm_latch_shinfo_size(d);
         break;
     case HVM_PARAM_TIMER_MODE:
-        if ( a.value > HVMPTM_one_missed_tick_pending )
+        if ( value > HVMPTM_one_missed_tick_pending )
             rc = -EINVAL;
         break;
     case HVM_PARAM_VIRIDIAN:
-        if ( (a.value & ~HVMPV_feature_mask) ||
-             !(a.value & HVMPV_base_freq) )
+        if ( (value & ~HVMPV_feature_mask) ||
+             !(value & HVMPV_base_freq) )
             rc = -EINVAL;
         break;
     case HVM_PARAM_IDENT_PT:
@@ -4181,7 +4165,7 @@ static int hvmop_set_param(
          */
         if ( !paging_mode_hap(d) || !cpu_has_vmx )
         {
-            d->arch.hvm.params[a.index] = a.value;
+            d->arch.hvm.params[index] = value;
             break;
         }
 
@@ -4196,7 +4180,7 @@
 
         rc = 0;
         domain_pause(d);
-        d->arch.hvm.params[a.index] = a.value;
+        d->arch.hvm.params[index] = value;
         for_each_vcpu ( d, v )
             paging_update_cr3(v, false);
         domain_unpause(d);
@@ -4205,23 +4189,23 @@ static int hvmop_set_param(
         break;
     case HVM_PARAM_DM_DOMAIN:
         /* The only value this should ever be set to is DOMID_SELF */
-        if ( a.value != DOMID_SELF )
+        if ( value != DOMID_SELF )
             rc = -EINVAL;
 
-        a.value = curr_d->domain_id;
+        value = curr_d->domain_id;
         break;
     case HVM_PARAM_ACPI_S_STATE:
         rc = 0;
-        if ( a.value == 3 )
+        if ( value == 3 )
             hvm_s3_suspend(d);
-        else if ( a.value == 0 )
+        else if ( value == 0 )
             hvm_s3_resume(d);
         else
             rc = -EINVAL;
 
         break;
     case HVM_PARAM_ACPI_IOPORTS_LOCATION:
-        rc = pmtimer_change_ioport(d, a.value);
+        rc = pmtimer_change_ioport(d, value);
         break;
     case HVM_PARAM_MEMORY_EVENT_CR0:
     case HVM_PARAM_MEMORY_EVENT_CR3:
@@ -4236,24 +4220,24 @@ static int hvmop_set_param(
         rc = xsm_hvm_param_nested(XSM_PRIV, d);
         if ( rc )
             break;
-        if ( a.value > 1 )
+        if ( value > 1 )
             rc = -EINVAL;
         /*
          * Remove the check below once we have
          * shadow-on-shadow.
          */
-        if ( !paging_mode_hap(d) && a.value )
+        if ( !paging_mode_hap(d) && value )
             rc = -EINVAL;
-        if ( a.value &&
+        if ( value &&
             d->arch.hvm.params[HVM_PARAM_ALTP2M] )
             rc = -EINVAL;
         /* Set up NHVM state for any vcpus that are already up. */
-        if ( a.value &&
+        if ( value &&
             !d->arch.hvm.params[HVM_PARAM_NESTEDHVM] )
             for_each_vcpu(d, v)
                 if ( rc == 0 )
                     rc = nestedhvm_vcpu_initialise(v);
-        if ( !a.value || rc )
+        if ( !value || rc )
             for_each_vcpu(d, v)
                 nestedhvm_vcpu_destroy(v);
         break;
@@ -4261,30 +4245,30 @@ static int hvmop_set_param(
         rc = xsm_hvm_param_altp2mhvm(XSM_PRIV, d);
         if ( rc )
             break;
-        if ( a.value > XEN_ALTP2M_limited )
+        if ( value > XEN_ALTP2M_limited )
             rc = -EINVAL;
-        if ( a.value &&
+        if ( value &&
             d->arch.hvm.params[HVM_PARAM_NESTEDHVM] )
             rc = -EINVAL;
         break;
     case HVM_PARAM_TRIPLE_FAULT_REASON:
-        if ( a.value > SHUTDOWN_MAX )
+        if ( value > SHUTDOWN_MAX )
             rc = -EINVAL;
         break;
     case HVM_PARAM_IOREQ_SERVER_PFN:
-        d->arch.hvm.ioreq_gfn.base = a.value;
+        d->arch.hvm.ioreq_gfn.base = value;
         break;
     case HVM_PARAM_NR_IOREQ_SERVER_PAGES:
    {
         unsigned int i;
 
-        if ( a.value == 0 ||
-             a.value > sizeof(d->arch.hvm.ioreq_gfn.mask) * 8 )
+        if ( value == 0 ||
+             value > sizeof(d->arch.hvm.ioreq_gfn.mask) * 8 )
         {
             rc = -EINVAL;
             break;
         }
-        for ( i = 0; i < a.value; i++ )
+        for ( i = 0; i < value; i++ )
             set_bit(i, &d->arch.hvm.ioreq_gfn.mask);
 
         break;
@@ -4296,35 +4280,35 @@ static int hvmop_set_param(
                      sizeof(d->arch.hvm.ioreq_gfn.legacy_mask) * 8);
         BUILD_BUG_ON(HVM_PARAM_BUFIOREQ_PFN >
                      sizeof(d->arch.hvm.ioreq_gfn.legacy_mask) * 8);
-        if ( a.value )
-            set_bit(a.index, &d->arch.hvm.ioreq_gfn.legacy_mask);
+        if ( value )
+            set_bit(index, &d->arch.hvm.ioreq_gfn.legacy_mask);
         break;
 
     case HVM_PARAM_X87_FIP_WIDTH:
-        if ( a.value != 0 && a.value != 4 && a.value != 8 )
+        if ( value != 0 && value != 4 && value != 8 )
         {
             rc = -EINVAL;
             break;
         }
-        d->arch.x87_fip_width = a.value;
+        d->arch.x87_fip_width = value;
         break;
 
     case HVM_PARAM_VM86_TSS:
         /* Hardware would silently truncate high bits. */
-        if ( a.value != (uint32_t)a.value )
+        if ( value != (uint32_t)value )
         {
             if ( d == curr_d )
                 domain_crash(d);
             rc = -EINVAL;
         }
         /* Old hvmloader binaries hardcode the size to 128 bytes. */
-        if ( a.value )
-            a.value |= (128ULL << 32) | VM86_TSS_UPDATED;
-        a.index = HVM_PARAM_VM86_TSS_SIZED;
+        if ( value )
+            value |= (128ULL << 32) | VM86_TSS_UPDATED;
+        index = HVM_PARAM_VM86_TSS_SIZED;
         break;
 
     case HVM_PARAM_VM86_TSS_SIZED:
-        if ( (a.value >> 32) < sizeof(struct tss32) )
+        if ( (value >> 32) < sizeof(struct tss32) )
         {
             if ( d == curr_d )
                 domain_crash(d);
@@ -4335,26 +4319,56 @@ static int hvmop_set_param(
          * 256 bits interrupt redirection bitmap + 64k bits I/O bitmap
          * plus one padding byte).
          */
-        if ( (a.value >> 32) > sizeof(struct tss32) +
+        if ( (value >> 32) > sizeof(struct tss32) +
                                (0x100 / 8) + (0x10000 / 8) + 1 )
-            a.value = (uint32_t)a.value |
+            value = (uint32_t)value |
                       ((sizeof(struct tss32) + (0x100 / 8) +
                                                (0x10000 / 8) + 1) << 32);
-        a.value |= VM86_TSS_UPDATED;
+        value |= VM86_TSS_UPDATED;
         break;
 
     case HVM_PARAM_MCA_CAP:
-        rc = vmce_enable_mca_cap(d, a.value);
+        rc = vmce_enable_mca_cap(d, value);
         break;
     }
 
     if ( rc != 0 )
         goto out;
 
-    d->arch.hvm.params[a.index] = a.value;
+    d->arch.hvm.params[index] = value;
 
     HVM_DBG_LOG(DBG_LEVEL_HCALL, "set param %u = %"PRIx64,
-                a.index, a.value);
+                index, value);
+
+ out:
+    return rc;
+}
+
+int hvmop_set_param(
+    XEN_GUEST_HANDLE_PARAM(xen_hvm_param_t) arg)
+{
+    struct xen_hvm_param a;
+    struct domain *d;
+    int rc;
+
+    if ( copy_from_guest(&a, arg, 1) )
+        return -EFAULT;
+
+    if ( a.index >= HVM_NR_PARAMS )
+        return -EINVAL;
+
+    /* Make sure the above bound check is not bypassed during speculation. */
+    block_speculation();
+
+    d = rcu_lock_domain_by_any_id(a.domid);
+    if ( d == NULL )
+        return -ESRCH;
+
+    rc = -EINVAL;
+    if ( !is_hvm_domain(d) )
+        goto out;
+
+    rc = hvm_set_param(d, a.index, a.value);
 
  out:
     rcu_unlock_domain(d);
@@ -4362,7 +4376,7 @@ static int hvmop_set_param(
 }
 
 static int hvm_allow_get_param(struct domain *d,
-                               const struct xen_hvm_param *a)
+                               uint32_t index)
 {
     int rc;
 
@@ -4370,7 +4384,7 @@ static int hvm_allow_get_param(struct domain *d,
     if ( rc )
         return rc;
 
-    switch ( a->index )
+    switch ( index )
     {
     /* The following parameters can be read by the guest. */
     case HVM_PARAM_CALLBACK_IRQ:
@@ -4400,6 +4414,43 @@ static int hvm_allow_get_param(struct domain *d,
     return rc;
 }
 
+static int hvm_get_param(struct domain *d, uint32_t index, uint64_t *value)
+{
+    int rc;
+
+    if ( index >= HVM_NR_PARAMS || !value )
+        return -EINVAL;
+
+    rc = hvm_allow_get_param(d, index);
+    if ( rc )
+        return rc;
+
+    switch ( index )
+    {
+    case HVM_PARAM_ACPI_S_STATE:
+        *value = d->arch.hvm.is_s3_suspended ? 3 : 0;
+        break;
+
+    case HVM_PARAM_VM86_TSS:
+        *value = (uint32_t)d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED];
+        break;
+
+    case HVM_PARAM_VM86_TSS_SIZED:
+        *value = d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED] &
+                 ~VM86_TSS_UPDATED;
+        break;
+
+    case HVM_PARAM_X87_FIP_WIDTH:
+        *value = d->arch.x87_fip_width;
+        break;
+    default:
+        *value = d->arch.hvm.params[index];
+        break;
+    }
+
+    return 0;
+};
+
 static int hvmop_get_param(
     XEN_GUEST_HANDLE_PARAM(xen_hvm_param_t) arg)
 {
@@ -4424,33 +4475,10 @@ static int hvmop_get_param(
     if ( !is_hvm_domain(d) )
         goto out;
 
-    rc = hvm_allow_get_param(d, &a);
+    rc = hvm_get_param(d, a.index, &a.value);
     if ( rc )
         goto out;
 
-    switch ( a.index )
-    {
-    case HVM_PARAM_ACPI_S_STATE:
-        a.value = d->arch.hvm.is_s3_suspended ? 3 : 0;
-        break;
-
-    case HVM_PARAM_VM86_TSS:
-        a.value = (uint32_t)d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED];
-        break;
-
-    case HVM_PARAM_VM86_TSS_SIZED:
-        a.value = d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED] &
-                  ~VM86_TSS_UPDATED;
-        break;
-
-    case HVM_PARAM_X87_FIP_WIDTH:
-        a.value = d->arch.x87_fip_width;
-        break;
-    default:
-        a.value = d->arch.hvm.params[a.index];
-        break;
-    }
-
     rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
 
     HVM_DBG_LOG(DBG_LEVEL_HCALL, "get param %u = %"PRIx64,
@@ -5266,6 +5294,37 @@ void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
     alternative_vcall(hvm_funcs.set_segment_register, v, seg, reg);
 }
 
+int hvm_copy_context_and_params(struct domain *src, struct domain *dst)
+{
+    int rc, i;
+    struct hvm_domain_context c = { };
+
+    c.size = hvm_save_size(src);
+    if ( (c.data = xmalloc_bytes(c.size)) == NULL )
+        return -ENOMEM;
+
+    for ( i = 0; i < HVM_NR_PARAMS; i++ )
+    {
+        uint64_t value = 0;
+
+        if ( hvm_get_param(src, i, &value) || !value )
+            continue;
+
+        if ( (rc = hvm_set_param(dst, i, value)) )
+            goto out;
+    }
+
+    if ( (rc = hvm_save(src, &c)) )
+        goto out;
+
+    c.cur = 0;
+    rc = hvm_load(dst, &c);
+
+out:
+    xfree(c.data);
+    return rc;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 09793c12e9..6106b82c95 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -336,6 +336,8 @@ unsigned long hvm_cr4_guest_valid_bits(const struct domain *d, bool restore);
 bool hvm_flush_vcpu_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
                         void *ctxt);
 
+int hvm_copy_context_and_params(struct domain *src, struct domain *dst);
+
 #ifdef CONFIG_HVM
 
 #define hvm_get_guest_tsc(v) hvm_get_guest_tsc_fixed(v, 0)
-- 
2.20.1
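
For context, below is a minimal sketch of how a caller inside Xen might use the
new helper once it holds references to both domains. The wrapper name
clone_hvm_state, the include choices, and the pause/unpause pattern are
illustrative assumptions for this note only; they are not part of this patch.

/*
 * Illustrative sketch only -- not part of this patch.  A hypothetical
 * caller (the name clone_hvm_state is made up) that copies all HVM state
 * from one domain to another using the new helper.
 */
#include <xen/sched.h>      /* struct domain, domain_pause()/unpause() */
#include <asm/hvm/hvm.h>    /* hvm_copy_context_and_params() */

static int clone_hvm_state(struct domain *src, struct domain *dst)
{
    int rc;

    if ( !is_hvm_domain(src) || !is_hvm_domain(dst) )
        return -EINVAL;

    /* Keep both domains quiescent while params and context are copied. */
    domain_pause(src);
    domain_pause(dst);

    /* Copies every set HVM param, then the full HVM context blob. */
    rc = hvm_copy_context_and_params(src, dst);

    domain_unpause(dst);
    domain_unpause(src);

    return rc;
}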