From: Rahul Singh
To: xen-devel@lists.xenproject.org
Cc: Stefano Stabellini, Julien Grall, Bertrand Marquis, Volodymyr Babchuk
Subject: [RFC PATCH 09/21] xen/arm: vsmmuv3: Add support for cmdqueue handling
Date: Thu, 1 Dec 2022 16:02:33 +0000
Message-Id: <6976a8484515fe02e9c2bd65cfb6a93632a228eb.1669888522.git.rahul.singh@arm.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To:
References:

Add support for virtual cmdqueue handling for guests

Signed-off-by: Rahul Singh
---
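[Editor's note, not part of the submitted patch: the queue bookkeeping in
this patch relies on the index/wrap-bit encoding that SMMUv3 uses for the
command queue PROD and CONS registers. The minimal, standalone sketch below
shows how that arithmetic behaves; the Q_IDX/Q_WRP/Q_OVF definitions and the
queue size used here are assumptions modelled on the usual SMMUv3 driver
macros, not the definitions this series actually imports.]

    /* Hypothetical illustration of the wrap/index arithmetic mirrored by
     * queue_empty()/queue_inc_cons() in the patch (not part of the patch). */
    #include <stdint.h>
    #include <stdio.h>

    #define LOG2SIZE  8                               /* assumed: 2^8 queue entries */
    #define Q_IDX(p)  ((p) & ((1U << LOG2SIZE) - 1))  /* entry index bits */
    #define Q_WRP(p)  ((p) & (1U << LOG2SIZE))        /* wrap bit just above the index */
    #define Q_OVF(p)  ((p) & (1U << 31))              /* overflow flag, preserved on update */

    int main(void)
    {
        /* Consumer sitting on the last slot with the wrap bit clear. */
        uint32_t cons = Q_IDX((1U << LOG2SIZE) - 1);

        /* Same increment as queue_inc_cons(): add 1 to wrap|index, keep OVF. */
        uint32_t next = (Q_WRP(cons) | Q_IDX(cons)) + 1;
        cons = Q_OVF(cons) | Q_WRP(next) | Q_IDX(next);

        /* The index wraps back to 0 and the wrap bit toggles to 1; comparing
         * both fields is how the code can tell an empty queue from a full one. */
        printf("index=%u wrap=%u\n", Q_IDX(cons), (unsigned)!!Q_WRP(cons));
        return 0;
    }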
 xen/drivers/passthrough/arm/vsmmu-v3.c | 101 +++++++++++++++++++++++++
 1 file changed, 101 insertions(+)

diff --git a/xen/drivers/passthrough/arm/vsmmu-v3.c b/xen/drivers/passthrough/arm/vsmmu-v3.c
index c3f99657e6..cc651a2dc8 100644
--- a/xen/drivers/passthrough/arm/vsmmu-v3.c
+++ b/xen/drivers/passthrough/arm/vsmmu-v3.c
@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) */
 
+#include
 #include
 #include
 #include
@@ -24,6 +25,26 @@
 /* Struct to hold the vIOMMU ops and vIOMMU type */
 extern const struct viommu_desc __read_mostly *cur_viommu;
 
+/* SMMUv3 command definitions */
+#define CMDQ_OP_PREFETCH_CFG  0x1
+#define CMDQ_OP_CFGI_STE      0x3
+#define CMDQ_OP_CFGI_ALL      0x4
+#define CMDQ_OP_CFGI_CD       0x5
+#define CMDQ_OP_CFGI_CD_ALL   0x6
+#define CMDQ_OP_TLBI_NH_ASID  0x11
+#define CMDQ_OP_TLBI_NH_VA    0x12
+#define CMDQ_OP_TLBI_NSNH_ALL 0x30
+#define CMDQ_OP_CMD_SYNC      0x46
+
+/* Queue Handling */
+#define Q_BASE(q)     ((q)->q_base & Q_BASE_ADDR_MASK)
+#define Q_CONS_ENT(q) (Q_BASE(q) + Q_IDX(q, (q)->cons) * (q)->ent_size)
+#define Q_PROD_ENT(q) (Q_BASE(q) + Q_IDX(q, (q)->prod) * (q)->ent_size)
+
+/* Helper Macros */
+#define smmu_get_cmdq_enabled(x) FIELD_GET(CR0_CMDQEN, x)
+#define smmu_cmd_get_command(x)  FIELD_GET(CMDQ_0_OP, x)
+
 /* virtual smmu queue */
 struct arm_vsmmu_queue {
     uint64_t q_base; /* base register */
@@ -48,8 +69,80 @@ struct virt_smmu {
     uint64_t gerror_irq_cfg0;
     uint64_t evtq_irq_cfg0;
     struct arm_vsmmu_queue evtq, cmdq;
+    spinlock_t cmd_queue_lock;
 };
 
+/* Queue manipulation functions */
+static bool queue_empty(struct arm_vsmmu_queue *q)
+{
+    return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
+           Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
+}
+
+static void queue_inc_cons(struct arm_vsmmu_queue *q)
+{
+    uint32_t cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;
+    q->cons = Q_OVF(q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
+}
+
+static void dump_smmu_command(uint64_t *command)
+{
+    gdprintk(XENLOG_ERR, "cmd 0x%02llx: %016lx %016lx\n",
+             smmu_cmd_get_command(command[0]), command[0], command[1]);
+}
+static int arm_vsmmu_handle_cmds(struct virt_smmu *smmu)
+{
+    struct arm_vsmmu_queue *q = &smmu->cmdq;
+    struct domain *d = smmu->d;
+    uint64_t command[CMDQ_ENT_DWORDS];
+    paddr_t addr;
+
+    if ( !smmu_get_cmdq_enabled(smmu->cr[0]) )
+        return 0;
+
+    while ( !queue_empty(q) )
+    {
+        int ret;
+
+        addr = Q_CONS_ENT(q);
+        ret = access_guest_memory_by_ipa(d, addr, command,
+                                         sizeof(command), false);
+        if ( ret )
+            return ret;
+
+        switch ( smmu_cmd_get_command(command[0]) )
+        {
+        case CMDQ_OP_CFGI_STE:
+            break;
+        case CMDQ_OP_PREFETCH_CFG:
+        case CMDQ_OP_CFGI_CD:
+        case CMDQ_OP_CFGI_CD_ALL:
+        case CMDQ_OP_CFGI_ALL:
+        case CMDQ_OP_CMD_SYNC:
+            break;
+        case CMDQ_OP_TLBI_NH_ASID:
+        case CMDQ_OP_TLBI_NSNH_ALL:
+        case CMDQ_OP_TLBI_NH_VA:
+            if ( !iommu_iotlb_flush_all(smmu->d, 1) )
+                break;
+        default:
+            gdprintk(XENLOG_ERR, "vSMMUv3: unhandled command\n");
+            dump_smmu_command(command);
+            break;
+        }
+
+        if ( ret )
+        {
+            gdprintk(XENLOG_ERR,
+                     "vSMMUv3: command error %d while handling command\n",
+                     ret);
+            dump_smmu_command(command);
+        }
+        queue_inc_cons(q);
+    }
+    return 0;
+}
+
 static int vsmmuv3_mmio_write(struct vcpu *v, mmio_info_t *info,
                               register_t r, void *priv)
 {
@@ -103,9 +196,15 @@ static int vsmmuv3_mmio_write(struct vcpu *v, mmio_info_t *info,
         break;
 
     case VREG32(ARM_SMMU_CMDQ_PROD):
+        spin_lock(&smmu->cmd_queue_lock);
         reg32 = smmu->cmdq.prod;
         vreg_reg32_update(&reg32, r, info);
         smmu->cmdq.prod = reg32;
+
+        if ( arm_vsmmu_handle_cmds(smmu) )
+            gdprintk(XENLOG_ERR, "error handling vSMMUv3 commands\n");
+
+        spin_unlock(&smmu->cmd_queue_lock);
         break;
 
     case VREG32(ARM_SMMU_CMDQ_CONS):
@@ -321,6 +420,8 @@ static int vsmmuv3_init_single(struct domain *d, paddr_t addr, paddr_t size)
     smmu->evtq.q_base = FIELD_PREP(Q_BASE_LOG2SIZE, SMMU_EVTQS);
     smmu->evtq.ent_size = EVTQ_ENT_DWORDS * DWORDS_BYTES;
 
+    spin_lock_init(&smmu->cmd_queue_lock);
+
     register_mmio_handler(d, &vsmmuv3_mmio_handler, addr, size, smmu);
 
     /* Register the vIOMMU to be able to clean it up later. */
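[Editor's note, not part of the submitted patch: for context, a guest driver
publishes commands by writing the two command doublewords into its command
queue ring and then bumping SMMU_CMDQ_PROD; that MMIO write is what traps
into vsmmuv3_mmio_write() above and drives arm_vsmmu_handle_cmds(). The
sketch below only illustrates how an opcode such as CMD_SYNC is encoded in
and decoded from DWORD 0; the assumption that the opcode occupies bits [7:0]
(the CMDQ_0_OP field) follows the SMMUv3 programming model and is not taken
from this patch.]

    /* Hypothetical, self-contained illustration of the opcode field handling
     * mirrored by smmu_cmd_get_command() (not part of the patch). */
    #include <stdint.h>
    #include <stdio.h>

    #define CMDQ_ENT_DWORDS   2        /* one command is two 64-bit words */
    #define CMDQ_0_OP_MASK    0xffULL  /* assumed: opcode in bits [7:0] of DWORD 0 */
    #define CMDQ_OP_CMD_SYNC  0x46

    int main(void)
    {
        uint64_t command[CMDQ_ENT_DWORDS] = { 0 };

        /* Encode a CMD_SYNC the way a guest driver would before bumping PROD. */
        command[0] |= CMDQ_OP_CMD_SYNC & CMDQ_0_OP_MASK;

        /* Decode it the way the emulation dispatches on the opcode. */
        uint64_t op = command[0] & CMDQ_0_OP_MASK;
        printf("opcode 0x%02llx -> %s\n", (unsigned long long)op,
               op == CMDQ_OP_CMD_SYNC ? "CMDQ_OP_CMD_SYNC" : "other");
        return 0;
    }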
-- 
2.25.1