Provide a helper and use that to issue the invalidation cmd to host SMMUv3.
We only issue one cmd at a time for now.
Support for batching of commands will be added later after analysing the
impact.
Signed-off-by: Shameer Kolothum <skolothumtho@nvidia.com>
---
hw/arm/smmuv3-accel.c | 38 ++++++++++++++++++++++++++++++++++++++
hw/arm/smmuv3-accel.h | 8 ++++++++
hw/arm/smmuv3.c | 30 ++++++++++++++++++++++++++++++
3 files changed, 76 insertions(+)
diff --git a/hw/arm/smmuv3-accel.c b/hw/arm/smmuv3-accel.c
index f4e01fba6d..9ad8595ce2 100644
--- a/hw/arm/smmuv3-accel.c
+++ b/hw/arm/smmuv3-accel.c
@@ -218,6 +218,44 @@ bool smmuv3_accel_install_nested_ste_range(SMMUv3State *s, SMMUSIDRange *range,
return true;
}
+/*
+ * Issue an invalidation cmd to the host SMMUv3.
+ *
+ * Note: sdev may be NULL for invalidation commands that are not tied to
+ * a specific device, e.g. SMMU_CMD_TLBI_NH_ASID, SMMU_CMD_TLBI_NH_VA etc.
+ *
+ * Returns true on success (or when there is nothing to do), false with
+ * errp set if the host invalidation fails.
+ */
+bool smmuv3_accel_issue_inv_cmd(SMMUv3State *bs, void *cmd, SMMUDevice *sdev,
+                                Error **errp)
+{
+    SMMUv3State *s = ARM_SMMUV3(bs);
+    SMMUv3AccelState *s_accel = s->s_accel;
+    IOMMUFDViommu *viommu_core;
+    uint32_t entry_num = 1;
+
+    /*
+     * A missing vIOMMU means no vfio-pci device with an iommufd backend
+     * has been attached yet, so there is no host state to invalidate.
+     * This is not an error.
+     */
+    if (!s->accel || !s_accel->viommu) {
+        return true;
+    }
+
+    /*
+     * We may end up here for any emulated PCI bridge or root port type
+     * devices. However, passing invalidation commands with a sid
+     * (eg: CFGI_CD) to the host SMMUv3 is only allowed for vfio-pci
+     * endpoint devices, i.e. only a device associated with the vIOMMU
+     * (by having a valid vdev) may flush its device cache.
+     */
+    if (sdev && !container_of(sdev, SMMUv3AccelDevice, sdev)->vdev) {
+        return true;
+    }
+
+    viommu_core = &s_accel->viommu->core;
+    /*
+     * We only issue a single command at a time, so on success the output
+     * entry_num is necessarily 1 and need not be checked. Revisit this
+     * when command batching is introduced.
+     */
+    return iommufd_backend_invalidate_cache(
+               viommu_core->iommufd, viommu_core->viommu_id,
+               IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3,
+               sizeof(Cmd), &entry_num, cmd, errp);
+}
+
static SMMUv3AccelDevice *smmuv3_accel_get_dev(SMMUState *bs, SMMUPciBus *sbus,
PCIBus *bus, int devfn)
{
diff --git a/hw/arm/smmuv3-accel.h b/hw/arm/smmuv3-accel.h
index 6242614c00..3bdba47616 100644
--- a/hw/arm/smmuv3-accel.h
+++ b/hw/arm/smmuv3-accel.h
@@ -46,6 +46,8 @@ bool smmuv3_accel_install_nested_ste(SMMUv3State *s, SMMUDevice *sdev, int sid,
Error **errp);
bool smmuv3_accel_install_nested_ste_range(SMMUv3State *s, SMMUSIDRange *range,
Error **errp);
+bool smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd, SMMUDevice *sdev,
+ Error **errp);
#else
static inline void smmuv3_accel_init(SMMUv3State *s)
{
@@ -62,6 +64,12 @@ smmuv3_accel_install_nested_ste_range(SMMUv3State *s, SMMUSIDRange *range,
{
return true;
}
+/* Stub for builds without accel support: nothing to invalidate, succeed. */
+static inline bool
+smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd, SMMUDevice *sdev,
+                           Error **errp)
+{
+    return true;
+}
#endif
#endif /* HW_ARM_SMMUV3_ACCEL_H */
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index 1fd8aaa0c7..3963bdc87f 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -1381,6 +1381,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
{
uint32_t sid = CMD_SID(&cmd);
SMMUDevice *sdev = smmu_find_sdev(bs, sid);
+ Error *local_err = NULL;
if (CMD_SSEC(&cmd)) {
cmd_error = SMMU_CERROR_ILL;
@@ -1393,11 +1394,17 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
trace_smmuv3_cmdq_cfgi_cd(sid);
smmuv3_flush_config(sdev);
+ if (!smmuv3_accel_issue_inv_cmd(s, &cmd, sdev, &local_err)) {
+ error_report_err(local_err);
+ cmd_error = SMMU_CERROR_ILL;
+ break;
+ }
break;
}
case SMMU_CMD_TLBI_NH_ASID:
{
int asid = CMD_ASID(&cmd);
+ Error *local_err = NULL;
int vmid = -1;
if (!STAGE1_SUPPORTED(s)) {
@@ -1416,6 +1423,11 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
trace_smmuv3_cmdq_tlbi_nh_asid(asid);
smmu_inv_notifiers_all(&s->smmu_state);
smmu_iotlb_inv_asid_vmid(bs, asid, vmid);
+ if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, &local_err)) {
+ error_report_err(local_err);
+ cmd_error = SMMU_CERROR_ILL;
+ break;
+ }
break;
}
case SMMU_CMD_TLBI_NH_ALL:
@@ -1440,18 +1452,36 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
QEMU_FALLTHROUGH;
}
case SMMU_CMD_TLBI_NSNH_ALL:
+ {
+ Error *local_err = NULL;
+
trace_smmuv3_cmdq_tlbi_nsnh();
smmu_inv_notifiers_all(&s->smmu_state);
smmu_iotlb_inv_all(bs);
+ if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, &local_err)) {
+ error_report_err(local_err);
+ cmd_error = SMMU_CERROR_ILL;
+ break;
+ }
break;
+ }
case SMMU_CMD_TLBI_NH_VAA:
case SMMU_CMD_TLBI_NH_VA:
+ {
+ Error *local_err = NULL;
+
if (!STAGE1_SUPPORTED(s)) {
cmd_error = SMMU_CERROR_ILL;
break;
}
smmuv3_range_inval(bs, &cmd, SMMU_STAGE_1);
+ if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, &local_err)) {
+ error_report_err(local_err);
+ cmd_error = SMMU_CERROR_ILL;
+ break;
+ }
break;
+ }
case SMMU_CMD_TLBI_S12_VMALL:
{
int vmid = CMD_VMID(&cmd);
--
2.43.0
Hi Shameer,
On 9/29/25 3:36 PM, Shameer Kolothum wrote:
> Provide a helper and use that to issue the invalidation cmd to host SMMUv3.
> We only issue one cmd at a time for now.
>
> Support for batching of commands will be added later after analysing the
> impact.
>
> Signed-off-by: Shameer Kolothum <skolothumtho@nvidia.com>
> ---
> hw/arm/smmuv3-accel.c | 38 ++++++++++++++++++++++++++++++++++++++
> hw/arm/smmuv3-accel.h | 8 ++++++++
> hw/arm/smmuv3.c | 30 ++++++++++++++++++++++++++++++
> 3 files changed, 76 insertions(+)
>
> diff --git a/hw/arm/smmuv3-accel.c b/hw/arm/smmuv3-accel.c
> index f4e01fba6d..9ad8595ce2 100644
> --- a/hw/arm/smmuv3-accel.c
> +++ b/hw/arm/smmuv3-accel.c
> @@ -218,6 +218,44 @@ bool smmuv3_accel_install_nested_ste_range(SMMUv3State *s, SMMUSIDRange *range,
> return true;
> }
>
> +/*
> + * This issues the invalidation cmd to the host SMMUv3.
> + * Note: sdev can be NULL for certain invalidation commands
> + * e.g., SMMU_CMD_TLBI_NH_ASID, SMMU_CMD_TLBI_NH_VA etc.
> + */
> +bool smmuv3_accel_issue_inv_cmd(SMMUv3State *bs, void *cmd, SMMUDevice *sdev,
> + Error **errp)
> +{
> + SMMUv3State *s = ARM_SMMUV3(bs);
> + SMMUv3AccelState *s_accel = s->s_accel;
> + IOMMUFDViommu *viommu_core;
> + uint32_t entry_num = 1;
> +
> + if (!s->accel || !s_accel->viommu) {
Can you explain/document why !s_accel->viommu is handled as no error?
> + return true;
> + }
> +
> + /*
> + * We may end up here for any emulated PCI bridge or root port type devices.
> + * However, passing invalidation commands with sid (eg: CFGI_CD) to host
> + * SMMUv3 only matters for vfio-pci endpoint devices. Hence check that if
> + * sdev is valid.
Only propagate errors to the host if the SDEV matches a VFIO device?
> + */
> + if (sdev) {
> + SMMUv3AccelDevice *accel_dev = container_of(sdev, SMMUv3AccelDevice,
> + sdev);
> + if (!accel_dev->vdev) {
> + return true;
> + }
> + }
> +
> + viommu_core = &s_accel->viommu->core;
> + return iommufd_backend_invalidate_cache(
> + viommu_core->iommufd, viommu_core->viommu_id,
> + IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3,
> + sizeof(Cmd), &entry_num, cmd, errp);
what shall we do if iommufd_backend_invalidate_cache() succeeds but the
output entry_num is different from the input one? In the current case we
have entry_num = 1, so maybe this is not possible, but we might leave a
comment at least?
> +}
> +
> static SMMUv3AccelDevice *smmuv3_accel_get_dev(SMMUState *bs, SMMUPciBus *sbus,
> PCIBus *bus, int devfn)
> {
> diff --git a/hw/arm/smmuv3-accel.h b/hw/arm/smmuv3-accel.h
> index 6242614c00..3bdba47616 100644
> --- a/hw/arm/smmuv3-accel.h
> +++ b/hw/arm/smmuv3-accel.h
> @@ -46,6 +46,8 @@ bool smmuv3_accel_install_nested_ste(SMMUv3State *s, SMMUDevice *sdev, int sid,
> Error **errp);
> bool smmuv3_accel_install_nested_ste_range(SMMUv3State *s, SMMUSIDRange *range,
> Error **errp);
> +bool smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd, SMMUDevice *sdev,
> + Error **errp);
> #else
> static inline void smmuv3_accel_init(SMMUv3State *s)
> {
> @@ -62,6 +64,12 @@ smmuv3_accel_install_nested_ste_range(SMMUv3State *s, SMMUSIDRange *range,
> {
> return true;
> }
> +static inline bool
> +smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd, SMMUDevice *sdev,
> + Error **errp)
> +{
> + return true;
> +}
> #endif
>
> #endif /* HW_ARM_SMMUV3_ACCEL_H */
> diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
> index 1fd8aaa0c7..3963bdc87f 100644
> --- a/hw/arm/smmuv3.c
> +++ b/hw/arm/smmuv3.c
> @@ -1381,6 +1381,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
> {
> uint32_t sid = CMD_SID(&cmd);
> SMMUDevice *sdev = smmu_find_sdev(bs, sid);
> + Error *local_err = NULL;
>
> if (CMD_SSEC(&cmd)) {
> cmd_error = SMMU_CERROR_ILL;
> @@ -1393,11 +1394,17 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
>
> trace_smmuv3_cmdq_cfgi_cd(sid);
> smmuv3_flush_config(sdev);
> + if (!smmuv3_accel_issue_inv_cmd(s, &cmd, sdev, &local_err)) {
> + error_report_err(local_err);
> + cmd_error = SMMU_CERROR_ILL;
> + break;
> + }
> break;
> }
> case SMMU_CMD_TLBI_NH_ASID:
> {
> int asid = CMD_ASID(&cmd);
> + Error *local_err = NULL;
> int vmid = -1;
>
> if (!STAGE1_SUPPORTED(s)) {
> @@ -1416,6 +1423,11 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
> trace_smmuv3_cmdq_tlbi_nh_asid(asid);
> smmu_inv_notifiers_all(&s->smmu_state);
> smmu_iotlb_inv_asid_vmid(bs, asid, vmid);
> + if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, &local_err)) {
> + error_report_err(local_err);
> + cmd_error = SMMU_CERROR_ILL;
> + break;
> + }
> break;
> }
> case SMMU_CMD_TLBI_NH_ALL:
do we check somewhere that accel mode only applies to stage=1?
Thanks
Eric
> @@ -1440,18 +1452,36 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
> QEMU_FALLTHROUGH;
> }
> case SMMU_CMD_TLBI_NSNH_ALL:
> + {
> + Error *local_err = NULL;
> +
> trace_smmuv3_cmdq_tlbi_nsnh();
> smmu_inv_notifiers_all(&s->smmu_state);
> smmu_iotlb_inv_all(bs);
> + if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, &local_err)) {
> + error_report_err(local_err);
> + cmd_error = SMMU_CERROR_ILL;
> + break;
> + }
> break;
> + }
> case SMMU_CMD_TLBI_NH_VAA:
> case SMMU_CMD_TLBI_NH_VA:
> + {
> + Error *local_err = NULL;
> +
> if (!STAGE1_SUPPORTED(s)) {
> cmd_error = SMMU_CERROR_ILL;
> break;
> }
> smmuv3_range_inval(bs, &cmd, SMMU_STAGE_1);
> + if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, &local_err)) {
> + error_report_err(local_err);
> + cmd_error = SMMU_CERROR_ILL;
> + break;
> + }
> break;
> + }
> case SMMU_CMD_TLBI_S12_VMALL:
> {
> int vmid = CMD_VMID(&cmd);
Hi Eric,
> -----Original Message-----
> From: Eric Auger <eric.auger@redhat.com>
> Sent: 27 October 2025 10:14
> To: Shameer Kolothum <skolothumtho@nvidia.com>; qemu-
> arm@nongnu.org; qemu-devel@nongnu.org
> Cc: peter.maydell@linaro.org; Jason Gunthorpe <jgg@nvidia.com>; Nicolin
> Chen <nicolinc@nvidia.com>; ddutile@redhat.com; berrange@redhat.com;
> Nathan Chen <nathanc@nvidia.com>; Matt Ochs <mochs@nvidia.com>;
> smostafa@google.com; wangzhou1@hisilicon.com;
> jiangkunkun@huawei.com; jonathan.cameron@huawei.com;
> zhangfei.gao@linaro.org; zhenzhong.duan@intel.com; yi.l.liu@intel.com;
> shameerkolothum@gmail.com
> Subject: Re: [PATCH v4 13/27] hw/arm/smmuv3-accel: Add support to issue
> invalidation cmd to host
>
> External email: Use caution opening links or attachments
>
>
> Hi Shameer,
>
> On 9/29/25 3:36 PM, Shameer Kolothum wrote:
> > Provide a helper and use that to issue the invalidation cmd to host SMMUv3.
> > We only issue one cmd at a time for now.
> >
> > Support for batching of commands will be added later after analysing
> > the impact.
> >
> > Signed-off-by: Shameer Kolothum <skolothumtho@nvidia.com>
> > ---
> > hw/arm/smmuv3-accel.c | 38
> ++++++++++++++++++++++++++++++++++++++
> > hw/arm/smmuv3-accel.h | 8 ++++++++
> > hw/arm/smmuv3.c | 30 ++++++++++++++++++++++++++++++
> > 3 files changed, 76 insertions(+)
> >
> > diff --git a/hw/arm/smmuv3-accel.c b/hw/arm/smmuv3-accel.c index
> > f4e01fba6d..9ad8595ce2 100644
> > --- a/hw/arm/smmuv3-accel.c
> > +++ b/hw/arm/smmuv3-accel.c
> > @@ -218,6 +218,44 @@ bool
> smmuv3_accel_install_nested_ste_range(SMMUv3State *s, SMMUSIDRange
> *range,
> > return true;
> > }
> >
> > +/*
> > + * This issues the invalidation cmd to the host SMMUv3.
> > + * Note: sdev can be NULL for certain invalidation commands
> > + * e.g., SMMU_CMD_TLBI_NH_ASID, SMMU_CMD_TLBI_NH_VA etc.
> > + */
> > +bool smmuv3_accel_issue_inv_cmd(SMMUv3State *bs, void *cmd,
> SMMUDevice *sdev,
> > + Error **errp) {
> > + SMMUv3State *s = ARM_SMMUV3(bs);
> > + SMMUv3AccelState *s_accel = s->s_accel;
> > + IOMMUFDViommu *viommu_core;
> > + uint32_t entry_num = 1;
> > +
> > + if (!s->accel || !s_accel->viommu) {
> Can you explain/document why !s_accel->viommu is handled as no error?
!s_accel->viommu basically means there are no vfio-pci devices with iommufd
backend. I will add a comment.
> > + return true;
> > + }
> > +
> > + /*
> > + * We may end up here for any emulated PCI bridge or root port type
> devices.
> > + * However, passing invalidation commands with sid (eg: CFGI_CD) to host
> > + * SMMUv3 only matters for vfio-pci endpoint devices. Hence check that if
> > + * sdev is valid.
> Only propagate errors to host if the SDEV matches a VFIO devices?
The vdev is only allocated for a valid idev device. The below !accel_dev->vdev
checks that.
> > + */
> > + if (sdev) {
> > + SMMUv3AccelDevice *accel_dev = container_of(sdev,
> SMMUv3AccelDevice,
> > + sdev);
> > + if (!accel_dev->vdev) {
> > + return true;
> > + }
> > + }
> > +
> > + viommu_core = &s_accel->viommu->core;
>
> > + return iommufd_backend_invalidate_cache(
> > + viommu_core->iommufd, viommu_core->viommu_id,
> > + IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3,
> > + sizeof(Cmd), &entry_num, cmd, errp);
> what shall we do if iommufd_backend_invalidate_cache() succeeds with
> output entry_num is different from onput one? In current case we have
> entry_num = 1 so maybe this is not possible but we might leave a comment at
> least?
Ok. I will add that to comment.
> > +}
> > +
> > static SMMUv3AccelDevice *smmuv3_accel_get_dev(SMMUState *bs,
> SMMUPciBus *sbus,
> > PCIBus *bus, int
> > devfn) { diff --git a/hw/arm/smmuv3-accel.h b/hw/arm/smmuv3-accel.h
> > index 6242614c00..3bdba47616 100644
> > --- a/hw/arm/smmuv3-accel.h
> > +++ b/hw/arm/smmuv3-accel.h
> > @@ -46,6 +46,8 @@ bool smmuv3_accel_install_nested_ste(SMMUv3State
> *s, SMMUDevice *sdev, int sid,
> > Error **errp); bool
> > smmuv3_accel_install_nested_ste_range(SMMUv3State *s,
> SMMUSIDRange *range,
> > Error **errp);
> > +bool smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd,
> SMMUDevice *sdev,
> > + Error **errp);
> > #else
> > static inline void smmuv3_accel_init(SMMUv3State *s) { @@ -62,6
> > +64,12 @@ smmuv3_accel_install_nested_ste_range(SMMUv3State *s,
> > SMMUSIDRange *range, {
> > return true;
> > }
> > +static inline bool
> > +smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd, SMMUDevice
> *sdev,
> > + Error **errp) {
> > + return true;
> > +}
> > #endif
> >
> > #endif /* HW_ARM_SMMUV3_ACCEL_H */
> > diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c index
> > 1fd8aaa0c7..3963bdc87f 100644
> > --- a/hw/arm/smmuv3.c
> > +++ b/hw/arm/smmuv3.c
> > @@ -1381,6 +1381,7 @@ static int
> smmuv3_cmdq_consume(SMMUv3State *s)
> > {
> > uint32_t sid = CMD_SID(&cmd);
> > SMMUDevice *sdev = smmu_find_sdev(bs, sid);
> > + Error *local_err = NULL;
> >
> > if (CMD_SSEC(&cmd)) {
> > cmd_error = SMMU_CERROR_ILL; @@ -1393,11 +1394,17 @@
> > static int smmuv3_cmdq_consume(SMMUv3State *s)
> >
> > trace_smmuv3_cmdq_cfgi_cd(sid);
> > smmuv3_flush_config(sdev);
> > + if (!smmuv3_accel_issue_inv_cmd(s, &cmd, sdev, &local_err)) {
> > + error_report_err(local_err);
> > + cmd_error = SMMU_CERROR_ILL;
> > + break;
> > + }
> > break;
> > }
> > case SMMU_CMD_TLBI_NH_ASID:
> > {
> > int asid = CMD_ASID(&cmd);
> > + Error *local_err = NULL;
> > int vmid = -1;
> >
> > if (!STAGE1_SUPPORTED(s)) { @@ -1416,6 +1423,11 @@ static
> > int smmuv3_cmdq_consume(SMMUv3State *s)
> > trace_smmuv3_cmdq_tlbi_nh_asid(asid);
> > smmu_inv_notifiers_all(&s->smmu_state);
> > smmu_iotlb_inv_asid_vmid(bs, asid, vmid);
> > + if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, &local_err)) {
> > + error_report_err(local_err);
> > + cmd_error = SMMU_CERROR_ILL;
> > + break;
> > + }
> > break;
> > }
> > case SMMU_CMD_TLBI_NH_ALL:
> do we check somewhere that accel mode only applies to stage=1?
See patch #7. We mandate stage-1 for accel=on case.
Thanks,
Shameer
On 10/27/25 1:20 PM, Shameer Kolothum wrote:
> Hi Eric,
>
>> -----Original Message-----
>> From: Eric Auger <eric.auger@redhat.com>
>> Sent: 27 October 2025 10:14
>> To: Shameer Kolothum <skolothumtho@nvidia.com>; qemu-
>> arm@nongnu.org; qemu-devel@nongnu.org
>> Cc: peter.maydell@linaro.org; Jason Gunthorpe <jgg@nvidia.com>; Nicolin
>> Chen <nicolinc@nvidia.com>; ddutile@redhat.com; berrange@redhat.com;
>> Nathan Chen <nathanc@nvidia.com>; Matt Ochs <mochs@nvidia.com>;
>> smostafa@google.com; wangzhou1@hisilicon.com;
>> jiangkunkun@huawei.com; jonathan.cameron@huawei.com;
>> zhangfei.gao@linaro.org; zhenzhong.duan@intel.com; yi.l.liu@intel.com;
>> shameerkolothum@gmail.com
>> Subject: Re: [PATCH v4 13/27] hw/arm/smmuv3-accel: Add support to issue
>> invalidation cmd to host
>>
>> External email: Use caution opening links or attachments
>>
>>
>> Hi Shameer,
>>
>> On 9/29/25 3:36 PM, Shameer Kolothum wrote:
>>> Provide a helper and use that to issue the invalidation cmd to host SMMUv3.
>>> We only issue one cmd at a time for now.
>>>
>>> Support for batching of commands will be added later after analysing
>>> the impact.
>>>
>>> Signed-off-by: Shameer Kolothum <skolothumtho@nvidia.com>
>>> ---
>>> hw/arm/smmuv3-accel.c | 38
>> ++++++++++++++++++++++++++++++++++++++
>>> hw/arm/smmuv3-accel.h | 8 ++++++++
>>> hw/arm/smmuv3.c | 30 ++++++++++++++++++++++++++++++
>>> 3 files changed, 76 insertions(+)
>>>
>>> diff --git a/hw/arm/smmuv3-accel.c b/hw/arm/smmuv3-accel.c index
>>> f4e01fba6d..9ad8595ce2 100644
>>> --- a/hw/arm/smmuv3-accel.c
>>> +++ b/hw/arm/smmuv3-accel.c
>>> @@ -218,6 +218,44 @@ bool
>> smmuv3_accel_install_nested_ste_range(SMMUv3State *s, SMMUSIDRange
>> *range,
>>> return true;
>>> }
>>>
>>> +/*
>>> + * This issues the invalidation cmd to the host SMMUv3.
>>> + * Note: sdev can be NULL for certain invalidation commands
>>> + * e.g., SMMU_CMD_TLBI_NH_ASID, SMMU_CMD_TLBI_NH_VA etc.
>>> + */
>>> +bool smmuv3_accel_issue_inv_cmd(SMMUv3State *bs, void *cmd,
>> SMMUDevice *sdev,
>>> + Error **errp) {
>>> + SMMUv3State *s = ARM_SMMUV3(bs);
>>> + SMMUv3AccelState *s_accel = s->s_accel;
>>> + IOMMUFDViommu *viommu_core;
>>> + uint32_t entry_num = 1;
>>> +
>>> + if (!s->accel || !s_accel->viommu) {
>> Can you explain/document why !s_accel->viommu is handled as no error?
> !s_accel->viommu basically means there are no vfio-pci devices with iommufd
> backend. I will add a comment.
>
>>> + return true;
>>> + }
>>> +
>>> + /*
>>> + * We may end up here for any emulated PCI bridge or root port type
>> devices.
>>> + * However, passing invalidation commands with sid (eg: CFGI_CD) to host
>>> + * SMMUv3 only matters for vfio-pci endpoint devices. Hence check that if
>>> + * sdev is valid.
>> Only propagate errors to host if the SDEV matches a VFIO devices?
> The vdev is only allocated for a valid idev device. The below !accel_dev->vdev
> checks that.
yes. I was suggesting a rephrasing
>
>>> + */
>>> + if (sdev) {
>>> + SMMUv3AccelDevice *accel_dev = container_of(sdev,
>> SMMUv3AccelDevice,
>>> + sdev);
>>> + if (!accel_dev->vdev) {
>>> + return true;
>>> + }
>>> + }
>>> +
>>> + viommu_core = &s_accel->viommu->core;
>>> + return iommufd_backend_invalidate_cache(
>>> + viommu_core->iommufd, viommu_core->viommu_id,
>>> + IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3,
>>> + sizeof(Cmd), &entry_num, cmd, errp);
>> what shall we do if iommufd_backend_invalidate_cache() succeeds with
>> output entry_num is different from onput one? In current case we have
>> entry_num = 1 so maybe this is not possible but we might leave a comment at
>> least?
> Ok. I will add that to comment.
>
>>> +}
>>> +
>>> static SMMUv3AccelDevice *smmuv3_accel_get_dev(SMMUState *bs,
>> SMMUPciBus *sbus,
>>> PCIBus *bus, int
>>> devfn) { diff --git a/hw/arm/smmuv3-accel.h b/hw/arm/smmuv3-accel.h
>>> index 6242614c00..3bdba47616 100644
>>> --- a/hw/arm/smmuv3-accel.h
>>> +++ b/hw/arm/smmuv3-accel.h
>>> @@ -46,6 +46,8 @@ bool smmuv3_accel_install_nested_ste(SMMUv3State
>> *s, SMMUDevice *sdev, int sid,
>>> Error **errp); bool
>>> smmuv3_accel_install_nested_ste_range(SMMUv3State *s,
>> SMMUSIDRange *range,
>>> Error **errp);
>>> +bool smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd,
>> SMMUDevice *sdev,
>>> + Error **errp);
>>> #else
>>> static inline void smmuv3_accel_init(SMMUv3State *s) { @@ -62,6
>>> +64,12 @@ smmuv3_accel_install_nested_ste_range(SMMUv3State *s,
>>> SMMUSIDRange *range, {
>>> return true;
>>> }
>>> +static inline bool
>>> +smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd, SMMUDevice
>> *sdev,
>>> + Error **errp) {
>>> + return true;
>>> +}
>>> #endif
>>>
>>> #endif /* HW_ARM_SMMUV3_ACCEL_H */
>>> diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c index
>>> 1fd8aaa0c7..3963bdc87f 100644
>>> --- a/hw/arm/smmuv3.c
>>> +++ b/hw/arm/smmuv3.c
>>> @@ -1381,6 +1381,7 @@ static int
>> smmuv3_cmdq_consume(SMMUv3State *s)
>>> {
>>> uint32_t sid = CMD_SID(&cmd);
>>> SMMUDevice *sdev = smmu_find_sdev(bs, sid);
>>> + Error *local_err = NULL;
>>>
>>> if (CMD_SSEC(&cmd)) {
>>> cmd_error = SMMU_CERROR_ILL; @@ -1393,11 +1394,17 @@
>>> static int smmuv3_cmdq_consume(SMMUv3State *s)
>>>
>>> trace_smmuv3_cmdq_cfgi_cd(sid);
>>> smmuv3_flush_config(sdev);
>>> + if (!smmuv3_accel_issue_inv_cmd(s, &cmd, sdev, &local_err)) {
>>> + error_report_err(local_err);
>>> + cmd_error = SMMU_CERROR_ILL;
>>> + break;
>>> + }
>>> break;
>>> }
>>> case SMMU_CMD_TLBI_NH_ASID:
>>> {
>>> int asid = CMD_ASID(&cmd);
>>> + Error *local_err = NULL;
>>> int vmid = -1;
>>>
>>> if (!STAGE1_SUPPORTED(s)) { @@ -1416,6 +1423,11 @@ static
>>> int smmuv3_cmdq_consume(SMMUv3State *s)
>>> trace_smmuv3_cmdq_tlbi_nh_asid(asid);
>>> smmu_inv_notifiers_all(&s->smmu_state);
>>> smmu_iotlb_inv_asid_vmid(bs, asid, vmid);
>>> + if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, &local_err)) {
>>> + error_report_err(local_err);
>>> + cmd_error = SMMU_CERROR_ILL;
>>> + break;
>>> + }
>>> break;
>>> }
>>> case SMMU_CMD_TLBI_NH_ALL:
>> do we check somewhere that accel mode only applies to stage=1?
> See patch #7. We mandate stage-1 for accel=on case.
Yes. I noticed that later on. thanks
Eric
>
> Thanks,
> Shameer
>
On Mon, Sep 29, 2025 at 02:36:29PM +0100, Shameer Kolothum wrote:
> Provide a helper and use that to issue the invalidation cmd to host SMMUv3.
> We only issue one cmd at a time for now.
>
> Support for batching of commands will be added later after analysing the
> impact.
>
> Signed-off-by: Shameer Kolothum <skolothumtho@nvidia.com>
> ---
> hw/arm/smmuv3-accel.c | 38 ++++++++++++++++++++++++++++++++++++++
> hw/arm/smmuv3-accel.h | 8 ++++++++
> hw/arm/smmuv3.c | 30 ++++++++++++++++++++++++++++++
> 3 files changed, 76 insertions(+)
>
> diff --git a/hw/arm/smmuv3-accel.c b/hw/arm/smmuv3-accel.c
> index f4e01fba6d..9ad8595ce2 100644
> --- a/hw/arm/smmuv3-accel.c
> +++ b/hw/arm/smmuv3-accel.c
> @@ -218,6 +218,44 @@ bool smmuv3_accel_install_nested_ste_range(SMMUv3State *s, SMMUSIDRange *range,
> return true;
> }
>
> +/*
> + * This issues the invalidation cmd to the host SMMUv3.
> + * Note: sdev can be NULL for certain invalidation commands
> + * e.g., SMMU_CMD_TLBI_NH_ASID, SMMU_CMD_TLBI_NH_VA etc.
* Note a TLBI command is passed in with a NULL sdev.
> + */
> +bool smmuv3_accel_issue_inv_cmd(SMMUv3State *bs, void *cmd, SMMUDevice *sdev,
> + Error **errp)
> +{
> + SMMUv3State *s = ARM_SMMUV3(bs);
> + SMMUv3AccelState *s_accel = s->s_accel;
> + IOMMUFDViommu *viommu_core;
> + uint32_t entry_num = 1;
> +
> + if (!s->accel || !s_accel->viommu) {
if (!accel || !s_accel->viommu) {
> + /*
> + * We may end up here for any emulated PCI bridge or root port type devices.
> + * However, passing invalidation commands with sid (eg: CFGI_CD) to host
> + * SMMUv3 only matters for vfio-pci endpoint devices. Hence check that if
> + * sdev is valid.
> + */
I think we should use "allowed" over "matters".
> + if (sdev) {
> + SMMUv3AccelDevice *accel_dev = container_of(sdev, SMMUv3AccelDevice,
> + sdev);
> + if (!accel_dev->vdev) {
> + return true;
> + }
> + }
And we could simplify with:
/*
* Only a device associated with the vIOMMU (by having a valid vdev) is
* allowed to flush its device cache
*/
if (sdev && !container_of(sdev, SMMUv3AccelDevice, sdev)->vdev) {
return true;
}
> @@ -1440,18 +1452,36 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
> QEMU_FALLTHROUGH;
> }
> case SMMU_CMD_TLBI_NSNH_ALL:
> + {
> + Error *local_err = NULL;
> +
> trace_smmuv3_cmdq_tlbi_nsnh();
> smmu_inv_notifiers_all(&s->smmu_state);
> smmu_iotlb_inv_all(bs);
> + if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, &local_err)) {
> + error_report_err(local_err);
> + cmd_error = SMMU_CERROR_ILL;
> + break;
> + }
> break;
> + }
local_err is not used but only printed. It'd be cleaner to
move it inside smmuv3_accel_issue_inv_cmd(). So, you would not
need to add "{}" nor have an "Error *" parameter in the helper.
Only cmd_error should stay.
With these,
Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
On Mon, 29 Sep 2025 14:36:29 +0100
Shameer Kolothum <skolothumtho@nvidia.com> wrote:

> Provide a helper and use that to issue the invalidation cmd to host SMMUv3.
> We only issue one cmd at a time for now.
>
> Support for batching of commands will be added later after analysing the
> impact.
>
> Signed-off-by: Shameer Kolothum <skolothumtho@nvidia.com>

LGTM

Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
© 2016 - 2025 Red Hat, Inc.