Provide a helper and use that to issue the invalidation cmd to host SMMUv3.
We only issue one cmd at a time for now.
Support for batching of commands will be added later after analysing the
impact.
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Tested-by: Zhangfei Gao <zhangfei.gao@linaro.org>
Signed-off-by: Shameer Kolothum <skolothumtho@nvidia.com>
---
hw/arm/smmuv3-accel.c | 35 +++++++++++++++++++++++++++++++++++
hw/arm/smmuv3-accel.h | 8 ++++++++
hw/arm/smmuv3.c | 30 ++++++++++++++++++++++++++++++
3 files changed, 73 insertions(+)
diff --git a/hw/arm/smmuv3-accel.c b/hw/arm/smmuv3-accel.c
index 395c8175da..a2deda3c32 100644
--- a/hw/arm/smmuv3-accel.c
+++ b/hw/arm/smmuv3-accel.c
@@ -213,6 +213,41 @@ bool smmuv3_accel_install_nested_ste_range(SMMUv3State *s, SMMUSIDRange *range,
return true;
}
+/*
+ * This issues the invalidation cmd to the host SMMUv3.
+ * Note: sdev can be NULL for certain invalidation commands
+ * e.g., SMMU_CMD_TLBI_NH_ASID, SMMU_CMD_TLBI_NH_VA etc.
+ */
+bool smmuv3_accel_issue_inv_cmd(SMMUv3State *bs, void *cmd, SMMUDevice *sdev,
+ Error **errp)
+{
+ SMMUv3State *s = ARM_SMMUV3(bs);
+ SMMUv3AccelState *s_accel = s->s_accel;
+ IOMMUFDViommu *viommu;
+ uint32_t entry_num = 1;
+
+ /* No vIOMMU means no VFIO/IOMMUFD devices, nothing to invalidate. */
+ if (!s_accel || !s_accel->vsmmu) {
+ return true;
+ }
+
+ /*
+ * Called for emulated bridges or root ports, but SID-based
+ * invalidations (e.g. CFGI_CD) apply only to vfio-pci endpoints
+ * with a valid vIOMMU vdev.
+ */
+ if (sdev && !container_of(sdev, SMMUv3AccelDevice, sdev)->vdev) {
+ return true;
+ }
+
+ viommu = &s_accel->vsmmu->viommu;
+ /* Single command (entry_num = 1); no need to check returned entry_num */
+ return iommufd_backend_invalidate_cache(
+ viommu->iommufd, viommu->viommu_id,
+ IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3,
+ sizeof(Cmd), &entry_num, cmd, errp);
+}
+
static SMMUv3AccelDevice *smmuv3_accel_get_dev(SMMUState *bs, SMMUPciBus *sbus,
PCIBus *bus, int devfn)
{
diff --git a/hw/arm/smmuv3-accel.h b/hw/arm/smmuv3-accel.h
index 8931e83dc5..ee79548370 100644
--- a/hw/arm/smmuv3-accel.h
+++ b/hw/arm/smmuv3-accel.h
@@ -51,6 +51,8 @@ bool smmuv3_accel_install_nested_ste(SMMUv3State *s, SMMUDevice *sdev, int sid,
Error **errp);
bool smmuv3_accel_install_nested_ste_range(SMMUv3State *s, SMMUSIDRange *range,
Error **errp);
+bool smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd, SMMUDevice *sdev,
+ Error **errp);
void smmuv3_accel_gbpa_update(SMMUv3State *s);
void smmuv3_accel_reset(SMMUv3State *s);
#else
@@ -69,6 +71,12 @@ smmuv3_accel_install_nested_ste_range(SMMUv3State *s, SMMUSIDRange *range,
{
return true;
}
+static inline bool
+smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd, SMMUDevice *sdev,
+ Error **errp)
+{
+ return true;
+}
static inline void smmuv3_accel_gbpa_update(SMMUv3State *s)
{
}
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index cc32b618ed..15173ddc9c 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -1381,6 +1381,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
{
uint32_t sid = CMD_SID(&cmd);
SMMUDevice *sdev = smmu_find_sdev(bs, sid);
+ Error *local_err = NULL;
if (CMD_SSEC(&cmd)) {
cmd_error = SMMU_CERROR_ILL;
@@ -1393,11 +1394,17 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
trace_smmuv3_cmdq_cfgi_cd(sid);
smmuv3_flush_config(sdev);
+ if (!smmuv3_accel_issue_inv_cmd(s, &cmd, sdev, &local_err)) {
+ error_report_err(local_err);
+ cmd_error = SMMU_CERROR_ILL;
+ break;
+ }
break;
}
case SMMU_CMD_TLBI_NH_ASID:
{
int asid = CMD_ASID(&cmd);
+ Error *local_err = NULL;
int vmid = -1;
if (!STAGE1_SUPPORTED(s)) {
@@ -1416,6 +1423,11 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
trace_smmuv3_cmdq_tlbi_nh_asid(asid);
smmu_inv_notifiers_all(&s->smmu_state);
smmu_iotlb_inv_asid_vmid(bs, asid, vmid);
+ if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, &local_err)) {
+ error_report_err(local_err);
+ cmd_error = SMMU_CERROR_ILL;
+ break;
+ }
break;
}
case SMMU_CMD_TLBI_NH_ALL:
@@ -1440,18 +1452,36 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
QEMU_FALLTHROUGH;
}
case SMMU_CMD_TLBI_NSNH_ALL:
+ {
+ Error *local_err = NULL;
+
trace_smmuv3_cmdq_tlbi_nsnh();
smmu_inv_notifiers_all(&s->smmu_state);
smmu_iotlb_inv_all(bs);
+ if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, &local_err)) {
+ error_report_err(local_err);
+ cmd_error = SMMU_CERROR_ILL;
+ break;
+ }
break;
+ }
case SMMU_CMD_TLBI_NH_VAA:
case SMMU_CMD_TLBI_NH_VA:
+ {
+ Error *local_err = NULL;
+
if (!STAGE1_SUPPORTED(s)) {
cmd_error = SMMU_CERROR_ILL;
break;
}
smmuv3_range_inval(bs, &cmd, SMMU_STAGE_1);
+ if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, &local_err)) {
+ error_report_err(local_err);
+ cmd_error = SMMU_CERROR_ILL;
+ break;
+ }
break;
+ }
case SMMU_CMD_TLBI_S12_VMALL:
{
int vmid = CMD_VMID(&cmd);
--
2.43.0
On 10/31/25 11:49 AM, Shameer Kolothum wrote:
> Provide a helper and use that to issue the invalidation cmd to host SMMUv3.
> We only issue one cmd at a time for now.
>
> Support for batching of commands will be added later after analysing the
> impact.
>
> Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
> Tested-by: Zhangfei Gao <zhangfei.gao@linaro.org>
> Signed-off-by: Shameer Kolothum <skolothumtho@nvidia.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Eric
> ---
> hw/arm/smmuv3-accel.c | 35 +++++++++++++++++++++++++++++++++++
> hw/arm/smmuv3-accel.h | 8 ++++++++
> hw/arm/smmuv3.c | 30 ++++++++++++++++++++++++++++++
> 3 files changed, 73 insertions(+)
>
> diff --git a/hw/arm/smmuv3-accel.c b/hw/arm/smmuv3-accel.c
> index 395c8175da..a2deda3c32 100644
> --- a/hw/arm/smmuv3-accel.c
> +++ b/hw/arm/smmuv3-accel.c
> @@ -213,6 +213,41 @@ bool smmuv3_accel_install_nested_ste_range(SMMUv3State *s, SMMUSIDRange *range,
> return true;
> }
>
> +/*
> + * This issues the invalidation cmd to the host SMMUv3.
> + * Note: sdev can be NULL for certain invalidation commands
> + * e.g., SMMU_CMD_TLBI_NH_ASID, SMMU_CMD_TLBI_NH_VA etc.
> + */
> +bool smmuv3_accel_issue_inv_cmd(SMMUv3State *bs, void *cmd, SMMUDevice *sdev,
> + Error **errp)
> +{
> + SMMUv3State *s = ARM_SMMUV3(bs);
> + SMMUv3AccelState *s_accel = s->s_accel;
> + IOMMUFDViommu *viommu;
> + uint32_t entry_num = 1;
> +
> + /* No vIOMMU means no VFIO/IOMMUFD devices, nothing to invalidate. */
> + if (!s_accel || !s_accel->vsmmu) {
> + return true;
> + }
> +
> + /*
> + * Called for emulated bridges or root ports, but SID-based
> + * invalidations (e.g. CFGI_CD) apply only to vfio-pci endpoints
> + * with a valid vIOMMU vdev.
> + */
> + if (sdev && !container_of(sdev, SMMUv3AccelDevice, sdev)->vdev) {
> + return true;
> + }
> +
> + viommu = &s_accel->vsmmu->viommu;
> + /* Single command (entry_num = 1); no need to check returned entry_num */
> + return iommufd_backend_invalidate_cache(
> + viommu->iommufd, viommu->viommu_id,
> + IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3,
> + sizeof(Cmd), &entry_num, cmd, errp);
> +}
> +
> static SMMUv3AccelDevice *smmuv3_accel_get_dev(SMMUState *bs, SMMUPciBus *sbus,
> PCIBus *bus, int devfn)
> {
> diff --git a/hw/arm/smmuv3-accel.h b/hw/arm/smmuv3-accel.h
> index 8931e83dc5..ee79548370 100644
> --- a/hw/arm/smmuv3-accel.h
> +++ b/hw/arm/smmuv3-accel.h
> @@ -51,6 +51,8 @@ bool smmuv3_accel_install_nested_ste(SMMUv3State *s, SMMUDevice *sdev, int sid,
> Error **errp);
> bool smmuv3_accel_install_nested_ste_range(SMMUv3State *s, SMMUSIDRange *range,
> Error **errp);
> +bool smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd, SMMUDevice *sdev,
> + Error **errp);
> void smmuv3_accel_gbpa_update(SMMUv3State *s);
> void smmuv3_accel_reset(SMMUv3State *s);
> #else
> @@ -69,6 +71,12 @@ smmuv3_accel_install_nested_ste_range(SMMUv3State *s, SMMUSIDRange *range,
> {
> return true;
> }
> +static inline bool
> +smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd, SMMUDevice *sdev,
> + Error **errp)
> +{
> + return true;
> +}
> static inline void smmuv3_accel_gbpa_update(SMMUv3State *s)
> {
> }
> diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
> index cc32b618ed..15173ddc9c 100644
> --- a/hw/arm/smmuv3.c
> +++ b/hw/arm/smmuv3.c
> @@ -1381,6 +1381,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
> {
> uint32_t sid = CMD_SID(&cmd);
> SMMUDevice *sdev = smmu_find_sdev(bs, sid);
> + Error *local_err = NULL;
>
> if (CMD_SSEC(&cmd)) {
> cmd_error = SMMU_CERROR_ILL;
> @@ -1393,11 +1394,17 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
>
> trace_smmuv3_cmdq_cfgi_cd(sid);
> smmuv3_flush_config(sdev);
> + if (!smmuv3_accel_issue_inv_cmd(s, &cmd, sdev, &local_err)) {
> + error_report_err(local_err);
> + cmd_error = SMMU_CERROR_ILL;
> + break;
> + }
> break;
> }
> case SMMU_CMD_TLBI_NH_ASID:
> {
> int asid = CMD_ASID(&cmd);
> + Error *local_err = NULL;
> int vmid = -1;
>
> if (!STAGE1_SUPPORTED(s)) {
> @@ -1416,6 +1423,11 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
> trace_smmuv3_cmdq_tlbi_nh_asid(asid);
> smmu_inv_notifiers_all(&s->smmu_state);
> smmu_iotlb_inv_asid_vmid(bs, asid, vmid);
> + if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, &local_err)) {
> + error_report_err(local_err);
> + cmd_error = SMMU_CERROR_ILL;
> + break;
> + }
> break;
> }
> case SMMU_CMD_TLBI_NH_ALL:
> @@ -1440,18 +1452,36 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
> QEMU_FALLTHROUGH;
> }
> case SMMU_CMD_TLBI_NSNH_ALL:
> + {
> + Error *local_err = NULL;
> +
> trace_smmuv3_cmdq_tlbi_nsnh();
> smmu_inv_notifiers_all(&s->smmu_state);
> smmu_iotlb_inv_all(bs);
> + if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, &local_err)) {
> + error_report_err(local_err);
> + cmd_error = SMMU_CERROR_ILL;
> + break;
> + }
> break;
> + }
> case SMMU_CMD_TLBI_NH_VAA:
> case SMMU_CMD_TLBI_NH_VA:
> + {
> + Error *local_err = NULL;
> +
> if (!STAGE1_SUPPORTED(s)) {
> cmd_error = SMMU_CERROR_ILL;
> break;
> }
> smmuv3_range_inval(bs, &cmd, SMMU_STAGE_1);
> + if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, &local_err)) {
> + error_report_err(local_err);
> + cmd_error = SMMU_CERROR_ILL;
> + break;
> + }
> break;
> + }
> case SMMU_CMD_TLBI_S12_VMALL:
> {
> int vmid = CMD_VMID(&cmd);
On Fri, Oct 31, 2025 at 10:49:50AM +0000, Shameer Kolothum wrote:
> Provide a helper and use that to issue the invalidation cmd to host SMMUv3.
> We only issue one cmd at a time for now.
>
> Support for batching of commands will be added later after analysing the
> impact.
>
> Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
> Tested-by: Zhangfei Gao <zhangfei.gao@linaro.org>
> Signed-off-by: Shameer Kolothum <skolothumtho@nvidia.com>
I think I have given my tag in v4.. anyway..
Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
> case SMMU_CMD_TLBI_NH_VAA:
> case SMMU_CMD_TLBI_NH_VA:
> + {
> + Error *local_err = NULL;
> +
> if (!STAGE1_SUPPORTED(s)) {
> cmd_error = SMMU_CERROR_ILL;
> break;
> }
> smmuv3_range_inval(bs, &cmd, SMMU_STAGE_1);
> + if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, &local_err)) {
> + error_report_err(local_err);
> + cmd_error = SMMU_CERROR_ILL;
> + break;
> + }
> break;
> + }
The local_err isn't used anywhere but by the error_report_err()
alone. So, it could be moved into smmuv3_accel_issue_inv_cmd().
Nicolin
> -----Original Message-----
> From: Nicolin Chen <nicolinc@nvidia.com>
> Sent: 01 November 2025 00:35
> To: Shameer Kolothum <skolothumtho@nvidia.com>
> Cc: qemu-arm@nongnu.org; qemu-devel@nongnu.org;
> eric.auger@redhat.com; peter.maydell@linaro.org; Jason Gunthorpe
> <jgg@nvidia.com>; ddutile@redhat.com; berrange@redhat.com; Nathan
> Chen <nathanc@nvidia.com>; Matt Ochs <mochs@nvidia.com>;
> smostafa@google.com; wangzhou1@hisilicon.com;
> jiangkunkun@huawei.com; jonathan.cameron@huawei.com;
> zhangfei.gao@linaro.org; zhenzhong.duan@intel.com; yi.l.liu@intel.com;
> Krishnakant Jaju <kjaju@nvidia.com>
> Subject: Re: [PATCH v5 17/32] hw/arm/smmuv3-accel: Add support to issue
> invalidation cmd to host
>
> On Fri, Oct 31, 2025 at 10:49:50AM +0000, Shameer Kolothum wrote:
> > Provide a helper and use that to issue the invalidation cmd to host SMMUv3.
> > We only issue one cmd at a time for now.
> >
> > Support for batching of commands will be added later after analysing the
> > impact.
> >
> > Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
> > Tested-by: Zhangfei Gao <zhangfei.gao@linaro.org>
> > Signed-off-by: Shameer Kolothum <skolothumtho@nvidia.com>
>
> I think I have given my tag in v4.. anyway..
Sorry I missed that.
>
> Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
Thanks.
> > case SMMU_CMD_TLBI_NH_VAA:
> > case SMMU_CMD_TLBI_NH_VA:
> > + {
> > + Error *local_err = NULL;
> > +
> > if (!STAGE1_SUPPORTED(s)) {
> > cmd_error = SMMU_CERROR_ILL;
> > break;
> > }
> > smmuv3_range_inval(bs, &cmd, SMMU_STAGE_1);
> > + if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, &local_err)) {
> > + error_report_err(local_err);
> > + cmd_error = SMMU_CERROR_ILL;
> > + break;
> > + }
> > break;
> > + }
>
> The local_err isn't used anywhere but by the error_report_err()
> alone. So, it could be moved into smmuv3_accel_issue_inv_cmd().
Though that is true, it is following the same pattern as
smmuv3_accel_install_nested_ste()/_range() functions. The general
idea is, we will pass the errp to accel functions and report or propagate
from here.
Thanks,
Shameer
On Mon, Nov 03, 2025 at 07:28:14AM -0800, Shameer Kolothum wrote:
> > > case SMMU_CMD_TLBI_NH_VAA:
> > > case SMMU_CMD_TLBI_NH_VA:
> > > + {
> > > + Error *local_err = NULL;
> > > +
> > > if (!STAGE1_SUPPORTED(s)) {
> > > cmd_error = SMMU_CERROR_ILL;
> > > break;
> > > }
> > > smmuv3_range_inval(bs, &cmd, SMMU_STAGE_1);
> > > + if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, &local_err)) {
> > > + error_report_err(local_err);
> > > + cmd_error = SMMU_CERROR_ILL;
> > > + break;
> > > + }
> > > break;
> > > + }
> >
> > The local_err isn't used anywhere but by the error_report_err()
> > alone. So, it could be moved into smmuv3_accel_issue_inv_cmd().
>
> Though that is true, it is following the same pattern as
> smmuv3_accel_install_nested_ste()/_range() functions.
We could drop the one in smmuv3_accel_install_nested_ste() too.
> The general
> idea is, we will pass the errp to accel functions and report or propagate
> from here.
But there is no "errp" in smmuv3_cmdq_consume() to propagate these
local_errs further? It ends at the error_report_err().
If we only get local_err and print them, why not just print them
inside the _accel functions?
Nicolin
> -----Original Message-----
> From: Nicolin Chen <nicolinc@nvidia.com>
> Sent: 03 November 2025 17:44
> To: Shameer Kolothum <skolothumtho@nvidia.com>
> Cc: qemu-arm@nongnu.org; qemu-devel@nongnu.org;
> eric.auger@redhat.com; peter.maydell@linaro.org; Jason Gunthorpe
> <jgg@nvidia.com>; ddutile@redhat.com; berrange@redhat.com; Nathan
> Chen <nathanc@nvidia.com>; Matt Ochs <mochs@nvidia.com>;
> smostafa@google.com; wangzhou1@hisilicon.com;
> jiangkunkun@huawei.com; jonathan.cameron@huawei.com;
> zhangfei.gao@linaro.org; zhenzhong.duan@intel.com; yi.l.liu@intel.com;
> Krishnakant Jaju <kjaju@nvidia.com>
> Subject: Re: [PATCH v5 17/32] hw/arm/smmuv3-accel: Add support to issue
> invalidation cmd to host
>
> > > Though that is true, it is following the same pattern as
> > > smmuv3_accel_install_nested_ste()/_range() functions.
>
> We could drop the one in smmuv3_accel_install_nested_ste() too.
>
> > > The general
> > > idea is, we will pass the errp to accel functions and report or
> > > propagate from here.
>
> But there is no "errp" in smmuv3_cmdq_consume() to propagate these
> local_errs further? It ends at the error_report_err().
>
> If we only get local_err and print them, why not just print them inside the
> _accel functions?

Right, we don't propagate error now. But in future it might come handy.
I would personally keep the error propagation facility if possible.

Also, this was added as per Eric's comment on RFC v3.

https://lore.kernel.org/qemu-devel/41ceadf1-07de-4c8a-8935-d709ac7cf6bc@redhat.com/

Thanks,
Shameer
On Mon, Nov 03, 2025 at 10:17:20AM -0800, Shameer Kolothum wrote:
> > > The general
> > > idea is, we will pass the errp to accel functions and report or
> > > propagate from here.
> >
> > But there is no "errp" in smmuv3_cmdq_consume() to propagate the these
> > local_errs further? It ends at the error_report_err().
> >
> > If we only get local_err and print them, why not just print them inside the
> > _accel functions?
>
> Right, we don’t propagate error now. But in future it might come
> handy. I would personally keep the error propagation facility if possible.
smmuv3_cmdq_consume() is called in smmu_writel() only. Where do we
plan to propagate that in the future?
> Also, this was added as per Eric's comment on RFC v3.
>
> https://lore.kernel.org/qemu-devel/41ceadf1-07de-4c8a-8935-d709ac7cf6bc@redhat.com/
If only we have a top function that does error_report_err() in one
place.. Duplicating error_report_err(local_err) doesn't look clean
to me.
Maybe smmu_writel() could do:
{
+ Error *errp = NULL;
switch (offset) {
case A_XXX:
smmuv3_cmdq_consume(..., errp);
+ return MEMTX_OK;
- break;
...
case A_YYY:
smmuv3_cmdq_consume(..., errp);
+ return MEMTX_OK;
- break;
}
+ error_report_err(errp);
+ return MEMTX_OK;
}
Any better idea, Eric?
Nicolin
Hi,
On 11/3/25 7:51 PM, Nicolin Chen wrote:
> On Mon, Nov 03, 2025 at 10:17:20AM -0800, Shameer Kolothum wrote:
>>>> The general
>>>> idea is, we will pass the errp to accel functions and report or
>>>> propagate from here.
>>> But there is no "errp" in smmuv3_cmdq_consume() to propagate the these
>>> local_errs further? It ends at the error_report_err().
>>>
>>> If we only get local_err and print them, why not just print them inside the
>>> _accel functions?
>> Right, we don’t propagate error now. But in future it might come
>> handy. I would personally keep the error propagation facility if possible.
> smmuv3_cmdq_consume() is called in smmu_writel() only. Where do we
> plan to propagate that in the future?
>
>> Also, this was added as per Eric's comment on RFC v3.
>>
>> https://lore.kernel.org/qemu-devel/41ceadf1-07de-4c8a-8935-d709ac7cf6bc@redhat.com/
> If only we have a top function that does error_report_err() in one
> place.. Duplicating error_report_err(local_err) doesn't look clean
> to me.
>
> Maybe smmu_writel() could do:
> {
> + Error *errp = NULL;
>
> switch (offset) {
> case A_XXX:
> smmuv3_cmdq_consume(..., errp);
> + return MEMTX_OK;
> - break;
> ...
> case A_YYY:
> smmuv3_cmdq_consume(..., errp);
> + return MEMTX_OK;
> - break;
> }
> + error_report_err(errp);
> + return MEMTX_OK;
> }
>
> Any better idea, Eric?
Can't we move local_err outside of case block and after the switch,
if (cmd_error) {
if (local_err) {
error_report_err(local_err);
}
../..
Eric
>
> Nicolin
>
On Tue, Nov 04, 2025 at 09:55:46AM +0100, Eric Auger wrote:
> On 11/3/25 7:51 PM, Nicolin Chen wrote:
> > On Mon, Nov 03, 2025 at 10:17:20AM -0800, Shameer Kolothum wrote:
> >>>> The general
> >>>> idea is, we will pass the errp to accel functions and report or
> >>>> propagate from here.
> >>> But there is no "errp" in smmuv3_cmdq_consume() to propagate the these
> >>> local_errs further? It ends at the error_report_err().
> >>>
> >>> If we only get local_err and print them, why not just print them inside the
> >>> _accel functions?
> >> Right, we don’t propagate error now. But in future it might come
> >> handy. I would personally keep the error propagation facility if possible.
> > smmuv3_cmdq_consume() is called in smmu_writel() only. Where do we
> > plan to propagate that in the future?
> >
> >> Also, this was added as per Eric's comment on RFC v3.
> >>
> >> https://lore.kernel.org/qemu-devel/41ceadf1-07de-4c8a-8935-d709ac7cf6bc@redhat.com/
> > If only we have a top function that does error_report_err() in one
> > place.. Duplicating error_report_err(local_err) doesn't look clean
> > to me.
> >
> > Maybe smmu_writel() could do:
> > {
> > + Error *errp = NULL;
> >
> > switch (offset) {
> > case A_XXX:
> > smmuv3_cmdq_consume(..., errp);
> > + return MEMTX_OK;
> > - break;
> > ...
> > case A_YYY:
> > smmuv3_cmdq_consume(..., errp);
> > + return MEMTX_OK;
> > - break;
> > }
> > + error_report_err(errp);
> > + return MEMTX_OK;
> > }
> >
> > Any better idea, Eric?
>
> Can't we move local_err outside of case block and after the switch,
>
> if (cmd_error) {
> if (local_err) {
> error_report_err(local_err);
> }
> ../..
I see Shameer's vEVENTQ patch (WIP) has errp also that will end
up an error_report_err() in the smmu_writel() for A_EVENTQ_BASE.
So, it seems cleaner to do it in the top function? I am fine
with adding it in the cmdq function for now and moving it later, though.
Thanks
Nicolin
© 2016 - 2025 Red Hat, Inc.