The current implementation does not follow the 128-bit write requirement
to update the DTE as specified in the AMD I/O Virtualization Technology
(IOMMU) Specification.
Therefore, modify struct dev_table_entry to contain a union with a u128 data
array, and introduce a helper function update_dte256(), which uses two
128-bit cmpxchg operations to update the 256-bit DTE with the modified
structure. Also take the DTE[V, GV] bits into account when programming
the DTE to ensure the proper ordering of DTE programming and flushing.
In addition, introduce a per-DTE spin_lock struct dev_data.dte_lock to
provide synchronization when updating the DTE to prevent cmpxchg128
failure.
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
---
drivers/iommu/amd/amd_iommu.h | 2 +
drivers/iommu/amd/amd_iommu_types.h | 8 ++-
drivers/iommu/amd/iommu.c | 96 +++++++++++++++++++++++++++++
3 files changed, 105 insertions(+), 1 deletion(-)
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 52e18b5f99fd..14a153c7bc12 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -28,6 +28,8 @@ void iommu_feature_enable(struct amd_iommu *iommu, u8 bit);
void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
gfp_t gfp, size_t size);
+int iommu_flush_sync_dte(struct amd_iommu *iommu, u16 devid);
+
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
#else
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index c9f9a598eb82..fea7544f8c55 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -427,6 +427,8 @@
#define GCR3_VALID 0x01ULL
+#define DTE_INTR_MASK (~GENMASK_ULL(55, 52))
+
#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_PR)
#define IOMMU_PTE_DIRTY(pte) ((pte) & IOMMU_PTE_HD)
@@ -833,6 +835,7 @@ struct devid_map {
struct iommu_dev_data {
/*Protect against attach/detach races */
spinlock_t lock;
+ spinlock_t dte_lock; /* DTE lock for 256-bit access */
struct list_head list; /* For domain->dev_list */
struct llist_node dev_data_list; /* For global dev_data_list */
@@ -883,7 +886,10 @@ extern struct amd_iommu *amd_iommus[MAX_IOMMUS];
* Structure defining one entry in the device table
*/
struct dev_table_entry {
- u64 data[4];
+ union {
+ u64 data[4];
+ u128 data128[2];
+ };
};
/*
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 87c5385ce3f2..48a721d10f06 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -85,6 +85,91 @@ static void set_dte_entry(struct amd_iommu *iommu,
*
****************************************************************************/
+static void write_upper(struct dev_table_entry *ptr, struct dev_table_entry *new)
+{
+ struct dev_table_entry old = {};
+
+ do {
+ old.data128[1] = ptr->data128[1];
+ new->data[2] &= ~DTE_INTR_MASK;
+ new->data[2] |= (old.data[2] & DTE_INTR_MASK);
+ } while (!try_cmpxchg128(&ptr->data128[1], &old.data128[1], new->data128[1]));
+}
+
+static void write_lower(struct dev_table_entry *ptr, struct dev_table_entry *new)
+{
+ struct dev_table_entry old = {};
+
+ do {
+ old.data128[0] = ptr->data128[0];
+ } while (!try_cmpxchg128(&ptr->data128[0], &old.data128[0], new->data128[0]));
+}
+
+/*
+ * Note:
+ * IOMMU reads the entire Device Table entry in a single 256-bit transaction
+ * but the driver is programming DTE using 2 128-bit cmpxchg. So, the driver
+ * needs to ensure the following:
+ * - DTE[V|GV] bit is being written last when setting.
+ * - DTE[V|GV] bit is being written first when clearing.
+ *
+ * This function is used only by code, which updates DMA translation part of the DTE.
+ * So, only consider control bits related to DMA when updating the entry.
+ */
+static void update_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
+ struct dev_table_entry *new)
+{
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+ struct dev_table_entry *ptr = &dev_table[dev_data->devid];
+
+ spin_lock(&dev_data->dte_lock);
+
+ if (!(ptr->data[0] & DTE_FLAG_V)) {
+ /* Existing DTE is not valid. */
+ write_upper(ptr, new);
+ write_lower(ptr, new);
+ iommu_flush_sync_dte(iommu, dev_data->devid);
+ } else if (!(new->data[0] & DTE_FLAG_V)) {
+ /* Existing DTE is valid. New DTE is not valid. */
+ write_lower(ptr, new);
+ write_upper(ptr, new);
+ iommu_flush_sync_dte(iommu, dev_data->devid);
+ } else {
+ /* Existing & new DTEs are valid. */
+ if (!FIELD_GET(DTE_FLAG_GV, ptr->data[0])) {
+ /* Existing DTE has no guest page table. */
+ write_upper(ptr, new);
+ write_lower(ptr, new);
+ iommu_flush_sync_dte(iommu, dev_data->devid);
+ } else if (!FIELD_GET(DTE_FLAG_GV, new->data[0])) {
+ /*
+ * Existing DTE has guest page table,
+ * new DTE has no guest page table,
+ */
+ write_lower(ptr, new);
+ write_upper(ptr, new);
+ iommu_flush_sync_dte(iommu, dev_data->devid);
+ } else {
+ /*
+ * Existing DTE has guest page table,
+ * new DTE has guest page table.
+ */
+ struct dev_table_entry clear = {};
+
+ /* First disable DTE */
+ write_lower(ptr, &clear);
+ iommu_flush_sync_dte(iommu, dev_data->devid);
+
+ /* Then update DTE */
+ write_upper(ptr, new);
+ write_lower(ptr, new);
+ iommu_flush_sync_dte(iommu, dev_data->devid);
+ }
+ }
+
+ spin_unlock(&dev_data->dte_lock);
+}
+
static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom)
{
return (pdom && (pdom->pd_mode == PD_MODE_V2));
@@ -205,6 +290,7 @@ static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
return NULL;
spin_lock_init(&dev_data->lock);
+ spin_lock_init(&dev_data->dte_lock);
dev_data->devid = devid;
ratelimit_default_init(&dev_data->rs);
@@ -1256,6 +1342,16 @@ static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
return iommu_queue_command(iommu, &cmd);
}
+int iommu_flush_sync_dte(struct amd_iommu *iommu, u16 devid)
+{
+ int ret;
+
+ ret = iommu_flush_dte(iommu, devid);
+ if (!ret)
+ iommu_completion_wait(iommu);
+ return ret;
+}
+
static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
{
u32 devid;
--
2.34.1
On Mon, Sep 16, 2024 at 05:18:01PM +0000, Suravee Suthikulpanit wrote:
> +static void write_lower(struct dev_table_entry *ptr, struct dev_table_entry *new)
> +{
> + struct dev_table_entry old = {};
> +
> + do {
> + old.data128[0] = ptr->data128[0];
> + } while (!try_cmpxchg128(&ptr->data128[0], &old.data128[0], new->data128[0]));
> +}
> +
> +/*
> + * Note:
> + * IOMMU reads the entire Device Table entry in a single 256-bit transaction
> + * but the driver is programming DTE using 2 128-bit cmpxchg. So, the driver
> + * need to ensure the following:
I wonder if the intention here was to use a SSE operation to do the
256 bit store from the CPU side too? Just thinking aloud
> + if (!(ptr->data[0] & DTE_FLAG_V)) {
> + /* Existing DTE is not valid. */
> + write_upper(ptr, new);
> + write_lower(ptr, new);
> + iommu_flush_sync_dte(iommu, dev_data->devid);
> + } else if (!(new->data[0] & DTE_FLAG_V)) {
> + /* Existing DTE is valid. New DTE is not valid. */
> + write_lower(ptr, new);
> + write_upper(ptr, new);
> + iommu_flush_sync_dte(iommu, dev_data->devid);
> + } else {
> + /* Existing & new DTEs are valid. */
> + if (!FIELD_GET(DTE_FLAG_GV, ptr->data[0])) {
> + /* Existing DTE has no guest page table. */
> + write_upper(ptr, new);
> + write_lower(ptr, new);
> + iommu_flush_sync_dte(iommu, dev_data->devid);
> + } else if (!FIELD_GET(DTE_FLAG_GV, new->data[0])) {
> + /*
> + * Existing DTE has guest page table,
> + * new DTE has no guest page table,
> + */
> + write_lower(ptr, new);
> + write_upper(ptr, new);
> + iommu_flush_sync_dte(iommu, dev_data->devid);
> + } else {
> + /*
> + * Existing DTE has guest page table,
> + * new DTE has guest page table.
> + */
> + struct dev_table_entry clear = {};
> +
> + /* First disable DTE */
> + write_lower(ptr, &clear);
> + iommu_flush_sync_dte(iommu, dev_data->devid);
> +
> + /* Then update DTE */
> + write_upper(ptr, new);
> + write_lower(ptr, new);
> + iommu_flush_sync_dte(iommu, dev_data->devid);
> + }
There is one branch missing where GV is valid in both and the [1]
doesn't change. Ie atomic replace of a GCR3 table.
And maybe this will need more branches later for the viommu stuff?
But otherwise yes this captures what is needed just fine.
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
> @@ -1256,6 +1342,16 @@ static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
> +int iommu_flush_sync_dte(struct amd_iommu *iommu, u16 devid)
> +{
> + int ret;
> +
> + ret = iommu_flush_dte(iommu, devid);
> + if (!ret)
> + iommu_completion_wait(iommu);
> + return ret;
> +}
Maybe this doesn't need to return an error since we can't handle
failure to flush DTE tables..
Jason
On 9/27/2024 2:46 AM, Jason Gunthorpe wrote:
> On Mon, Sep 16, 2024 at 05:18:01PM +0000, Suravee Suthikulpanit wrote:
>
> ....
>
>> + if (!(ptr->data[0] & DTE_FLAG_V)) {
>> + /* Existing DTE is not valid. */
>> + write_upper(ptr, new);
>> + write_lower(ptr, new);
>> + iommu_flush_sync_dte(iommu, dev_data->devid);
>> + } else if (!(new->data[0] & DTE_FLAG_V)) {
>> + /* Existing DTE is valid. New DTE is not valid. */
>> + write_lower(ptr, new);
>> + write_upper(ptr, new);
>> + iommu_flush_sync_dte(iommu, dev_data->devid);
>> + } else {
>> + /* Existing & new DTEs are valid. */
>> + if (!FIELD_GET(DTE_FLAG_GV, ptr->data[0])) {
>> + /* Existing DTE has no guest page table. */
>> + write_upper(ptr, new);
>> + write_lower(ptr, new);
>> + iommu_flush_sync_dte(iommu, dev_data->devid);
>> + } else if (!FIELD_GET(DTE_FLAG_GV, new->data[0])) {
>> + /*
>> + * Existing DTE has guest page table,
>> + * new DTE has no guest page table,
>> + */
>> + write_lower(ptr, new);
>> + write_upper(ptr, new);
>> + iommu_flush_sync_dte(iommu, dev_data->devid);
>> + } else {
>> + /*
>> + * Existing DTE has guest page table,
>> + * new DTE has guest page table.
>> + */
>> + struct dev_table_entry clear = {};
>> +
>> + /* First disable DTE */
>> + write_lower(ptr, &clear);
>> + iommu_flush_sync_dte(iommu, dev_data->devid);
>> +
>> + /* Then update DTE */
>> + write_upper(ptr, new);
>> + write_lower(ptr, new);
>> + iommu_flush_sync_dte(iommu, dev_data->devid);
>> + }
>
> There is one branch missing where GV is valid in both and the [1]
> doesn't change. Ie atomic replace of a GCR3 table.
Not sure if I follow this.
> And maybe this will need more branches later for the viommu stuff?
I will take care of this later once we introduce the change for vIOMMU
stuff.
> But otherwise yes this captures what is needed just fine.
>
> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
>
>> @@ -1256,6 +1342,16 @@ static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
>> +int iommu_flush_sync_dte(struct amd_iommu *iommu, u16 devid)
>> +{
>> + int ret;
>> +
>> + ret = iommu_flush_dte(iommu, devid);
>> + if (!ret)
>> + iommu_completion_wait(iommu);
>> + return ret;
>> +}
>
> Maybe this doesn't need to return an error since we can't handle
> failure to flush DTE tables..
Okey.
Thanks,
Suravee
On Thu, Oct 03, 2024 at 11:15:53PM +0700, Suthikulpanit, Suravee wrote:
> On 9/27/2024 2:46 AM, Jason Gunthorpe wrote:
> > On Mon, Sep 16, 2024 at 05:18:01PM +0000, Suravee Suthikulpanit wrote:
> >
> > ....
> >
> > > + if (!(ptr->data[0] & DTE_FLAG_V)) {
> > > + /* Existing DTE is not valid. */
> > > + write_upper(ptr, new);
> > > + write_lower(ptr, new);
> > > + iommu_flush_sync_dte(iommu, dev_data->devid);
> > > + } else if (!(new->data[0] & DTE_FLAG_V)) {
> > > + /* Existing DTE is valid. New DTE is not valid. */
> > > + write_lower(ptr, new);
> > > + write_upper(ptr, new);
> > > + iommu_flush_sync_dte(iommu, dev_data->devid);
> > > + } else {
> > > + /* Existing & new DTEs are valid. */
> > > + if (!FIELD_GET(DTE_FLAG_GV, ptr->data[0])) {
> > > + /* Existing DTE has no guest page table. */
> > > + write_upper(ptr, new);
> > > + write_lower(ptr, new);
> > > + iommu_flush_sync_dte(iommu, dev_data->devid);
> > > + } else if (!FIELD_GET(DTE_FLAG_GV, new->data[0])) {
> > > + /*
> > > + * Existing DTE has guest page table,
> > > + * new DTE has no guest page table,
> > > + */
> > > + write_lower(ptr, new);
> > > + write_upper(ptr, new);
> > > + iommu_flush_sync_dte(iommu, dev_data->devid);
> > > + } else {
> > > + /*
> > > + * Existing DTE has guest page table,
> > > + * new DTE has guest page table.
> > > + */
> > > + struct dev_table_entry clear = {};
> > > +
> > > + /* First disable DTE */
> > > + write_lower(ptr, &clear);
> > > + iommu_flush_sync_dte(iommu, dev_data->devid);
> > > +
> > > + /* Then update DTE */
> > > + write_upper(ptr, new);
> > > + write_lower(ptr, new);
> > > + iommu_flush_sync_dte(iommu, dev_data->devid);
> > > + }
> >
> > There is one branch missing where GV is valid in both and the [1]
> > doesn't change. Ie atomic replace of a GCR3 table.
>
> Not sure if I follow this.
Something like:
if (FIELD_GET(DTE_FLAG_GV, ptr->data[0]) &&
FIELD_GET(DTE_FLAG_GV, new->data[0]) &&
(ptr->data[2] & DTE_INTR_MASK) == (new->data[2] & DTE_INTR_MASK)) {
/* GCR3 table has changed, but the same number of levels, no need to disable DTE */
write_lower(ptr, new);
iommu_flush_sync_dte(iommu, dev_data->devid);
}
Jason
© 2016 - 2026 Red Hat, Inc.