The physical AMD IOMMU supports up to 64 bits of DMA address. When a
device tries to read or write from a given DMA address, the IOMMU
translates the address using the page table assigned to that device.
Since the IOMMU uses per-device page tables, the emulated IOMMU should
use a cache tag of 68 bits (64-bit address - 12-bit page alignment +
16-bit device ID).

The current emulated AMD IOMMU uses a GLib hash table as a software
IOTLB and uses a 64-bit key to store the IOVA and device ID, which
limits the IOVA to 60 bits. This causes a failure while setting up the
device when the guest is booted with "iommu.forcedac=1".

To solve this problem, use the 64-bit IOVA and 16-bit devid as the key
to store entries in the IOTLB; use the upper 52 bits of the IOVA (GFN)
and the lower 12 bits of the devid to construct the 64-bit hash key in
order to avoid truncation as much as possible (reducing hash
collisions).

Fixes: d29a09ca6842 ("hw/i386: Introduce AMD IOMMU")
Signed-off-by: Sairaj Kodilkar <sarunkod@amd.com>
---
hw/i386/amd_iommu.c | 57 ++++++++++++++++++++++++++++++---------------
hw/i386/amd_iommu.h | 4 ++--
2 files changed, 40 insertions(+), 21 deletions(-)
diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c
index c2cd5213eb1b..5487894aba40 100644
--- a/hw/i386/amd_iommu.c
+++ b/hw/i386/amd_iommu.c
@@ -106,6 +106,11 @@ typedef struct AMDVIAsKey {
uint8_t devfn;
} AMDVIAsKey;
+typedef struct AMDVIIOTLBKey {
+ uint64_t gfn;
+ uint16_t devid;
+} AMDVIIOTLBKey;
+
uint64_t amdvi_extended_feature_register(AMDVIState *s)
{
uint64_t feature = AMDVI_DEFAULT_EXT_FEATURES;
@@ -377,16 +382,6 @@ static void amdvi_log_pagetab_error(AMDVIState *s, uint16_t devid,
PCI_STATUS_SIG_TARGET_ABORT);
}
-static gboolean amdvi_uint64_equal(gconstpointer v1, gconstpointer v2)
-{
- return *((const uint64_t *)v1) == *((const uint64_t *)v2);
-}
-
-static guint amdvi_uint64_hash(gconstpointer v)
-{
- return (guint)*(const uint64_t *)v;
-}
-
static gboolean amdvi_as_equal(gconstpointer v1, gconstpointer v2)
{
const AMDVIAsKey *key1 = v1;
@@ -425,11 +420,30 @@ static AMDVIAddressSpace *amdvi_get_as_by_devid(AMDVIState *s, uint16_t devid)
amdvi_find_as_by_devid, &devid);
}
+static gboolean amdvi_iotlb_equal(gconstpointer v1, gconstpointer v2)
+{
+ const AMDVIIOTLBKey *key1 = v1;
+ const AMDVIIOTLBKey *key2 = v2;
+
+ return key1->devid == key2->devid && key1->gfn == key2->gfn;
+}
+
+static guint amdvi_iotlb_hash(gconstpointer v)
+{
+ const AMDVIIOTLBKey *key = v;
+ /* Use GPA and DEVID to find the bucket */
+ return (guint)(key->gfn << AMDVI_PAGE_SHIFT_4K |
+ (key->devid & ~AMDVI_PAGE_MASK_4K));
+}
+
+
static AMDVIIOTLBEntry *amdvi_iotlb_lookup(AMDVIState *s, hwaddr addr,
uint64_t devid)
{
- uint64_t key = (addr >> AMDVI_PAGE_SHIFT_4K) |
- ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
+ AMDVIIOTLBKey key = {
+ .gfn = AMDVI_GET_IOTLB_GFN(addr),
+ .devid = devid,
+ };
return g_hash_table_lookup(s->iotlb, &key);
}
@@ -451,8 +465,10 @@ static gboolean amdvi_iotlb_remove_by_devid(gpointer key, gpointer value,
static void amdvi_iotlb_remove_page(AMDVIState *s, hwaddr addr,
uint64_t devid)
{
- uint64_t key = (addr >> AMDVI_PAGE_SHIFT_4K) |
- ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
+ AMDVIIOTLBKey key = {
+ .gfn = AMDVI_GET_IOTLB_GFN(addr),
+ .devid = devid,
+ };
g_hash_table_remove(s->iotlb, &key);
}
@@ -463,8 +479,10 @@ static void amdvi_update_iotlb(AMDVIState *s, uint16_t devid,
/* don't cache erroneous translations */
if (to_cache.perm != IOMMU_NONE) {
AMDVIIOTLBEntry *entry = g_new(AMDVIIOTLBEntry, 1);
- uint64_t *key = g_new(uint64_t, 1);
- uint64_t gfn = gpa >> AMDVI_PAGE_SHIFT_4K;
+ AMDVIIOTLBKey *key = g_new(AMDVIIOTLBKey, 1);
+
+ key->gfn = AMDVI_GET_IOTLB_GFN(gpa);
+ key->devid = devid;
trace_amdvi_cache_update(domid, PCI_BUS_NUM(devid), PCI_SLOT(devid),
PCI_FUNC(devid), gpa, to_cache.translated_addr);
@@ -477,7 +495,8 @@ static void amdvi_update_iotlb(AMDVIState *s, uint16_t devid,
entry->perms = to_cache.perm;
entry->translated_addr = to_cache.translated_addr;
entry->page_mask = to_cache.addr_mask;
- *key = gfn | ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
+ entry->devid = devid;
+
g_hash_table_replace(s->iotlb, key, entry);
}
}
@@ -2526,8 +2545,8 @@ static void amdvi_sysbus_realize(DeviceState *dev, Error **errp)
}
}
- s->iotlb = g_hash_table_new_full(amdvi_uint64_hash,
- amdvi_uint64_equal, g_free, g_free);
+ s->iotlb = g_hash_table_new_full(amdvi_iotlb_hash,
+ amdvi_iotlb_equal, g_free, g_free);
s->address_spaces = g_hash_table_new_full(amdvi_as_hash,
amdvi_as_equal, g_free, g_free);
diff --git a/hw/i386/amd_iommu.h b/hw/i386/amd_iommu.h
index 38471b95d153..302ccca5121f 100644
--- a/hw/i386/amd_iommu.h
+++ b/hw/i386/amd_iommu.h
@@ -220,8 +220,8 @@
#define PAGE_SIZE_PTE_COUNT(pgsz) (1ULL << ((ctz64(pgsz) - 12) % 9))
/* IOTLB */
-#define AMDVI_IOTLB_MAX_SIZE 1024
-#define AMDVI_DEVID_SHIFT 36
+#define AMDVI_IOTLB_MAX_SIZE 1024
+#define AMDVI_GET_IOTLB_GFN(addr) (addr >> AMDVI_PAGE_SHIFT_4K)
/* default extended feature */
#define AMDVI_DEFAULT_EXT_FEATURES \
--
2.34.1
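For readers who want to exercise the new keying scheme outside QEMU, the
following is a minimal standalone GLib sketch that mirrors the hash/equality
pair introduced above. It is an illustration only, not code from the patch:
the AMDVI_* constants are replaced by local stand-ins (a 12-bit page shift
and a 0xFFF devid mask), and the cached entry is reduced to a single
translated address.

/* build (assumption): gcc iotlb_sketch.c $(pkg-config --cflags --libs glib-2.0) */
#include <glib.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT_4K   12        /* local stand-in for AMDVI_PAGE_SHIFT_4K */
#define DEVID_LOW_MASK  0xFFFULL  /* low 12 bits of the device ID */

typedef struct {
    uint64_t gfn;                 /* IOVA >> 12, full 52 bits preserved */
    uint16_t devid;
} IOTLBKey;

typedef struct {
    uint64_t translated_addr;
} IOTLBEntry;

/* Equality compares the full 68-bit tag: 52-bit GFN plus 16-bit devid. */
static gboolean iotlb_key_equal(gconstpointer v1, gconstpointer v2)
{
    const IOTLBKey *k1 = v1, *k2 = v2;

    return k1->devid == k2->devid && k1->gfn == k2->gfn;
}

/* The hash only selects a bucket: GFN in the upper bits, the low 12
 * devid bits folded into the page-offset bits, truncated to guint. */
static guint iotlb_key_hash(gconstpointer v)
{
    const IOTLBKey *k = v;

    return (guint)((k->gfn << PAGE_SHIFT_4K) | (k->devid & DEVID_LOW_MASK));
}

int main(void)
{
    GHashTable *iotlb = g_hash_table_new_full(iotlb_key_hash, iotlb_key_equal,
                                              g_free, g_free);
    /* An IOVA with the top bits set, as produced under iommu.forcedac=1. */
    uint64_t iova = 0xffffffffff000000ULL;

    IOTLBKey *key = g_new(IOTLBKey, 1);
    IOTLBEntry *entry = g_new(IOTLBEntry, 1);
    key->gfn = iova >> PAGE_SHIFT_4K;
    key->devid = 0x0010;
    entry->translated_addr = 0x12345000ULL;
    g_hash_table_replace(iotlb, key, entry);

    /* Lookups use a stack key; only the exact (gfn, devid) pair hits. */
    IOTLBKey probe = { .gfn = iova >> PAGE_SHIFT_4K, .devid = 0x0010 };
    printf("same devid:  %s\n",
           g_hash_table_lookup(iotlb, &probe) ? "hit" : "miss");

    probe.devid = 0x0011;    /* different device, same IOVA */
    printf("other devid: %s\n",
           g_hash_table_lookup(iotlb, &probe) ? "hit" : "miss");

    g_hash_table_destroy(iotlb);
    return 0;
}

Because equality checks the full (gfn, devid) pair, the 32-bit hash only
influences bucket placement; two high IOVAs that would have aliased under
the old 64-bit key are now kept as distinct entries.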
On Fri, Oct 17, 2025 at 11:43:22AM +0530, Sairaj Kodilkar wrote:
> [...]
> To solve this problem, Use 64 bit IOVA and 16 bit devid as key to store
> entries in IOTLB; Use upper 52 bits of IOVA (GFN) and lower 12 bits of
> the devid to construct the 64 bit hash key in order avoid the truncation
to avoid
> as much as possible (reducing hash collisions).
> [...]
On 11/3/25 11:12 AM, Michael S. Tsirkin wrote:
> On Fri, Oct 17, 2025 at 11:43:22AM +0530, Sairaj Kodilkar wrote:
>> [...]
>> To solve this problem, Use 64 bit IOVA and 16 bit devid as key to store
>> entries in IOTLB; Use upper 52 bits of IOVA (GFN) and lower 12 bits of
>> the devid to construct the 64 bit hash key in order avoid the truncation
>
> to avoid
>
Will fix this as well. I'll rework the commit message to the following:

amd_iommu: Support 64-bit address for IOTLB lookup

The physical AMD IOMMU supports up to 64 bits of IOVA. When a device
tries to read or write from a given DMA address, the IOMMU translates
the address using the I/O page tables assigned to that device. Since the
emulated IOMMU uses per-device page tables, an ideal cache tag would
need to be 68 bits (64-bit address - 12-bit page alignment + 16-bit
device ID).

The current software IOTLB implementation uses a GLib hash table with a
64-bit key to hash both the IOVA and device ID, which limits the IOVA to
60 bits. This causes a failure while setting up the device when a guest
is booted with "iommu.forcedac=1", which forces the use of DMA addresses
at the top of the 64-bit address space.

To address this issue, construct the 64-bit hash key using the upper 52
bits of IOVA (GFN) and lower 12 bits of the device ID to avoid
truncation as much as possible (reducing hash collisions).
Alejandro
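As a standalone illustration of the truncation described above (again not
part of the patch), the removed key construction,
(iova >> 12) | ((uint64_t)devid << 36), lets two different (devid, IOVA)
pairs collapse to the same 64-bit key once the IOVA grows past the bits
left over for the GFN. The sketch below constructs one such collision with
a forcedac-style address:

#include <stdint.h>
#include <stdio.h>

/* Key layout removed by the patch: GFN in the low bits, devid at bit 36.
 * A 16-bit devid occupies bits 36-51, so any GFN bit at or above bit 36
 * overlaps it and the key becomes ambiguous. */
static uint64_t old_iotlb_key(uint64_t iova, uint16_t devid)
{
    return (iova >> 12) | ((uint64_t)devid << 36);
}

int main(void)
{
    /* devid 0x8000 at IOVA 0 ... */
    uint64_t a = old_iotlb_key(0, 0x8000);
    /* ... collides with devid 0 at IOVA 2^63, the kind of address a guest
     * ends up using with iommu.forcedac=1. */
    uint64_t b = old_iotlb_key(1ULL << 63, 0x0000);

    printf("key A = 0x%016llx\n", (unsigned long long)a);
    printf("key B = 0x%016llx\n", (unsigned long long)b);
    printf("%s\n", a == b ? "collision: the two entries are indistinguishable"
                          : "distinct keys");
    return 0;
}

With AMDVIIOTLBKey both fields are kept whole, so such pairs can at worst
share a hash bucket and are then separated by amdvi_iotlb_equal().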
On 10/17/2025 11:43 AM, Sairaj Kodilkar wrote:
> [...]
> Fixes: d29a09ca6842 ("hw/i386: Introduce AMD IOMMU")
> Signed-off-by: Sairaj Kodilkar <sarunkod@amd.com>
Reviewed-by: Vasant Hegde <vasant.hegde@amd.com>
-Vasant