The current implementation does not follow 128-bit write requirement
to update DTE as specified in the AMD I/O Virtualization Technology
(IOMMU) Specification.
Therefore, modify the struct dev_table_entry to contain union of u128 data
array, and introduce two helper functions:
* update_dte256() to update DTE using two 128-bit cmpxchg
operations to update 256-bit DTE with the modified structure.
Also use the struct iommu_dev_data.dte_sem to synchronize 256-bit
data update.
* get_dte256() to copy 256-bit DTE to the provided structure.
Also use the struct iommu_dev_data.dte_sem to synchronize 256-bit
data access.
Also, update existing code to use the new helper functions in this and
subsequent patches.
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
---
drivers/iommu/amd/amd_iommu_types.h | 5 +-
drivers/iommu/amd/iommu.c | 81 +++++++++++++++++++++++------
2 files changed, 70 insertions(+), 16 deletions(-)
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 65f3a073999d..2787d6af5a59 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -884,7 +884,10 @@ extern struct amd_iommu *amd_iommus[MAX_IOMMUS];
* Structure defining one entry in the device table
*/
struct dev_table_entry {
- u64 data[4];
+ union {
+ u64 data[4];
+ u128 data128[2];
+ };
};
/*
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 994ed02842b9..93bca5c68bca 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -85,6 +85,47 @@ static void set_dte_entry(struct amd_iommu *iommu,
*
****************************************************************************/
+static void update_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
+ struct dev_table_entry *new)
+{
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+ struct dev_table_entry *ptr = &dev_table[dev_data->devid];
+ struct dev_table_entry old;
+ u128 tmp;
+
+ down_write(&dev_data->dte_sem);
+
+ old.data128[0] = ptr->data128[0];
+ old.data128[1] = ptr->data128[1];
+
+ tmp = cmpxchg128(&ptr->data128[1], old.data128[1], new->data128[1]);
+ if (tmp == old.data128[1]) {
+ if (!try_cmpxchg128(&ptr->data128[0], &old.data128[0], new->data128[0])) {
+ /* Restore hi 128-bit */
+ cmpxchg128(&ptr->data128[1], new->data128[1], tmp);
+ pr_err("%s: Failed. devid=%#x, dte=%016llx:%016llx:%016llx:%016llx\n",
+ __func__, dev_data->devid, new->data[0], new->data[1],
+ new->data[2], new->data[3]);
+ }
+ }
+
+ up_write(&dev_data->dte_sem);
+}
+
+static void get_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
+ struct dev_table_entry *dte)
+{
+ struct dev_table_entry *ptr;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+
+ ptr = &dev_table[dev_data->devid];
+
+ down_read(&dev_data->dte_sem);
+ dte->data128[0] = ptr->data128[0];
+ dte->data128[1] = ptr->data128[1];
+ up_read(&dev_data->dte_sem);
+}
+
static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom)
{
return (pdom && (pdom->pd_mode == PD_MODE_V2));
@@ -233,8 +274,9 @@ static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid
static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
{
+ struct dev_table_entry dte;
struct amd_iommu *iommu;
- struct dev_table_entry *dev_table;
+ struct iommu_dev_data *dev_data;
u16 devid = pci_dev_id(pdev);
if (devid == alias)
@@ -244,11 +286,19 @@ static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
if (!iommu)
return 0;
+ dev_data = dev_iommu_priv_get(&pdev->dev);
+ if (!dev_data)
+ return -EINVAL;
+
+ get_dte256(iommu, dev_data, &dte);
+
+ /* Setup for alias */
+ dev_data = search_dev_data(iommu, alias);
+ if (!dev_data)
+ return -EINVAL;
+
+ update_dte256(iommu, dev_data, &dte);
amd_iommu_set_rlookup_table(iommu, alias);
- dev_table = get_dev_table(iommu);
- memcpy(dev_table[alias].data,
- dev_table[devid].data,
- sizeof(dev_table[alias].data));
return 0;
}
@@ -584,10 +634,13 @@ static void amd_iommu_uninit_device(struct device *dev)
static void dump_dte_entry(struct amd_iommu *iommu, u16 devid)
{
int i;
- struct dev_table_entry *dev_table = get_dev_table(iommu);
+ struct dev_table_entry dte;
+ struct iommu_dev_data *dev_data = find_dev_data(iommu, devid);
+
+ get_dte256(iommu, dev_data, &dte);
for (i = 0; i < 4; ++i)
- pr_err("DTE[%d]: %016llx\n", i, dev_table[devid].data[i]);
+ pr_err("DTE[%d]: %016llx\n", i, dte.data[i]);
}
static void dump_command(unsigned long phys_addr)
@@ -2667,12 +2720,10 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
bool enable)
{
struct protection_domain *pdomain = to_pdomain(domain);
- struct dev_table_entry *dev_table;
struct iommu_dev_data *dev_data;
bool domain_flush = false;
struct amd_iommu *iommu;
unsigned long flags;
- u64 pte_root;
spin_lock_irqsave(&pdomain->lock, flags);
if (!(pdomain->dirty_tracking ^ enable)) {
@@ -2681,16 +2732,16 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
}
list_for_each_entry(dev_data, &pdomain->dev_list, list) {
- iommu = get_amd_iommu_from_dev_data(dev_data);
+ struct dev_table_entry dte;
- dev_table = get_dev_table(iommu);
- pte_root = dev_table[dev_data->devid].data[0];
+ iommu = get_amd_iommu_from_dev_data(dev_data);
+ get_dte256(iommu, dev_data, &dte);
- pte_root = (enable ? pte_root | DTE_FLAG_HAD :
- pte_root & ~DTE_FLAG_HAD);
+ dte.data[0] = (enable ? dte.data[0] | DTE_FLAG_HAD :
+ dte.data[0] & ~DTE_FLAG_HAD);
/* Flush device DTE */
- dev_table[dev_data->devid].data[0] = pte_root;
+ update_dte256(iommu, dev_data, &dte);
device_flush_dte(dev_data);
domain_flush = true;
}
--
2.34.1
On Thu, Aug 29, 2024 at 06:07:24PM +0000, Suravee Suthikulpanit wrote:
> diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
> index 994ed02842b9..93bca5c68bca 100644
> --- a/drivers/iommu/amd/iommu.c
> +++ b/drivers/iommu/amd/iommu.c
> @@ -85,6 +85,47 @@ static void set_dte_entry(struct amd_iommu *iommu,
> *
> ****************************************************************************/
>
> +static void update_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
> + struct dev_table_entry *new)
> +{
> + struct dev_table_entry *dev_table = get_dev_table(iommu);
> + struct dev_table_entry *ptr = &dev_table[dev_data->devid];
> + struct dev_table_entry old;
> + u128 tmp;
> +
> + down_write(&dev_data->dte_sem);
This locking is too narrow, you need the critical region to span from
the get_dte256() till the update_dte256() because the get is
retrieving the value written by set_dte_irq_entry(), and it must not
change while the new DTE is worked on.
I suggest you copy the IRQ data here in this function under the lock
from old to new and then store it so it is always fresh.
Ideally you would remove get_dte256() because the driver *really*
shouldn't be changing the DTE in some way that already assumes
something is in the DTE (for instance my remarks on the nesting work)
Really the only reason to read the DTE is the get the IRQ data..
> + old.data128[0] = ptr->data128[0];
> + old.data128[1] = ptr->data128[1];
> +
> + tmp = cmpxchg128(&ptr->data128[1], old.data128[1], new->data128[1]);
> + if (tmp == old.data128[1]) {
> + if (!try_cmpxchg128(&ptr->data128[0], &old.data128[0], new->data128[0])) {
> + /* Restore hi 128-bit */
> + cmpxchg128(&ptr->data128[1], new->data128[1], tmp);
I don't think you should restore, this should reflect a locking error
but we still need to move forward and put some kind of correct
data.. The code can't go backwards so it should try to move forwards..
On ordering, I don't know, is this OK?
If you are leaving/entering nesting mode I think you have to write the
[2] value in the right sequence, you don't want to have the viommu
enabled unless the host page table is setup properly. So [2] is
written last when enabling, and first when disabling. Flushes required
after each write to ensure the HW doesn't see a cross-128 word bit
tear.
GuestPagingMode also has to be sequenced correctly, the GCR3 table
pointer should be invalid when it is changed, meaning you have to
write it and flush before storing the GCR3 table, and the reverse to
undo it.
The ordering, including when DTE flushes are needed, is pretty
hard. This is much simpler than, say, ARM, so I think you could open
code it, but it should be a pretty sizable bit of logic to figure out
what to do.
> +static void get_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
> + struct dev_table_entry *dte)
> +{
> + struct dev_table_entry *ptr;
> + struct dev_table_entry *dev_table = get_dev_table(iommu);
> +
> + ptr = &dev_table[dev_data->devid];
> +
> + down_read(&dev_data->dte_sem);
> + dte->data128[0] = ptr->data128[0];
> + dte->data128[1] = ptr->data128[1];
> + up_read(&dev_data->dte_sem);
I don't think you need a rwsem either. As above, you shouldn't be
reading anyhow to build a DTE, and you can't allow the interrupt
update to run regardless, so a simple spinlock would be sufficient and
faster, I think.
> @@ -2681,16 +2732,16 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
> }
>
> list_for_each_entry(dev_data, &pdomain->dev_list, list) {
> - iommu = get_amd_iommu_from_dev_data(dev_data);
> + struct dev_table_entry dte;
>
> - dev_table = get_dev_table(iommu);
> - pte_root = dev_table[dev_data->devid].data[0];
> + iommu = get_amd_iommu_from_dev_data(dev_data);
> + get_dte256(iommu, dev_data, &dte);
>
> - pte_root = (enable ? pte_root | DTE_FLAG_HAD :
> - pte_root & ~DTE_FLAG_HAD);
> + dte.data[0] = (enable ? dte.data[0] | DTE_FLAG_HAD :
> + dte.data[0] & ~DTE_FLAG_HAD);
>
And this doesn't need all the logic just to flip one bit in a single
64bit quantity..
Jason
Hi,
On 8/30/2024 2:28 AM, Jason Gunthorpe wrote:
> On Thu, Aug 29, 2024 at 06:07:24PM +0000, Suravee Suthikulpanit wrote:
>
>> diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
>> index 994ed02842b9..93bca5c68bca 100644
>> --- a/drivers/iommu/amd/iommu.c
>> +++ b/drivers/iommu/amd/iommu.c
>> @@ -85,6 +85,47 @@ static void set_dte_entry(struct amd_iommu *iommu,
>> *
>> ****************************************************************************/
>>
>> +static void update_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
>> + struct dev_table_entry *new)
>> +{
>> + struct dev_table_entry *dev_table = get_dev_table(iommu);
>> + struct dev_table_entry *ptr = &dev_table[dev_data->devid];
>> + struct dev_table_entry old;
>> + u128 tmp;
>> +
>> + down_write(&dev_data->dte_sem);
>
> This locking is too narrow, you need the critical region to span from
> the get_dte256() till the update_dte256() because the get is
> retrieving the value written by set_dte_irq_entry(), and it must not
> change while the new DTE is worked on.
Ok.
> I suggest you copy the IRQ data here in this function under the lock
> from old to new and then store it so it is always fresh.
>
> Ideally you would remove get_dte256() because the driver *really*
> shouldn't be changing the DTE in some way that already assumes
> something is in the DTE (for instance my remarks on the nesting work)
>
> Really the only reason to read the DTE is the get the IRQ data..
I plan to use get_dte256() helper function to extract DTE for various
purposes. Getting the IRQ data is only one use case. There are other
fields, which are programmed early in the driver init phase (i.e.
DTE[96:106]).
>> + old.data128[0] = ptr->data128[0];
>> + old.data128[1] = ptr->data128[1];
>> +
>> + tmp = cmpxchg128(&ptr->data128[1], old.data128[1], new->data128[1]);
>> + if (tmp == old.data128[1]) {
>> + if (!try_cmpxchg128(&ptr->data128[0], &old.data128[0], new->data128[0])) {
>> + /* Restore hi 128-bit */
>> + cmpxchg128(&ptr->data128[1], new->data128[1], tmp);
>
> I don't think you should restore, this should reflect a locking error
> but we still need to move forward and put some kind of correct
> data.. The code can't go backwards so it should try to move forwards..
In case of error, what if we pr_warn and put the device in blocking mode
since we need to prevent malicious DMAs.
> On ordering, I don't know, is this OK?
>
> If you are leaving/entering nesting mode I think you have to write the
> [2] value in the right sequence, you don't want to have the viommu
> enabled unless the host page table is setup properly. So [2] is
> written last when enabling, and first when disabling. Flushes required
> after each write to ensure the HW doesn't see a cross-128 word bit
> tear.
> > GuestPagingMode also has to be sequenced correctly, the GCR3 table
> pointer should be invalid when it is changed, meaning you have to
> write it and flush before storing the GCR3 table, and the reverse to
> undo it.
>
> The ordering, including when DTE flushes are needed, is pretty
> hard. This is much simpler than, say, ARM, so I think you could open
> code it, but it should be a pretty sizable bit of logic to figure out
> what to do.
IOMMU hardware does not do partial interpretation of the DTE, and SW ensures a DTE
flush after updating the DTE. Therefore, ordering should not be a
concern here as long as the driver correctly programs the entry.
>> +static void get_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
>> + struct dev_table_entry *dte)
>> +{
>> + struct dev_table_entry *ptr;
>> + struct dev_table_entry *dev_table = get_dev_table(iommu);
>> +
>> + ptr = &dev_table[dev_data->devid];
>> +
>> + down_read(&dev_data->dte_sem);
>> + dte->data128[0] = ptr->data128[0];
>> + dte->data128[1] = ptr->data128[1];
>> + up_read(&dev_data->dte_sem);
>
> I don't think you need a rwsem either. As above, you shouldn't be
> reading anyhow to build a DTE, and you can't allow the interrupt
> update to run regardless, so a simple spinlock would be sufficient and
> faster, I think.
Ok.
>> @@ -2681,16 +2732,16 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
>> }
>>
>> list_for_each_entry(dev_data, &pdomain->dev_list, list) {
>> - iommu = get_amd_iommu_from_dev_data(dev_data);
>> + struct dev_table_entry dte;
>>
>> - dev_table = get_dev_table(iommu);
>> - pte_root = dev_table[dev_data->devid].data[0];
>> + iommu = get_amd_iommu_from_dev_data(dev_data);
>> + get_dte256(iommu, dev_data, &dte);
>>
>> - pte_root = (enable ? pte_root | DTE_FLAG_HAD :
>> - pte_root & ~DTE_FLAG_HAD);
>> + dte.data[0] = (enable ? dte.data[0] | DTE_FLAG_HAD :
>> + dte.data[0] & ~DTE_FLAG_HAD);
>>
>
> And this doesn't need all the logic just to flip one bit in a single
> 64bit quantity..
Ok
Thanks,
Suravee
On Fri, Sep 06, 2024 at 12:54:25AM +0700, Suthikulpanit, Suravee wrote:
> Hi,
>
> On 8/30/2024 2:28 AM, Jason Gunthorpe wrote:
> > On Thu, Aug 29, 2024 at 06:07:24PM +0000, Suravee Suthikulpanit wrote:
> >
> > > diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
> > > index 994ed02842b9..93bca5c68bca 100644
> > > --- a/drivers/iommu/amd/iommu.c
> > > +++ b/drivers/iommu/amd/iommu.c
> > > @@ -85,6 +85,47 @@ static void set_dte_entry(struct amd_iommu *iommu,
> > > *
> > > ****************************************************************************/
> > > +static void update_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
> > > + struct dev_table_entry *new)
> > > +{
> > > + struct dev_table_entry *dev_table = get_dev_table(iommu);
> > > + struct dev_table_entry *ptr = &dev_table[dev_data->devid];
> > > + struct dev_table_entry old;
> > > + u128 tmp;
> > > +
> > > + down_write(&dev_data->dte_sem);
> >
> > This locking is too narrow, you need the critical region to span from
> > the get_dte256() till the update_dte256() because the get is
> > retrieving the value written by set_dte_irq_entry(), and it must not
> > change while the new DTE is worked on.
>
> Ok.
>
> > I suggest you copy the IRQ data here in this function under the lock
> > from old to new and then store it so it is always fresh.
> >
> > Ideally you would remove get_dte256() because the driver *really*
> > shouldn't be changing the DTE in some way that already assumes
> > something is in the DTE (for instance my remarks on the nesting work)
> >
> > Really the only reason to read the DTE is the get the IRQ data..
>
> I plan to use get_dte256() helper function to extract DTE for various
> purposes. Getting the IRQ data is only one use case. There are other fields,
> which are programmed early in the driver init phase (i.e. DTE[96:106]).
Sure, a model where you have specific 'fixed' fields and you
store them in the DTE is logical. You want to target something like
struct dte new_dte = init_dte(..)
new_dte |= [....]
program_dte()
Where init_dte could read out fixed bits from the existing DTE
> > I don't think you should restore, this should reflect a locking error
> > but we still need to move forward and put some kind of correct
> > data.. The code can't go backwards so it should try to move forwards..
>
> In case of error, what if we pr_warn and put the device in blocking mode
> since we need to prevent malicious DMAs.
IMHO a WARN_ON is fine, and alerts to the possible machine corruption
No need to do blocking, you should have a perfectly valid target DTE
that represents the state the HW is expected to be in. Resolve the
race by making it bin that state and move forwards.
> > On ordering, I don't know, is this OK?
> >
> > If you are leaving/entering nesting mode I think you have to write the
> > [2] value in the right sequence, you don't want to have the viommu
> > enabled unless the host page table is setup properly. So [2] is
> > written last when enabling, and first when disabling. Flushes required
> > after each write to ensure the HW doesn't see a cross-128 word bit
> > tear.
> > > GuestPagingMode also has to be sequenced correctly, the GCR3 table
> > pointer should be invalid when it is changed, meaning you have to
> > write it and flush before storing the GCR3 table, and the reverse to
> > undo it.
> >
> > The ordering, including when DTE flushes are needed, is pretty
> > hard. This is much simpler than, say, ARM, so I think you could open
> > code it, but it should be a pretty sizable bit of logic to figure out
> > what to do.
>
> IOMMU hardware do not do partial interpret of the DTE and SW ensure DTE
> flush after updating the DTE. Therefore, ordering should not be of a concern
> here as long as the driver correctly program the entry.
Even if the IOMMU HW does a perfect 256 bit atomic read you still have
to order the CPU writes correctly. It just means you don't need to
flush.
The guidelines in "2.2.2.2 Making Device Table Entry Changes" make
this clear. The individual CPU writes smaller than 256 bits have to be
sequenced right.
This section looks like it was written before translation bits were
placed in the other 128 bit word - it assumes a single 128 bit write
is always sufficient which isn't true anymore.
So you still have the issue of having to decide if you write 128 bit
[0] or [1] first.
Jason
On 9/6/2024 1:21 AM, Jason Gunthorpe wrote: >>> I don't think you should restore, this should reflect a locking error >>> but we still need to move forward and put some kind of correct >>> data.. The code can't go backwards so it should try to move forwards.. >> In case of error, what if we pr_warn and put the device in blocking mode >> since we need to prevent malicious DMAs. > IMHO a WARN_ON is fine, and alerts to the possible machine corruption > > No need to do blocking, you should have a perfectly valid target DTE > that represents the state the HW is expected to be in. Resolve the > race by making it bin that state and move forwards. What do you mean by "making it bin that state". >>> On ordering, I don't know, is this OK? >>> >>> If you are leaving/entering nesting mode I think you have to write the >>> [2] value in the right sequence, you don't want to have the viommu >>> enabled unless the host page table is setup properly. So [2] is >>> written last when enabling, and first when disabling. Flushes required >>> after each write to ensure the HW doesn't see a cross-128 word bit >>> tear. >>>> GuestPagingMode also has to be sequenced correctly, the GCR3 table >>> pointer should be invalid when it is changed, meaning you have to >>> write it and flush before storing the GCR3 table, and the reverse to >>> undo it. >>> >>> The ordering, including when DTE flushes are needed, is pretty >>> hard. This is much simpler than, say, ARM, so I think you could open >>> code it, but it should be a pretty sizable bit of logic to figure out >>> what to do. >> IOMMU hardware do not do partial interpret of the DTE and SW ensure DTE >> flush after updating the DTE. Therefore, ordering should not be of a concern >> here as long as the driver correctly program the entry. > Even if the IOMMU HW does a perfect 256 bit atomic read you still have > to order the CPU writes correctly. It just means you don't need to > flush. 
> > The guidelines in "2.2.2.2 Making Device Table Entry Changes" make > this clear. The individual CPU writes smaller than 256 bits have to be > sequenced right. For the interrupt remapping part, no special step is needed if we can do a 64-bit write. Similarly, for the address translation part, no special step is needed if we can do a 128-bit write. > This section looks like it was written before translation bits were > placed in the other 128 bit word - it assumes a single 128 bit write > is always sufficient which isn't true anymore. > > So you still have the issue of having to decide if you write 128 bit > [0] or [1] first. The GuestPagingMode bit is in effect when GV=1. So, the higher 128-bit (which contains GuestPagingMode bit) should be written first, followed by the lower 128-bit (which contains GV bit). Thanks, Suravee
On Fri, Sep 06, 2024 at 09:08:06PM +0700, Suthikulpanit, Suravee wrote: > On 9/6/2024 1:21 AM, Jason Gunthorpe wrote: > > > > I don't think you should restore, this should reflect a locking error > > > > but we still need to move forward and put some kind of correct > > > > data.. The code can't go backwards so it should try to move forwards.. > > > In case of error, what if we pr_warn and put the device in blocking mode > > > since we need to prevent malicious DMAs. > > IMHO a WARN_ON is fine, and alerts to the possible machine corruption > > No need to do blocking, you should have a perfectly valid target DTE > > that represents the state the HW is expected to be in. Resolve the > > race by making it bin that state and move forwards. > > What do you mean by "making it bin that state". Sorry, "be in that state" > > The guidelines in "2.2.2.2 Making Device Table Entry Changes" make > > this clear. The indivudal CPU writes smaller than 256 bits have to be > > sequenced right. > > For the interrupt remapping part, no special step is needed if we can write > do 64-bit write. Yes > Similary, for the address translation part, no special step is > needed if we can do 128-bit write. Except for GuestPagingMode, as below. > > This section looks like it was written before translation bits were > > placed in the other 128 bit word - it assumes a single 128 bit write > > is always sufficient which isn't true anymore. > > > > So you still have the issue of having to decide if you write 128 bit > > [0] or [1] first. > > The GuestPagingMode bit is in effect when GV=1. So, the higher 128-bit > (which contains GuestPagingMode bit) should be written first, and follow by > lower 128-bit (which contans GV bit). Yes, exactly. That is what I mean by ordering. When clearing GV=0 you have to do the reverse ordering, write the low 128 then the high. Jason
© 2016 - 2026 Red Hat, Inc.