When updating a page table descriptor in a way that requires
break-before-make, we temporarily disable the MMU to ensure that we
don't unmap the memory region that the code itself is executing from.

However, this is a condition we can check in a straightforward manner,
and if the regions are disjoint, we don't have to bother with the MMU
controls and can simply perform an ordinary break-before-make.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
ArmPkg/Include/Library/ArmMmuLib.h | 7 +-
ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c | 102 ++++++++++++++++----
ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibReplaceEntry.S | 43 +++++++--
3 files changed, 123 insertions(+), 29 deletions(-)
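
For reference, the disjointness test added to ReplaceTableEntry() boils
down to checking whether two addresses lie in the same naturally aligned
block: ((A ^ B) & ~BlockMask) == 0. The MMU only has to be disabled when
the block being remapped covers the replacement routine itself or the
entry being rewritten. A minimal standalone C sketch follows; the
SameBlock() helper is only for illustration and does not exist in the
tree:

  #include <stdbool.h>
  #include <stdint.h>

  //
  // Illustration only: returns true when Addr falls inside the same
  // naturally aligned block as RegionStart. ReplaceTableEntry() applies
  // this test to the address of ArmReplaceLiveTranslationEntry() and to
  // the address of the descriptor being rewritten, and only asks for the
  // MMU to be disabled if either one hits.
  //
  static bool
  SameBlock (uint64_t RegionStart, uint64_t Addr, uint64_t BlockMask)
  {
    return ((RegionStart ^ Addr) & ~BlockMask) == 0;
  }

  //
  // Example with 2 MB blocks (BlockMask == 0x1FFFFF):
  //   SameBlock (0x40000000, 0x401FF000, 0x1FFFFF) -> true  (same block)
  //   SameBlock (0x40000000, 0x40200000, 0x1FFFFF) -> false (next block)
  //
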
diff --git a/ArmPkg/Include/Library/ArmMmuLib.h b/ArmPkg/Include/Library/ArmMmuLib.h
index 7538a8274a72..b745e2230e7e 100644
--- a/ArmPkg/Include/Library/ArmMmuLib.h
+++ b/ArmPkg/Include/Library/ArmMmuLib.h
@@ -52,9 +52,10 @@ ArmClearMemoryRegionReadOnly (
VOID
EFIAPI
ArmReplaceLiveTranslationEntry (
- IN UINT64 *Entry,
- IN UINT64 Value,
- IN UINT64 RegionStart
+ IN UINT64 *Entry,
+ IN UINT64 Value,
+ IN UINT64 RegionStart,
+ IN BOOLEAN DisableMmu
);
EFI_STATUS
diff --git a/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c b/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
index 34f1031c4de3..4d75788ed2b2 100644
--- a/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
+++ b/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
@@ -18,6 +18,17 @@
#include <Library/ArmMmuLib.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>
+#include <Library/HobLib.h>
+
+STATIC
+VOID (
+ EFIAPI *mReplaceLiveEntryFunc
+ )(
+ IN UINT64 *Entry,
+ IN UINT64 Value,
+ IN UINT64 RegionStart,
+ IN BOOLEAN DisableMmu
+ ) = ArmReplaceLiveTranslationEntry;
STATIC
UINT64
@@ -83,14 +94,40 @@ ReplaceTableEntry (
IN UINT64 *Entry,
IN UINT64 Value,
IN UINT64 RegionStart,
+ IN UINT64 BlockMask,
IN BOOLEAN IsLiveBlockMapping
)
{
- if (!ArmMmuEnabled () || !IsLiveBlockMapping) {
+ BOOLEAN DisableMmu;
+
+ //
+ // Replacing a live block entry with a table entry (or vice versa) requires a
+ // break-before-make sequence as per the architecture. This means the mapping
+ // must be made invalid and cleaned from the TLBs first, and this is a bit of
+ // a hassle if the mapping in question covers the code that is actually doing
+ // the mapping and the unmapping, and so we only bother with this if actually
+ // necessary.
+ //
+
+ if (!IsLiveBlockMapping || !ArmMmuEnabled ()) {
+ // If the mapping is not a live block mapping, or the MMU is not on yet, we
+ // can simply overwrite the entry.
*Entry = Value;
ArmUpdateTranslationTableEntry (Entry, (VOID *)(UINTN)RegionStart);
} else {
- ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart);
+ // If the mapping in question does not cover the code that updates the
+ // entry in memory, or the entry that we are intending to update, we can
+ // use an ordinary break before make. Otherwise, we will need to
+ // temporarily disable the MMU.
+ DisableMmu = FALSE;
+ if ((((RegionStart ^ (UINTN)ArmReplaceLiveTranslationEntry) & ~BlockMask) == 0) ||
+ (((RegionStart ^ (UINTN)Entry) & ~BlockMask) == 0))
+ {
+ DisableMmu = TRUE;
+ DEBUG ((DEBUG_WARN, "%a: splitting block entry with MMU disabled\n", __FUNCTION__));
+ }
+
+ ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart, DisableMmu);
}
}
@@ -155,12 +192,13 @@ IsTableEntry (
STATIC
EFI_STATUS
UpdateRegionMappingRecursive (
- IN UINT64 RegionStart,
- IN UINT64 RegionEnd,
- IN UINT64 AttributeSetMask,
- IN UINT64 AttributeClearMask,
- IN UINT64 *PageTable,
- IN UINTN Level
+ IN UINT64 RegionStart,
+ IN UINT64 RegionEnd,
+ IN UINT64 AttributeSetMask,
+ IN UINT64 AttributeClearMask,
+ IN UINT64 *PageTable,
+ IN UINTN Level,
+ IN BOOLEAN TableIsLive
)
{
UINTN BlockShift;
@@ -170,6 +208,7 @@ UpdateRegionMappingRecursive (
UINT64 EntryValue;
VOID *TranslationTable;
EFI_STATUS Status;
+ BOOLEAN NextTableIsLive;
ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0);
@@ -198,7 +237,14 @@ UpdateRegionMappingRecursive (
// the next level. No block mappings are allowed at all at level 0,
// so in that case, we have to recurse unconditionally.
//
+ // One special case to take into account is any region that covers the page
+ // table itself: if we'd cover such a region with block mappings, we are
+ // more likely to end up in the situation later where we need to disable
+ // the MMU in order to update page table entries safely, so prefer page
+ // mappings in that particular case.
+ //
if ((Level == 0) || (((RegionStart | BlockEnd) & BlockMask) != 0) ||
+ ((Level < 3) && (((UINT64)PageTable & ~BlockMask) == RegionStart)) ||
IsTableEntry (*Entry, Level))
{
ASSERT (Level < 3);
@@ -234,7 +280,8 @@ UpdateRegionMappingRecursive (
*Entry & TT_ATTRIBUTES_MASK,
0,
TranslationTable,
- Level + 1
+ Level + 1,
+ FALSE
);
if (EFI_ERROR (Status)) {
//
@@ -246,8 +293,11 @@ UpdateRegionMappingRecursive (
return Status;
}
}
+
+ NextTableIsLive = FALSE;
} else {
TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
+ NextTableIsLive = TableIsLive;
}
//
@@ -259,7 +309,8 @@ UpdateRegionMappingRecursive (
AttributeSetMask,
AttributeClearMask,
TranslationTable,
- Level + 1
+ Level + 1,
+ NextTableIsLive
);
if (EFI_ERROR (Status)) {
if (!IsTableEntry (*Entry, Level)) {
@@ -282,7 +333,8 @@ UpdateRegionMappingRecursive (
Entry,
EntryValue,
RegionStart,
- IsBlockEntry (*Entry, Level)
+ BlockMask,
+ TableIsLive && IsBlockEntry (*Entry, Level)
);
}
} else {
@@ -291,7 +343,7 @@ UpdateRegionMappingRecursive (
EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
: TT_TYPE_BLOCK_ENTRY;
- ReplaceTableEntry (Entry, EntryValue, RegionStart, FALSE);
+ ReplaceTableEntry (Entry, EntryValue, RegionStart, BlockMask, FALSE);
}
}
@@ -301,10 +353,11 @@ UpdateRegionMappingRecursive (
STATIC
EFI_STATUS
UpdateRegionMapping (
- IN UINT64 RegionStart,
- IN UINT64 RegionLength,
- IN UINT64 AttributeSetMask,
- IN UINT64 AttributeClearMask
+ IN UINT64 RegionStart,
+ IN UINT64 RegionLength,
+ IN UINT64 AttributeSetMask,
+ IN UINT64 AttributeClearMask,
+ IN BOOLEAN TableIsLive
)
{
UINTN T0SZ;
@@ -321,7 +374,8 @@ UpdateRegionMapping (
AttributeSetMask,
AttributeClearMask,
ArmGetTTBR0BaseAddress (),
- GetRootTableLevel (T0SZ)
+ GetRootTableLevel (T0SZ),
+ TableIsLive
);
}
@@ -336,7 +390,8 @@ FillTranslationTable (
MemoryRegion->VirtualBase,
MemoryRegion->Length,
ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
- 0
+ 0,
+ FALSE
);
}
@@ -410,7 +465,8 @@ ArmSetMemoryAttributes (
BaseAddress,
Length,
PageAttributes,
- PageAttributeMask
+ PageAttributeMask,
+ TRUE
);
}
@@ -423,7 +479,13 @@ SetMemoryRegionAttribute (
IN UINT64 BlockEntryMask
)
{
- return UpdateRegionMapping (BaseAddress, Length, Attributes, BlockEntryMask);
+ return UpdateRegionMapping (
+ BaseAddress,
+ Length,
+ Attributes,
+ BlockEntryMask,
+ TRUE
+ );
}
EFI_STATUS
diff --git a/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibReplaceEntry.S b/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibReplaceEntry.S
index 66ebca571e63..e936a5be4e11 100644
--- a/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibReplaceEntry.S
+++ b/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibReplaceEntry.S
@@ -12,6 +12,14 @@
.macro __replace_entry, el
+ // check whether we should disable the MMU
+ cbz x3, .L1_\@
+
+ // clean and invalidate first so that we don't clobber
+ // adjacent entries that are dirty in the caches
+ dc civac, x0
+ dsb nsh
+
// disable the MMU
mrs x8, sctlr_el\el
bic x9, x8, #CTRL_M_BIT
@@ -38,8 +46,33 @@
// re-enable the MMU
msr sctlr_el\el, x8
isb
+ b .L2_\@
+
+.L1_\@:
+ // write invalid entry
+ str xzr, [x0]
+ dsb nshst
+
+ // flush translations for the target address from the TLBs
+ lsr x2, x2, #12
+ .if \el == 1
+ tlbi vaae1, x2
+ .else
+ tlbi vae\el, x2
+ .endif
+ dsb nsh
+
+ // write updated entry
+ str x1, [x0]
+ dsb nshst
+
+.L2_\@:
.endm
+ // Align this routine to a log2 upper bound of its size, so that it is
+ // guaranteed not to cross a page or block boundary.
+ .balign 0x200
+
//VOID
//ArmReplaceLiveTranslationEntry (
// IN UINT64 *Entry,
@@ -53,12 +86,7 @@ ASM_FUNC(ArmReplaceLiveTranslationEntry)
msr daifset, #0xf
isb
- // clean and invalidate first so that we don't clobber
- // adjacent entries that are dirty in the caches
- dc civac, x0
- dsb nsh
-
- EL1_OR_EL2_OR_EL3(x3)
+ EL1_OR_EL2_OR_EL3(x5)
1:__replace_entry 1
b 4f
2:__replace_entry 2
@@ -72,3 +100,6 @@ ASM_GLOBAL ASM_PFX(ArmReplaceLiveTranslationEntrySize)
ASM_PFX(ArmReplaceLiveTranslationEntrySize):
.long . - ArmReplaceLiveTranslationEntry
+
+ // Double check that we did not overrun the assumed maximum size
+ .org ArmReplaceLiveTranslationEntry + 0x200
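
For completeness, the ordinary break-before-make path added above (the
.L1_\@ branch taken when DisableMmu is FALSE) corresponds, for the EL1
case, to roughly the following C-with-inline-assembly sketch. This is an
illustration using standard types and GCC-style inline asm, not code from
this patch:

  #include <stdint.h>

  //
  // Sketch of the break-before-make sequence performed with the MMU left
  // enabled (EL1 case): invalidate the entry, flush the stale translation
  // from the TLBs, and only then write the new value.
  //
  static void
  OrdinaryBreakBeforeMake (volatile uint64_t *Entry, uint64_t Value,
                           uint64_t RegionStart)
  {
    *Entry = 0;                                   // break: write invalid entry
    __asm__ volatile ("dsb nshst" ::: "memory");  // complete the store before the TLBI
    __asm__ volatile ("tlbi vaae1, %0"            // drop translations for the VA, all ASIDs
                      :: "r" (RegionStart >> 12) : "memory");
    __asm__ volatile ("dsb nsh" ::: "memory");    // wait for the invalidation to finish
    *Entry = Value;                               // make: write the updated entry
    __asm__ volatile ("dsb nshst" ::: "memory");
  }
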
--
2.35.1
On 2022-09-26 01:25, Ard Biesheuvel wrote:
> When updating a page table descriptor in a way that requires
> break-before-make, we temporarily disable the MMU to ensure that we
> don't unmap the memory region that the code itself is executing from.
>
> However, this is a condition we can check in a straightforward manner,
> and if the regions are disjoint, we don't have to bother with the MMU
> controls and can simply perform an ordinary break-before-make.
>
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Acked-by: Leif Lindholm <quic_llindhol@quicinc.com>
/
Leif