fast_load_store aligns the faulting address and then reads two
words with l32i. With CONFIG_MMU, a bad user access can fault in
this path.
Replace the unconditional jump to .Linvalid_instruction with an
access_ok check that validates the aligned address before the
loads and branches to .Linvalid_instruction on failure. The size
passed is 8 bytes, covering both 32-bit loads from the aligned
address.

The macro is passed a0 as the scratch register and a2 as sp,
matching existing usage in Xtensa entry.S.
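The check the asm macro performs is roughly the following C
(an illustrative model, not kernel code; the function name is
made up, and TASK_SIZE is the usual Xtensa user-space limit):

	#include <stdbool.h>

	/* Model of the asm user_ok/access_ok check: the range
	 * [addr, addr + size) must lie entirely below TASK_SIZE.
	 * Checking size first keeps the subtraction from
	 * underflowing, as in the asm.
	 */
	static inline bool xtensa_access_ok(unsigned long addr,
					    unsigned long size)
	{
		if (size >= TASK_SIZE)		/* bgeu as, at, error */
			return false;
		if (addr >= TASK_SIZE - size)	/* bgeu aa, at, error */
			return false;
		return true;
	}

On success the asm macro falls through; on failure it branches
to the supplied error label, here .Linvalid_instruction.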
Testing:
- Built an Xtensa kernel with CONFIG_MMU enabled
- Compared objdump output before and after, and validated the
  generated code path
- Ran an emulated Xtensa device under QEMU and manually triggered
  unaligned and aligned loads
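The manual trigger can be sketched as follows (illustrative
only, not the exact test program: the inline asm forces an l32i
at a chosen address so the compiler cannot legalize the access;
whether the resulting exception reaches fast_load_store or the
generic unaligned handler depends on the configured core):

	#include <stdint.h>
	#include <stdio.h>

	/* Issue an l32i at an arbitrary address; "a" is the Xtensa
	 * GPR constraint.
	 */
	static uint32_t load32(uintptr_t addr)
	{
		uint32_t v;

		asm volatile("l32i %0, %1, 0" : "=a"(v) : "a"(addr));
		return v;
	}

	int main(void)
	{
		static uint32_t buf[2] = { 0x11223344, 0x55667788 };

		/* Aligned load: must keep working after the patch. */
		printf("aligned:   %08x\n", load32((uintptr_t)buf));
		/* Unaligned load: may trap and be emulated. */
		printf("unaligned: %08x\n", load32((uintptr_t)buf + 1));
		return 0;
	}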
Tested-by: Ricky Ringler <richard.rringler@gmail.com>
Signed-off-by: Ricky Ringler <richard.rringler@gmail.com>
---
arch/xtensa/include/asm/asm-uaccess.h | 2 +-
arch/xtensa/kernel/align.S | 9 ++++++---
2 files changed, 7 insertions(+), 4 deletions(-)
diff --git a/arch/xtensa/include/asm/asm-uaccess.h b/arch/xtensa/include/asm/asm-uaccess.h
index 7cec869136e3..c5baa134d3d8 100644
--- a/arch/xtensa/include/asm/asm-uaccess.h
+++ b/arch/xtensa/include/asm/asm-uaccess.h
@@ -68,7 +68,7 @@
* <aa> register containing memory address
* <as> register containing memory size
* <at> temp register
- * <sp>
+ * <sp> unused by this macro
* <error> label to branch to on error; implies fall-through
* macro on success
* On Exit:
diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S
index ee97edce2300..808f9f843d33 100644
--- a/arch/xtensa/kernel/align.S
+++ b/arch/xtensa/kernel/align.S
@@ -21,6 +21,9 @@
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/processor.h>
+#ifdef CONFIG_MMU
+#include <asm/asm-uaccess.h>
+#endif
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || defined CONFIG_XTENSA_LOAD_STORE
#define LOAD_EXCEPTION_HANDLER
@@ -178,15 +181,15 @@ ENTRY(fast_load_store)
bbsi.l a4, OP1_SI_BIT + INSN_OP1, .Linvalid_instruction
1:
- movi a3, ~3
+ movi a3, ~3
and a3, a3, a8 # align memory address
__ssa8 a8
#ifdef CONFIG_MMU
/* l32e can't be used here even when it's available. */
- /* TODO access_ok(a3) could be used here */
- j .Linvalid_instruction
+ movi a5, 8
+ access_ok a3, a5, a0, a2, .Linvalid_instruction
#endif
l32i a5, a3, 0
l32i a6, a3, 4
--
2.43.0
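For reference, the patched sequence corresponds roughly to the
following C (an illustrative little-endian model reusing the
xtensa_access_ok sketch above; emulate_load is a made-up name,
and the shift handling mirrors __ssa8/__src_b):

	#include <stdbool.h>
	#include <stdint.h>

	/* Model of the patched emulation path. addr is the faulting
	 * address (a8); returns false for the .Linvalid_instruction
	 * case.
	 */
	static bool emulate_load(unsigned long addr, uint32_t *out)
	{
		unsigned long aligned = addr & ~3UL;	/* movi/and */
		unsigned int shift = (addr & 3) * 8;	/* __ssa8 */
		uint32_t w0, w1;

		/* New check: both words (8 bytes) must be user memory. */
		if (!xtensa_access_ok(aligned, 8))
			return false;

		w0 = *(uint32_t *)aligned;		/* l32i a5, a3, 0 */
		w1 = *(uint32_t *)(aligned + 4);	/* l32i a6, a3, 4 */

		/* __src_b: funnel-shift the two words to extract the
		 * data word that straddles them.
		 */
		*out = shift ? (w0 >> shift) | (w1 << (32 - shift)) : w0;
		return true;
	}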
fast_load_store aligns the faulting address and then reads two
words with l32i. With CONFIG_MMU, a bad user access can fault in
this path.
Replace the unconditional jump to .Linvalid_instruction with an
access_ok check that validates the aligned address before the
loads and branches to .Linvalid_instruction on failure. The size
passed is 8 bytes, covering both 32-bit loads from the aligned
address.

The macro is passed a0 as the scratch register and a2 as sp,
matching existing usage in Xtensa entry.S.
Testing:
- Built an Xtensa kernel with CONFIG_MMU enabled
- Compared objdump output before and after, and validated the
  generated code path
- Ran an emulated Xtensa device under QEMU and manually triggered
  unaligned and aligned loads
Tested-by: Ricky Ringler <richard.rringler@gmail.com>
Signed-off-by: Ricky Ringler <richard.rringler@gmail.com>
---
arch/xtensa/kernel/align.S | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S
index ee97edce2300..5205c6ebb586 100644
--- a/arch/xtensa/kernel/align.S
+++ b/arch/xtensa/kernel/align.S
@@ -21,6 +21,9 @@
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/processor.h>
+#ifdef CONFIG_MMU
+#include <asm/asm-uaccess.h>
+#endif
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || defined CONFIG_XTENSA_LOAD_STORE
#define LOAD_EXCEPTION_HANDLER
@@ -178,16 +181,17 @@ ENTRY(fast_load_store)
bbsi.l a4, OP1_SI_BIT + INSN_OP1, .Linvalid_instruction
1:
- movi a3, ~3
+ movi a3, ~3
and a3, a3, a8 # align memory address
__ssa8 a8
#ifdef CONFIG_MMU
/* l32e can't be used here even when it's available. */
- /* TODO access_ok(a3) could be used here */
- j .Linvalid_instruction
+ movi a5, 8
+ access_ok a3, a5, a0, a2, .Linvalid_instruction
#endif
+
l32i a5, a3, 0
l32i a6, a3, 4
__src_b a3, a5, a6 # a3 has the data word
--
2.43.0