Prepare for the ENDBR poison to become a UD1 instruction by teaching the #UD
handler to recognise it as a valid BUG address, so that we get a nice BUG
splat when we trip over one.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
arch/x86/include/asm/bug.h | 6 ++-
arch/x86/include/asm/ibt.h | 8 +++-
arch/x86/kernel/cet.c | 18 ++++++---
arch/x86/kernel/traps.c | 89 +++++++++++++++++++++++++++++++++------------
4 files changed, 90 insertions(+), 31 deletions(-)
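
Note for reviewers (not part of the patch): the decode side keys off the
ModRM reg field of the UD1: EAX for UBSAN, ECX for static_call, and EDX
(plus the 4-byte ENDBR_INSN_SIZE length check) for the ENDBR poison. As an
illustration only, assuming a follow-up patch picks the "ud1 0x0(%eax),%edx"
form listed in the decode_bug() comment, the poison constant would change
roughly as below; the ENDBR_POISON_* names and the UD1 value are assumptions
for the example, while the NOP3 value is what gen_endbr_poison() currently
returns:

	/* current poison:          66 0f 1f 00   osp nopl (%rax)    */
	#define ENDBR_POISON_NOP3	0x001f0f66
	/* hypothetical UD1 poison: 0f b9 50 00   ud1 0x0(%eax),%edx */
	#define ENDBR_POISON_UD1	0x0050b90f

A standalone sketch of the new decode_bug() classification is appended
after the diff.
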
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -22,8 +22,10 @@
#define SECOND_BYTE_OPCODE_UD2 0x0b
#define BUG_NONE 0xffff
-#define BUG_UD1 0xfffe
-#define BUG_UD2 0xfffd
+#define BUG_UD2 0xfffe
+#define BUG_UD1 0xfffd
+#define BUG_UD1_UBSAN 0xfffc
+#define BUG_UD1_ENDBR 0xfffb
#ifdef CONFIG_GENERIC_BUG
--- a/arch/x86/include/asm/ibt.h
+++ b/arch/x86/include/asm/ibt.h
@@ -41,7 +41,7 @@
_ASM_PTR fname "\n\t" \
".popsection\n\t"
-static inline __attribute_const__ u32 gen_endbr(void)
+static __always_inline __attribute_const__ u32 gen_endbr(void)
{
u32 endbr;
@@ -56,7 +56,7 @@ static inline __attribute_const__ u32 ge
return endbr;
}
-static inline __attribute_const__ u32 gen_endbr_poison(void)
+static __always_inline __attribute_const__ u32 gen_endbr_poison(void)
{
/*
* 4 byte NOP that isn't NOP4 (in fact it is OSP NOP3), such that it
@@ -77,6 +77,10 @@ static inline bool is_endbr(u32 val)
extern __noendbr u64 ibt_save(bool disable);
extern __noendbr void ibt_restore(u64 save);
+struct pt_regs;
+
+extern bool __do_kernel_cp_fault(struct pt_regs *regs);
+
#else /* __ASSEMBLY__ */
#ifdef CONFIG_X86_64
--- a/arch/x86/kernel/cet.c
+++ b/arch/x86/kernel/cet.c
@@ -81,6 +81,17 @@ static void do_user_cp_fault(struct pt_r
static __ro_after_init bool ibt_fatal = true;
+bool __do_kernel_cp_fault(struct pt_regs *regs)
+{
+ pr_err("Missing ENDBR: %pS\n", (void *)instruction_pointer(regs));
+ if (!ibt_fatal) {
+ printk(KERN_DEFAULT CUT_HERE);
+ __warn(__FILE__, __LINE__, (void *)regs->ip, TAINT_WARN, regs, NULL);
+ return true;
+ }
+ return false;
+}
+
static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)
{
if ((error_code & CP_EC) != CP_ENDBR) {
@@ -93,12 +104,9 @@ static void do_kernel_cp_fault(struct pt
return;
}
- pr_err("Missing ENDBR: %pS\n", (void *)instruction_pointer(regs));
- if (!ibt_fatal) {
- printk(KERN_DEFAULT CUT_HERE);
- __warn(__FILE__, __LINE__, (void *)regs->ip, TAINT_WARN, regs, NULL);
+ if (__do_kernel_cp_fault(regs))
return;
- }
+
BUG();
}
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -94,10 +94,18 @@ __always_inline int is_valid_bugaddr(uns
/*
* Check for UD1 or UD2, accounting for Address Size Override Prefixes.
- * If it's a UD1, get the ModRM byte to pass along to UBSan.
+ * If it's a UD1, further decode to determine its use:
+ *
+ * UBSan{0}: 67 0f b9 00 ud1 (%eax),%eax
+ * UBSan{10}: 67 0f b9 40 10 ud1 0x10(%eax),%eax
+ * static_call: 0f b9 cc ud1 %esp,%ecx
+ * ENDBR Poison: 0f b9 50 00 ud1 0x0(%eax),%edx
+ *
+ * Notably UBSAN uses EAX, static_call uses ECX and ENDBR uses EDX.
*/
-__always_inline int decode_bug(unsigned long addr, u32 *imm)
+__always_inline int decode_bug(unsigned long addr, s32 *imm, int *len)
{
+ unsigned long start = addr;
u8 v;
if (addr < TASK_SIZE_MAX)
@@ -110,24 +118,41 @@ __always_inline int decode_bug(unsigned
return BUG_NONE;
v = *(u8 *)(addr++);
- if (v == SECOND_BYTE_OPCODE_UD2)
+ if (v == SECOND_BYTE_OPCODE_UD2) {
+ *len = addr - start;
return BUG_UD2;
+ }
- if (!IS_ENABLED(CONFIG_UBSAN_TRAP) || v != SECOND_BYTE_OPCODE_UD1)
+ if (v != SECOND_BYTE_OPCODE_UD1)
return BUG_NONE;
- /* Retrieve the immediate (type value) for the UBSAN UD1 */
- v = *(u8 *)(addr++);
- if (X86_MODRM_RM(v) == 4)
- addr++;
-
*imm = 0;
- if (X86_MODRM_MOD(v) == 1)
- *imm = *(u8 *)addr;
- else if (X86_MODRM_MOD(v) == 2)
- *imm = *(u32 *)addr;
- else
- WARN_ONCE(1, "Unexpected MODRM_MOD: %u\n", X86_MODRM_MOD(v));
+ v = *(u8 *)(addr++); /* ModRM */
+
+ /* Decode immediate, if present */
+ if (X86_MODRM_MOD(v) != 3) {
+ if (X86_MODRM_RM(v) == 4)
+ addr++; /* Skip SIB byte */
+
+ if (X86_MODRM_MOD(v) == 1) {
+ *imm = *(s8 *)addr;
+ addr += 1;
+
+ } else if (X86_MODRM_MOD(v) == 2) {
+ *imm = *(s32 *)addr;
+ addr += 4;
+ }
+ }
+
+ /* record instruction length */
+ *len = addr - start;
+
+ if (X86_MODRM_REG(v) == 0) /* EAX */
+ return BUG_UD1_UBSAN;
+
+ if (X86_MODRM_REG(v) == 2 && /* RDX */
+ *len == ENDBR_INSN_SIZE)
+ return BUG_UD1_ENDBR;
return BUG_UD1;
}
@@ -258,8 +283,8 @@ static inline void handle_invalid_op(str
static noinstr bool handle_bug(struct pt_regs *regs)
{
bool handled = false;
- int ud_type;
- u32 imm;
+ int ud_type, ud_len;
+ s32 ud_imm;
/*
* Normally @regs are unpoisoned by irqentry_enter(), but handle_bug()
@@ -267,7 +292,7 @@ static noinstr bool handle_bug(struct pt
* irqentry_enter().
*/
kmsan_unpoison_entry_regs(regs);
- ud_type = decode_bug(regs->ip, &imm);
+ ud_type = decode_bug(regs->ip, &ud_imm, &ud_len);
if (ud_type == BUG_NONE)
return handled;
@@ -281,15 +306,35 @@ static noinstr bool handle_bug(struct pt
*/
if (regs->flags & X86_EFLAGS_IF)
raw_local_irq_enable();
- if (ud_type == BUG_UD2) {
+
+ switch (ud_type) {
+ case BUG_UD2:
if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN ||
handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN) {
- regs->ip += LEN_UD2;
+ regs->ip += ud_len;
+ handled = true;
+ }
+ break;
+
+ case BUG_UD1_UBSAN:
+ if (IS_ENABLED(CONFIG_UBSAN_TRAP)) {
+ pr_crit("%s at %pS\n",
+ report_ubsan_failure(regs, ud_imm),
+ (void *)regs->ip);
+ }
+ break;
+
+ case BUG_UD1_ENDBR:
+ if (__do_kernel_cp_fault(regs)) {
+ regs->ip += ud_len;
handled = true;
}
- } else if (IS_ENABLED(CONFIG_UBSAN_TRAP)) {
- pr_crit("%s at %pS\n", report_ubsan_failure(regs, imm), (void *)regs->ip);
+ break;
+
+ default:
+ break;
}
+
if (regs->flags & X86_EFLAGS_IF)
raw_local_irq_disable();
instrumentation_end();
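
For reference, a minimal userspace sketch of the classification scheme the
new decode_bug() implements (illustration only; the MODRM_* macros, the enum
names, the 0f b9 50 00 poison bytes and the hard-coded length 4 standing in
for ENDBR_INSN_SIZE are assumptions for the example, while the decode flow
mirrors the hunk above):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define MODRM_MOD(m)	(((m) >> 6) & 3)
#define MODRM_REG(m)	(((m) >> 3) & 7)
#define MODRM_RM(m)	((m) & 7)

enum bug_type { B_NONE, B_UD2, B_UD1, B_UD1_UBSAN, B_UD1_ENDBR };

/* Classify a trapping instruction and extract the immediate and length. */
static enum bug_type decode(const uint8_t *p, int32_t *imm, int *len)
{
	const uint8_t *start = p;
	uint8_t v, modrm;

	*imm = 0;

	v = *p++;
	if (v == 0x67)			/* Address Size Override Prefix */
		v = *p++;
	if (v != 0x0f)			/* two-byte opcode escape */
		return B_NONE;

	v = *p++;
	if (v == 0x0b) {		/* 0f 0b: UD2 */
		*len = p - start;
		return B_UD2;
	}
	if (v != 0xb9)			/* 0f b9: UD1 */
		return B_NONE;

	modrm = *p++;
	if (MODRM_MOD(modrm) != 3) {	/* memory operand: maybe SIB + disp */
		if (MODRM_RM(modrm) == 4)
			p++;		/* skip SIB byte */

		if (MODRM_MOD(modrm) == 1) {
			*imm = *(const int8_t *)p;	/* disp8, sign extended */
			p += 1;
		} else if (MODRM_MOD(modrm) == 2) {
			int32_t d;
			memcpy(&d, p, sizeof(d));	/* disp32 */
			*imm = d;
			p += 4;
		}
	}
	*len = p - start;

	if (MODRM_REG(modrm) == 0)			/* EAX: UBSAN */
		return B_UD1_UBSAN;
	if (MODRM_REG(modrm) == 2 && *len == 4)		/* EDX + ENDBR_INSN_SIZE */
		return B_UD1_ENDBR;
	return B_UD1;					/* e.g. static_call (ECX) */
}

int main(void)
{
	const uint8_t ubsan[]  = { 0x67, 0x0f, 0xb9, 0x40, 0x10 };	/* ud1 0x10(%eax),%eax */
	const uint8_t poison[] = { 0x0f, 0xb9, 0x50, 0x00 };		/* ud1 0x0(%eax),%edx  */
	int32_t imm;
	int len;

	printf("ubsan:  type %d imm %d len %d\n", decode(ubsan, &imm, &len), imm, len);
	printf("poison: type %d imm %d len %d\n", decode(poison, &imm, &len), imm, len);
	return 0;
}

The first example decodes as B_UD1_UBSAN with imm 16 and len 5, the second
as B_UD1_ENDBR with len 4.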