The unconditional indirect call through nmi_callback in do_nmi() lands in
dummy_nmi_callback() in all cases other than during a few specific and rare
tasks (alternative patching, microcode loading, etc.).

Indirect calls are expensive under retpoline, so rearrange the logic to use
NULL as the default callback and skip the indirect call entirely in the
common case.

While rearranging the code, fold the two exit paths into one.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: Wei Liu <wl@xen.org>
---
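
As a point of reference only (not part of the patch), below is a standalone
sketch of the pattern do_nmi() switches to: a callback pointer which defaults
to NULL, is read once per event, and whose indirect call is skipped entirely
in the common case.  Every name in the sketch (event_callback_t,
handle_event(), set_event_callback(), ...) is invented for illustration, and
READ_ONCE_PTR()/UNLIKELY() are rough stand-ins for Xen's ACCESS_ONCE() and
unlikely().

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Callback signature; returns true if the event was fully handled. */
typedef bool event_callback_t(unsigned int cpu);

/* NULL by default: the common case makes no indirect call at all. */
static event_callback_t *event_callback;

/* Stands in for nmi_exit()-style bookkeeping which every exit path must do. */
static unsigned int events_handled;

/* Approximations of ACCESS_ONCE() and unlikely(). */
#define READ_ONCE_PTR(p) (*(event_callback_t * volatile *)&(p))
#define UNLIKELY(x)      __builtin_expect(!!(x), 0)

static void handle_event(unsigned int cpu)
{
    event_callback_t *callback = READ_ONCE_PTR(event_callback);

    /* Only the rare registered-callback case pays for the indirect call. */
    if ( UNLIKELY(callback != NULL) && callback(cpu) )
        goto out;

    printf("CPU%u: default handling\n", cpu);

 out:
    events_handled++; /* single, folded exit path */
}

static event_callback_t *set_event_callback(event_callback_t *cb)
{
    event_callback_t *old = event_callback;

    event_callback = cb;
    return old;
}

static void unset_event_callback(void)
{
    event_callback = NULL;
}

static bool demo_callback(unsigned int cpu)
{
    printf("CPU%u: special handling\n", cpu);
    return true; /* consumed - skip the default path */
}

int main(void)
{
    handle_event(0);                   /* no callback: no indirect call */

    set_event_callback(demo_callback);
    handle_event(0);                   /* rare case: indirect call taken */

    unset_event_callback();
    handle_event(0);                   /* back to the common case */

    return 0;
}
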
 xen/arch/x86/traps.c | 19 +++++++------------
 1 file changed, 7 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index d483aa91f2f1..f526298e997d 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1872,29 +1872,23 @@ static void unknown_nmi_error(const struct cpu_user_regs *regs,
     }
 }
 
-static int dummy_nmi_callback(const struct cpu_user_regs *regs, int cpu)
-{
-    return 0;
-}
-
-static nmi_callback_t *nmi_callback = dummy_nmi_callback;
+static nmi_callback_t *__read_mostly nmi_callback;
 
 DEFINE_PER_CPU(unsigned int, nmi_count);
 
 void do_nmi(const struct cpu_user_regs *regs)
 {
     unsigned int cpu = smp_processor_id();
+    nmi_callback_t *callback;
     unsigned char reason = 0;
     bool handle_unknown = false;
 
     this_cpu(nmi_count)++;
     nmi_enter();
 
-    if ( nmi_callback(regs, cpu) )
-    {
-        nmi_exit();
-        return;
-    }
+    callback = ACCESS_ONCE(nmi_callback);
+    if ( unlikely(callback) && callback(regs, cpu) )
+        goto out;
 
     /*
      * Accessing port 0x61 may trap to SMM which has been actually
@@ -1921,6 +1915,7 @@ void do_nmi(const struct cpu_user_regs *regs)
             unknown_nmi_error(regs, reason);
     }
 
+ out:
     nmi_exit();
 }
 
@@ -1935,7 +1930,7 @@ nmi_callback_t *set_nmi_callback(nmi_callback_t *callback)
 
 void unset_nmi_callback(void)
 {
-    nmi_callback = dummy_nmi_callback;
+    nmi_callback = NULL;
 }
 
 bool nmi_check_continuation(void)
--
2.11.0