From: David Woodhouse <dwmw@amazon.co.uk>
When kvm_xen_evtchn_send() takes the slow path because the shinfo GPC
needs to be revalidated, it used to violate the SRCU vs. kvm->lock
locking rules and potentially cause a deadlock.
Now that lockdep is learning to catch such things, make sure that code
path is exercised by the selftest.
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
.../selftests/kvm/x86_64/xen_shinfo_test.c | 31 +++++++++++++++++++
1 file changed, 31 insertions(+)
diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
index 644d614a9965..3adc2e11b094 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
@@ -29,6 +29,9 @@
#define DUMMY_REGION_GPA (SHINFO_REGION_GPA + (3 * PAGE_SIZE))
#define DUMMY_REGION_SLOT 11
+#define DUMMY_REGION_GPA_2 (SHINFO_REGION_GPA + (4 * PAGE_SIZE))
+#define DUMMY_REGION_SLOT_2 12
+
#define SHINFO_ADDR (SHINFO_REGION_GPA)
#define VCPU_INFO_ADDR (SHINFO_REGION_GPA + 0x40)
#define PVTIME_ADDR (SHINFO_REGION_GPA + PAGE_SIZE)
@@ -57,6 +60,7 @@ enum {
TEST_EVTCHN_SLOWPATH,
TEST_EVTCHN_SEND_IOCTL,
TEST_EVTCHN_HCALL,
+ TEST_EVTCHN_HCALL_SLOWPATH,
TEST_EVTCHN_HCALL_EVENTFD,
TEST_TIMER_SETUP,
TEST_TIMER_WAIT,
@@ -270,6 +274,20 @@ static void guest_code(void)
guest_wait_for_irq();
+ GUEST_SYNC(TEST_EVTCHN_HCALL_SLOWPATH);
+
+ /* Same again, but this time the host has messed with memslots
+ * so it should take the slow path in kvm_xen_set_evtchn(). */
+ __asm__ __volatile__ ("vmcall" :
+ "=a" (rax) :
+ "a" (__HYPERVISOR_event_channel_op),
+ "D" (EVTCHNOP_send),
+ "S" (&s));
+
+ GUEST_ASSERT(rax == 0);
+
+ guest_wait_for_irq();
+
GUEST_SYNC(TEST_EVTCHN_HCALL_EVENTFD);
/* Deliver "outbound" event channel to an eventfd which
@@ -801,6 +819,19 @@ int main(int argc, char *argv[])
alarm(1);
break;
+ case TEST_EVTCHN_HCALL_SLOWPATH:
+ TEST_ASSERT(!evtchn_irq_expected,
+ "Expected event channel IRQ but it didn't happen");
+ shinfo->evtchn_pending[0] = 0;
+
+ if (verbose)
+ printf("Testing guest EVTCHNOP_send direct to evtchn after memslot change\n");
+ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
+ DUMMY_REGION_GPA_2, DUMMY_REGION_SLOT_2, 1, 0);
+ evtchn_irq_expected = true;
+ alarm(1);
+ break;
+
case TEST_EVTCHN_HCALL_EVENTFD:
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
--
2.35.3
On Fri, Jan 13, 2023, David Woodhouse wrote:
> @@ -57,6 +60,7 @@ enum {
> TEST_EVTCHN_SLOWPATH,
> TEST_EVTCHN_SEND_IOCTL,
> TEST_EVTCHN_HCALL,
> + TEST_EVTCHN_HCALL_SLOWPATH,
> TEST_EVTCHN_HCALL_EVENTFD,
> TEST_TIMER_SETUP,
> TEST_TIMER_WAIT,
> @@ -270,6 +274,20 @@ static void guest_code(void)
>
> guest_wait_for_irq();
>
> + GUEST_SYNC(TEST_EVTCHN_HCALL_SLOWPATH);
> +
> + /* Same again, but this time the host has messed with memslots
> + * so it should take the slow path in kvm_xen_set_evtchn(). */
Comment-style nit: multi-line comments should open with a bare "/*" on its
own line (not start the text on the opening line), i.e.

	/*
	 * Same again, but this time the host has messed with memslots
	 * so it should take the slow path in kvm_xen_set_evtchn().
	 */

See: https://lore.kernel.org/all/CA+55aFyQYJerovMsSoSKS7PessZBr4vNp-3QUUwhqk4A4_jcbg@mail.gmail.com
> + __asm__ __volatile__ ("vmcall" :
> + "=a" (rax) :
> + "a" (__HYPERVISOR_event_channel_op),
> + "D" (EVTCHNOP_send),
> + "S" (&s));
> +
> + GUEST_ASSERT(rax == 0);
There's a lot of copy+paste in this file, and we really should do VMMCALL when
running on AMD. That's easy to do with some changes that are in the queue for
6.3. I'll repost these selftest patches on top of a few patches to add helpers for
doing hypercalls using the Xen ABI.
© 2016 - 2026 Red Hat, Inc.