Return -EIO when a KVM_BUG_ON() is tripped, as KVM's ABI is to return -EIO
when a VM has been killed due to a KVM bug, not -EINVAL.
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
arch/x86/kvm/vmx/tdx.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 9fb6e5f02cc9..ef4ffcad131f 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -1624,7 +1624,7 @@ static int tdx_mem_page_record_premap_cnt(struct kvm *kvm, gfn_t gfn,
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
if (KVM_BUG_ON(kvm->arch.pre_fault_allowed, kvm))
- return -EINVAL;
+ return -EIO;
/* nr_premapped will be decreased when tdh_mem_page_add() is called. */
atomic64_inc(&kvm_tdx->nr_premapped);
@@ -1638,7 +1638,7 @@ static int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
/* TODO: handle large pages. */
if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm))
- return -EINVAL;
+ return -EIO;
/*
* Read 'pre_fault_allowed' before 'kvm_tdx->state'; see matching
@@ -1849,7 +1849,7 @@ static int tdx_sept_free_private_spt(struct kvm *kvm, gfn_t gfn,
* and slot move/deletion.
*/
if (KVM_BUG_ON(is_hkid_assigned(kvm_tdx), kvm))
- return -EINVAL;
+ return -EIO;
/*
* The HKID assigned to this TD was already freed and cache was
@@ -1870,7 +1870,7 @@ static int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
* there can't be anything populated in the private EPT.
*/
if (KVM_BUG_ON(!is_hkid_assigned(to_kvm_tdx(kvm)), kvm))
- return -EINVAL;
+ return -EIO;
ret = tdx_sept_zap_private_spte(kvm, gfn, level, page);
if (ret <= 0)
--
2.51.0.268.g9569e192d0-goog
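For context, the ABI cited above comes from the generic KVM_BUG_ON() plumbing:
tripping the assert bugs the VM, after which KVM refuses further ioctls with
-EIO. A rough sketch of the relevant pieces, paraphrased from
include/linux/kvm_host.h and the ioctl entry points rather than copied verbatim:

/* Paraphrased, not a verbatim copy. */
#define KVM_BUG_ON(cond, kvm)					\
({								\
	bool __ret = !!(cond);					\
								\
	if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged))		\
		kvm_vm_bugged(kvm);				\
	unlikely(__ret);					\
})

/* The vCPU/VM ioctl entry points then refuse to run a bugged VM, roughly: */
if (kvm->mm != current->mm || kvm->vm_bugged || kvm->vm_dead)
	return -EIO;

Returning -EIO from the S-EPT hooks thus keeps the local return values
consistent with what userspace ultimately sees once the VM has been bugged.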
Sean Christopherson wrote:
> Return -EIO when a KVM_BUG_ON() is tripped, as KVM's ABI is to return -EIO
> when a VM has been killed due to a KVM bug, not -EINVAL.
>
> Signed-off-by: Sean Christopherson <seanjc@google.com>

Reviewed-by: Ira Weiny <ira.weiny@intel.com>

[snip]
On Tue, 2025-08-26 at 17:05 -0700, Sean Christopherson wrote:
> Return -EIO when a KVM_BUG_ON() is tripped, as KVM's ABI is to return -EIO
> when a VM has been killed due to a KVM bug, not -EINVAL.
>
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
> arch/x86/kvm/vmx/tdx.c | 8 ++++----
> 1 file changed, 4 insertions(+), 4 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> index 9fb6e5f02cc9..ef4ffcad131f 100644
> --- a/arch/x86/kvm/vmx/tdx.c
> +++ b/arch/x86/kvm/vmx/tdx.c
> @@ -1624,7 +1624,7 @@ static int tdx_mem_page_record_premap_cnt(struct kvm *kvm, gfn_t gfn,
> struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
>
> if (KVM_BUG_ON(kvm->arch.pre_fault_allowed, kvm))
> - return -EINVAL;
> + return -EIO;
>
> /* nr_premapped will be decreased when tdh_mem_page_add() is called. */
> atomic64_inc(&kvm_tdx->nr_premapped);
> @@ -1638,7 +1638,7 @@ static int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
>
> /* TODO: handle large pages. */
> if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm))
> - return -EINVAL;
> + return -EIO;
>
> /*
> * Read 'pre_fault_allowed' before 'kvm_tdx->state'; see matching
> @@ -1849,7 +1849,7 @@ static int tdx_sept_free_private_spt(struct kvm *kvm, gfn_t gfn,
> * and slot move/deletion.
> */
> if (KVM_BUG_ON(is_hkid_assigned(kvm_tdx), kvm))
> - return -EINVAL;
> + return -EIO;
>
> /*
> * The HKID assigned to this TD was already freed and cache was
> @@ -1870,7 +1870,7 @@ static int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
> * there can't be anything populated in the private EPT.
> */
> if (KVM_BUG_ON(!is_hkid_assigned(to_kvm_tdx(kvm)), kvm))
> - return -EINVAL;
> + return -EIO;
>
> ret = tdx_sept_zap_private_spte(kvm, gfn, level, page);
> if (ret <= 0)
Did you miss?
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index f9ac590e8ff0..fd1b8fea55a9 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -1656,10 +1656,10 @@ static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
/* TODO: handle large pages. */
if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm))
- return -EINVAL;
+ return -EIO;
if (KVM_BUG_ON(!is_hkid_assigned(kvm_tdx), kvm))
- return -EINVAL;
+ return -EIO;
/*
* When zapping private page, write lock is held. So no race condition
We really have a lot of KVM_BUG_ON()s in tdx code. I hesitate to pull them out
but it feels a bit gratuitous.
On Thu, Aug 28, 2025, Rick P Edgecombe wrote:
> On Tue, 2025-08-26 at 17:05 -0700, Sean Christopherson wrote:
> > Return -EIO when a KVM_BUG_ON() is tripped, as KVM's ABI is to return -EIO
> > when a VM has been killed due to a KVM bug, not -EINVAL.
> >
> > Signed-off-by: Sean Christopherson <seanjc@google.com>
> > ---
> > arch/x86/kvm/vmx/tdx.c | 8 ++++----
> > 1 file changed, 4 insertions(+), 4 deletions(-)
> >
> > diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> > index 9fb6e5f02cc9..ef4ffcad131f 100644
> > --- a/arch/x86/kvm/vmx/tdx.c
> > +++ b/arch/x86/kvm/vmx/tdx.c
> > @@ -1624,7 +1624,7 @@ static int tdx_mem_page_record_premap_cnt(struct kvm *kvm, gfn_t gfn,
> > struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
> >
> > if (KVM_BUG_ON(kvm->arch.pre_fault_allowed, kvm))
> > - return -EINVAL;
> > + return -EIO;
> >
> > /* nr_premapped will be decreased when tdh_mem_page_add() is called. */
> > atomic64_inc(&kvm_tdx->nr_premapped);
> > @@ -1638,7 +1638,7 @@ static int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
> >
> > /* TODO: handle large pages. */
> > if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm))
> > - return -EINVAL;
> > + return -EIO;
> >
> > /*
> > * Read 'pre_fault_allowed' before 'kvm_tdx->state'; see matching
> > @@ -1849,7 +1849,7 @@ static int tdx_sept_free_private_spt(struct kvm *kvm, gfn_t gfn,
> > * and slot move/deletion.
> > */
> > if (KVM_BUG_ON(is_hkid_assigned(kvm_tdx), kvm))
> > - return -EINVAL;
> > + return -EIO;
> >
> > /*
> > * The HKID assigned to this TD was already freed and cache was
> > @@ -1870,7 +1870,7 @@ static int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
> > * there can't be anything populated in the private EPT.
> > */
> > if (KVM_BUG_ON(!is_hkid_assigned(to_kvm_tdx(kvm)), kvm))
> > - return -EINVAL;
> > + return -EIO;
> >
> > ret = tdx_sept_zap_private_spte(kvm, gfn, level, page);
> > if (ret <= 0)
>
>
> Did you miss?
I did indeed.
> diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> index f9ac590e8ff0..fd1b8fea55a9 100644
> --- a/arch/x86/kvm/vmx/tdx.c
> +++ b/arch/x86/kvm/vmx/tdx.c
> @@ -1656,10 +1656,10 @@ static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
>
> /* TODO: handle large pages. */
> if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm))
> - return -EINVAL;
> + return -EIO;
>
> if (KVM_BUG_ON(!is_hkid_assigned(kvm_tdx), kvm))
> - return -EINVAL;
> + return -EIO;
>
> /*
> * When zapping private page, write lock is held. So no race condition
>
>
> We really have a lot of KVM_BUG_ON()s in tdx code. I hesitate to pull them out
> but it feels a bit gratuitous.
Generally speaking, the number of KVM_BUG_ON()s is fine. What we can do though
is reduce the amount of boilerplate and the number of paths that propagate a SEAMCALL
err through multiple layers, e.g. by eliminating single-use helpers (which is made
easier by reducing boilerplate and thus lines of code).
Concretely, if we combine the KVM_BUG_ON() usage with pr_tdx_error():
#define __TDX_BUG_ON(__err, __fn_str, __kvm, __fmt, __args...) \
({ \
struct kvm *_kvm = (__kvm); \
bool __ret = !!(__err); \
\
if (WARN_ON_ONCE(__ret && (!_kvm || !_kvm->vm_bugged))) { \
if (_kvm) \
kvm_vm_bugged(_kvm); \
pr_err_ratelimited("SEAMCALL " __fn_str " failed: 0x%llx" \
__fmt "\n", __err, __args); \
} \
unlikely(__ret); \
})
#define TDX_BUG_ON(__err, __fn, __kvm) \
__TDX_BUG_ON(__err, #__fn, __kvm, "%s", "")
#define TDX_BUG_ON_1(__err, __fn, __rcx, __kvm) \
__TDX_BUG_ON(__err, #__fn, __kvm, ", rcx 0x%llx", __rcx)
#define TDX_BUG_ON_2(__err, __fn, __rcx, __rdx, __kvm) \
__TDX_BUG_ON(__err, #__fn, __kvm, ", rcx 0x%llx, rdx 0x%llx", __rcx, __rdx)
#define TDX_BUG_ON_3(__err, __fn, __rcx, __rdx, __r8, __kvm) \
__TDX_BUG_ON(__err, #__fn, __kvm, ", rcx 0x%llx, rdx 0x%llx, r8 0x%llx", __rcx, __rdx, __r8)
And a macro to handle retry when kicking vCPUs out of the guest:
#define tdh_do_no_vcpus(tdh_func, kvm, args...) \
({ \
struct kvm_tdx *__kvm_tdx = to_kvm_tdx(kvm); \
u64 __err; \
\
lockdep_assert_held_write(&kvm->mmu_lock); \
\
__err = tdh_func(args); \
if (unlikely(tdx_operand_busy(__err))) { \
WRITE_ONCE(__kvm_tdx->wait_for_sept_zap, true); \
kvm_make_all_cpus_request(kvm, KVM_REQ_OUTSIDE_GUEST_MODE); \
\
__err = tdh_func(args); \
\
WRITE_ONCE(__kvm_tdx->wait_for_sept_zap, false); \
} \
__err; \
})
And with a bit of massaging, we can end up with e.g. this, which IMO is much easier
to follow than the current form of tdx_sept_remove_private_spte(), which has
several duplicate sanity checks and error handlers.
The tdh_do_no_vcpus() macro is a little mean, but I think it's a net positive
as it eliminates quite a lot of "noise", and thus makes it easier to focus on the
logic. An alternative to a trampoline macro would be to implement a guard()
and then do a scoped_guard(), but I think that'd be just as hard to read, and
would require almost as much boilerplate as there is today.
static void tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
enum pg_level level, u64 spte)
{
struct page *page = pfn_to_page(spte_to_pfn(spte));
int tdx_level = pg_level_to_tdx_sept_level(level);
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
gpa_t gpa = gfn_to_gpa(gfn);
u64 err, entry, level_state;
/*
* HKID is released after all private pages have been removed, and set
* before any might be populated. Warn if zapping is attempted when
* there can't be anything populated in the private EPT.
*/
if (KVM_BUG_ON(!is_hkid_assigned(to_kvm_tdx(kvm)), kvm))
return;
/* TODO: handle large pages. */
if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm))
return;
err = tdh_do_no_vcpus(tdh_mem_range_block, kvm, &kvm_tdx->td, gpa,
tdx_level, &entry, &level_state);
if (TDX_BUG_ON_2(err, TDH_MEM_RANGE_BLOCK, entry, level_state, kvm))
return;
/*
* TDX requires TLB tracking before dropping private page. Do
* it here, although it is also done later.
*/
tdx_track(kvm);
/*
* When zapping private page, write lock is held. So no race condition
* with other vcpu sept operation.
* Race with TDH.VP.ENTER due to (0-step mitigation) and Guest TDCALLs.
*/
err = tdh_do_no_vcpus(tdh_mem_page_remove, kvm, &kvm_tdx->td, gpa,
tdx_level, &entry, &level_state);
if (TDX_BUG_ON_2(err, TDH_MEM_PAGE_REMOVE, entry, level_state, kvm))
return;
err = tdh_phymem_page_wbinvd_hkid((u16)kvm_tdx->hkid, page);
if (TDX_BUG_ON(err, TDH_PHYMEM_PAGE_WBINVD, kvm))
return;
tdx_clear_page(page);
}
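For comparison, a minimal sketch of the guard()/scoped_guard() alternative
mentioned above, assuming helpers tdx_no_vcpus_enter_start()/_stop() (named
here for illustration) that set wait_for_sept_zap, kick vCPUs out via
KVM_REQ_OUTSIDE_GUEST_MODE, and clear the flag; DEFINE_GUARD and scoped_guard
come from <linux/cleanup.h>:

DEFINE_GUARD(tdx_no_vcpus, struct kvm *,
	     tdx_no_vcpus_enter_start(_T), tdx_no_vcpus_enter_stop(_T))

	/* At a call site, the busy retry then becomes: */
	err = tdh_mem_range_block(&kvm_tdx->td, gpa, tdx_level, &entry, &level_state);
	if (unlikely(tdx_operand_busy(err)))
		scoped_guard(tdx_no_vcpus, kvm)
			err = tdh_mem_range_block(&kvm_tdx->td, gpa, tdx_level,
						  &entry, &level_state);

Every SEAMCALL site would still need to spell out the busy check and the retry,
which is roughly the boilerplate the tdh_do_no_vcpus() trampoline folds away.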
On Thu, 2025-08-28 at 12:21 -0700, Sean Christopherson wrote:
> Generally speaking, the number of KVM_BUG_ON()s is fine. What we can do though
> is reduce the amount of boilerplate and the number of paths the propagate a SEAMCALL
> err through multiple layers, e.g. by eliminating single-use helpers (which is made
> easier by reducing boilerplate and thus lines of code).
>
> Concretely, if we combine the KVM_BUG_ON() usage with pr_tdx_error():
>
> #define __TDX_BUG_ON(__err, __fn_str, __kvm, __fmt, __args...) \
> ({ \
> struct kvm *_kvm = (__kvm); \
> bool __ret = !!(__err); \
> \
> if (WARN_ON_ONCE(__ret && (!_kvm || !_kvm->vm_bugged))) { \
> if (_kvm) \
> kvm_vm_bugged(_kvm); \
> pr_err_ratelimited("SEAMCALL " __fn_str " failed: 0x%llx" \
> __fmt "\n", __err, __args); \
> } \
> unlikely(__ret); \
> })
>
> #define TDX_BUG_ON(__err, __fn, __kvm) \
> __TDX_BUG_ON(__err, #__fn, __kvm, "%s", "")
>
> #define TDX_BUG_ON_1(__err, __fn, __rcx, __kvm) \
> __TDX_BUG_ON(__err, #__fn, __kvm, ", rcx 0x%llx", __rcx)
>
> #define TDX_BUG_ON_2(__err, __fn, __rcx, __rdx, __kvm) \
> __TDX_BUG_ON(__err, #__fn, __kvm, ", rcx 0x%llx, rdx 0x%llx", __rcx, __rdx)
>
> #define TDX_BUG_ON_3(__err, __fn, __rcx, __rdx, __r8, __kvm) \
> __TDX_BUG_ON(__err, #__fn, __kvm, ", rcx 0x%llx, rdx 0x%llx, r8 0x%llx", __rcx, __rdx, __r8)
In general sounds good. But it's a bit strange to specify them as rcx, rdx,
etc. in a general helper. This is fallout from the existing chain of strange
naming:
For example, tdh_mem_range_block() plucks them from those registers and calls
them ext_err1 due to their conditional meaning. Then KVM gives them some more
meaning with 'entry' and 'level_state'. Then prints them out as the original
register names. How about keeping the KVM names, like:
#define TDX_BUG_ON_2(__err, __fn, arg1, arg2, __kvm) \
__TDX_BUG_ON(__err, #__fn, __kvm, ", " #arg1 " 0x%llx, " #arg2 " 0x%llx", arg1, arg2)
so you get: entry: 0x00 level:0xF00
I *think* there is a way to make this work like var args and have a single
function, but it becomes impossible for people to read.
>
>
> And a macro to handle retry when kicking vCPUs out of the guest:
>
> #define tdh_do_no_vcpus(tdh_func, kvm, args...) \
> ({ \
> struct kvm_tdx *__kvm_tdx = to_kvm_tdx(kvm); \
> u64 __err; \
> \
> lockdep_assert_held_write(&kvm->mmu_lock); \
There is a functional change in that the lock assert is not required if the BUSY
error can be guaranteed to not happen. I don't think it should be needed
today. I guess it's probably better to not rely on hitting rare races to catch
an issue like that.
> \
> __err = tdh_func(args); \
> if (unlikely(tdx_operand_busy(__err))) { \
> WRITE_ONCE(__kvm_tdx->wait_for_sept_zap, true); \
> kvm_make_all_cpus_request(kvm, KVM_REQ_OUTSIDE_GUEST_MODE); \
> \
> __err = tdh_func(args); \
> \
> WRITE_ONCE(__kvm_tdx->wait_for_sept_zap, false); \
> } \
> __err; \
> })
>
> And with a bit of massaging, we can end up with e.g. this, which IMO is much easier
> to follow than the current form of tdx_sept_remove_private_spte(), which has
> several duplicate sanity checks and error handlers.
>
> The tdh_do_no_vcpus() macro is a little mean, but I think it's a net positive
> as it eliminates quite a lot of "noise", and thus makes it easier to focus on the
> logic. An alternative to a trampoline macro would be to implement a guard()
> and then do a scoped_guard(), but I think that'd be just as hard to read, and
> would require almost as much boilerplate as there is today.
>
> static void tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
> enum pg_level level, u64 spte)
> {
> struct page *page = pfn_to_page(spte_to_pfn(spte));
> int tdx_level = pg_level_to_tdx_sept_level(level);
> struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
> gpa_t gpa = gfn_to_gpa(gfn);
> u64 err, entry, level_state;
>
> /*
> * HKID is released after all private pages have been removed, and set
> * before any might be populated. Warn if zapping is attempted when
> * there can't be anything populated in the private EPT.
> */
> if (KVM_BUG_ON(!is_hkid_assigned(to_kvm_tdx(kvm)), kvm))
> return;
>
> /* TODO: handle large pages. */
> if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm))
> return;
>
> err = tdh_do_no_vcpus(tdh_mem_range_block, kvm, &kvm_tdx->td, gpa,
> tdx_level, &entry, &level_state);
> if (TDX_BUG_ON_2(err, TDH_MEM_RANGE_BLOCK, entry, level_state, kvm))
> return;
>
> /*
> * TDX requires TLB tracking before dropping private page. Do
> * it here, although it is also done later.
> */
> tdx_track(kvm);
>
> /*
> * When zapping private page, write lock is held. So no race condition
> * with other vcpu sept operation.
> * Race with TDH.VP.ENTER due to (0-step mitigation) and Guest TDCALLs.
> */
> err = tdh_do_no_vcpus(tdh_mem_page_remove, kvm, &kvm_tdx->td, gpa,
> tdx_level, &entry, &level_state);
> if (TDX_BUG_ON_2(err, TDH_MEM_PAGE_REMOVE, entry, level_state, kvm))
> return;
>
> err = tdh_phymem_page_wbinvd_hkid((u16)kvm_tdx->hkid, page);
> if (TDX_BUG_ON(err, TDH_PHYMEM_PAGE_WBINVD, kvm))
> return;
>
> tdx_clear_page(page);
> }
Seems like tasteful macro-ization to me.
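As a self-contained illustration of the stringizing trick (a hypothetical
userspace demo, not kernel code), the #arg expansion pulls the caller's
variable names into the format string:

#include <stdio.h>

/* Hypothetical demo macro; mirrors the #arg stringizing proposed above. */
#define DEMO_BUG_ON_2(__err, arg1, arg2) \
	printf("failed: 0x%llx, " #arg1 " 0x%llx, " #arg2 " 0x%llx\n", \
	       (unsigned long long)(__err), \
	       (unsigned long long)(arg1), (unsigned long long)(arg2))

int main(void)
{
	unsigned long long entry = 0x0, level_state = 0xf00;

	/* Prints: failed: 0x8000000000000001, entry 0x0, level_state 0xf00 */
	DEMO_BUG_ON_2(0x8000000000000001ULL, entry, level_state);
	return 0;
}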
On Thu, Aug 28, 2025, Rick P Edgecombe wrote:
> On Thu, 2025-08-28 at 12:21 -0700, Sean Christopherson wrote:
> > Generally speaking, the number of KVM_BUG_ON()s is fine. What we can do though
> > is reduce the amount of boilerplate and the number of paths that propagate a SEAMCALL
> > err through multiple layers, e.g. by eliminating single-use helpers (which is made
> > easier by reducing boilerplate and thus lines of code).
> >
> > Concretely, if we combine the KVM_BUG_ON() usage with pr_tdx_error():
> >
> > #define __TDX_BUG_ON(__err, __fn_str, __kvm, __fmt, __args...) \
> > ({ \
> > struct kvm *_kvm = (__kvm); \
> > bool __ret = !!(__err); \
> > \
> > if (WARN_ON_ONCE(__ret && (!_kvm || !_kvm->vm_bugged))) { \
> > if (_kvm) \
> > kvm_vm_bugged(_kvm); \
> > pr_err_ratelimited("SEAMCALL " __fn_str " failed: 0x%llx" \
> > __fmt "\n", __err, __args); \
> > } \
> > unlikely(__ret); \
> > })
> >
> > #define TDX_BUG_ON(__err, __fn, __kvm) \
> > __TDX_BUG_ON(__err, #__fn, __kvm, "%s", "")
> >
> > #define TDX_BUG_ON_1(__err, __fn, __rcx, __kvm) \
> > __TDX_BUG_ON(__err, #__fn, __kvm, ", rcx 0x%llx", __rcx)
> >
> > #define TDX_BUG_ON_2(__err, __fn, __rcx, __rdx, __kvm) \
> > __TDX_BUG_ON(__err, #__fn, __kvm, ", rcx 0x%llx, rdx 0x%llx", __rcx, __rdx)
> >
> > #define TDX_BUG_ON_3(__err, __fn, __rcx, __rdx, __r8, __kvm) \
> > __TDX_BUG_ON(__err, #__fn, __kvm, ", rcx 0x%llx, rdx 0x%llx, r8 0x%llx", __rcx, __rdx, __r8)
>
> In general sounds good. But it's a bit strange to specify them as rcx, rdx,
> etc. in a general helper. This is fallout from the existing chain of strange
> naming:
>
> For example, tdh_mem_range_block() plucks them from those registers and calls
> them ext_err1 due to their conditional meaning. Then KVM gives them some more
> meaning with 'entry' and 'level_state'. Then prints them out as the original
> register names. How about keeping the KVM names, like:
>
> #define TDX_BUG_ON_2(__err, __fn, arg1, arg2, __kvm) \
> __TDX_BUG_ON(__err, #__fn, __kvm, ", " #arg1 " 0x%llx, " #arg2 " 0x%llx", arg1, arg2)
>
> so you get: entry: 0x00 level:0xF00
Ooh, nice, I'll tack on a patch.
> I *think* there is a way to make this work like var args and have a single
> function, but it becomes impossible for people to read.
Heh, and would probably take two months to decipher the compiler errors in order
to get it working :-)
> > And a macro to handle retry when kicking vCPUs out of the guest:
> >
> > #define tdh_do_no_vcpus(tdh_func, kvm, args...) \
> > ({ \
> > struct kvm_tdx *__kvm_tdx = to_kvm_tdx(kvm); \
> > u64 __err; \
> > \
> > lockdep_assert_held_write(&kvm->mmu_lock); \
>
> There is a functional change
Ugh, I missed that. I'll do a prep change to make that explicit.
> in that the lock assert is not required if the BUSY
> error can be guaranteed to not happen. I don't think it should be needed
> today. I guess it's probably better to not rely on hitting rare races to catch
> an issue like that.
But that's not actually what the code does. The lockdep assert won't trip because
KVM never removes S-EPT entries under read-lock:
if (is_mirror_sp(sp)) {
KVM_BUG_ON(shared, kvm);
remove_external_spte(kvm, gfn, old_spte, level);
}
Not because KVM actually guarantees -EBUSY is avoided. So the current code is
flawed; it just doesn't cause problems.
On Thu, 2025-08-28 at 14:00 -0700, Sean Christopherson wrote:
> But that's not actually what the code does. The lockdep assert won't trip because
> KVM never removes S-EPT entries under read-lock:
Right
>
> if (is_mirror_sp(sp)) {
> KVM_BUG_ON(shared, kvm);
> remove_external_spte(kvm, gfn, old_spte, level);
> }
>
> Not because KVM actually guarantees -EBUSY is avoided. So the current code is
> flawed; it just doesn't cause problems.
Flawed, as in the lockdep should assert regardless of EBUSY? Seems good to me.
Probably if we wanted to try to call tdx_sept_remove_private_spte() under read
lock with special plans to avoid EBUSY, we should think twice anyway.
On Thu, Aug 28, 2025, Rick P Edgecombe wrote:
> On Thu, 2025-08-28 at 14:00 -0700, Sean Christopherson wrote:
> > But that's not actually what the code does. The lockdep assert won't trip because
> > KVM never removes S-EPT entries under read-lock:
>
> Right
>
> >
> > if (is_mirror_sp(sp)) {
> > KVM_BUG_ON(shared, kvm);
> > remove_external_spte(kvm, gfn, old_spte, level);
> > }
> >
> > Not because KVM actually guarantees -EBUSY is avoided. So the current code is
> > flawed; it just doesn't cause problems.
>
> Flawed, as in the lockdep should assert regardless of EBUSY?
Yep, exactly.
> Seems good to me.
> Probably if we wanted to try to call tdx_sept_remove_private_spte() under read
> lock with special plans to avoid EBUSY, we should think twice anyway.
Heh, add a few zeros to "twice" :-D
On Tue, Aug 26, 2025 at 05:05:16PM -0700, Sean Christopherson wrote:
> Return -EIO when a KVM_BUG_ON() is tripped, as KVM's ABI is to return -EIO
> when a VM has been killed due to a KVM bug, not -EINVAL.

Looks good to me, though currently the "-EIO" will not be returned to userspace
either. In the fault path, RET_PF_RETRY is returned instead, while in the zap
paths, void is returned.

> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
> arch/x86/kvm/vmx/tdx.c | 8 ++++----
> 1 file changed, 4 insertions(+), 4 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> index 9fb6e5f02cc9..ef4ffcad131f 100644
> --- a/arch/x86/kvm/vmx/tdx.c
> +++ b/arch/x86/kvm/vmx/tdx.c
> @@ -1624,7 +1624,7 @@ static int tdx_mem_page_record_premap_cnt(struct kvm *kvm, gfn_t gfn,
> struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
>
> if (KVM_BUG_ON(kvm->arch.pre_fault_allowed, kvm))
> - return -EINVAL;
> + return -EIO;
>
> /* nr_premapped will be decreased when tdh_mem_page_add() is called. */
> atomic64_inc(&kvm_tdx->nr_premapped);
> @@ -1638,7 +1638,7 @@ static int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
>
> /* TODO: handle large pages. */
> if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm))
> - return -EINVAL;
> + return -EIO;
>
> /*
> * Read 'pre_fault_allowed' before 'kvm_tdx->state'; see matching
> @@ -1849,7 +1849,7 @@ static int tdx_sept_free_private_spt(struct kvm *kvm, gfn_t gfn,
> * and slot move/deletion.
> */
> if (KVM_BUG_ON(is_hkid_assigned(kvm_tdx), kvm))
> - return -EINVAL;
> + return -EIO;
>
> /*
> * The HKID assigned to this TD was already freed and cache was
> @@ -1870,7 +1870,7 @@ static int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
> * there can't be anything populated in the private EPT.
> */
> if (KVM_BUG_ON(!is_hkid_assigned(to_kvm_tdx(kvm)), kvm))
> - return -EINVAL;
> + return -EIO;
>
> ret = tdx_sept_zap_private_spte(kvm, gfn, level, page);
> if (ret <= 0)
> --
> 2.51.0.268.g9569e192d0-goog
On Wed, Aug 27, 2025, Yan Zhao wrote:
> On Tue, Aug 26, 2025 at 05:05:16PM -0700, Sean Christopherson wrote:
> > Return -EIO when a KVM_BUG_ON() is tripped, as KVM's ABI is to return -EIO
> > when a VM has been killed due to a KVM bug, not -EINVAL.
>
> Looks good to me, though currently the "-EIO" will not be returned to userspace
> either. In the fault path, RET_PF_RETRY is returned instead, while in the zap
> paths, void is returned.

Yeah, I suspected as much. I'll call that out in the changelog, i.e. that this
is really just for internal consistency.