[PATCH v2 2/3] LoongArch: Add paravirt support with vcpu_is_preempted() in guest side

Posted by Bibo Mao 1 week ago
Function vcpu_is_preempted() is used to check whether a vCPU is preempted
or not. Add an implementation of vcpu_is_preempted() for the case where
CONFIG_PARAVIRT is enabled.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
---
 arch/loongarch/include/asm/qspinlock.h |  5 +++++
 arch/loongarch/kernel/paravirt.c       | 16 ++++++++++++++++
 2 files changed, 21 insertions(+)

diff --git a/arch/loongarch/include/asm/qspinlock.h b/arch/loongarch/include/asm/qspinlock.h
index e76d3aa1e1eb..9a5b7ba1f4cb 100644
--- a/arch/loongarch/include/asm/qspinlock.h
+++ b/arch/loongarch/include/asm/qspinlock.h
@@ -34,6 +34,11 @@ static inline bool virt_spin_lock(struct qspinlock *lock)
 	return true;
 }
 
+#ifdef CONFIG_SMP
+#define vcpu_is_preempted	vcpu_is_preempted
+bool vcpu_is_preempted(int cpu);
+#endif
+
 #endif /* CONFIG_PARAVIRT */
 
 #include <asm-generic/qspinlock.h>
diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c
index b1b51f920b23..d4163679adc4 100644
--- a/arch/loongarch/kernel/paravirt.c
+++ b/arch/loongarch/kernel/paravirt.c
@@ -246,6 +246,7 @@ static void pv_disable_steal_time(void)
 }
 
 #ifdef CONFIG_SMP
+DEFINE_STATIC_KEY_FALSE(virt_preempt_key);
 static int pv_time_cpu_online(unsigned int cpu)
 {
 	unsigned long flags;
@@ -267,6 +268,18 @@ static int pv_time_cpu_down_prepare(unsigned int cpu)
 
 	return 0;
 }
+
+bool notrace vcpu_is_preempted(int cpu)
+{
+	struct kvm_steal_time *src;
+
+	if (!static_branch_unlikely(&virt_preempt_key))
+		return false;
+
+	src = &per_cpu(steal_time, cpu);
+	return !!(src->preempted & KVM_VCPU_PREEMPTED);
+}
+EXPORT_SYMBOL(vcpu_is_preempted);
 #endif
 
 static void pv_cpu_reboot(void *unused)
@@ -308,6 +321,9 @@ int __init pv_time_init(void)
 		pr_err("Failed to install cpu hotplug callbacks\n");
 		return r;
 	}
+
+	if (kvm_para_has_feature(KVM_FEATURE_PREEMPT))
+		static_branch_enable(&virt_preempt_key);
 #endif
 
 	static_call_update(pv_steal_clock, paravt_steal_clock);
-- 
2.39.3
Re: [PATCH v2 2/3] LoongArch: Add paravirt support with vcpu_is_preempted() in guest side
Posted by Huacai Chen 1 week ago
Hi, Bibo,

On Mon, Nov 24, 2025 at 11:54 AM Bibo Mao <maobibo@loongson.cn> wrote:
> [...]
> +#ifdef CONFIG_SMP
> +#define vcpu_is_preempted      vcpu_is_preempted
> +bool vcpu_is_preempted(int cpu);
In V1 there was a build error because you referenced mp_ops, so in V2
you needn't put it under CONFIG_SMP.
On the other hand, even if you really build a UP guest kernel, when
multiple guests run together, you probably need vcpu_is_preempted().


Huacai

Re: [PATCH v2 2/3] LoongArch: Add paravirt support with vcpu_is_preempted() in guest side
Posted by Bibo Mao 1 week ago

On 2025/11/24 2:33 PM, Huacai Chen wrote:
> Hi, Bibo,
> 
> On Mon, Nov 24, 2025 at 11:54 AM Bibo Mao <maobibo@loongson.cn> wrote:
>> [...]
>> +#ifdef CONFIG_SMP
>> +#define vcpu_is_preempted      vcpu_is_preempted
>> +bool vcpu_is_preempted(int cpu);
> In V1 there was a build error because you referenced mp_ops, so in V2
> you needn't put it under CONFIG_SMP.
The compile failure is that vcpu_is_preempted() ends up defined in both
arch/loongarch/kernel/paravirt.c and include/linux/sched.h.

The root cause is that <asm/spinlock.h> is not pulled in by sched.h when
CONFIG_SMP is disabled. Here is the relevant part of include/linux/spinlock.h:
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif
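
And the generic fallback in include/linux/sched.h, which takes effect
whenever an architecture does not provide its own definition, is roughly:

#ifndef vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	return false;
}
#endif

So with !CONFIG_SMP the #define in asm/qspinlock.h is never seen, sched.h
emits this static inline, and the out-of-line definition in paravirt.c
conflicts with it.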

> On the other hand, even if you really build a UP guest kernel, when
> multiple guests run together, you probably need vcpu_is_preempted().
It is not related to multiple VMs. When vcpu_is_preempted() is called, it
is to detect whether the destination CPU is preempted; the current CPU,
the one returned by smp_processor_id(), cannot itself be preempted at that
point. So the generic vcpu_is_preempted() only matters when there are
multiple vCPUs.
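
For example, one typical caller is available_idle_cpu() in
kernel/sched/core.c, which always asks about some other candidate CPU:

int available_idle_cpu(int cpu)
{
	if (!idle_cpu(cpu))
		return 0;

	if (vcpu_is_preempted(cpu))
		return 0;

	return 1;
}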

Regards
Bibo Mao

Re: [PATCH v2 2/3] LoongArch: Add paravirt support with vcpu_is_preempted() in guest side
Posted by Huacai Chen 1 week ago
On Mon, Nov 24, 2025 at 3:03 PM Bibo Mao <maobibo@loongson.cn> wrote:
> [...]
> >> +#ifdef CONFIG_SMP
> >> +#define vcpu_is_preempted      vcpu_is_preempted
> >> +bool vcpu_is_preempted(int cpu);
> > In V1 there was a build error because you referenced mp_ops, so in V2
> > you needn't put it under CONFIG_SMP.
> The compile failure is that vcpu_is_preempted() ends up defined in both
> arch/loongarch/kernel/paravirt.c and include/linux/sched.h.
But other archs don't define vcpu_is_preempted() under CONFIG_SMP, and
you can consider inlining the whole vcpu_is_preempted() here.

>
> The root cause is that <asm/spinlock.h> is not pulled in by sched.h when
> CONFIG_SMP is disabled. Here is the relevant part of include/linux/spinlock.h:
> #ifdef CONFIG_SMP
> # include <asm/spinlock.h>
> #else
> # include <linux/spinlock_up.h>
> #endif
>
> > On the other hand, even if you really build a UP guest kernel, when
> > multiple guests run together, you probably need vcpu_is_preempted().
> It is not related to multiple VMs. When vcpu_is_preempted() is called, it
> is to detect whether the destination CPU is preempted; the current CPU,
> the one returned by smp_processor_id(), cannot itself be preempted at that
> point. So the generic vcpu_is_preempted() only matters when there are
> multiple vCPUs.
OK, I'm wrong here.


Huacai

Re: [PATCH v2 2/3] LoongArch: Add paravirt support with vcpu_is_preempted() in guest side
Posted by Bibo Mao 1 week ago

On 2025/11/24 3:13 PM, Huacai Chen wrote:
> [...]
>>>> +#ifdef CONFIG_SMP
>>>> +#define vcpu_is_preempted      vcpu_is_preempted
>>>> +bool vcpu_is_preempted(int cpu);
>>> In V1 there was a build error because you referenced mp_ops, so in V2
>>> you needn't put it under CONFIG_SMP.
>> The compile failure is that vcpu_is_preempted() ends up defined in both
>> arch/loongarch/kernel/paravirt.c and include/linux/sched.h.
> But other archs don't define vcpu_is_preempted() under CONFIG_SMP, and
so what is the advantage of implementing this function if CONFIG_SMP is disabled?

> you can consider inlining the whole vcpu_is_preempted() here.
Defining the function vcpu_is_preempted() as inline is not so easy for
me, it is beyond my ability now :(

With the static key method, the static key needs to be exported and all
modules need to apply the jump label; that is dangerous and I doubt
whether it is worth it.
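
A minimal sketch of such an inline variant, assuming both the key and the
percpu steal_time area were exported, would be something like:

/* asm/qspinlock.h: sketch only, not the posted patch */
DECLARE_STATIC_KEY_FALSE(virt_preempt_key);
DECLARE_PER_CPU(struct kvm_steal_time, steal_time);

#define vcpu_is_preempted	vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	/* no-op unless the host advertised KVM_FEATURE_PREEMPT */
	if (!static_branch_unlikely(&virt_preempt_key))
		return false;

	return !!(per_cpu(steal_time, cpu).preempted & KVM_VCPU_PREEMPTED);
}

plus EXPORT_SYMBOL(virt_preempt_key) and EXPORT_PER_CPU_SYMBOL(steal_time)
in paravirt.c, which is exactly the exporting I would rather avoid.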

Regards
Bibo Mao

Re: [PATCH v2 2/3] LoongArch: Add paravirt support with vcpu_is_preempted() in guest side
Posted by Huacai Chen 1 week ago
On Mon, Nov 24, 2025 at 3:50 PM Bibo Mao <maobibo@loongson.cn> wrote:
> [...]
> >>>> +#ifdef CONFIG_SMP
> >>>> +#define vcpu_is_preempted      vcpu_is_preempted
> >>>> +bool vcpu_is_preempted(int cpu);
> >>> In V1 there was a build error because you referenced mp_ops, so in V2
> >>> you needn't put it under CONFIG_SMP.
> >> The compile failure is that vcpu_is_preempted() ends up defined in both
> >> arch/loongarch/kernel/paravirt.c and include/linux/sched.h.
> > But other archs don't define vcpu_is_preempted() under CONFIG_SMP, and
> so what is the advantage of implementing this function if CONFIG_SMP is disabled?
1. Keep consistency with other architectures.
2. Keep it simple to reduce #ifdefs (and !SMP is just for build, not
very useful in practice).

>
> > you can consider inlining the whole vcpu_is_preempted() here.
> Defining the function vcpu_is_preempted() as inline is not so easy for
> me, it is beyond my ability now :(
>
> With the static key method, the static key needs to be exported and all
> modules need to apply the jump label; that is dangerous and I doubt
> whether it is worth it.
No, you have already done a similar thing in virt_spin_lock(): it is an
inline function and uses virt_spin_lock_key.

Huacai

Re: [PATCH v2 2/3] LoongArch: Add paravirt support with vcpu_is_preempted() in guest side
Posted by Bibo Mao 1 week ago

On 2025/11/24 4:03 PM, Huacai Chen wrote:
> [...]
>>>>>> +#ifdef CONFIG_SMP
>>>>>> +#define vcpu_is_preempted      vcpu_is_preempted
>>>>>> +bool vcpu_is_preempted(int cpu);
>>>>> In V1 there was a build error because you referenced mp_ops, so in V2
>>>>> you needn't put it under CONFIG_SMP.
>>>> The compile failure is that vcpu_is_preempted() ends up defined in both
>>>> arch/loongarch/kernel/paravirt.c and include/linux/sched.h.
>>> But other archs don't define vcpu_is_preempted() under CONFIG_SMP, and
>> so what is the advantage of implementing this function if CONFIG_SMP is disabled?
> 1. Keep consistency with other architectures.
> 2. Keep it simple to reduce #ifdefs (and !SMP is just for build, not
> very useful in practice).
It seems that CONFIG_SMP can be removed from the header file
asm/qspinlock.h, since asm/spinlock.h and asm/qspinlock.h are only
included when CONFIG_SMP is set; otherwise only linux/spinlock_up.h is
included.
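
In that case the CONFIG_PARAVIRT block in asm/qspinlock.h would shrink to
something like:

#define vcpu_is_preempted	vcpu_is_preempted
bool vcpu_is_preempted(int cpu);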

> 
>>
>>> you can consider inlining the whole vcpu_is_preempted() here.
>> Defining the function vcpu_is_preempted() as inline is not so easy for
>> me, it is beyond my ability now :(
>>
>> With the static key method, the static key needs to be exported and all
>> modules need to apply the jump label; that is dangerous and I doubt
>> whether it is worth it.
> No, you have already done a similar thing in virt_spin_lock(): it is an
> inline function and uses virt_spin_lock_key.
virt_spin_lock() is only called by the qspinlock code, in
queued_spin_lock_slowpath(), while vcpu_is_preempted() is declared in the
header file linux/sched.h, so kernel modules may use it.



Re: [PATCH v2 2/3] LoongArch: Add paravirt support with vcpu_is_preempted() in guest side
Posted by Huacai Chen 1 week ago
On Mon, Nov 24, 2025 at 4:35 PM Bibo Mao <maobibo@loongson.cn> wrote:
> [...]
> >>> you can consider inlining the whole vcpu_is_preempted() here.
> >> Defining the function vcpu_is_preempted() as inline is not so easy for
> >> me, it is beyond my ability now :(
> >>
> >> With the static key method, the static key needs to be exported and all
> >> modules need to apply the jump label; that is dangerous and I doubt
> >> whether it is worth it.
> > No, you have already done a similar thing in virt_spin_lock(): it is an
> > inline function and uses virt_spin_lock_key.
> virt_spin_lock() is only called by the qspinlock code, in
> queued_spin_lock_slowpath(), while vcpu_is_preempted() is declared in the
> header file linux/sched.h, so kernel modules may use it.
Yes, if modules want to use it we need EXPORT_SYMBOL. But don't
worry, the static key infrastructure can handle this. Please see
cpu_feature_keys, defined and used in
arch/powerpc/include/asm/cpu_has_feature.h and exported in
arch/powerpc/kernel/cputable.c.
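
For reference, the powerpc side does roughly this:

/* arch/powerpc/kernel/cputable.c */
struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS] = {
	[0 ... NUM_CPU_FTR_KEYS - 1] = STATIC_KEY_TRUE_INIT
};
EXPORT_SYMBOL_GPL(cpu_feature_keys);

and the inline cpu_has_feature() in cpu_has_feature.h then tests
static_branch_likely(&cpu_feature_keys[i]), which works from modules too.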

Huacai

Re: [PATCH v2 2/3] LoongArch: Add paravirt support with vcpu_is_preempted() in guest side
Posted by Bibo Mao 1 week ago

On 2025/11/24 5:03 PM, Huacai Chen wrote:
> [...]
>>> No, you have already done a similar thing in virt_spin_lock(): it is an
>>> inline function and uses virt_spin_lock_key.
>> virt_spin_lock() is only called by the qspinlock code, in
>> queued_spin_lock_slowpath(), while vcpu_is_preempted() is declared in the
>> header file linux/sched.h, so kernel modules may use it.
> Yes, if modules want to use it we need EXPORT_SYMBOL. But don't
> worry, the static key infrastructure can handle this. Please see
> cpu_feature_keys, defined and used in
> arch/powerpc/include/asm/cpu_has_feature.h and exported in
> arch/powerpc/kernel/cputable.c.
No, I do not want to do that: exporting a static key and the percpu
steal_time structure just in order to implement one inline function.


Re: [PATCH v2 2/3] LoongArch: Add paravirt support with vcpu_is_preempted() in guest side
Posted by Huacai Chen 1 week ago
On Mon, Nov 24, 2025 at 5:08 PM Bibo Mao <maobibo@loongson.cn> wrote:
> [...]
> > Yes, if modules want to use it we need EXPORT_SYMBOL. But don't
> > worry, the static key infrastructure can handle this. Please see
> > cpu_feature_keys, defined and used in
> > arch/powerpc/include/asm/cpu_has_feature.h and exported in
> > arch/powerpc/kernel/cputable.c.
> No, I do not want to do that: exporting a static key and the percpu
> steal_time structure just in order to implement one inline function.
In V1 you cared about the performance of vcpu_is_preempted(), so inlining
can satisfy your own requirement.

But this is your own choice, I don't insist on that. I only want to
remove CONFIG_SMP for vcpu_is_preempted().


Huacai

Re: [PATCH v2 2/3] LoongArch: Add paravirt support with vcpu_is_preempted() in guest side
Posted by Jürgen Groß 1 week ago
On 24.11.25 07:33, Huacai Chen wrote:
> Hi, Bibo,
> 
> On Mon, Nov 24, 2025 at 11:54 AM Bibo Mao <maobibo@loongson.cn> wrote:
>> [...]
> In V1 there was a build error because you referenced mp_ops, so in V2
> you needn't put it under CONFIG_SMP.
> On the other hand, even if you really build a UP guest kernel, when
> multiple guests run together, you probably need vcpu_is_preempted().

I don't think so. When the UP guest's vCPU is preempted, who will call
this function?


Juergen