[PATCH v2 02/18] x86/domain: limit window where curr_vcpu != current on context switch

Posted by Roger Pau Monne 3 weeks, 6 days ago
On x86 Xen will perform lazy context switches to the idle vCPU, where the
previously running vCPU context is not overwritten, and only current is updated
to point to the idle vCPU.  The state is then disjunct between current and
curr_vcpu: current points to the idle vCPU, while curr_vcpu points to the vCPU
whose context is loaded on the pCPU.

While on that lazy context switched state, certain calls (like
map_domain_page()) will trigger a full synchronization of the pCPU state by
forcing a context switch.  Note however that calling any such function from
inside the context switch code itself is very likely to trigger an infinite
recursion loop.

Attempt to limit the window where curr_vcpu != current in the context switch
code, so as to prevent an infinite recursion loop around sync_local_execstate().

This is required for using map_domain_page() in the vCPU context switch code;
otherwise using map_domain_page() in that context ends up in a recursive
sync_local_execstate() loop:

map_domain_page() -> sync_local_execstate() -> map_domain_page() -> ...
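
As an illustration only, a toy self-contained model of the loop (heavily
simplified stand-ins, not the actual Xen code):

    /* Toy model: "cur" stands in for current, "loaded" for curr_vcpu. */
    #include <stdio.h>

    enum which { GUEST, IDLE };

    static enum which cur = IDLE;      /* lazily switched to the idle vCPU */
    static enum which loaded = GUEST;  /* guest context still loaded */
    static unsigned int depth;

    static void map_domain_page_model(void);

    static void sync_local_execstate_model(void)
    {
        if ( loaded != cur )
        {
            /* Full context switch; using map_domain_page() from in here
             * re-enters the sync path while loaded/cur still differ. */
            map_domain_page_model();
            loaded = cur;
        }
    }

    static void map_domain_page_model(void)
    {
        if ( ++depth > 3 )
        {
            puts("map_domain_page -> sync_local_execstate -> ...");
            return; /* cap the model; the real loop would not terminate */
        }
        if ( loaded != cur )           /* mapcache_current_vcpu() style check */
            sync_local_execstate_model();
    }

    int main(void)
    {
        map_domain_page_model();
        return 0;
    }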

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Changes since v1:
 - New in this version.
---
 xen/arch/x86/domain.c | 58 +++++++++++++++++++++++++++++++++++--------
 xen/arch/x86/traps.c  |  2 --
 2 files changed, 48 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 78a13e6812c9..1f680bf176ee 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1982,16 +1982,16 @@ static void load_default_gdt(unsigned int cpu)
     per_cpu(full_gdt_loaded, cpu) = false;
 }
 
-static void __context_switch(void)
+static void __context_switch(struct vcpu *n)
 {
     struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
     unsigned int          cpu = smp_processor_id();
     struct vcpu          *p = per_cpu(curr_vcpu, cpu);
-    struct vcpu          *n = current;
     struct domain        *pd = p->domain, *nd = n->domain;
 
     ASSERT(p != n);
     ASSERT(!vcpu_cpu_dirty(n));
+    ASSERT(p == current);
 
     if ( !is_idle_domain(pd) )
     {
@@ -2036,6 +2036,18 @@ static void __context_switch(void)
 
     write_ptbase(n);
 
+    /*
+     * It's relevant to set both current and curr_vcpu back-to-back, to avoid a
+     * window where calls to mapcache_current_vcpu() during the context switch
+     * could trigger a recursive loop.
+     *
+     * Do the current switch immediately after switching to the new guest
+     * page-tables, so that current is (almost) always in sync with the
+     * currently loaded page-tables.
+     */
+    set_current(n);
+    per_cpu(curr_vcpu, cpu) = n;
+
 #ifdef CONFIG_PV
     /* Prefetch the VMCB if we expect to use it later in the context switch */
     if ( using_svm() && is_pv_64bit_domain(nd) && !is_idle_domain(nd) )
@@ -2048,8 +2060,6 @@ static void __context_switch(void)
     if ( pd != nd )
         cpumask_clear_cpu(cpu, pd->dirty_cpumask);
     write_atomic(&p->dirty_cpu, VCPU_CPU_CLEAN);
-
-    per_cpu(curr_vcpu, cpu) = n;
 }
 
 void context_switch(struct vcpu *prev, struct vcpu *next)
@@ -2081,16 +2091,36 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
 
     local_irq_disable();
 
-    set_current(next);
-
     if ( (per_cpu(curr_vcpu, cpu) == next) ||
          (is_idle_domain(nextd) && cpu_online(cpu)) )
     {
+        /*
+         * Lazy context switch to the idle vCPU, set current == idle.  Full
+         * context switch happens if/when sync_local_execstate() is called.
+         */
+        set_current(next);
         local_irq_enable();
     }
     else
     {
-        __context_switch();
+        /*
+         * curr_vcpu will always point to the currently loaded vCPU context, as
+         * it's not updated when doing a lazy switch to the idle vCPU.
+         */
+        struct vcpu *prev_ctx = per_cpu(curr_vcpu, cpu);
+
+        if ( prev_ctx != current )
+        {
+            /*
+             * Doing a full context switch to a non-idle vCPU from a lazy
+             * context switched state.  Adjust current to point to the
+             * currently loaded vCPU context.
+             */
+            ASSERT(current == idle_vcpu[cpu]);
+            ASSERT(!is_idle_vcpu(next));
+            set_current(prev_ctx);
+        }
+        __context_switch(next);
 
         /* Re-enable interrupts before restoring state which may fault. */
         local_irq_enable();
@@ -2156,15 +2186,23 @@ int __sync_local_execstate(void)
 {
     unsigned long flags;
     int switch_required;
+    unsigned int cpu = smp_processor_id();
+    struct vcpu *p;
 
     local_irq_save(flags);
 
-    switch_required = (this_cpu(curr_vcpu) != current);
+    p = per_cpu(curr_vcpu, cpu);
+    switch_required = (p != current);
 
     if ( switch_required )
     {
-        ASSERT(current == idle_vcpu[smp_processor_id()]);
-        __context_switch();
+        ASSERT(current == idle_vcpu[cpu]);
+        /*
+         * Restore current to the previously running vCPU, __context_switch()
+         * will update current together with curr_vcpu.
+         */
+        set_current(p);
+        __context_switch(idle_vcpu[cpu]);
     }
 
     local_irq_restore(flags);
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 87b30ce4df2a..487b8c5a78c5 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2232,8 +2232,6 @@ void __init trap_init(void)
 
 void activate_debugregs(const struct vcpu *curr)
 {
-    ASSERT(curr == current);
-
     write_debugreg(0, curr->arch.dr[0]);
     write_debugreg(1, curr->arch.dr[1]);
     write_debugreg(2, curr->arch.dr[2]);
-- 
2.46.0


Re: [PATCH v2 02/18] x86/domain: limit window where curr_vcpu != current on context switch
Posted by Jan Beulich 3 weeks, 5 days ago
On 08.01.2025 15:26, Roger Pau Monne wrote:
> On x86 Xen will perform lazy context switches to the idle vCPU, where the
> previously running vCPU context is not overwritten, and only current is updated
> to point to the idle vCPU.  The state is then disjunct between current and
> curr_vcpu: current points to the idle vCPU, while curr_vcpu points to the vCPU
> whose context is loaded on the pCPU.
> 
> While on that lazy context switched state, certain calls (like
> map_domain_page()) will trigger a full synchronization of the pCPU state by
> forcing a context switch.  Note however how calling any of such functions
> inside the context switch code itself is very likely to trigger an infinite
> recursion loop.
> 
> Attempt to limit the window where curr_vcpu != current in the context switch
> code, so as to prevent an infinite recursion loop around sync_local_execstate().
> 
> This is required for using map_domain_page() in the vCPU context switch code,
> otherwise using map_domain_page() in that context ends up in a recursive
> sync_local_execstate() loop:

Question is whether it's a good idea in the first place to start using
map_domain_page() from the context switch path. Surely there are possible
alternatives.

> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -1982,16 +1982,16 @@ static void load_default_gdt(unsigned int cpu)
>      per_cpu(full_gdt_loaded, cpu) = false;
>  }
>  
> -static void __context_switch(void)
> +static void __context_switch(struct vcpu *n)
>  {
>      struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
>      unsigned int          cpu = smp_processor_id();
>      struct vcpu          *p = per_cpu(curr_vcpu, cpu);
> -    struct vcpu          *n = current;
>      struct domain        *pd = p->domain, *nd = n->domain;
>  
>      ASSERT(p != n);
>      ASSERT(!vcpu_cpu_dirty(n));
> +    ASSERT(p == current);
>  
>      if ( !is_idle_domain(pd) )
>      {
> @@ -2036,6 +2036,18 @@ static void __context_switch(void)
>  
>      write_ptbase(n);
>  
> +    /*
> +     * It's relevant to set both current and curr_vcpu back-to-back, to avoid a
> +     * window where calls to mapcache_current_vcpu() during the context switch
> +     * could trigger a recursive loop.
> +     *
> +     * Do the current switch immediately after switching to the new guest
> +     * page-tables, so that current is (almost) always in sync with the
> +     * currently loaded page-tables.
> +     */
> +    set_current(n);
> +    per_cpu(curr_vcpu, cpu) = n;

The latter paragraph of the comment states something that so far wasn't intended,
and imo also shouldn't be going forward. It's curr_vcpu which wants to be in sync
with the loaded page tables. (Whether pulling ahead its updating is okay is a
separate question. All of these actions used to be very carefully placed the
way they are. Which isn't to say that I can exclude things having gone stale ...)
And yes, that has always meant that mapcache_current_vcpu()'s condition for
calling sync_local_execstate() was building upon the fact that it won't be called
from context switching contexts.

Did you consider updating that condition (evaluating curr_vcpu) instead?

> @@ -2048,8 +2060,6 @@ static void __context_switch(void)
>      if ( pd != nd )
>          cpumask_clear_cpu(cpu, pd->dirty_cpumask);
>      write_atomic(&p->dirty_cpu, VCPU_CPU_CLEAN);
> -
> -    per_cpu(curr_vcpu, cpu) = n;
>  }
>  
>  void context_switch(struct vcpu *prev, struct vcpu *next)
> @@ -2081,16 +2091,36 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
>  
>      local_irq_disable();
>  
> -    set_current(next);
> -
>      if ( (per_cpu(curr_vcpu, cpu) == next) ||
>           (is_idle_domain(nextd) && cpu_online(cpu)) )
>      {
> +        /*
> +         * Lazy context switch to the idle vCPU, set current == idle.  Full
> +         * context switch happens if/when sync_local_execstate() is called.
> +         */
> +        set_current(next);
>          local_irq_enable();

The comment is misleading as far as the first half of the if() condition goes:
No further switching is going to happen in that case, aiui.

>      }
>      else
>      {
> -        __context_switch();
> +        /*
> +         * curr_vcpu will always point to the currently loaded vCPU context, as
> +         * it's not updated when doing a lazy switch to the idle vCPU.
> +         */
> +        struct vcpu *prev_ctx = per_cpu(curr_vcpu, cpu);
> +
> +        if ( prev_ctx != current )
> +        {
> +            /*
> +             * Doing a full context switch to a non-idle vCPU from a lazy
> +             * context switched state.  Adjust current to point to the
> +             * currently loaded vCPU context.
> +             */
> +            ASSERT(current == idle_vcpu[cpu]);
> +            ASSERT(!is_idle_vcpu(next));
> +            set_current(prev_ctx);

This feels wrong, as in "current" then not representing what it should represent,
for a certain time window. I may be dense, but neither comment nor description
clarifies to me why this might be needed. I can see that it's needed to please the
ASSERT() you add to __context_switch(), yet then I might ask why that assertion
is put there.

> +        }
> +        __context_switch(next);
>  
>          /* Re-enable interrupts before restoring state which may fault. */
>          local_irq_enable();
> @@ -2156,15 +2186,23 @@ int __sync_local_execstate(void)
>  {
>      unsigned long flags;
>      int switch_required;
> +    unsigned int cpu = smp_processor_id();
> +    struct vcpu *p;
>  
>      local_irq_save(flags);
>  
> -    switch_required = (this_cpu(curr_vcpu) != current);
> +    p = per_cpu(curr_vcpu, cpu);
> +    switch_required = (p != current);
>  
>      if ( switch_required )
>      {
> -        ASSERT(current == idle_vcpu[smp_processor_id()]);
> -        __context_switch();
> +        ASSERT(current == idle_vcpu[cpu]);
> +        /*
> +         * Restore current to the previously running vCPU, __context_switch()
> +         * will update current together with curr_vcpu.
> +         */
> +        set_current(p);

Similarly here.

> +        __context_switch(idle_vcpu[cpu]);
>      }
>  
>      local_irq_restore(flags);
> --- a/xen/arch/x86/traps.c
> +++ b/xen/arch/x86/traps.c
> @@ -2232,8 +2232,6 @@ void __init trap_init(void)
>  
>  void activate_debugregs(const struct vcpu *curr)
>  {
> -    ASSERT(curr == current);
> -
>      write_debugreg(0, curr->arch.dr[0]);
>      write_debugreg(1, curr->arch.dr[1]);
>      write_debugreg(2, curr->arch.dr[2]);

Why would this assertion go away? If it suddenly triggers, the parameter name
would now end up being wrong.

Jan
Re: [PATCH v2 02/18] x86/domain: limit window where curr_vcpu != current on context switch
Posted by Roger Pau Monné 3 weeks, 5 days ago
On Thu, Jan 09, 2025 at 09:59:58AM +0100, Jan Beulich wrote:
> On 08.01.2025 15:26, Roger Pau Monne wrote:
> > On x86 Xen will perform lazy context switches to the idle vCPU, where the
> > previously running vCPU context is not overwritten, and only current is updated
> > to point to the idle vCPU.  The state is then disjunct between current and
> > curr_vcpu: current points to the idle vCPU, while curr_vcpu points to the vCPU
> > whose context is loaded on the pCPU.
> > 
> > While on that lazy context switched state, certain calls (like
> > map_domain_page()) will trigger a full synchronization of the pCPU state by
> > forcing a context switch.  Note however how calling any of such functions
> > inside the context switch code itself is very likely to trigger an infinite
> > recursion loop.
> > 
> > Attempt to limit the window where curr_vcpu != current in the context switch
> > code, so as to prevent an infinite recursion loop around sync_local_execstate().
> > 
> > This is required for using map_domain_page() in the vCPU context switch code,
> > otherwise using map_domain_page() in that context ends up in a recursive
> > sync_local_execstate() loop:
> 
> Question is whether it's a good idea in the first place to start using
> map_domain_page() from the context switch path. Surely there are possible
> alternatives.

It seemed more natural rather than introducing something new to use
in the context switch path.  I'm happy to hear recommendations, but
overall introducing yet another interface to map stuff just for the
context switch path seems worse than extending an existing interface
to work in that context.

> > --- a/xen/arch/x86/domain.c
> > +++ b/xen/arch/x86/domain.c
> > @@ -1982,16 +1982,16 @@ static void load_default_gdt(unsigned int cpu)
> >      per_cpu(full_gdt_loaded, cpu) = false;
> >  }
> >  
> > -static void __context_switch(void)
> > +static void __context_switch(struct vcpu *n)
> >  {
> >      struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
> >      unsigned int          cpu = smp_processor_id();
> >      struct vcpu          *p = per_cpu(curr_vcpu, cpu);
> > -    struct vcpu          *n = current;
> >      struct domain        *pd = p->domain, *nd = n->domain;
> >  
> >      ASSERT(p != n);
> >      ASSERT(!vcpu_cpu_dirty(n));
> > +    ASSERT(p == current);
> >  
> >      if ( !is_idle_domain(pd) )
> >      {
> > @@ -2036,6 +2036,18 @@ static void __context_switch(void)
> >  
> >      write_ptbase(n);
> >  
> > +    /*
> > +     * It's relevant to set both current and curr_vcpu back-to-back, to avoid a
> > +     * window where calls to mapcache_current_vcpu() during the context switch
> > +     * could trigger a recursive loop.
> > +     *
> > +     * Do the current switch immediately after switching to the new guest
> > +     * page-tables, so that current is (almost) always in sync with the
> > +     * currently loaded page-tables.
> > +     */
> > +    set_current(n);
> > +    per_cpu(curr_vcpu, cpu) = n;
> 
> The latter paragraph of the comment states something that so far wasn't intended,
> and imo also shouldn't be going forward. It's curr_vcpu which wants to be in sync
> with the loaded page tables. (Whether pulling ahead its updating is okay is a
> separate question. All of these actions used to be very carefully placed the
> way they are. Which isn't to say that I can exclude things having gone stale ...)

I've noticed this was all quite carefully placed.  I've also attempted
to take care with the changes I've done here (and tested them
extensively).

> And yes, that has always meant that mapcache_current_vcpu()'s condition for
> calling sync_local_execstate() was building upon the fact that it won't be called
> from context switching contexts.
> 
> Did you consider updating that condition (evaluating curr_vcpu) instead?

We cannot safely use map_domain_page() if current != curr_vcpu,
because at any point (as a result of an interrupt) a call to
sync_local_execstate() could happen and remove the mappings created by
map_domain_page(), as a result of performing a full context switch to
the idle vCPU (and loading the idle vCPU page tables).
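
Roughly, as an illustrative sketch only (not actual code):

    /*
     * While current != curr_vcpu (lazy idle state):
     *
     *   ptr = map_domain_page(mfn);        mapping created for curr_vcpu
     *   <interrupt>
     *     ... -> sync_local_execstate()    full switch to the idle vCPU,
     *                                      idle page tables get loaded
     *   <return from interrupt>
     *   *ptr = ...;                        mapping no longer present
     */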

> 
> > @@ -2048,8 +2060,6 @@ static void __context_switch(void)
> >      if ( pd != nd )
> >          cpumask_clear_cpu(cpu, pd->dirty_cpumask);
> >      write_atomic(&p->dirty_cpu, VCPU_CPU_CLEAN);
> > -
> > -    per_cpu(curr_vcpu, cpu) = n;
> >  }
> >  
> >  void context_switch(struct vcpu *prev, struct vcpu *next)
> > @@ -2081,16 +2091,36 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
> >  
> >      local_irq_disable();
> >  
> > -    set_current(next);
> > -
> >      if ( (per_cpu(curr_vcpu, cpu) == next) ||
> >           (is_idle_domain(nextd) && cpu_online(cpu)) )
> >      {
> > +        /*
> > +         * Lazy context switch to the idle vCPU, set current == idle.  Full
> > +         * context switch happens if/when sync_local_execstate() is called.
> > +         */
> > +        set_current(next);
> >          local_irq_enable();
> 
> The comment is misleading as far as the first half of the if() condition goes:
> No further switching is going to happen in that case, aiui.

Right, I should clarify that comment: this is either a lazy context
switch, or the return from a lazy state to the previously running
vCPU.

> >      }
> >      else
> >      {
> > -        __context_switch();
> > +        /*
> > +         * curr_vcpu will always point to the currently loaded vCPU context, as
> > +         * it's not updated when doing a lazy switch to the idle vCPU.
> > +         */
> > +        struct vcpu *prev_ctx = per_cpu(curr_vcpu, cpu);
> > +
> > +        if ( prev_ctx != current )
> > +        {
> > +            /*
> > +             * Doing a full context switch to a non-idle vCPU from a lazy
> > +             * context switched state.  Adjust current to point to the
> > +             * currently loaded vCPU context.
> > +             */
> > +            ASSERT(current == idle_vcpu[cpu]);
> > +            ASSERT(!is_idle_vcpu(next));
> > +            set_current(prev_ctx);
> 
> This feels wrong, as in "current" then not representing what it should represent,
> for a certain time window. I may be dense, but neither comment nor description
> clarifies to me why this might be needed. I can see that it's needed to please the
> ASSERT() you add to __context_switch(), yet then I might ask why that assertion
> is put there.

This is done so that when calling __context_switch() current ==
curr_vcpu, and map_domain_page() can be used without getting into an
infinite sync_local_execstate() recursion loop.
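
As a simplified sketch of the intended ordering and invariant:

    /*
     * __context_switch(n) entry:  current == curr_vcpu == <loaded vCPU>
     *     ...
     *     write_ptbase(n);
     *     set_current(n);          updated back-to-back, right after the
     *     curr_vcpu = n;           page-table switch
     *     ...
     * exit:                        current == curr_vcpu == n
     *
     * With current == curr_vcpu on entry and on exit, a map_domain_page()
     * call from within the function finds no lazy state to sync, so
     * sync_local_execstate() stays a no-op and cannot recurse.
     */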

> 
> > +        }
> > +        __context_switch(next);
> >  
> >          /* Re-enable interrupts before restoring state which may fault. */
> >          local_irq_enable();
> > @@ -2156,15 +2186,23 @@ int __sync_local_execstate(void)
> >  {
> >      unsigned long flags;
> >      int switch_required;
> > +    unsigned int cpu = smp_processor_id();
> > +    struct vcpu *p;
> >  
> >      local_irq_save(flags);
> >  
> > -    switch_required = (this_cpu(curr_vcpu) != current);
> > +    p = per_cpu(curr_vcpu, cpu);
> > +    switch_required = (p != current);
> >  
> >      if ( switch_required )
> >      {
> > -        ASSERT(current == idle_vcpu[smp_processor_id()]);
> > -        __context_switch();
> > +        ASSERT(current == idle_vcpu[cpu]);
> > +        /*
> > +         * Restore current to the previously running vCPU, __context_switch()
> > +         * will update current together with curr_vcpu.
> > +         */
> > +        set_current(p);
> 
> Similarly here.

Same reason, so that when calling __context_switch() current ==
curr_vcpu and map_domain_page() can be used (and in general
sync_local_execstate() becomes a no-op because a switch is already in
progress).

> 
> > +        __context_switch(idle_vcpu[cpu]);
> >      }
> >  
> >      local_irq_restore(flags);
> > --- a/xen/arch/x86/traps.c
> > +++ b/xen/arch/x86/traps.c
> > @@ -2232,8 +2232,6 @@ void __init trap_init(void)
> >  
> >  void activate_debugregs(const struct vcpu *curr)
> >  {
> > -    ASSERT(curr == current);
> > -
> >      write_debugreg(0, curr->arch.dr[0]);
> >      write_debugreg(1, curr->arch.dr[1]);
> >      write_debugreg(2, curr->arch.dr[2]);
> 
> Why would this assertion go away? If it suddenly triggers, the parameter name
> would now end up being wrong.

Well, at the point where activate_debugregs() gets called (in
paravirt_ctxt_switch_to()), current == previous as a result of this
change, so the assert is no longer true on purpose on that call
path.

Thanks, Roger.
Re: [PATCH v2 02/18] x86/domain: limit window where curr_vcpu != current on context switch
Posted by Jan Beulich 3 weeks ago
On 09.01.2025 18:33, Roger Pau Monné wrote:
> On Thu, Jan 09, 2025 at 09:59:58AM +0100, Jan Beulich wrote:
>> On 08.01.2025 15:26, Roger Pau Monne wrote:
>>> @@ -2048,8 +2060,6 @@ static void __context_switch(void)
>>>      if ( pd != nd )
>>>          cpumask_clear_cpu(cpu, pd->dirty_cpumask);
>>>      write_atomic(&p->dirty_cpu, VCPU_CPU_CLEAN);
>>> -
>>> -    per_cpu(curr_vcpu, cpu) = n;
>>>  }
>>>  
>>>  void context_switch(struct vcpu *prev, struct vcpu *next)
>>> @@ -2081,16 +2091,36 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
>>>  
>>>      local_irq_disable();
>>>  
>>> -    set_current(next);
>>> -
>>>      if ( (per_cpu(curr_vcpu, cpu) == next) ||
>>>           (is_idle_domain(nextd) && cpu_online(cpu)) )
>>>      {
>>> +        /*
>>> +         * Lazy context switch to the idle vCPU, set current == idle.  Full
>>> +         * context switch happens if/when sync_local_execstate() is called.
>>> +         */
>>> +        set_current(next);
>>>          local_irq_enable();
>>
>> The comment is misleading as far as the first half of the if() condition goes:
>> No further switching is going to happen in that case, aiui.
> 
> Right, I should clarify that comment: this is either a lazy context
> switch, or the return from a lazy state to the previously running
> vCPU.
> 
>>>      }
>>>      else
>>>      {
>>> -        __context_switch();
>>> +        /*
>>> +         * curr_vcpu will always point to the currently loaded vCPU context, as
>>> +         * it's not updated when doing a lazy switch to the idle vCPU.
>>> +         */
>>> +        struct vcpu *prev_ctx = per_cpu(curr_vcpu, cpu);
>>> +
>>> +        if ( prev_ctx != current )
>>> +        {
>>> +            /*
>>> +             * Doing a full context switch to a non-idle vCPU from a lazy
>>> +             * context switched state.  Adjust current to point to the
>>> +             * currently loaded vCPU context.
>>> +             */
>>> +            ASSERT(current == idle_vcpu[cpu]);
>>> +            ASSERT(!is_idle_vcpu(next));
>>> +            set_current(prev_ctx);
>>
>> This feels wrong, as in "current" then not representing what it should represent,
>> for a certain time window. I may be dense, but neither comment nor description
>> clarifies to me why this might be needed. I can see that it's needed to please the
>> ASSERT() you add to __context_switch(), yet then I might ask why that assertion
>> is put there.
> 
> This is done so that when calling __context_switch() current ==
> curr_vcpu, and map_domain_page() can be used without getting into an
> infinite sync_local_execstate() recursion loop.

Yet it's the purpose of __context_switch() to bring curr_vcpu in sync
with current. IOW both matching up is supposed to be an exit condition
of the function, not an entry one.

Plus, as indicated when we were talking this through yesterday, the
set_current() here makes "current" no longer point at what - from the
scheduler's perspective - is (supposed to be) the current vCPU.

Aiui this adjustment is the reason for ...

>>> --- a/xen/arch/x86/traps.c
>>> +++ b/xen/arch/x86/traps.c
>>> @@ -2232,8 +2232,6 @@ void __init trap_init(void)
>>>  
>>>  void activate_debugregs(const struct vcpu *curr)
>>>  {
>>> -    ASSERT(curr == current);
>>> -
>>>      write_debugreg(0, curr->arch.dr[0]);
>>>      write_debugreg(1, curr->arch.dr[1]);
>>>      write_debugreg(2, curr->arch.dr[2]);
>>
>> Why would this assertion go away? If it suddenly triggers, the parameter name
>> would now end up being wrong.
> 
> Well, at the point where activate_debugregs() gets called (in
> paravirt_ctxt_switch_to()), current == previous as a result of this
> change, so the assert is no longer true on purpose on that call
> path.

... this behavior. Which, as said, feels wrong at the latest when "curr" was
renamed to no longer suggest it actually is cached "current". At that point
it'll be dubious whose ->arch.dr[] are actually written into the CPU
registers.

Also let's not forget that there's a 2nd call here, where I very much hope
it continues to be "current" that's being passed in.

Jan

Re: [PATCH v2 02/18] x86/domain: limit window where curr_vcpu != current on context switch
Posted by Roger Pau Monné 2 weeks, 4 days ago
On Tue, Jan 14, 2025 at 04:02:01PM +0100, Jan Beulich wrote:
> On 09.01.2025 18:33, Roger Pau Monné wrote:
> > On Thu, Jan 09, 2025 at 09:59:58AM +0100, Jan Beulich wrote:
> >> On 08.01.2025 15:26, Roger Pau Monne wrote:
> >>>      }
> >>>      else
> >>>      {
> >>> -        __context_switch();
> >>> +        /*
> >>> +         * curr_vcpu will always point to the currently loaded vCPU context, as
> >>> +         * it's not updated when doing a lazy switch to the idle vCPU.
> >>> +         */
> >>> +        struct vcpu *prev_ctx = per_cpu(curr_vcpu, cpu);
> >>> +
> >>> +        if ( prev_ctx != current )
> >>> +        {
> >>> +            /*
> >>> +             * Doing a full context switch to a non-idle vCPU from a lazy
> >>> +             * context switched state.  Adjust current to point to the
> >>> +             * currently loaded vCPU context.
> >>> +             */
> >>> +            ASSERT(current == idle_vcpu[cpu]);
> >>> +            ASSERT(!is_idle_vcpu(next));
> >>> +            set_current(prev_ctx);
> >>
> >> This feels wrong, as in "current" then not representing what it should represent,
> >> for a certain time window. I may be dense, but neither comment nor description
> >> clarifies to me why this might be needed. I can see that it's needed to please the
> >> ASSERT() you add to __context_switch(), yet then I might ask why that assertion
> >> is put there.
> > 
> > This is done so that when calling __context_switch() current ==
> > curr_vcpu, and map_domain_page() can be used without getting into an
> > infinite sync_local_execstate() recursion loop.
> 
> Yet it's the purpose of __context_switch() to bring curr_vcpu in sync
> with current. IOW both matching up is supposed to be an exit condition
> of the function, not an entry one.
> 
> Plus, as indicated when we were talking this through yesterday, the
> set_current() here makes "current" no longer point at what - from the
> scheduler's perspective - is (supposed to be) the current vCPU.

I understand this, and I will look into alternative ways to work around
the issues I'm facing that prompted the changes proposed in this
patch.

I've been thinking about what we spoke of, disabling the lazy idle context
switch when ASI is enabled, and I'm afraid that won't be enough.  The
{populate,destroy}_perdomain_mapping() functions added later in the
series will be used in the context switch path regardless of whether
ASI is enabled, and those functions require map_domain_page() to be
usable.  Hence map_domain_page() needs to be usable in the context
switch path.

I will see whether I can allow the usage of map_domain_page() at
context switch in a different way.

I understand the main concern is the window where current and the
scheduler's notion of current diverge, right?

Arguably this is already happening in context_switch(), as
set_current() gets called almost at the beginning of the function,
while the call to sched_context_switched() only happens at the tail of
the function.  So for the whole call to __context_switch() current is
not in sync with the scheduler's notion of the currently running vCPU.  And I'm not
saying this is a model to follow, but the context switch code is
already fairly special, hence I don't see the change here as that much
different from the current logic.

That said, I will still try to figure out an alternative way to deal with
the usage of map_domain_page() in the context switch path.

> Aiui this adjustment is the reason for ...
> 
> >>> --- a/xen/arch/x86/traps.c
> >>> +++ b/xen/arch/x86/traps.c
> >>> @@ -2232,8 +2232,6 @@ void __init trap_init(void)
> >>>  
> >>>  void activate_debugregs(const struct vcpu *curr)
> >>>  {
> >>> -    ASSERT(curr == current);
> >>> -
> >>>      write_debugreg(0, curr->arch.dr[0]);
> >>>      write_debugreg(1, curr->arch.dr[1]);
> >>>      write_debugreg(2, curr->arch.dr[2]);
> >>
> >> Why would this assertion go away? If it suddenly triggers, the parameter name
> >> would now end up being wrong.
> > 
> > Well, at the point where activate_debugregs() gets called (in
> > paravirt_ctxt_switch_to()), current == previous as a result of this
> > change, so the assert is no longer true on purpose on that call
> > path.
> 
> ... this behavior. Which, as said, feels wrong at the latest when "curr" was
> renamed to no longer suggest it actually is cached "current". At that point
> it'll be dubious whose ->arch.dr[] are actually written into the CPU
> registers.
> 
> Also let's not forget that there's a 2nd call here, where I very much hope
> it continues to be "current" that's being passed in.

Indeed, for the other call the assert would still be valid; that
context is not changed.

Thanks, Roger.

Re: [PATCH v2 02/18] x86/domain: limit window where curr_vcpu != current on context switch
Posted by Alejandro Vallejo 3 weeks, 6 days ago
This is a net gain even without ASI. Having "current" hold the previous vCPU on
__context_switch() makes it _a lot_ easier to follow the lazy switch path.

On Wed Jan 8, 2025 at 2:26 PM GMT, Roger Pau Monne wrote:
> On x86 Xen will perform lazy context switches to the idle vCPU, where the
> previously running vCPU context is not overwritten, and only current is updated
> to point to the idle vCPU.  The state is then disjunct between current and
> curr_vcpu: current points to the idle vCPU, while curr_vcpu points to the vCPU
> whose context is loaded on the pCPU.
>
> While on that lazy context switched state, certain calls (like
> map_domain_page()) will trigger a full synchronization of the pCPU state by
> forcing a context switch.  Note however how calling any of such functions
> inside the context switch code itself is very likely to trigger an infinite
> recursion loop.
>
> Attempt to limit the window where curr_vcpu != current in the context switch
> code, so as to prevent an infinite recursion loop around sync_local_execstate().
>
> This is required for using map_domain_page() in the vCPU context switch code,
> otherwise using map_domain_page() in that context ends up in a recursive
> sync_local_execstate() loop:
>
> map_domain_page() -> sync_local_execstate() -> map_domain_page() -> ...

More generally, it's worth mentioning that we want to establish an invariant
between a per-cpu variable (curr_vcpu) and the currently running page tables.
That way it can be used as a discriminant to know which per-vCPU mappings are
currently active.

That's essential for implementing FPU hiding as proposed here:

  https://lore.kernel.org/xen-devel/20241105143310.28301-1-alejandro.vallejo@cloud.com/

A shorter form of that should probably be mentioned also...

>
> Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
> ---
> Changes since v1:
>  - New in this version.
> ---
>  xen/arch/x86/domain.c | 58 +++++++++++++++++++++++++++++++++++--------
>  xen/arch/x86/traps.c  |  2 --
>  2 files changed, 48 insertions(+), 12 deletions(-)
>
> diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
> index 78a13e6812c9..1f680bf176ee 100644
> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -1982,16 +1982,16 @@ static void load_default_gdt(unsigned int cpu)
>      per_cpu(full_gdt_loaded, cpu) = false;
>  }
>  
> -static void __context_switch(void)
> +static void __context_switch(struct vcpu *n)
>  {
>      struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
>      unsigned int          cpu = smp_processor_id();
>      struct vcpu          *p = per_cpu(curr_vcpu, cpu);
> -    struct vcpu          *n = current;
>      struct domain        *pd = p->domain, *nd = n->domain;
>  
>      ASSERT(p != n);
>      ASSERT(!vcpu_cpu_dirty(n));
> +    ASSERT(p == current);
>  
>      if ( !is_idle_domain(pd) )
>      {
> @@ -2036,6 +2036,18 @@ static void __context_switch(void)
>  
>      write_ptbase(n);
>  
> +    /*
> +     * It's relevant to set both current and curr_vcpu back-to-back, to avoid a
> +     * window where calls to mapcache_current_vcpu() during the context switch
> +     * could trigger a recursive loop.
> +     *
> +     * Do the current switch immediately after switching to the new guest
> +     * page-tables, so that current is (almost) always in sync with the
> +     * currently loaded page-tables.
> +     */
> +    set_current(n);
> +    per_cpu(curr_vcpu, cpu) = n;

... here. So we're not tempted to move these 2 far off from write_ptbase().

> +
>  #ifdef CONFIG_PV
>      /* Prefetch the VMCB if we expect to use it later in the context switch */
>      if ( using_svm() && is_pv_64bit_domain(nd) && !is_idle_domain(nd) )
> @@ -2048,8 +2060,6 @@ static void __context_switch(void)
>      if ( pd != nd )
>          cpumask_clear_cpu(cpu, pd->dirty_cpumask);
>      write_atomic(&p->dirty_cpu, VCPU_CPU_CLEAN);
> -
> -    per_cpu(curr_vcpu, cpu) = n;
>  }
>  
>  void context_switch(struct vcpu *prev, struct vcpu *next)
> @@ -2081,16 +2091,36 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
>  
>      local_irq_disable();
>  
> -    set_current(next);
> -
>      if ( (per_cpu(curr_vcpu, cpu) == next) ||
>           (is_idle_domain(nextd) && cpu_online(cpu)) )
>      {
> +        /*
> +         * Lazy context switch to the idle vCPU, set current == idle.  Full
> +         * context switch happens if/when sync_local_execstate() is called.
> +         */
> +        set_current(next);
>          local_irq_enable();
>      }
>      else
>      {
> -        __context_switch();
> +        /*
> +         * curr_vcpu will always point to the currently loaded vCPU context, as

nit: s/will always point/always points/ ? It's an unconditional invariant,
after all.

> +         * it's not updated when doing a lazy switch to the idle vCPU.
> +         */
> +        struct vcpu *prev_ctx = per_cpu(curr_vcpu, cpu);
> +
> +        if ( prev_ctx != current )
> +        {
> +            /*
> +             * Doing a full context switch to a non-idle vCPU from a lazy
> +             * context switched state.  Adjust current to point to the
> +             * currently loaded vCPU context.
> +             */
> +            ASSERT(current == idle_vcpu[cpu]);
> +            ASSERT(!is_idle_vcpu(next));
> +            set_current(prev_ctx);
> +        }
> +        __context_switch(next);
>  
>          /* Re-enable interrupts before restoring state which may fault. */
>          local_irq_enable();
> @@ -2156,15 +2186,23 @@ int __sync_local_execstate(void)
>  {
>      unsigned long flags;
>      int switch_required;
> +    unsigned int cpu = smp_processor_id();
> +    struct vcpu *p;
>  
>      local_irq_save(flags);
>  
> -    switch_required = (this_cpu(curr_vcpu) != current);
> +    p = per_cpu(curr_vcpu, cpu);
> +    switch_required = (p != current);
>  
>      if ( switch_required )
>      {
> -        ASSERT(current == idle_vcpu[smp_processor_id()]);
> -        __context_switch();
> +        ASSERT(current == idle_vcpu[cpu]);
> +        /*
> +         * Restore current to the previously running vCPU, __context_switch()
> +         * will update current together with curr_vcpu.
> +         */
> +        set_current(p);
> +        __context_switch(idle_vcpu[cpu]);
>      }
>  
>      local_irq_restore(flags);
> diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
> index 87b30ce4df2a..487b8c5a78c5 100644
> --- a/xen/arch/x86/traps.c
> +++ b/xen/arch/x86/traps.c
> @@ -2232,8 +2232,6 @@ void __init trap_init(void)
>  
>  void activate_debugregs(const struct vcpu *curr)
>  {
> -    ASSERT(curr == current);
> -
>      write_debugreg(0, curr->arch.dr[0]);
>      write_debugreg(1, curr->arch.dr[1]);
>      write_debugreg(2, curr->arch.dr[2]);

Cheers,
Alejandro
Re: [PATCH v2 02/18] x86/domain: limit window where curr_vcpu != current on context switch
Posted by Roger Pau Monné 3 weeks, 5 days ago
On Wed, Jan 08, 2025 at 04:26:46PM +0000, Alejandro Vallejo wrote:
> This is a net gain even without ASI. Having "current" hold the previous vCPU on
> __context_switch() makes it _a lot_ easier to follow the lazy switch path.
> 
> On Wed Jan 8, 2025 at 2:26 PM GMT, Roger Pau Monne wrote:
> > On x86 Xen will perform lazy context switches to the idle vCPU, where the
> > previously running vCPU context is not overwritten, and only current is updated
> > to point to the idle vCPU.  The state is then disjunct between current and
> > curr_vcpu: current points to the idle vCPU, while curr_vcpu points to the vCPU
> > whose context is loaded on the pCPU.
> >
> > While on that lazy context switched state, certain calls (like
> > map_domain_page()) will trigger a full synchronization of the pCPU state by
> > forcing a context switch.  Note however how calling any of such functions
> > inside the context switch code itself is very likely to trigger an infinite
> > recursion loop.
> >
> > Attempt to limit the window where curr_vcpu != current in the context switch
> > code, so as to prevent an infinite recursion loop around sync_local_execstate().
> >
> > This is required for using map_domain_page() in the vCPU context switch code,
> > otherwise using map_domain_page() in that context ends up in a recursive
> > sync_local_execstate() loop:
> >
> > map_domain_page() -> sync_local_execstate() -> map_domain_page() -> ...
> 
> More generally, it's worth mentioning that we want to establish an invariant
> between a per-cpu variable (curr_vcpu) and the currently running page tables.
> That way it can be used as discriminant to know which are the currently active
> per-vCPU mappings.

You kind of already do this by checking curr_vcpu, as with these
changes there's still a window where the vCPU is lazily context
switched, and hence current != curr_vcpu (and curr_vcpu should signal
what page-tables are loaded).

The main point, apart from more accurate signaling of the loaded
page-tables, is to avoid infinite recursion if sync_local_execstate()
is called inside the context switch path.

> That's essential for implementing FPU hiding as proposed here:
> 
>   https://lore.kernel.org/xen-devel/20241105143310.28301-1-alejandro.vallejo@cloud.com/
> 
> A shorter form of that should probably be mentioned also...
> 
> >
> > Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
> > ---
> > Changes since v1:
> >  - New in this version.
> > ---
> >  xen/arch/x86/domain.c | 58 +++++++++++++++++++++++++++++++++++--------
> >  xen/arch/x86/traps.c  |  2 --
> >  2 files changed, 48 insertions(+), 12 deletions(-)
> >
> > diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
> > index 78a13e6812c9..1f680bf176ee 100644
> > --- a/xen/arch/x86/domain.c
> > +++ b/xen/arch/x86/domain.c
> > @@ -1982,16 +1982,16 @@ static void load_default_gdt(unsigned int cpu)
> >      per_cpu(full_gdt_loaded, cpu) = false;
> >  }
> >  
> > -static void __context_switch(void)
> > +static void __context_switch(struct vcpu *n)
> >  {
> >      struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
> >      unsigned int          cpu = smp_processor_id();
> >      struct vcpu          *p = per_cpu(curr_vcpu, cpu);
> > -    struct vcpu          *n = current;
> >      struct domain        *pd = p->domain, *nd = n->domain;
> >  
> >      ASSERT(p != n);
> >      ASSERT(!vcpu_cpu_dirty(n));
> > +    ASSERT(p == current);
> >  
> >      if ( !is_idle_domain(pd) )
> >      {
> > @@ -2036,6 +2036,18 @@ static void __context_switch(void)
> >  
> >      write_ptbase(n);
> >  
> > +    /*
> > +     * It's relevant to set both current and curr_vcpu back-to-back, to avoid a
> > +     * window where calls to mapcache_current_vcpu() during the context switch
> > +     * could trigger a recursive loop.
> > +     *
> > +     * Do the current switch immediately after switching to the new guest
> > +     * page-tables, so that current is (almost) always in sync with the
> > +     * currently loaded page-tables.
> > +     */
> > +    set_current(n);
> > +    per_cpu(curr_vcpu, cpu) = n;
> 
> ... here. So we're not tempted to move these 2 far off from write_ptbase().

I think the "Do the current switch immediately after switching to the
new guest page-tables" sentence already signals that it's important to
keep the setting of current and curr_vcpu as close to the
write_ptbase() call as possible, but I'm open to suggestions for
better wording.

> > +
> >  #ifdef CONFIG_PV
> >      /* Prefetch the VMCB if we expect to use it later in the context switch */
> >      if ( using_svm() && is_pv_64bit_domain(nd) && !is_idle_domain(nd) )
> > @@ -2048,8 +2060,6 @@ static void __context_switch(void)
> >      if ( pd != nd )
> >          cpumask_clear_cpu(cpu, pd->dirty_cpumask);
> >      write_atomic(&p->dirty_cpu, VCPU_CPU_CLEAN);
> > -
> > -    per_cpu(curr_vcpu, cpu) = n;
> >  }
> >  
> >  void context_switch(struct vcpu *prev, struct vcpu *next)
> > @@ -2081,16 +2091,36 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
> >  
> >      local_irq_disable();
> >  
> > -    set_current(next);
> > -
> >      if ( (per_cpu(curr_vcpu, cpu) == next) ||
> >           (is_idle_domain(nextd) && cpu_online(cpu)) )
> >      {
> > +        /*
> > +         * Lazy context switch to the idle vCPU, set current == idle.  Full
> > +         * context switch happens if/when sync_local_execstate() is called.
> > +         */
> > +        set_current(next);
> >          local_irq_enable();
> >      }
> >      else
> >      {
> > -        __context_switch();
> > +        /*
> > +         * curr_vcpu will always point to the currently loaded vCPU context, as
> 
> nit: s/will always point/always points/ ? It's an unconditional invariant,
> after all.

Sure.

Thanks, Roger.