[PATCH 3/4] x86/i387: Rework fpu_fxrstor() given a newer toolchain baseline

Andrew Cooper posted 4 patches 1 week, 1 day ago
[PATCH 3/4] x86/i387: Rework fpu_fxrstor() given a newer toolchain baseline
Posted by Andrew Cooper 1 week, 1 day ago
Use asm goto rather than hiding a memset() in the fixup section.  With the
compiler now able to see the write into fpu_ctxt (as opposed to the asm
constraint erroneously stating it as input-only), it validly objects to the
pointer being const.

While FXRSTOR oughtn't to fault on an all-zeros input, avoid a risk of an
infinite loop entirely by using a fixup scheme similar to xrstor(), and
crashing the domain if we run out of options.

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
---
 xen/arch/x86/i387.c | 65 ++++++++++++++++++++-------------------------
 1 file changed, 29 insertions(+), 36 deletions(-)

diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index b84cd6f7a9e1..e0714ab2267d 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -38,7 +38,8 @@ static inline void fpu_xrstor(struct vcpu *v, uint64_t mask)
 /* Restore x87 FPU, MMX, SSE and SSE2 state */
 static inline void fpu_fxrstor(struct vcpu *v)
 {
-    const fpusse_t *fpu_ctxt = &v->arch.xsave_area->fpu_sse;
+    fpusse_t *fpu_ctxt = &v->arch.xsave_area->fpu_sse;
+    unsigned int faults = 0;
 
     /*
      * Some CPUs don't save/restore FDP/FIP/FOP unless an exception
@@ -59,49 +60,41 @@ static inline void fpu_fxrstor(struct vcpu *v)
      * possibility, which may occur if the block was passed to us by control
      * tools or through VCPUOP_initialise, by silently clearing the block.
      */
+ retry:
     switch ( __builtin_expect(fpu_ctxt->x[FPU_WORD_SIZE_OFFSET], 8) )
     {
     default:
-        asm_inline volatile (
+        asm_inline volatile goto (
             "1: fxrstorq %0\n"
-            ".section .fixup,\"ax\"   \n"
-            "2: push %%"__OP"ax       \n"
-            "   push %%"__OP"cx       \n"
-            "   push %%"__OP"di       \n"
-            "   lea  %0,%%"__OP"di    \n"
-            "   mov  %1,%%ecx         \n"
-            "   xor  %%eax,%%eax      \n"
-            "   rep ; stosl           \n"
-            "   pop  %%"__OP"di       \n"
-            "   pop  %%"__OP"cx       \n"
-            "   pop  %%"__OP"ax       \n"
-            "   jmp  1b               \n"
-            ".previous                \n"
-            _ASM_EXTABLE(1b, 2b)
-            :
-            : "m" (*fpu_ctxt), "i" (sizeof(*fpu_ctxt) / 4) );
+            _ASM_EXTABLE(1b, %l[fault])
+            :: "m" (*fpu_ctxt)
+            :: fault );
         break;
+
     case 4: case 2:
-        asm_inline volatile (
-            "1: fxrstor %0         \n"
-            ".section .fixup,\"ax\"\n"
-            "2: push %%"__OP"ax    \n"
-            "   push %%"__OP"cx    \n"
-            "   push %%"__OP"di    \n"
-            "   lea  %0,%%"__OP"di \n"
-            "   mov  %1,%%ecx      \n"
-            "   xor  %%eax,%%eax   \n"
-            "   rep ; stosl        \n"
-            "   pop  %%"__OP"di    \n"
-            "   pop  %%"__OP"cx    \n"
-            "   pop  %%"__OP"ax    \n"
-            "   jmp  1b            \n"
-            ".previous             \n"
-            _ASM_EXTABLE(1b, 2b)
-            :
-            : "m" (*fpu_ctxt), "i" (sizeof(*fpu_ctxt) / 4) );
+        asm_inline volatile goto (
+            "1: fxrstor %0\n"
+            _ASM_EXTABLE(1b, %l[fault])
+            :: "m" (*fpu_ctxt)
+            :: fault );
         break;
     }
+
+    return;
+
+ fault:
+    faults++;
+
+    switch ( faults )
+    {
+    case 1: /* Stage 1: Reset all state. */
+        memset(fpu_ctxt, 0, sizeof(*fpu_ctxt));
+        goto retry;
+
+    default: /* Stage 2: Nothing else to do. */
+        domain_crash(v->domain, "Uncorrectable FXRSTOR fault\n");
+        return;
+    }
 }
 
 /*******************************/
-- 
2.39.5


Re: [PATCH 3/4] x86/i387: Rework fpu_fxrstor() given a newer toolchain baseline
Posted by Jan Beulich 2 days, 20 hours ago
On 30.12.2025 14:54, Andrew Cooper wrote:
> Use asm goto rather than hiding a memset() in the fixup section.  With the
> compiler now able to see the write into fpu_ctxt (as opposed to the asm
> constraint erroneously stating it as input-only), it validly objects to the
> pointer being const.
> 
> While FXRSTOR oughtn't to fault on an all-zeros input, avoid a risk of an
> infinite loop entirely by using a fixup scheme similar to xrstor(), and
> crashing the domain if we run out of options.

Question being - does ...

> --- a/xen/arch/x86/i387.c
> +++ b/xen/arch/x86/i387.c
> @@ -38,7 +38,8 @@ static inline void fpu_xrstor(struct vcpu *v, uint64_t mask)
>  /* Restore x87 FPU, MMX, SSE and SSE2 state */
>  static inline void fpu_fxrstor(struct vcpu *v)
>  {
> -    const fpusse_t *fpu_ctxt = &v->arch.xsave_area->fpu_sse;
> +    fpusse_t *fpu_ctxt = &v->arch.xsave_area->fpu_sse;
> +    unsigned int faults = 0;
>  
>      /*
>       * Some CPUs don't save/restore FDP/FIP/FOP unless an exception
> @@ -59,49 +60,41 @@ static inline void fpu_fxrstor(struct vcpu *v)
>       * possibility, which may occur if the block was passed to us by control
>       * tools or through VCPUOP_initialise, by silently clearing the block.
>       */
> + retry:
>      switch ( __builtin_expect(fpu_ctxt->x[FPU_WORD_SIZE_OFFSET], 8) )
>      {
>      default:
> -        asm_inline volatile (
> +        asm_inline volatile goto (
>              "1: fxrstorq %0\n"
> -            ".section .fixup,\"ax\"   \n"
> -            "2: push %%"__OP"ax       \n"
> -            "   push %%"__OP"cx       \n"
> -            "   push %%"__OP"di       \n"
> -            "   lea  %0,%%"__OP"di    \n"
> -            "   mov  %1,%%ecx         \n"
> -            "   xor  %%eax,%%eax      \n"
> -            "   rep ; stosl           \n"
> -            "   pop  %%"__OP"di       \n"
> -            "   pop  %%"__OP"cx       \n"
> -            "   pop  %%"__OP"ax       \n"
> -            "   jmp  1b               \n"
> -            ".previous                \n"
> -            _ASM_EXTABLE(1b, 2b)
> -            :
> -            : "m" (*fpu_ctxt), "i" (sizeof(*fpu_ctxt) / 4) );
> +            _ASM_EXTABLE(1b, %l[fault])
> +            :: "m" (*fpu_ctxt)
> +            :: fault );
>          break;
> +
>      case 4: case 2:
> -        asm_inline volatile (
> -            "1: fxrstor %0         \n"
> -            ".section .fixup,\"ax\"\n"
> -            "2: push %%"__OP"ax    \n"
> -            "   push %%"__OP"cx    \n"
> -            "   push %%"__OP"di    \n"
> -            "   lea  %0,%%"__OP"di \n"
> -            "   mov  %1,%%ecx      \n"
> -            "   xor  %%eax,%%eax   \n"
> -            "   rep ; stosl        \n"
> -            "   pop  %%"__OP"di    \n"
> -            "   pop  %%"__OP"cx    \n"
> -            "   pop  %%"__OP"ax    \n"
> -            "   jmp  1b            \n"
> -            ".previous             \n"
> -            _ASM_EXTABLE(1b, 2b)
> -            :
> -            : "m" (*fpu_ctxt), "i" (sizeof(*fpu_ctxt) / 4) );
> +        asm_inline volatile goto (
> +            "1: fxrstor %0\n"
> +            _ASM_EXTABLE(1b, %l[fault])
> +            :: "m" (*fpu_ctxt)
> +            :: fault );
>          break;
>      }
> +
> +    return;
> +
> + fault:
> +    faults++;
> +
> +    switch ( faults )
> +    {
> +    case 1: /* Stage 1: Reset all state. */
> +        memset(fpu_ctxt, 0, sizeof(*fpu_ctxt));
> +        goto retry;
> +
> +    default: /* Stage 2: Nothing else to do. */
> +        domain_crash(v->domain, "Uncorrectable FXRSTOR fault\n");
> +        return;

... this then count as unreachable and/or dead code in Misra's terms? Nicola?
Sure, Eclair wouldn't be able to spot it, but that's no excuse imo.

Jan
Re: [PATCH 3/4] x86/i387: Rework fpu_fxrstor() given a newer toolchain baseline
Posted by Nicola Vetrini 2 days, 20 hours ago
On 2026-01-05 16:52, Jan Beulich wrote:
> On 30.12.2025 14:54, Andrew Cooper wrote:
>> Use asm goto rather than hiding a memset() in the fixup section.  With 
>> the
>> compiler now able to see the write into fpu_ctxt (as opposed to the 
>> asm
>> constraint erroneously stating it as input-only), it validly objects 
>> to the
>> pointer being const.
>> 
>> While FXRSTOR oughtn't to fault on an all-zeros input, avoid a risk of 
>> an
>> infinite loop entirely by using a fixup scheme similar to xrstor(), 
>> and
>> crashing the domain if we run out of options.
> 
> Question being - does ...
> 
>> --- a/xen/arch/x86/i387.c
>> +++ b/xen/arch/x86/i387.c
>> @@ -38,7 +38,8 @@ static inline void fpu_xrstor(struct vcpu *v, 
>> uint64_t mask)
>>  /* Restore x87 FPU, MMX, SSE and SSE2 state */
>>  static inline void fpu_fxrstor(struct vcpu *v)
>>  {
>> -    const fpusse_t *fpu_ctxt = &v->arch.xsave_area->fpu_sse;
>> +    fpusse_t *fpu_ctxt = &v->arch.xsave_area->fpu_sse;
>> +    unsigned int faults = 0;
>> 
>>      /*
>>       * Some CPUs don't save/restore FDP/FIP/FOP unless an exception
>> @@ -59,49 +60,41 @@ static inline void fpu_fxrstor(struct vcpu *v)
>>       * possibility, which may occur if the block was passed to us by 
>> control
>>       * tools or through VCPUOP_initialise, by silently clearing the 
>> block.
>>       */
>> + retry:
>>      switch ( __builtin_expect(fpu_ctxt->x[FPU_WORD_SIZE_OFFSET], 8) )
>>      {
>>      default:
>> -        asm_inline volatile (
>> +        asm_inline volatile goto (
>>              "1: fxrstorq %0\n"
>> -            ".section .fixup,\"ax\"   \n"
>> -            "2: push %%"__OP"ax       \n"
>> -            "   push %%"__OP"cx       \n"
>> -            "   push %%"__OP"di       \n"
>> -            "   lea  %0,%%"__OP"di    \n"
>> -            "   mov  %1,%%ecx         \n"
>> -            "   xor  %%eax,%%eax      \n"
>> -            "   rep ; stosl           \n"
>> -            "   pop  %%"__OP"di       \n"
>> -            "   pop  %%"__OP"cx       \n"
>> -            "   pop  %%"__OP"ax       \n"
>> -            "   jmp  1b               \n"
>> -            ".previous                \n"
>> -            _ASM_EXTABLE(1b, 2b)
>> -            :
>> -            : "m" (*fpu_ctxt), "i" (sizeof(*fpu_ctxt) / 4) );
>> +            _ASM_EXTABLE(1b, %l[fault])
>> +            :: "m" (*fpu_ctxt)
>> +            :: fault );
>>          break;
>> +
>>      case 4: case 2:
>> -        asm_inline volatile (
>> -            "1: fxrstor %0         \n"
>> -            ".section .fixup,\"ax\"\n"
>> -            "2: push %%"__OP"ax    \n"
>> -            "   push %%"__OP"cx    \n"
>> -            "   push %%"__OP"di    \n"
>> -            "   lea  %0,%%"__OP"di \n"
>> -            "   mov  %1,%%ecx      \n"
>> -            "   xor  %%eax,%%eax   \n"
>> -            "   rep ; stosl        \n"
>> -            "   pop  %%"__OP"di    \n"
>> -            "   pop  %%"__OP"cx    \n"
>> -            "   pop  %%"__OP"ax    \n"
>> -            "   jmp  1b            \n"
>> -            ".previous             \n"
>> -            _ASM_EXTABLE(1b, 2b)
>> -            :
>> -            : "m" (*fpu_ctxt), "i" (sizeof(*fpu_ctxt) / 4) );
>> +        asm_inline volatile goto (
>> +            "1: fxrstor %0\n"
>> +            _ASM_EXTABLE(1b, %l[fault])
>> +            :: "m" (*fpu_ctxt)
>> +            :: fault );
>>          break;
>>      }
>> +
>> +    return;
>> +
>> + fault:
>> +    faults++;
>> +
>> +    switch ( faults )
>> +    {
>> +    case 1: /* Stage 1: Reset all state. */
>> +        memset(fpu_ctxt, 0, sizeof(*fpu_ctxt));
>> +        goto retry;
>> +
>> +    default: /* Stage 2: Nothing else to do. */
>> +        domain_crash(v->domain, "Uncorrectable FXRSTOR fault\n");
>> +        return;
> 
> ... this then count as unreachable and/or dead code in Misra's terms? 
> Nicola?
> Sure, Eclair wouldn't be able to spot it, but that's no excuse imo.
> 
> Jan

Right now, probably not, but even if it did, an ASSERT_UNREACHABLE can 
be added in the default branch to deal with that.

-- 
Nicola Vetrini, B.Sc.
Software Engineer
BUGSENG (https://bugseng.com)
LinkedIn: https://www.linkedin.com/in/nicola-vetrini-a42471253
Re: [PATCH 3/4] x86/i387: Rework fpu_fxrstor() given a newer toolchain baseline
Posted by Andrew Cooper 2 days, 19 hours ago
On 05/01/2026 4:13 pm, Nicola Vetrini wrote:
> On 2026-01-05 16:52, Jan Beulich wrote:
>> On 30.12.2025 14:54, Andrew Cooper wrote:
>>> Use asm goto rather than hiding a memset() in the fixup section. 
>>> With the
>>> compiler now able to see the write into fpu_ctxt (as opposed to the asm
>>> constraint erroneously stating it as input-only), it validly objects
>>> to the
>>> pointer being const.
>>>
>>> While FXRSTOR oughtn't to fault on an all-zeros input, avoid a risk
>>> of an
>>> infinite loop entirely by using a fixup scheme similar to xrstor(), and
>>> crashing the domain if we run out of options.
>>
>> Question being - does ...
>>
>>> --- a/xen/arch/x86/i387.c
>>> +++ b/xen/arch/x86/i387.c
>>> @@ -38,7 +38,8 @@ static inline void fpu_xrstor(struct vcpu *v,
>>> uint64_t mask)
>>>  /* Restore x87 FPU, MMX, SSE and SSE2 state */
>>>  static inline void fpu_fxrstor(struct vcpu *v)
>>>  {
>>> -    const fpusse_t *fpu_ctxt = &v->arch.xsave_area->fpu_sse;
>>> +    fpusse_t *fpu_ctxt = &v->arch.xsave_area->fpu_sse;
>>> +    unsigned int faults = 0;
>>>
>>>      /*
>>>       * Some CPUs don't save/restore FDP/FIP/FOP unless an exception
>>> @@ -59,49 +60,41 @@ static inline void fpu_fxrstor(struct vcpu *v)
>>>       * possibility, which may occur if the block was passed to us
>>> by control
>>>       * tools or through VCPUOP_initialise, by silently clearing the
>>> block.
>>>       */
>>> + retry:
>>>      switch ( __builtin_expect(fpu_ctxt->x[FPU_WORD_SIZE_OFFSET], 8) )
>>>      {
>>>      default:
>>> -        asm_inline volatile (
>>> +        asm_inline volatile goto (
>>>              "1: fxrstorq %0\n"
>>> -            ".section .fixup,\"ax\"   \n"
>>> -            "2: push %%"__OP"ax       \n"
>>> -            "   push %%"__OP"cx       \n"
>>> -            "   push %%"__OP"di       \n"
>>> -            "   lea  %0,%%"__OP"di    \n"
>>> -            "   mov  %1,%%ecx         \n"
>>> -            "   xor  %%eax,%%eax      \n"
>>> -            "   rep ; stosl           \n"
>>> -            "   pop  %%"__OP"di       \n"
>>> -            "   pop  %%"__OP"cx       \n"
>>> -            "   pop  %%"__OP"ax       \n"
>>> -            "   jmp  1b               \n"
>>> -            ".previous                \n"
>>> -            _ASM_EXTABLE(1b, 2b)
>>> -            :
>>> -            : "m" (*fpu_ctxt), "i" (sizeof(*fpu_ctxt) / 4) );
>>> +            _ASM_EXTABLE(1b, %l[fault])
>>> +            :: "m" (*fpu_ctxt)
>>> +            :: fault );
>>>          break;
>>> +
>>>      case 4: case 2:
>>> -        asm_inline volatile (
>>> -            "1: fxrstor %0         \n"
>>> -            ".section .fixup,\"ax\"\n"
>>> -            "2: push %%"__OP"ax    \n"
>>> -            "   push %%"__OP"cx    \n"
>>> -            "   push %%"__OP"di    \n"
>>> -            "   lea  %0,%%"__OP"di \n"
>>> -            "   mov  %1,%%ecx      \n"
>>> -            "   xor  %%eax,%%eax   \n"
>>> -            "   rep ; stosl        \n"
>>> -            "   pop  %%"__OP"di    \n"
>>> -            "   pop  %%"__OP"cx    \n"
>>> -            "   pop  %%"__OP"ax    \n"
>>> -            "   jmp  1b            \n"
>>> -            ".previous             \n"
>>> -            _ASM_EXTABLE(1b, 2b)
>>> -            :
>>> -            : "m" (*fpu_ctxt), "i" (sizeof(*fpu_ctxt) / 4) );
>>> +        asm_inline volatile goto (
>>> +            "1: fxrstor %0\n"
>>> +            _ASM_EXTABLE(1b, %l[fault])
>>> +            :: "m" (*fpu_ctxt)
>>> +            :: fault );
>>>          break;
>>>      }
>>> +
>>> +    return;
>>> +
>>> + fault:
>>> +    faults++;
>>> +
>>> +    switch ( faults )
>>> +    {
>>> +    case 1: /* Stage 1: Reset all state. */
>>> +        memset(fpu_ctxt, 0, sizeof(*fpu_ctxt));
>>> +        goto retry;
>>> +
>>> +    default: /* Stage 2: Nothing else to do. */
>>> +        domain_crash(v->domain, "Uncorrectable FXRSTOR fault\n");
>>> +        return;
>>
>> ... this then count as unreachable and/or dead code in Misra's terms?
>> Nicola?
>> Sure, Eclair wouldn't be able to spot it, but that's no excuse imo.
>>
>> Jan
>
> Right now, probably not, but even if it did, an ASSERT_UNREACHABLE can
> be added in the default branch to deal with that.

It's fully reachable.

FXRSTOR can fault multiple times, and can fault for reasons unrelated to
the contents of the buffer.  Fault recovery isn't even limited to
#GP[0] only, and FXRSTOR can suffer #AC from a misaligned pointer.

If Xen is operating properly, it oughtn't to fault more than once, but
right now the logic will livelock rather than terminate.

Further fixes being discussed (better auditing of toolstack-provided
buffers) should cause it never to fault for buffer-contents reasons, at
which point I'll be removing the retry aspect and escalating to
domain_crash() unconditionally.

~Andrew

Re: [PATCH 3/4] x86/i387: Rework fpu_fxrstor() given a newer toolchain baseline
Posted by Jan Beulich 2 days, 19 hours ago
On 05.01.2026 17:39, Andrew Cooper wrote:
> On 05/01/2026 4:13 pm, Nicola Vetrini wrote:
>> On 2026-01-05 16:52, Jan Beulich wrote:
>>> On 30.12.2025 14:54, Andrew Cooper wrote:
>>>> Use asm goto rather than hiding a memset() in the fixup section. 
>>>> With the
>>>> compiler now able to see the write into fpu_ctxt (as opposed to the asm
>>>> constraint erroneously stating it as input-only), it validly objects
>>>> to the
>>>> pointer being const.
>>>>
>>>> While FXRSTOR oughtn't to fault on an all-zeros input, avoid a risk
>>>> of an
>>>> infinite loop entirely by using a fixup scheme similar to xrstor(), and
>>>> crashing the domain if we run out of options.
>>>
>>> Question being - does ...
>>>
>>>> --- a/xen/arch/x86/i387.c
>>>> +++ b/xen/arch/x86/i387.c
>>>> @@ -38,7 +38,8 @@ static inline void fpu_xrstor(struct vcpu *v,
>>>> uint64_t mask)
>>>>  /* Restore x87 FPU, MMX, SSE and SSE2 state */
>>>>  static inline void fpu_fxrstor(struct vcpu *v)
>>>>  {
>>>> -    const fpusse_t *fpu_ctxt = &v->arch.xsave_area->fpu_sse;
>>>> +    fpusse_t *fpu_ctxt = &v->arch.xsave_area->fpu_sse;
>>>> +    unsigned int faults = 0;
>>>>
>>>>      /*
>>>>       * Some CPUs don't save/restore FDP/FIP/FOP unless an exception
>>>> @@ -59,49 +60,41 @@ static inline void fpu_fxrstor(struct vcpu *v)
>>>>       * possibility, which may occur if the block was passed to us
>>>> by control
>>>>       * tools or through VCPUOP_initialise, by silently clearing the
>>>> block.
>>>>       */
>>>> + retry:
>>>>      switch ( __builtin_expect(fpu_ctxt->x[FPU_WORD_SIZE_OFFSET], 8) )
>>>>      {
>>>>      default:
>>>> -        asm_inline volatile (
>>>> +        asm_inline volatile goto (
>>>>              "1: fxrstorq %0\n"
>>>> -            ".section .fixup,\"ax\"   \n"
>>>> -            "2: push %%"__OP"ax       \n"
>>>> -            "   push %%"__OP"cx       \n"
>>>> -            "   push %%"__OP"di       \n"
>>>> -            "   lea  %0,%%"__OP"di    \n"
>>>> -            "   mov  %1,%%ecx         \n"
>>>> -            "   xor  %%eax,%%eax      \n"
>>>> -            "   rep ; stosl           \n"
>>>> -            "   pop  %%"__OP"di       \n"
>>>> -            "   pop  %%"__OP"cx       \n"
>>>> -            "   pop  %%"__OP"ax       \n"
>>>> -            "   jmp  1b               \n"
>>>> -            ".previous                \n"
>>>> -            _ASM_EXTABLE(1b, 2b)
>>>> -            :
>>>> -            : "m" (*fpu_ctxt), "i" (sizeof(*fpu_ctxt) / 4) );
>>>> +            _ASM_EXTABLE(1b, %l[fault])
>>>> +            :: "m" (*fpu_ctxt)
>>>> +            :: fault );
>>>>          break;
>>>> +
>>>>      case 4: case 2:
>>>> -        asm_inline volatile (
>>>> -            "1: fxrstor %0         \n"
>>>> -            ".section .fixup,\"ax\"\n"
>>>> -            "2: push %%"__OP"ax    \n"
>>>> -            "   push %%"__OP"cx    \n"
>>>> -            "   push %%"__OP"di    \n"
>>>> -            "   lea  %0,%%"__OP"di \n"
>>>> -            "   mov  %1,%%ecx      \n"
>>>> -            "   xor  %%eax,%%eax   \n"
>>>> -            "   rep ; stosl        \n"
>>>> -            "   pop  %%"__OP"di    \n"
>>>> -            "   pop  %%"__OP"cx    \n"
>>>> -            "   pop  %%"__OP"ax    \n"
>>>> -            "   jmp  1b            \n"
>>>> -            ".previous             \n"
>>>> -            _ASM_EXTABLE(1b, 2b)
>>>> -            :
>>>> -            : "m" (*fpu_ctxt), "i" (sizeof(*fpu_ctxt) / 4) );
>>>> +        asm_inline volatile goto (
>>>> +            "1: fxrstor %0\n"
>>>> +            _ASM_EXTABLE(1b, %l[fault])
>>>> +            :: "m" (*fpu_ctxt)
>>>> +            :: fault );
>>>>          break;
>>>>      }
>>>> +
>>>> +    return;
>>>> +
>>>> + fault:
>>>> +    faults++;
>>>> +
>>>> +    switch ( faults )
>>>> +    {
>>>> +    case 1: /* Stage 1: Reset all state. */
>>>> +        memset(fpu_ctxt, 0, sizeof(*fpu_ctxt));
>>>> +        goto retry;
>>>> +
>>>> +    default: /* Stage 2: Nothing else to do. */
>>>> +        domain_crash(v->domain, "Uncorrectable FXRSTOR fault\n");
>>>> +        return;
>>>
>>> ... this then count as unreachable and/or dead code in Misra's terms?
>>> Nicola?
>>> Sure, Eclair wouldn't be able to spot it, but that's no excuse imo.
>>
>> Right now, probably not, but even if it did, an ASSERT_UNREACHABLE can
>> be added in the default branch to deal with that.
> 
> It's fully reachable.
> 
> FXRSTOR can fault multiple times, and can fault for reasons unrelated to
> the contents of the buffer.  Fault recovery isn't even limited to
> #GP[0] only, and FXRSTOR can suffer #AC from a misaligned pointer.

None of these faults are what we mean to recover from here. Faults
unrelated to buffer contents would pretty likely occur on the memset()
as well.

As to #AC - in ring 3, but not in ring 0 (where Xen runs)?

> If Xen is operating properly, it oughtn't to fault more than once, but
> right now the logic will livelock rather than terminate.

s/will/would/ as that's only hypothetical (assuming no other bugs).

> Further fixes being discussed (better auditing of toolstack-provided
> buffers) should cause it never to fault for buffer-contents reasons, at
> which point I'll be removing the retry aspect and escalating to
> domain_crash() unconditionally.

Still in the meantime I think Nicola's suggestion should be taken
and ASSERT_UNREACHABLE() be added. Then
Acked-by: Jan Beulich <jbeulich@suse.com>

Jan