[PATCH V2 2/3] Fix: CPUs presence logic in _STA for x86 backward compatibility

Posted by Salil Mehta 2 weeks ago
Checking `is_present` first can break x86 migration from a newer QEMU
version to an older one. This is because the CPRS bit is not defined in
the older QEMU's register block and always reads as 0, so the check
always fails. Remove the CPU_PRESENT bit to preserve the older ABI.

-                If ((\_SB.PCI0.PRES.CPEN == One))
-                {
-                    Local0 = 0x0F
+                If ((\_SB.PCI0.PRES.CPRS == One))
+                {
+                    If ((\_SB.PCI0.PRES.CPEN == One))
+                    {
+                        Local0 = 0x0F
+                    }
+                    Else
+                    {
+                        Local0 = 0x0D
+                    }
                 }

Reported-by: Igor Mammedov <imammedo@redhat.com>
Message-ID: <20241106100047.18901c9d@imammedo.users.ipa.redhat.com>
Signed-off-by: Salil Mehta <salil.mehta@huawei.com>
---
 hw/acpi/cpu.c         | 49 ++++++++++++++++---------------------------
 include/hw/acpi/cpu.h |  1 +
 include/hw/core/cpu.h |  1 -
 3 files changed, 19 insertions(+), 32 deletions(-)

diff --git a/hw/acpi/cpu.c b/hw/acpi/cpu.c
index 23443f09a5..7b0badd08a 100644
--- a/hw/acpi/cpu.c
+++ b/hw/acpi/cpu.c
@@ -23,6 +23,8 @@ enum {
     CPHP_CMD_MAX
 };
 
+static bool always_present_cpus;
+
 static ACPIOSTInfo *acpi_cpu_device_status(int idx, AcpiCpuStatus *cdev)
 {
     ACPIOSTInfo *info = g_new0(ACPIOSTInfo, 1);
@@ -79,7 +81,6 @@ static uint64_t cpu_hotplug_rd(void *opaque, hwaddr addr, unsigned size)
         val |= cdev->is_inserting ? 2 : 0;
         val |= cdev->is_removing  ? 4 : 0;
         val |= cdev->fw_remove  ? 16 : 0;
-        val |= cdev->cpu ? 32 : 0;
         trace_cpuhp_acpi_read_flags(cpu_st->selector, val);
         break;
     case ACPI_CPU_CMD_DATA_OFFSET_RW:
@@ -246,15 +247,9 @@ void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner,
     memory_region_add_subregion(as, base_addr, &state->ctrl_reg);
 }
 
-static bool should_remain_acpi_present(DeviceState *dev)
+static inline bool should_remain_acpi_present(DeviceState *dev)
 {
-    CPUClass *k = CPU_GET_CLASS(dev);
-    /*
-     * A system may contain CPUs that are always present on one die, NUMA node,
-     * or socket, yet may be non-present on another simultaneously. Check from
-     * architecture specific code.
-     */
-    return k->cpu_persistent_status && k->cpu_persistent_status(CPU(dev));
+    return always_present_cpus;
 }
 
 static AcpiCpuStatus *get_cpu_status(CPUHotplugState *cpu_st, DeviceState *dev)
@@ -362,7 +357,6 @@ const VMStateDescription vmstate_cpu_hotplug = {
 #define CPU_REMOVE_EVENT  "CRMV"
 #define CPU_EJECT_EVENT   "CEJ0"
 #define CPU_FW_EJECT_EVENT "CEJF"
-#define CPU_PRESENT       "CPRS"
 
 void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
                     build_madt_cpu_fn build_madt_cpu, hwaddr base_addr,
@@ -423,9 +417,7 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
         aml_append(field, aml_named_field(CPU_EJECT_EVENT, 1));
         /* tell firmware to do device eject, write only */
         aml_append(field, aml_named_field(CPU_FW_EJECT_EVENT, 1));
-        /* 1 if present, read only */
-        aml_append(field, aml_named_field(CPU_PRESENT, 1));
-        aml_append(field, aml_reserved_field(2));
+        aml_append(field, aml_reserved_field(3));
         aml_append(field, aml_named_field(CPU_COMMAND, 8));
         aml_append(cpu_ctrl_dev, field);
 
@@ -455,7 +447,6 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
         Aml *ctrl_lock = aml_name("%s.%s", cphp_res_path, CPU_LOCK);
         Aml *cpu_selector = aml_name("%s.%s", cphp_res_path, CPU_SELECTOR);
         Aml *is_enabled = aml_name("%s.%s", cphp_res_path, CPU_ENABLED);
-        Aml *is_present = aml_name("%s.%s", cphp_res_path, CPU_PRESENT);
         Aml *cpu_cmd = aml_name("%s.%s", cphp_res_path, CPU_COMMAND);
         Aml *cpu_data = aml_name("%s.%s", cphp_res_path, CPU_DATA);
         Aml *ins_evt = aml_name("%s.%s", cphp_res_path, CPU_INSERT_EVENT);
@@ -482,28 +473,24 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
 
         method = aml_method(CPU_STS_METHOD, 1, AML_SERIALIZED);
         {
-            Aml *idx = aml_arg(0);
+            Aml *default_sta = aml_int(opts.always_present_cpus ? 0xD : 0);
             Aml *sta = aml_local(0);
-            Aml *ifctx2;
-            Aml *else_ctx;
+            Aml *idx = aml_arg(0);
+
+            always_present_cpus = opts.always_present_cpus;
 
             aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
             aml_append(method, aml_store(idx, cpu_selector));
-            aml_append(method, aml_store(zero, sta));
-            ifctx = aml_if(aml_equal(is_present, one));
+            /*
+             * Default case:
+             * _STA(0xD) = cpu is present and disabled OR
+             * _STA(0x0) = cpu is not present (and hence disabled)
+             */
+            aml_append(method, aml_store(default_sta, sta));
+            ifctx = aml_if(aml_equal(is_enabled, one));
             {
-                ifctx2 = aml_if(aml_equal(is_enabled, one));
-                {
-                    /* cpu is present and enabled */
-                    aml_append(ifctx2, aml_store(aml_int(0xF), sta));
-                }
-                aml_append(ifctx, ifctx2);
-                else_ctx = aml_else();
-                {
-                    /* cpu is present but disabled */
-                    aml_append(else_ctx, aml_store(aml_int(0xD), sta));
-                }
-                aml_append(ifctx, else_ctx);
+                /* _STA(0xF) = cpu is present and enabled */
+                aml_append(ifctx, aml_or(aml_int(0xF), sta, sta));
             }
             aml_append(method, ifctx);
             aml_append(method, aml_release(ctrl_lock));
diff --git a/include/hw/acpi/cpu.h b/include/hw/acpi/cpu.h
index 32654dc274..4a3e591120 100644
--- a/include/hw/acpi/cpu.h
+++ b/include/hw/acpi/cpu.h
@@ -55,6 +55,7 @@ void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner,
 typedef struct CPUHotplugFeatures {
     bool acpi_1_compatible;
     bool has_legacy_cphp;
+    bool always_present_cpus;
     bool fw_unplugs_cpu;
     const char *smi_path;
 } CPUHotplugFeatures;
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index db8a6fbc6e..e936b67675 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -158,7 +158,6 @@ struct CPUClass {
     void (*dump_state)(CPUState *cpu, FILE *, int flags);
     void (*query_cpu_fast)(CPUState *cpu, CpuInfoFast *value);
     int64_t (*get_arch_id)(CPUState *cpu);
-    bool (*cpu_persistent_status)(CPUState *cpu);
     bool (*cpu_enabled_status)(CPUState *cpu);
     void (*set_pc)(CPUState *cpu, vaddr value);
     vaddr (*get_pc)(CPUState *cpu);
-- 
2.34.1
Re: [PATCH V2 2/3] Fix: CPUs presence logic in _STA for x86 backward compatibility
Posted by Igor Mammedov 1 week, 5 days ago
On Sat, 9 Nov 2024 00:07:27 +0000
Salil Mehta <salil.mehta@huawei.com> wrote:

> Checking `is_present` first can break x86 migration from new Qemu
> version to old Qemu. This is because CPRS Bit is not defined in the
> older Qemu register block and will always be 0 resulting in check always
> failing. Remove CPU_PRESENT Bit to preserve older ABI.
> 
> -                If ((\_SB.PCI0.PRES.CPEN == One))
> -                {
> -                    Local0 = 0x0F
> +                If ((\_SB.PCI0.PRES.CPRS == One))
> +                {
> +                    If ((\_SB.PCI0.PRES.CPEN == One))
> +                    {
> +                        Local0 = 0x0F
> +                    }
> +                    Else
> +                    {
> +                        Local0 = 0x0D
> +                    }
>                  }
> 
> Reported-by: Igor Mammedov <imammedo@redhat.com>
> Message-ID: <20241106100047.18901c9d@imammedo.users.ipa.redhat.com>
> Signed-off-by: Salil Mehta <salil.mehta@huawei.com>

The AML parts look fine, modulo the global variable nit.

My recommendation is to revert both patches for 9.2, as that would fix
the broken hotplug and return the code to its original state, and then
repost them (amended) as part of the ARM cpuhp series to get a cleaner
history instead of a spaghetti of fixup/refactoring commits.

See other comments below.


> ---
>  hw/acpi/cpu.c         | 49 ++++++++++++++++---------------------------
>  include/hw/acpi/cpu.h |  1 +
>  include/hw/core/cpu.h |  1 -
>  3 files changed, 19 insertions(+), 32 deletions(-)
> 
> diff --git a/hw/acpi/cpu.c b/hw/acpi/cpu.c
> index 23443f09a5..7b0badd08a 100644
> --- a/hw/acpi/cpu.c
> +++ b/hw/acpi/cpu.c
> @@ -23,6 +23,8 @@ enum {
>      CPHP_CMD_MAX
>  };
>  
> +static bool always_present_cpus;
> +
>  static ACPIOSTInfo *acpi_cpu_device_status(int idx, AcpiCpuStatus *cdev)
>  {
>      ACPIOSTInfo *info = g_new0(ACPIOSTInfo, 1);
> @@ -79,7 +81,6 @@ static uint64_t cpu_hotplug_rd(void *opaque, hwaddr addr, unsigned size)
>          val |= cdev->is_inserting ? 2 : 0;
>          val |= cdev->is_removing  ? 4 : 0;
>          val |= cdev->fw_remove  ? 16 : 0;
> -        val |= cdev->cpu ? 32 : 0;
>          trace_cpuhp_acpi_read_flags(cpu_st->selector, val);
>          break;
>      case ACPI_CPU_CMD_DATA_OFFSET_RW:
> @@ -246,15 +247,9 @@ void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner,
>      memory_region_add_subregion(as, base_addr, &state->ctrl_reg);
>  }
>  
> -static bool should_remain_acpi_present(DeviceState *dev)
> +static inline bool should_remain_acpi_present(DeviceState *dev)
>  {
> -    CPUClass *k = CPU_GET_CLASS(dev);
> -    /*
> -     * A system may contain CPUs that are always present on one die, NUMA node,
> -     * or socket, yet may be non-present on another simultaneously. Check from
> -     * architecture specific code.
> -     */
> -    return k->cpu_persistent_status && k->cpu_persistent_status(CPU(dev));
> +    return always_present_cpus;
>  }

Instead of using a global, we should pass a parameter to

 void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner,
                          CPUHotplugState *state, hwaddr base_addr,
                          bool always_present_cpus)
                          ^^^^

and store it in CPUHotplugState::always_present_cpus. The caller would
then supply it depending on its configuration (a new property on GED to
opt in to the feature, and a hardcoded 'false' at the x86 call sites);
a sketch follows below.
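
A minimal sketch of that alternative, assuming a new always_present_cpus
field in CPUHotplugState and a simplified stand-in init helper (the real
QEMU struct members and the cpu_hotplug_hw_init() body are elided, so
this is illustrative rather than the actual implementation):

 #include <stdbool.h>

 /* Simplified stand-in for the QEMU struct; only the suggested new
  * field is shown, the existing members are omitted. */
 typedef struct CPUHotplugState {
     bool always_present_cpus;   /* assumed new field */
 } CPUHotplugState;

 /* The caller supplies the flag based on its configuration, e.g. a GED
  * property for ARM and a hardcoded 'false' at the x86 call sites. */
 static void cpu_hotplug_state_set_always_present(CPUHotplugState *state,
                                                  bool always_present_cpus)
 {
     state->always_present_cpus = always_present_cpus;
 }

 /* The predicate then reads per-device state instead of a file-scope
  * global (it would need access to the CPUHotplugState rather than
  * just the CPU's DeviceState). */
 static bool should_remain_acpi_present(const CPUHotplugState *state)
 {
     return state->always_present_cpus;
 }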
 

Also, this leaves the cpu_enabled_status() callback around; instead of
adding callbacks we should just use
  object_property_get_bool(OBJECT(cdev->cpu), 'realized')
to do the same (i.e. what v6 was hiding behind callbacks).
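
For illustration, a hedged sketch of that check in QEMU terms, assuming
the AcpiCpuStatus layout from hw/acpi/cpu.c and the standard QOM
accessor; the helper name cpu_enabled() is made up here, not something
from this patch:

 #include "qemu/osdep.h"
 #include "qapi/error.h"
 #include "hw/acpi/cpu.h"

 /* True once the CPU device has been realized, i.e. the same
  * information the x86 cpu_enabled_status() callback effectively
  * reported. */
 static bool cpu_enabled(AcpiCpuStatus *cdev)
 {
     return cdev->cpu &&
            object_property_get_bool(OBJECT(cdev->cpu), "realized",
                                     &error_abort);
 }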

That is still correct for x86; it might be different for ARM, e.g. a
dedicated 'enabled' property, depending on where the discussion leads
us, but that is a topic for the ARM cpuhp patches.

The point is, this is a stray ARM cpuhp patch and is not needed in 9.2.
It's better to revert 2d6cfbaf1 and bf1ecc8dad6 for 9.2 and let the ARM
cpuhp series bring them back on top of the clean 9.1 codebase, instead
of having a mess of broken patches that are not needed right now plus
fixups to bring them to the desired state.

>  static AcpiCpuStatus *get_cpu_status(CPUHotplugState *cpu_st, DeviceState *dev)
> @@ -362,7 +357,6 @@ const VMStateDescription vmstate_cpu_hotplug = {
>  #define CPU_REMOVE_EVENT  "CRMV"
>  #define CPU_EJECT_EVENT   "CEJ0"
>  #define CPU_FW_EJECT_EVENT "CEJF"
> -#define CPU_PRESENT       "CPRS"
>  
>  void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
>                      build_madt_cpu_fn build_madt_cpu, hwaddr base_addr,
> @@ -423,9 +417,7 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
>          aml_append(field, aml_named_field(CPU_EJECT_EVENT, 1));
>          /* tell firmware to do device eject, write only */
>          aml_append(field, aml_named_field(CPU_FW_EJECT_EVENT, 1));
> -        /* 1 if present, read only */
> -        aml_append(field, aml_named_field(CPU_PRESENT, 1));
> -        aml_append(field, aml_reserved_field(2));
> +        aml_append(field, aml_reserved_field(3));
>          aml_append(field, aml_named_field(CPU_COMMAND, 8));
>          aml_append(cpu_ctrl_dev, field);
>  
> @@ -455,7 +447,6 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
>          Aml *ctrl_lock = aml_name("%s.%s", cphp_res_path, CPU_LOCK);
>          Aml *cpu_selector = aml_name("%s.%s", cphp_res_path, CPU_SELECTOR);
>          Aml *is_enabled = aml_name("%s.%s", cphp_res_path, CPU_ENABLED);
> -        Aml *is_present = aml_name("%s.%s", cphp_res_path, CPU_PRESENT);
>          Aml *cpu_cmd = aml_name("%s.%s", cphp_res_path, CPU_COMMAND);
>          Aml *cpu_data = aml_name("%s.%s", cphp_res_path, CPU_DATA);
>          Aml *ins_evt = aml_name("%s.%s", cphp_res_path, CPU_INSERT_EVENT);
> @@ -482,28 +473,24 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
>  
>          method = aml_method(CPU_STS_METHOD, 1, AML_SERIALIZED);
>          {
> -            Aml *idx = aml_arg(0);
> +            Aml *default_sta = aml_int(opts.always_present_cpus ? 0xD : 0);
>              Aml *sta = aml_local(0);
> -            Aml *ifctx2;
> -            Aml *else_ctx;
> +            Aml *idx = aml_arg(0);
> +
> +            always_present_cpus = opts.always_present_cpus;

The opts argument is meant to configure the AML generator, so I'd
rather not overload it with ACPI device configuration, nor introduce a
global variable.
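
If it helps, a purely hypothetical sketch of where such device-level
configuration could live instead, as a qdev property on the GED device.
The property name, the cpuhp_state.always_present_cpus field and this
placement are assumptions for illustration, not something this patch
(or current QEMU) provides:

 #include "qemu/osdep.h"
 #include "hw/qdev-properties.h"
 #include "hw/acpi/generic_event_device.h"

 /* Illustrative only: an opt-in flag configured on the ACPI device
  * itself, which the device can then pass to cpu_hotplug_hw_init(),
  * keeping CPUHotplugFeatures strictly about AML generation. */
 static Property acpi_ged_cpuhp_properties[] = {
     DEFINE_PROP_BOOL("x-always-present-cpus", AcpiGedState,
                      cpuhp_state.always_present_cpus, false),
     DEFINE_PROP_END_OF_LIST(),
 };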

>  
>              aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
>              aml_append(method, aml_store(idx, cpu_selector));
> -            aml_append(method, aml_store(zero, sta));
> -            ifctx = aml_if(aml_equal(is_present, one));
> +            /*
> +             * Default case:
> +             * _STA(0xD) = cpu is present and disabled OR
> +             * _STA(0x0) = cpu is not present (and hence disabled)
> +             */
> +            aml_append(method, aml_store(default_sta, sta));
> +            ifctx = aml_if(aml_equal(is_enabled, one));
>              {
> -                ifctx2 = aml_if(aml_equal(is_enabled, one));
> -                {
> -                    /* cpu is present and enabled */
> -                    aml_append(ifctx2, aml_store(aml_int(0xF), sta));
> -                }
> -                aml_append(ifctx, ifctx2);
> -                else_ctx = aml_else();
> -                {
> -                    /* cpu is present but disabled */
> -                    aml_append(else_ctx, aml_store(aml_int(0xD), sta));
> -                }
> -                aml_append(ifctx, else_ctx);
> +                /* _STA(0xF) = cpu is present and enabled */
> +                aml_append(ifctx, aml_or(aml_int(0xF), sta, sta));
>              }
>              aml_append(method, ifctx);
>              aml_append(method, aml_release(ctrl_lock));
> diff --git a/include/hw/acpi/cpu.h b/include/hw/acpi/cpu.h
> index 32654dc274..4a3e591120 100644
> --- a/include/hw/acpi/cpu.h
> +++ b/include/hw/acpi/cpu.h
> @@ -55,6 +55,7 @@ void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner,
>  typedef struct CPUHotplugFeatures {
>      bool acpi_1_compatible;
>      bool has_legacy_cphp;
> +    bool always_present_cpus;
>      bool fw_unplugs_cpu;
>      const char *smi_path;
>  } CPUHotplugFeatures;
> diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
> index db8a6fbc6e..e936b67675 100644
> --- a/include/hw/core/cpu.h
> +++ b/include/hw/core/cpu.h
> @@ -158,7 +158,6 @@ struct CPUClass {
>      void (*dump_state)(CPUState *cpu, FILE *, int flags);
>      void (*query_cpu_fast)(CPUState *cpu, CpuInfoFast *value);
>      int64_t (*get_arch_id)(CPUState *cpu);
> -    bool (*cpu_persistent_status)(CPUState *cpu);
>      bool (*cpu_enabled_status)(CPUState *cpu);
>      void (*set_pc)(CPUState *cpu, vaddr value);
>      vaddr (*get_pc)(CPUState *cpu);