[PATCH v4 08/22] x86/mce/amd: Put list_head in threshold_bank
Posted by Yazen Ghannam 3 months, 2 weeks ago
The threshold_bank structure is a container for one or more
threshold_block structures. Currently, the container has a single
pointer to the 'first' threshold_block structure which then has a linked
list of the remaining threshold_block structures.

This results in an extra level of indirection where the 'first' block is
checked before iterating over the remaining blocks.

Remove the indirection by including the head of the block list in the
threshold_bank structure which already acts as a container for all the
bank's thresholding blocks.
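
Schematically, the change looks like this (a condensed sketch for
illustration only; the struct tags below are made up, the field names
match mce/amd.c):

	/* Before: the bank points at the 'first' block and the remaining
	 * blocks hang off that block's miscj list. */
	struct bank_before {
		struct kobject		*kobj;
		struct threshold_block	*blocks;
	};

	/* After: the bank embeds the list head, so all blocks, including
	 * the first, are reached by one uniform iteration. */
	struct bank_after {
		struct kobject		*kobj;
		struct list_head	miscj;
	};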

Reviewed-by: Qiuxu Zhuo <qiuxu.zhuo@intel.com>
Tested-by: Tony Luck <tony.luck@intel.com>
Reviewed-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Yazen Ghannam <yazen.ghannam@amd.com>
---

Notes:
    Link:
    https://lore.kernel.org/r/20250415-wip-mca-updates-v3-4-8ffd9eb4aa56@amd.com
    
    v3->v4:
    * No change.
    
    v2->v3:
    * Added tags from Qiuxu and Tony.
    
    v1->v2:
    * New in v2.

 arch/x86/kernel/cpu/mce/amd.c | 43 ++++++++++++-------------------------------
 1 file changed, 12 insertions(+), 31 deletions(-)

diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index 0ffbee329a8c..5d351ec863cd 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -241,7 +241,8 @@ struct threshold_block {
 
 struct threshold_bank {
 	struct kobject		*kobj;
-	struct threshold_block	*blocks;
+	/* List of threshold blocks within this MCA bank. */
+	struct list_head	miscj;
 };
 
 static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
@@ -900,9 +901,9 @@ static void log_and_reset_block(struct threshold_block *block)
  */
 static void amd_threshold_interrupt(void)
 {
-	struct threshold_block *first_block = NULL, *block = NULL, *tmp = NULL;
-	struct threshold_bank **bp = this_cpu_read(threshold_banks);
+	struct threshold_bank **bp = this_cpu_read(threshold_banks), *thr_bank;
 	unsigned int bank, cpu = smp_processor_id();
+	struct threshold_block *block, *tmp;
 
 	/*
 	 * Validate that the threshold bank has been initialized already. The
@@ -916,16 +917,11 @@ static void amd_threshold_interrupt(void)
 		if (!(per_cpu(bank_map, cpu) & BIT_ULL(bank)))
 			continue;
 
-		first_block = bp[bank]->blocks;
-		if (!first_block)
+		thr_bank = bp[bank];
+		if (!thr_bank)
 			continue;
 
-		/*
-		 * The first block is also the head of the list. Check it first
-		 * before iterating over the rest.
-		 */
-		log_and_reset_block(first_block);
-		list_for_each_entry_safe(block, tmp, &first_block->miscj, miscj)
+		list_for_each_entry_safe(block, tmp, &thr_bank->miscj, miscj)
 			log_and_reset_block(block);
 	}
 }
@@ -1151,13 +1147,7 @@ static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb
 		default_attrs[2] = NULL;
 	}
 
-	INIT_LIST_HEAD(&b->miscj);
-
-	/* This is safe as @tb is not visible yet */
-	if (tb->blocks)
-		list_add(&b->miscj, &tb->blocks->miscj);
-	else
-		tb->blocks = b;
+	list_add(&b->miscj, &tb->miscj);
 
 	err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(cpu, bank, b));
 	if (err)
@@ -1208,6 +1198,8 @@ static int threshold_create_bank(struct threshold_bank **bp, unsigned int cpu,
 		goto out_free;
 	}
 
+	INIT_LIST_HEAD(&b->miscj);
+
 	err = allocate_threshold_blocks(cpu, b, bank, 0, mca_msr_reg(bank, MCA_MISC));
 	if (err)
 		goto out_kobj;
@@ -1228,26 +1220,15 @@ static void threshold_block_release(struct kobject *kobj)
 	kfree(to_block(kobj));
 }
 
-static void deallocate_threshold_blocks(struct threshold_bank *bank)
+static void threshold_remove_bank(struct threshold_bank *bank)
 {
 	struct threshold_block *pos, *tmp;
 
-	list_for_each_entry_safe(pos, tmp, &bank->blocks->miscj, miscj) {
+	list_for_each_entry_safe(pos, tmp, &bank->miscj, miscj) {
 		list_del(&pos->miscj);
 		kobject_put(&pos->kobj);
 	}
 
-	kobject_put(&bank->blocks->kobj);
-}
-
-static void threshold_remove_bank(struct threshold_bank *bank)
-{
-	if (!bank->blocks)
-		goto out_free;
-
-	deallocate_threshold_blocks(bank);
-
-out_free:
 	kobject_put(bank->kobj);
 	kfree(bank);
 }

-- 
2.49.0
Re: [PATCH v4 08/22] x86/mce/amd: Put list_head in threshold_bank
Posted by Nikolay Borisov 3 months, 2 weeks ago

On 6/24/25 17:16, Yazen Ghannam wrote:

<snip>

> @@ -916,16 +917,11 @@ static void amd_threshold_interrupt(void)
>   		if (!(per_cpu(bank_map, cpu) & BIT_ULL(bank)))
>   			continue;

<slight off topic>

nit: I wonder if, instead of using per_cpu and manual bit testing, a direct
call to x86_this_cpu_test_bit wouldn't be a better solution. The assembly looks like:

[OLD]

xorl    %r14d, %r14d    # ivtmp.245
movq    %rax, 8(%rsp)   # cpu, %sfp
# arch/x86/kernel/cpu/mce/amd.c:917:        if (!(per_cpu(bank_map, cpu) & BIT_ULL(bank)))
movq    $bank_map, %rax #, __ptr
movq    %rax, (%rsp)    # __ptr, %sfp
.L236:
movq    8(%rsp), %rax   # %sfp, cpu
# arch/x86/kernel/cpu/mce/amd.c:917:        if (!(per_cpu(bank_map, cpu) & BIT_ULL(bank)))
movq    (%rsp), %rsi    # %sfp, __ptr
# arch/x86/kernel/cpu/mce/amd.c:917:        if (!(per_cpu(bank_map, cpu) & BIT_ULL(bank)))
movq    __per_cpu_offset(,%rax,8), %rax # __per_cpu_offset[cpu_23], __per_cpu_offset[cpu_23]
# arch/x86/kernel/cpu/mce/amd.c:917:        if (!(per_cpu(bank_map, cpu) & BIT_ULL(bank)))
movq    (%rax,%rsi), %rax
# arch/x86/kernel/cpu/mce/amd.c:917:        if (!(per_cpu(bank_map, cpu) & BIT_ULL(bank)))
btq %r14, %rax

[NEW]

xorl    %r15d, %r15d    # ivtmp.246
.L236:
# 917 "arch/x86/kernel/cpu/mce/amd.c" 1
btl %r15d, %gs:bank_map(%rip)   # ivtmp.246, *_9


That way you end up with a single btl inside the loop (though I guess a btq
variant would need to be added as well) rather than a bunch of instructions
moving data around for per_cpu.

Alternatively, since this runs in interrupt context, can't you use
this_cpu_read(bank_map) directly and eliminate the smp_processor_id()
invocation?
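
For concreteness, the two variants would look roughly like this (untested
sketches; the x86_this_cpu_test_bit() form assumes a cast, since the helper
takes an unsigned long percpu pointer, plus the btq caveat above):

	/* (a) single bt against %gs-relative memory: */
	if (!x86_this_cpu_test_bit(bank, (unsigned long __percpu *)&bank_map))
		continue;

	/* (b) plain this_cpu read, dropping the smp_processor_id()/per_cpu()
	 * pair; fine here since the handler runs in interrupt context on a
	 * stable CPU: */
	if (!(this_cpu_read(bank_map) & BIT_ULL(bank)))
		continue;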

</slight off topic>

>   

<snip>
Re: [PATCH v4 08/22] x86/mce/amd: Put list_head in threshold_bank
Posted by Borislav Petkov 1 month, 2 weeks ago
On Wed, Jun 25, 2025 at 07:52:26PM +0300, Nikolay Borisov wrote:
> That way you end up with a single btl inside the loop (though I guess a btq
> variant would need to be added as well) rather than a bunch of instructions
> moving data around for per_cpu.

There's also this_cpu_ptr() etc.
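
Roughly, that variant would be (just a sketch):

	u64 *map = this_cpu_ptr(&bank_map);

	if (!(*map & BIT_ULL(bank)))
		continue;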

You know how I always take patches, right?

:-)

-- 
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette
Re: [PATCH v4 08/22] x86/mce/amd: Put list_head in threshold_bank
Posted by Nikolay Borisov 3 months, 1 week ago

On 6/25/25 19:52, Nikolay Borisov wrote:

<snip>

> That way you end up with a single btl inside the loop (though I guess a btq
> variant would need to be added as well) rather than a bunch of instructions
> moving data around for per_cpu.
>
> Alternatively, since this runs in interrupt context, can't you use
> this_cpu_read(bank_map) directly and eliminate the smp_processor_id()
> invocation?

Actually the total number of banks is at most 128 as per the layout of the
MCG_CAP register, so using btl is fine. Also, I'm not sure why the original
code uses BIT_ULL vs just BIT, since we can't have a 64-bit value.


Re: [PATCH v4 08/22] x86/mce/amd: Put list_head in threshold_bank
Posted by Yazen Ghannam 3 months, 1 week ago
On Fri, Jun 27, 2025 at 02:14:40PM +0300, Nikolay Borisov wrote:

<snip>

> Actually the total number of banks is at most 128 as per the layout of the
> MCG_CAP register, so using btl is fine. Also, I'm not sure why the original
> code uses BIT_ULL vs just BIT, since we can't have a 64-bit value.

Hi Nikolay,

MCG_CAP[Count] is an 8-bit field, so we can (potentially) have up to 255
MCA banks.

"bank_map" is a bitmask, and current systems can have up to 64 MCA banks.
That is why BIT_ULL is needed.
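
A quick illustration of the difference (paraphrasing include/vdso/bits.h):

	#define BIT(nr)		(UL(1) << (nr))		/* word-sized: 32 bits on i386 */
	#define BIT_ULL(nr)	(ULL(1) << (nr))	/* always 64 bits */

	u64 map = this_cpu_read(bank_map);

	if (map & BIT_ULL(63))	/* well-defined everywhere */
		...
	if (map & BIT(63))	/* bank >= 32 breaks on a 32-bit build:
				 * shift count >= width of type */
		...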

Thanks,
Yazen
[tip: ras/core] x86/mce/amd: Put list_head in threshold_bank
Posted by tip-bot2 for Yazen Ghannam 1 month ago
The following commit has been merged into the ras/core branch of tip:

Commit-ID:     c4bac5c640e3782bf30c07c4d82042d0202fe224
Gitweb:        https://git.kernel.org/tip/c4bac5c640e3782bf30c07c4d82042d0202fe224
Author:        Yazen Ghannam <yazen.ghannam@amd.com>
AuthorDate:    Tue, 24 Jun 2025 14:16:03 
Committer:     Borislav Petkov (AMD) <bp@alien8.de>
CommitterDate: Fri, 05 Sep 2025 12:42:21 +02:00

x86/mce/amd: Put list_head in threshold_bank

The threshold_bank structure is a container for one or more threshold_block
structures. Currently, the container has a single pointer to the 'first'
threshold_block structure which then has a linked list of the remaining
threshold_block structures.

This results in an extra level of indirection where the 'first' block is
checked before iterating over the remaining blocks.

Remove the indirection by including the head of the block list in the
threshold_bank structure which already acts as a container for all the bank's
thresholding blocks.

Signed-off-by: Yazen Ghannam <yazen.ghannam@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Qiuxu Zhuo <qiuxu.zhuo@intel.com>
Reviewed-by: Tony Luck <tony.luck@intel.com>
Tested-by: Tony Luck <tony.luck@intel.com>
Link: https://lore.kernel.org/20250624-wip-mca-updates-v4-8-236dd74f645f@amd.com
---
 arch/x86/kernel/cpu/mce/amd.c | 43 +++++++++-------------------------
 1 file changed, 12 insertions(+), 31 deletions(-)

diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index 7e36bc0..e9b9be2 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -241,7 +241,8 @@ struct threshold_block {
 
 struct threshold_bank {
 	struct kobject		*kobj;
-	struct threshold_block	*blocks;
+	/* List of threshold blocks within this MCA bank. */
+	struct list_head	miscj;
 };
 
 static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
@@ -898,9 +899,9 @@ static void log_and_reset_block(struct threshold_block *block)
  */
 static void amd_threshold_interrupt(void)
 {
-	struct threshold_block *first_block = NULL, *block = NULL, *tmp = NULL;
-	struct threshold_bank **bp = this_cpu_read(threshold_banks);
+	struct threshold_bank **bp = this_cpu_read(threshold_banks), *thr_bank;
 	unsigned int bank, cpu = smp_processor_id();
+	struct threshold_block *block, *tmp;
 
 	/*
 	 * Validate that the threshold bank has been initialized already. The
@@ -914,16 +915,11 @@ static void amd_threshold_interrupt(void)
 		if (!(per_cpu(bank_map, cpu) & BIT_ULL(bank)))
 			continue;
 
-		first_block = bp[bank]->blocks;
-		if (!first_block)
+		thr_bank = bp[bank];
+		if (!thr_bank)
 			continue;
 
-		/*
-		 * The first block is also the head of the list. Check it first
-		 * before iterating over the rest.
-		 */
-		log_and_reset_block(first_block);
-		list_for_each_entry_safe(block, tmp, &first_block->miscj, miscj)
+		list_for_each_entry_safe(block, tmp, &thr_bank->miscj, miscj)
 			log_and_reset_block(block);
 	}
 }
@@ -1149,13 +1145,7 @@ static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb
 		default_attrs[2] = NULL;
 	}
 
-	INIT_LIST_HEAD(&b->miscj);
-
-	/* This is safe as @tb is not visible yet */
-	if (tb->blocks)
-		list_add(&b->miscj, &tb->blocks->miscj);
-	else
-		tb->blocks = b;
+	list_add(&b->miscj, &tb->miscj);
 
 	err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(cpu, bank, b));
 	if (err)
@@ -1206,6 +1196,8 @@ static int threshold_create_bank(struct threshold_bank **bp, unsigned int cpu,
 		goto out_free;
 	}
 
+	INIT_LIST_HEAD(&b->miscj);
+
 	err = allocate_threshold_blocks(cpu, b, bank, 0, mca_msr_reg(bank, MCA_MISC));
 	if (err)
 		goto out_kobj;
@@ -1226,26 +1218,15 @@ static void threshold_block_release(struct kobject *kobj)
 	kfree(to_block(kobj));
 }
 
-static void deallocate_threshold_blocks(struct threshold_bank *bank)
+static void threshold_remove_bank(struct threshold_bank *bank)
 {
 	struct threshold_block *pos, *tmp;
 
-	list_for_each_entry_safe(pos, tmp, &bank->blocks->miscj, miscj) {
+	list_for_each_entry_safe(pos, tmp, &bank->miscj, miscj) {
 		list_del(&pos->miscj);
 		kobject_put(&pos->kobj);
 	}
 
-	kobject_put(&bank->blocks->kobj);
-}
-
-static void threshold_remove_bank(struct threshold_bank *bank)
-{
-	if (!bank->blocks)
-		goto out_free;
-
-	deallocate_threshold_blocks(bank);
-
-out_free:
 	kobject_put(bank->kobj);
 	kfree(bank);
 }