[PATCH v3] scsi: core: Fix async_scan race condition with READ_ONCE/WRITE_ONCE

Chaohai Chen posted 1 patch 1 month, 1 week ago
There is a newer version of this series
drivers/scsi/scsi_scan.c | 22 ++++++++--------------
include/scsi/scsi_host.h |  6 +++---
2 files changed, 11 insertions(+), 17 deletions(-)
[PATCH v3] scsi: core: Fix async_scan race condition with READ_ONCE/WRITE_ONCE
Posted by Chaohai Chen 1 month, 1 week ago
Previously, host_lock was used to prevent bit-set conflicts in async_scan,
but this approach introduced naked reads in some code paths.

Convert async_scan from a bitfield to a bool type to eliminate bit-level
conflicts entirely. Use READ_ONCE() and WRITE_ONCE() to ensure proper
memory ordering on Alpha and satisfy KCSAN requirements.

Signed-off-by: Chaohai Chen <wdhh6@aliyun.com>
---

v1->v3:
use READ_ONCE()/WRITE_ONCE() to fix the issue (Christoph Hellwig, Damien Le Moal)

v1: https://lore.kernel.org/all/20260302121343.1630837-1-wdhh6@aliyun.com/

 drivers/scsi/scsi_scan.c | 22 ++++++++--------------
 include/scsi/scsi_host.h |  6 +++---
 2 files changed, 11 insertions(+), 17 deletions(-)

diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 60c06fa4ec32..892be54dacc6 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1298,7 +1298,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
 		goto out_free_result;
 	}
 
-	res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
+	res = scsi_add_lun(sdev, result, &bflags, READ_ONCE(shost->async_scan));
 	if (res == SCSI_SCAN_LUN_PRESENT) {
 		if (bflags & BLIST_KEY) {
 			sdev->lockable = 0;
@@ -1629,7 +1629,7 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
 	scsi_autopm_get_target(starget);
 
 	mutex_lock(&shost->scan_mutex);
-	if (!shost->async_scan)
+	if (!READ_ONCE(shost->async_scan))
 		scsi_complete_async_scans();
 
 	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
@@ -1839,7 +1839,7 @@ void scsi_scan_target(struct device *parent, unsigned int channel,
 		return;
 
 	mutex_lock(&shost->scan_mutex);
-	if (!shost->async_scan)
+	if (!READ_ONCE(shost->async_scan))
 		scsi_complete_async_scans();
 
 	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
@@ -1896,7 +1896,7 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
 		return -EINVAL;
 
 	mutex_lock(&shost->scan_mutex);
-	if (!shost->async_scan)
+	if (!READ_ONCE(shost->async_scan))
 		scsi_complete_async_scans();
 
 	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
@@ -1943,13 +1943,12 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
 static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
 {
 	struct async_scan_data *data = NULL;
-	unsigned long flags;
 
 	if (strncmp(scsi_scan_type, "sync", 4) == 0)
 		return NULL;
 
 	mutex_lock(&shost->scan_mutex);
-	if (shost->async_scan) {
+	if (READ_ONCE(shost->async_scan)) {
 		shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
 		goto err;
 	}
@@ -1962,9 +1961,7 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
 		goto err;
 	init_completion(&data->prev_finished);
 
-	spin_lock_irqsave(shost->host_lock, flags);
-	shost->async_scan = 1;
-	spin_unlock_irqrestore(shost->host_lock, flags);
+	WRITE_ONCE(shost->async_scan, true);
 	mutex_unlock(&shost->scan_mutex);
 
 	spin_lock(&async_scan_lock);
@@ -1992,7 +1989,6 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
 static void scsi_finish_async_scan(struct async_scan_data *data)
 {
 	struct Scsi_Host *shost;
-	unsigned long flags;
 
 	if (!data)
 		return;
@@ -2001,7 +1997,7 @@ static void scsi_finish_async_scan(struct async_scan_data *data)
 
 	mutex_lock(&shost->scan_mutex);
 
-	if (!shost->async_scan) {
+	if (!READ_ONCE(shost->async_scan)) {
 		shost_printk(KERN_INFO, shost, "%s called twice\n", __func__);
 		dump_stack();
 		mutex_unlock(&shost->scan_mutex);
@@ -2012,9 +2008,7 @@ static void scsi_finish_async_scan(struct async_scan_data *data)
 
 	scsi_sysfs_add_devices(shost);
 
-	spin_lock_irqsave(shost->host_lock, flags);
-	shost->async_scan = 0;
-	spin_unlock_irqrestore(shost->host_lock, flags);
+	WRITE_ONCE(shost->async_scan, false);
 
 	mutex_unlock(&shost->scan_mutex);
 
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index f6e12565a81d..668ec9a1b33c 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -678,9 +678,6 @@ struct Scsi_Host {
 	/* Task mgmt function in progress */
 	unsigned tmf_in_progress:1;
 
-	/* Asynchronous scan in progress */
-	unsigned async_scan:1;
-
 	/* Don't resume host in EH */
 	unsigned eh_noresume:1;
 
@@ -699,6 +696,9 @@ struct Scsi_Host {
 	/* The transport requires the LUN bits NOT to be stored in CDB[1] */
 	unsigned no_scsi2_lun_in_cdb:1;
 
+	/* Asynchronous scan in progress */
+	bool async_scan;
+
 	/*
 	 * Optional work queue to be utilized by the transport
 	 */
-- 
2.43.7
Re: [PATCH v3] scsi: core: Fix async_scan race condition with READ_ONCE/WRITE_ONCE
Posted by John Garry 1 month, 1 week ago
On 04/03/2026 07:57, Chaohai Chen wrote:
> Previously, host_lock was used to prevent bit-set conflicts in async_scan,
> but this approach introduced naked reads in some code paths.
> 
> Convert async_scan from a bitfield to a bool type to eliminate bit-level
> conflicts entirely. Use READ_ONCE() and WRITE_ONCE() to ensure proper
> memory ordering on Alpha and satisfy KCSAN requirements.

Is the shost->scan_mutex always held when shost->async_scan is read/written?

> 
> Signed-off-by: Chaohai Chen <wdhh6@aliyun.com>
> ---
> 
> v1->v3:
> use READ_ONCE()/WRITE_ONCE() to fix the issue (Christoph Hellwig, Damien Le Moal)
> 
> v1: https://urldefense.com/v3/__https://lore.kernel.org/all/20260302121343.1630837-1-wdhh6@aliyun.com/__;!!ACWV5N9M2RV99hQ!KlZN-O7DbZEu25CkCc5u3UxNVI8TafYP_8y7BLOsNvIv6kwKjzL038XMO0nBle4xVoVlj6tMo87cg7mr$
> 
>   drivers/scsi/scsi_scan.c | 22 ++++++++--------------
>   include/scsi/scsi_host.h |  6 +++---
>   2 files changed, 11 insertions(+), 17 deletions(-)
> 
> diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
> index 60c06fa4ec32..892be54dacc6 100644
> --- a/drivers/scsi/scsi_scan.c
> +++ b/drivers/scsi/scsi_scan.c
> @@ -1298,7 +1298,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
>   		goto out_free_result;
>   	}
>   
> -	res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
> +	res = scsi_add_lun(sdev, result, &bflags, READ_ONCE(shost->async_scan));
>   	if (res == SCSI_SCAN_LUN_PRESENT) {
>   		if (bflags & BLIST_KEY) {
>   			sdev->lockable = 0;
> @@ -1629,7 +1629,7 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
>   	scsi_autopm_get_target(starget);
>   
>   	mutex_lock(&shost->scan_mutex);
> -	if (!shost->async_scan)
> +	if (!READ_ONCE(shost->async_scan))
>   		scsi_complete_async_scans();
>   
>   	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
> @@ -1839,7 +1839,7 @@ void scsi_scan_target(struct device *parent, unsigned int channel,
>   		return;
>   
>   	mutex_lock(&shost->scan_mutex);
> -	if (!shost->async_scan)
> +	if (!READ_ONCE(shost->async_scan))
>   		scsi_complete_async_scans();
>   
>   	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
> @@ -1896,7 +1896,7 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
>   		return -EINVAL;
>   
>   	mutex_lock(&shost->scan_mutex);
> -	if (!shost->async_scan)
> +	if (!READ_ONCE(shost->async_scan))
>   		scsi_complete_async_scans();
>   
>   	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
> @@ -1943,13 +1943,12 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
>   static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
>   {
>   	struct async_scan_data *data = NULL;
> -	unsigned long flags;
>   
>   	if (strncmp(scsi_scan_type, "sync", 4) == 0)
>   		return NULL;
>   
>   	mutex_lock(&shost->scan_mutex);
> -	if (shost->async_scan) {
> +	if (READ_ONCE(shost->async_scan)) {
>   		shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
>   		goto err;
>   	}
> @@ -1962,9 +1961,7 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
>   		goto err;
>   	init_completion(&data->prev_finished);
>   
> -	spin_lock_irqsave(shost->host_lock, flags);
> -	shost->async_scan = 1;
> -	spin_unlock_irqrestore(shost->host_lock, flags);
> +	WRITE_ONCE(shost->async_scan, true);
>   	mutex_unlock(&shost->scan_mutex);
>   
>   	spin_lock(&async_scan_lock);
> @@ -1992,7 +1989,6 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
>   static void scsi_finish_async_scan(struct async_scan_data *data)
>   {
>   	struct Scsi_Host *shost;
> -	unsigned long flags;
>   
>   	if (!data)
>   		return;
> @@ -2001,7 +1997,7 @@ static void scsi_finish_async_scan(struct async_scan_data *data)
>   
>   	mutex_lock(&shost->scan_mutex);
>   
> -	if (!shost->async_scan) {
> +	if (!READ_ONCE(shost->async_scan)) {
>   		shost_printk(KERN_INFO, shost, "%s called twice\n", __func__);
>   		dump_stack();
>   		mutex_unlock(&shost->scan_mutex);
> @@ -2012,9 +2008,7 @@ static void scsi_finish_async_scan(struct async_scan_data *data)
>   
>   	scsi_sysfs_add_devices(shost);
>   
> -	spin_lock_irqsave(shost->host_lock, flags);
> -	shost->async_scan = 0;
> -	spin_unlock_irqrestore(shost->host_lock, flags);
> +	WRITE_ONCE(shost->async_scan, false);
>   
>   	mutex_unlock(&shost->scan_mutex);
>   
> diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
> index f6e12565a81d..668ec9a1b33c 100644
> --- a/include/scsi/scsi_host.h
> +++ b/include/scsi/scsi_host.h
> @@ -678,9 +678,6 @@ struct Scsi_Host {
>   	/* Task mgmt function in progress */
>   	unsigned tmf_in_progress:1;
>   
> -	/* Asynchronous scan in progress */
> -	unsigned async_scan:1;
> -
>   	/* Don't resume host in EH */
>   	unsigned eh_noresume:1;
>   
> @@ -699,6 +696,9 @@ struct Scsi_Host {
>   	/* The transport requires the LUN bits NOT to be stored in CDB[1] */
>   	unsigned no_scsi2_lun_in_cdb:1;
>   
> +	/* Asynchronous scan in progress */
> +	bool async_scan;
> +
>   	/*
>   	 * Optional work queue to be utilized by the transport
>   	 */
Re: [PATCH v3] scsi: core: Fix async_scan race condition with READ_ONCE/WRITE_ONCE
Posted by Chaohai Chen 1 month, 1 week ago
On Wed, Mar 04, 2026 at 09:20:25AM +0000, John Garry wrote:
> On 04/03/2026 07:57, Chaohai Chen wrote:
> > Previously, host_lock was used to prevent bit-set conflicts in async_scan,
> > but this approach introduced naked reads in some code paths.
> > 
> > Convert async_scan from a bitfield to a bool type to eliminate bit-level
> > conflicts entirely. Use READ_ONCE() and WRITE_ONCE() to ensure proper
> > memory ordering on Alpha and satisfy KCSAN requirements.
> 
> Is the shost->scan_mutex always held when shost->async_scan is read/written?
> 
Yes. In theory, there is no need for READ_ONCE()/WRITE_ONCE(). Plus, this belongs 
to defensive programming. And it indicates that this is a shared variable, 
which means that this variable will be accessed by multiple threads and 
concurrency issues need to be handled carefully.
Re: [PATCH v3] scsi: core: Fix async_scan race condition with READ_ONCE/WRITE_ONCE
Posted by Bart Van Assche 1 month, 1 week ago
On 3/4/26 3:43 AM, Chaohai Chen wrote:
> On Wed, Mar 04, 2026 at 09:20:25AM +0000, John Garry wrote:
>> On 04/03/2026 07:57, Chaohai Chen wrote:
>>> Previously, host_lock was used to prevent bit-set conflicts in async_scan,
>>> but this approach introduced naked reads in some code paths.
>>>
>>> Convert async_scan from a bitfield to a bool type to eliminate bit-level
>>> conflicts entirely. Use READ_ONCE() and WRITE_ONCE() to ensure proper
>>> memory ordering on Alpha and satisfy KCSAN requirements.
>>
>> Is the shost->scan_mutex always held when shost->async_scan is read/written?
>>
> Yes. In theory, there is no need for READ-ONCE/WRITE-ONCE. Plus, this belongs
> to defensive programming. And it indicates that this is a shared variable,
> which means that this variable will be accessed by multiple threads and
> concurrency issues need to be handled carefully.

Using READ_ONCE() / WRITE_ONCE() for member variables protected by a
mutex is wrong because it confuses people who read the code. Please use
__guarded_by() to document that the async_scan member is protected by
the scan_mutex. More information about __guarded_by(), introduced during
the 7.0 merge window, is available in
Documentation/dev-tools/context-analysis.rst. As one can see in that
document, Clang 22 or later is needed to verify __guarded_by()
annotations during compilation. Information about how to build the
kernel with Clang is available in Documentation/kbuild/llvm.rst.

Bart.
Re: [PATCH v3] scsi: core: Fix async_scan race condition with READ_ONCE/WRITE_ONCE
Posted by John Garry 1 month, 1 week ago
On 04/03/2026 09:43, Chaohai Chen wrote:
> On Wed, Mar 04, 2026 at 09:20:25AM +0000, John Garry wrote:
>> On 04/03/2026 07:57, Chaohai Chen wrote:
>>> Previously, host_lock was used to prevent bit-set conflicts in async_scan,
>>> but this approach introduced naked reads in some code paths.
>>>
>>> Convert async_scan from a bitfield to a bool type to eliminate bit-level
>>> conflicts entirely. Use READ_ONCE() and WRITE_ONCE() to ensure proper
>>> memory ordering on Alpha and satisfy KCSAN requirements.
>>
>> Is the shost->scan_mutex always held when shost->async_scan is read/written?
>>
> Yes. In theory, there is no need for READ-ONCE/WRITE-ONCE.

Then I understand that in practice there is no need (for 
READ/WRITE_ONCE), but having it will do no harm, as you mention below.

> Plus, this belongs
> to defensive programming. 

You could mention what this in the commit log.

> And it indicates that this is a shared variable,
> which means that this variable will be accessed by multiple threads and
> concurrency issues need to be handled carefully.

Reviewed-by: John Garry <john.g.garry@oracle.com>
Re: [PATCH v3] scsi: core: Fix async_scan race condition with READ_ONCE/WRITE_ONCE
Posted by Damien Le Moal 1 month, 1 week ago
On 3/4/26 18:43, Chaohai Chen wrote:
> On Wed, Mar 04, 2026 at 09:20:25AM +0000, John Garry wrote:
>> On 04/03/2026 07:57, Chaohai Chen wrote:
>>> Previously, host_lock was used to prevent bit-set conflicts in async_scan,
>>> but this approach introduced naked reads in some code paths.
>>>
>>> Convert async_scan from a bitfield to a bool type to eliminate bit-level
>>> conflicts entirely. Use READ_ONCE() and WRITE_ONCE() to ensure proper
>>> memory ordering on Alpha and satisfy KCSAN requirements.
>>
>> Is the shost->scan_mutex always held when shost->async_scan is read/written?
>>
> Yes. In theory, there is no need for READ-ONCE/WRITE-ONCE. Plus, this belongs 
> to defensive programming. And it indicates that this is a shared variable, 
> which means that this variable will be accessed by multiple threads and 
> concurrency issues need to be handled carefully.

If the scan_mutex is always held when async_scan is accessed, there will not be
concurrent access from multiple threads, unless it is also accessed from IRQ
context, which would be odd. So I am not sure what concurrency issue you are
referring to here.


-- 
Damien Le Moal
Western Digital Research
Re: [PATCH v3] scsi: core: Fix async_scan race condition with READ_ONCE/WRITE_ONCE
Posted by Damien Le Moal 1 month, 1 week ago
On 3/4/26 16:57, Chaohai Chen wrote:
> Previously, host_lock was used to prevent bit-set conflicts in async_scan,
> but this approach introduced naked reads in some code paths.
> 
> Convert async_scan from a bitfield to a bool type to eliminate bit-level
> conflicts entirely. Use READ_ONCE() and WRITE_ONCE() to ensure proper
> memory ordering on Alpha and satisfy KCSAN requirements.
> 
> Signed-off-by: Chaohai Chen <wdhh6@aliyun.com>
> ---
> 
> v1->v3:
> use READ_ONCE()/WRITE_ONCE() to fix the issue (Christoph Hellwig, Damien Le Moal)
> 
> v1: https://lore.kernel.org/all/20260302121343.1630837-1-wdhh6@aliyun.com/
> 
>  drivers/scsi/scsi_scan.c | 22 ++++++++--------------
>  include/scsi/scsi_host.h |  6 +++---
>  2 files changed, 11 insertions(+), 17 deletions(-)
> 
> diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
> index 60c06fa4ec32..892be54dacc6 100644
> --- a/drivers/scsi/scsi_scan.c
> +++ b/drivers/scsi/scsi_scan.c
> @@ -1298,7 +1298,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
>  		goto out_free_result;
>  	}
>  
> -	res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
> +	res = scsi_add_lun(sdev, result, &bflags, READ_ONCE(shost->async_scan));
>  	if (res == SCSI_SCAN_LUN_PRESENT) {
>  		if (bflags & BLIST_KEY) {
>  			sdev->lockable = 0;
> @@ -1629,7 +1629,7 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
>  	scsi_autopm_get_target(starget);
>  
>  	mutex_lock(&shost->scan_mutex);
> -	if (!shost->async_scan)
> +	if (!READ_ONCE(shost->async_scan))
>  		scsi_complete_async_scans();
>  
>  	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
> @@ -1839,7 +1839,7 @@ void scsi_scan_target(struct device *parent, unsigned int channel,
>  		return;
>  
>  	mutex_lock(&shost->scan_mutex);
> -	if (!shost->async_scan)
> +	if (!READ_ONCE(shost->async_scan))
>  		scsi_complete_async_scans();
>  
>  	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
> @@ -1896,7 +1896,7 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
>  		return -EINVAL;
>  
>  	mutex_lock(&shost->scan_mutex);
> -	if (!shost->async_scan)
> +	if (!READ_ONCE(shost->async_scan))
>  		scsi_complete_async_scans();
>  
>  	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
> @@ -1943,13 +1943,12 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
>  static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
>  {
>  	struct async_scan_data *data = NULL;
> -	unsigned long flags;
>  
>  	if (strncmp(scsi_scan_type, "sync", 4) == 0)
>  		return NULL;
>  
>  	mutex_lock(&shost->scan_mutex);
> -	if (shost->async_scan) {
> +	if (READ_ONCE(shost->async_scan)) {
>  		shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
>  		goto err;
>  	}
> @@ -1962,9 +1961,7 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
>  		goto err;
>  	init_completion(&data->prev_finished);
>  
> -	spin_lock_irqsave(shost->host_lock, flags);
> -	shost->async_scan = 1;
> -	spin_unlock_irqrestore(shost->host_lock, flags);
> +	WRITE_ONCE(shost->async_scan, true);
>  	mutex_unlock(&shost->scan_mutex);
>  
>  	spin_lock(&async_scan_lock);
> @@ -1992,7 +1989,6 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
>  static void scsi_finish_async_scan(struct async_scan_data *data)
>  {
>  	struct Scsi_Host *shost;
> -	unsigned long flags;
>  
>  	if (!data)
>  		return;
> @@ -2001,7 +1997,7 @@ static void scsi_finish_async_scan(struct async_scan_data *data)
>  
>  	mutex_lock(&shost->scan_mutex);
>  
> -	if (!shost->async_scan) {
> +	if (!READ_ONCE(shost->async_scan)) {
>  		shost_printk(KERN_INFO, shost, "%s called twice\n", __func__);
>  		dump_stack();
>  		mutex_unlock(&shost->scan_mutex);
> @@ -2012,9 +2008,7 @@ static void scsi_finish_async_scan(struct async_scan_data *data)
>  
>  	scsi_sysfs_add_devices(shost);
>  
> -	spin_lock_irqsave(shost->host_lock, flags);
> -	shost->async_scan = 0;
> -	spin_unlock_irqrestore(shost->host_lock, flags);
> +	WRITE_ONCE(shost->async_scan, false);
>  
>  	mutex_unlock(&shost->scan_mutex);
>  
> diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
> index f6e12565a81d..668ec9a1b33c 100644
> --- a/include/scsi/scsi_host.h
> +++ b/include/scsi/scsi_host.h
> @@ -678,9 +678,6 @@ struct Scsi_Host {
>  	/* Task mgmt function in progress */
>  	unsigned tmf_in_progress:1;
>  
> -	/* Asynchronous scan in progress */
> -	unsigned async_scan:1;
> -
>  	/* Don't resume host in EH */
>  	unsigned eh_noresume:1;
>  
> @@ -699,6 +696,9 @@ struct Scsi_Host {
>  	/* The transport requires the LUN bits NOT to be stored in CDB[1] */
>  	unsigned no_scsi2_lun_in_cdb:1;
>  
> +	/* Asynchronous scan in progress */
> +	bool async_scan;
> +

Please move this before the bit field in the structure to avoid holes.

>  	/*
>  	 * Optional work queue to be utilized by the transport
>  	 */


-- 
Damien Le Moal
Western Digital Research