From: Swarna Prabhu <sw.prabhu6@gmail.com>
The WRITE SAME(16) and WRITE SAME(10) SCSI commands use
a page from a dedicated mempool ('sd_page_pool') for their
payload. This pool is initialized to allocate single
pages, which was sufficient as long as the device sector
size did not exceed PAGE_SIZE.

Now that the block layer supports block sizes up to
64K, i.e. beyond PAGE_SIZE, initialize a large page pool
in 'sd_probe()' when a device with a sector size larger
than PAGE_SIZE is attached, serializing the pool setup
with a mutex. Adapt 'sd_set_special_bvec()' to allocate
from the large page pool for such devices.

With the above fix, enable sector sizes > PAGE_SIZE in
the SCSI sd driver.

Cc: stable@vger.kernel.org
Signed-off-by: Swarna Prabhu <s.prabhu@samsung.com>
Co-developed-by: Pankaj Raghav <p.raghav@samsung.com>
Signed-off-by: Pankaj Raghav <p.raghav@samsung.com>
---
drivers/scsi/sd.c | 79 ++++++++++++++++++++++++++++++++++++++++-------
1 file changed, 67 insertions(+), 12 deletions(-)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index f50b92e63201..0e0c5dd1c668 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -112,8 +112,11 @@ static void sd_shutdown(struct device *);
static void scsi_disk_release(struct device *cdev);
static DEFINE_IDA(sd_index_ida);
+static DEFINE_MUTEX(sd_mutex_lock);
static mempool_t *sd_page_pool;
+static mempool_t *sd_large_page_pool;
+static atomic_t sd_large_page_pool_users = ATOMIC_INIT(0);
static struct lock_class_key sd_bio_compl_lkclass;
static const char *sd_cache_types[] = {
@@ -922,14 +925,27 @@ static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim,
(logical_block_size >> SECTOR_SHIFT);
}
-static void *sd_set_special_bvec(struct request *rq, unsigned int data_len)
+static void *sd_set_special_bvec(struct scsi_cmnd *cmd, unsigned int data_len)
{
struct page *page;
+ struct request *rq = scsi_cmd_to_rq(cmd);
+ struct scsi_device *sdp = cmd->device;
+ unsigned sector_size = sdp->sector_size;
+ unsigned int nr_pages = DIV_ROUND_UP(sector_size, PAGE_SIZE);
+ int n = 0;
- page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
+ if (sector_size > PAGE_SIZE)
+ page = mempool_alloc(sd_large_page_pool, GFP_ATOMIC);
+ else
+ page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
if (!page)
return NULL;
- clear_highpage(page);
+
+ do {
+ clear_highpage(page + n);
+ n++;
+ } while (n < nr_pages);
+
bvec_set_page(&rq->special_vec, page, data_len, 0);
rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
return bvec_virt(&rq->special_vec);
@@ -945,7 +961,7 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
unsigned int data_len = 24;
char *buf;
- buf = sd_set_special_bvec(rq, data_len);
+ buf = sd_set_special_bvec(cmd, data_len);
if (!buf)
return BLK_STS_RESOURCE;
@@ -1034,7 +1050,7 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
u32 data_len = sdp->sector_size;
- if (!sd_set_special_bvec(rq, data_len))
+ if (!sd_set_special_bvec(cmd, data_len))
return BLK_STS_RESOURCE;
cmd->cmd_len = 16;
@@ -1061,7 +1077,7 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
u32 data_len = sdp->sector_size;
- if (!sd_set_special_bvec(rq, data_len))
+ if (!sd_set_special_bvec(cmd, data_len))
return BLK_STS_RESOURCE;
cmd->cmd_len = 10;
@@ -1507,9 +1523,15 @@ static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
static void sd_uninit_command(struct scsi_cmnd *SCpnt)
{
struct request *rq = scsi_cmd_to_rq(SCpnt);
+ struct scsi_device *sdp = SCpnt->device;
+ unsigned sector_size = sdp->sector_size;
- if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
- mempool_free(rq->special_vec.bv_page, sd_page_pool);
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
+ if (sector_size > PAGE_SIZE)
+ mempool_free(rq->special_vec.bv_page, sd_large_page_pool);
+ else
+ mempool_free(rq->special_vec.bv_page, sd_page_pool);
+ }
}
static bool sd_need_revalidate(struct gendisk *disk, struct scsi_disk *sdkp)
@@ -2920,10 +2942,7 @@ sd_read_capacity(struct scsi_disk *sdkp, struct queue_limits *lim,
"assuming 512.\n");
}
- if (sector_size != 512 &&
- sector_size != 1024 &&
- sector_size != 2048 &&
- sector_size != 4096) {
+ if (blk_validate_block_size(sector_size)) {
sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
sector_size);
/*
@@ -4044,6 +4063,21 @@ static int sd_probe(struct device *dev)
sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;
sd_revalidate_disk(gd);
+ if (sdp->sector_size > PAGE_SIZE) {
+ mutex_lock(&sd_mutex_lock);
+ if (!sd_large_page_pool) {
+ sd_large_page_pool = mempool_create_page_pool(
+ SD_MEMPOOL_SIZE, get_order(BLK_MAX_BLOCK_SIZE));
+ if (!sd_large_page_pool) {
+ printk(KERN_ERR "sd: can't create large page mempool\n");
+ error = -ENOMEM;
+ mutex_unlock(&sd_mutex_lock);
+ goto out_free_index;
+ }
+ }
+ atomic_inc(&sd_large_page_pool_users);
+ mutex_unlock(&sd_mutex_lock);
+ }
if (sdp->removable) {
gd->flags |= GENHD_FL_REMOVABLE;
@@ -4061,6 +4095,14 @@ static int sd_probe(struct device *dev)
if (error) {
device_unregister(&sdkp->disk_dev);
put_disk(gd);
+ if (sdp->sector_size > PAGE_SIZE) {
+ mutex_lock(&sd_mutex_lock);
+ if (atomic_dec_and_test(&sd_large_page_pool_users)) {
+ mempool_destroy(sd_large_page_pool);
+ sd_large_page_pool = NULL;
+ }
+ mutex_unlock(&sd_mutex_lock);
+ }
goto out;
}
@@ -4101,6 +4143,7 @@ static int sd_probe(struct device *dev)
static int sd_remove(struct device *dev)
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);
+ struct scsi_device *sdp = sdkp->device;
scsi_autopm_get_device(sdkp->device);
@@ -4110,6 +4153,16 @@ static int sd_remove(struct device *dev)
sd_shutdown(dev);
put_disk(sdkp->disk);
+
+ if (sdp->sector_size > PAGE_SIZE) {
+ mutex_lock(&sd_mutex_lock);
+ if (atomic_dec_and_test(&sd_large_page_pool_users)) {
+ mempool_destroy(sd_large_page_pool);
+ sd_large_page_pool = NULL;
+ }
+ mutex_unlock(&sd_mutex_lock);
+ }
+
return 0;
}
@@ -4446,6 +4499,8 @@ static void __exit exit_sd(void)
scsi_unregister_driver(&sd_template.gendrv);
mempool_destroy(sd_page_pool);
+ if (sd_large_page_pool)
+ mempool_destroy(sd_large_page_pool);
class_unregister(&sd_disk_class);
--
2.39.5
On 2/11/26 10:50, sw.prabhu6@gmail.com wrote:
> From: Swarna Prabhu <sw.prabhu6@gmail.com>
>
> The WRITE SAME(16) and WRITE SAME(10) SCSI commands use
> a page from a dedicated mempool ('sd_page_pool') for their
> payload. This pool is initialized to allocate single
> pages, which was sufficient as long as the device sector
> size did not exceed PAGE_SIZE.
>
> Now that the block layer supports block sizes up to
> 64K, i.e. beyond PAGE_SIZE, initialize a large page pool
> in 'sd_probe()' when a device with a sector size larger
> than PAGE_SIZE is attached, serializing the pool setup
> with a mutex. Adapt 'sd_set_special_bvec()' to allocate
> from the large page pool for such devices.
>
> With the above fix, enable sector sizes > PAGE_SIZE in
> the SCSI sd driver.
This is not a fix (as in a bug fix) but rather a new feature.
> Cc: stable@vger.kernel.org
Why? Before this patch, SCSI allows only sector sizes up to 4K, which is not >
PAGE_SIZE.
> Signed-off-by: Swarna Prabhu <s.prabhu@samsung.com>
> Co-developed-by: Pankaj Raghav <p.raghav@samsung.com>
> Signed-off-by: Pankaj Raghav <p.raghav@samsung.com>
> ---
> drivers/scsi/sd.c | 79 ++++++++++++++++++++++++++++++++++++++++-------
> 1 file changed, 67 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
> index f50b92e63201..0e0c5dd1c668 100644
> --- a/drivers/scsi/sd.c
> +++ b/drivers/scsi/sd.c
> @@ -112,8 +112,11 @@ static void sd_shutdown(struct device *);
> static void scsi_disk_release(struct device *cdev);
>
> static DEFINE_IDA(sd_index_ida);
> +static DEFINE_MUTEX(sd_mutex_lock);
>
> static mempool_t *sd_page_pool;
> +static mempool_t *sd_large_page_pool;
> +static atomic_t sd_large_page_pool_users = ATOMIC_INIT(0);
> static struct lock_class_key sd_bio_compl_lkclass;
>
> static const char *sd_cache_types[] = {
> @@ -922,14 +925,27 @@ static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim,
> (logical_block_size >> SECTOR_SHIFT);
> }
>
> -static void *sd_set_special_bvec(struct request *rq, unsigned int data_len)
> +static void *sd_set_special_bvec(struct scsi_cmnd *cmd, unsigned int data_len)
> {
> struct page *page;
> + struct request *rq = scsi_cmd_to_rq(cmd);
> + struct scsi_device *sdp = cmd->device;
> + unsigned sector_size = sdp->sector_size;
> + unsigned int nr_pages = DIV_ROUND_UP(sector_size, PAGE_SIZE);
> + int n = 0;
>
> - page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
> + if (sector_size > PAGE_SIZE)
> + page = mempool_alloc(sd_large_page_pool, GFP_ATOMIC);
> + else
> + page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
> if (!page)
> return NULL;
> - clear_highpage(page);
> +
> + do {
> + clear_highpage(page + n);
> + n++;
> + } while (n < nr_pages);
A for loop would be a lot cleaner and simpler.
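E.g. something like this (untested), reusing the nr_pages and n already
declared above:

	for (n = 0; n < nr_pages; n++)
		clear_highpage(page + n);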
> +
> bvec_set_page(&rq->special_vec, page, data_len, 0);
> rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
> return bvec_virt(&rq->special_vec);
> @@ -945,7 +961,7 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
> unsigned int data_len = 24;
> char *buf;
>
> - buf = sd_set_special_bvec(rq, data_len);
> + buf = sd_set_special_bvec(cmd, data_len);
> if (!buf)
> return BLK_STS_RESOURCE;
>
> @@ -1034,7 +1050,7 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
> u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
> u32 data_len = sdp->sector_size;
>
> - if (!sd_set_special_bvec(rq, data_len))
> + if (!sd_set_special_bvec(cmd, data_len))
> return BLK_STS_RESOURCE;
>
> cmd->cmd_len = 16;
> @@ -1061,7 +1077,7 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
> u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
> u32 data_len = sdp->sector_size;
>
> - if (!sd_set_special_bvec(rq, data_len))
> + if (!sd_set_special_bvec(cmd, data_len))
> return BLK_STS_RESOURCE;
>
> cmd->cmd_len = 10;
> @@ -1507,9 +1523,15 @@ static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
> static void sd_uninit_command(struct scsi_cmnd *SCpnt)
> {
> struct request *rq = scsi_cmd_to_rq(SCpnt);
> + struct scsi_device *sdp = SCpnt->device;
> + unsigned sector_size = sdp->sector_size;
>
> - if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
> - mempool_free(rq->special_vec.bv_page, sd_page_pool);
> + if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
> + if (sector_size > PAGE_SIZE)
> + mempool_free(rq->special_vec.bv_page, sd_large_page_pool);
> + else
> + mempool_free(rq->special_vec.bv_page, sd_page_pool);
> + }
> }
>
> static bool sd_need_revalidate(struct gendisk *disk, struct scsi_disk *sdkp)
> @@ -2920,10 +2942,7 @@ sd_read_capacity(struct scsi_disk *sdkp, struct queue_limits *lim,
> "assuming 512.\n");
> }
>
> - if (sector_size != 512 &&
> - sector_size != 1024 &&
> - sector_size != 2048 &&
> - sector_size != 4096) {
> + if (blk_validate_block_size(sector_size)) {
> sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
> sector_size);
> /*
> @@ -4044,6 +4063,21 @@ static int sd_probe(struct device *dev)
> sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;
>
> sd_revalidate_disk(gd);
> + if (sdp->sector_size > PAGE_SIZE) {
> + mutex_lock(&sd_mutex_lock);
> + if (!sd_large_page_pool) {
> + sd_large_page_pool = mempool_create_page_pool(
> + SD_MEMPOOL_SIZE, get_order(BLK_MAX_BLOCK_SIZE));
> + if (!sd_large_page_pool) {
> + printk(KERN_ERR "sd: can't create large page mempool\n");
> + error = -ENOMEM;
> + mutex_unlock(&sd_mutex_lock);
> + goto out_free_index;
> + }
> + }
> + atomic_inc(&sd_large_page_pool_users);
> + mutex_unlock(&sd_mutex_lock);
> + }
It would be a lot nicer to have this defined as a helper function that goes
together with a pool destroy function (see below).
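For example (completely untested sketch, and sd_large_page_pool_get() is only
a suggested name):

	/*
	 * Suggested helper (hypothetical name): create the shared large page
	 * pool on first use and take a reference on it.
	 */
	static int sd_large_page_pool_get(void)
	{
		int ret = 0;

		mutex_lock(&sd_mutex_lock);
		if (!sd_large_page_pool)
			sd_large_page_pool = mempool_create_page_pool(
				SD_MEMPOOL_SIZE, get_order(BLK_MAX_BLOCK_SIZE));
		if (sd_large_page_pool)
			atomic_inc(&sd_large_page_pool_users);
		else
			ret = -ENOMEM;
		mutex_unlock(&sd_mutex_lock);

		return ret;
	}

sd_probe() then only needs:

	if (sdp->sector_size > PAGE_SIZE) {
		error = sd_large_page_pool_get();
		if (error)
			goto out_free_index;
	}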
>
> if (sdp->removable) {
> gd->flags |= GENHD_FL_REMOVABLE;
> @@ -4061,6 +4095,14 @@ static int sd_probe(struct device *dev)
> if (error) {
> device_unregister(&sdkp->disk_dev);
> put_disk(gd);
> + if (sdp->sector_size > PAGE_SIZE) {
> + mutex_lock(&sd_mutex_lock);
> + if (atomic_dec_and_test(&sd_large_page_pool_users)) {
> + mempool_destroy(sd_large_page_pool);
> + sd_large_page_pool = NULL;
> + }
> + mutex_unlock(&sd_mutex_lock);
> + }
This hunk appears twice. Please make it a helper.
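Again untested, and the name is only a suggestion, but the put side of the
helper pair mentioned above could look like:

	/*
	 * Suggested helper (hypothetical name): drop a reference and free the
	 * pool when the last user goes away.
	 */
	static void sd_large_page_pool_put(void)
	{
		mutex_lock(&sd_mutex_lock);
		if (atomic_dec_and_test(&sd_large_page_pool_users)) {
			mempool_destroy(sd_large_page_pool);
			sd_large_page_pool = NULL;
		}
		mutex_unlock(&sd_mutex_lock);
	}

so that both this error path and sd_remove() reduce to:

	if (sdp->sector_size > PAGE_SIZE)
		sd_large_page_pool_put();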
> goto out;
> }
>
> @@ -4101,6 +4143,7 @@ static int sd_probe(struct device *dev)
> static int sd_remove(struct device *dev)
> {
> struct scsi_disk *sdkp = dev_get_drvdata(dev);
> + struct scsi_device *sdp = sdkp->device;
>
> scsi_autopm_get_device(sdkp->device);
>
> @@ -4110,6 +4153,16 @@ static int sd_remove(struct device *dev)
> sd_shutdown(dev);
>
> put_disk(sdkp->disk);
> +
> + if (sdp->sector_size > PAGE_SIZE) {
> + mutex_lock(&sd_mutex_lock);
> + if (atomic_dec_and_test(&sd_large_page_pool_users)) {
> + mempool_destroy(sd_large_page_pool);
> + sd_large_page_pool = NULL;
> + }
> + mutex_unlock(&sd_mutex_lock);
> + }
> +
> return 0;
> }
>
> @@ -4446,6 +4499,8 @@ static void __exit exit_sd(void)
>
> scsi_unregister_driver(&sd_template.gendrv);
> mempool_destroy(sd_page_pool);
> + if (sd_large_page_pool)
> + mempool_destroy(sd_large_page_pool);
>
> class_unregister(&sd_disk_class);
>
--
Damien Le Moal
Western Digital Research