From: Robert Beckett <bob.beckett@collabora.com>
We initially put in a quick fix of limiting the queue depth to 1,
as experimentation showed that it fixed data corruption on 64GB
Steam Decks.
After further experimentation, it appears that the corruption
is fixed by increasing the small dma pool segment size to
512 bytes. Testing via desync image verification shows that
it now passes thousands of verification loops, where previously
it never managed above 7.
Currently it is not known why this fixes the corruption.
Perhaps the device is doing something nasty like using an mmc page
as a cache for the prp lists (mmc min. page size is 512 bytes)
and not invalidating it properly, so that the dma pool change to
treat the segment list as a stack ends up handing back a previous
segment from the same cached page.
This replaces the previous queue depth limitation, as it fixes
the corruption without incurring the 37% performance degradation
measured with that workaround.
Fixes: 83bdfcbdbe5d ("nvme-pci: qdepth 1 quirk")
Signed-off-by: Robert Beckett <bob.beckett@collabora.com>
---
drivers/nvme/host/nvme.h | 5 +++++
drivers/nvme/host/pci.c | 17 ++++++++++++-----
2 files changed, 17 insertions(+), 5 deletions(-)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 093cb423f536..bbad25d15360 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -173,6 +173,11 @@ enum nvme_quirks {
 	 * MSI (but not MSI-X) interrupts are broken and never fire.
 	 */
 	NVME_QUIRK_BROKEN_MSI			= (1 << 21),
+
+	/*
+	 * Device requires a minimum dma pool segment size of 512 bytes.
+	 */
+	NVME_QUIRK_SMALL_DMAPOOL_512		= (1 << 22),
 };
 
 /*
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 4b9fda0b1d9a..0782c9b1b4e7 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -141,6 +141,7 @@ struct nvme_dev {
 	struct nvme_ctrl ctrl;
 	u32 last_ps;
 	bool hmb;
+	u32 small_dmapool_seg_size;
 
 	mempool_t *iod_mempool;
 
@@ -611,7 +612,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 	}
 
 	nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
-	if (nprps <= (256 / 8)) {
+	if (nprps <= (dev->small_dmapool_seg_size / 8)) {
 		pool = dev->prp_small_pool;
 		iod->nr_allocations = 0;
 	} else {
@@ -701,7 +702,7 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
 		return BLK_STS_OK;
 	}
 
-	if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
+	if (entries <= (dev->small_dmapool_seg_size / sizeof(struct nvme_sgl_desc))) {
 		pool = dev->prp_small_pool;
 		iod->nr_allocations = 0;
 	} else {
@@ -2700,8 +2701,9 @@ static int nvme_setup_prp_pools(struct nvme_dev *dev)
 		return -ENOMEM;
 
 	/* Optimisation for I/Os between 4k and 128k */
-	dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
-						256, 256, 0);
+	dev->prp_small_pool = dma_pool_create("prp list small", dev->dev,
+						dev->small_dmapool_seg_size,
+						dev->small_dmapool_seg_size, 0);
 	if (!dev->prp_small_pool) {
 		dma_pool_destroy(dev->prp_page_pool);
 		return -ENOMEM;
@@ -3063,6 +3065,11 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
 	 * a single integrity segment for the separate metadata pointer.
 	 */
 	dev->ctrl.max_integrity_segments = 1;
+
+	if (dev->ctrl.quirks & NVME_QUIRK_SMALL_DMAPOOL_512)
+		dev->small_dmapool_seg_size = 512;
+	else
+		dev->small_dmapool_seg_size = 256;
 	return dev;
 
 out_put_device:
@@ -3449,7 +3456,7 @@ static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_VDEVICE(REDHAT, 0x0010),	/* Qemu emulated controller */
 		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */
-		.driver_data = NVME_QUIRK_QDEPTH_ONE },
+		.driver_data = NVME_QUIRK_SMALL_DMAPOOL_512 },
 	{ PCI_DEVICE(0x126f, 0x2262),	/* Silicon Motion generic */
 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
 				NVME_QUIRK_BOGUS_NID, },
--
2.45.2
On Thu, Nov 07, 2024 at 04:50:46PM +0000, Bob Beckett wrote:
> @@ -611,7 +612,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
> 	}
> 
> 	nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
> -	if (nprps <= (256 / 8)) {
> +	if (nprps <= (dev->small_dmapool_seg_size / 8)) {
> 		pool = dev->prp_small_pool;
> 		iod->nr_allocations = 0;
> 	} else {

We have a constant expression currently, and this is changing it to a
full division in the IO path. :(

Could we leave the pool selection check size as-is and just say the cost
of the quirk is additional memory overhead?

> @@ -2700,8 +2701,9 @@ static int nvme_setup_prp_pools(struct nvme_dev *dev)
> 		return -ENOMEM;
> 
> 	/* Optimisation for I/Os between 4k and 128k */
> -	dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
> -						256, 256, 0);
> +	dev->prp_small_pool = dma_pool_create("prp list small", dev->dev,
> +						dev->small_dmapool_seg_size,
> +						dev->small_dmapool_seg_size, 0);

I think it should work if we only change the alignment property of the
pool. Something like this:

	if (dev->ctrl.quirks & NVME_QUIRK_SMALL_DMAPOOL_512)
		dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
							256, 512, 0);
	else
		dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
							256, 256, 0);
On Thu, Nov 07, 2024 at 10:19:30AM -0700, Keith Busch wrote:
> We have a constant expression currently, and this is changing it to a
> full division in the IO path. :(

Yeah.  Given that the device is broken I'd just have it pay the price
and never use the small prp pool instead, which just adds a single extra
branch to the fast path.
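A minimal sketch of that alternative (untested; NVME_QUIRK_NO_SMALL_POOL
is a hypothetical flag name, not part of the posted patch), keeping the
existing constant comparison and simply bypassing the small pool for
quirked devices:

	nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
	/* constant compare preserved; broken devices take the page pool */
	if (nprps <= (256 / 8) &&
	    !(dev->ctrl.quirks & NVME_QUIRK_NO_SMALL_POOL)) {
		pool = dev->prp_small_pool;
		iod->nr_allocations = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->nr_allocations = 1;
	}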
---- On Thu, 07 Nov 2024 17:19:30 +0000 Keith Busch wrote ---
> On Thu, Nov 07, 2024 at 04:50:46PM +0000, Bob Beckett wrote:
> > @@ -611,7 +612,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
> > 	}
> > 
> > 	nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
> > -	if (nprps <= (256 / 8)) {
> > +	if (nprps <= (dev->small_dmapool_seg_size / 8)) {
> > 		pool = dev->prp_small_pool;
> > 		iod->nr_allocations = 0;
> > 	} else {
>
> We have a constant expression currently, and this is changing it to a
> full division in the IO path. :(

Yeah, that's fair. Does it get high enough throughput that this is a
significant issue here? (I have little intuition for this driver.)
How about pre-computing the nprps threshold during pool creation, where
we detect the quirk? It would then be a variable comparison instead of
a constant comparison, but with no divide.

> Could we leave the pool selection check size as-is and just say the cost
> of the quirk is additional memory overhead?
>
> > @@ -2700,8 +2701,9 @@ static int nvme_setup_prp_pools(struct nvme_dev *dev)
> > 		return -ENOMEM;
> > 
> > 	/* Optimisation for I/Os between 4k and 128k */
> > -	dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
> > -						256, 256, 0);
> > +	dev->prp_small_pool = dma_pool_create("prp list small", dev->dev,
> > +						dev->small_dmapool_seg_size,
> > +						dev->small_dmapool_seg_size, 0);
>
> I think it should work if we only change the alignment property of the
> pool. Something like this:
>
> 	if (dev->ctrl.quirks & NVME_QUIRK_SMALL_DMAPOOL_512)
> 		dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
> 							256, 512, 0);

I actually already tested a change of 512, 512 while keeping the 256
division above during testing (i.e. wasting half of each segment).
I'll confirm with a test again against latest and send a v2 assuming it
tests fine.

> 	else
> 		dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
> 							256, 256, 0);
>
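For illustration, the pre-computed-threshold idea mentioned above could
look roughly like this (untested; small_pool_nprps is a hypothetical
field name):

	/* at device init, next to the existing quirk check */
	if (dev->ctrl.quirks & NVME_QUIRK_SMALL_DMAPOOL_512)
		dev->small_dmapool_seg_size = 512;
	else
		dev->small_dmapool_seg_size = 256;
	/* each PRP entry is 8 bytes, so this is the max nprps that fit */
	dev->small_pool_nprps = dev->small_dmapool_seg_size / 8;

	/* in nvme_pci_setup_prps(): variable compare, no per-IO division */
	nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
	if (nprps <= dev->small_pool_nprps) {
		pool = dev->prp_small_pool;
		iod->nr_allocations = 0;
	}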