Rename nvme_build_prpl() to nvme_prpl_xfer() and directly invoke
nvme_io_xfer() or nvme_bounce_xfer() from that function.
Signed-off-by: Kevin O'Connor <kevin@koconnor.net>
---
src/hw/nvme-int.h | 1 -
src/hw/nvme.c | 42 ++++++++++++++++--------------------------
2 files changed, 16 insertions(+), 27 deletions(-)
diff --git a/src/hw/nvme-int.h b/src/hw/nvme-int.h
index a4c1555..9564c17 100644
--- a/src/hw/nvme-int.h
+++ b/src/hw/nvme-int.h
@@ -125,7 +125,6 @@ struct nvme_namespace {
/* Page List */
u32 prpl_len;
- void *prp1;
u64 prpl[NVME_MAX_PRPL_ENTRIES];
};
diff --git a/src/hw/nvme.c b/src/hw/nvme.c
index fd7c1d0..bafe8bf 100644
--- a/src/hw/nvme.c
+++ b/src/hw/nvme.c
@@ -499,10 +499,13 @@ static int nvme_add_prpl(struct nvme_namespace *ns, u64 base)
return 0;
}
-static int nvme_build_prpl(struct nvme_namespace *ns, void *op_buf, u16 count)
+// Transfer data using page list (if applicable)
+static int
+nvme_prpl_xfer(struct nvme_namespace *ns, u64 lba, void *buf, u16 count,
+ int write)
{
int first_page = 1;
- u32 base = (long)op_buf;
+ u32 base = (long)buf;
s32 size;
if (count > ns->max_req_size)
@@ -512,31 +515,28 @@ static int nvme_build_prpl(struct nvme_namespace *ns, void *op_buf, u16 count)
size = count * ns->block_size;
/* Special case for transfers that fit into PRP1, but are unaligned */
- if (((size + (base & ~NVME_PAGE_MASK)) <= NVME_PAGE_SIZE)) {
- ns->prp1 = op_buf;
- return count;
- }
+ if (((size + (base & ~NVME_PAGE_MASK)) <= NVME_PAGE_SIZE))
+ return nvme_io_xfer(ns, lba, buf, count, write);
/* Every request has to be page aligned */
if (base & ~NVME_PAGE_MASK)
- return 0;
+ return nvme_bounce_xfer(ns, lba, buf, count, write);
/* Make sure a full block fits into the last chunk */
if (size & (ns->block_size - 1ULL))
- return 0;
+ return nvme_bounce_xfer(ns, lba, buf, count, write);
for (; size > 0; base += NVME_PAGE_SIZE, size -= NVME_PAGE_SIZE) {
if (first_page) {
/* First page is special */
- ns->prp1 = (void*)base;
first_page = 0;
continue;
}
if (nvme_add_prpl(ns, base))
- return 0;
+ return nvme_bounce_xfer(ns, lba, buf, count, write);
}
- return count;
+ return nvme_io_xfer(ns, lba, buf, count, write);
}
static int
@@ -737,24 +737,14 @@ nvme_scan(void)
static int
nvme_cmd_readwrite(struct nvme_namespace *ns, struct disk_op_s *op, int write)
{
- u16 i, blocks;
-
+ int i;
for (i = 0; i < op->count;) {
u16 blocks_remaining = op->count - i;
char *op_buf = op->buf_fl + i * ns->block_size;
-
- blocks = nvme_build_prpl(ns, op_buf, blocks_remaining);
- if (blocks) {
- int res = nvme_io_xfer(ns, op->lba + i, ns->prp1, blocks, write);
- if (res < 0)
- return DISK_RET_EBADTRACK;
- } else {
- int res = nvme_bounce_xfer(ns, op->lba + i, op_buf, blocks, write);
- if (res < 0)
- return DISK_RET_EBADTRACK;
- blocks = res;
- }
-
+ int blocks = nvme_prpl_xfer(ns, op->lba + i, op_buf,
+ blocks_remaining, write);
+ if (blocks < 0)
+ return DISK_RET_EBADTRACK;
i += blocks;
}
--
2.31.1
_______________________________________________
SeaBIOS mailing list -- seabios@seabios.org
To unsubscribe send an email to seabios-leave@seabios.org
On 19.01.22 19:45, Kevin O'Connor wrote:
> Rename nvme_build_prpl() to nvme_prpl_xfer() and directly invoke
> nvme_io_xfer() or nvme_bounce_xfer() from that function.
>
> Signed-off-by: Kevin O'Connor <kevin@koconnor.net>
> ---
> src/hw/nvme-int.h | 1 -
> src/hw/nvme.c | 42 ++++++++++++++++--------------------------
> 2 files changed, 16 insertions(+), 27 deletions(-)
>
> diff --git a/src/hw/nvme-int.h b/src/hw/nvme-int.h
> index a4c1555..9564c17 100644
> --- a/src/hw/nvme-int.h
> +++ b/src/hw/nvme-int.h
> @@ -125,7 +125,6 @@ struct nvme_namespace {
>
> /* Page List */
> u32 prpl_len;
> - void *prp1;
> u64 prpl[NVME_MAX_PRPL_ENTRIES];
> };
>
> diff --git a/src/hw/nvme.c b/src/hw/nvme.c
> index fd7c1d0..bafe8bf 100644
> --- a/src/hw/nvme.c
> +++ b/src/hw/nvme.c
> @@ -499,10 +499,13 @@ static int nvme_add_prpl(struct nvme_namespace *ns, u64 base)
> return 0;
> }
>
> -static int nvme_build_prpl(struct nvme_namespace *ns, void *op_buf, u16 count)
> +// Transfer data using page list (if applicable)
> +static int
> +nvme_prpl_xfer(struct nvme_namespace *ns, u64 lba, void *buf, u16 count,
> + int write)
> {
> int first_page = 1;
> - u32 base = (long)op_buf;
> + u32 base = (long)buf;
> s32 size;
>
> if (count > ns->max_req_size)
> @@ -512,31 +515,28 @@ static int nvme_build_prpl(struct nvme_namespace *ns, void *op_buf, u16 count)
>
> size = count * ns->block_size;
> /* Special case for transfers that fit into PRP1, but are unaligned */
> - if (((size + (base & ~NVME_PAGE_MASK)) <= NVME_PAGE_SIZE)) {
> - ns->prp1 = op_buf;
> - return count;
> - }
> + if (((size + (base & ~NVME_PAGE_MASK)) <= NVME_PAGE_SIZE))
> + return nvme_io_xfer(ns, lba, buf, count, write);
>
> /* Every request has to be page aligned */
> if (base & ~NVME_PAGE_MASK)
> - return 0;
> + return nvme_bounce_xfer(ns, lba, buf, count, write);
>
> /* Make sure a full block fits into the last chunk */
> if (size & (ns->block_size - 1ULL))
> - return 0;
> + return nvme_bounce_xfer(ns, lba, buf, count, write);
>
> for (; size > 0; base += NVME_PAGE_SIZE, size -= NVME_PAGE_SIZE) {
> if (first_page) {
> /* First page is special */
> - ns->prp1 = (void*)base;
> first_page = 0;
> continue;
> }
> if (nvme_add_prpl(ns, base))
> - return 0;
> + return nvme_bounce_xfer(ns, lba, buf, count, write);
I think this is correct, but reasoning about all of the bounce
invocations is truly making my head hurt :). How about we split the
"does this fit into a PRP request?" logic from the "do the request" part?
Could we just have something like this at the end of the function:
return nvme_io_xfer(ns, lba, buf, count, write);
bounce:
return nvme_bounce_xfer(ns, lba, buf, count, write);
and then "goto bounce" at every point where we realize the request has
to be turned into an up-to-one-page bounce request?
Alex
> }
>
> - return count;
> + return nvme_io_xfer(ns, lba, buf, count, write);
> }
>
> static int
> @@ -737,24 +737,14 @@ nvme_scan(void)
> static int
> nvme_cmd_readwrite(struct nvme_namespace *ns, struct disk_op_s *op, int write)
> {
> - u16 i, blocks;
> -
> + int i;
> for (i = 0; i < op->count;) {
> u16 blocks_remaining = op->count - i;
> char *op_buf = op->buf_fl + i * ns->block_size;
> -
> - blocks = nvme_build_prpl(ns, op_buf, blocks_remaining);
> - if (blocks) {
> - int res = nvme_io_xfer(ns, op->lba + i, ns->prp1, blocks, write);
> - if (res < 0)
> - return DISK_RET_EBADTRACK;
> - } else {
> - int res = nvme_bounce_xfer(ns, op->lba + i, op_buf, blocks, write);
> - if (res < 0)
> - return DISK_RET_EBADTRACK;
> - blocks = res;
> - }
> -
> + int blocks = nvme_prpl_xfer(ns, op->lba + i, op_buf,
> + blocks_remaining, write);
> + if (blocks < 0)
> + return DISK_RET_EBADTRACK;
> i += blocks;
> }
>
> --
> 2.31.1
>
> _______________________________________________
> SeaBIOS mailing list -- seabios@seabios.org
> To unsubscribe send an email to seabios-leave@seabios.org
Amazon Development Center Germany GmbH
Krausenstr. 38
10117 Berlin
Geschaeftsfuehrung: Christian Schlaeger, Jonathan Weiss
Eingetragen am Amtsgericht Charlottenburg unter HRB 149173 B
Sitz: Berlin
Ust-ID: DE 289 237 879
_______________________________________________
SeaBIOS mailing list -- seabios@seabios.org
To unsubscribe send an email to seabios-leave@seabios.org
© 2016 - 2025 Red Hat, Inc.