From nobody Mon Nov 25 12:33:42 2024
From: Leon Romanovsky
To: Jens Axboe, Jason Gunthorpe, Robin Murphy, Joerg Roedel, Will Deacon, Christoph Hellwig, Sagi Grimberg
Cc: Keith Busch, Bjorn Helgaas, Logan Gunthorpe, Yishai Hadas, Shameer Kolothum, Kevin Tian, Alex Williamson, Marek Szyprowski, Jérôme Glisse, Andrew Morton, Jonathan Corbet, linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org, linux-block@vger.kernel.org, linux-rdma@vger.kernel.org, iommu@lists.linux.dev, linux-nvme@lists.infradead.org, linux-pci@vger.kernel.org, kvm@vger.kernel.org, linux-mm@kvack.org
Subject: [RFC PATCH 1/7] block: share more code for bio addition helpers
Date: Sun, 27 Oct 2024 16:21:54 +0200

From: Christoph Hellwig

__bio_iov_iter_get_pages currently open codes adding pages to the bio, which
duplicates a lot of code from bio_add_page and bio_add_zone_append_page.

Add bio_add_page_int and bio_add_zone_append_page_int helpers that pass down
the same_page output argument so that __bio_iov_iter_get_pages can reuse the
main helpers for adding pages to a bio.

Note that I'd normally call these helpers __bio_add_page and
__bio_add_zone_append_page, but the former name is already taken by an
exported API.
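In condensed form, the exported helper becomes a thin wrapper that merely
supplies the same_page output argument (a sketch distilled from the diff
below; the zone-append variant follows the same pattern):

	/*
	 * Condensed from this patch: bio_add_page() keeps its exported
	 * signature and forwards to the internal helper, which reports via
	 * *same_page whether the range was merged into the previous bvec.
	 */
	int bio_add_page(struct bio *bio, struct page *page,
			 unsigned int len, unsigned int offset)
	{
		bool same_page = false;

		return bio_add_page_int(bio, page, len, offset, &same_page);
	}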
Signed-off-by: Christoph Hellwig Signed-off-by: Leon Romanovsky --- block/bio.c | 114 +++++++++++++++++++++++----------------------------- 1 file changed, 51 insertions(+), 63 deletions(-) diff --git a/block/bio.c b/block/bio.c index ac4d77c88932..2d3bc8bfb071 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1064,6 +1064,19 @@ int bio_add_pc_page(struct request_queue *q, struct = bio *bio, } EXPORT_SYMBOL(bio_add_pc_page); =20 +static int bio_add_zone_append_page_int(struct bio *bio, struct page *page, + unsigned int len, unsigned int offset, bool *same_page) +{ + struct block_device *bdev =3D bio->bi_bdev; + + if (WARN_ON_ONCE(bio_op(bio) !=3D REQ_OP_ZONE_APPEND)) + return 0; + if (WARN_ON_ONCE(!bdev_is_zoned(bdev))) + return 0; + return bio_add_hw_page(bdev_get_queue(bdev), bio, page, len, offset, + bdev_max_zone_append_sectors(bdev), same_page); +} + /** * bio_add_zone_append_page - attempt to add page to zone-append bio * @bio: destination bio @@ -1083,17 +1096,9 @@ EXPORT_SYMBOL(bio_add_pc_page); int bio_add_zone_append_page(struct bio *bio, struct page *page, unsigned int len, unsigned int offset) { - struct request_queue *q =3D bdev_get_queue(bio->bi_bdev); bool same_page =3D false; =20 - if (WARN_ON_ONCE(bio_op(bio) !=3D REQ_OP_ZONE_APPEND)) - return 0; - - if (WARN_ON_ONCE(!bdev_is_zoned(bio->bi_bdev))) - return 0; - - return bio_add_hw_page(q, bio, page, len, offset, - queue_max_zone_append_sectors(q), &same_page); + return bio_add_zone_append_page_int(bio, page, len, offset, &same_page); } EXPORT_SYMBOL_GPL(bio_add_zone_append_page); =20 @@ -1119,20 +1124,9 @@ void __bio_add_page(struct bio *bio, struct page *pa= ge, } EXPORT_SYMBOL_GPL(__bio_add_page); =20 -/** - * bio_add_page - attempt to add page(s) to bio - * @bio: destination bio - * @page: start page to add - * @len: vec entry length, may cross pages - * @offset: vec entry offset relative to @page, may cross pages - * - * Attempt to add page(s) to the bio_vec maplist. This will only fail - * if either bio->bi_vcnt =3D=3D bio->bi_max_vecs or it's a cloned bio. - */ -int bio_add_page(struct bio *bio, struct page *page, - unsigned int len, unsigned int offset) +static int bio_add_page_int(struct bio *bio, struct page *page, + unsigned int len, unsigned int offset, bool *same_page) { - bool same_page =3D false; =20 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) return 0; @@ -1141,7 +1135,7 @@ int bio_add_page(struct bio *bio, struct page *page, =20 if (bio->bi_vcnt > 0 && bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1], - page, len, offset, &same_page)) { + page, len, offset, same_page)) { bio->bi_iter.bi_size +=3D len; return len; } @@ -1151,6 +1145,24 @@ int bio_add_page(struct bio *bio, struct page *page, __bio_add_page(bio, page, len, offset); return len; } + +/** + * bio_add_page - attempt to add page(s) to bio + * @bio: destination bio + * @page: start page to add + * @len: vec entry length, may cross pages + * @offset: vec entry offset relative to @page, may cross pages + * + * Attempt to add page(s) to the bio_vec maplist. Will only fail if the + * bio is full, or it is incorrectly used on a cloned bio. 
+ */ +int bio_add_page(struct bio *bio, struct page *page, + unsigned int len, unsigned int offset) +{ + bool same_page =3D false; + + return bio_add_page_int(bio, page, len, offset, &same_page); +} EXPORT_SYMBOL(bio_add_page); =20 void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len, @@ -1224,41 +1236,6 @@ void bio_iov_bvec_set(struct bio *bio, struct iov_it= er *iter) bio_set_flag(bio, BIO_CLONED); } =20 -static int bio_iov_add_folio(struct bio *bio, struct folio *folio, size_t = len, - size_t offset) -{ - bool same_page =3D false; - - if (WARN_ON_ONCE(bio->bi_iter.bi_size > UINT_MAX - len)) - return -EIO; - - if (bio->bi_vcnt > 0 && - bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1], - folio_page(folio, 0), len, offset, - &same_page)) { - bio->bi_iter.bi_size +=3D len; - if (same_page && bio_flagged(bio, BIO_PAGE_PINNED)) - unpin_user_folio(folio, 1); - return 0; - } - bio_add_folio_nofail(bio, folio, len, offset); - return 0; -} - -static int bio_iov_add_zone_append_folio(struct bio *bio, struct folio *fo= lio, - size_t len, size_t offset) -{ - struct request_queue *q =3D bdev_get_queue(bio->bi_bdev); - bool same_page =3D false; - - if (bio_add_hw_folio(q, bio, folio, len, offset, - queue_max_zone_append_sectors(q), &same_page) !=3D len) - return -EINVAL; - if (same_page && bio_flagged(bio, BIO_PAGE_PINNED)) - unpin_user_folio(folio, 1); - return 0; -} - static unsigned int get_contig_folio_len(unsigned int *num_pages, struct page **pages, unsigned int i, struct folio *folio, size_t left, @@ -1353,6 +1330,8 @@ static int __bio_iov_iter_get_pages(struct bio *bio, = struct iov_iter *iter) for (left =3D size, i =3D 0; left > 0; left -=3D len, i +=3D num_pages) { struct page *page =3D pages[i]; struct folio *folio =3D page_folio(page); + struct page *first_page =3D folio_page(folio, 0); + bool same_page =3D false; =20 folio_offset =3D ((size_t)folio_page_idx(folio, page) << PAGE_SHIFT) + offset; @@ -1366,12 +1345,21 @@ static int __bio_iov_iter_get_pages(struct bio *bio= , struct iov_iter *iter) folio, left, offset); =20 if (bio_op(bio) =3D=3D REQ_OP_ZONE_APPEND) { - ret =3D bio_iov_add_zone_append_folio(bio, folio, len, - folio_offset); - if (ret) + if (bio_add_zone_append_page_int(bio, first_page, len, + folio_offset, &same_page) !=3D len) { + ret =3D -EINVAL; + break; + } + } else { + if (bio_add_page_int(bio, folio_page(folio, 0), len, + folio_offset, &same_page) !=3D len) { + ret =3D -EINVAL; break; - } else - bio_iov_add_folio(bio, folio, len, folio_offset); + } + } + + if (same_page && bio_flagged(bio, BIO_PAGE_PINNED)) + unpin_user_folio(folio, 1); =20 offset =3D 0; } --=20 2.46.2 From nobody Mon Nov 25 12:33:42 2024 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 677AC17C7CC; Sun, 27 Oct 2024 14:22:53 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730038973; cv=none; b=RP3GQy6A5KOmZ9x1GtEUCna/XNYIwYkHMz+d11m4CeVhLmawiZexHj3f9XDrZdVtJez6N16/2y1py9no4m+So9O+FdfMN1ewOtBGjvNIn8ZWLf39yDDbz5RVT18xTLesyzCIEtQUJCV3DEzx+XwvFiUmbWtP6wSSbC/DMR0qNw0= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730038973; c=relaxed/simple; bh=xs6fvM8ovmipSkvK4QHs63+5zfVZxbyL5606zTIt2PA=; 
From: Leon Romanovsky
To: Jens Axboe, Jason Gunthorpe, Robin Murphy, Joerg Roedel, Will Deacon, Christoph Hellwig, Sagi Grimberg
Cc: Keith Busch, Bjorn Helgaas, Logan Gunthorpe, Yishai Hadas, Shameer Kolothum, Kevin Tian, Alex Williamson, Marek Szyprowski, Jérôme Glisse, Andrew Morton, Jonathan Corbet, linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org, linux-block@vger.kernel.org, linux-rdma@vger.kernel.org, iommu@lists.linux.dev, linux-nvme@lists.infradead.org, linux-pci@vger.kernel.org, kvm@vger.kernel.org, linux-mm@kvack.org
Subject: [RFC PATCH 2/7] block: don't merge different kinds of P2P transfers in a single bio
Date: Sun, 27 Oct 2024 16:21:55 +0200
Message-ID: <34d44537a65aba6ede215a8ad882aeee028b423a.1730037261.git.leon@kernel.org>

From: Christoph Hellwig

To spare the dma mapping helpers from having to check every segment for its
P2P status, ensure that a bio contains either only P2P transfers or only
non-P2P transfers, and that a P2P bio only contains ranges from a single
device.

This means we do the page zone access in the bio add path, where the struct
page should still be cache hot, and only have to do the fairly expensive P2P
topology lookup once per bio, down in the dma mapping path, and only for bios
that are already marked as P2P.
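Both bio add paths enforce this with the same check; stripped of the
surrounding merge/append logic, the pattern added below looks roughly like
this (sketch only, decoded from the hunks in this patch):

	if (bio->bi_vcnt > 0) {
		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		/*
		 * A bio already marked as P2P may only grow by pages that
		 * share the pgmap (i.e. the device) of the previous page.
		 */
		if ((bio->bi_opf & REQ_P2PDMA) &&
		    !zone_device_pages_have_same_pgmap(bv->bv_page, page))
			return 0;
		/* ... try to merge into *bv or append a new bvec ... */
	} else {
		/* The first page added decides whether this is a P2P bio. */
		if (is_pci_p2pdma_page(page))
			bio->bi_opf |= REQ_P2PDMA | REQ_NOMERGE;
	}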
Signed-off-by: Christoph Hellwig Signed-off-by: Leon Romanovsky Reviewed-by: Logan Gunthorpe --- block/bio.c | 36 +++++++++++++++++++++++++++++------- block/blk-map.c | 32 ++++++++++++++++++++++++-------- include/linux/blk_types.h | 2 ++ 3 files changed, 55 insertions(+), 15 deletions(-) diff --git a/block/bio.c b/block/bio.c index 2d3bc8bfb071..943a6d78cb3e 100644 --- a/block/bio.c +++ b/block/bio.c @@ -928,8 +928,6 @@ static bool bvec_try_merge_page(struct bio_vec *bv, str= uct page *page, return false; if (xen_domain() && !xen_biovec_phys_mergeable(bv, page)) return false; - if (!zone_device_pages_have_same_pgmap(bv->bv_page, page)) - return false; =20 *same_page =3D ((vec_end_addr & PAGE_MASK) =3D=3D ((page_addr + off) & PAGE_MASK)); @@ -993,6 +991,14 @@ int bio_add_hw_page(struct request_queue *q, struct bi= o *bio, if (bio->bi_vcnt > 0) { struct bio_vec *bv =3D &bio->bi_io_vec[bio->bi_vcnt - 1]; =20 + /* + * When doing ZONE_DEVICE-based P2P transfers, all pages in a + * bio must be P2P pages from the same device. + */ + if ((bio->bi_opf & REQ_P2PDMA) && + !zone_device_pages_have_same_pgmap(bv->bv_page, page)) + return 0; + if (bvec_try_merge_hw_page(q, bv, page, len, offset, same_page)) { bio->bi_iter.bi_size +=3D len; @@ -1009,6 +1015,9 @@ int bio_add_hw_page(struct request_queue *q, struct b= io *bio, */ if (bvec_gap_to_prev(&q->limits, bv, offset)) return 0; + } else { + if (is_pci_p2pdma_page(page)) + bio->bi_opf |=3D REQ_P2PDMA | REQ_NOMERGE; } =20 bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, offset); @@ -1133,11 +1142,24 @@ static int bio_add_page_int(struct bio *bio, struct= page *page, if (bio->bi_iter.bi_size > UINT_MAX - len) return 0; =20 - if (bio->bi_vcnt > 0 && - bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1], - page, len, offset, same_page)) { - bio->bi_iter.bi_size +=3D len; - return len; + if (bio->bi_vcnt > 0) { + struct bio_vec *bv =3D &bio->bi_io_vec[bio->bi_vcnt - 1]; + + /* + * When doing ZONE_DEVICE-based P2P transfers, all pages in a + * bio must be P2P pages from the same device. + */ + if ((bio->bi_opf & REQ_P2PDMA) && + !zone_device_pages_have_same_pgmap(bv->bv_page, page)) + return 0; + + if (bvec_try_merge_page(bv, page, len, offset, same_page)) { + bio->bi_iter.bi_size +=3D len; + return len; + } + } else { + if (is_pci_p2pdma_page(page)) + bio->bi_opf |=3D REQ_P2PDMA | REQ_NOMERGE; } =20 if (bio->bi_vcnt >=3D bio->bi_max_vecs) diff --git a/block/blk-map.c b/block/blk-map.c index 0e1167b23934..03192b1ca6ea 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -568,6 +568,7 @@ static int blk_rq_map_user_bvec(struct request *rq, con= st struct iov_iter *iter) const struct queue_limits *lim =3D &q->limits; unsigned int nsegs =3D 0, bytes =3D 0; struct bio *bio; + int error; size_t i; =20 if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q)) @@ -588,15 +589,30 @@ static int blk_rq_map_user_bvec(struct request *rq, c= onst struct iov_iter *iter) for (i =3D 0; i < nr_segs; i++) { struct bio_vec *bv =3D &bvecs[i]; =20 - /* - * If the queue doesn't support SG gaps and adding this - * offset would create a gap, fallback to copy. - */ - if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) { - blk_mq_map_bio_put(bio); - return -EREMOTEIO; + error =3D -EREMOTEIO; + if (bvprvp) { + /* + * If the queue doesn't support SG gaps and adding this + * offset would create a gap, fallback to copy. 
+ */ + if (bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) + goto put_bio; + + /* + * When doing ZONE_DEVICE-based P2P transfers, all pages + * in a bio must be P2P pages, and from the same device. + */ + if ((bio->bi_opf & REQ_P2PDMA) && + zone_device_pages_have_same_pgmap(bvprvp->bv_page, + bv->bv_page)) + goto put_bio; + } else { + if (is_pci_p2pdma_page(bv->bv_page)) + bio->bi_opf |=3D REQ_P2PDMA | REQ_NOMERGE; } + /* check full condition */ + error =3D -EINVAL; if (nsegs >=3D nr_segs || bytes > UINT_MAX - bv->bv_len) goto put_bio; if (bytes + bv->bv_len > nr_iter) @@ -611,7 +627,7 @@ static int blk_rq_map_user_bvec(struct request *rq, con= st struct iov_iter *iter) return 0; put_bio: blk_mq_map_bio_put(bio); - return -EINVAL; + return error; } =20 /** diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index dce7615c35e7..94cf146e8ce6 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -378,6 +378,7 @@ enum req_flag_bits { __REQ_DRV, /* for driver use */ __REQ_FS_PRIVATE, /* for file system (submitter) use */ __REQ_ATOMIC, /* for atomic write operations */ + __REQ_P2PDMA, /* contains P2P DMA pages */ /* * Command specific flags, keep last: */ @@ -410,6 +411,7 @@ enum req_flag_bits { #define REQ_DRV (__force blk_opf_t)(1ULL << __REQ_DRV) #define REQ_FS_PRIVATE (__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE) #define REQ_ATOMIC (__force blk_opf_t)(1ULL << __REQ_ATOMIC) +#define REQ_P2PDMA (__force blk_opf_t)(1ULL << __REQ_P2PDMA) =20 #define REQ_NOUNMAP (__force blk_opf_t)(1ULL << __REQ_NOUNMAP) =20 --=20 2.46.2 From nobody Mon Nov 25 12:33:42 2024 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 309CB1DCB0D; Sun, 27 Oct 2024 14:23:17 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730038997; cv=none; b=vCVICtXpRCJn+HQ8Flg846RUiKtiB6BVBxGhsnFcxnltJTx2wzic+CXUNcM2bjfXuSf/lcHmthHSdT8wkBBfhGeyj25fDvbZIfTVJMwOdxVOxYePQ2xGN2OpHRaKxdKobuu6jEpZlMZoIO6azIXFnO1r1OXrZRn375UOC7HQTd8= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730038997; c=relaxed/simple; bh=ZAkEvCDpOQpT4FqrkcEPmIofeDg0rb/b5+lEupSm25s=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=VC74ksCS/CoJ9Wx+FeY81aKcljslT/YyaDwtmUgG+4k2fCpHyI0+aCmLt7kQMEElPcFEfv3ofpF3b7fF02PFmw6E3N1W2t2ItC80aRSzI9BFQ1y2T+Dsgzpc5C6kCmJ0CMlNTbUZYO/gDy9kb6miICcLIsCBWDt9h70g7NNhHIw= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=GU2WiE85; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="GU2WiE85" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 38793C4CEC3; Sun, 27 Oct 2024 14:23:16 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1730038997; bh=ZAkEvCDpOQpT4FqrkcEPmIofeDg0rb/b5+lEupSm25s=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=GU2WiE85QkyrGZabT77HMzH4RzcXBtUoK7DX8razab6E7GlTeZ2LQTeNI7ZRCEXcF +9yMHm9zDo08nuNcPNZNSsuaYkBUL9ipGLwLInfLmk0HejWEa8WPu1R8UqdKOsPV74 Z5wpY7UYyju99OwvD8w7GPbHNbCOK/zLgPk0s2GMr97M3wN8lKofvcDQjgjNlAIopH 
eIYUU6O634Ehr1WlX/W3GKJbeoFI2GpQCheDt+O+VjGAz5sar3RgcEymVxHQmnOdc4 uO+VYU+gZUk1F3VsP3juUi2vi+UCZ40ddUDVpK0TRukr1wrJIDD2IoyIOcFlFa0aTT I1l2OPb8MTV4g== From: Leon Romanovsky To: Jens Axboe , Jason Gunthorpe , Robin Murphy , Joerg Roedel , Will Deacon , Christoph Hellwig , Sagi Grimberg Cc: Keith Busch , Bjorn Helgaas , Logan Gunthorpe , Yishai Hadas , Shameer Kolothum , Kevin Tian , Alex Williamson , Marek Szyprowski , =?UTF-8?q?J=C3=A9r=C3=B4me=20Glisse?= , Andrew Morton , Jonathan Corbet , linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org, linux-block@vger.kernel.org, linux-rdma@vger.kernel.org, iommu@lists.linux.dev, linux-nvme@lists.infradead.org, linux-pci@vger.kernel.org, kvm@vger.kernel.org, linux-mm@kvack.org Subject: [RFC PATCH 3/7] blk-mq: add a dma mapping iterator Date: Sun, 27 Oct 2024 16:21:56 +0200 Message-ID: <9b7c6203fbbae90821f2220dba1bd600e3bd23ba.1730037261.git.leon@kernel.org> X-Mailer: git-send-email 2.46.2 In-Reply-To: References: Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: Christoph Hellwig blk_rq_map_sg is maze of nested loops. Untangle it by creating an iterator that returns [paddr,len] tuples for DMA mapping, and then implement the DMA logic on top of this. This not only removes code at the source level, but also generates nicer binary code: $ size block/blk-merge.o.* text data bss dec hex filename 10001 432 0 10433 28c1 block/blk-merge.o.new 10317 468 0 10785 2a21 block/blk-merge.o.old Last but not least it will be used as a building block for a new DMA mapping helper that doesn't rely on struct scatterlist. Signed-off-by: Christoph Hellwig Signed-off-by: Leon Romanovsky --- block/blk-merge.c | 182 ++++++++++++++++++++-------------------------- 1 file changed, 77 insertions(+), 105 deletions(-) diff --git a/block/blk-merge.c b/block/blk-merge.c index ad763ec313b6..b63fd754a5de 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -451,137 +451,109 @@ unsigned int blk_recalc_rq_segments(struct request = *rq) return nr_phys_segs; } =20 -static inline struct scatterlist *blk_next_sg(struct scatterlist **sg, - struct scatterlist *sglist) +struct phys_vec { + phys_addr_t paddr; + u32 len; +}; + +static bool blk_map_iter_next(struct request *req, + struct req_iterator *iter, struct phys_vec *vec) { - if (!*sg) - return sglist; + unsigned int max_size; + struct bio_vec bv; =20 /* - * If the driver previously mapped a shorter list, we could see a - * termination bit prematurely unless it fully inits the sg table - * on each mapping. We KNOW that there must be more entries here - * or the driver would be buggy, so force clear the termination bit - * to avoid doing a full sg_init_table() in drivers for each command. + * For special payload requests there only is a single segment. Return + * it now and make sure blk_phys_iter_next stop iterating. 
*/ - sg_unmark_end(*sg); - return sg_next(*sg); -} + if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { + if (!iter->bio) + return false; + vec->paddr =3D bvec_phys(&req->special_vec); + vec->len =3D req->special_vec.bv_len; + iter->bio =3D NULL; + return true; + } =20 -static unsigned blk_bvec_map_sg(struct request_queue *q, - struct bio_vec *bvec, struct scatterlist *sglist, - struct scatterlist **sg) -{ - unsigned nbytes =3D bvec->bv_len; - unsigned nsegs =3D 0, total =3D 0; + if (!iter->iter.bi_size) + return false; =20 - while (nbytes > 0) { - unsigned offset =3D bvec->bv_offset + total; - unsigned len =3D get_max_segment_size(&q->limits, - bvec_phys(bvec) + total, nbytes); - struct page *page =3D bvec->bv_page; + bv =3D mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter); + vec->paddr =3D bvec_phys(&bv); + max_size =3D get_max_segment_size(&req->q->limits, vec->paddr, UINT_MAX); + bv.bv_len =3D min(bv.bv_len, max_size); + bio_advance_iter_single(iter->bio, &iter->iter, bv.bv_len); =20 - /* - * Unfortunately a fair number of drivers barf on scatterlists - * that have an offset larger than PAGE_SIZE, despite other - * subsystems dealing with that invariant just fine. For now - * stick to the legacy format where we never present those from - * the block layer, but the code below should be removed once - * these offenders (mostly MMC/SD drivers) are fixed. - */ - page +=3D (offset >> PAGE_SHIFT); - offset &=3D ~PAGE_MASK; + /* + * If we are entirely done with this bi_io_vec entry, check if the next + * one could be merged into it. This typically happens when moving to + * the next bio, but some callers also don't pack bvecs tight. + */ + while (!iter->iter.bi_size || !iter->iter.bi_bvec_done) { + struct bio_vec next; + + if (!iter->iter.bi_size) { + if (!iter->bio->bi_next) + break; + iter->bio =3D iter->bio->bi_next; + iter->iter =3D iter->bio->bi_iter; + } =20 - *sg =3D blk_next_sg(sg, sglist); - sg_set_page(*sg, page, len, offset); + next =3D mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter); + if (bv.bv_len + next.bv_len > max_size || + !biovec_phys_mergeable(req->q, &bv, &next)) + break; =20 - total +=3D len; - nbytes -=3D len; - nsegs++; + bv.bv_len +=3D next.bv_len; + bio_advance_iter_single(iter->bio, &iter->iter, next.bv_len); } =20 - return nsegs; + vec->len =3D bv.bv_len; + return true; } =20 -static inline int __blk_bvec_map_sg(struct bio_vec bv, - struct scatterlist *sglist, struct scatterlist **sg) -{ - *sg =3D blk_next_sg(sg, sglist); - sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset); - return 1; -} +#define blk_phys_to_page(_paddr) \ + (pfn_to_page(__phys_to_pfn(_paddr))) =20 -/* only try to merge bvecs into one sg if they are from two bios */ -static inline bool -__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec, - struct bio_vec *bvprv, struct scatterlist **sg) +static inline struct scatterlist *blk_next_sg(struct scatterlist **sg, + struct scatterlist *sglist) { - - int nbytes =3D bvec->bv_len; - if (!*sg) - return false; - - if ((*sg)->length + nbytes > queue_max_segment_size(q)) - return false; - - if (!biovec_phys_mergeable(q, bvprv, bvec)) - return false; - - (*sg)->length +=3D nbytes; - - return true; -} - -static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio, - struct scatterlist *sglist, - struct scatterlist **sg) -{ - struct bio_vec bvec, bvprv =3D { NULL }; - struct bvec_iter iter; - int nsegs =3D 0; - bool new_bio =3D false; - - for_each_bio(bio) { - bio_for_each_bvec(bvec, bio, iter) { - /* - * Only try to merge bvecs 
from two bios given we - * have done bio internal merge when adding pages - * to bio - */ - if (new_bio && - __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg)) - goto next_bvec; - - if (bvec.bv_offset + bvec.bv_len <=3D PAGE_SIZE) - nsegs +=3D __blk_bvec_map_sg(bvec, sglist, sg); - else - nsegs +=3D blk_bvec_map_sg(q, &bvec, sglist, sg); - next_bvec: - new_bio =3D false; - } - if (likely(bio->bi_iter.bi_size)) { - bvprv =3D bvec; - new_bio =3D true; - } - } + return sglist; =20 - return nsegs; + /* + * If the driver previously mapped a shorter list, we could see a + * termination bit prematurely unless it fully inits the sg table + * on each mapping. We KNOW that there must be more entries here + * or the driver would be buggy, so force clear the termination bit + * to avoid doing a full sg_init_table() in drivers for each command. + */ + sg_unmark_end(*sg); + return sg_next(*sg); } =20 /* - * map a request to scatterlist, return number of sg entries setup. Caller - * must make sure sg can hold rq->nr_phys_segments entries + * Map a request to scatterlist, return number of sg entries setup. Caller + * must make sure sg can hold rq->nr_phys_segments entries. */ int __blk_rq_map_sg(struct request_queue *q, struct request *rq, struct scatterlist *sglist, struct scatterlist **last_sg) { + struct req_iterator iter =3D { + .bio =3D rq->bio, + .iter =3D rq->bio->bi_iter, + }; + struct phys_vec vec; int nsegs =3D 0; =20 - if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) - nsegs =3D __blk_bvec_map_sg(rq->special_vec, sglist, last_sg); - else if (rq->bio) - nsegs =3D __blk_bios_map_sg(q, rq->bio, sglist, last_sg); + while (blk_map_iter_next(rq, &iter, &vec)) { + struct page *page =3D blk_phys_to_page(vec.paddr); + unsigned int offset =3D offset_in_page(vec.paddr); + + *last_sg =3D blk_next_sg(last_sg, sglist); + sg_set_page(*last_sg, page, vec.len, offset); + nsegs++; + } =20 if (*last_sg) sg_mark_end(*last_sg); --=20 2.46.2 From nobody Mon Nov 25 12:33:42 2024 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 1A68B17D358; Sun, 27 Oct 2024 14:23:01 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730038981; cv=none; b=RnEYmrzz9jNw4QsTGzs6RtIWywl45RgNlgfg0Ew7tfUNM1IHb4oh/qLpX7OLBKKOX1qP72nYHHX5x1NWvPovy7J6I4iGGY2bTsQw8fy8TEdXqm+d+LRIAm64SCvcs4oi4LjhGFAzoSt2ce5TYm2jIPbDfV0w8+fiEUmbngSw+Bw= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730038981; c=relaxed/simple; bh=9DR3LFJjJhjhbu472Va//Xe8BphJFty4JMYW8TX4X1I=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=K0tkOXPWv+A++VNSH1f8v79b/jpSH+ez1Q8BO+BvcTDPrq8NNoycyb+0ejz+XXnlUQo2sM1HvugnT2ahWg1H/xBJt4HhGHPajjTE5T5CX+XmmHq7OgO+/XtDCyXQw84oiCOr3tMb+Lzr5a8mmQUFiP7ZJqz7fg5Mbvxf71aRzUk= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=hPuXERdP; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="hPuXERdP" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 2BD05C4CEE5; Sun, 27 Oct 2024 14:23:00 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; 
t=1730038981; bh=9DR3LFJjJhjhbu472Va//Xe8BphJFty4JMYW8TX4X1I=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=hPuXERdPFKyRKnnAub1JPkt8oeVENyiRQqNq5FYQjoxSMzpqfJc+ZPwdfx3HpJ2ko ymCeTGgIafCN3eHEOH4LA53Ephvv179t+7iRHX9jMk5+g3q6etTegx4HBpUpGsZJtJ esLWLUFB4m/qz8vXShOVP9ErGfuP6u+imE23Bd1qqQWqmU3ohI2AWmmjIOz2SdrPeN ALS21r1jA5TJ6duS26kzeQfH3r1ude1LVXDVyvi+e2har7IMxbiyHVvpQ/SoDTkQ44 QBAhVRb6Cbecg847TMNxQ9odCamJhWK3ZUPTmyBaq1+638ICOVMOp/C6RWcDXPstKE 3UzxbmWuiI4rA== From: Leon Romanovsky To: Jens Axboe , Jason Gunthorpe , Robin Murphy , Joerg Roedel , Will Deacon , Christoph Hellwig , Sagi Grimberg Cc: Keith Busch , Bjorn Helgaas , Logan Gunthorpe , Yishai Hadas , Shameer Kolothum , Kevin Tian , Alex Williamson , Marek Szyprowski , =?UTF-8?q?J=C3=A9r=C3=B4me=20Glisse?= , Andrew Morton , Jonathan Corbet , linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org, linux-block@vger.kernel.org, linux-rdma@vger.kernel.org, iommu@lists.linux.dev, linux-nvme@lists.infradead.org, linux-pci@vger.kernel.org, kvm@vger.kernel.org, linux-mm@kvack.org Subject: [RFC PATCH 4/7] blk-mq: add scatterlist-less DMA mapping helpers Date: Sun, 27 Oct 2024 16:21:57 +0200 Message-ID: <383557d0fa1aa393dbab4e1daec94b6cced384ab.1730037261.git.leon@kernel.org> X-Mailer: git-send-email 2.46.2 In-Reply-To: References: Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: Christoph Hellwig Add a new blk_rq_dma_map / blk_rq_dma_unmap pair that does away with the wasteful scatterlist structure. Instead it uses the mapping iterator to either add segments to the IOVA for IOMMU operations, or just maps them one by one for the direct mapping. For the IOMMU case instead of a scatterlist with an entry for each segment, only a single [dma_addr,len] pair needs to be stored for processing a request, and for the direct mapping the per-segment allocation shrinks from [page,offset,len,dma_addr,dma_len] to just [dma_addr,len]. The major downside of this API is that the IOVA collapsing only works when the driver sets a virt_boundary that matches the IOMMU granule. Note that struct blk_dma_vec, struct blk_dma_mapping and blk_rq_dma_unmap aren't really block specific, but for they are kept with the only mapping routine to keep things simple. Signed-off-by: Christoph Hellwig Signed-off-by: Leon Romanovsky --- block/blk-merge.c | 163 +++++++++++++++++++++++++++++++++++++ include/linux/blk-mq-dma.h | 64 +++++++++++++++ 2 files changed, 227 insertions(+) create mode 100644 include/linux/blk-mq-dma.h diff --git a/block/blk-merge.c b/block/blk-merge.c index b63fd754a5de..77e5a3d208fc 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -7,6 +7,8 @@ #include #include #include +#include +#include #include #include #include @@ -515,6 +517,167 @@ static bool blk_map_iter_next(struct request *req, #define blk_phys_to_page(_paddr) \ (pfn_to_page(__phys_to_pfn(_paddr))) =20 +/* + * The IOVA-based DMA API wants to be able to coalesce at the minimal IOMM= U page + * size granularity (which is guaranteed to be <=3D PAGE_SIZE and usually = 4k), so + * we need to ensure our segments are aligned to this as well. + * + * Note that there is no point in using the slightly more complicated IOVA= based + * path for single segment mappings. 
+ */ +static inline bool blk_can_dma_map_iova(struct request *req, + struct device *dma_dev) +{ + return !((queue_virt_boundary(req->q) + 1) & + dma_get_merge_boundary(dma_dev)); +} + +static bool blk_dma_map_bus(struct request *req, struct device *dma_dev, + struct blk_dma_iter *iter, struct phys_vec *vec) +{ + iter->addr =3D pci_p2pdma_bus_addr_map(&iter->p2pdma, vec->paddr); + iter->len =3D vec->len; + return true; +} + +static bool blk_dma_map_direct(struct request *req, struct device *dma_dev, + struct blk_dma_iter *iter, struct phys_vec *vec) +{ + iter->addr =3D dma_map_page(dma_dev, blk_phys_to_page(vec->paddr), + offset_in_page(vec->paddr), vec->len, rq_dma_dir(req)); + if (dma_mapping_error(dma_dev, iter->addr)) { + iter->status =3D BLK_STS_RESOURCE; + return false; + } + iter->len =3D vec->len; + return true; +} + +static bool blk_rq_dma_map_iova(struct request *req, struct device *dma_de= v, + struct dma_iova_state *state, struct blk_dma_iter *iter, + struct phys_vec *vec) +{ + enum dma_data_direction dir =3D rq_dma_dir(req); + unsigned int mapped =3D 0; + int error =3D 0; + + iter->addr =3D state->addr; + iter->len =3D dma_iova_size(state); + + do { + error =3D dma_iova_link(dma_dev, state, vec->paddr, mapped, + vec->len, dir, 0); + if (error) + break; + mapped +=3D vec->len; + } while (blk_map_iter_next(req, &iter->iter, vec)); + + error =3D dma_iova_sync(dma_dev, state, 0, mapped, error); + if (error) { + iter->status =3D errno_to_blk_status(error); + return false; + } + + return true; +} + +/** + * blk_rq_dma_map_iter_start - map the first DMA segment for a request + * @req: request to map + * @dma_dev: device to map to + * @state: DMA IOVA state + * @iter: block layer DMA iterator + * + * Start DMA mapping @req to @dma_dev. @state and @iter are provided by t= he + * caller and don't need to be initialized. @state needs to be stored for= use + * at unmap time, @iter is only needed at map time. + * + * Returns %false if there is no segment to map, including due to an error= , or + * %true it it did map a segment. + * + * If a segment was mapped, the DMA address for it is returned in @iter.ad= dr and + * the length in @iter.len. If no segment was mapped the status code is + * returned in @iter.status. + * + * The caller can call blk_rq_dma_map_coalesce() to check if further segme= nts + * need to be mapped after this, or go straight to blk_rq_dma_map_iter_nex= t() + * to try to map the following segments. + */ +bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev, + struct dma_iova_state *state, struct blk_dma_iter *iter) +{ + unsigned int total_len =3D blk_rq_payload_bytes(req); + struct phys_vec vec; + + iter->iter.bio =3D req->bio; + iter->iter.iter =3D req->bio->bi_iter; + memset(&iter->p2pdma, 0, sizeof(iter->p2pdma)); + iter->status =3D BLK_STS_OK; + + /* + * Grab the first segment ASAP because we'll need it to check for P2P + * transfers. + */ + if (!blk_map_iter_next(req, &iter->iter, &vec)) + return false; + + if (IS_ENABLED(CONFIG_PCI_P2PDMA) && (req->cmd_flags & REQ_P2PDMA)) { + switch (pci_p2pdma_state(&iter->p2pdma, dma_dev, + blk_phys_to_page(vec.paddr))) { + case PCI_P2PDMA_MAP_BUS_ADDR: + return blk_dma_map_bus(req, dma_dev, iter, &vec); + case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: + /* + * P2P transfers through the host bridge are treated the + * same as non-P2P transfers below and during unmap. 
+ */ + req->cmd_flags &=3D ~REQ_P2PDMA; + break; + default: + iter->status =3D BLK_STS_INVAL; + return false; + } + } + + if (blk_can_dma_map_iova(req, dma_dev) && + dma_iova_try_alloc(dma_dev, state, vec.paddr, total_len)) + return blk_rq_dma_map_iova(req, dma_dev, state, iter, &vec); + return blk_dma_map_direct(req, dma_dev, iter, &vec); +} +EXPORT_SYMBOL_GPL(blk_rq_dma_map_iter_start); + +/** + * blk_rq_dma_map_iter_next - map the next DMA segment for a request + * @req: request to map + * @dma_dev: device to map to + * @state: DMA IOVA state + * @iter: block layer DMA iterator + * + * Iterate to the next mapping after a previous call to + * blk_rq_dma_map_iter_start(). See there for a detailed description of t= he + * arguments. + * + * Returns %false if there is no segment to map, including due to an error= , or + * %true it it did map a segment. + * + * If a segment was mapped, the DMA address for it is returned in @iter.ad= dr and + * the length in @iter.len. If no segment was mapped the status code is + * returned in @iter.status. + */ +bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev, + struct dma_iova_state *state, struct blk_dma_iter *iter) +{ + struct phys_vec vec; + + if (!blk_map_iter_next(req, &iter->iter, &vec)) + return false; + + if (iter->p2pdma.map =3D=3D PCI_P2PDMA_MAP_BUS_ADDR) + return blk_dma_map_bus(req, dma_dev, iter, &vec); + return blk_dma_map_direct(req, dma_dev, iter, &vec); +} +EXPORT_SYMBOL_GPL(blk_rq_dma_map_iter_next); + static inline struct scatterlist *blk_next_sg(struct scatterlist **sg, struct scatterlist *sglist) { diff --git a/include/linux/blk-mq-dma.h b/include/linux/blk-mq-dma.h new file mode 100644 index 000000000000..f1dae14e8203 --- /dev/null +++ b/include/linux/blk-mq-dma.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef BLK_MQ_DMA_H +#define BLK_MQ_DMA_H + +#include +#include + +struct blk_dma_iter { + /* Output address range for this iteration */ + dma_addr_t addr; + u32 len; + + /* Status code. Only valid when blk_rq_dma_map_iter_* returned false */ + blk_status_t status; + + /* Internal to blk_rq_dma_map_iter_* */ + struct req_iterator iter; + struct pci_p2pdma_map_state p2pdma; +}; + +bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev, + struct dma_iova_state *state, struct blk_dma_iter *iter); +bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev, + struct dma_iova_state *state, struct blk_dma_iter *iter); + +/** + * blk_rq_dma_map_coalesce - were all segments coalesced? + * @state: DMA state to check + * + * Returns true if blk_rq_dma_map_iter_start coalesced all segments into a + * single DMA range. + */ +static inline bool blk_rq_dma_map_coalesce(struct dma_iova_state *state) +{ + return dma_use_iova(state); +} + +/** + * blk_rq_dma_map_coalesce - try to DMA unmap a request + * @req: request to unmap + * @dma_dev: device to unmap from + * @state: DMA IOVA state + * + * Returns %false if the callers needs to manually unmap every DMA segment + * mapped using @iter or %true if no work is left to be done. 
+ */ +static inline bool blk_rq_dma_unmap(struct request *req, struct device *dm= a_dev, + struct dma_iova_state *state) +{ + if (req->cmd_flags & REQ_P2PDMA) + return true; + + if (dma_use_iova(state)) { + dma_iova_destroy(dma_dev, state, rq_dma_dir(req), 0); + return true; + } + + if (!dma_need_unmap(dma_dev)) + return true; + + return false; +} + +#endif /* BLK_MQ_DMA_H */ --=20 2.46.2 From nobody Mon Nov 25 12:33:42 2024 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 1AEC71DA61D; Sun, 27 Oct 2024 14:23:05 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730038985; cv=none; b=ScuX6eGFa49JR7MZsBy9Gml9TIpRPjzebJA+EiYAeRoyG/VP4rB7aT+FVDJ4HH9XMnM60dscr+h2xLZkJF810hTsWttopY+wmZTEdPuSG4htsFxsQVflfSlmCAyUEBdHsY3NdFxYhdcqHWVUYzQH3Bv7lZstUkmtzzUvaU7rkos= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730038985; c=relaxed/simple; bh=tSv2Q/UIDmsmBR/B+0LlfVehWktc2fzMNx0dgOtrb5I=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=LY41dlfondHJflQD1rd3R0BdhzwTb5sJcfQsnApfRBV6P1wj5pi7g1t3ZtwKSJFTfDWW//i2yxdS+tXqeZnNO2eld53TZUMFcJEDAjjNFQhfTfNyN58N+ZtM0HW7aMLT6E+O4Ya5WhShwRbfY3HIIMWltF/Jlgsujfvuhpl8d3w= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=fcGU8E7n; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="fcGU8E7n" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 30CB6C4CEC3; Sun, 27 Oct 2024 14:23:04 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1730038985; bh=tSv2Q/UIDmsmBR/B+0LlfVehWktc2fzMNx0dgOtrb5I=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=fcGU8E7nqAZ/TN3lckAgvc+gp+UfN38AiQA0fW96+skp6qRwatz6+Tp+Y9GZAXoZv 32gYhCzvwkTALLTPnjKbgOaQPHt8sGsNEdt9KJbNhKApAh7jv24SRFA3hGiLfrQYgb G/Po80vNtP9F8BeE8e4uWTkMLLm5fivWyKQHJxEIt/mCjmNIb97v/FnvfgqfoT4TTh rz7W2/MVey+NJeDPA+D1i7fdC/b2byuhn0NKXfgpQS+wRQ6AryyTiMSut6bA7FGvnc bh/vGMOJ+JsUoHC7zVNhazkcjoWtkM0V5Idz20gkDCG7oEUw/cZU4OFtBrphpw8Tfp o+KYa1jtD4fhg== From: Leon Romanovsky To: Jens Axboe , Jason Gunthorpe , Robin Murphy , Joerg Roedel , Will Deacon , Christoph Hellwig , Sagi Grimberg Cc: Keith Busch , Bjorn Helgaas , Logan Gunthorpe , Yishai Hadas , Shameer Kolothum , Kevin Tian , Alex Williamson , Marek Szyprowski , =?UTF-8?q?J=C3=A9r=C3=B4me=20Glisse?= , Andrew Morton , Jonathan Corbet , linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org, linux-block@vger.kernel.org, linux-rdma@vger.kernel.org, iommu@lists.linux.dev, linux-nvme@lists.infradead.org, linux-pci@vger.kernel.org, kvm@vger.kernel.org, linux-mm@kvack.org Subject: [RFC PATCH 5/7] nvme-pci: remove struct nvme_descriptor Date: Sun, 27 Oct 2024 16:21:58 +0200 Message-ID: <31fe216877a270c00bc79cddace3a9f4b3ade50c.1730037261.git.leon@kernel.org> X-Mailer: git-send-email 2.46.2 In-Reply-To: References: Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: Christoph Hellwig There is no real point 
in having a union of two pointer types here, just use a void pointer as we mix and match types between the arms of the union between the allocation and freeing side already. Also rename the nr_allocations field to nr_descriptors to better describe what it does. Signed-off-by: Christoph Hellwig Signed-off-by: Leon Romanovsky --- drivers/nvme/host/pci.c | 51 ++++++++++++++++++----------------------- 1 file changed, 22 insertions(+), 29 deletions(-) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 4b9fda0b1d9a..ba077a42cbba 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -43,7 +43,8 @@ */ #define NVME_MAX_KB_SZ 8192 #define NVME_MAX_SEGS 128 -#define NVME_MAX_NR_ALLOCATIONS 5 + +#define NVME_MAX_NR_DESCRIPTORS 5 =20 static int use_threaded_interrupts; module_param(use_threaded_interrupts, int, 0444); @@ -216,28 +217,20 @@ struct nvme_queue { struct completion delete_done; }; =20 -union nvme_descriptor { - struct nvme_sgl_desc *sg_list; - __le64 *prp_list; -}; - /* * The nvme_iod describes the data in an I/O. - * - * The sg pointer contains the list of PRP/SGL chunk allocations in additi= on - * to the actual struct scatterlist. */ struct nvme_iod { struct nvme_request req; struct nvme_command cmd; bool aborted; - s8 nr_allocations; /* PRP list pool allocations. 0 means small - pool in use */ + /* # of PRP/SGL descriptors: (0 for small pool) */ + s8 nr_descriptors; unsigned int dma_len; /* length of single DMA segment mapping */ dma_addr_t first_dma; dma_addr_t meta_dma; struct sg_table sgt; - union nvme_descriptor list[NVME_MAX_NR_ALLOCATIONS]; + void *descriptors[NVME_MAX_NR_DESCRIPTORS]; }; =20 static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev) @@ -528,8 +521,8 @@ static void nvme_free_prps(struct nvme_dev *dev, struct= request *req) dma_addr_t dma_addr =3D iod->first_dma; int i; =20 - for (i =3D 0; i < iod->nr_allocations; i++) { - __le64 *prp_list =3D iod->list[i].prp_list; + for (i =3D 0; i < iod->nr_descriptors; i++) { + __le64 *prp_list =3D iod->descriptors[i]; dma_addr_t next_dma_addr =3D le64_to_cpu(prp_list[last_prp]); =20 dma_pool_free(dev->prp_page_pool, prp_list, dma_addr); @@ -551,11 +544,11 @@ static void nvme_unmap_data(struct nvme_dev *dev, str= uct request *req) =20 dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); =20 - if (iod->nr_allocations =3D=3D 0) - dma_pool_free(dev->prp_small_pool, iod->list[0].sg_list, + if (iod->nr_descriptors =3D=3D 0) + dma_pool_free(dev->prp_small_pool, iod->descriptors[0], iod->first_dma); - else if (iod->nr_allocations =3D=3D 1) - dma_pool_free(dev->prp_page_pool, iod->list[0].sg_list, + else if (iod->nr_descriptors =3D=3D 1) + dma_pool_free(dev->prp_page_pool, iod->descriptors[0], iod->first_dma); else nvme_free_prps(dev, req); @@ -613,18 +606,18 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_d= ev *dev, nprps =3D DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE); if (nprps <=3D (256 / 8)) { pool =3D dev->prp_small_pool; - iod->nr_allocations =3D 0; + iod->nr_descriptors =3D 0; } else { pool =3D dev->prp_page_pool; - iod->nr_allocations =3D 1; + iod->nr_descriptors =3D 1; } =20 prp_list =3D dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); if (!prp_list) { - iod->nr_allocations =3D -1; + iod->nr_descriptors =3D -1; return BLK_STS_RESOURCE; } - iod->list[0].prp_list =3D prp_list; + iod->descriptors[0] =3D prp_list; iod->first_dma =3D prp_dma; i =3D 0; for (;;) { @@ -633,7 +626,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev= *dev, prp_list =3D dma_pool_alloc(pool, 
GFP_ATOMIC, &prp_dma); if (!prp_list) goto free_prps; - iod->list[iod->nr_allocations++].prp_list =3D prp_list; + iod->descriptors[iod->nr_descriptors++] =3D prp_list; prp_list[0] =3D old_prp_list[i - 1]; old_prp_list[i - 1] =3D cpu_to_le64(prp_dma); i =3D 1; @@ -703,19 +696,19 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_d= ev *dev, =20 if (entries <=3D (256 / sizeof(struct nvme_sgl_desc))) { pool =3D dev->prp_small_pool; - iod->nr_allocations =3D 0; + iod->nr_descriptors =3D 0; } else { pool =3D dev->prp_page_pool; - iod->nr_allocations =3D 1; + iod->nr_descriptors =3D 1; } =20 sg_list =3D dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma); if (!sg_list) { - iod->nr_allocations =3D -1; + iod->nr_descriptors =3D -1; return BLK_STS_RESOURCE; } =20 - iod->list[0].sg_list =3D sg_list; + iod->descriptors[0] =3D sg_list; iod->first_dma =3D sgl_dma; =20 nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries); @@ -841,7 +834,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, = struct request *req) blk_status_t ret; =20 iod->aborted =3D false; - iod->nr_allocations =3D -1; + iod->nr_descriptors =3D -1; iod->sgt.nents =3D 0; =20 ret =3D nvme_setup_cmd(req->q->queuedata, req); @@ -3626,7 +3619,7 @@ static int __init nvme_init(void) BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2); BUILD_BUG_ON(NVME_MAX_SEGS > SGES_PER_PAGE); BUILD_BUG_ON(sizeof(struct scatterlist) * NVME_MAX_SEGS > PAGE_SIZE); - BUILD_BUG_ON(nvme_pci_npages_prp() > NVME_MAX_NR_ALLOCATIONS); + BUILD_BUG_ON(nvme_pci_npages_prp() > NVME_MAX_NR_DESCRIPTORS); =20 return pci_register_driver(&nvme_driver); } --=20 2.46.2 From nobody Mon Nov 25 12:33:42 2024 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 1B68117E017; Sun, 27 Oct 2024 14:23:09 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730038989; cv=none; b=qh9piPMx7JygPGCTCiD7kEqQqI/BBumC+GKfE2tCv8bL++Dy6/I6qSKyDnfNov/ZvSmed7HUzPLWN9982+th4L7/I1+G+lb9jXT/qzb5g6NN46M/OjBkFvymdpZ7pk/UbOMyxUdxbINYfHpHmwDc9bihdQkC0pYbjuk6k3fhdBY= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730038989; c=relaxed/simple; bh=ApIelQA2oAfFsx+G9CAFnUZptSSapywa7pTJgnwTMSc=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=Ccv2A8CMMNvx3ajGDc8EmDuDGHBURlT+FrC266V/4KTfxNh50ByTdMCuPSJY3cGldRmMIJfHEradprB1vxCeYbxBfv6ULytlCQTYh2mZZIN3xrWEiXNxP387e1w9PsYQfzlw3ATLzWf27qQQ224YBq5ohIhaLZlreBbgr6t9DSY= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=eiJIF0ZF; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="eiJIF0ZF" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 29F16C4CEC3; Sun, 27 Oct 2024 14:23:08 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1730038989; bh=ApIelQA2oAfFsx+G9CAFnUZptSSapywa7pTJgnwTMSc=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=eiJIF0ZFLHGKP8f4jSGly33bDKFCG2di1eSRLyav3A3c0jDbi3wFuI/4gTIyAJqsP 6BsfaifyFf9MsSzeY+5fbfpU0b6hcssFg4v/xfeYYw6VwQWGLLeo3B/keDpUA4VwIn +NmMzdDiyGF3OZZgTiwOFjAj7pV52518FgsFTHu7WSP5J++G+dgRues27OYr/5xhtM 
pnBlX75XHyWUZvAl9+fd09fuOyo/S0uvjf0rdnRkSmwZeoJZcK471IFO5l/4Q3+qwV p+epEDOkoend5m1LL4+a9d5gYc5UJxwnsS3xd1VeOpI9zP/h1Z21Z2tQwfEqqppaO5 FYRlcndyyO9gg== From: Leon Romanovsky To: Jens Axboe , Jason Gunthorpe , Robin Murphy , Joerg Roedel , Will Deacon , Christoph Hellwig , Sagi Grimberg Cc: Keith Busch , Bjorn Helgaas , Logan Gunthorpe , Yishai Hadas , Shameer Kolothum , Kevin Tian , Alex Williamson , Marek Szyprowski , =?UTF-8?q?J=C3=A9r=C3=B4me=20Glisse?= , Andrew Morton , Jonathan Corbet , linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org, linux-block@vger.kernel.org, linux-rdma@vger.kernel.org, iommu@lists.linux.dev, linux-nvme@lists.infradead.org, linux-pci@vger.kernel.org, kvm@vger.kernel.org, linux-mm@kvack.org Subject: [RFC PATCH 6/7] nvme-pci: use a better encoding for small prp pool allocations Date: Sun, 27 Oct 2024 16:21:59 +0200 Message-ID: X-Mailer: git-send-email 2.46.2 In-Reply-To: References: Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: Christoph Hellwig There is plenty of unused space in the iod next to nr_descriptors. Add a separate bool (which could be condensed to a single bit once we start running out of space) to encode that the transfer is using the full page sized pool, and use a normal 0..n count for the number of descriptors. Signed-off-by: Christoph Hellwig Signed-off-by: Leon Romanovsky --- drivers/nvme/host/pci.c | 85 +++++++++++++++++++---------------------- 1 file changed, 40 insertions(+), 45 deletions(-) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index ba077a42cbba..79cd65a5f311 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -45,6 +45,7 @@ #define NVME_MAX_SEGS 128 =20 #define NVME_MAX_NR_DESCRIPTORS 5 +#define NVME_SMALL_DESCRIPTOR_SIZE 256 =20 static int use_threaded_interrupts; module_param(use_threaded_interrupts, int, 0444); @@ -224,8 +225,8 @@ struct nvme_iod { struct nvme_request req; struct nvme_command cmd; bool aborted; - /* # of PRP/SGL descriptors: (0 for small pool) */ - s8 nr_descriptors; + u8 nr_descriptors; /* # of PRP/SGL descriptors */ + bool large_descriptors; /* uses the full page sized descriptor pool */ unsigned int dma_len; /* length of single DMA segment mapping */ dma_addr_t first_dma; dma_addr_t meta_dma; @@ -514,13 +515,27 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev = *dev, struct request *req, return true; } =20 -static void nvme_free_prps(struct nvme_dev *dev, struct request *req) +static inline struct dma_pool *nvme_dma_pool(struct nvme_dev *dev, + struct nvme_iod *iod) +{ + if (iod->large_descriptors) + return dev->prp_page_pool; + return dev->prp_small_pool; +} + +static void nvme_free_descriptors(struct nvme_dev *dev, struct request *re= q) { const int last_prp =3D NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1; struct nvme_iod *iod =3D blk_mq_rq_to_pdu(req); dma_addr_t dma_addr =3D iod->first_dma; int i; =20 + if (iod->nr_descriptors =3D=3D 1) { + dma_pool_free(nvme_dma_pool(dev, iod), iod->descriptors[0], + dma_addr); + return; + } + for (i =3D 0; i < iod->nr_descriptors; i++) { __le64 *prp_list =3D iod->descriptors[i]; dma_addr_t next_dma_addr =3D le64_to_cpu(prp_list[last_prp]); @@ -543,15 +558,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, stru= ct request *req) WARN_ON_ONCE(!iod->sgt.nents); =20 dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); - - if (iod->nr_descriptors =3D=3D 0) 
-	dma_pool_free(dev->prp_small_pool, iod->descriptors[0],
-		      iod->first_dma);
-	else if (iod->nr_descriptors == 1)
-		dma_pool_free(dev->prp_page_pool, iod->descriptors[0],
-			      iod->first_dma);
-	else
-		nvme_free_prps(dev, req);
+	nvme_free_descriptors(dev, req);
 	mempool_free(iod->sgt.sgl, dev->iod_mempool);
 }
 
@@ -573,7 +580,6 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 		struct request *req, struct nvme_rw_command *cmnd)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	struct dma_pool *pool;
 	int length = blk_rq_payload_bytes(req);
 	struct scatterlist *sg = iod->sgt.sgl;
 	int dma_len = sg_dma_len(sg);
@@ -581,7 +587,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 	int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
 	__le64 *prp_list;
 	dma_addr_t prp_dma;
-	int nprps, i;
+	int i;
 
 	length -= (NVME_CTRL_PAGE_SIZE - offset);
 	if (length <= 0) {
@@ -603,27 +609,23 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 		goto done;
 	}
 
-	nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
-	if (nprps <= (256 / 8)) {
-		pool = dev->prp_small_pool;
-		iod->nr_descriptors = 0;
-	} else {
-		pool = dev->prp_page_pool;
-		iod->nr_descriptors = 1;
-	}
+	if (DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE) >
+	    NVME_SMALL_DESCRIPTOR_SIZE / sizeof(__le64))
+		iod->large_descriptors = true;
 
-	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
-	if (!prp_list) {
-		iod->nr_descriptors = -1;
+	prp_list = dma_pool_alloc(nvme_dma_pool(dev, iod), GFP_ATOMIC,
+			&prp_dma);
+	if (!prp_list)
 		return BLK_STS_RESOURCE;
-	}
-	iod->descriptors[0] = prp_list;
+	iod->descriptors[iod->nr_descriptors++] = prp_list;
 	iod->first_dma = prp_dma;
 	i = 0;
 	for (;;) {
 		if (i == NVME_CTRL_PAGE_SIZE >> 3) {
 			__le64 *old_prp_list = prp_list;
-			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+
+			prp_list = dma_pool_alloc(dev->prp_page_pool,
+					GFP_ATOMIC, &prp_dma);
 			if (!prp_list)
 				goto free_prps;
 			iod->descriptors[iod->nr_descriptors++] = prp_list;
@@ -650,7 +652,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
 	return BLK_STS_OK;
 free_prps:
-	nvme_free_prps(dev, req);
+	nvme_free_descriptors(dev, req);
 	return BLK_STS_RESOURCE;
 bad_sgl:
 	WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents),
@@ -679,7 +681,6 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
 		struct request *req, struct nvme_rw_command *cmd)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	struct dma_pool *pool;
 	struct nvme_sgl_desc *sg_list;
 	struct scatterlist *sg = iod->sgt.sgl;
 	unsigned int entries = iod->sgt.nents;
@@ -694,21 +695,13 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
 		return BLK_STS_OK;
 	}
 
-	if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
-		pool = dev->prp_small_pool;
-		iod->nr_descriptors = 0;
-	} else {
-		pool = dev->prp_page_pool;
-		iod->nr_descriptors = 1;
-	}
+	if (entries > NVME_SMALL_DESCRIPTOR_SIZE / sizeof(*sg_list))
+		iod->large_descriptors = true;
 
-	sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
-	if (!sg_list) {
-		iod->nr_descriptors = -1;
+	sg_list = dma_pool_alloc(nvme_dma_pool(dev, iod), GFP_ATOMIC, &sgl_dma);
+	if (!sg_list)
 		return BLK_STS_RESOURCE;
-	}
-
-	iod->descriptors[0] = sg_list;
+	iod->descriptors[iod->nr_descriptors++] = sg_list;
 	iod->first_dma = sgl_dma;
 
 	nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);
@@ -834,7 +827,8 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
 	blk_status_t ret;
 
 	iod->aborted = false;
-	iod->nr_descriptors = -1;
+	iod->nr_descriptors = 0;
+	iod->large_descriptors = false;
 	iod->sgt.nents = 0;
 
 	ret = nvme_setup_cmd(req->q->queuedata, req);
@@ -2694,7 +2688,8 @@ static int nvme_setup_prp_pools(struct nvme_dev *dev)
 
 	/* Optimisation for I/Os between 4k and 128k */
 	dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
-						256, 256, 0);
+						NVME_SMALL_DESCRIPTOR_SIZE,
+						NVME_SMALL_DESCRIPTOR_SIZE, 0);
 	if (!dev->prp_small_pool) {
 		dma_pool_destroy(dev->prp_page_pool);
 		return -ENOMEM;
-- 
2.46.2

From nobody Mon Nov 25 12:33:42 2024
From: Leon Romanovsky
To: Jens Axboe, Jason Gunthorpe, Robin Murphy, Joerg Roedel, Will Deacon,
	Christoph Hellwig, Sagi Grimberg
Cc: Keith Busch, Bjorn Helgaas, Logan Gunthorpe, Yishai Hadas,
	Shameer Kolothum, Kevin Tian, Alex Williamson, Marek Szyprowski,
	Jérôme Glisse, Andrew Morton, Jonathan Corbet,
	linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-block@vger.kernel.org, linux-rdma@vger.kernel.org,
	iommu@lists.linux.dev, linux-nvme@lists.infradead.org,
	linux-pci@vger.kernel.org, kvm@vger.kernel.org, linux-mm@kvack.org
Subject: [RFC PATCH 7/7] nvme-pci: convert to blk_rq_dma_map
Date: Sun, 27 Oct 2024 16:22:00 +0200
Message-ID: <6038b47007ae804f0795e5f9d9cbc9c4a63a15b2.1730037261.git.leon@kernel.org>
X-Mailer: git-send-email 2.46.2
In-Reply-To:
References:
MIME-Version: 1.0
Content-Type: text/plain; charset="utf-8"

From: Christoph Hellwig

Use the blk_rq_dma_map API to DMA map requests instead of scatterlists.
This also removes the fast path single segment code as the blk_rq_dma_map
naturally inlines single IOVA segment mappings into the preallocated
structure.

Signed-off-by: Christoph Hellwig
Signed-off-by: Leon Romanovsky
---
 drivers/nvme/host/pci.c | 382 +++++++++++++++++++++-------------------
 1 file changed, 205 insertions(+), 177 deletions(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 79cd65a5f311..f41db1efecb1 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -7,7 +7,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
@@ -27,7 +27,6 @@
 #include
 #include
 #include
-#include
 
 #include "trace.h"
 #include "nvme.h"
@@ -227,10 +226,9 @@ struct nvme_iod {
 	bool aborted;
 	u8 nr_descriptors;	/* # of PRP/SGL descriptors */
 	bool large_descriptors;	/* uses the full page sized descriptor pool */
-	unsigned int dma_len;	/* length of single DMA segment mapping */
-	dma_addr_t first_dma;
+	unsigned int total_len;	/* length of the entire transfer */
 	dma_addr_t meta_dma;
-	struct sg_table sgt;
+	struct dma_iova_state dma_state;
 	void *descriptors[NVME_MAX_NR_DESCRIPTORS];
 };
 
@@ -527,9 +525,14 @@ static void nvme_free_descriptors(struct nvme_dev *dev, struct request *req)
 {
 	const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	dma_addr_t dma_addr = iod->first_dma;
+	dma_addr_t dma_addr;
 	int i;
 
+	if (iod->cmd.common.flags & NVME_CMD_SGL_METABUF)
+		dma_addr = le64_to_cpu(iod->cmd.common.dptr.sgl.addr);
+	else
+		dma_addr = le64_to_cpu(iod->cmd.common.dptr.prp2);
+
 	if (iod->nr_descriptors == 1) {
 		dma_pool_free(nvme_dma_pool(dev, iod), iod->descriptors[0],
 				dma_addr);
@@ -545,67 +548,143 @@ static void nvme_free_descriptors(struct nvme_dev *dev, struct request *req)
 	}
 }
 
-static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-
-	if (iod->dma_len) {
-		dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
-			       rq_dma_dir(req));
+	enum dma_data_direction dir = rq_dma_dir(req);
+	int length = iod->total_len;
+	dma_addr_t dma_addr;
+	int prp_len, nprps, i, desc;
+	__le64 *prp_list;
+	dma_addr_t dma_start;
+	u32 dma_len;
+
+	dma_addr = le64_to_cpu(iod->cmd.common.dptr.prp1);
+	prp_len = NVME_CTRL_PAGE_SIZE - (dma_addr & (NVME_CTRL_PAGE_SIZE - 1));
+	prp_len = min(length, prp_len);
+	length -= prp_len;
+	if (!length) {
+		dma_unmap_page(dev->dev, dma_addr, prp_len, dir);
 		return;
 	}
 
-	WARN_ON_ONCE(!iod->sgt.nents);
+	dma_start = dma_addr;
+	dma_len = prp_len;
 
-	dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
-	nvme_free_descriptors(dev, req);
-	mempool_free(iod->sgt.sgl, dev->iod_mempool);
+	dma_addr = le64_to_cpu(iod->cmd.common.dptr.prp2);
+	if (length <= NVME_CTRL_PAGE_SIZE) {
+		if (dma_addr != dma_start + dma_len) {
+			dma_unmap_page(dev->dev, dma_start, dma_len, dir);
+			dma_start = dma_addr;
+			dma_len = 0;
+		}
+		dma_len += length;
+		goto done;
+	}
+
+	nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
+	i = 0;
+	desc = 0;
+	prp_list = iod->descriptors[desc];
+	do {
+		if (i == NVME_CTRL_PAGE_SIZE >> 3) {
+			prp_list = iod->descriptors[++desc];
+			i = 0;
+		}
+
+		dma_addr = le64_to_cpu(prp_list[i++]);
+		if (dma_addr != dma_start + dma_len) {
+			dma_unmap_page(dev->dev, dma_start, dma_len, dir);
+			dma_start = dma_addr;
+			dma_len = 0;
+		}
+		prp_len = min(length, NVME_CTRL_PAGE_SIZE);
+		dma_len += prp_len;
+		length -= prp_len;
+	} while (length);
+done:
+	dma_unmap_page(dev->dev, dma_start, dma_len, dir);
 }
 
-static void nvme_print_sgl(struct scatterlist *sgl, int nents)
+static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
 {
-	int i;
-	struct scatterlist *sg;
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	dma_addr_t sqe_dma_addr = le64_to_cpu(iod->cmd.common.dptr.sgl.addr);
+	unsigned int sqe_dma_len = le32_to_cpu(iod->cmd.common.dptr.sgl.length);
+	struct nvme_sgl_desc *sg_list = iod->descriptors[0];
+	enum dma_data_direction dir = rq_dma_dir(req);
+
+	if (iod->nr_descriptors) {
+		unsigned int nr_entries = sqe_dma_len / sizeof(*sg_list), i;
+
+		for (i = 0; i < nr_entries; i++)
+			dma_unmap_page(dev->dev, le64_to_cpu(sg_list[i].addr),
+				le32_to_cpu(sg_list[i].length), dir);
+	} else {
+		dma_unmap_page(dev->dev, sqe_dma_addr, sqe_dma_len, dir);
+	}
+}
+
+static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
-	for_each_sg(sgl, sg, nents, i) {
-		dma_addr_t phys = sg_phys(sg);
-		pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
-			"dma_address:%pad dma_length:%d\n",
-			i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
-			sg_dma_len(sg));
+	if (!blk_rq_dma_unmap(req, dev->dev, &iod->dma_state)) {
+		if (iod->cmd.common.flags & NVME_CMD_SGL_METABUF)
+			nvme_free_sgls(dev, req);
+		else
+			nvme_free_prps(dev, req);
 	}
+
+	if (iod->nr_descriptors)
+		nvme_free_descriptors(dev, req);
 }
 
 static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 		struct request *req, struct nvme_rw_command *cmnd)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	int length = blk_rq_payload_bytes(req);
-	struct scatterlist *sg = iod->sgt.sgl;
-	int dma_len = sg_dma_len(sg);
-	u64 dma_addr = sg_dma_address(sg);
-	int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
+	unsigned int length = blk_rq_payload_bytes(req);
+	struct blk_dma_iter iter;
+	dma_addr_t prp1_dma, prp2_dma = 0;
+	unsigned int prp_len, i;
 	__le64 *prp_list;
-	dma_addr_t prp_dma;
-	int i;
 
-	length -= (NVME_CTRL_PAGE_SIZE - offset);
-	if (length <= 0) {
-		iod->first_dma = 0;
+	if (!blk_rq_dma_map_iter_start(req, dev->dev, &iod->dma_state, &iter))
+		return iter.status;
+
+	/*
+	 * PRP1 always points to the start of the DMA transfers.
+	 *
+	 * This is the only PRP (except for the list entries) that could be
+	 * non-aligned.
+	 */
+	prp1_dma = iter.addr;
+	prp_len = min(length, NVME_CTRL_PAGE_SIZE -
+			(iter.addr & (NVME_CTRL_PAGE_SIZE - 1)));
+	iod->total_len += prp_len;
+	iter.addr += prp_len;
+	iter.len -= prp_len;
+	length -= prp_len;
+	if (!length)
 		goto done;
-	}
 
-	dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
-	if (dma_len) {
-		dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
-	} else {
-		sg = sg_next(sg);
-		dma_addr = sg_dma_address(sg);
-		dma_len = sg_dma_len(sg);
+	if (!iter.len) {
+		if (!blk_rq_dma_map_iter_next(req, dev->dev, &iod->dma_state,
+				&iter)) {
+			if (WARN_ON_ONCE(!iter.status))
+				goto bad_sgl;
+			goto done;
+		}
 	}
 
+	/*
+	 * PRP2 is usually a list, but can point to data if all data to be
+	 * transferred fits into PRP1 + PRP2:
+	 */
 	if (length <= NVME_CTRL_PAGE_SIZE) {
-		iod->first_dma = dma_addr;
+		prp2_dma = iter.addr;
+		iod->total_len += length;
 		goto done;
 	}
@@ -614,58 +693,83 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 		iod->large_descriptors = true;
 
 	prp_list = dma_pool_alloc(nvme_dma_pool(dev, iod), GFP_ATOMIC,
-			&prp_dma);
-	if (!prp_list)
-		return BLK_STS_RESOURCE;
+			&prp2_dma);
+	if (!prp_list) {
+		iter.status = BLK_STS_RESOURCE;
+		goto done;
+	}
 	iod->descriptors[iod->nr_descriptors++] = prp_list;
-	iod->first_dma = prp_dma;
+
 	i = 0;
 	for (;;) {
+		prp_list[i++] = cpu_to_le64(iter.addr);
+		prp_len = min(length, NVME_CTRL_PAGE_SIZE);
+		if (WARN_ON_ONCE(iter.len < prp_len))
+			goto bad_sgl;
+
+		iod->total_len += prp_len;
+		iter.addr += prp_len;
+		iter.len -= prp_len;
+		length -= prp_len;
+		if (!length)
+			break;
+
+		if (iter.len == 0) {
+			if (!blk_rq_dma_map_iter_next(req, dev->dev,
+					&iod->dma_state, &iter)) {
+				if (WARN_ON_ONCE(!iter.status))
+					goto bad_sgl;
+				goto done;
+			}
+		}
+
+		/*
+		 * If we've filled the entire descriptor, allocate a new one that
+		 * is pointed to by the last entry in the previous PRP list. To
+		 * accommodate for that, move the last actual entry to the new
+		 * descriptor.
+		 */
 		if (i == NVME_CTRL_PAGE_SIZE >> 3) {
 			__le64 *old_prp_list = prp_list;
+			dma_addr_t prp_list_dma;
 
 			prp_list = dma_pool_alloc(dev->prp_page_pool,
-					GFP_ATOMIC, &prp_dma);
-			if (!prp_list)
-				goto free_prps;
+					GFP_ATOMIC, &prp_list_dma);
+			if (!prp_list) {
+				iter.status = BLK_STS_RESOURCE;
+				goto done;
+			}
 			iod->descriptors[iod->nr_descriptors++] = prp_list;
+			prp_list[0] = old_prp_list[i - 1];
-			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+			old_prp_list[i - 1] = cpu_to_le64(prp_list_dma);
 			i = 1;
 		}
-		prp_list[i++] = cpu_to_le64(dma_addr);
-		dma_len -= NVME_CTRL_PAGE_SIZE;
-		dma_addr += NVME_CTRL_PAGE_SIZE;
-		length -= NVME_CTRL_PAGE_SIZE;
-		if (length <= 0)
-			break;
-		if (dma_len > 0)
-			continue;
-		if (unlikely(dma_len < 0))
-			goto bad_sgl;
-		sg = sg_next(sg);
-		dma_addr = sg_dma_address(sg);
-		dma_len = sg_dma_len(sg);
 	}
+
 done:
-	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl));
-	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
-	return BLK_STS_OK;
-free_prps:
-	nvme_free_descriptors(dev, req);
-	return BLK_STS_RESOURCE;
+	/*
+	 * nvme_unmap_data uses the DPTR field in the SQE to tear down the
+	 * mapping, so initialize it even for failures.
+	 */
+	cmnd->dptr.prp1 = cpu_to_le64(prp1_dma);
+	cmnd->dptr.prp2 = cpu_to_le64(prp2_dma);
+	if (unlikely(iter.status))
+		nvme_unmap_data(dev, req);
+	return iter.status;
+
 bad_sgl:
-	WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents),
-	     "Invalid SGL for payload:%d nents:%d\n",
-	     blk_rq_payload_bytes(req), iod->sgt.nents);
+	dev_err_once(dev->dev,
+		"Incorrectly formed request for payload:%d nents:%d\n",
+		blk_rq_payload_bytes(req), blk_rq_nr_phys_segments(req));
 	return BLK_STS_IOERR;
 }
 
 static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge,
-		struct scatterlist *sg)
+		struct blk_dma_iter *iter)
 {
-	sge->addr = cpu_to_le64(sg_dma_address(sg));
-	sge->length = cpu_to_le32(sg_dma_len(sg));
+	sge->addr = cpu_to_le64(iter->addr);
+	sge->length = cpu_to_le32(iter->len);
 	sge->type = NVME_SGL_FMT_DATA_DESC << 4;
 }
 
@@ -681,17 +785,21 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
 		struct request *req, struct nvme_rw_command *cmd)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	unsigned int entries = blk_rq_nr_phys_segments(req);
 	struct nvme_sgl_desc *sg_list;
-	struct scatterlist *sg = iod->sgt.sgl;
-	unsigned int entries = iod->sgt.nents;
+	struct blk_dma_iter iter;
 	dma_addr_t sgl_dma;
-	int i = 0;
+	unsigned int mapped = 0;
 
 	/* setting the transfer type as SGL */
 	cmd->flags = NVME_CMD_SGL_METABUF;
 
-	if (entries == 1) {
-		nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
+	if (!blk_rq_dma_map_iter_start(req, dev->dev, &iod->dma_state, &iter))
+		return iter.status;
+
+	if (entries == 1 || blk_rq_dma_map_coalesce(&iod->dma_state)) {
+		nvme_pci_sgl_set_data(&cmd->dptr.sgl, &iter);
+		iod->total_len += iter.len;
 		return BLK_STS_OK;
 	}
 
@@ -702,110 +810,30 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
 	if (!sg_list)
 		return BLK_STS_RESOURCE;
 	iod->descriptors[iod->nr_descriptors++] = sg_list;
-	iod->first_dma = sgl_dma;
 
-	nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);
 	do {
-		nvme_pci_sgl_set_data(&sg_list[i++], sg);
-		sg = sg_next(sg);
-	} while (--entries > 0);
-
-	return BLK_STS_OK;
-}
-
-static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
-		struct request *req, struct nvme_rw_command *cmnd,
-		struct bio_vec *bv)
-{
-	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
-	unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;
-
-	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
-	if (dma_mapping_error(dev->dev, iod->first_dma))
-		return BLK_STS_RESOURCE;
-	iod->dma_len = bv->bv_len;
-
-	cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
-	if (bv->bv_len > first_prp_len)
-		cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
-	else
-		cmnd->dptr.prp2 = 0;
-	return BLK_STS_OK;
-}
-
-static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
-		struct request *req, struct nvme_rw_command *cmnd,
-		struct bio_vec *bv)
-{
-	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+		if (WARN_ON_ONCE(mapped == entries)) {
+			iter.status = BLK_STS_IOERR;
+			break;
+		}
+		nvme_pci_sgl_set_data(&sg_list[mapped++], &iter);
+		iod->total_len += iter.len;
+	} while (blk_rq_dma_map_iter_next(req, dev->dev, &iod->dma_state,
+			&iter));
 
-	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
-	if (dma_mapping_error(dev->dev, iod->first_dma))
-		return BLK_STS_RESOURCE;
-	iod->dma_len = bv->bv_len;
+	nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, mapped);
 
-	cmnd->flags = NVME_CMD_SGL_METABUF;
-	cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
-	cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
-	cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
-	return BLK_STS_OK;
+	if (unlikely(iter.status))
+		nvme_free_sgls(dev, req);
+	return iter.status;
 }
 
 static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 		struct nvme_command *cmnd)
 {
-	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	blk_status_t ret = BLK_STS_RESOURCE;
-	int rc;
-
-	if (blk_rq_nr_phys_segments(req) == 1) {
-		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
-		struct bio_vec bv = req_bvec(req);
-
-		if (!is_pci_p2pdma_page(bv.bv_page)) {
-			if ((bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1)) +
-			    bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
-				return nvme_setup_prp_simple(dev, req,
-							     &cmnd->rw, &bv);
-
-			if (nvmeq->qid && sgl_threshold &&
-			    nvme_ctrl_sgl_supported(&dev->ctrl))
-				return nvme_setup_sgl_simple(dev, req,
-							     &cmnd->rw, &bv);
-		}
-	}
-
-	iod->dma_len = 0;
-	iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
-	if (!iod->sgt.sgl)
-		return BLK_STS_RESOURCE;
-	sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
-	iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
-	if (!iod->sgt.orig_nents)
-		goto out_free_sg;
-
-	rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
-			     DMA_ATTR_NO_WARN);
-	if (rc) {
-		if (rc == -EREMOTEIO)
-			ret = BLK_STS_TARGET;
-		goto out_free_sg;
-	}
-
-	if (nvme_pci_use_sgls(dev, req, iod->sgt.nents))
-		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
-	else
-		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
-	if (ret != BLK_STS_OK)
-		goto out_unmap_sg;
-	return BLK_STS_OK;
-
-out_unmap_sg:
-	dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
-out_free_sg:
-	mempool_free(iod->sgt.sgl, dev->iod_mempool);
-	return ret;
+	if (nvme_pci_use_sgls(dev, req, blk_rq_nr_phys_segments(req)))
+		return nvme_pci_setup_sgls(dev, req, &cmnd->rw);
+	return nvme_pci_setup_prps(dev, req, &cmnd->rw);
 }
 
 static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
@@ -829,7 +857,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
 	iod->aborted = false;
 	iod->nr_descriptors = 0;
 	iod->large_descriptors = false;
-	iod->sgt.nents = 0;
+	iod->total_len = 0;
 
 	ret = nvme_setup_cmd(req->q->queuedata, req);
 	if (ret)
-- 
2.46.2
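
For readers following the conversion, the sketch below condenses the mapping
pattern that nvme_pci_setup_sgls() above follows: start the iterator, consume
one IOVA-contiguous segment per iteration, and report iter.status at the end.
It is an illustration only, not part of the patch; it assumes the
blk_rq_dma_map_iter_start()/blk_rq_dma_map_iter_next() helpers and the
struct blk_dma_iter/struct dma_iova_state types introduced earlier in this
series, and the fill_segment() callback is a hypothetical stand-in for the
driver-specific work of writing an NVMe SGL or PRP entry.  Unmapping (via
blk_rq_dma_unmap() plus manual teardown, as in nvme_unmap_data() above) is
left to the caller.

/*
 * Minimal sketch of the two-step DMA mapping loop, assuming the
 * blk_rq_dma_map iterator API added by this series.  fill_segment()
 * is a hypothetical driver hook, not an API from the patch.
 */
static blk_status_t map_request_segments(struct device *dma_dev,
		struct request *req, struct dma_iova_state *state,
		void (*fill_segment)(void *ctx, dma_addr_t addr, u32 len),
		void *ctx)
{
	struct blk_dma_iter iter;

	/* maps the first IOVA-contiguous segment, or reports an error */
	if (!blk_rq_dma_map_iter_start(req, dma_dev, state, &iter))
		return iter.status;

	do {
		/* one descriptor entry per returned segment */
		fill_segment(ctx, iter.addr, iter.len);
	} while (blk_rq_dma_map_iter_next(req, dma_dev, state, &iter));

	/* BLK_STS_OK when the iterator simply ran out of segments */
	return iter.status;
}

When the IOMMU coalesces the request into a single IOVA range (the
blk_rq_dma_map_coalesce() case checked in nvme_pci_setup_sgls() above), the
loop collapses to a single iteration, which is why this patch can drop the
dedicated single-segment fast path mentioned in the commit message.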