From nobody Fri Sep 20 06:58:22 2024
From: Yunsheng Lin <linyunsheng@huawei.com>
Cc: Yunsheng Lin, Jason Wang, Alexander Duyck, Jeroen de Borst,
 Praveen Kaligineedi, Shailend Chand, Eric Dumazet, Felix Fietkau,
 Sean Wang, Mark Lee, Lorenzo Bianconi, Matthias Brugger,
 AngeloGioacchino Del Regno, Keith Busch, Jens Axboe, Christoph Hellwig,
 Sagi Grimberg, Chaitanya Kulkarni, Andrew Morton
Subject: [PATCH net-next v5 3/5] net: introduce page_frag_cache_drain()
Date: Mon, 5 Feb 2024 20:45:03 +0800
Message-ID: <20240205124506.57670-4-linyunsheng@huawei.com>
In-Reply-To: <20240205124506.57670-1-linyunsheng@huawei.com>
References: <20240205124506.57670-1-linyunsheng@huawei.com>

When draining a page_frag_cache, most users go through the same steps
(check nc->va, drain the page with the cached pagecnt_bias, clear
nc->va), so introduce an API to avoid the code duplication.
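For illustration, here is a minimal before/after sketch of the conversion
this enables in a caller's teardown path (struct foo_rxq and the
foo_free_rxq_*() functions are hypothetical names made up for this
example; the page_frag_cache interfaces are the ones touched by this
patch):

	#include <linux/gfp.h>
	#include <linux/mm.h>

	/* Hypothetical per-queue state embedding a page frag cache. */
	struct foo_rxq {
		struct page_frag_cache page_cache;
	};

	/* Before: each caller open-codes the drain steps. */
	static void foo_free_rxq_old(struct foo_rxq *rxq)
	{
		struct page_frag_cache *nc = &rxq->page_cache;

		if (nc->va) {
			__page_frag_cache_drain(virt_to_head_page(nc->va),
						nc->pagecnt_bias);
			nc->va = NULL;
		}
	}

	/* After: the shared helper performs the same steps. */
	static void foo_free_rxq_new(struct foo_rxq *rxq)
	{
		page_frag_cache_drain(&rxq->page_cache);
	}

The helper uses virt_to_head_page(), so the reference is always dropped
on the head page of a compound page, which is where the page refcount
lives.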
Signed-off-by: Yunsheng Lin
Acked-by: Jason Wang
Reviewed-by: Alexander Duyck
---
 drivers/net/ethernet/google/gve/gve_main.c | 11 ++---------
 drivers/net/ethernet/mediatek/mtk_wed_wo.c | 17 ++---------------
 drivers/nvme/host/tcp.c                    |  7 +------
 drivers/nvme/target/tcp.c                  |  4 +---
 include/linux/gfp.h                        |  1 +
 mm/page_alloc.c                            | 10 ++++++++++
 6 files changed, 17 insertions(+), 33 deletions(-)

diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index db6d9ae7cd78..dec6458bb8d7 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1276,17 +1276,10 @@ static void gve_unreg_xdp_info(struct gve_priv *priv)
 
 static void gve_drain_page_cache(struct gve_priv *priv)
 {
-	struct page_frag_cache *nc;
 	int i;
 
-	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
-		nc = &priv->rx[i].page_cache;
-		if (nc->va) {
-			__page_frag_cache_drain(virt_to_page(nc->va),
-						nc->pagecnt_bias);
-			nc->va = NULL;
-		}
-	}
+	for (i = 0; i < priv->rx_cfg.num_queues; i++)
+		page_frag_cache_drain(&priv->rx[i].page_cache);
 }
 
 static void gve_qpls_get_curr_alloc_cfg(struct gve_priv *priv,
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
index d58b07e7e123..7063c78bd35f 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -286,7 +286,6 @@ mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 static void
 mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 {
-	struct page *page;
 	int i;
 
 	for (i = 0; i < q->n_desc; i++) {
@@ -301,19 +300,12 @@ mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 		entry->buf = NULL;
 	}
 
-	if (!q->cache.va)
-		return;
-
-	page = virt_to_page(q->cache.va);
-	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
-	memset(&q->cache, 0, sizeof(q->cache));
+	page_frag_cache_drain(&q->cache);
 }
 
 static void
 mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 {
-	struct page *page;
-
 	for (;;) {
 		void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
 
@@ -323,12 +315,7 @@ mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 		skb_free_frag(buf);
 	}
 
-	if (!q->cache.va)
-		return;
-
-	page = virt_to_page(q->cache.va);
-	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
-	memset(&q->cache, 0, sizeof(q->cache));
+	page_frag_cache_drain(&q->cache);
 }
 
 static void
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index d058d990532b..22e1fb9c9c0f 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1344,7 +1344,6 @@ static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
 
 static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
 {
-	struct page *page;
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
 	unsigned int noreclaim_flag;
@@ -1355,11 +1354,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
 	if (queue->hdr_digest || queue->data_digest)
 		nvme_tcp_free_crypto(queue);
 
-	if (queue->pf_cache.va) {
-		page = virt_to_head_page(queue->pf_cache.va);
-		__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
-		queue->pf_cache.va = NULL;
-	}
+	page_frag_cache_drain(&queue->pf_cache);
 
 	noreclaim_flag = memalloc_noreclaim_save();
 	/* ->sock will be released by fput() */
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 6a1e6bb80062..56224dc59f17 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1591,7 +1591,6 @@ static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
 
 static void nvmet_tcp_release_queue_work(struct work_struct *w)
 {
-	struct page *page;
 	struct nvmet_tcp_queue *queue =
 		container_of(w, struct nvmet_tcp_queue, release_work);
 
@@ -1615,8 +1614,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
 	if (queue->hdr_digest || queue->data_digest)
 		nvmet_tcp_free_crypto(queue);
 	ida_free(&nvmet_tcp_queue_ida, queue->idx);
-	page = virt_to_head_page(queue->pf_cache.va);
-	__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
+	page_frag_cache_drain(&queue->pf_cache);
 	kfree(queue);
 }
 
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 28aea17fa59b..6cef1c241180 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -311,6 +311,7 @@ extern void __free_pages(struct page *page, unsigned int order);
 extern void free_pages(unsigned long addr, unsigned int order);
 
 struct page_frag_cache;
+void page_frag_cache_drain(struct page_frag_cache *nc);
 extern void __page_frag_cache_drain(struct page *page, unsigned int count);
 void *__page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz,
 			      gfp_t gfp_mask, unsigned int align_mask);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 636145c29f70..06aa1ebbd21c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4699,6 +4699,16 @@ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
 	return page;
 }
 
+void page_frag_cache_drain(struct page_frag_cache *nc)
+{
+	if (!nc->va)
+		return;
+
+	__page_frag_cache_drain(virt_to_head_page(nc->va), nc->pagecnt_bias);
+	nc->va = NULL;
+}
+EXPORT_SYMBOL(page_frag_cache_drain);
+
 void __page_frag_cache_drain(struct page *page, unsigned int count)
 {
 	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
-- 
2.33.0