From: Alexander Lobakin
To: "David S. Miller", Eric Dumazet, Jakub Kicinski, Paolo Abeni
Cc: Alexander Lobakin, Maciej Fijalkowski, Michal Kubiak,
    Larysa Zaremba, Alexei Starovoitov, Daniel Borkmann,
    Willem de Bruijn, intel-wired-lan@lists.osuosl.org,
    netdev@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCH RFC net-next 19/34] idpf: stop using macros for accessing queue descriptors
Date: Sat, 23 Dec 2023 03:55:39 +0100
Message-ID: <20231223025554.2316836-20-aleksander.lobakin@intel.com>
X-Mailer: git-send-email 2.43.0
In-Reply-To: <20231223025554.2316836-1-aleksander.lobakin@intel.com>
References: <20231223025554.2316836-1-aleksander.lobakin@intel.com>

In C, we have structures and unions. Casting `void *` via macros is
not only error-prone, but also looks confusing and awful in general.
Replace the macros with a union of typed ring pointers and direct
array dereferences. Had idpf used separate queue structures, this
would look way more elegant -- that will come one day.
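To make the idea concrete outside the driver, below is a minimal,
self-contained sketch of the pattern (the demo_desc / demo_queue names
and layout are made up for illustration and are not part of idpf): an
untyped ring accessed through a casting macro versus an anonymous
union that gives each descriptor layout a typed view of the same
allocation.

#include <stdlib.h>

struct demo_desc {			/* made-up descriptor layout */
	unsigned long long buf_addr;
	unsigned long long qw1;
};

/* before: every access site casts the untyped ring through a macro */
#define DEMO_DESC(q, i) \
	(&(((struct demo_desc *)((q)->desc_ring))[i]))

struct demo_queue {
	/* after: typed views alias the single ring allocation */
	union {
		struct demo_desc *desc;	/* compiler-checked access */
		void *desc_ring;	/* kept for alloc/free paths */
	};
	unsigned int desc_count;
};

int main(void)
{
	struct demo_queue q = { .desc_count = 64 };

	q.desc_ring = calloc(q.desc_count, sizeof(*q.desc));
	if (!q.desc_ring)
		return 1;

	q.desc[3].qw1 = 1;	/* direct dereference, no cast, no macro */

	free(q.desc_ring);
	return 0;
}

All union members share the one pointer slot, so assigning the
allocation through ->desc_ring makes the typed views valid at no extra
memory or runtime cost; that is why the macros can be dropped below
without touching the ring alloc/free paths.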
Signed-off-by: Alexander Lobakin
---
 .../ethernet/intel/idpf/idpf_singleq_txrx.c | 20 +++++-----
 drivers/net/ethernet/intel/idpf/idpf_txrx.c | 30 +++++++--------
 drivers/net/ethernet/intel/idpf/idpf_txrx.h | 37 ++++++++-----------
 3 files changed, 40 insertions(+), 47 deletions(-)

diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index 23dcc02e6976..7072d45f007b 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -206,7 +206,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
 	data_len = skb->data_len;
 	size = skb_headlen(skb);
 
-	tx_desc = IDPF_BASE_TX_DESC(tx_q, i);
+	tx_desc = &tx_q->base_tx[i];
 
 	dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
 
@@ -242,7 +242,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
 			i++;
 
 			if (i == tx_q->desc_count) {
-				tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+				tx_desc = &tx_q->base_tx[0];
 				i = 0;
 			}
 
@@ -262,7 +262,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
 		i++;
 
 		if (i == tx_q->desc_count) {
-			tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+			tx_desc = &tx_q->base_tx[0];
 			i = 0;
 		}
 
@@ -311,7 +311,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq)
 	memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf));
 	txq->tx_buf[ntu].ctx_entry = true;
 
-	ctx_desc = IDPF_BASE_TX_CTX_DESC(txq, ntu);
+	ctx_desc = &txq->base_ctx[ntu];
 
 	IDPF_SINGLEQ_BUMP_RING_IDX(txq, ntu);
 	txq->next_to_use = ntu;
@@ -460,7 +460,7 @@ static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget,
 	struct netdev_queue *nq;
 	bool dont_wake;
 
-	tx_desc = IDPF_BASE_TX_DESC(tx_q, ntc);
+	tx_desc = &tx_q->base_tx[ntc];
 	tx_buf = &tx_q->tx_buf[ntc];
 	ntc -= tx_q->desc_count;
 
@@ -509,7 +509,7 @@ static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget,
 			if (unlikely(!ntc)) {
 				ntc -= tx_q->desc_count;
 				tx_buf = tx_q->tx_buf;
-				tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+				tx_desc = &tx_q->base_tx[0];
 			}
 
 			/* unmap any remaining paged data */
@@ -527,7 +527,7 @@ static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget,
 		if (unlikely(!ntc)) {
 			ntc -= tx_q->desc_count;
 			tx_buf = tx_q->tx_buf;
-			tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+			tx_desc = &tx_q->base_tx[0];
 		}
 	} while (likely(budget));
 
@@ -880,7 +880,7 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
 	if (!cleaned_count)
 		return false;
 
-	desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, nta);
+	desc = &rx_q->single_buf[nta];
 
 	do {
 		dma_addr_t addr;
@@ -898,7 +898,7 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
 
 		nta++;
 		if (unlikely(nta == rx_q->desc_count)) {
-			desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, 0);
+			desc = &rx_q->single_buf[0];
 			nta = 0;
 		}
 
@@ -998,7 +998,7 @@ static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget)
 		struct idpf_rx_buf *rx_buf;
 
 		/* get the Rx desc from Rx queue based on 'next_to_clean' */
-		rx_desc = IDPF_RX_DESC(rx_q, ntc);
+		rx_desc = &rx_q->rx[ntc];
 
 		/* status_error_ptype_len will always be zero for unused
 		 * descriptors because it's cleared in cleanup, and overlaps
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 6fd9128e61d8..40b8d8b17827 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -533,7 +533,7 @@ static bool idpf_rx_post_buf_desc(struct idpf_queue *bufq, u16 buf_id)
 	u16 nta = bufq->next_to_alloc;
 	dma_addr_t addr;
 
-	splitq_rx_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, nta);
+	splitq_rx_desc = &bufq->split_buf[nta];
 
 	if (bufq->rx_hsplit_en) {
 		bq.pp = bufq->hdr_pp;
@@ -1560,7 +1560,7 @@ do {								\
 	if (unlikely(!(ntc))) {					\
 		ntc -= (txq)->desc_count;			\
 		buf = (txq)->tx_buf;				\
-		desc = IDPF_FLEX_TX_DESC(txq, 0);		\
+		desc = &(txq)->flex_tx[0];			\
 	} else {						\
 		(buf)++;					\
 		(desc)++;					\
@@ -1593,8 +1593,8 @@ static void idpf_tx_splitq_clean(struct idpf_queue *tx_q, u16 end,
 	s16 ntc = tx_q->next_to_clean;
 	struct idpf_tx_buf *tx_buf;
 
-	tx_desc = IDPF_FLEX_TX_DESC(tx_q, ntc);
-	next_pending_desc = IDPF_FLEX_TX_DESC(tx_q, end);
+	tx_desc = &tx_q->flex_tx[ntc];
+	next_pending_desc = &tx_q->flex_tx[end];
 	tx_buf = &tx_q->tx_buf[ntc];
 	ntc -= tx_q->desc_count;
 
@@ -1774,7 +1774,7 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
 	int i;
 
 	complq_budget = vport->compln_clean_budget;
-	tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, ntc);
+	tx_desc = &complq->comp[ntc];
 	ntc -= complq->desc_count;
 
 	do {
@@ -1840,7 +1840,7 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
 		ntc++;
 		if (unlikely(!ntc)) {
 			ntc -= complq->desc_count;
-			tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, 0);
+			tx_desc = &complq->comp[0];
 			change_bit(__IDPF_Q_GEN_CHK, complq->flags);
 		}
 
@@ -2107,7 +2107,7 @@ void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
 		 * used one additional descriptor for a context
 		 * descriptor. Reset that here.
 		 */
-		tx_desc = IDPF_FLEX_TX_DESC(txq, idx);
+		tx_desc = &txq->flex_tx[idx];
 		memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc));
 		if (idx == 0)
 			idx = txq->desc_count;
@@ -2167,7 +2167,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
 	data_len = skb->data_len;
 	size = skb_headlen(skb);
 
-	tx_desc = IDPF_FLEX_TX_DESC(tx_q, i);
+	tx_desc = &tx_q->flex_tx[i];
 
 	dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
 
@@ -2241,7 +2241,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
 			i++;
 
 			if (i == tx_q->desc_count) {
-				tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
+				tx_desc = &tx_q->flex_tx[0];
 				i = 0;
 				tx_q->compl_tag_cur_gen =
					IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
@@ -2286,7 +2286,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
 		i++;
 
 		if (i == tx_q->desc_count) {
-			tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
+			tx_desc = &tx_q->flex_tx[0];
 			i = 0;
 			tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
 		}
@@ -2520,7 +2520,7 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq)
 	txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
 
 	/* grab the next descriptor */
-	desc = IDPF_FLEX_TX_CTX_DESC(txq, i);
+	desc = &txq->flex_ctx[i];
 	txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);
 
 	return desc;
@@ -3020,7 +3020,7 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
 		u8 rxdid;
 
 		/* get the Rx desc from Rx queue based on 'next_to_clean' */
-		desc = IDPF_RX_DESC(rxq, ntc);
+		desc = &rxq->rx[ntc];
 		rx_desc = (struct virtchnl2_rx_flex_desc_adv_nic_3 *)desc;
 
 		/* This memory barrier is needed to keep us from reading
@@ -3225,11 +3225,11 @@ static void idpf_rx_clean_refillq(struct idpf_queue *bufq,
 	int cleaned = 0;
 	u16 gen;
 
-	buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, bufq_nta);
+	buf_desc = &bufq->split_buf[bufq_nta];
 
 	/* make sure we stop at ring wrap in the unlikely case ring is full */
 	while (likely(cleaned < refillq->desc_count)) {
-		u16 refill_desc = IDPF_SPLITQ_RX_BI_DESC(refillq, ntc);
+		u16 refill_desc = refillq->ring[ntc];
 		bool failure;
 
 		gen = FIELD_GET(IDPF_RX_BI_GEN_M, refill_desc);
@@ -3247,7 +3247,7 @@ static void idpf_rx_clean_refillq(struct idpf_queue *bufq,
 		}
 
 		if (unlikely(++bufq_nta == bufq->desc_count)) {
-			buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, 0);
+			buf_desc = &bufq->split_buf[0];
 			bufq_nta = 0;
 		} else {
 			buf_desc++;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 5975c6d029d7..2584bd94363f 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -112,24 +112,6 @@ do {								\
 #define IDPF_RXD_EOF_SPLITQ	VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
 #define IDPF_RXD_EOF_SINGLEQ	VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M
 
-#define IDPF_SINGLEQ_RX_BUF_DESC(rxq, i)	\
-	(&(((struct virtchnl2_singleq_rx_buf_desc *)((rxq)->desc_ring))[i]))
-#define IDPF_SPLITQ_RX_BUF_DESC(rxq, i)	\
-	(&(((struct virtchnl2_splitq_rx_buf_desc *)((rxq)->desc_ring))[i]))
-#define IDPF_SPLITQ_RX_BI_DESC(rxq, i)	((((rxq)->ring))[i])
-
-#define IDPF_BASE_TX_DESC(txq, i)	\
-	(&(((struct idpf_base_tx_desc *)((txq)->desc_ring))[i]))
-#define IDPF_BASE_TX_CTX_DESC(txq, i)	\
-	(&(((struct idpf_base_tx_ctx_desc *)((txq)->desc_ring))[i]))
-#define IDPF_SPLITQ_TX_COMPLQ_DESC(txcq, i)	\
-	(&(((struct idpf_splitq_tx_compl_desc *)((txcq)->desc_ring))[i]))
-
-#define IDPF_FLEX_TX_DESC(txq, i) \
-	(&(((union idpf_tx_flex_desc *)((txq)->desc_ring))[i]))
-#define IDPF_FLEX_TX_CTX_DESC(txq, i)	\
-	(&(((struct idpf_flex_tx_ctx_desc *)((txq)->desc_ring))[i]))
-
 #define IDPF_DESC_UNUSED(txq)	\
 	((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
 	(txq)->next_to_clean - (txq)->next_to_use - 1)
@@ -275,9 +257,6 @@ struct idpf_rx_extracted {
 #define IDPF_TX_MAX_DESC_DATA_ALIGNED \
 	ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE)
 
-#define IDPF_RX_DESC(rxq, i)	\
-	(&(((union virtchnl2_rx_desc *)((rxq)->desc_ring))[i]))
-
 #define idpf_rx_buf libie_rx_buffer
 
 #define IDPF_RX_MAX_PTYPE_PROTO_IDS 32
@@ -586,7 +565,21 @@ struct idpf_queue {
 		struct page_pool *pp;
 		struct device *dev;
 	};
-	void *desc_ring;
+	union {
+		union virtchnl2_rx_desc *rx;
+
+		struct virtchnl2_singleq_rx_buf_desc *single_buf;
+		struct virtchnl2_splitq_rx_buf_desc *split_buf;
+
+		struct idpf_base_tx_desc *base_tx;
+		struct idpf_base_tx_ctx_desc *base_ctx;
+		union idpf_tx_flex_desc *flex_tx;
+		struct idpf_flex_tx_ctx_desc *flex_ctx;
+
+		struct idpf_splitq_tx_compl_desc *comp;
+
+		void *desc_ring;
+	};
 
 	u32 hdr_truesize;
 	u32 truesize;
-- 
2.43.0