Drop TX packets when posting the work request fails and ensure DMA
mappings are always cleaned up.
Signed-off-by: Aditya Garg <gargaditya@linux.microsoft.com>
---
drivers/net/ethernet/microsoft/mana/gdma_main.c | 1 -
drivers/net/ethernet/microsoft/mana/mana_en.c | 7 +++----
include/net/mana/mana.h | 1 +
3 files changed, 4 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index effe0a2f207a..65dd8060c7f4 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1332,7 +1332,6 @@ int mana_gd_post_work_request(struct gdma_queue *wq,
if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) {
gc = wq->gdma_dev->gdma_context;
- dev_err(gc->dev, "unsuccessful flow control!\n");
return -ENOSPC;
}
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 67ae5421f9ee..066d822f68f0 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -491,9 +491,9 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (err) {
(void)skb_dequeue_tail(&txq->pending_skbs);
+ mana_unmap_skb(skb, apc);
netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
- err = NETDEV_TX_BUSY;
- goto tx_busy;
+ goto free_sgl_ptr;
}
err = NETDEV_TX_OK;
@@ -513,7 +513,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
tx_stats->bytes += len + ((num_gso_seg - 1) * gso_hs);
u64_stats_update_end(&tx_stats->syncp);
-tx_busy:
if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
netif_tx_wake_queue(net_txq);
apc->eth_stats.wake_queue++;
@@ -1679,7 +1678,7 @@ static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
return 0;
}
-static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
+void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
{
struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 50a532fb30d6..d05457d3e1ab 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -585,6 +585,7 @@ int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
void mana_query_phy_stats(struct mana_port_context *apc);
int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);
+void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc);
extern const struct ethtool_ops mana_ethtool_ops;
extern struct dentry *mana_debugfs_root;
--
2.43.0
Hi Aditya,
kernel test robot noticed the following build warnings:
[auto build test WARNING on net-next/main]
url: https://github.com/intel-lab-lkp/linux/commits/Aditya-Garg/net-mana-Handle-SKB-if-TX-SGEs-exceed-hardware-limit/20251111-162216
base: net-next/main
patch link: https://lore.kernel.org/r/1762848781-357-3-git-send-email-gargaditya%40linux.microsoft.com
patch subject: [PATCH net-next v3 2/2] net: mana: Drop TX skb on post_work_request failure and unmap resources
config: arm64-allyesconfig (https://download.01.org/0day-ci/archive/20251112/202511120917.rSwJ1zUm-lkp@intel.com/config)
compiler: clang version 22.0.0git (https://github.com/llvm/llvm-project 996639d6ebb86ff15a8c99b67f1c2e2117636ae7)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20251112/202511120917.rSwJ1zUm-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202511120917.rSwJ1zUm-lkp@intel.com/
All warnings (new ones prefixed by >>):
>> drivers/net/ethernet/microsoft/mana/gdma_main.c:1303:23: warning: variable 'gc' set but not used [-Wunused-but-set-variable]
1303 | struct gdma_context *gc;
| ^
1 warning generated.
vim +/gc +1303 drivers/net/ethernet/microsoft/mana/gdma_main.c
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1297
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1298 int mana_gd_post_work_request(struct gdma_queue *wq,
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1299 const struct gdma_wqe_request *wqe_req,
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1300 struct gdma_posted_wqe_info *wqe_info)
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1301 {
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1302 u32 client_oob_size = wqe_req->inline_oob_size;
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 @1303 struct gdma_context *gc;
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1304 u32 sgl_data_size;
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1305 u32 max_wqe_size;
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1306 u32 wqe_size;
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1307 u8 *wqe_ptr;
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1308
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1309 if (wqe_req->num_sge == 0)
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1310 return -EINVAL;
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1311
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1312 if (wq->type == GDMA_RQ) {
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1313 if (client_oob_size != 0)
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1314 return -EINVAL;
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1315
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1316 client_oob_size = INLINE_OOB_SMALL_SIZE;
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1317
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1318 max_wqe_size = GDMA_MAX_RQE_SIZE;
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1319 } else {
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1320 if (client_oob_size != INLINE_OOB_SMALL_SIZE &&
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1321 client_oob_size != INLINE_OOB_LARGE_SIZE)
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1322 return -EINVAL;
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1323
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1324 max_wqe_size = GDMA_MAX_SQE_SIZE;
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1325 }
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1326
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1327 sgl_data_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1328 wqe_size = ALIGN(sizeof(struct gdma_wqe) + client_oob_size +
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1329 sgl_data_size, GDMA_WQE_BU_SIZE);
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1330 if (wqe_size > max_wqe_size)
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1331 return -EINVAL;
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1332
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1333 if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) {
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1334 gc = wq->gdma_dev->gdma_context;
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1335 return -ENOSPC;
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1336 }
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1337
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1338 if (wqe_info)
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1339 wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE;
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1340
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1341 wqe_ptr = mana_gd_get_wqe_ptr(wq, wq->head);
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1342 wqe_ptr += mana_gd_write_client_oob(wqe_req, wq->type, client_oob_size,
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1343 sgl_data_size, wqe_ptr);
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1344 if (wqe_ptr >= (u8 *)wq->queue_mem_ptr + wq->queue_size)
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1345 wqe_ptr -= wq->queue_size;
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1346
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1347 mana_gd_write_sgl(wq, wqe_ptr, wqe_req);
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1348
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1349 wq->head += wqe_size / GDMA_WQE_BU_SIZE;
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1350
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1351 return 0;
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1352 }
c8017f5b4856d5 Konstantin Taranov 2025-01-20 1353 EXPORT_SYMBOL_NS(mana_gd_post_work_request, "NET_MANA");
ca9c54d2d6a5ab Dexuan Cui 2021-04-16 1354
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
On Tue, 11 Nov 2025 00:13:01 -0800 Aditya Garg wrote:
> Drop TX packets when posting the work request fails and ensure DMA
> mappings are always cleaned up.
drivers/net/ethernet/microsoft/mana/gdma_main.c:1303:23: warning: variable 'gc' set but not used [-Wunused-but-set-variable]
1303 | struct gdma_context *gc;
| ^
--
pw-bot: cr
On 12-11-2025 06:38, Jakub Kicinski wrote:
> On Tue, 11 Nov 2025 00:13:01 -0800 Aditya Garg wrote:
>> Drop TX packets when posting the work request fails and ensure DMA
>> mappings are always cleaned up.
>
> drivers/net/ethernet/microsoft/mana/gdma_main.c:1303:23: warning: variable 'gc' set but not used [-Wunused-but-set-variable]
>  1303 |         struct gdma_context *gc;
>       |                              ^

Thanks for pointing this out. Will fix this in next revision.

Regards,
Aditya
© 2016 - 2026 Red Hat, Inc.