Variables are named q or queue_index. Types are int, unsigned int, u32
and u16. Use `unsigned int q` everywhere.
Skip over taprio functions. They use `u8 queue_id`, which matches the
type of the `struct macb_queue_enst_config` field. Using `queue_id`
everywhere would be too verbose.
Signed-off-by: Théo Lebrun <theo.lebrun@bootlin.com>
---
drivers/net/ethernet/cadence/macb_main.c | 30 +++++++++++++++---------------
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 05ccb6f186f7..087401163771 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -873,7 +873,7 @@ static void gem_shuffle_tx_one_ring(struct macb_queue *queue)
static void gem_shuffle_tx_rings(struct macb *bp)
{
struct macb_queue *queue;
- int q;
+ unsigned int q;
for (q = 0, queue = bp->queues; q < bp->num_queues; q++, queue++)
gem_shuffle_tx_one_ring(queue);
@@ -1254,7 +1254,7 @@ static void macb_tx_error_task(struct work_struct *work)
tx_error_task);
bool halt_timeout = false;
struct macb *bp = queue->bp;
- u32 queue_index;
+ unsigned int q;
u32 packets = 0;
u32 bytes = 0;
struct macb_tx_skb *tx_skb;
@@ -1263,9 +1263,9 @@ static void macb_tx_error_task(struct work_struct *work)
unsigned int tail;
unsigned long flags;
- queue_index = queue - bp->queues;
+ q = queue - bp->queues;
netdev_vdbg(bp->netdev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
- queue_index, queue->tx_tail, queue->tx_head);
+ q, queue->tx_tail, queue->tx_head);
/* Prevent the queue NAPI TX poll from running, as it calls
* macb_tx_complete(), which in turn may call netif_wake_subqueue().
@@ -1338,7 +1338,7 @@ static void macb_tx_error_task(struct work_struct *work)
macb_tx_unmap(bp, tx_skb, 0);
}
- netdev_tx_completed_queue(netdev_get_tx_queue(bp->netdev, queue_index),
+ netdev_tx_completed_queue(netdev_get_tx_queue(bp->netdev, q),
packets, bytes);
/* Set end of TX queue */
@@ -1403,7 +1403,7 @@ static bool ptp_one_step_sync(struct sk_buff *skb)
static int macb_tx_complete(struct macb_queue *queue, int budget)
{
struct macb *bp = queue->bp;
- u16 queue_index = queue - bp->queues;
+ unsigned int q = queue - bp->queues;
unsigned long flags;
unsigned int tail;
unsigned int head;
@@ -1465,14 +1465,14 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
}
}
- netdev_tx_completed_queue(netdev_get_tx_queue(bp->netdev, queue_index),
+ netdev_tx_completed_queue(netdev_get_tx_queue(bp->netdev, q),
packets, bytes);
queue->tx_tail = tail;
- if (__netif_subqueue_stopped(bp->netdev, queue_index) &&
+ if (__netif_subqueue_stopped(bp->netdev, q) &&
CIRC_CNT(queue->tx_head, queue->tx_tail,
bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
- netif_wake_subqueue(bp->netdev, queue_index);
+ netif_wake_subqueue(bp->netdev, q);
spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
if (packets)
@@ -2496,10 +2496,10 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *netdev)
static netdev_tx_t macb_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
- u16 queue_index = skb_get_queue_mapping(skb);
struct macb *bp = netdev_priv(netdev);
- struct macb_queue *queue = &bp->queues[queue_index];
+ unsigned int q = skb_get_queue_mapping(skb);
unsigned int desc_cnt, nr_frags, frag_size, f;
+ struct macb_queue *queue = &bp->queues[q];
unsigned int hdrlen;
unsigned long flags;
bool is_lso;
@@ -2539,7 +2539,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb,
#if defined(DEBUG) && defined(VERBOSE_DEBUG)
netdev_vdbg(bp->netdev,
- "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
- queue_index, skb->len, skb->head, skb->data,
+ "start_xmit: queue %u len %u head %p data %p tail %p end %p\n",
+ q, skb->len, skb->head, skb->data,
skb_tail_pointer(skb), skb_end_pointer(skb));
print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, 16, true);
@@ -2565,7 +2565,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb,
/* This is a hard error, log it. */
if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
bp->tx_ring_size) < desc_cnt) {
- netif_stop_subqueue(netdev, queue_index);
+ netif_stop_subqueue(netdev, q);
netdev_dbg(netdev, "tx_head = %u, tx_tail = %u\n",
queue->tx_head, queue->tx_tail);
ret = NETDEV_TX_BUSY;
@@ -2581,7 +2581,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb,
/* Make newly initialized descriptor visible to hardware */
wmb();
skb_tx_timestamp(skb);
- netdev_tx_sent_queue(netdev_get_tx_queue(bp->netdev, queue_index),
+ netdev_tx_sent_queue(netdev_get_tx_queue(bp->netdev, q),
skb->len);
spin_lock(&bp->lock);
@@ -2590,7 +2590,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb,
spin_unlock(&bp->lock);
if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
- netif_stop_subqueue(netdev, queue_index);
+ netif_stop_subqueue(netdev, q);
unlock:
spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
--
2.53.0