No functional changes; rename the enablement functions, variables, etc.
that are used in the teaming driver's transmit decisions.
Since rx and tx enablement are still coupled, some of the variables
renamed in this patch are still used for the rx path, but that will
change in a follow-up patch.
Signed-off-by: Marc Harvey <marcharvey@google.com>
---
Changes in v4:
- New patch: split from the original monolithic v3 patch "net: team:
Decouple rx and tx enablement in the team driver".
- Link to v3: https://lore.kernel.org/netdev/20260402-teaming-driver-internal-v3-6-e8cfdec3b5c2@google.com/
---
drivers/net/team/team_core.c | 44 +++++++++++++++---------------
drivers/net/team/team_mode_loadbalance.c | 2 +-
drivers/net/team/team_mode_random.c | 4 +--
drivers/net/team/team_mode_roundrobin.c | 2 +-
include/linux/if_team.h | 46 +++++++++++++++++---------------
5 files changed, 51 insertions(+), 47 deletions(-)
diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c
index 2ce31999c99f..826769473878 100644
--- a/drivers/net/team/team_core.c
+++ b/drivers/net/team/team_core.c
@@ -532,13 +532,13 @@ static void team_adjust_ops(struct team *team)
* correct ops are always set.
*/
- if (!team->en_port_count || !team_is_mode_set(team) ||
+ if (!team->tx_en_port_count || !team_is_mode_set(team) ||
!team->mode->ops->transmit)
team->ops.transmit = team_dummy_transmit;
else
team->ops.transmit = team->mode->ops->transmit;
- if (!team->en_port_count || !team_is_mode_set(team) ||
+ if (!team->tx_en_port_count || !team_is_mode_set(team) ||
!team->mode->ops->receive)
team->ops.receive = team_dummy_receive;
else
@@ -831,7 +831,7 @@ static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
return true;
if (port->priority > cur->priority)
return false;
- if (port->index < cur->index)
+ if (port->tx_index < cur->tx_index)
return true;
return false;
}
@@ -929,7 +929,7 @@ static bool team_port_find(const struct team *team,
/*
* Enable/disable port by adding to enabled port hashlist and setting
- * port->index (Might be racy so reader could see incorrect ifindex when
+ * port->tx_index (Might be racy so reader could see incorrect ifindex when
* processing a flying packet, but that is not a problem). Write guarded
* by RTNL.
*/
@@ -938,10 +938,10 @@ static void team_port_enable(struct team *team,
{
if (team_port_enabled(port))
return;
- WRITE_ONCE(port->index, team->en_port_count);
- WRITE_ONCE(team->en_port_count, team->en_port_count + 1);
- hlist_add_head_rcu(&port->hlist,
- team_port_index_hash(team, port->index));
+ WRITE_ONCE(port->tx_index, team->tx_en_port_count);
+ WRITE_ONCE(team->tx_en_port_count, team->tx_en_port_count + 1);
+ hlist_add_head_rcu(&port->tx_hlist,
+ team_tx_port_index_hash(team, port->tx_index));
team_adjust_ops(team);
team_queue_override_port_add(team, port);
team_notify_peers(team);
@@ -951,15 +951,17 @@ static void team_port_enable(struct team *team,
static void __reconstruct_port_hlist(struct team *team, int rm_index)
{
- int i;
+ struct hlist_head *tx_port_index_hash;
struct team_port *port;
+ int i;
- for (i = rm_index + 1; i < team->en_port_count; i++) {
- port = team_get_port_by_index(team, i);
- hlist_del_rcu(&port->hlist);
- WRITE_ONCE(port->index, port->index - 1);
- hlist_add_head_rcu(&port->hlist,
- team_port_index_hash(team, port->index));
+ for (i = rm_index + 1; i < team->tx_en_port_count; i++) {
+ port = team_get_port_by_tx_index(team, i);
+ hlist_del_rcu(&port->tx_hlist);
+ WRITE_ONCE(port->tx_index, port->tx_index - 1);
+ tx_port_index_hash = team_tx_port_index_hash(team,
+ port->tx_index);
+ hlist_add_head_rcu(&port->tx_hlist, tx_port_index_hash);
}
}
@@ -970,10 +972,10 @@ static void team_port_disable(struct team *team,
return;
if (team->ops.port_tx_disabled)
team->ops.port_tx_disabled(team, port);
- hlist_del_rcu(&port->hlist);
- __reconstruct_port_hlist(team, port->index);
- WRITE_ONCE(port->index, -1);
- WRITE_ONCE(team->en_port_count, team->en_port_count - 1);
+ hlist_del_rcu(&port->tx_hlist);
+ __reconstruct_port_hlist(team, port->tx_index);
+ WRITE_ONCE(port->tx_index, -1);
+ WRITE_ONCE(team->tx_en_port_count, team->tx_en_port_count - 1);
team_queue_override_port_del(team, port);
team_adjust_ops(team);
team_lower_state_changed(port);
@@ -1244,7 +1246,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
netif_addr_unlock_bh(dev);
}
- WRITE_ONCE(port->index, -1);
+ WRITE_ONCE(port->tx_index, -1);
list_add_tail_rcu(&port->list, &team->port_list);
team_port_enable(team, port);
netdev_compute_master_upper_features(dev, true);
@@ -1595,7 +1597,7 @@ static int team_init(struct net_device *dev)
return -ENOMEM;
for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
- INIT_HLIST_HEAD(&team->en_port_hlist[i]);
+ INIT_HLIST_HEAD(&team->tx_en_port_hlist[i]);
INIT_LIST_HEAD(&team->port_list);
err = team_queue_override_init(team);
if (err)
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index 840f409d250b..4833fbfe241e 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -120,7 +120,7 @@ static struct team_port *lb_hash_select_tx_port(struct team *team,
{
int port_index = team_num_to_port_index(team, hash);
- return team_get_port_by_index_rcu(team, port_index);
+ return team_get_port_by_tx_index_rcu(team, port_index);
}
/* Hash to port mapping select tx port */
diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c
index 169a7bc865b2..370e974f3dca 100644
--- a/drivers/net/team/team_mode_random.c
+++ b/drivers/net/team/team_mode_random.c
@@ -16,8 +16,8 @@ static bool rnd_transmit(struct team *team, struct sk_buff *skb)
struct team_port *port;
int port_index;
- port_index = get_random_u32_below(READ_ONCE(team->en_port_count));
- port = team_get_port_by_index_rcu(team, port_index);
+ port_index = get_random_u32_below(READ_ONCE(team->tx_en_port_count));
+ port = team_get_port_by_tx_index_rcu(team, port_index);
if (unlikely(!port))
goto drop;
port = team_get_first_port_txable_rcu(team, port);
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index dd405d82c6ac..ecbeef28c221 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -27,7 +27,7 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb)
port_index = team_num_to_port_index(team,
rr_priv(team)->sent_packets++);
- port = team_get_port_by_index_rcu(team, port_index);
+ port = team_get_port_by_tx_index_rcu(team, port_index);
if (unlikely(!port))
goto drop;
port = team_get_first_port_txable_rcu(team, port);
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 740cb3100dfc..c777170ef552 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -27,10 +27,10 @@ struct team;
struct team_port {
struct net_device *dev;
- struct hlist_node hlist; /* node in enabled ports hash list */
+ struct hlist_node tx_hlist; /* node in tx-enabled ports hash list */
struct list_head list; /* node in ordinary list */
struct team *team;
- int index; /* index of enabled port. If disabled, it's set to -1 */
+ int tx_index; /* index of tx enabled port. If disabled, -1 */
bool linkup; /* either state.linkup or user.linkup */
@@ -77,7 +77,7 @@ static inline struct team_port *team_port_get_rcu(const struct net_device *dev)
static inline bool team_port_enabled(struct team_port *port)
{
- return READ_ONCE(port->index) != -1;
+ return READ_ONCE(port->tx_index) != -1;
}
static inline bool team_port_txable(struct team_port *port)
@@ -190,10 +190,10 @@ struct team {
const struct header_ops *header_ops_cache;
/*
- * List of enabled ports and their count
+ * List of tx-enabled ports and counts of rx and tx-enabled ports.
*/
- int en_port_count;
- struct hlist_head en_port_hlist[TEAM_PORT_HASHENTRIES];
+ int tx_en_port_count;
+ struct hlist_head tx_en_port_hlist[TEAM_PORT_HASHENTRIES];
struct list_head port_list; /* list of all ports */
@@ -237,41 +237,43 @@ static inline int team_dev_queue_xmit(struct team *team, struct team_port *port,
return dev_queue_xmit(skb);
}
-static inline struct hlist_head *team_port_index_hash(struct team *team,
- int port_index)
+static inline struct hlist_head *team_tx_port_index_hash(struct team *team,
+ int tx_port_index)
{
- return &team->en_port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)];
+ unsigned int list_entry = tx_port_index & (TEAM_PORT_HASHENTRIES - 1);
+
+ return &team->tx_en_port_hlist[list_entry];
}
-static inline struct team_port *team_get_port_by_index(struct team *team,
- int port_index)
+static inline struct team_port *team_get_port_by_tx_index(struct team *team,
+ int tx_port_index)
{
+ struct hlist_head *head = team_tx_port_index_hash(team, tx_port_index);
struct team_port *port;
- struct hlist_head *head = team_port_index_hash(team, port_index);
- hlist_for_each_entry(port, head, hlist)
- if (port->index == port_index)
+ hlist_for_each_entry(port, head, tx_hlist)
+ if (port->tx_index == tx_port_index)
return port;
return NULL;
}
static inline int team_num_to_port_index(struct team *team, unsigned int num)
{
- int en_port_count = READ_ONCE(team->en_port_count);
+ int tx_en_port_count = READ_ONCE(team->tx_en_port_count);
- if (unlikely(!en_port_count))
+ if (unlikely(!tx_en_port_count))
return 0;
- return num % en_port_count;
+ return num % tx_en_port_count;
}
-static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
- int port_index)
+static inline struct team_port *team_get_port_by_tx_index_rcu(struct team *team,
+ int tx_port_index)
{
+ struct hlist_head *head = team_tx_port_index_hash(team, tx_port_index);
struct team_port *port;
- struct hlist_head *head = team_port_index_hash(team, port_index);
- hlist_for_each_entry_rcu(port, head, hlist)
- if (READ_ONCE(port->index) == port_index)
+ hlist_for_each_entry_rcu(port, head, tx_hlist)
+ if (READ_ONCE(port->tx_index) == tx_port_index)
return port;
return NULL;
}
--
2.53.0.1185.g05d4b7b318-goog
© 2016 - 2026 Red Hat, Inc.