>>Happy to see your reply. But Why? 'delete' is false from tipc_node_timeout(). Refer to:
>>https://elixir.bootlin.com/linux/v6.7-rc2/source/net/tipc/node.c#L844
>I should have explained it clearly:
>1/ link status must be protected.
>tipc_node_timeout()
> tipc_node_link_down()
> {
> struct tipc_link *l = le->link;
>
> ...
> __tipc_node_link_down(); <-- link status is referred.
> ...
> if (delete) {
> kfree(l);
> le->link = NULL;
> }
> ...
> }
>
>__tipc_node_link_down()
>{
> ...
> if (!l || tipc_link_is_reset(l)) <-- read link status
> ...
> tipc_link_reset(l); <--- this function will reset all things related to link.
>}
>
>2/ le->link must be protected.
>bearer_disable()
>{
> ...
> tipc_node_delete_links(net, bearer_id); <--- this will delete all links.
> ...
>}
>
>tipc_node_delete_links()
>{
> ...
> tipc_node_link_down(n, bearer_id, true);
> ...
>}
Could we please solve the problem mentioned above by adding a spinlock (&le->lock)?
For example:
(BTW, I have tested it; with this change, enabling RPS based on the tipc port can improve general throughput by 25%)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 3105abe97bb9..470c272d798e 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1079,12 +1079,16 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
__tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
} else {
/* Defuse pending tipc_node_link_up() */
+ spin_lock_bh(&le->lock);
tipc_link_reset(l);
+ spin_unlock_bh(&le->lock);
tipc_link_fsm_evt(l, LINK_RESET_EVT);
}
if (delete) {
+ spin_lock_bh(&le->lock);
kfree(l);
le->link = NULL;
+ spin_unlock_bh(&le->lock);
n->link_cnt--;
}
trace_tipc_node_link_down(n, true, "node link down or deleted!");
@@ -2154,14 +2158,15 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
/* Receive packet directly if conditions permit */
tipc_node_read_lock(n);
if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
+ tipc_node_read_unlock(n);
spin_lock_bh(&le->lock);
if (le->link) {
rc = tipc_link_rcv(le->link, skb, &xmitq);
skb = NULL;
}
spin_unlock_bh(&le->lock);
- }
- tipc_node_read_unlock(n);
+ } else
+ tipc_node_read_unlock(n);
/* Check/update node state before receiving */
if (unlikely(skb)) {
@@ -2169,12 +2174,13 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
goto out_node_put;
tipc_node_write_lock(n);
if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
+ tipc_node_write_unlock(n);
if (le->link) {
rc = tipc_link_rcv(le->link, skb, &xmitq);
skb = NULL;
}
- }
- tipc_node_write_unlock(n);
+ } else
+ tipc_node_write_unlock(n);
}
if (unlikely(rc & TIPC_LINK_UP_EVT))
>Could we please solve the problem mentioned above by adding spinlock(&le->lock)?
>
No, you cannot do that. As I said before, the link status (including l->state) needs to be protected by the node lock.
What I showed you were just two use cases (link reset/delete). There are more use cases (netlink, the transmit path, etc.) that need proper locks.
>For example:
>
>(BTW, I have tested it, with this change, enabling RPS based on tipc port can improve 25% of general throughput)
>
>diff --git a/net/tipc/node.c b/net/tipc/node.c index 3105abe97bb9..470c272d798e 100644
>--- a/net/tipc/node.c
>+++ b/net/tipc/node.c
>@@ -1079,12 +1079,16 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
> __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
> } else {
> /* Defuse pending tipc_node_link_up() */
>+ spin_lock_bh(&le->lock);
> tipc_link_reset(l);
>+ spin_unlock_bh(&le->lock);
> tipc_link_fsm_evt(l, LINK_RESET_EVT);
> }
> if (delete) {
>+ spin_lock_bh(&le->lock);
> kfree(l);
> le->link = NULL;
>+ spin_unlock_bh(&le->lock);
> n->link_cnt--;
> }
> trace_tipc_node_link_down(n, true, "node link down or deleted!"); @@ -2154,14 +2158,15 @@ void tipc_rcv(struct net *net,
>struct sk_buff *skb, struct tipc_bearer *b)
> /* Receive packet directly if conditions permit */
> tipc_node_read_lock(n);
> if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
>+ tipc_node_read_unlock(n);
> spin_lock_bh(&le->lock);
> if (le->link) {
> rc = tipc_link_rcv(le->link, skb, &xmitq);
> skb = NULL;
> }
> spin_unlock_bh(&le->lock);
>- }
>- tipc_node_read_unlock(n);
>+ } else
>+ tipc_node_read_unlock(n);
>
> /* Check/update node state before receiving */
> if (unlikely(skb)) {
>@@ -2169,12 +2174,13 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
> goto out_node_put;
> tipc_node_write_lock(n);
> if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
>+ tipc_node_write_unlock(n);
> if (le->link) {
> rc = tipc_link_rcv(le->link, skb, &xmitq);
> skb = NULL;
> }
>- }
>- tipc_node_write_unlock(n);
>+ } else
>+ tipc_node_write_unlock(n);
> }
>
> if (unlikely(rc & TIPC_LINK_UP_EVT))
>>Could we please solve the problem mentioned above by adding spinlock(&le->lock)?
>>
>
>No, you cannot do that. As I said before, the link status (including l->state) needs to be protected by node lock.
Why can't we use le->lock instead of the node's lock to protect it in tipc_link_rcv?
>What I showed you were just 2 use cases (link reset/delete). There are more use cases (netlink, transmit path etc) that need proper locks.
The same approach applies. We can also add spin_lock_bh(&le->lock) to protect the link in the other places that change the
link status, in addition to 'reset/delete'. Using the node lock to protect the link in tipc_link_rcv
really wastes CPU performance.
>
>>For example:
>>
>>(BTW, I have tested it, with this change, enabling RPS based on tipc port can improve 25% of general throughput)
>>
>>diff --git a/net/tipc/node.c b/net/tipc/node.c index 3105abe97bb9..470c272d798e 100644
>>--- a/net/tipc/node.c
>>+++ b/net/tipc/node.c
>>@@ -1079,12 +1079,16 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
>> __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
>> } else {
>> /* Defuse pending tipc_node_link_up() */
>>+ spin_lock_bh(&le->lock);
>> tipc_link_reset(l);
>>+ spin_unlock_bh(&le->lock);
>> tipc_link_fsm_evt(l, LINK_RESET_EVT);
>> }
>> if (delete) {
>>+ spin_lock_bh(&le->lock);
>> kfree(l);
>> le->link = NULL;
>>+ spin_unlock_bh(&le->lock);
>> n->link_cnt--;
>> }
>> trace_tipc_node_link_down(n, true, "node link down or deleted!"); @@ -2154,14 +2158,15 @@ void tipc_rcv(struct net *net,
>>struct sk_buff *skb, struct tipc_bearer *b)
>> /* Receive packet directly if conditions permit */
>> tipc_node_read_lock(n);
>> if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
>>+ tipc_node_read_unlock(n);
>> spin_lock_bh(&le->lock);
>> if (le->link) {
>> rc = tipc_link_rcv(le->link, skb, &xmitq);
>> skb = NULL;
>> }
>> spin_unlock_bh(&le->lock);
>>- }
>>- tipc_node_read_unlock(n);
>>+ } else
>>+ tipc_node_read_unlock(n);
>>
>> /* Check/update node state before receiving */
>> if (unlikely(skb)) {
>>@@ -2169,12 +2174,13 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
>> goto out_node_put;
>> tipc_node_write_lock(n);
>> if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
>>+ tipc_node_write_unlock(n);
>> if (le->link) {
>> rc = tipc_link_rcv(le->link, skb, &xmitq);
>> skb = NULL;
>> }
>>- }
>>- tipc_node_write_unlock(n);
>>+ } else
>>+ tipc_node_write_unlock(n);
>> }
>>
>> if (unlikely(rc & TIPC_LINK_UP_EVT))
>Why can't use le->lock instead of node's lock to protect it in tipc_link_rcv.
>
I have already explained:
__tipc_node_link_down()
{
...
if (!l || tipc_link_is_reset(l)) <-- read link status
...
}
>>What I showed you were just 2 use cases (link reset/delete). There are more use cases (netlink, transmit path etc) that need proper
>locks.
>
>The same. We can also add spin_lock_bh(&le->lock) to protect the link in other places where it changes the link status in addition to
>'reset/delete'. Because using node lock to protect the link in tipc_link_rcv is really wasting CPU performance.
>
If you want to change the current lock policy, you need to submit a complete and correct patch. I will acknowledge such a patch if I can see a significant improvement in my tests.
© 2016 - 2025 Red Hat, Inc.