A later patch will change the MPTCP memory accounting schema
so that MPTCP sockets will track the total amount of forward
allocated memory in two separate fields (one for tx and one
for rx).
MPTCP sockets will then need their own helper to report an
accurate amount of forward allocated memory.
To allow the above, this patch adds a new, optional, sk method to
fetch the forward allocated memory, wraps the call in a new helper
and uses it where appropriate.
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
This schema was suggested a long time ago by Eric in a completely
different context:
https://marc.info/?l=linux-netdev&m=147516056204838&w=2
I'm unsure whether it is still applicable/valid.
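
For context, a minimal sketch of how an MPTCP implementation could
wire up the new hook. The rmem_fwd_alloc field and the
mptcp_forward_alloc_get name are illustrative only; the actual MPTCP
change comes with the later patch mentioned above:

static int mptcp_forward_alloc_get(const struct sock *sk)
{
	/* tx forward allocated memory stays in the generic sk field,
	 * rx forward allocated memory lives in a protocol-private
	 * field (illustrative name)
	 */
	return sk->sk_forward_alloc + mptcp_sk(sk)->rmem_fwd_alloc;
}

static struct proto mptcp_prot = {
	.name			= "MPTCP",
	.forward_alloc_get	= mptcp_forward_alloc_get,
	/* remaining ops elided */
};

Protocols that do not set ->forward_alloc_get keep the existing
behavior: sk_forward_alloc_get() falls back to sk->sk_forward_alloc.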
---
include/net/sock.h | 11 +++++++++++
net/ipv4/af_inet.c | 2 +-
net/ipv4/inet_diag.c | 2 +-
net/sched/em_meta.c | 2 +-
4 files changed, 14 insertions(+), 3 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 9c5d0502090f..86ea60df6e84 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1205,6 +1205,8 @@ struct proto {
unsigned int inuse_idx;
#endif
+ int (*forward_alloc_get)(const struct sock *sk);
+
bool (*stream_memory_free)(const struct sock *sk, int wake);
bool (*stream_memory_read)(const struct sock *sk);
/* Memory pressure */
@@ -1212,6 +1214,7 @@ struct proto {
void (*leave_memory_pressure)(struct sock *sk);
atomic_long_t *memory_allocated; /* Current allocated memory. */
struct percpu_counter *sockets_allocated; /* Current number of sockets. */
+
/*
* Pressure flag: try to collapse.
* Technical note: it is used by multiple contexts non atomically.
@@ -1289,6 +1292,14 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
INDIRECT_CALLABLE_DECLARE(bool tcp_stream_memory_free(const struct sock *sk, int wake));
+static inline int sk_forward_alloc_get(const struct sock *sk)
+{
+ if (!sk->sk_prot->forward_alloc_get)
+ return sk->sk_forward_alloc;
+
+ return sk->sk_prot->forward_alloc_get(sk);
+}
+
static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
{
if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 37e69fd9246c..55f862abe811 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -150,7 +150,7 @@ void inet_sock_destruct(struct sock *sk)
WARN_ON(atomic_read(&sk->sk_rmem_alloc));
WARN_ON(refcount_read(&sk->sk_wmem_alloc));
WARN_ON(sk->sk_wmem_queued);
- WARN_ON(sk->sk_forward_alloc);
+ WARN_ON(sk_forward_alloc_get(sk));
kfree(rcu_dereference_protected(inet->inet_opt, 1));
dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index ef7897226f08..c8fa6e7f7d12 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -271,7 +271,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
struct inet_diag_meminfo minfo = {
.idiag_rmem = sk_rmem_alloc_get(sk),
.idiag_wmem = READ_ONCE(sk->sk_wmem_queued),
- .idiag_fmem = sk->sk_forward_alloc,
+ .idiag_fmem = sk_forward_alloc_get(sk),
.idiag_tmem = sk_wmem_alloc_get(sk),
};
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 46254968d390..0a04468b7314 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -457,7 +457,7 @@ META_COLLECTOR(int_sk_fwd_alloc)
*err = -1;
return;
}
- dst->value = sk->sk_forward_alloc;
+ dst->value = sk_forward_alloc_get(sk);
}
META_COLLECTOR(int_sk_sndbuf)
--
2.26.3