Drop struct mptcp_sched_subflow and store mptcp_subflow_context pointers
directly in mptcp_sched_data instead, moving the per-subflow 'scheduled'
flag into mptcp_subflow_context itself.

This patch should be squashed into "BPF packet scheduler" v3.
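For reference, with the new layout a BPF scheduler selects a subflow by
setting the 'scheduled' bit on the chosen context. A minimal sketch,
mirroring the mptcp_bpf_first selftest below (the op name here is only
illustrative):

	void BPF_STRUCT_OPS(bpf_example_get_subflow, const struct mptcp_sock *msk,
			    struct mptcp_sched_data *data)
	{
		/* pick the first subflow context, if any */
		if (data->contexts[0])
			data->contexts[0]->scheduled = 1;
	}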
Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
include/net/mptcp.h | 7 +---
net/mptcp/bpf.c | 34 +++++++++----------
net/mptcp/protocol.h | 1 +
net/mptcp/sched.c | 21 +++++-------
tools/testing/selftests/bpf/bpf_tcp_helpers.h | 8 ++---
.../selftests/bpf/progs/mptcp_bpf_backup.c | 6 ++--
.../selftests/bpf/progs/mptcp_bpf_first.c | 2 +-
.../selftests/bpf/progs/mptcp_bpf_rr.c | 8 ++---
8 files changed, 36 insertions(+), 51 deletions(-)
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index 8a53583a9745..7af7fd48acc7 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -99,14 +99,9 @@ struct mptcp_out_options {
#define MPTCP_SCHED_NAME_MAX 16
#define MPTCP_SUBFLOWS_MAX 8
-struct mptcp_sched_subflow {
- struct mptcp_subflow_context *context;
- bool is_scheduled;
-};
-
struct mptcp_sched_data {
bool reinject;
- struct mptcp_sched_subflow subflows[MPTCP_SUBFLOWS_MAX];
+ struct mptcp_subflow_context *contexts[MPTCP_SUBFLOWS_MAX];
};
struct mptcp_sched_ops {
diff --git a/net/mptcp/bpf.c b/net/mptcp/bpf.c
index 4f4559d37f9e..0529e70d53b1 100644
--- a/net/mptcp/bpf.c
+++ b/net/mptcp/bpf.c
@@ -33,12 +33,6 @@ bpf_mptcp_sched_get_func_proto(enum bpf_func_id func_id,
return bpf_base_func_proto(func_id);
}
-static size_t subflow_offset(int i)
-{
- return offsetof(struct mptcp_sched_data, subflows) +
- i * sizeof(struct mptcp_sched_subflow);
-}
-
static int bpf_mptcp_sched_btf_struct_access(struct bpf_verifier_log *log,
const struct btf *btf,
const struct btf_type *t, int off,
@@ -46,8 +40,7 @@ static int bpf_mptcp_sched_btf_struct_access(struct bpf_verifier_log *log,
u32 *next_btf_id,
enum bpf_type_flag *flag)
{
- size_t start, end, soff;
- int i;
+ size_t end;
if (atype == BPF_READ) {
return btf_struct_access(log, btf, t, off, size, atype,
@@ -55,21 +48,26 @@ static int bpf_mptcp_sched_btf_struct_access(struct bpf_verifier_log *log,
}
if (t != mptcp_sched_type) {
- bpf_log(log, "only access to mptcp_sched_data is supported\n");
+ bpf_log(log, "only access to mptcp_subflow_context is supported\n");
return -EACCES;
}
- start = offsetof(struct mptcp_sched_subflow, is_scheduled);
- end = offsetofend(struct mptcp_sched_subflow, is_scheduled);
+ switch (off) {
+ case offsetof(struct mptcp_subflow_context, scheduled):
+ end = offsetofend(struct mptcp_subflow_context, scheduled);
+ break;
+ default:
+ bpf_log(log, "no write support to mptcp_subflow_context at off %d\n", off);
+ return -EACCES;
+ }
- for (i = 0; i < MPTCP_SUBFLOWS_MAX; i++) {
- soff = subflow_offset(i);
- if (off == soff + start && off + size <= soff + end)
- return NOT_INIT; /* offsets match up with is_scheduled */
+ if (off + size > end) {
+ bpf_log(log, "access beyond mptcp_subflow_context at off %u size %u ended at %zu",
+ off, size, end);
+ return -EACCES;
}
- bpf_log(log, "no write support to mptcp_sched_data at off %d\n", off);
- return -EACCES;
+ return NOT_INIT;
}
static const struct bpf_verifier_ops bpf_mptcp_sched_verifier_ops = {
@@ -144,7 +142,7 @@ static int bpf_mptcp_sched_init(struct btf *btf)
{
s32 type_id;
- type_id = btf_find_by_name_kind(btf, "mptcp_sched_data",
+ type_id = btf_find_by_name_kind(btf, "mptcp_subflow_context",
BTF_KIND_STRUCT);
if (type_id < 0)
return -EINVAL;
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 8739794166d8..48c5261b7b15 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -469,6 +469,7 @@ struct mptcp_subflow_context {
valid_csum_seen : 1; /* at least one csum validated */
enum mptcp_data_avail data_avail;
bool mp_fail_response_expect;
+ bool scheduled;
u32 remote_nonce;
u64 thmac;
u32 local_nonce;
diff --git a/net/mptcp/sched.c b/net/mptcp/sched.c
index 46396eed62d0..613b7005938c 100644
--- a/net/mptcp/sched.c
+++ b/net/mptcp/sched.c
@@ -101,15 +101,12 @@ static int mptcp_sched_data_init(struct mptcp_sock *msk, bool reinject,
pr_warn_once("too many subflows");
break;
}
- data->subflows[i].context = subflow;
- data->subflows[i].is_scheduled = 0;
- i++;
+ WRITE_ONCE(subflow->scheduled, false);
+ data->contexts[i++] = subflow;
}
- for (; i < MPTCP_SUBFLOWS_MAX; i++) {
- data->subflows[i].context = NULL;
- data->subflows[i].is_scheduled = 0;
- }
+ for (; i < MPTCP_SUBFLOWS_MAX; i++)
+ data->contexts[i] = NULL;
return 0;
}
@@ -136,9 +133,8 @@ struct sock *mptcp_sched_get_send(struct mptcp_sock *msk)
msk->sched->get_subflow(msk, &data);
for (i = 0; i < MPTCP_SUBFLOWS_MAX; i++) {
- if (data.subflows[i].is_scheduled &&
- data.subflows[i].context) {
- ssk = data.subflows[i].context->tcp_sock;
+ if (data.contexts[i] && READ_ONCE(data.contexts[i]->scheduled)) {
+ ssk = data.contexts[i]->tcp_sock;
msk->last_snd = ssk;
break;
}
@@ -166,9 +162,8 @@ struct sock *mptcp_sched_get_retrans(struct mptcp_sock *msk)
msk->sched->get_subflow(msk, &data);
for (i = 0; i < MPTCP_SUBFLOWS_MAX; i++) {
- if (data.subflows[i].is_scheduled &&
- data.subflows[i].context) {
- ssk = data.subflows[i].context->tcp_sock;
+ if (data.contexts[i] && READ_ONCE(data.contexts[i]->scheduled)) {
+ ssk = data.contexts[i]->tcp_sock;
msk->last_snd = ssk;
break;
}
diff --git a/tools/testing/selftests/bpf/bpf_tcp_helpers.h b/tools/testing/selftests/bpf/bpf_tcp_helpers.h
index 480be2ea7d59..915f5735ebbd 100644
--- a/tools/testing/selftests/bpf/bpf_tcp_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_tcp_helpers.h
@@ -237,17 +237,13 @@ struct mptcp_subflow_context {
__u32 token;
__u32 padding : 12,
backup : 1;
+ bool scheduled;
struct sock *tcp_sock; /* tcp sk backpointer */
} __attribute__((preserve_access_index));
-struct mptcp_sched_subflow {
- struct mptcp_subflow_context *context;
- bool is_scheduled;
-};
-
struct mptcp_sched_data {
bool reinject;
- struct mptcp_sched_subflow subflows[MPTCP_SUBFLOWS_MAX];
+ struct mptcp_subflow_context *contexts[MPTCP_SUBFLOWS_MAX];
};
struct mptcp_sched_ops {
diff --git a/tools/testing/selftests/bpf/progs/mptcp_bpf_backup.c b/tools/testing/selftests/bpf/progs/mptcp_bpf_backup.c
index 4f394e971c03..088dbc4be99a 100644
--- a/tools/testing/selftests/bpf/progs/mptcp_bpf_backup.c
+++ b/tools/testing/selftests/bpf/progs/mptcp_bpf_backup.c
@@ -22,16 +22,16 @@ void BPF_STRUCT_OPS(bpf_backup_get_subflow, const struct mptcp_sock *msk,
int nr = 0;
for (int i = 0; i < MPTCP_SUBFLOWS_MAX; i++) {
- if (!data->subflows[i].context)
+ if (!data->contexts[i])
break;
- if (!data->subflows[i].context->backup) {
+ if (!data->contexts[i]->backup) {
nr = i;
break;
}
}
- data->subflows[nr].is_scheduled = 1;
+ data->contexts[nr]->scheduled = 1;
}
SEC(".struct_ops")
diff --git a/tools/testing/selftests/bpf/progs/mptcp_bpf_first.c b/tools/testing/selftests/bpf/progs/mptcp_bpf_first.c
index 0baacd8b6426..5f866f51ac70 100644
--- a/tools/testing/selftests/bpf/progs/mptcp_bpf_first.c
+++ b/tools/testing/selftests/bpf/progs/mptcp_bpf_first.c
@@ -19,7 +19,7 @@ void BPF_PROG(mptcp_sched_first_release, const struct mptcp_sock *msk)
void BPF_STRUCT_OPS(bpf_first_get_subflow, const struct mptcp_sock *msk,
struct mptcp_sched_data *data)
{
- data->subflows[0].is_scheduled = 1;
+ data->contexts[0]->scheduled = 1;
}
SEC(".struct_ops")
diff --git a/tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c b/tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c
index de0d893e08b4..03c1032bc31a 100644
--- a/tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c
+++ b/tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c
@@ -22,11 +22,11 @@ void BPF_STRUCT_OPS(bpf_rr_get_subflow, const struct mptcp_sock *msk,
int nr = 0;
for (int i = 0; i < MPTCP_SUBFLOWS_MAX; i++) {
- if (!msk->last_snd || !data->subflows[i].context)
+ if (!msk->last_snd || !data->contexts[i])
break;
- if (data->subflows[i].context->tcp_sock == msk->last_snd) {
- if (i + 1 == MPTCP_SUBFLOWS_MAX || !data->subflows[i + 1].context)
+ if (data->contexts[i]->tcp_sock == msk->last_snd) {
+ if (i + 1 == MPTCP_SUBFLOWS_MAX || !data->contexts[i + 1])
break;
nr = i + 1;
@@ -34,7 +34,7 @@ void BPF_STRUCT_OPS(bpf_rr_get_subflow, const struct mptcp_sock *msk,
}
}
- data->subflows[nr].is_scheduled = 1;
+ data->contexts[nr]->scheduled = 1;
}
SEC(".struct_ops")
--
2.34.1
Hello,

On Sun, 2022-05-29 at 10:21 +0800, Geliang Tang wrote:
> This patch should be squashed into "BPF packet scheduler" v3.
>
> Signed-off-by: Geliang Tang <geliang.tang@suse.com>

It looks like this patch should be squashed into several existing ones.
Perhaps it is simpler if you provided a git tree to pull the new revision
for this series?

Thanks!

Paolo
> Hello,
>
> On Sun, 2022-05-29 at 10:21 +0800, Geliang Tang wrote:
> > This patch should be squashed into "BPF packet scheduler" v3.
> >
> > Signed-off-by: Geliang Tang <geliang.tang@suse.com>
>
> It looks like this patch should be squashed into several existing ones.
> Perhaps it is simpler if you provided a git tree to pull the new revision
> for this series?

Thanks Paolo, I merged it in v4, and here's my git tree with the full
patches: https://github.com/geliangtang/mptcp_net-next

-Geliang

>
> Thanks!
>
> Paolo
>