The ip_vs_ctl.c file and the associated ip_vs.h file are the only places
in the kernel where HK_TYPE_KTHREAD cpumask is being retrieved and used.
Now that the HK_TYPE_KTHREAD/HK_TYPE_DOMAIN cpumask can be changed at run
time, we need to use RCU to guard access to this cpumask to avoid a
potential use-after-free (UAF) problem, as the returned cpumask may be
freed before it is used.
Signed-off-by: Waiman Long <longman@redhat.com>
---
include/net/ip_vs.h | 20 ++++++++++++++++----
net/netfilter/ipvs/ip_vs_ctl.c | 13 ++++++++-----
2 files changed, 24 insertions(+), 9 deletions(-)
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 29a36709e7f3..17c85a575ef4 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -1155,7 +1155,7 @@ static inline int sysctl_run_estimation(struct netns_ipvs *ipvs)
return ipvs->sysctl_run_estimation;
}
-static inline const struct cpumask *sysctl_est_cpulist(struct netns_ipvs *ipvs)
+static inline const struct cpumask *__sysctl_est_cpulist(struct netns_ipvs *ipvs)
{
if (ipvs->est_cpulist_valid)
return ipvs->sysctl_est_cpulist;
@@ -1273,7 +1273,7 @@ static inline int sysctl_run_estimation(struct netns_ipvs *ipvs)
return 1;
}
-static inline const struct cpumask *sysctl_est_cpulist(struct netns_ipvs *ipvs)
+static inline const struct cpumask *__sysctl_est_cpulist(struct netns_ipvs *ipvs)
{
return housekeeping_cpumask(HK_TYPE_KTHREAD);
}
@@ -1290,6 +1290,18 @@ static inline int sysctl_est_nice(struct netns_ipvs *ipvs)
#endif
+static inline bool sysctl_est_cpulist_empty(struct netns_ipvs *ipvs)
+{
+ guard(rcu)();
+ return cpumask_empty(__sysctl_est_cpulist(ipvs));
+}
+
+static inline unsigned int sysctl_est_cpulist_weight(struct netns_ipvs *ipvs)
+{
+ guard(rcu)();
+ return cpumask_weight(__sysctl_est_cpulist(ipvs));
+}
+
/* IPVS core functions
* (from ip_vs_core.c)
*/
@@ -1604,7 +1616,7 @@ static inline void ip_vs_est_stopped_recalc(struct netns_ipvs *ipvs)
/* Stop tasks while cpulist is empty or if disabled with flag */
ipvs->est_stopped = !sysctl_run_estimation(ipvs) ||
(ipvs->est_cpulist_valid &&
- cpumask_empty(sysctl_est_cpulist(ipvs)));
+ sysctl_est_cpulist_empty(ipvs));
#endif
}
@@ -1620,7 +1632,7 @@ static inline bool ip_vs_est_stopped(struct netns_ipvs *ipvs)
static inline int ip_vs_est_max_threads(struct netns_ipvs *ipvs)
{
unsigned int limit = IPVS_EST_CPU_KTHREADS *
- cpumask_weight(sysctl_est_cpulist(ipvs));
+ sysctl_est_cpulist_weight(ipvs);
return max(1U, limit);
}
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 35642de2a0fe..f38a2e2a9dc5 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1973,11 +1973,14 @@ static int ipvs_proc_est_cpumask_get(const struct ctl_table *table,
mutex_lock(&ipvs->est_mutex);
- if (ipvs->est_cpulist_valid)
- mask = *valp;
- else
- mask = (struct cpumask *)housekeeping_cpumask(HK_TYPE_KTHREAD);
- ret = scnprintf(buffer, size, "%*pbl\n", cpumask_pr_args(mask));
+ /* HK_TYPE_KTHREAD cpumask needs RCU protection */
+ scoped_guard(rcu) {
+ if (ipvs->est_cpulist_valid)
+ mask = *valp;
+ else
+ mask = (struct cpumask *)housekeeping_cpumask(HK_TYPE_KTHREAD);
+ ret = scnprintf(buffer, size, "%*pbl\n", cpumask_pr_args(mask));
+ }
mutex_unlock(&ipvs->est_mutex);
--
2.53.0
Hello,
On Tue, 24 Mar 2026, Waiman Long wrote:
> The ip_vs_ctl.c file and the associated ip_vs.h file are the only places
> in the kernel where HK_TYPE_KTHREAD cpumask is being retrieved and used.
> Now that HK_TYPE_KTHREAD/HK_TYPE_DOMAIN cpumask can be changed at run
> time. We need to use RCU to guard access to this cpumask to avoid a
> potential UAF problem as the returned cpumask may be freed before it
> is being used.
>
> Signed-off-by: Waiman Long <longman@redhat.com>
> ---
> include/net/ip_vs.h | 20 ++++++++++++++++----
> net/netfilter/ipvs/ip_vs_ctl.c | 13 ++++++++-----
> 2 files changed, 24 insertions(+), 9 deletions(-)
>
> diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
> index 29a36709e7f3..17c85a575ef4 100644
> --- a/include/net/ip_vs.h
> +++ b/include/net/ip_vs.h
> @@ -1155,7 +1155,7 @@ static inline int sysctl_run_estimation(struct netns_ipvs *ipvs)
> return ipvs->sysctl_run_estimation;
> }
>
> -static inline const struct cpumask *sysctl_est_cpulist(struct netns_ipvs *ipvs)
> +static inline const struct cpumask *__sysctl_est_cpulist(struct netns_ipvs *ipvs)
> {
> if (ipvs->est_cpulist_valid)
> return ipvs->sysctl_est_cpulist;
> @@ -1273,7 +1273,7 @@ static inline int sysctl_run_estimation(struct netns_ipvs *ipvs)
> return 1;
> }
>
> -static inline const struct cpumask *sysctl_est_cpulist(struct netns_ipvs *ipvs)
> +static inline const struct cpumask *__sysctl_est_cpulist(struct netns_ipvs *ipvs)
> {
> return housekeeping_cpumask(HK_TYPE_KTHREAD);
> }
> @@ -1290,6 +1290,18 @@ static inline int sysctl_est_nice(struct netns_ipvs *ipvs)
>
> #endif
>
	Maybe there is a little fuzz here, due to the recent
changes in the nf-next tree. If this is a bugfix due to the
missing RCU protection, maybe you should add a Fixes line too
and use the nf tree. Probably, there will be fuzz/collisions with
the changes in the nf-next tree...
> +static inline bool sysctl_est_cpulist_empty(struct netns_ipvs *ipvs)
> +{
> + guard(rcu)();
> + return cpumask_empty(__sysctl_est_cpulist(ipvs));
> +}
> +
> +static inline unsigned int sysctl_est_cpulist_weight(struct netns_ipvs *ipvs)
> +{
> + guard(rcu)();
> + return cpumask_weight(__sysctl_est_cpulist(ipvs));
> +}
> +
> /* IPVS core functions
> * (from ip_vs_core.c)
> */
> @@ -1604,7 +1616,7 @@ static inline void ip_vs_est_stopped_recalc(struct netns_ipvs *ipvs)
> /* Stop tasks while cpulist is empty or if disabled with flag */
> ipvs->est_stopped = !sysctl_run_estimation(ipvs) ||
> (ipvs->est_cpulist_valid &&
> - cpumask_empty(sysctl_est_cpulist(ipvs)));
> + sysctl_est_cpulist_empty(ipvs));
> #endif
> }
>
> @@ -1620,7 +1632,7 @@ static inline bool ip_vs_est_stopped(struct netns_ipvs *ipvs)
> static inline int ip_vs_est_max_threads(struct netns_ipvs *ipvs)
> {
> unsigned int limit = IPVS_EST_CPU_KTHREADS *
> - cpumask_weight(sysctl_est_cpulist(ipvs));
> + sysctl_est_cpulist_weight(ipvs);
>
> return max(1U, limit);
> }
> diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
> index 35642de2a0fe..f38a2e2a9dc5 100644
> --- a/net/netfilter/ipvs/ip_vs_ctl.c
> +++ b/net/netfilter/ipvs/ip_vs_ctl.c
> @@ -1973,11 +1973,14 @@ static int ipvs_proc_est_cpumask_get(const struct ctl_table *table,
>
> mutex_lock(&ipvs->est_mutex);
>
> - if (ipvs->est_cpulist_valid)
> - mask = *valp;
> - else
> - mask = (struct cpumask *)housekeeping_cpumask(HK_TYPE_KTHREAD);
> - ret = scnprintf(buffer, size, "%*pbl\n", cpumask_pr_args(mask));
> + /* HK_TYPE_KTHREAD cpumask needs RCU protection */
	Can we switch IPVS to use HK_TYPE_DOMAIN? The initial
intention was to follow the code in kthread.c. Then you can
reconsider whether HK_TYPE_KTHREAD should be an alias of HK_TYPE_DOMAIN —
maybe not, if there are no other users...
> + scoped_guard(rcu) {
> + if (ipvs->est_cpulist_valid)
> + mask = *valp;
> + else
> + mask = (struct cpumask *)housekeeping_cpumask(HK_TYPE_KTHREAD);
> + ret = scnprintf(buffer, size, "%*pbl\n", cpumask_pr_args(mask));
> + }
>
> mutex_unlock(&ipvs->est_mutex);
>
> --
> 2.53.0
Regards
--
Julian Anastasov <ja@ssi.bg>
On 3/26/26 4:32 AM, Julian Anastasov wrote:
> Hello,
>
> On Tue, 24 Mar 2026, Waiman Long wrote:
>
>> The ip_vs_ctl.c file and the associated ip_vs.h file are the only places
>> in the kernel where HK_TYPE_KTHREAD cpumask is being retrieved and used.
>> Now that HK_TYPE_KTHREAD/HK_TYPE_DOMAIN cpumask can be changed at run
>> time. We need to use RCU to guard access to this cpumask to avoid a
>> potential UAF problem as the returned cpumask may be freed before it
>> is being used.
>>
>> Signed-off-by: Waiman Long <longman@redhat.com>
>> ---
>> include/net/ip_vs.h | 20 ++++++++++++++++----
>> net/netfilter/ipvs/ip_vs_ctl.c | 13 ++++++++-----
>> 2 files changed, 24 insertions(+), 9 deletions(-)
>>
>> diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
>> index 29a36709e7f3..17c85a575ef4 100644
>> --- a/include/net/ip_vs.h
>> +++ b/include/net/ip_vs.h
>> @@ -1155,7 +1155,7 @@ static inline int sysctl_run_estimation(struct netns_ipvs *ipvs)
>> return ipvs->sysctl_run_estimation;
>> }
>>
>> -static inline const struct cpumask *sysctl_est_cpulist(struct netns_ipvs *ipvs)
>> +static inline const struct cpumask *__sysctl_est_cpulist(struct netns_ipvs *ipvs)
>> {
>> if (ipvs->est_cpulist_valid)
>> return ipvs->sysctl_est_cpulist;
>> @@ -1273,7 +1273,7 @@ static inline int sysctl_run_estimation(struct netns_ipvs *ipvs)
>> return 1;
>> }
>>
>> -static inline const struct cpumask *sysctl_est_cpulist(struct netns_ipvs *ipvs)
>> +static inline const struct cpumask *__sysctl_est_cpulist(struct netns_ipvs *ipvs)
>> {
>> return housekeeping_cpumask(HK_TYPE_KTHREAD);
>> }
>> @@ -1290,6 +1290,18 @@ static inline int sysctl_est_nice(struct netns_ipvs *ipvs)
>>
>> #endif
>>
> May be there is little fuzz here, due to the recent
> changes in the nf-next tree. If this is a bugfix due to the
> missing RCU protection, may be you should add Fixes line too
> and use the nf tree. Probably, there will be fuzz/collisions with
> the changes in the nf-next tree...
Thanks for the suggestion, I will rebase the patches on top of the
nf-next tree.
>
>> +static inline bool sysctl_est_cpulist_empty(struct netns_ipvs *ipvs)
>> +{
>> + guard(rcu)();
>> + return cpumask_empty(__sysctl_est_cpulist(ipvs));
>> +}
>> +
>> +static inline unsigned int sysctl_est_cpulist_weight(struct netns_ipvs *ipvs)
>> +{
>> + guard(rcu)();
>> + return cpumask_weight(__sysctl_est_cpulist(ipvs));
>> +}
>> +
>> /* IPVS core functions
>> * (from ip_vs_core.c)
>> */
>> @@ -1604,7 +1616,7 @@ static inline void ip_vs_est_stopped_recalc(struct netns_ipvs *ipvs)
>> /* Stop tasks while cpulist is empty or if disabled with flag */
>> ipvs->est_stopped = !sysctl_run_estimation(ipvs) ||
>> (ipvs->est_cpulist_valid &&
>> - cpumask_empty(sysctl_est_cpulist(ipvs)));
>> + sysctl_est_cpulist_empty(ipvs));
>> #endif
>> }
>>
>> @@ -1620,7 +1632,7 @@ static inline bool ip_vs_est_stopped(struct netns_ipvs *ipvs)
>> static inline int ip_vs_est_max_threads(struct netns_ipvs *ipvs)
>> {
>> unsigned int limit = IPVS_EST_CPU_KTHREADS *
>> - cpumask_weight(sysctl_est_cpulist(ipvs));
>> + sysctl_est_cpulist_weight(ipvs);
>>
>> return max(1U, limit);
>> }
>> diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
>> index 35642de2a0fe..f38a2e2a9dc5 100644
>> --- a/net/netfilter/ipvs/ip_vs_ctl.c
>> +++ b/net/netfilter/ipvs/ip_vs_ctl.c
>> @@ -1973,11 +1973,14 @@ static int ipvs_proc_est_cpumask_get(const struct ctl_table *table,
>>
>> mutex_lock(&ipvs->est_mutex);
>>
>> - if (ipvs->est_cpulist_valid)
>> - mask = *valp;
>> - else
>> - mask = (struct cpumask *)housekeeping_cpumask(HK_TYPE_KTHREAD);
>> - ret = scnprintf(buffer, size, "%*pbl\n", cpumask_pr_args(mask));
>> + /* HK_TYPE_KTHREAD cpumask needs RCU protection */
> Can we switch IPVS to use HK_TYPE_DOMAIN? The initial
> intention was to follow the code in kthread.c. Then you can
> reconsider if HK_TYPE_KTHREAD should be alias to HK_TYPE_DOMAIN,
> may be not if there are no other users...
Yes, I can certainly switch to using HK_TYPE_DOMAIN instead. The reason I
keep HK_TYPE_KTHREAD is that it may not be obvious to others that
kthread is now following the HK_TYPE_DOMAIN cpumask;
keeping HK_TYPE_KTHREAD but making it an alias can make that clear.
>> + scoped_guard(rcu) {
>> + if (ipvs->est_cpulist_valid)
>> + mask = *valp;
>> + else
>> + mask = (struct cpumask *)housekeeping_cpumask(HK_TYPE_KTHREAD);
>> + ret = scnprintf(buffer, size, "%*pbl\n", cpumask_pr_args(mask));
>> + }
>>
>> mutex_unlock(&ipvs->est_mutex);
>>
>> --
>> 2.53.0
> Regards
>
> --
> Julian Anastasov <ja@ssi.bg>
>
© 2016 - 2026 Red Hat, Inc.