A page_pool page may be freed from skb_defer_free_flush() in
softirq context without being bound to any specific napi. This
can cause a use-after-free problem in the time window shown
below, where CPU1 may still access napi->list_owner after CPU0
has freed the napi memory:
              CPU 0                                 CPU1
      page_pool_destroy()                  skb_defer_free_flush()
              .                                      .
              .                       napi = READ_ONCE(pool->p.napi);
              .                                      .
page_pool_disable_direct_recycling()                 .
    driver free napi memory                          .
              .                                      .
              .             napi && READ_ONCE(napi->list_owner) == cpuid
              .                                      .
Use the RCU mechanism to avoid the above problem.
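A condensed sketch of how the two sides are intended to pair up
(simplified from the diff below; driver teardown details will vary):

    /* softirq reader: skb_defer_free_flush() -> page_pool_napi_local() */
    rcu_read_lock();
    napi = READ_ONCE(pool->p.napi);
    napi_local = napi && READ_ONCE(napi->list_owner) == cpuid;
    rcu_read_unlock();

    /* destroy path: page_pool_destroy() */
    page_pool_disable_direct_recycling(pool); /* clears pool->p.napi */
    synchronize_rcu(); /* waits for any reader still inside the block above */
    /* only after this returns may the driver free the napi memory */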
Note: the above was found during code review of how to fix the
problem in [1].
1. https://lore.kernel.org/lkml/8067f204-1380-4d37-8ffd-007fc6f26738@kernel.org/T/
Fixes: dd64b232deb8 ("page_pool: unlink from napi during destroy")
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
CC: Alexander Lobakin <aleksander.lobakin@intel.com>
---
As the IOMMU fix patch depends on the synchronize_rcu() added in this
patch, and the time window is so small that this doesn't seem to be an
urgent fix, target net-next as the IOMMU fix patch does.
---
net/core/page_pool.c | 15 ++++++++++++++-
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index a813d30d2135..dd497f5c927d 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -795,6 +795,7 @@ __page_pool_put_page(struct page_pool *pool, netmem_ref netmem,
static bool page_pool_napi_local(const struct page_pool *pool)
{
const struct napi_struct *napi;
+ bool napi_local;
u32 cpuid;
if (unlikely(!in_softirq()))
@@ -810,9 +811,15 @@ static bool page_pool_napi_local(const struct page_pool *pool)
if (READ_ONCE(pool->cpuid) == cpuid)
return true;
+ /* Synchronized with page_pool_destroy() to avoid use-after-free
+ * of 'napi'.
+ */
+ rcu_read_lock();
napi = READ_ONCE(pool->p.napi);
+ napi_local = napi && READ_ONCE(napi->list_owner) == cpuid;
+ rcu_read_unlock();
- return napi && READ_ONCE(napi->list_owner) == cpuid;
+ return napi_local;
}
void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
@@ -1126,6 +1133,12 @@ void page_pool_destroy(struct page_pool *pool)
if (!page_pool_release(pool))
return;
+ /* Paired with the RCU read lock in page_pool_napi_local() to ensure
+ * the clearing of pool->p.napi in page_pool_disable_direct_recycling()
+ * is seen before returning to the driver to free the napi instance.
+ */
+ synchronize_rcu();
+
page_pool_detached(pool);
pool->defer_start = jiffies;
pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
--
2.33.0
On Tue, 22 Oct 2024 11:22:12 +0800, Yunsheng Lin <linyunsheng@huawei.com> wrote:
> [...]
> Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>

Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>