This reduces contention on pidmap_lock during parallel clone/exit.

Even so, the lock remains the primary bottleneck in this workload.

While here, tidy up the code.
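
In rough terms the allocation section goes from taking pidmap_lock (and
preloading the idr) once per namespace level to doing it once for all
levels; a sketch with error handling elided:

	/* before: preload + lock/unlock inside the per-level loop */
	for (i = ns->level; i >= 0; i--) {
		idr_preload(GFP_KERNEL);
		spin_lock(&pidmap_lock);
		nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min, pid_max, GFP_ATOMIC);
		spin_unlock(&pidmap_lock);
		idr_preload_end();
	}

	/* after: one preload and one lock acquisition cover all levels */
	idr_preload_many(ns->level + 1, GFP_KERNEL);
	spin_lock(&pidmap_lock);
	for (i = ns->level; i >= 0; i--)
		nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min, pid_max[ns->level - i], GFP_ATOMIC);
	spin_unlock(&pidmap_lock);
	idr_preload_end();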
Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
---
kernel/pid.c | 99 +++++++++++++++++++++++++++++-----------------------
1 file changed, 55 insertions(+), 44 deletions(-)
diff --git a/kernel/pid.c b/kernel/pid.c
index a31771bc89c1..6a87ce5a6040 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -159,9 +159,11 @@ void free_pids(struct pid **pids)
free_pid(pids[tmp]);
}
-struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
- size_t set_tid_size)
+struct pid *alloc_pid(struct pid_namespace *ns, pid_t *arg_set_tid,
+ size_t arg_set_tid_size)
{
+ int set_tid[MAX_PID_NS_LEVEL + 1] = {};
+ int pid_max[MAX_PID_NS_LEVEL + 1] = {};
struct pid *pid;
enum pid_type type;
int i, nr;
@@ -170,47 +172,71 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
int retval = -ENOMEM;
/*
- * set_tid_size contains the size of the set_tid array. Starting at
+ * arg_set_tid_size contains the size of the arg_set_tid array. Starting at
* the most nested currently active PID namespace it tells alloc_pid()
* which PID to set for a process in that most nested PID namespace
- * up to set_tid_size PID namespaces. It does not have to set the PID
- * for a process in all nested PID namespaces but set_tid_size must
+ * up to arg_set_tid_size PID namespaces. It does not have to set the PID
+ * for a process in all nested PID namespaces but arg_set_tid_size must
* never be greater than the current ns->level + 1.
*/
- if (set_tid_size > ns->level + 1)
+ if (arg_set_tid_size > ns->level + 1)
return ERR_PTR(-EINVAL);
+ /*
+ * Prep before we take locks:
+ *
+ * 1. allocate and fill in pid struct
+ */
pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
if (!pid)
return ERR_PTR(retval);
- tmp = ns;
+ get_pid_ns(ns);
pid->level = ns->level;
+ refcount_set(&pid->count, 1);
+ spin_lock_init(&pid->lock);
+ for (type = 0; type < PIDTYPE_MAX; ++type)
+ INIT_HLIST_HEAD(&pid->tasks[type]);
+ init_waitqueue_head(&pid->wait_pidfd);
+ INIT_HLIST_HEAD(&pid->inodes);
- for (i = ns->level; i >= 0; i--) {
- int tid = 0;
- int pid_max = READ_ONCE(tmp->pid_max);
+	/*
+	 * 2. permission check via checkpoint_restore_ns_capable()
+	 *
+	 * This also records the pid_max value for each level so that the
+	 * allocation loop below uses the same value validated here.
+	 */
+ for (tmp = ns, i = ns->level; i >= 0; i--) {
+ pid_max[ns->level - i] = READ_ONCE(tmp->pid_max);
- if (set_tid_size) {
- tid = set_tid[ns->level - i];
+ if (arg_set_tid_size) {
+ int tid = set_tid[ns->level - i] = arg_set_tid[ns->level - i];
retval = -EINVAL;
- if (tid < 1 || tid >= pid_max)
- goto out_free;
+ if (tid < 1 || tid >= pid_max[ns->level - i])
+ goto out_abort;
/*
* Also fail if a PID != 1 is requested and
* no PID 1 exists.
*/
if (tid != 1 && !tmp->child_reaper)
- goto out_free;
+ goto out_abort;
retval = -EPERM;
if (!checkpoint_restore_ns_capable(tmp->user_ns))
- goto out_free;
- set_tid_size--;
+ goto out_abort;
+ arg_set_tid_size--;
}
- idr_preload(GFP_KERNEL);
- spin_lock(&pidmap_lock);
+ tmp = tmp->parent;
+ }
+
+ /*
+ * Prep is done, id allocation goes here:
+ */
+ idr_preload_many(ns->level + 1, GFP_KERNEL);
+ spin_lock(&pidmap_lock);
+ for (tmp = ns, i = ns->level; i >= 0; i--) {
+ int tid = set_tid[ns->level - i];
if (tid) {
nr = idr_alloc(&tmp->idr, NULL, tid,
@@ -235,10 +261,8 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
* a partially initialized PID (see below).
*/
nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
- pid_max, GFP_ATOMIC);
+ pid_max[ns->level - i], GFP_ATOMIC);
}
- spin_unlock(&pidmap_lock);
- idr_preload_end();
if (nr < 0) {
retval = (nr == -ENOSPC) ? -EAGAIN : nr;
@@ -257,25 +281,15 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
* is what we have exposed to userspace for a long time and it is
* documented behavior for pid namespaces. So we can't easily
* change it even if there were an error code better suited.
+ *
+ * This can't be done earlier because we need to preserve other
+ * error conditions.
*/
retval = -ENOMEM;
-
- get_pid_ns(ns);
- refcount_set(&pid->count, 1);
- spin_lock_init(&pid->lock);
- for (type = 0; type < PIDTYPE_MAX; ++type)
- INIT_HLIST_HEAD(&pid->tasks[type]);
-
- init_waitqueue_head(&pid->wait_pidfd);
- INIT_HLIST_HEAD(&pid->inodes);
-
- upid = pid->numbers + ns->level;
- idr_preload(GFP_KERNEL);
- spin_lock(&pidmap_lock);
- if (!(ns->pid_allocated & PIDNS_ADDING))
- goto out_unlock;
+ if (unlikely(!(ns->pid_allocated & PIDNS_ADDING)))
+ goto out_free;
pidfs_add_pid(pid);
- for ( ; upid >= pid->numbers; --upid) {
+ for (upid = pid->numbers + ns->level; upid >= pid->numbers; --upid) {
/* Make the PID visible to find_pid_ns. */
idr_replace(&upid->ns->idr, pid, upid->nr);
upid->ns->pid_allocated++;
@@ -286,13 +300,7 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
return pid;
-out_unlock:
- spin_unlock(&pidmap_lock);
- idr_preload_end();
- put_pid_ns(ns);
-
out_free:
- spin_lock(&pidmap_lock);
while (++i <= ns->level) {
upid = pid->numbers + i;
idr_remove(&upid->ns->idr, upid->nr);
@@ -303,7 +311,10 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
idr_set_cursor(&ns->idr, 0);
spin_unlock(&pidmap_lock);
+ idr_preload_end();
+out_abort:
+ put_pid_ns(ns);
kmem_cache_free(ns->pid_cachep, pid);
return ERR_PTR(retval);
}
--
2.48.1
On 11/23, Mateusz Guzik wrote:
>
> This reduces contention on pidmap_lock during parallel clone/exit.
>
> Even so, the lock remains the primary bottleneck in this workload.
>
> While here, tidy up the code.
Not sure I can review... But FWIW this patch looks good to me after a
very quick glance. I'll try to actually read it tomorrow.
But please find a couple of minor "can't resist" nits below.
> + for (tmp = ns, i = ns->level; i >= 0; i--) {
> + int tid = set_tid[ns->level - i];
>
> if (tid) {
> nr = idr_alloc(&tmp->idr, NULL, tid,
> @@ -235,10 +261,8 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
> * a partially initialized PID (see below).
> */
> nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
> - pid_max, GFP_ATOMIC);
> + pid_max[ns->level - i], GFP_ATOMIC);
> }
> - spin_unlock(&pidmap_lock);
> - idr_preload_end();
>
> if (nr < 0) {
> retval = (nr == -ENOSPC) ? -EAGAIN : nr;
So. With or without this patch we have
if (tid) {
nr = idr_alloc(...);
if (nr == -ENOSPC)
nr = -EEXIST;
} else {
nr = idr_alloc_cyclic(...);
}
if (nr < 0) {
retval = (nr == -ENOSPC) ? -EAGAIN : nr;
goto out_free;
}
and somehow this looks annoying to me... Perhaps it makes sense to make this
code more symmetric (and imo more readable)?
if (tid) {
nr = idr_alloc(...);
if (nr == -ENOSPC)
nr = -EEXIST;
} else {
nr = idr_alloc_cyclic(...);
if (nr == -ENOSPC)
nr = -EAGAIN;
}
	if (nr < 0) {
		retval = nr;
		goto out_free;
	}
> - idr_preload(GFP_KERNEL);
> - spin_lock(&pidmap_lock);
> - if (!(ns->pid_allocated & PIDNS_ADDING))
> - goto out_unlock;
> + if (unlikely(!(ns->pid_allocated & PIDNS_ADDING)))
> + goto out_free;
> pidfs_add_pid(pid);
> - for ( ; upid >= pid->numbers; --upid) {
> + for (upid = pid->numbers + ns->level; upid >= pid->numbers; --upid) {
> /* Make the PID visible to find_pid_ns. */
> idr_replace(&upid->ns->idr, pid, upid->nr);
> upid->ns->pid_allocated++;
So... unless I am totally confused, the current code has another
idr_preload + idr_preload_end around pidfs_add_pid().
AFAICS, this makes no sense, and your patch removes it. But perhaps this
deserves a note in the changelog or even a separate patch?
And another stupid question... I don't understand fs/pidfs.c, but it looks
a bit strange to me that pidfs_add_pid() is called before the
for (...)
idr_replace(...);
loop. I don't see any problem, but to me it would look a bit better to do
pidfs_add_pid(pid) when this pid is fully initialized...
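IOW, something like this (just to illustrate, not even compile-tested):

-	pidfs_add_pid(pid);
 	for (upid = pid->numbers + ns->level; upid >= pid->numbers; --upid) {
 		/* Make the PID visible to find_pid_ns. */
 		idr_replace(&upid->ns->idr, pid, upid->nr);
 		upid->ns->pid_allocated++;
 	}
+	pidfs_add_pid(pid);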
Oleg.
On Sun, Nov 23, 2025 at 9:10 PM Oleg Nesterov <oleg@redhat.com> wrote:
>
> On 11/23, Mateusz Guzik wrote:
> > [...]
>
> Not sure I can review... But FWIW this patch looks good to me after a
> very quick glance. I'll try to actually read it tomorrow.
>
> But please find a couple of minor "can't resist" nits below.
>
> > [...]
>
> So. With or without this patch we have
>
> if (tid) {
> nr = idr_alloc(...);
>
> if (nr == -ENOSPC)
> nr = -EEXIST;
> } else {
> nr = idr_alloc_cyclic(...);
> }
>
> if (nr < 0) {
> retval = (nr == -ENOSPC) ? -EAGAIN : nr;
> goto out_free;
> }
>
> and somehow this looks annoying to me... Perhaps it makes sense to make this
> code more symmetric (and imo more readable) ?
>
I agree, but I also tried to avoid non-perf changes to keep the patch
easier to review.
There is a ton of cleanup which can be done here; maybe I'll get to some
of it later.
> [...]
>
> So... unless I am totally confused, the current code has another
> idr_preload + idr_preload_end around pidfs_add_pid().
>
> AFAICS, this makes no sense, and your patch removes it. But perhaps this
> deserves a note in the changelog or even a separate patch?
>
Oh heh, that was not intentional. After I was done massaging this I
just mindlessly removed the lock acquire + preload.
It does indeed look like it serves no purpose; I'm going to submit a
separate patch to remove it.
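Roughly the following against the current tree (a sketch, assuming
nothing else relies on the preallocated nodes; untested):

-	idr_preload(GFP_KERNEL);
 	spin_lock(&pidmap_lock);
 	if (!(ns->pid_allocated & PIDNS_ADDING))
 		goto out_unlock;
 	pidfs_add_pid(pid);
 	[...]
 out_unlock:
 	spin_unlock(&pidmap_lock);
-	idr_preload_end();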
>
> And another stupid question... I don't understand fs/pidfs.c, but it looks
> a bit strange to me that pidfs_add_pid() is called before the
>
> for (...)
> idr_replace(...);
>
> loop. I don't see any problem, but to me it would look a bit better to do
> pidfs_add_pid(pid) when this pid is fully initialized...
>
That's a question for Christian.