Instead of waiting for an RCU grace period between each ipc_namespace
structure that is being freed, wait an RCU grace period for every batch
of ipc_namespace structures.
Thanks to Al Viro for the suggestion of the helper function.
This speeds up the run time of the test case that allocates ipc_namespaces
in a loop from 6 minutes to a little over 1 second:
real 0m1.192s
user 0m0.038s
sys 0m1.152s
Signed-off-by: Rik van Riel <riel@surriel.com>
Reported-by: Chris Mason <clm@meta.com>
Suggested-by: Al Viro <viro@zeniv.linux.org.uk>
---
fs/namespace.c | 10 ++++++++++
include/linux/mount.h | 1 +
ipc/namespace.c | 13 ++++++++++---
3 files changed, 21 insertions(+), 3 deletions(-)
diff --git a/fs/namespace.c b/fs/namespace.c
index ab467ee58341..296432ba3716 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1397,6 +1397,16 @@ struct vfsmount *mntget(struct vfsmount *mnt)
}
EXPORT_SYMBOL(mntget);
+/*
+ * Make a mount point inaccessible to new lookups.
+ * Because there may still be current users, the caller MUST WAIT
+ * for an RCU grace period before destroying the mount point.
+ */
+void mnt_make_shortterm(struct vfsmount *mnt)
+{
+ real_mount(mnt)->mnt_ns = NULL;
+}
+
/**
* path_is_mountpoint() - Check if path is a mount in the current namespace.
* @path: path to check
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 62475996fac6..ec55a031aa8c 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -88,6 +88,7 @@ extern void mnt_drop_write(struct vfsmount *mnt);
extern void mnt_drop_write_file(struct file *file);
extern void mntput(struct vfsmount *mnt);
extern struct vfsmount *mntget(struct vfsmount *mnt);
+extern void mnt_make_shortterm(struct vfsmount *mnt);
extern struct vfsmount *mnt_clone_internal(const struct path *path);
extern bool __mnt_is_readonly(struct vfsmount *mnt);
extern bool mnt_may_suid(struct vfsmount *mnt);
diff --git a/ipc/namespace.c b/ipc/namespace.c
index a26860a41dac..6ecc30effd3e 100644
--- a/ipc/namespace.c
+++ b/ipc/namespace.c
@@ -145,10 +145,11 @@ void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
static void free_ipc_ns(struct ipc_namespace *ns)
{
- /* mq_put_mnt() waits for a grace period as kern_unmount()
- * uses synchronize_rcu().
+ /*
+ * Caller needs to wait for an RCU grace period to have passed
+ * after making the mount point inaccessible to new accesses.
*/
- mq_put_mnt(ns);
+ mntput(ns->mq_mnt);
sem_exit_ns(ns);
msg_exit_ns(ns);
shm_exit_ns(ns);
@@ -168,6 +169,12 @@ static void free_ipc(struct work_struct *unused)
struct llist_node *node = llist_del_all(&free_ipc_list);
struct ipc_namespace *n, *t;
+ llist_for_each_entry_safe(n, t, node, mnt_llist)
+ mnt_make_shortterm(n->mq_mnt);
+
+ /* Wait for any last users to have gone away. */
+ synchronize_rcu();
+
llist_for_each_entry_safe(n, t, node, mnt_llist)
free_ipc_ns(n);
}
--
2.38.1
On Thu, Jan 26, 2023 at 03:57:21PM -0500, Rik van Riel wrote: > Instead of waiting for an RCU grace period between each ipc_namespace > structure that is being freed, wait an RCU grace period for every batch > of ipc_namespace structures. > > Thanks to Al Viro for the suggestion of the helper function. > > This speeds up the run time of the test case that allocates ipc_namespaces > in a loop from 6 minutes, to a little over 1 second: > > real 0m1.192s > user 0m0.038s > sys 0m1.152s > > Signed-off-by: Rik van Riel <riel@surriel.com> > Reported-by: Chris Mason <clm@meta.com> > Suggested-by: Al Viro <viro@zeniv.linux.org.uk> OK, except that I'd rather a) made it if (mnt) real_mount(mnt)->mnt_ns = NULL; so that it would treat NULL as no-op and b) made kern_unmount() and kern_unmount_array() use it: void kern_unmount(struct vfsmount *mnt) { /* release long term mount so mount point can be released */ if (!IS_ERR(mnt)) { mnt_make_shortterm(mnt); synchronize_rcu(); /* yecchhh... */ mntput(mnt); } } void kern_unmount_array(struct vfsmount *mnt[], unsigned int num) { unsigned int i; for (i = 0; i < num; i++) mnt_make_shortterm(mnt[i]); synchronize_rcu_expedited(); for (i = 0; i < num; i++) mntput(mnt[i]); }
© 2016 - 2025 Red Hat, Inc.