We want to protect vmcore modifications from concurrent opening of
the vmcore, and also serialize vmcore modifications. Let's convert the
spinlock into a mutex, because some of the operations we'll be
protecting might sleep (e.g., memory allocations) and might take a bit
longer.
Signed-off-by: David Hildenbrand <david@redhat.com>
---
fs/proc/vmcore.c | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index b52d85f8ad59..110ce193d20f 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -62,7 +62,8 @@ core_param(novmcoredd, vmcoredd_disabled, bool, 0);
/* Device Dump Size */
static size_t vmcoredd_orig_sz;
-static DEFINE_SPINLOCK(vmcore_cb_lock);
+static DEFINE_MUTEX(vmcore_mutex);
+
DEFINE_STATIC_SRCU(vmcore_cb_srcu);
/* List of registered vmcore callbacks. */
static LIST_HEAD(vmcore_cb_list);
@@ -72,7 +73,7 @@ static bool vmcore_opened;
void register_vmcore_cb(struct vmcore_cb *cb)
{
INIT_LIST_HEAD(&cb->next);
- spin_lock(&vmcore_cb_lock);
+ mutex_lock(&vmcore_mutex);
list_add_tail(&cb->next, &vmcore_cb_list);
/*
* Registering a vmcore callback after the vmcore was opened is
@@ -80,13 +81,13 @@ void register_vmcore_cb(struct vmcore_cb *cb)
*/
if (vmcore_opened)
pr_warn_once("Unexpected vmcore callback registration\n");
- spin_unlock(&vmcore_cb_lock);
+ mutex_unlock(&vmcore_mutex);
}
EXPORT_SYMBOL_GPL(register_vmcore_cb);
void unregister_vmcore_cb(struct vmcore_cb *cb)
{
- spin_lock(&vmcore_cb_lock);
+ mutex_lock(&vmcore_mutex);
list_del_rcu(&cb->next);
/*
* Unregistering a vmcore callback after the vmcore was opened is
@@ -95,7 +96,7 @@ void unregister_vmcore_cb(struct vmcore_cb *cb)
*/
if (vmcore_opened)
pr_warn_once("Unexpected vmcore callback unregistration\n");
- spin_unlock(&vmcore_cb_lock);
+ mutex_unlock(&vmcore_mutex);
synchronize_srcu(&vmcore_cb_srcu);
}
@@ -120,9 +121,9 @@ static bool pfn_is_ram(unsigned long pfn)
static int open_vmcore(struct inode *inode, struct file *file)
{
- spin_lock(&vmcore_cb_lock);
+ mutex_lock(&vmcore_mutex);
vmcore_opened = true;
- spin_unlock(&vmcore_cb_lock);
+ mutex_unlock(&vmcore_mutex);
return 0;
}
--
2.46.1
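To illustrate the constraint the commit message refers to, here is a minimal,
hypothetical sketch (not part of the patch): a spinlock-protected section runs
in atomic context and must not sleep, so it is limited to GFP_ATOMIC
allocations, while a mutex-protected section may block, e.g. in a GFP_KERNEL
allocation.

    #include <linux/mutex.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_spinlock);
    static DEFINE_MUTEX(example_mutex);

    static void *alloc_under_spinlock(size_t size)
    {
            void *p;

            spin_lock(&example_spinlock);
            /* Atomic context: sleeping is forbidden, only GFP_ATOMIC works. */
            p = kmalloc(size, GFP_ATOMIC);
            spin_unlock(&example_spinlock);
            return p;
    }

    static void *alloc_under_mutex(size_t size)
    {
            void *p;

            mutex_lock(&example_mutex);
            /* May sleep: GFP_KERNEL allocations and other blocking calls are fine. */
            p = kmalloc(size, GFP_KERNEL);
            mutex_unlock(&example_mutex);
            return p;
    }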
On 10/25/24 at 05:11pm, David Hildenbrand wrote:
> We want to protect vmcore modifications from concurrent opening of
> the vmcore, and also serialize vmcore modifications. Let's convert the
> spinlock into a mutex, because some of the operations we'll be
> protecting might sleep (e.g., memory allocations) and might take a bit
> longer.

Could you elaborate on this a little further? E.g., has the concurrent
opening of the vmcore been observed before this patchset, and where does
the memory allocation take place? I'm asking because I'd like to understand
whether this is an existing issue that needs to be backported into our old
RHEL distros. Thanks in advance.
On 15.11.24 10:30, Baoquan He wrote:
> On 10/25/24 at 05:11pm, David Hildenbrand wrote:
>> We want to protect vmcore modifications from concurrent opening of
>> the vmcore, and also serialize vmcore modifications. Let's convert the
>> spinlock into a mutex, because some of the operations we'll be
>> protecting might sleep (e.g., memory allocations) and might take a bit
>> longer.
>
> Could you elaborate on this a little further? E.g., has the concurrent
> opening of the vmcore been observed before this patchset, and where does
> the memory allocation take place? I'm asking because I'd like to understand
> whether this is an existing issue that needs to be backported into our old
> RHEL distros. Thanks in advance.

It's a preparation for the other patches, which do what is described here:

a) We can currently modify the vmcore after it was opened. This can happen
   if a vmcoredd is added after the vmcore was loaded. Similar things will
   happen with the PROC_VMCORE_DEVICE_RAM extension.

b) To handle it cleanly we need to protect the modifications against
   concurrent opening. And the modifications end up allocating memory and
   cannot easily take the spinlock.

So far a spinlock was sufficient; now a mutex is required.

Maybe we'd want to backport patches 1-3, but I'm not sure we consider this
critical enough.

-- 
Cheers,

David / dhildenb
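To make a) and b) above concrete, here is a hypothetical sketch of a later
modification path; the function name, return values, and bookkeeping are
illustrative only and not taken from the series. It assumes the vmcore_mutex
and vmcore_opened introduced in this patch, plus <linux/vmalloc.h>.

    /* Illustrative only; not part of fs/proc/vmcore.c. */
    static int vmcore_add_region_sketch(size_t size)
    {
            void *buf;
            int ret = 0;

            mutex_lock(&vmcore_mutex);
            if (vmcore_opened) {
                    /* Too late: a reader may already rely on the current layout. */
                    ret = -EBUSY;
                    goto out;
            }

            /* May sleep; legal under a mutex, not under the old spinlock. */
            buf = vzalloc(size);
            if (!buf) {
                    ret = -ENOMEM;
                    goto out;
            }

            /* ... link buf into the vmcore list, update sizes/ELF headers ... */
    out:
            mutex_unlock(&vmcore_mutex);
            return ret;
    }

Because the allocation may sleep and the vmcore_opened check must be
serialized against open_vmcore(), a single mutex covers both cleanly.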
On 11/15/24 at 11:03am, David Hildenbrand wrote:
> It's a preparation for the other patches, which do what is described here:
>
> a) We can currently modify the vmcore after it was opened. This can happen
>    if a vmcoredd is added after the vmcore was loaded. Similar things will
>    happen with the PROC_VMCORE_DEVICE_RAM extension.
>
> b) To handle it cleanly we need to protect the modifications against
>    concurrent opening. And the modifications end up allocating memory and
>    cannot easily take the spinlock.
>
> So far a spinlock was sufficient; now a mutex is required.

I see. As we discussed in the patch 2 sub-thread, this information is very
valuable in helping people understand the background when they read the
code. Let's put it in the patch log. Thanks.

> Maybe we'd want to backport patches 1-3, but I'm not sure we consider this
> critical enough.
On 20.11.24 09:16, Baoquan He wrote:
> On 11/15/24 at 11:03am, David Hildenbrand wrote:
>> It's a preparation for the other patches, which do what is described here:
>>
>> a) We can currently modify the vmcore after it was opened. This can happen
>>    if a vmcoredd is added after the vmcore was loaded. Similar things will
>>    happen with the PROC_VMCORE_DEVICE_RAM extension.
>>
>> b) To handle it cleanly we need to protect the modifications against
>>    concurrent opening. And the modifications end up allocating memory and
>>    cannot easily take the spinlock.
>>
>> So far a spinlock was sufficient; now a mutex is required.
>
> I see. As we discussed in the patch 2 sub-thread, this information is very
> valuable in helping people understand the background when they read the
> code. Let's put it in the patch log. Thanks.

I'll extend the description if that helps, thanks!

-- 
Cheers,

David / dhildenb