In order to remove the dependency on the page lock for balloon
pages, we need a lock that is independent of the page.
It's crucial that we can handle the scenario where balloon deflation
(clearing page->private) can race with page isolation (using
page->private to obtain the balloon_dev_info where the lock currently
resides).
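
Schematically, the race is (simplified sketch; helper names as in the
existing balloon_compaction code):

	isolation:  b_dev_info = balloon_page_device(page);
	deflation:  balloon_page_finalize(page);  /* clears page->private */
	isolation:  spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	            /* b_dev_info may already be stale here */
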
The current lock in balloon_dev_info is therefore not suitable.
Fortunately, we never really have more than a single balloon device
per VM, so we can just keep it simple and use a static lock to protect
all balloon devices.
Based on this change, we will remove the dependency on the page lock
next.
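
With a global lock, the isolation path can take the lock before it
dereferences page->private. As a rough, hypothetical sketch of where
this is heading (the actual follow-up may differ in detail):

	static bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
	{
		struct balloon_dev_info *b_dev_info;
		unsigned long flags;

		spin_lock_irqsave(&balloon_pages_lock, flags);
		/* Deflation would also take this lock, keeping page->private stable. */
		b_dev_info = balloon_page_device(page);
		if (!b_dev_info) {
			/* Raced with deflation; nothing left to isolate. */
			spin_unlock_irqrestore(&balloon_pages_lock, flags);
			return false;
		}
		list_del(&page->lru);
		b_dev_info->isolated_pages++;
		spin_unlock_irqrestore(&balloon_pages_lock, flags);

		return true;
	}
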
Signed-off-by: David Hildenbrand <david@redhat.com>
---
include/linux/balloon_compaction.h | 6 ++---
mm/balloon_compaction.c | 36 +++++++++++++++++-------------
2 files changed, 23 insertions(+), 19 deletions(-)
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
index 3109d3c43d306..e2d9eb40e1fbb 100644
--- a/include/linux/balloon_compaction.h
+++ b/include/linux/balloon_compaction.h
@@ -21,10 +21,10 @@
* i. Setting the PG_movable_ops flag and page->private with the following
* lock order
* +-page_lock(page);
- * +--spin_lock_irq(&b_dev_info->pages_lock);
+ * +--spin_lock_irq(&balloon_pages_lock);
*
* ii. isolation or dequeueing procedure must remove the page from balloon
- * device page list under b_dev_info->pages_lock.
+ * device page list under balloon_pages_lock.
*
* The functions provided by this interface are placed to help on coping with
* the aforementioned balloon page corner case, as well as to ensure the simple
@@ -52,7 +52,6 @@
*/
struct balloon_dev_info {
unsigned long isolated_pages; /* # of isolated pages for migration */
- spinlock_t pages_lock; /* Protection to pages list */
struct list_head pages; /* Pages enqueued & handled to Host */
int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
struct page *page, enum migrate_mode mode);
@@ -71,7 +70,6 @@ extern size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info,
static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
{
balloon->isolated_pages = 0;
- spin_lock_init(&balloon->pages_lock);
INIT_LIST_HEAD(&balloon->pages);
balloon->migratepage = NULL;
balloon->adjust_managed_page_count = false;
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index fd9ec47cf4670..97e838795354d 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -11,6 +11,12 @@
#include <linux/export.h>
#include <linux/balloon_compaction.h>
+/*
+ * Lock protecting the balloon_dev_info of all devices. We don't really
+ * expect more than one device.
+ */
+static DEFINE_SPINLOCK(balloon_pages_lock);
+
static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
struct page *page)
{
@@ -47,13 +53,13 @@ size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
unsigned long flags;
size_t n_pages = 0;
- spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+ spin_lock_irqsave(&balloon_pages_lock, flags);
list_for_each_entry_safe(page, tmp, pages, lru) {
list_del(&page->lru);
balloon_page_enqueue_one(b_dev_info, page);
n_pages++;
}
- spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+ spin_unlock_irqrestore(&balloon_pages_lock, flags);
return n_pages;
}
EXPORT_SYMBOL_GPL(balloon_page_list_enqueue);
@@ -83,7 +89,7 @@ size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info,
unsigned long flags;
size_t n_pages = 0;
- spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+ spin_lock_irqsave(&balloon_pages_lock, flags);
list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
if (n_pages == n_req_pages)
break;
@@ -106,7 +112,7 @@ size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info,
dec_node_page_state(page, NR_BALLOON_PAGES);
n_pages++;
}
- spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+ spin_unlock_irqrestore(&balloon_pages_lock, flags);
return n_pages;
}
@@ -149,9 +155,9 @@ void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
{
unsigned long flags;
- spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+ spin_lock_irqsave(&balloon_pages_lock, flags);
balloon_page_enqueue_one(b_dev_info, page);
- spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+ spin_unlock_irqrestore(&balloon_pages_lock, flags);
}
EXPORT_SYMBOL_GPL(balloon_page_enqueue);
@@ -191,11 +197,11 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
* BUG() here, otherwise the balloon driver may get stuck in
* an infinite loop while attempting to release all its pages.
*/
- spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+ spin_lock_irqsave(&balloon_pages_lock, flags);
if (unlikely(list_empty(&b_dev_info->pages) &&
!b_dev_info->isolated_pages))
BUG();
- spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+ spin_unlock_irqrestore(&balloon_pages_lock, flags);
return NULL;
}
return list_first_entry(&pages, struct page, lru);
@@ -213,10 +219,10 @@ static bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
if (!b_dev_info)
return false;
- spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+ spin_lock_irqsave(&balloon_pages_lock, flags);
list_del(&page->lru);
b_dev_info->isolated_pages++;
- spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+ spin_unlock_irqrestore(&balloon_pages_lock, flags);
return true;
}
@@ -230,10 +236,10 @@ static void balloon_page_putback(struct page *page)
if (WARN_ON_ONCE(!b_dev_info))
return;
- spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+ spin_lock_irqsave(&balloon_pages_lock, flags);
list_add(&page->lru, &b_dev_info->pages);
b_dev_info->isolated_pages--;
- spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+ spin_unlock_irqrestore(&balloon_pages_lock, flags);
}
static int balloon_page_migrate(struct page *newpage, struct page *page,
@@ -253,7 +259,7 @@ static int balloon_page_migrate(struct page *newpage, struct page *page,
rc = b_dev_info->migratepage(b_dev_info, newpage, page, mode);
switch (rc) {
case 0:
- spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+ spin_lock_irqsave(&balloon_pages_lock, flags);
/* Insert the new page into the balloon list. */
get_page(newpage);
@@ -272,7 +278,7 @@ static int balloon_page_migrate(struct page *newpage, struct page *page,
}
break;
case -ENOENT:
- spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+ spin_lock_irqsave(&balloon_pages_lock, flags);
/* Old page was deflated but new page not inflated. */
__count_vm_event(BALLOON_DEFLATE);
@@ -285,7 +291,7 @@ static int balloon_page_migrate(struct page *newpage, struct page *page,
}
b_dev_info->isolated_pages--;
- spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+ spin_unlock_irqrestore(&balloon_pages_lock, flags);
/* Free the now-deflated page we isolated in balloon_page_isolate(). */
balloon_page_finalize(page);
--
2.51.0
On Tue, Oct 21, 2025 at 02:59:12PM +0200, David Hildenbrand wrote:
> In order to remove the dependency on the page lock for balloon
> pages, we need a lock that is independent of the page.

[...]

> --- a/include/linux/balloon_compaction.h
> +++ b/include/linux/balloon_compaction.h
> @@ -21,10 +21,10 @@
> * i. Setting the PG_movable_ops flag and page->private with the following
> * lock order
> * +-page_lock(page);
> - * +--spin_lock_irq(&b_dev_info->pages_lock);
> + * +--spin_lock_irq(&balloon_pages_lock);
> *
> * ii. isolation or dequeueing procedure must remove the page from balloon
> - * device page list under b_dev_info->pages_lock.
> + * device page list under &balloon_pages_lock
Using &balloon_pages_lock with an & is kinda weird here.

On 21.10.25 22:52, Michael S. Tsirkin wrote:
[...]
>> * ii. isolation or dequeueing procedure must remove the page from balloon
>> - * device page list under b_dev_info->pages_lock.
>> + * device page list under &balloon_pages_lock
>
> Using &balloon_pages_lock with an & is kinda weird here.

Indeed, fixed, thanks!

--
Cheers

David / dhildenb