Since we already have bitmap_mutex to protect both the dirty bitmap and
the clear log bitmap, we don't need atomic operations to set/clear/test on
the clear log bitmap.  Switch all ops from atomic to non-atomic versions,
and touch up the comments to show which lock is in charge.

Introduce bitmap_test_and_clear(), a non-atomic version of
bitmap_test_and_clear_atomic().  It is mostly the same as the atomic
version, but simplified in a few places, e.g. it drops the "old_bits"
variable and the explicit memory barriers.

Signed-off-by: Peter Xu <peterx@redhat.com>
---
include/exec/ram_addr.h | 11 +++++-----
include/qemu/bitmap.h | 1 +
util/bitmap.c | 46 ++++++++++++++++++++++++++++++++++++++++++
3 files changed, 53 insertions(+), 5 deletions(-)

diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index f3e0c78161..5092a2e0ff 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -42,7 +42,8 @@ static inline long clear_bmap_size(uint64_t pages, uint8_t shift)
}

/**
- * clear_bmap_set: set clear bitmap for the page range
+ * clear_bmap_set: set clear bitmap for the page range. Must be with
+ * bitmap_mutex held.
*
* @rb: the ramblock to operate on
* @start: the start page number
@@ -55,12 +56,12 @@ static inline void clear_bmap_set(RAMBlock *rb, uint64_t start,
{
uint8_t shift = rb->clear_bmap_shift;

- bitmap_set_atomic(rb->clear_bmap, start >> shift,
- clear_bmap_size(npages, shift));
+ bitmap_set(rb->clear_bmap, start >> shift, clear_bmap_size(npages, shift));
}

/**
- * clear_bmap_test_and_clear: test clear bitmap for the page, clear if set
+ * clear_bmap_test_and_clear: test clear bitmap for the page, clear if set.
+ * Must be with bitmap_mutex held.
*
* @rb: the ramblock to operate on
* @page: the page number to check
@@ -71,7 +72,7 @@ static inline bool clear_bmap_test_and_clear(RAMBlock *rb, uint64_t page)
{
uint8_t shift = rb->clear_bmap_shift;

- return bitmap_test_and_clear_atomic(rb->clear_bmap, page >> shift, 1);
+ return bitmap_test_and_clear(rb->clear_bmap, page >> shift, 1);
}

static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
diff --git a/include/qemu/bitmap.h b/include/qemu/bitmap.h
index 82a1d2f41f..3ccb00865f 100644
--- a/include/qemu/bitmap.h
+++ b/include/qemu/bitmap.h
@@ -253,6 +253,7 @@ void bitmap_set(unsigned long *map, long i, long len);
void bitmap_set_atomic(unsigned long *map, long i, long len);
void bitmap_clear(unsigned long *map, long start, long nr);
bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr);
+bool bitmap_test_and_clear(unsigned long *map, long start, long nr);
void bitmap_copy_and_clear_atomic(unsigned long *dst, unsigned long *src,
long nr);
unsigned long bitmap_find_next_zero_area(unsigned long *map,
diff --git a/util/bitmap.c b/util/bitmap.c
index f81d8057a7..8d12e90a5a 100644
--- a/util/bitmap.c
+++ b/util/bitmap.c
@@ -240,6 +240,52 @@ void bitmap_clear(unsigned long *map, long start, long nr)
}
}

+bool bitmap_test_and_clear(unsigned long *map, long start, long nr)
+{
+ unsigned long *p = map + BIT_WORD(start);
+ const long size = start + nr;
+ int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
+ bool dirty = false;
+
+ assert(start >= 0 && nr >= 0);
+
+ /* First word */
+ if (nr - bits_to_clear > 0) {
+ if ((*p) & mask_to_clear) {
+ dirty = true;
+ }
+ *p &= ~mask_to_clear;
+ nr -= bits_to_clear;
+ bits_to_clear = BITS_PER_LONG;
+ mask_to_clear = ~0UL;
+ p++;
+ }
+
+ /* Full words */
+ if (bits_to_clear == BITS_PER_LONG) {
+ while (nr >= BITS_PER_LONG) {
+ if (*p) {
+ dirty = true;
+ *p = 0;
+ }
+ nr -= BITS_PER_LONG;
+ p++;
+ }
+ }
+
+ /* Last word */
+ if (nr) {
+ mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
+ if ((*p) & mask_to_clear) {
+ dirty = true;
+ }
+ *p &= ~mask_to_clear;
+ }
+
+ return dirty;
+}
+
bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr)
{
unsigned long *p = map + BIT_WORD(start);
--
2.32.0
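
[Aside: a minimal, self-contained sketch of the semantics the new helper
provides -- "return true if any bit in the range was set, then clear the
whole range", using plain loads/stores and no barriers.  The macros below
are simplified stand-ins for the ones in qemu/bitmap.h, and every name
here is local to the sketch, not QEMU API; build with e.g. `gcc demo.c`.]

#include <assert.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG  (CHAR_BIT * sizeof(unsigned long))
#define BIT_WORD(nr)   ((nr) / BITS_PER_LONG)
/* mask with bits [start % BITS_PER_LONG, BITS_PER_LONG) set */
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
/* mask with the low (nbits % BITS_PER_LONG) bits set (all bits if 0) */
#define BITMAP_LAST_WORD_MASK(nbits) \
    (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))

/* Non-atomic test-and-clear over [start, start + nr); the caller is
 * responsible for locking, exactly as with bitmap_test_and_clear(). */
static bool test_and_clear(unsigned long *map, long start, long nr)
{
    unsigned long *p = map + BIT_WORD(start);
    const long size = start + nr;
    long bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
    unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
    bool dirty = false;

    /* partial first word, when the range extends past it */
    if (nr - bits_to_clear > 0) {
        if (*p & mask_to_clear) {
            dirty = true;
        }
        *p &= ~mask_to_clear;
        nr -= bits_to_clear;
        bits_to_clear = BITS_PER_LONG;
        mask_to_clear = ~0UL;   /* the tail now starts at bit 0 */
        p++;
    }

    /* whole words: plain stores, no atomic fetch-and */
    if (bits_to_clear == BITS_PER_LONG) {
        while (nr >= BITS_PER_LONG) {
            if (*p) {
                dirty = true;
                *p = 0;
            }
            nr -= BITS_PER_LONG;
            p++;
        }
    }

    /* partial last word */
    if (nr) {
        mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
        if (*p & mask_to_clear) {
            dirty = true;
        }
        *p &= ~mask_to_clear;
    }

    return dirty;
}

int main(void)
{
    unsigned long map[128 / BITS_PER_LONG] = { 0 };
    long i;

    /* dirty bits 60..67, so the range straddles a word boundary */
    for (i = 60; i < 68; i++) {
        map[BIT_WORD(i)] |= 1UL << (i % BITS_PER_LONG);
    }

    assert(test_and_clear(map, 60, 8));     /* something was set... */
    for (i = 0; i < (long)(128 / BITS_PER_LONG); i++) {
        assert(map[i] == 0);                /* ...and is now clear */
    }
    assert(!test_and_clear(map, 60, 8));    /* second call sees it clean */

    printf("non-atomic test-and-clear: OK\n");
    return 0;
}
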
* Peter Xu (peterx@redhat.com) wrote:
> Since we already have bitmap_mutex to protect both the dirty bitmap and
> the clear log bitmap, we don't need atomic operations to set/clear/test on
> the clear log bitmap.  Switch all ops from atomic to non-atomic versions,
> and touch up the comments to show which lock is in charge.
>
> Introduce bitmap_test_and_clear(), a non-atomic version of
> bitmap_test_and_clear_atomic().  It is mostly the same as the atomic
> version, but simplified in a few places, e.g. it drops the "old_bits"
> variable and the explicit memory barriers.
>
> Signed-off-by: Peter Xu <peterx@redhat.com>

Can you update the comment in ramblock.h above clear_bmap to say it's
always updated under that lock?

Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

> [...]

--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK

On Thu, Sep 15, 2022 at 07:49:57PM +0100, Dr. David Alan Gilbert wrote:
> * Peter Xu (peterx@redhat.com) wrote:
> > [...]
>
> Can you update the comment in ramblock.h above clear_bmap to say it's
> always updated under that lock?

I'll squash below into the same patch:
---8<---
diff --git a/include/exec/ramblock.h b/include/exec/ramblock.h
index 6cbedf9e0c..adc03df59c 100644
--- a/include/exec/ramblock.h
+++ b/include/exec/ramblock.h
@@ -53,6 +53,9 @@ struct RAMBlock {
* and split clearing of dirty bitmap on the remote node (e.g.,
* KVM). The bitmap will be set only when doing global sync.
*
+ * It is only used on the src side of ram migration, and it is
+ * protected by the global ram_state.bitmap_mutex.
+ *
* NOTE: this bitmap is different comparing to the other bitmaps
* in that one bit can represent multiple guest pages (which is
* decided by the `clear_bmap_shift' variable below). On
---8<---
>
> Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

Thanks,

--
Peter Xu
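
[Aside: the locking discipline the commit message relies on, in
miniature.  In QEMU the lock is ram_state.bitmap_mutex (a QemuMutex)
held around clear_bmap_set()/clear_bmap_test_and_clear(); the pthread
mutex, the bitmap, and the function names below are stand-ins chosen
for a standalone example.  Because every reader and writer serializes
on the same mutex, a plain read-modify-write suffices and atomic ops
(with their barriers) would be pure overhead.  Build with e.g.
`gcc lock_demo.c -lpthread`.]

#include <limits.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
#define BIT_WORD(nr)  ((nr) / BITS_PER_LONG)

static unsigned long clear_bmap[16];                  /* stand-in bitmap */
static pthread_mutex_t bitmap_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Writer: a plain |= is enough because bitmap_mutex serializes everyone. */
static void mark_bit(long bit)
{
    pthread_mutex_lock(&bitmap_mutex);
    clear_bmap[BIT_WORD(bit)] |= 1UL << (bit % BITS_PER_LONG);
    pthread_mutex_unlock(&bitmap_mutex);
}

/* Consumer: non-atomic read-modify-write under the same lock, mirroring
 * what clear_bmap_test_and_clear() does after this patch. */
static bool consume_bit(long bit)
{
    unsigned long mask = 1UL << (bit % BITS_PER_LONG);
    bool was_set;

    pthread_mutex_lock(&bitmap_mutex);
    was_set = clear_bmap[BIT_WORD(bit)] & mask;
    clear_bmap[BIT_WORD(bit)] &= ~mask;
    pthread_mutex_unlock(&bitmap_mutex);
    return was_set;
}

int main(void)
{
    mark_bit(42);
    printf("first consume: %d\n", consume_bit(42));   /* prints 1 */
    printf("second consume: %d\n", consume_bit(42));  /* prints 0 */
    return 0;
}
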