Add bytes parameter to the function, to limit searched range.
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
---
include/block/dirty-bitmap.h | 3 ++-
include/qemu/hbitmap.h | 7 +++++--
block/backup.c | 2 +-
block/dirty-bitmap.c | 5 +++--
nbd/server.c | 2 +-
util/hbitmap.c | 13 ++++++++++---
6 files changed, 22 insertions(+), 10 deletions(-)
diff --git a/include/block/dirty-bitmap.h b/include/block/dirty-bitmap.h
index 259bd27c40..5dc146abf3 100644
--- a/include/block/dirty-bitmap.h
+++ b/include/block/dirty-bitmap.h
@@ -98,7 +98,8 @@ bool bdrv_has_changed_persistent_bitmaps(BlockDriverState *bs);
BdrvDirtyBitmap *bdrv_dirty_bitmap_next(BlockDriverState *bs,
BdrvDirtyBitmap *bitmap);
char *bdrv_dirty_bitmap_sha256(const BdrvDirtyBitmap *bitmap, Error **errp);
-int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t start);
+int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t start,
+ uint64_t bytes);
BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap_locked(BlockDriverState *bs,
BdrvDirtyBitmap *bitmap,
Error **errp);
diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h
index ddca52c48e..259bfc2936 100644
--- a/include/qemu/hbitmap.h
+++ b/include/qemu/hbitmap.h
@@ -295,10 +295,13 @@ unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi);
/* hbitmap_next_zero:
* @hb: The HBitmap to operate on
* @start: The bit to start from.
+ * @bytes: Range length to search in. If @bytes is zero, search up to the bitmap
+ * end.
*
- * Find next not dirty bit.
+ * Find next not dirty bit within range [@start, @start + @bytes), or from
+ * @start to the bitmap end if @bytes is zero.
*/
-int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start);
+int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t bytes);
/* hbitmap_create_meta:
* Create a "meta" hbitmap to track dirtiness of the bits in this HBitmap.
diff --git a/block/backup.c b/block/backup.c
index 8630d32926..3c8b054587 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -458,7 +458,7 @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
break;
}
- offset = bdrv_dirty_bitmap_next_zero(job->sync_bitmap, offset);
+ offset = bdrv_dirty_bitmap_next_zero(job->sync_bitmap, offset, 0);
if (offset == -1) {
hbitmap_set(job->copy_bitmap, cluster, end - cluster);
break;
diff --git a/block/dirty-bitmap.c b/block/dirty-bitmap.c
index c9b8a6fd52..a9ee814da7 100644
--- a/block/dirty-bitmap.c
+++ b/block/dirty-bitmap.c
@@ -785,9 +785,10 @@ char *bdrv_dirty_bitmap_sha256(const BdrvDirtyBitmap *bitmap, Error **errp)
return hbitmap_sha256(bitmap->bitmap, errp);
}
-int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t offset)
+int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t offset,
+ uint64_t bytes)
{
- return hbitmap_next_zero(bitmap->bitmap, offset);
+ return hbitmap_next_zero(bitmap->bitmap, offset, bytes);
}
void bdrv_merge_dirty_bitmap(BdrvDirtyBitmap *dest, const BdrvDirtyBitmap *src,
diff --git a/nbd/server.c b/nbd/server.c
index ea5fe0eb33..92a5651ba9 100644
--- a/nbd/server.c
+++ b/nbd/server.c
@@ -1952,7 +1952,7 @@ static unsigned int bitmap_to_extents(BdrvDirtyBitmap *bitmap, uint64_t offset,
assert(begin < overall_end && nb_extents);
while (begin < overall_end && i < nb_extents) {
if (dirty) {
- end = bdrv_dirty_bitmap_next_zero(bitmap, begin);
+ end = bdrv_dirty_bitmap_next_zero(bitmap, begin, 0);
} else {
bdrv_set_dirty_iter(it, begin);
end = bdrv_dirty_iter_next(it);
diff --git a/util/hbitmap.c b/util/hbitmap.c
index bcd304041a..73137c10a0 100644
--- a/util/hbitmap.c
+++ b/util/hbitmap.c
@@ -192,16 +192,23 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
}
}
-int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start)
+int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t bytes)
{
size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1];
- uint64_t sz = hb->sizes[HBITMAP_LEVELS - 1];
+ uint64_t end_bit =
+ bytes ? ((start + bytes - 1) >> hb->granularity) + 1 : hb->size;
+ uint64_t sz = (end_bit + BITS_PER_LONG - 1) >> BITS_PER_LEVEL;
unsigned long cur = last_lev[pos];
unsigned start_bit_offset =
(start >> hb->granularity) & (BITS_PER_LONG - 1);
int64_t res;
+ assert(!bytes || start + bytes <= (hb->size << hb->granularity));
+
+ /* There may be some zero bits in @cur before @start. We are not interested
+ * in them, let's set them.
+ */
cur |= (1UL << start_bit_offset) - 1;
assert((start >> hb->granularity) < hb->size);
@@ -218,7 +225,7 @@ int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start)
}
res = (pos << BITS_PER_LEVEL) + ctzl(cur);
- if (res >= hb->size) {
+ if (res >= end_bit) {
return -1;
}
--
2.11.1
On 08/03/2018 12:46 PM, Vladimir Sementsov-Ogievskiy wrote:
> Add bytes parameter to the function, to limit searched range.
>
> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
> ---
> include/block/dirty-bitmap.h | 3 ++-
> include/qemu/hbitmap.h | 7 +++++--
> block/backup.c | 2 +-
> block/dirty-bitmap.c | 5 +++--
> nbd/server.c | 2 +-
> util/hbitmap.c | 13 ++++++++++---
> 6 files changed, 22 insertions(+), 10 deletions(-)
>
> +++ b/include/qemu/hbitmap.h
> @@ -295,10 +295,13 @@ unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi);
> /* hbitmap_next_zero:
> * @hb: The HBitmap to operate on
> * @start: The bit to start from.
> + * @bytes: Range length to search in. If @bytes is zero, search up to the bitmap
> + * end.
> *
> - * Find next not dirty bit.
> + * Find next not dirty bit within range [@start, @start + @bytes), or from
> + * @start to the bitmap end if @bytes is zero.
Can @bytes (or rather, @start + @bytes) exceed the remaining bitmap
length (in which case it is silently truncated to the remaining length)?
> +++ b/util/hbitmap.c
> @@ -192,16 +192,23 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
> }
> }
>
> -int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start)
> +int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t bytes)
> {
> size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
> unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1];
> - uint64_t sz = hb->sizes[HBITMAP_LEVELS - 1];
> + uint64_t end_bit =
> + bytes ? ((start + bytes - 1) >> hb->granularity) + 1 : hb->size;
This computation can overflow if bytes is too large...
> + uint64_t sz = (end_bit + BITS_PER_LONG - 1) >> BITS_PER_LEVEL;
> unsigned long cur = last_lev[pos];
> unsigned start_bit_offset =
> (start >> hb->granularity) & (BITS_PER_LONG - 1);
> int64_t res;
>
> + assert(!bytes || start + bytes <= (hb->size << hb->granularity));
and only now are you asserting that bytes was in range. You should at
least document that bytes must be in range, and while I don't see any
memory dereferences dependent on a potentially bogus end_bit value, it
may also be worth hoisting the assert sooner in the function.
--
Eric Blake, Principal Software Engineer
Red Hat, Inc. +1-919-301-3266
Virtualization: qemu.org | libvirt.org
© 2016 - 2025 Red Hat, Inc.