From: Vladimir Sementsov-Ogievskiy
To: qemu-devel@nongnu.org, qemu-block@nongnu.org
Cc: kwolf@redhat.com, den@openvz.org, vsementsov@virtuozzo.com, jsnow@redhat.com, mreitz@redhat.com
Date: Mon, 15 Apr 2019 17:49:45 +0300
Message-Id: <20190415144948.78568-3-vsementsov@virtuozzo.com>
In-Reply-To: <20190415144948.78568-1-vsementsov@virtuozzo.com>
References: <20190415144948.78568-1-vsementsov@virtuozzo.com>
X-Mailer: git-send-email 2.18.0
Subject: [Qemu-devel] [PATCH v6 2/5] block/backup: move to copy_bitmap with granularity
charset="utf-8" We are going to share this bitmap between backup and backup-top filter driver, so let's share something more meaningful. It also simplifies some calculations. Signed-off-by: Vladimir Sementsov-Ogievskiy Reviewed-by: Max Reitz --- block/backup.c | 48 +++++++++++++++++++++++------------------------- 1 file changed, 23 insertions(+), 25 deletions(-) diff --git a/block/backup.c b/block/backup.c index d9f5db18ac..510fc54f98 100644 --- a/block/backup.c +++ b/block/backup.c @@ -113,7 +113,8 @@ static int coroutine_fn backup_cow_with_bounce_buffer(B= ackupBlockJob *job, int read_flags =3D is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0; int write_flags =3D job->serialize_target_writes ? BDRV_REQ_SERIALISIN= G : 0; =20 - hbitmap_reset(job->copy_bitmap, start / job->cluster_size, 1); + assert(QEMU_IS_ALIGNED(start, job->cluster_size)); + hbitmap_reset(job->copy_bitmap, start, job->cluster_size); nbytes =3D MIN(job->cluster_size, job->len - start); if (!*bounce_buffer) { *bounce_buffer =3D blk_blockalign(blk, job->cluster_size); @@ -147,7 +148,7 @@ static int coroutine_fn backup_cow_with_bounce_buffer(B= ackupBlockJob *job, =20 return nbytes; fail: - hbitmap_set(job->copy_bitmap, start / job->cluster_size, 1); + hbitmap_set(job->copy_bitmap, start, job->cluster_size); return ret; =20 } @@ -167,16 +168,15 @@ static int coroutine_fn backup_cow_with_offload(Backu= pBlockJob *job, int write_flags =3D job->serialize_target_writes ? BDRV_REQ_SERIALISIN= G : 0; =20 assert(QEMU_IS_ALIGNED(job->copy_range_size, job->cluster_size)); + assert(QEMU_IS_ALIGNED(start, job->cluster_size)); nbytes =3D MIN(job->copy_range_size, end - start); nr_clusters =3D DIV_ROUND_UP(nbytes, job->cluster_size); - hbitmap_reset(job->copy_bitmap, start / job->cluster_size, - nr_clusters); + hbitmap_reset(job->copy_bitmap, start, job->cluster_size * nr_clusters= ); ret =3D blk_co_copy_range(blk, start, job->target, start, nbytes, read_flags, write_flags); if (ret < 0) { trace_backup_do_cow_copy_range_fail(job, start, ret); - hbitmap_set(job->copy_bitmap, start / job->cluster_size, - nr_clusters); + hbitmap_set(job->copy_bitmap, start, job->cluster_size * nr_cluste= rs); return ret; } =20 @@ -204,7 +204,7 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *j= ob, cow_request_begin(&cow_request, job, start, end); =20 while (start < end) { - if (!hbitmap_get(job->copy_bitmap, start / job->cluster_size)) { + if (!hbitmap_get(job->copy_bitmap, start)) { trace_backup_do_cow_skip(job, start); start +=3D job->cluster_size; continue; /* already copied */ @@ -300,6 +300,11 @@ static void backup_clean(Job *job) assert(s->target); blk_unref(s->target); s->target =3D NULL; + + if (s->copy_bitmap) { + hbitmap_free(s->copy_bitmap); + s->copy_bitmap =3D NULL; + } } =20 static void backup_attached_aio_context(BlockJob *job, AioContext *aio_con= text) @@ -312,7 +317,6 @@ static void backup_attached_aio_context(BlockJob *job, = AioContext *aio_context) void backup_do_checkpoint(BlockJob *job, Error **errp) { BackupBlockJob *backup_job =3D container_of(job, BackupBlockJob, commo= n); - int64_t len; =20 assert(block_job_driver(job) =3D=3D &backup_job_driver); =20 @@ -322,8 +326,7 @@ void backup_do_checkpoint(BlockJob *job, Error **errp) return; } =20 - len =3D DIV_ROUND_UP(backup_job->len, backup_job->cluster_size); - hbitmap_set(backup_job->copy_bitmap, 0, len); + hbitmap_set(backup_job->copy_bitmap, 0, backup_job->len); } =20 static void backup_drain(BlockJob *job) @@ -378,16 +381,16 @@ static int coroutine_fn 
 {
     int ret;
     bool error_is_read;
-    int64_t cluster;
+    int64_t offset;
     HBitmapIter hbi;
 
     hbitmap_iter_init(&hbi, job->copy_bitmap, 0);
-    while ((cluster = hbitmap_iter_next(&hbi)) != -1) {
+    while ((offset = hbitmap_iter_next(&hbi)) != -1) {
         do {
             if (yield_and_check(job)) {
                 return 0;
             }
-            ret = backup_do_cow(job, cluster * job->cluster_size,
+            ret = backup_do_cow(job, offset,
                                 job->cluster_size, &error_is_read, false);
             if (ret < 0 && backup_error_action(job, error_is_read, -ret) ==
                            BLOCK_ERROR_ACTION_REPORT)
@@ -409,12 +412,9 @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
     while (bdrv_dirty_bitmap_next_dirty_area(job->sync_bitmap,
                                              &offset, &bytes))
     {
-        uint64_t cluster = offset / job->cluster_size;
-        uint64_t end_cluster = DIV_ROUND_UP(offset + bytes, job->cluster_size);
+        hbitmap_set(job->copy_bitmap, offset, bytes);
 
-        hbitmap_set(job->copy_bitmap, cluster, end_cluster - cluster);
-
-        offset = end_cluster * job->cluster_size;
+        offset += bytes;
         if (offset >= job->len) {
             break;
         }
@@ -423,30 +423,27 @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
 
     /* TODO job_progress_set_remaining() would make more sense */
     job_progress_update(&job->common.job,
-        job->len - hbitmap_count(job->copy_bitmap) * job->cluster_size);
+        job->len - hbitmap_count(job->copy_bitmap));
 }
 
 static int coroutine_fn backup_run(Job *job, Error **errp)
 {
     BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
     BlockDriverState *bs = blk_bs(s->common.blk);
-    int64_t offset, nb_clusters;
+    int64_t offset;
     int ret = 0;
 
     QLIST_INIT(&s->inflight_reqs);
     qemu_co_rwlock_init(&s->flush_rwlock);
 
-    nb_clusters = DIV_ROUND_UP(s->len, s->cluster_size);
     job_progress_set_remaining(job, s->len);
 
-    s->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
     if (s->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
         backup_incremental_init_copy_bitmap(s);
     } else {
-        hbitmap_set(s->copy_bitmap, 0, nb_clusters);
+        hbitmap_set(s->copy_bitmap, 0, s->len);
    }
 
-
     s->before_write.notify = backup_before_write_notify;
     bdrv_add_before_write_notifier(bs, &s->before_write);
 
@@ -527,7 +524,6 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
     /* wait until pending backup_do_cow() calls have completed */
     qemu_co_rwlock_wrlock(&s->flush_rwlock);
     qemu_co_rwlock_unlock(&s->flush_rwlock);
-    hbitmap_free(s->copy_bitmap);
 
     return ret;
 }
@@ -678,6 +674,8 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
     } else {
         job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
     }
+
+    job->copy_bitmap = hbitmap_alloc(len, ctz32(job->cluster_size));
     job->use_copy_range = true;
     job->copy_range_size = MIN_NON_ZERO(blk_get_max_transfer(job->common.blk),
                                         blk_get_max_transfer(job->target));
-- 
2.18.0
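
Note for readers following the change: the patch boils down to letting HBitmap
do the cluster arithmetic itself, by allocating the copy bitmap over the byte
length with granularity ctz32(cluster_size) instead of one bit per cluster.
Below is a minimal sketch of the two calling conventions, assuming a QEMU build
environment for "qemu/hbitmap.h" and "qemu/host-utils.h"; DEMO_CLUSTER_SIZE,
DEMO_LEN and demo_copy_bitmap_granularity() are made-up names for illustration
and are not part of the patch.

#include "qemu/osdep.h"
#include "qemu/hbitmap.h"
#include "qemu/host-utils.h"

/* Illustrative values only; the real job derives these from bdi and job->len. */
#define DEMO_CLUSTER_SIZE (64 * 1024)
#define DEMO_LEN          (16 * DEMO_CLUSTER_SIZE)

void demo_copy_bitmap_granularity(void)
{
    int64_t start = 3 * DEMO_CLUSTER_SIZE;
    HBitmap *per_cluster;
    HBitmap *per_byte;

    /* Old scheme: one bit per cluster, every caller divides by hand. */
    per_cluster = hbitmap_alloc(DEMO_LEN / DEMO_CLUSTER_SIZE, 0);
    hbitmap_set(per_cluster, start / DEMO_CLUSTER_SIZE, 1);
    assert(hbitmap_get(per_cluster, start / DEMO_CLUSTER_SIZE));

    /*
     * New scheme: the bitmap covers DEMO_LEN bytes with granularity
     * ctz32(cluster_size), so callers pass byte offsets and byte counts
     * directly while one bit still covers one cluster.
     */
    per_byte = hbitmap_alloc(DEMO_LEN, ctz32(DEMO_CLUSTER_SIZE));
    hbitmap_set(per_byte, start, DEMO_CLUSTER_SIZE);
    assert(hbitmap_get(per_byte, start));
    /* With a granularity set, hbitmap_count() reports dirty bytes, not bits. */
    assert(hbitmap_count(per_byte) == DEMO_CLUSTER_SIZE);

    hbitmap_free(per_cluster);
    hbitmap_free(per_byte);
}

The last assertion is why the job_progress_update() call in the patch drops its
"* job->cluster_size" factor: the count is already scaled by the granularity.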