From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
To: qemu-devel@nongnu.org, qemu-block@nongnu.org
Cc: kwolf@redhat.com, vsementsov@virtuozzo.com, famz@redhat.com,
    wencongyang2@huawei.com, xiechanglong.d@gmail.com, armbru@redhat.com,
    mreitz@redhat.com, stefanha@redhat.com, den@openvz.org, jsnow@redhat.com,
    jcody@redhat.com
Date: Mon, 1 Oct 2018 13:29:14 +0300
Message-Id: <20181001102928.20533-5-vsementsov@virtuozzo.com>
In-Reply-To: <20181001102928.20533-1-vsementsov@virtuozzo.com>
References: <20181001102928.20533-1-vsementsov@virtuozzo.com>
Subject: [Qemu-devel] [PATCH v3 04/18] block/backup: move from HBitmap to BdrvDirtyBitmap

This is a necessary step to share the copy-bitmap between the backup job
and a special filter driver in a further commit.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
---
 block/backup.c | 73 ++++++++++++++++++++++++++++----------------------
 1 file changed, 41 insertions(+), 32 deletions(-)

diff --git a/block/backup.c b/block/backup.c
index fbe7ce19e1..45212d54c9 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -51,7 +51,7 @@ typedef struct BackupBlockJob {
     NotifierWithReturn before_write;
     QLIST_HEAD(, CowRequest) inflight_reqs;
 
-    HBitmap *copy_bitmap;
+    BdrvDirtyBitmap *copy_bitmap;
     bool use_copy_range;
     int64_t copy_range_size;
 
@@ -114,7 +114,8 @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
     int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
     int write_flags = job->serialize_target_writes ? BDRV_REQ_SERIALISING : 0;
 
-    hbitmap_reset(job->copy_bitmap, start / job->cluster_size, 1);
+    assert(start % job->cluster_size == 0);
+    bdrv_reset_dirty_bitmap(job->copy_bitmap, start, job->cluster_size);
     nbytes = MIN(job->cluster_size, job->len - start);
     if (!*bounce_buffer) {
         *bounce_buffer = blk_blockalign(blk, job->cluster_size);
@@ -150,7 +151,7 @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
 
     return nbytes;
 fail:
-    hbitmap_set(job->copy_bitmap, start / job->cluster_size, 1);
+    bdrv_set_dirty_bitmap(job->copy_bitmap, start, job->cluster_size);
     return ret;
 
 }
@@ -170,16 +171,17 @@ static int coroutine_fn backup_cow_with_offload(BackupBlockJob *job,
     int write_flags = job->serialize_target_writes ? BDRV_REQ_SERIALISING : 0;
 
     assert(QEMU_IS_ALIGNED(job->copy_range_size, job->cluster_size));
+    assert(QEMU_IS_ALIGNED(start, job->cluster_size));
     nbytes = MIN(job->copy_range_size, end - start);
     nr_clusters = DIV_ROUND_UP(nbytes, job->cluster_size);
-    hbitmap_reset(job->copy_bitmap, start / job->cluster_size,
-                  nr_clusters);
+    bdrv_reset_dirty_bitmap(job->copy_bitmap, start,
+                            job->cluster_size * nr_clusters);
     ret = blk_co_copy_range(blk, start, job->target, start, nbytes,
                             read_flags, write_flags);
     if (ret < 0) {
         trace_backup_do_cow_copy_range_fail(job, start, ret);
-        hbitmap_set(job->copy_bitmap, start / job->cluster_size,
-                    nr_clusters);
+        bdrv_set_dirty_bitmap(job->copy_bitmap, start,
+                              job->cluster_size * nr_clusters);
         return ret;
     }
 
@@ -207,7 +209,7 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
     cow_request_begin(&cow_request, job, start, end);
 
     while (start < end) {
-        if (!hbitmap_get(job->copy_bitmap, start / job->cluster_size)) {
+        if (!bdrv_get_dirty_locked(NULL, job->copy_bitmap, start)) {
             trace_backup_do_cow_skip(job, start);
             start += job->cluster_size;
             continue; /* already copied */
@@ -303,6 +305,11 @@ static void backup_clean(Job *job)
     assert(s->target);
     blk_unref(s->target);
     s->target = NULL;
+
+    if (s->copy_bitmap) {
+        bdrv_release_dirty_bitmap(blk_bs(s->common.blk), s->copy_bitmap);
+        s->copy_bitmap = NULL;
+    }
 }
 
 static void backup_attached_aio_context(BlockJob *job, AioContext *aio_context)
@@ -315,7 +322,6 @@ static void backup_attached_aio_context(BlockJob *job, AioContext *aio_context)
 void backup_do_checkpoint(BlockJob *job, Error **errp)
 {
     BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
-    int64_t len;
 
     assert(block_job_driver(job) == &backup_job_driver);
 
@@ -325,8 +331,7 @@ void backup_do_checkpoint(BlockJob *job, Error **errp)
         return;
     }
 
-    len = DIV_ROUND_UP(backup_job->len, backup_job->cluster_size);
-    hbitmap_set(backup_job->copy_bitmap, 0, len);
+    bdrv_set_dirty_bitmap(backup_job->copy_bitmap, 0, backup_job->len);
 }
 
 static void backup_drain(BlockJob *job)
@@ -379,28 +384,30 @@ static bool coroutine_fn yield_and_check(BackupBlockJob *job)
 
 static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
 {
-    int ret;
+    int ret = 0;
     bool error_is_read;
-    int64_t cluster;
-    HBitmapIter hbi;
+    int64_t offset;
+    BdrvDirtyBitmapIter *dbi = bdrv_dirty_iter_new(job->copy_bitmap);
 
-    hbitmap_iter_init(&hbi, job->copy_bitmap, 0);
-    while ((cluster = hbitmap_iter_next(&hbi)) != -1) {
+    while ((offset = bdrv_dirty_iter_next(dbi)) != -1) {
         do {
             if (yield_and_check(job)) {
-                return 0;
+                goto out;
             }
-            ret = backup_do_cow(job, cluster * job->cluster_size,
+            ret = backup_do_cow(job, offset,
                                 job->cluster_size, &error_is_read, false);
             if (ret < 0 && backup_error_action(job, error_is_read, -ret) ==
                            BLOCK_ERROR_ACTION_REPORT)
             {
-                return ret;
+                goto out;
             }
         } while (ret < 0);
     }
 
-    return 0;
+out:
+    bdrv_dirty_iter_free(dbi);
+
+    return ret;
 }
 
 /* init copy_bitmap from sync_bitmap */
@@ -412,12 +419,9 @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
     while (bdrv_dirty_bitmap_next_dirty_area(job->sync_bitmap,
                                              &offset, &bytes))
     {
-        uint64_t cluster = offset / job->cluster_size;
-        uint64_t last_cluster = (offset + bytes) / job->cluster_size;
+        bdrv_set_dirty_bitmap(job->copy_bitmap, offset, bytes);
 
-        hbitmap_set(job->copy_bitmap, cluster, last_cluster - cluster + 1);
-
-        offset = (last_cluster + 1) * job->cluster_size;
+        offset += bytes;
         if (offset >= job->len) {
             break;
         }
@@ -426,30 +430,29 @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
 
     /* TODO job_progress_set_remaining() would make more sense */
     job_progress_update(&job->common.job,
-        job->len - hbitmap_count(job->copy_bitmap) * job->cluster_size);
+        job->len - bdrv_get_dirty_count(job->copy_bitmap));
 }
 
 static int coroutine_fn backup_run(Job *job, Error **errp)
 {
     BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
     BlockDriverState *bs = blk_bs(s->common.blk);
-    int64_t offset, nb_clusters;
+    int64_t offset;
     int ret = 0;
 
     QLIST_INIT(&s->inflight_reqs);
     qemu_co_rwlock_init(&s->flush_rwlock);
 
-    nb_clusters = DIV_ROUND_UP(s->len, s->cluster_size);
     job_progress_set_remaining(job, s->len);
 
-    s->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
+    bdrv_disable_dirty_bitmap(s->copy_bitmap);
+
     if (s->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
         backup_incremental_init_copy_bitmap(s);
     } else {
-        hbitmap_set(s->copy_bitmap, 0, nb_clusters);
+        bdrv_set_dirty_bitmap(s->copy_bitmap, 0, s->len);
     }
 
-
     s->before_write.notify = backup_before_write_notify;
     bdrv_add_before_write_notifier(bs, &s->before_write);
 
@@ -530,7 +533,6 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
     /* wait until pending backup_do_cow() calls have completed */
     qemu_co_rwlock_wrlock(&s->flush_rwlock);
     qemu_co_rwlock_unlock(&s->flush_rwlock);
-    hbitmap_free(s->copy_bitmap);
 
     return ret;
 }
@@ -681,6 +683,13 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
     } else {
         job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
     }
+
+    job->copy_bitmap = bdrv_create_dirty_bitmap(bs, job->cluster_size,
+                                                NULL, errp);
+    if (!job->copy_bitmap) {
+        goto error;
+    }
+
     job->use_copy_range = true;
     job->copy_range_size = MIN_NON_ZERO(blk_get_max_transfer(job->common.blk),
                                         blk_get_max_transfer(job->target));
-- 
2.18.0