From nobody Tue Feb 10 12:59:43 2026 Delivered-To: importer@patchew.org Authentication-Results: mx.zohomail.com; spf=pass (zohomail.com: domain of gnu.org designates 209.51.188.17 as permitted sender) smtp.mailfrom=qemu-devel-bounces+importer=patchew.org@nongnu.org Return-Path: Received: from lists.gnu.org (lists.gnu.org [209.51.188.17]) by mx.zohomail.com with SMTPS id 1626109541113992.4986107302008; Mon, 12 Jul 2021 10:05:41 -0700 (PDT) Received: from localhost ([::1]:55190 helo=lists1p.gnu.org) by lists.gnu.org with esmtp (Exim 4.90_1) (envelope-from ) id 1m2zNE-0005y9-3j for importer@patchew.org; Mon, 12 Jul 2021 13:05:40 -0400 Received: from eggs.gnu.org ([2001:470:142:3::10]:46756) by lists.gnu.org with esmtps (TLS1.2:ECDHE_RSA_AES_256_GCM_SHA384:256) (Exim 4.90_1) (envelope-from ) id 1m2zF5-0005jd-7R for qemu-devel@nongnu.org; Mon, 12 Jul 2021 12:57:15 -0400 Received: from prt-mail.chinatelecom.cn ([42.123.76.227]:60834 helo=chinatelecom.cn) by eggs.gnu.org with esmtp (Exim 4.90_1) (envelope-from ) id 1m2zF2-0007J1-4P for qemu-devel@nongnu.org; Mon, 12 Jul 2021 12:57:15 -0400 Received: from clientip-182.138.181.19?logid-d6ee1067a9e34bf398e4104729ee285e (unknown [172.18.0.48]) by chinatelecom.cn (HERMES) with SMTP id A38E828008D; Tue, 13 Jul 2021 00:57:09 +0800 (CST) Received: from ([172.18.0.48]) by app0024 with ESMTP id e51670f48b4a48e48152baa86ec7043d for qemu-devel@nongnu.org; Tue Jul 13 00:57:09 2021 HMM_SOURCE_IP: 172.18.0.48:50580.4574419 HMM_ATTACHE_NUM: 0000 HMM_SOURCE_TYPE: SMTP X-189-SAVE-TO-SEND: +huangy81@chinatelecom.cn X-Transaction-ID: e51670f48b4a48e48152baa86ec7043d X-filter-score: X-Real-From: huangy81@chinatelecom.cn X-Receive-IP: 172.18.0.48 X-MEDUSA-Status: 0 From: huangy81@chinatelecom.cn To: qemu-devel@nongnu.org Subject: [PATCH v2 2/3] memory: introduce DirtyRateIncreasedPages and util function Date: Tue, 13 Jul 2021 00:56:51 +0800 Message-Id: X-Mailer: git-send-email 1.8.3.1 In-Reply-To: References: In-Reply-To: References: 
MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: quoted-printable Received-SPF: pass (zohomail.com: domain of gnu.org designates 209.51.188.17 as permitted sender) client-ip=209.51.188.17; envelope-from=qemu-devel-bounces+importer=patchew.org@nongnu.org; helo=lists.gnu.org; Received-SPF: pass client-ip=42.123.76.227; envelope-from=huangy81@chinatelecom.cn; helo=chinatelecom.cn X-Spam_score_int: -18 X-Spam_score: -1.9 X-Spam_bar: - X-Spam_report: (-1.9 / 5.0 requ) BAYES_00=-1.9, SPF_HELO_PASS=-0.001, SPF_PASS=-0.001 autolearn=ham autolearn_force=no X-Spam_action: no action X-BeenThere: qemu-devel@nongnu.org X-Mailman-Version: 2.1.23 Precedence: list List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Cc: Eduardo Habkost , Juan Quintela , Hyman , "Dr. David Alan Gilbert" , Peter Xu , Chuan Zheng , Paolo Bonzini Errors-To: qemu-devel-bounces+importer=patchew.org@nongnu.org Sender: "Qemu-devel" X-ZM-MESSAGEID: 1626109543133100001 From: Hyman Huang(=E9=BB=84=E5=8B=87) introduce DirtyRateIncreasedPages to stat the increased dirty pages during the calculation time along with ramblock_sync_dirty_bitmap. introduce util functions to setup the DIRTY_MEMORY_MIGRATION dirty bits for the convenience of tracking dirty bitmap when calculating dirtyrate. 
Signed-off-by: Hyman Huang(=E9=BB=84=E5=8B=87) --- include/exec/ram_addr.h | 87 ++++++++++++++++++++++++++++++++++++++++++++-= ---- softmmu/physmem.c | 35 ++++++++++++++++++++ 2 files changed, 113 insertions(+), 9 deletions(-) diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h index 45c9132..c47d1a7 100644 --- a/include/exec/ram_addr.h +++ b/include/exec/ram_addr.h @@ -26,6 +26,8 @@ #include "exec/ramlist.h" #include "exec/ramblock.h" =20 +static uint64_t DirtyRateIncreasedPages =3D 0; + /** * clear_bmap_size: calculate clear bitmap size * @@ -422,6 +424,9 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_= t start, ram_addr_t length, unsigned client); =20 +void cpu_physical_memory_dirtyrate_clear_bit(ram_addr_t start, + ram_addr_t length); + DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client); =20 @@ -449,6 +454,8 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock= *rb, uint64_t num_dirty =3D 0; unsigned long *dest =3D rb->bmap; =20 + assert(global_dirty_tracking); + /* start address and length is aligned at the start of a word? 
*/ if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) =3D=3D (start + rb->offset) && @@ -466,12 +473,20 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlo= ck *rb, =20 for (k =3D page; k < page + nr; k++) { if (src[idx][offset]) { - unsigned long bits =3D qatomic_xchg(&src[idx][offset], 0); - unsigned long new_dirty; - new_dirty =3D ~dest[k]; - dest[k] |=3D bits; - new_dirty &=3D bits; - num_dirty +=3D ctpopl(new_dirty); + unsigned long bits; + if (global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE) { + bits =3D qatomic_read(&src[idx][offset]); + DirtyRateIncreasedPages +=3D ctpopl(bits); + } + + if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) { + unsigned long new_dirty; + bits =3D qatomic_xchg(&src[idx][offset], 0); + new_dirty =3D ~dest[k]; + dest[k] |=3D bits; + new_dirty &=3D bits; + num_dirty +=3D ctpopl(new_dirty); + } } =20 if (++offset >=3D BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) { @@ -500,9 +515,15 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBloc= k *rb, start + addr + offset, TARGET_PAGE_SIZE, DIRTY_MEMORY_MIGRATION)) { - long k =3D (start + addr) >> TARGET_PAGE_BITS; - if (!test_and_set_bit(k, dest)) { - num_dirty++; + if (global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE) { + DirtyRateIncreasedPages++; + } + + if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) { + long k =3D (start + addr) >> TARGET_PAGE_BITS; + if (!test_and_set_bit(k, dest)) { + num_dirty++; + } } } } @@ -510,5 +531,53 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBloc= k *rb, =20 return num_dirty; } + +static inline +void cpu_physical_memory_dirtyrate_clear_dirty_bits(RAMBlock *rb) +{ + ram_addr_t addr; + ram_addr_t length =3D rb->used_length; + unsigned long word =3D BIT_WORD(rb->offset >> TARGET_PAGE_BITS); + + /* start address and length is aligned at the start of a word? 
*/ + if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) =3D=3D rb->offset && + !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) { + int k; + int nr =3D BITS_TO_LONGS(length >> TARGET_PAGE_BITS); + unsigned long * const *src; + unsigned long idx =3D (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_= SIZE; + unsigned long offset =3D BIT_WORD((word * BITS_PER_LONG) % + DIRTY_MEMORY_BLOCK_SIZE); + + src =3D qatomic_rcu_read( + &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks; + + for (k =3D 0; k < nr; k++) { + if (src[idx][offset]) { + qatomic_set(&src[idx][offset], 0); + } + if (++offset >=3D BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) { + offset =3D 0; + idx++; + } + } + } else { + ram_addr_t offset =3D rb->offset; + + for (addr =3D 0; addr < length; addr +=3D TARGET_PAGE_SIZE) { + cpu_physical_memory_dirtyrate_clear_bit(addr + offset, + TARGET_PAGE_SIZE); + } + } + + return; +} + +static inline +void cpu_physical_memory_dirtyrate_reset_protect(RAMBlock *rb) +{ + memory_region_clear_dirty_bitmap(rb->mr, 0, rb->used_length); + cpu_physical_memory_dirtyrate_clear_dirty_bits(rb); +} #endif #endif diff --git a/softmmu/physmem.c b/softmmu/physmem.c index 3c1912a..67cff31 100644 --- a/softmmu/physmem.c +++ b/softmmu/physmem.c @@ -1068,6 +1068,41 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_ad= dr_t start, return dirty; } =20 +void cpu_physical_memory_dirtyrate_clear_bit(ram_addr_t start, + ram_addr_t length) +{ + DirtyMemoryBlocks *blocks; + unsigned long end, page; + RAMBlock *ramblock; + + if (length =3D=3D 0) { + return; + } + + end =3D TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS; + page =3D start >> TARGET_PAGE_BITS; + + WITH_RCU_READ_LOCK_GUARD() { + blocks =3D + qatomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION= ]); + ramblock =3D qemu_get_ram_block(start); + /* Range sanity check on the ramblock */ + assert(start >=3D ramblock->offset && + start + length <=3D ramblock->offset + ramblock->used_lengt= h); + while (page < end) { + 
unsigned long idx =3D page / DIRTY_MEMORY_BLOCK_SIZE; + unsigned long offset =3D page % DIRTY_MEMORY_BLOCK_SIZE; + unsigned long num =3D MIN(end - page, + DIRTY_MEMORY_BLOCK_SIZE - offset); + + bitmap_clear(blocks->blocks[idx], offset, num); + page +=3D num; + } + } + + return; +} + DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client) { --=20 1.8.3.1