From: huangy81@chinatelecom.cn
To: qemu-devel@nongnu.org
Cc: Eduardo Habkost, Juan Quintela, Hyman, "Dr. David Alan Gilbert",
    Peter Xu, Chuan Zheng, Paolo Bonzini
Subject: [PATCH 3/4] memory: introduce DIRTY_MEMORY_DIRTY_RATE dirty bits functions
Date: Sun, 27 Jun 2021 13:38:16 +0800

From: Hyman Huang(黄勇)

Introduce utility functions that set up the DIRTY_MEMORY_DIRTY_RATE dirty
bits, so that the dirty bitmap can be conveniently tracked when calculating
the dirty rate.

Signed-off-by: Hyman Huang(黄勇)
---
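For context, here is a minimal sketch of how these helpers might be driven
by a dirty-rate sampler. The caller name calculate_dirtyrate_sample, the use
of RAMBLOCK_FOREACH/WITH_RCU_READ_LOCK_GUARD, and the clear/re-protect/count
ordering are illustrative assumptions, not part of this patch:

/*
 * Illustrative sketch only (assumed caller, not in this patch): the new
 * helpers are expected to be used per RAMBlock under RCU, once at the
 * start and once at the end of a sampling period.
 */
static uint64_t calculate_dirtyrate_sample(void)
{
    RAMBlock *block;
    uint64_t dirty_pages = 0;

    /* Start of the sample period: drop stale bits and re-arm protection. */
    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH(block) {
            cpu_physical_memory_dirtyrate_clear_dirty_bits(block);
            cpu_physical_memory_dirtyrate_reset_protect(block);
        }
    }

    /* ... guest runs; the dirty log would be synced before counting ... */

    /* End of the period: count pages dirtied since the reset. */
    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH(block) {
            dirty_pages += cpu_physical_memory_dirtyrate_stat_dirty_bits(block);
        }
    }

    return dirty_pages;
}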
 include/exec/ram_addr.h | 121 ++++++++++++++++++++++++++++++++++++++++++++++++
 softmmu/physmem.c       |  61 ++++++++++++++++++++++++
 2 files changed, 182 insertions(+)

diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 6070a52..57dc96b 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -435,6 +435,12 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                               ram_addr_t length,
                                               unsigned client);
 
+void cpu_physical_memory_dirtyrate_clear_bit(ram_addr_t start,
+                                             ram_addr_t length);
+
+void cpu_physical_memory_dirtyrate_reprotect_bit(ram_addr_t start,
+                                                 ram_addr_t length);
+
 DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
     (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client);
 
@@ -523,5 +529,120 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
 
     return num_dirty;
 }
+
+/* Called within an RCU critical section */
+static inline
+void cpu_physical_memory_dirtyrate_clear_dirty_bits(RAMBlock *rb)
+{
+    ram_addr_t addr;
+    ram_addr_t length = rb->used_length;
+    unsigned long word = BIT_WORD(rb->offset >> TARGET_PAGE_BITS);
+
+    /* Are the start address and length aligned at the start of a word? */
+    if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) == rb->offset &&
+        !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
+        int k;
+        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
+        unsigned long * const *src;
+        unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
+        unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
+                                        DIRTY_MEMORY_BLOCK_SIZE);
+
+        src = qatomic_rcu_read(
+                &ram_list.dirty_memory[DIRTY_MEMORY_DIRTY_RATE])->blocks;
+
+        for (k = 0; k < nr; k++) {
+            if (src[idx][offset]) {
+                qatomic_set(&src[idx][offset], 0);
+            }
+            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
+                offset = 0;
+                idx++;
+            }
+        }
+    } else {
+        ram_addr_t offset = rb->offset;
+
+        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
+            cpu_physical_memory_dirtyrate_clear_bit(addr + offset,
+                                                    TARGET_PAGE_SIZE);
+        }
+    }
+
+    return;
+}
+
+/* Called within an RCU critical section */
+static inline
+uint64_t cpu_physical_memory_dirtyrate_stat_dirty_bits(RAMBlock *rb)
+{
+    uint64_t dirty_pages = 0;
+    ram_addr_t addr;
+    ram_addr_t length = rb->used_length;
+    unsigned long word = BIT_WORD(rb->offset >> TARGET_PAGE_BITS);
+    unsigned long bits;
+
+    /* Are the start address and length aligned at the start of a word? */
+    if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) == rb->offset &&
+        !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
+        int k;
+        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
+        unsigned long * const *src;
+        unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
+        unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
+                                        DIRTY_MEMORY_BLOCK_SIZE);
+
+        src = qatomic_rcu_read(
+                &ram_list.dirty_memory[DIRTY_MEMORY_DIRTY_RATE])->blocks;
+
+        for (k = 0; k < nr; k++) {
+            if (src[idx][offset]) {
+                bits = qatomic_read(&src[idx][offset]);
+                dirty_pages += ctpopl(bits);
+            }
+
+            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
+                offset = 0;
+                idx++;
+            }
+        }
+    } else {
+        ram_addr_t offset = rb->offset;
+
+        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
+            if (cpu_physical_memory_get_dirty(offset + addr,
+                                              TARGET_PAGE_SIZE,
+                                              DIRTY_MEMORY_DIRTY_RATE)) {
+                dirty_pages++;
+            }
+        }
+    }
+
+    return dirty_pages;
+}
+
+static inline
+void cpu_physical_memory_dirtyrate_reset_protect(RAMBlock *rb)
+{
+    ram_addr_t addr;
+    ram_addr_t length = rb->used_length;
+    unsigned long word = BIT_WORD(rb->offset >> TARGET_PAGE_BITS);
+
+    /* Are the start address and length aligned at the start of a word? */
+    if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) == rb->offset &&
+        !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
+        memory_region_clear_dirty_bitmap(rb->mr, 0, length);
+    } else {
+        ram_addr_t offset = rb->offset;
+
+        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
+            cpu_physical_memory_dirtyrate_reprotect_bit(offset + addr,
+                                                        TARGET_PAGE_SIZE);
+        }
+    }
+
+    return;
+}
+
 #endif
 #endif
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index 9b171c9..d68649a 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -1068,6 +1068,67 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
     return dirty;
 }
 
+void cpu_physical_memory_dirtyrate_clear_bit(ram_addr_t start,
+                                             ram_addr_t length)
+{
+    DirtyMemoryBlocks *blocks;
+    unsigned long end, page;
+    RAMBlock *ramblock;
+
+    if (length == 0) {
+        return;
+    }
+
+    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
+    page = start >> TARGET_PAGE_BITS;
+
+    WITH_RCU_READ_LOCK_GUARD() {
+        blocks =
+            qatomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_DIRTY_RATE]);
+        ramblock = qemu_get_ram_block(start);
+        /* Range sanity check on the ramblock */
+        assert(start >= ramblock->offset &&
+               start + length <= ramblock->offset + ramblock->used_length);
+        while (page < end) {
+            unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+            unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+            unsigned long num = MIN(end - page,
+                                    DIRTY_MEMORY_BLOCK_SIZE - offset);
+
+            bitmap_clear(blocks->blocks[idx], offset, num);
+            page += num;
+        }
+    }
+
+    return;
+}
+
+void cpu_physical_memory_dirtyrate_reprotect_bit(ram_addr_t start,
+                                                 ram_addr_t length)
+{
+    unsigned long end, start_page;
+    RAMBlock *ramblock;
+    uint64_t mr_offset, mr_size;
+
+    if (length == 0) {
+        return;
+    }
+
+    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
+    start_page = start >> TARGET_PAGE_BITS;
+
+    ramblock = qemu_get_ram_block(start);
+    /* Range sanity check on the ramblock */
+    assert(start >= ramblock->offset &&
+           start + length <= ramblock->offset + ramblock->used_length);
+
+    mr_offset = (ram_addr_t)(start_page << TARGET_PAGE_BITS) - ramblock->offset;
+    mr_size = (end - start_page) << TARGET_PAGE_BITS;
+
+    memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size);
+
+    return;
+}
+
 DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
     (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client)
 {
-- 
1.8.3.1