It is used to discard the opposite backing memory after a range is
converted to shared or private.
Note that private/shared page conversion is done at 4KB granularity,
so check alignment against qemu_host_page_size (which is 4KB) rather
than rb->page_size.
Originally-from: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Xiaoyao Li <xiaoyao.li@intel.com>
---
include/exec/cpu-common.h | 2 ++
softmmu/physmem.c | 38 ++++++++++++++++++++++++++++++++++++++
2 files changed, 40 insertions(+)
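
As a usage sketch for reviewers: a hypothetical caller on the KVM
memory-conversion path might look like the following. The function
kvm_convert_memory() and the GPA-to-RAMBlock translation are
illustrative assumptions, not part of this patch.

/*
 * Hypothetical caller sketch (not part of this patch): convert a
 * guest-physical range and discard the now-unused backing copy.
 */
static int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private)
{
    MemoryRegionSection section;
    RAMBlock *rb;
    int ret;

    /* Translate the GPA range to a MemoryRegion and offset. */
    section = memory_region_find(get_system_memory(), start, size);
    if (!section.mr) {
        return -1;
    }
    rb = section.mr->ram_block;

    /*
     * to_private == true discards the shared (host) copy;
     * to_private == false discards the private gmem copy.
     */
    ret = ram_block_convert_range(rb, section.offset_within_region,
                                  size, to_private);
    memory_region_unref(section.mr);
    return ret;
}
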
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index 87dc9a752c9a..558684b9f246 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -157,6 +157,8 @@ typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length);
+int ram_block_convert_range(RAMBlock *rb, uint64_t start, size_t length,
+ bool shared_to_private);
#endif
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index 6ee6bc794f44..dab3247d461c 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -3733,3 +3733,41 @@ bool ram_block_discard_is_required(void)
return qatomic_read(&ram_block_discard_required_cnt) ||
qatomic_read(&ram_block_coordinated_discard_required_cnt);
}
+
+int ram_block_convert_range(RAMBlock *rb, uint64_t start, size_t length,
+ bool shared_to_private)
+{
+ int fd;
+
+ if (!rb || rb->gmem_fd < 0) {
+ return -1;
+ }
+
+ if (!QEMU_PTR_IS_ALIGNED(start, qemu_host_page_size) ||
+ !QEMU_PTR_IS_ALIGNED(length, qemu_host_page_size)) {
+ return -1;
+ }
+
+ if (!length) {
+ return -1;
+ }
+
+ if (start + length > rb->max_length) {
+ return -1;
+ }
+
+ if (shared_to_private) {
+ void *host_startaddr = rb->host + start;
+
+ if (!QEMU_PTR_IS_ALIGNED(host_startaddr, qemu_host_page_size)) {
+ return -1;
+ }
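+        /* Shared -> private: discard the shared (host-visible) backing. */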
+ fd = rb->fd;
+ } else {
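+        /* Private -> shared: discard the private gmem backing. */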
+ fd = rb->gmem_fd;
+ }
+
+ return ram_block_discard_range_fd(rb, start, length, fd);
+}
--
2.34.1