Currently, when setting memory attributes, KVM provides no guarantees about
the memory contents.
Introduce default handlers for applying memory content modes, which
different architectures should override.
These handlers will be used later to apply memory content modes during set
memory attributes requests.
Signed-off-by: Ackerley Tng <ackerleytng@google.com>
---
include/linux/kvm_host.h | 12 +++++++++
virt/kvm/guest_memfd.c | 67 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 79 insertions(+)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1ea14c66fc82e..4ba3c4b303f4a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -740,6 +740,18 @@ static inline u64 kvm_gmem_get_supported_flags(struct kvm *kvm)
return flags;
}
+
+u64 kvm_arch_gmem_supported_content_modes(struct kvm *kvm);
+int kvm_gmem_apply_content_mode_zero(struct inode *inode, pgoff_t start,
+ pgoff_t end);
+int kvm_arch_gmem_apply_content_mode_zero(struct kvm *kvm, struct inode *inode,
+ pgoff_t start, pgoff_t end);
+int kvm_arch_gmem_apply_content_mode_preserve(struct kvm *kvm,
+ struct inode *inode,
+ pgoff_t start, pgoff_t end);
+int kvm_arch_gmem_apply_content_mode_unspecified(struct kvm *kvm,
+ struct inode *inode,
+ pgoff_t start, pgoff_t end);
#endif
#ifndef kvm_arch_has_readonly_mem
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 20a09d9bbcd2b..e270e54e030f0 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -1,4 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/errno.h>
+#include <linux/page-flags.h>
#include <linux/anon_inodes.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
@@ -669,6 +671,71 @@ static void kvm_gmem_invalidate(struct inode *inode, pgoff_t start, pgoff_t end)
static void kvm_gmem_invalidate(struct inode *inode, pgoff_t start, pgoff_t end) {}
#endif
+u64 __weak kvm_arch_gmem_supported_content_modes(struct kvm *kvm)
+{
+ /* Architectures must override with supported modes. */
+ return 0;
+}
+
+int kvm_gmem_apply_content_mode_zero(struct inode *inode, pgoff_t start,
+ pgoff_t end)
+{
+ struct address_space *mapping = inode->i_mapping;
+ struct folio_batch fbatch;
+ int ret = 0;
+ int i;
+
+ folio_batch_init(&fbatch);
+ while (!ret && filemap_get_folios(mapping, &start, end - 1, &fbatch)) {
+ for (i = 0; !ret && i < folio_batch_count(&fbatch); ++i) {
+ struct folio *folio = fbatch.folios[i];
+
+ folio_lock(folio);
+
+ if (folio_test_hwpoison(folio)) {
+ ret = -EHWPOISON;
+ } else {
+ /*
+ * Hard-coding zeroed range since
+ * guest_memfd only supports PAGE_SIZE
+ * folios and start and end have been
+ * checked to be PAGE_SIZE aligned.
+ */
+ folio_zero_segment(folio, 0, PAGE_SIZE);
+ }
+
+ folio_unlock(folio);
+ }
+
+ folio_batch_release(&fbatch);
+ cond_resched();
+ }
+
+ return ret;
+}
+
+int __weak kvm_arch_gmem_apply_content_mode_unspecified(struct kvm *kvm,
+ struct inode *inode,
+ pgoff_t start,
+ pgoff_t end)
+{
+ return 0;
+}
+
+int __weak kvm_arch_gmem_apply_content_mode_zero(struct kvm *kvm,
+ struct inode *inode,
+ pgoff_t start, pgoff_t end)
+{
+ return kvm_gmem_apply_content_mode_zero(inode, start, end);
+}
+
+int __weak kvm_arch_gmem_apply_content_mode_preserve(struct kvm *kvm,
+ struct inode *inode,
+ pgoff_t start, pgoff_t end)
+{
+ return -EOPNOTSUPP;
+}
+
static int __kvm_gmem_set_attributes(struct inode *inode, pgoff_t start,
size_t nr_pages, uint64_t attrs,
pgoff_t *err_index)
--
2.53.0.1018.g2bb0e51243-goog