From: yangge <yangge1116@126.com>
In the sev_mem_enc_register_region() function, we call sev_pin_memory()
to pin memory for the long term. However, the FOLL_LONGTERM flag is not
passed to sev_pin_memory(), so the pinned pages are not migrated out of
MIGRATE_CMA/ZONE_MOVABLE first. This violates those mechanisms, which
exist to avoid fragmentation with unmovable pages, and can for example
make CMA allocations fail.

To address this, pass the FOLL_LONGTERM flag when
sev_mem_enc_register_region() calls sev_pin_memory().
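
For reference, a minimal sketch of the long-term pinning pattern this change
follows (illustrative only, not code from this patch; pin_for_long_term() is
a made-up helper):

	#include <linux/mm.h>

	/*
	 * With FOLL_LONGTERM, GUP migrates the target pages out of
	 * MIGRATE_CMA/ZONE_MOVABLE before pinning them, so a long-term pin
	 * does not leave unmovable pages in CMA areas or the movable zone.
	 */
	static int pin_for_long_term(unsigned long uaddr, int npages,
				     struct page **pages)
	{
		int pinned;

		pinned = pin_user_pages_fast(uaddr, npages,
					     FOLL_WRITE | FOLL_LONGTERM, pages);
		if (pinned < 0)
			return pinned;
		if (pinned != npages) {
			unpin_user_pages(pages, pinned);
			return -ENOMEM;
		}
		return 0;
	}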
Signed-off-by: yangge <yangge1116@126.com>
---
V3:
- the fix only needed for sev_mem_enc_register_region()
V2:
- update code and commit message suggested by David
arch/x86/kvm/svm/sev.c | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 943bd07..04a125c 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -622,7 +622,7 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
unsigned long ulen, unsigned long *n,
- int write)
+ int flags)
{
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
unsigned long npages, size;
@@ -663,7 +663,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
return ERR_PTR(-ENOMEM);
/* Pin the user virtual address. */
- npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
+ npinned = pin_user_pages_fast(uaddr, npages, flags, pages);
if (npinned != npages) {
pr_err("SEV: Failure locking %lu pages.\n", npages);
ret = -ENOMEM;
@@ -751,7 +751,7 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
vaddr_end = vaddr + size;
/* Lock the user memory. */
- inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
+ inpages = sev_pin_memory(kvm, vaddr, size, &npages, FOLL_WRITE);
if (IS_ERR(inpages))
return PTR_ERR(inpages);
@@ -1250,7 +1250,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
if (IS_ERR(src_p))
return PTR_ERR(src_p);
- dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
+ dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n,
+ FOLL_WRITE);
if (IS_ERR(dst_p)) {
sev_unpin_memory(kvm, src_p, n);
return PTR_ERR(dst_p);
@@ -1316,7 +1317,7 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
return -EFAULT;
- pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
+ pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, FOLL_WRITE);
if (IS_ERR(pages))
return PTR_ERR(pages);
@@ -1798,7 +1799,7 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
/* Pin guest memory */
guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
- PAGE_SIZE, &n, 1);
+ PAGE_SIZE, &n, FOLL_WRITE);
if (IS_ERR(guest_page)) {
ret = PTR_ERR(guest_page);
goto e_free_trans;
@@ -2696,7 +2697,8 @@ int sev_mem_enc_register_region(struct kvm *kvm,
return -ENOMEM;
mutex_lock(&kvm->lock);
- region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
+ region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages,
+ FOLL_WRITE | FOLL_LONGTERM);
if (IS_ERR(region->pages)) {
ret = PTR_ERR(region->pages);
mutex_unlock(&kvm->lock);
--
2.7.4
On 1/11/25 00:15, yangge1116@126.com wrote:
> From: yangge <yangge1116@126.com>
>
> In the sev_mem_enc_register_region() function, we call sev_pin_memory()
> to pin memory for the long term. However, the FOLL_LONGTERM flag is not
> passed to sev_pin_memory(), so the pinned pages are not migrated out of
> MIGRATE_CMA/ZONE_MOVABLE first. This violates those mechanisms, which
> exist to avoid fragmentation with unmovable pages, and can for example
> make CMA allocations fail.
>
> To address this, pass the FOLL_LONGTERM flag when
> sev_mem_enc_register_region() calls sev_pin_memory().
Seems reasonable, some minor comments below, otherwise:
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
>
> Signed-off-by: yangge <yangge1116@126.com>
> ---
>
> V3:
> - the fix only needed for sev_mem_enc_register_region()
>
> V2:
> - update code and commit message suggested by David
>
> arch/x86/kvm/svm/sev.c | 16 +++++++++-------
> 1 file changed, 9 insertions(+), 7 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index 943bd07..04a125c 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -622,7 +622,7 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
>
> static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
> unsigned long ulen, unsigned long *n,
> - int write)
> + int flags)
> {
> struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> unsigned long npages, size;
> @@ -663,7 +663,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
> return ERR_PTR(-ENOMEM);
>
> /* Pin the user virtual address. */
> - npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
> + npinned = pin_user_pages_fast(uaddr, npages, flags, pages);
> if (npinned != npages) {
> pr_err("SEV: Failure locking %lu pages.\n", npages);
> ret = -ENOMEM;
> @@ -751,7 +751,7 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
> vaddr_end = vaddr + size;
>
> /* Lock the user memory. */
> - inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
> + inpages = sev_pin_memory(kvm, vaddr, size, &npages, FOLL_WRITE);
> if (IS_ERR(inpages))
> return PTR_ERR(inpages);
>
> @@ -1250,7 +1250,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
> if (IS_ERR(src_p))
> return PTR_ERR(src_p);
>
> - dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
> + dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n,
> + FOLL_WRITE);
You can keep this as one line.
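E.g. (whitespace change only, untested):

	dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, FOLL_WRITE);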
> if (IS_ERR(dst_p)) {
> sev_unpin_memory(kvm, src_p, n);
> return PTR_ERR(dst_p);
> @@ -1316,7 +1317,7 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
> if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
> return -EFAULT;
>
> - pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
> + pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, FOLL_WRITE);
> if (IS_ERR(pages))
> return PTR_ERR(pages);
>
> @@ -1798,7 +1799,7 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
>
> /* Pin guest memory */
> guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
> - PAGE_SIZE, &n, 1);
> + PAGE_SIZE, &n, FOLL_WRITE);
> if (IS_ERR(guest_page)) {
> ret = PTR_ERR(guest_page);
> goto e_free_trans;
> @@ -2696,7 +2697,8 @@ int sev_mem_enc_register_region(struct kvm *kvm,
> return -ENOMEM;
>
> mutex_lock(&kvm->lock);
> - region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
> + region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages,
> + FOLL_WRITE | FOLL_LONGTERM);
Need proper alignment of these parameters.
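Something like this, with the continuation aligned to the opening parenthesis
(whitespace change only, untested):

	region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages,
				       FOLL_WRITE | FOLL_LONGTERM);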
Thanks,
Tom
> if (IS_ERR(region->pages)) {
> ret = PTR_ERR(region->pages);
> mutex_unlock(&kvm->lock);