[PATCH v11 2/3] x86/mm: refactor __set_clr_pte_enc()
Posted by Ashish Kalra 1 year, 5 months ago
From: Ashish Kalra <ashish.kalra@amd.com>

Refactor __set_clr_pte_enc() and add two new helper functions,
prep_set_clr_pte_enc() and set_pte_enc_mask(), to set/clear the PTE
C-bit both from the early SEV/SNP initialization code and later during
normal system operation and shutdown/kexec.
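
For instance, a later, non-early user such as the kexec/shutdown path
can pair the two helpers as below (an illustrative sketch only; the
actual caller is added later in this series):

	static void set_pte_enc(pte_t *kpte, int level, void *va)
	{
		unsigned long pfn;
		pgprot_t new_prot;

		/* Flush caches and compute the new pfn and pgprot. */
		if (prep_set_clr_pte_enc(kpte, level, 1, va, &pfn, NULL,
					 NULL, &new_prot))
			return;

		/* Atomically switch the PTE to the new encryption mask. */
		set_pte_enc_mask(kpte, pfn, new_prot);
	}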

Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
---
 arch/x86/include/asm/sev.h    |  9 +++++++
 arch/x86/mm/mem_encrypt_amd.c | 47 +++++++++++++++++++++++++++++------
 2 files changed, 48 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index ac5886ce252e..4f3fd913aadb 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -348,6 +348,10 @@ u64 snp_get_unsupported_features(u64 status);
 u64 sev_get_status(void);
 void sev_show_status(void);
 void snp_update_svsm_ca(void);
+int prep_set_clr_pte_enc(pte_t *kpte, int level, int enc, void *va,
+			 unsigned long *ret_pfn, unsigned long *ret_pa,
+			 unsigned long *ret_size, pgprot_t *ret_new_prot);
+void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot);
 
 #else	/* !CONFIG_AMD_MEM_ENCRYPT */
 
@@ -384,6 +388,11 @@ static inline u64 snp_get_unsupported_features(u64 status) { return 0; }
 static inline u64 sev_get_status(void) { return 0; }
 static inline void sev_show_status(void) { }
 static inline void snp_update_svsm_ca(void) { }
+static inline int
+prep_set_clr_pte_enc(pte_t *kpte, int level, int enc, void *va,
+		     unsigned long *ret_pfn, unsigned long *ret_pa,
+		     unsigned long *ret_size, pgprot_t *ret_new_prot) { return 0; }
+static inline void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot) { }
 
 #endif	/* CONFIG_AMD_MEM_ENCRYPT */
 
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index 86a476a426c2..42a35040aaf9 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -311,15 +311,16 @@ static int amd_enc_status_change_finish(unsigned long vaddr, int npages, bool en
 	return 0;
 }
 
-static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
+int prep_set_clr_pte_enc(pte_t *kpte, int level, int enc, void *va,
+			 unsigned long *ret_pfn, unsigned long *ret_pa,
+			 unsigned long *ret_size, pgprot_t *ret_new_prot)
 {
 	pgprot_t old_prot, new_prot;
 	unsigned long pfn, pa, size;
-	pte_t new_pte;
 
 	pfn = pg_level_to_pfn(level, kpte, &old_prot);
 	if (!pfn)
-		return;
+		return 1;
 
 	new_prot = old_prot;
 	if (enc)
@@ -329,7 +330,7 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
 
 	/* If prot is same then do nothing. */
 	if (pgprot_val(old_prot) == pgprot_val(new_prot))
-		return;
+		return 1;
 
 	pa = pfn << PAGE_SHIFT;
 	size = page_level_size(level);
@@ -339,7 +340,39 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
 	 * physical page attribute from C=1 to C=0 or vice versa. Flush the
 	 * caches to ensure that data gets accessed with the correct C-bit.
 	 */
-	clflush_cache_range(__va(pa), size);
+	if (va)
+		clflush_cache_range(va, size);
+	else
+		clflush_cache_range(__va(pa), size);
+
+	if (ret_new_prot)
+		*ret_new_prot = new_prot;
+	if (ret_size)
+		*ret_size = size;
+	if (ret_pfn)
+		*ret_pfn = pfn;
+	if (ret_pa)
+		*ret_pa = pa;
+
+	return 0;
+}
+
+void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot)
+{
+	pte_t new_pte;
+
+	/* Change the page encryption mask. */
+	new_pte = pfn_pte(pfn, new_prot);
+	set_pte_atomic(kpte, new_pte);
+}
+
+static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
+{
+	unsigned long pfn, pa, size;
+	pgprot_t new_prot;
+
+	if (prep_set_clr_pte_enc(kpte, level, enc, NULL, &pfn, &pa, &size, &new_prot))
+		return;
 
 	/* Encrypt/decrypt the contents in-place */
 	if (enc) {
@@ -354,9 +387,7 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
 		early_snp_set_memory_shared((unsigned long)__va(pa), pa, 1);
 	}
 
-	/* Change the page encryption mask. */
-	new_pte = pfn_pte(pfn, new_prot);
-	set_pte_atomic(kpte, new_pte);
+	set_pte_enc_mask(kpte, pfn, new_prot);
 
 	/*
 	 * If page is set encrypted in the page table, then update the RMP table to
-- 
2.34.1
Re: [PATCH v11 2/3] x86/mm: refactor __set_clr_pte_enc()
Posted by Borislav Petkov 1 year, 5 months ago
On Tue, Jul 02, 2024 at 07:57:54PM +0000, Ashish Kalra wrote:
> From: Ashish Kalra <ashish.kalra@amd.com>
> 
> Refactor __set_clr_pte_enc() and add two new helper functions,
> prep_set_clr_pte_enc() and set_pte_enc_mask(), to set/clear the PTE
> C-bit both from the early SEV/SNP initialization code and later during
> normal system operation and shutdown/kexec.
> 
> Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
> ---
>  arch/x86/include/asm/sev.h    |  9 +++++++
>  arch/x86/mm/mem_encrypt_amd.c | 47 +++++++++++++++++++++++++++++------
>  2 files changed, 48 insertions(+), 8 deletions(-)

Some serious cleanups on top which reduce the diffstat even more. Untested ofc.

Holler if something's unclear.
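
The gist of it: carry the in/out parameters in one struct pte_enc_desc
instead of threading four optional out-pointers through the interface.
A caller then fills in only what it has and reads back what it needs,
roughly:

	struct pte_enc_desc d = {
		.kpte	   = kpte,
		.pte_level = level,
		.encrypt   = true,
	};

	/* Fills in d.pfn/d.pa/d.size/d.new_pgprot and flushes caches. */
	if (prepare_pte_enc(&d))
		return;

	set_pte_enc_mask(kpte, d.pfn, d.new_pgprot);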

---
diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
index 0c90a8a74a88..5013c3afb0c4 100644
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@ -1012,11 +1012,15 @@ void snp_accept_memory(phys_addr_t start, phys_addr_t end)
 
 static void set_pte_enc(pte_t *kpte, int level, void *va)
 {
-	unsigned long pfn;
-	pgprot_t new_prot;
-
-	prep_set_clr_pte_enc(kpte, level, 1, va, &pfn, NULL, NULL, &new_prot);
-	set_pte_enc_mask(kpte, pfn, new_prot);
+	struct pte_enc_desc d = {
+		.kpte	   = kpte,
+		.pte_level = level,
+		.va	   = va,
+		.encrypt   = true
+	};
+
+	prepare_pte_enc(&d);
+	set_pte_enc_mask(kpte, d.pfn, d.new_pgprot);
 }
 
 static bool make_pte_private(pte_t *pte, unsigned long addr, int pages, int level)
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 4f1a6d1e3f4c..68a03fd07665 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -234,6 +234,22 @@ struct svsm_attest_call {
 	u8 rsvd[4];
 };
 
+/* PTE descriptor used for the prepare_pte_enc() operations. */
+struct pte_enc_desc {
+	pte_t *kpte;
+	int pte_level;
+	bool encrypt;
+	/* pfn of the kpte above */
+	unsigned long pfn;
+	/* physical address of @pfn */
+	unsigned long pa;
+	/* virtual address of @pfn */
+	void *va;
+	/* memory covered by the pte */
+	unsigned long size;
+	pgprot_t new_pgprot;
+};
+
 /*
  * SVSM protocol structure
  */
@@ -348,9 +364,7 @@ u64 snp_get_unsupported_features(u64 status);
 u64 sev_get_status(void);
 void sev_show_status(void);
 void snp_update_svsm_ca(void);
-int prep_set_clr_pte_enc(pte_t *kpte, int level, int enc, void *va,
-			 unsigned long *ret_pfn, unsigned long *ret_pa,
-			 unsigned long *ret_size, pgprot_t *ret_new_prot);
+int prepare_pte_enc(struct pte_enc_desc *d);
 void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot);
 void snp_kexec_finish(void);
 void snp_kexec_begin(void);
@@ -390,10 +404,7 @@ static inline u64 snp_get_unsupported_features(u64 status) { return 0; }
 static inline u64 sev_get_status(void) { return 0; }
 static inline void sev_show_status(void) { }
 static inline void snp_update_svsm_ca(void) { }
-static inline int
-prep_set_clr_pte_enc(pte_t *kpte, int level, int enc, void *va,
-		     unsigned long *ret_pfn, unsigned long *ret_pa,
-		     unsigned long *ret_size, pgprot_t *ret_new_prot) { return 0; }
+static inline int prepare_pte_enc(struct pte_enc_desc *d) { return 0; }
 static inline void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot) { }
 static inline void snp_kexec_finish(void) { }
 static inline void snp_kexec_begin(void) { }
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index dec24bb08b09..774f9677458f 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -311,48 +311,37 @@ static int amd_enc_status_change_finish(unsigned long vaddr, int npages, bool en
 	return 0;
 }
 
-int prep_set_clr_pte_enc(pte_t *kpte, int level, int enc, void *va,
-			 unsigned long *ret_pfn, unsigned long *ret_pa,
-			 unsigned long *ret_size, pgprot_t *ret_new_prot)
+int prepare_pte_enc(struct pte_enc_desc *d)
 {
-	pgprot_t old_prot, new_prot;
-	unsigned long pfn, pa, size;
+	pgprot_t old_prot;
 
-	pfn = pg_level_to_pfn(level, kpte, &old_prot);
-	if (!pfn)
+	d->pfn = pg_level_to_pfn(d->pte_level, d->kpte, &old_prot);
+	if (!d->pfn)
 		return 1;
 
-	new_prot = old_prot;
-	if (enc)
-		pgprot_val(new_prot) |= _PAGE_ENC;
+	d->new_pgprot = old_prot;
+	if (d->encrypt)
+		pgprot_val(d->new_pgprot) |= _PAGE_ENC;
 	else
-		pgprot_val(new_prot) &= ~_PAGE_ENC;
+		pgprot_val(d->new_pgprot) &= ~_PAGE_ENC;
 
 	/* If prot is same then do nothing. */
-	if (pgprot_val(old_prot) == pgprot_val(new_prot))
+	if (pgprot_val(old_prot) == pgprot_val(d->new_pgprot))
 		return 1;
 
-	pa = pfn << PAGE_SHIFT;
-	size = page_level_size(level);
+	d->pa = d->pfn << PAGE_SHIFT;
+	d->size = page_level_size(d->pte_level);
 
 	/*
-	 * We are going to perform in-place en-/decryption and change the
-	 * physical page attribute from C=1 to C=0 or vice versa. Flush the
-	 * caches to ensure that data gets accessed with the correct C-bit.
+	 * In-place en-/decryption and physical page attribute change
+	 * from C=1 to C=0 or vice versa will be performed. Flush the
+	 * caches to ensure that data gets accessed with the correct
+	 * C-bit.
 	 */
-	if (va)
-		clflush_cache_range(va, size);
+	if (d->va)
+		clflush_cache_range(d->va, d->size);
 	else
-		clflush_cache_range(__va(pa), size);
-
-	if (ret_new_prot)
-		*ret_new_prot = new_prot;
-	if (ret_size)
-		*ret_size = size;
-	if (ret_pfn)
-		*ret_pfn = pfn;
-	if (ret_pa)
-		*ret_pa = pa;
+		clflush_cache_range(__va(d->pa), d->size);
 
 	return 0;
 }
@@ -368,33 +357,36 @@ void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot)
 
 static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
 {
-	unsigned long pfn, pa, size;
-	pgprot_t new_prot;
+	struct pte_enc_desc d = {
+		.kpte	     = kpte,
+		.pte_level   = level,
+		.encrypt     = enc
+	};
 
-	if (prep_set_clr_pte_enc(kpte, level, enc, NULL, &pfn, &pa, &size, &new_prot))
+	if (prepare_pte_enc(&d))
 		return;
 
 	/* Encrypt/decrypt the contents in-place */
 	if (enc) {
-		sme_early_encrypt(pa, size);
+		sme_early_encrypt(d.pa, d.size);
 	} else {
-		sme_early_decrypt(pa, size);
+		sme_early_decrypt(d.pa, d.size);
 
 		/*
 		 * On SNP, the page state change in the RMP table must happen
 		 * before the page table updates.
 		 */
-		early_snp_set_memory_shared((unsigned long)__va(pa), pa, 1);
+		early_snp_set_memory_shared((unsigned long)__va(d.pa), d.pa, 1);
 	}
 
-	set_pte_enc_mask(kpte, pfn, new_prot);
+	set_pte_enc_mask(kpte, d.pfn, d.new_pgprot);
 
 	/*
 	 * If page is set encrypted in the page table, then update the RMP table to
 	 * add this page as private.
 	 */
 	if (enc)
-		early_snp_set_memory_private((unsigned long)__va(pa), pa, 1);
+		early_snp_set_memory_private((unsigned long)__va(d.pa), d.pa, 1);
 }
 
 static int __init early_set_memory_enc_dec(unsigned long vaddr,

-- 
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette