Instead of having another copy of a page table walk in set_readonly(),
just use walk_pt().
As it will be needed later anyway, split out the TLB flushing into a
dedicated function.
Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- clear count after doing an mmu_update call (Samuel Thibault)
- do final mmu_update call from set_readonly() if needed (Samuel Thibault)
---
arch/x86/mm.c | 124 +++++++++++++++++++++++---------------------------
1 file changed, 56 insertions(+), 68 deletions(-)
diff --git a/arch/x86/mm.c b/arch/x86/mm.c
index 84a6d7f0..85827d93 100644
--- a/arch/x86/mm.c
+++ b/arch/x86/mm.c
@@ -402,92 +402,80 @@ static void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
* Mark portion of the address space read only.
*/
extern struct shared_info shared_info;
-static void set_readonly(void *text, void *etext)
-{
- unsigned long start_address =
- ((unsigned long) text + PAGE_SIZE - 1) & PAGE_MASK;
- unsigned long end_address = (unsigned long) etext;
- pgentry_t *tab = pt_base, page;
- unsigned long mfn = pfn_to_mfn(virt_to_pfn(pt_base));
- unsigned long offset;
- unsigned long page_size = PAGE_SIZE;
+
+struct set_readonly_par {
+ unsigned long etext;
#ifdef CONFIG_PARAVIRT
- int count = 0;
- int rc;
+ unsigned int count;
#endif
+};
- printk("setting %p-%p readonly\n", text, etext);
+static int set_readonly_func(unsigned long va, unsigned int lvl, bool is_leaf,
+ pgentry_t *pte, void *par)
+{
+ struct set_readonly_par *ro = par;
- while ( start_address + page_size <= end_address )
- {
- tab = pt_base;
- mfn = pfn_to_mfn(virt_to_pfn(pt_base));
+ if ( !is_leaf )
+ return 0;
-#if defined(__x86_64__)
- offset = l4_table_offset(start_address);
- page = tab[offset];
- mfn = pte_to_mfn(page);
- tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
-#endif
- offset = l3_table_offset(start_address);
- page = tab[offset];
- mfn = pte_to_mfn(page);
- tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
- offset = l2_table_offset(start_address);
- if ( !(tab[offset] & _PAGE_PSE) )
- {
- page = tab[offset];
- mfn = pte_to_mfn(page);
- tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
+ if ( va + (1UL << ptdata[lvl].shift) > ro->etext )
+ return 1;
- offset = l1_table_offset(start_address);
- }
+ if ( va == (unsigned long)&shared_info )
+ {
+ printk("skipped %lx\n", va);
+ return 0;
+ }
- if ( start_address != (unsigned long)&shared_info )
- {
#ifdef CONFIG_PARAVIRT
- mmu_updates[count].ptr =
- ((pgentry_t)mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
- mmu_updates[count].val = tab[offset] & ~_PAGE_RW;
- count++;
+ mmu_updates[ro->count].ptr = virt_to_mach(pte);
+ mmu_updates[ro->count].val = *pte & ~_PAGE_RW;
+ ro->count++;
+
+    if ( ro->count == L1_PAGETABLE_ENTRIES )
+    {
+        if ( HYPERVISOR_mmu_update(mmu_updates, ro->count, NULL,
+                                   DOMID_SELF) < 0 )
+            BUG();
+        ro->count = 0;
+    }
#else
- tab[offset] &= ~_PAGE_RW;
+ *pte &= ~_PAGE_RW;
#endif
- }
- else
- printk("skipped %lx\n", start_address);
- start_address += page_size;
+ return 0;
+}
#ifdef CONFIG_PARAVIRT
- if ( count == L1_PAGETABLE_ENTRIES ||
- start_address + page_size > end_address )
- {
- rc = HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF);
- if ( rc < 0 )
- {
- printk("ERROR: set_readonly(): PTE could not be updated\n");
- do_exit();
- }
- count = 0;
- }
+static void tlb_flush(void)
+{
+ mmuext_op_t op = { .cmd = MMUEXT_TLB_FLUSH_ALL };
+ int count;
+
+ HYPERVISOR_mmuext_op(&op, 1, &count, DOMID_SELF);
+}
#else
- if ( start_address == (1UL << L2_PAGETABLE_SHIFT) )
- page_size = 1UL << L2_PAGETABLE_SHIFT;
+static void tlb_flush(void)
+{
+ write_cr3((unsigned long)pt_base);
+}
#endif
- }
+
+static void set_readonly(void *text, void *etext)
+{
+ struct set_readonly_par setro = { .etext = (unsigned long)etext };
+ unsigned long start_address = PAGE_ALIGN((unsigned long)text);
+
+ printk("setting %p-%p readonly\n", text, etext);
+ walk_pt(start_address, setro.etext, set_readonly_func, &setro);
#ifdef CONFIG_PARAVIRT
- {
- mmuext_op_t op = {
- .cmd = MMUEXT_TLB_FLUSH_ALL,
- };
- int count;
- HYPERVISOR_mmuext_op(&op, 1, &count, DOMID_SELF);
- }
-#else
- write_cr3((unsigned long)pt_base);
+    if ( setro.count &&
+         HYPERVISOR_mmu_update(mmu_updates, setro.count, NULL, DOMID_SELF) < 0 )
+        BUG();
#endif
+
+ tlb_flush();
}
/*
--
2.43.0
Juergen Gross, le mar. 13 août 2024 15:41:58 +0200, a ecrit:
> +    if ( ro->count == L1_PAGETABLE_ENTRIES )
> +    {
> +        ro->count = 0;
> +        if ( HYPERVISOR_mmu_update(mmu_updates, ro->count, NULL,
> +                                   DOMID_SELF) < 0 )

You need to set ro->count *after* calling mmu_update.

Apart from this,

Reviewed-by: Samuel Thibault <samuel.thibault@ens-lyon.org>

Samuel
On 20.08.2024 13:57, Samuel Thibault wrote:
> Juergen Gross, le mar. 13 août 2024 15:41:58 +0200, a ecrit:
>> +    if ( ro->count == L1_PAGETABLE_ENTRIES )
>> +    {
>> +        ro->count = 0;
>> +        if ( HYPERVISOR_mmu_update(mmu_updates, ro->count, NULL,
>> +                                   DOMID_SELF) < 0 )
>
> You need to set ro->count *after* calling mmu_update.

Happy to move the line while committing, so long as Jürgen agrees.

Jan

> Apart from this,
>
> Reviewed-by: Samuel Thibault <samuel.thibault@ens-lyon.org>
>
> Samuel
>
On 20.08.24 14:53, Jan Beulich wrote:
> On 20.08.2024 13:57, Samuel Thibault wrote:
>> Juergen Gross, le mar. 13 août 2024 15:41:58 +0200, a ecrit:
>>> +    if ( ro->count == L1_PAGETABLE_ENTRIES )
>>> +    {
>>> +        ro->count = 0;
>>> +        if ( HYPERVISOR_mmu_update(mmu_updates, ro->count, NULL,
>>> +                                   DOMID_SELF) < 0 )
>>
>> You need to set ro->count *after* calling mmu_update.
>
> Happy to move the line while committing, so long as Jürgen agrees.

Oh yes, of course I do.

Thanks,

Juergen
© 2016 - 2024 Red Hat, Inc.