xen/arch/x86/Kconfig | 23 ----------------------- xen/arch/x86/mm.c | 31 ------------------------------- xen/arch/x86/pv/descriptor-tables.c | 15 --------------- xen/arch/x86/pv/domain.c | 4 ---- xen/arch/x86/pv/mm.c | 9 --------- xen/include/asm-x86/domain.h | 6 ------ 6 files changed, 88 deletions(-)
... in accordance with the timeline laid out in the Kconfig message. There
has been no comment since it was disabled by default.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Wei Liu <wl@xen.org>
CC: Roger Pau Monné <roger.pau@citrix.com>
---
xen/arch/x86/Kconfig | 23 -----------------------
xen/arch/x86/mm.c | 31 -------------------------------
xen/arch/x86/pv/descriptor-tables.c | 15 ---------------
xen/arch/x86/pv/domain.c | 4 ----
xen/arch/x86/pv/mm.c | 9 ---------
xen/include/asm-x86/domain.h | 6 ------
6 files changed, 88 deletions(-)
diff --git a/xen/arch/x86/Kconfig b/xen/arch/x86/Kconfig
index 8149362bde..a69be983d6 100644
--- a/xen/arch/x86/Kconfig
+++ b/xen/arch/x86/Kconfig
@@ -225,26 +225,3 @@ endmenu
source "common/Kconfig"
source "drivers/Kconfig"
-
-menu "Deprecated Functionality"
-
-config PV_LDT_PAGING
- bool "PV LDT Paging-out support"
- depends on PV
- ---help---
- For a very long time, the PV ABI has included the ability to page
- out the LDT by transitioning its mapping to not-present. This
- functionality is believed to only exist for the PV Windows XP port
- which never came to anything.
-
- The implementation contains a vCPU scalability limitation in a
- position which is prohibitively complicated to resolve. As the
- feature is believed to be unused in practice, removing the feature
- is the easiest remediation.
-
- If you discover a usecase which is broken by this option being off,
- please contact xen-devel@lists.xenproject.org urgently. Baring
- something unexpected, the code and this option will be deleted 2
- releases after Xen 4.12.
-
-endmenu
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index fb53d62abc..ee56e053e1 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1251,40 +1251,9 @@ void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner)
*/
if ( (l1e_get_flags(l1e) & _PAGE_RW) &&
((l1e_owner == pg_owner) || !paging_mode_external(pg_owner)) )
- {
put_page_and_type(page);
- }
else
- {
-#ifdef CONFIG_PV_LDT_PAGING
- /* We expect this is rare so we blow the entire shadow LDT. */
- if ( unlikely(((page->u.inuse.type_info & PGT_type_mask) ==
- PGT_seg_desc_page)) &&
- unlikely(((page->u.inuse.type_info & PGT_count_mask) != 0)) &&
- (l1e_owner == pg_owner) )
- {
- struct vcpu *v;
- cpumask_t *mask = this_cpu(scratch_cpumask);
-
- cpumask_clear(mask);
-
- for_each_vcpu ( pg_owner, v )
- {
- unsigned int cpu;
-
- if ( !pv_destroy_ldt(v) )
- continue;
- cpu = read_atomic(&v->dirty_cpu);
- if ( is_vcpu_dirty_cpu(cpu) )
- __cpumask_set_cpu(cpu, mask);
- }
-
- if ( !cpumask_empty(mask) )
- flush_tlb_mask(mask);
- }
-#endif /* CONFIG_PV_LDT_PAGING */
put_page(page);
- }
}
#ifdef CONFIG_PV
diff --git a/xen/arch/x86/pv/descriptor-tables.c b/xen/arch/x86/pv/descriptor-tables.c
index 940804b18a..090f901b5b 100644
--- a/xen/arch/x86/pv/descriptor-tables.c
+++ b/xen/arch/x86/pv/descriptor-tables.c
@@ -37,14 +37,7 @@ bool pv_destroy_ldt(struct vcpu *v)
ASSERT(!in_irq());
-#ifdef CONFIG_PV_LDT_PAGING
- spin_lock(&v->arch.pv.shadow_ldt_lock);
-
- if ( v->arch.pv.shadow_ldt_mapcnt == 0 )
- goto out;
-#else
ASSERT(v == current || !vcpu_cpu_dirty(v));
-#endif
pl1e = pv_ldt_ptes(v);
@@ -62,14 +55,6 @@ bool pv_destroy_ldt(struct vcpu *v)
put_page_and_type(page);
}
-#ifdef CONFIG_PV_LDT_PAGING
- ASSERT(v->arch.pv.shadow_ldt_mapcnt == mappings_dropped);
- v->arch.pv.shadow_ldt_mapcnt = 0;
-
- out:
- spin_unlock(&v->arch.pv.shadow_ldt_lock);
-#endif
-
return mappings_dropped;
}
diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c
index 70fae43965..43da5c179f 100644
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -243,10 +243,6 @@ int pv_vcpu_initialise(struct vcpu *v)
ASSERT(!is_idle_domain(d));
-#ifdef CONFIG_PV_LDT_PAGING
- spin_lock_init(&v->arch.pv.shadow_ldt_lock);
-#endif
-
rc = pv_create_gdt_ldt_l1tab(v);
if ( rc )
return rc;
diff --git a/xen/arch/x86/pv/mm.c b/xen/arch/x86/pv/mm.c
index 2b0dadc8da..5d4cd00941 100644
--- a/xen/arch/x86/pv/mm.c
+++ b/xen/arch/x86/pv/mm.c
@@ -123,17 +123,8 @@ bool pv_map_ldt_shadow_page(unsigned int offset)
pl1e = &pv_ldt_ptes(curr)[offset >> PAGE_SHIFT];
l1e_add_flags(gl1e, _PAGE_RW);
-#ifdef CONFIG_PV_LDT_PAGING
- spin_lock(&curr->arch.pv.shadow_ldt_lock);
-#endif
-
l1e_write(pl1e, gl1e);
-#ifdef CONFIG_PV_LDT_PAGING
- curr->arch.pv.shadow_ldt_mapcnt++;
- spin_unlock(&curr->arch.pv.shadow_ldt_lock);
-#endif
-
return true;
}
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 4192c636b1..554b8dddcc 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -520,12 +520,6 @@ struct pv_vcpu
unsigned int iopl; /* Current IOPL for this VCPU, shifted left by
* 12 to match the eflags register. */
-#ifdef CONFIG_PV_LDT_PAGING
- /* Current LDT details. */
- unsigned long shadow_ldt_mapcnt;
- spinlock_t shadow_ldt_lock;
-#endif
-
/*
* %dr7 bits the guest has set, but aren't loaded into hardware, and are
* completely emulated.
--
2.11.0
On Fri, Apr 17, 2020 at 12:34:23PM +0100, Andrew Cooper wrote: > ... in accordance with the timeline layed out in the Kconfig message. There > has been no comment since it was disabled by default. layed -> laid Code looks good to me: Reviewed-by: Wei Liu <wl@xen.org>
© 2016 - 2024 Red Hat, Inc.