To better describe the underlying implementation.  Define
cache_flush_permitted() as an alias of has_arch_io_resources(), so that
current users of cache_flush_permitted() are not effectively modified.

With the introduction of the new handler, change some of the call sites of
cache_flush_permitted() to instead use has_arch_io_resources() as such
callers are not after whether cache flush is enabled, but rather whether
the domain has any IO resources assigned.

Take the opportunity to adjust l1_disallow_mask() to use the newly
introduced has_arch_io_resources() macro.

No functional change intended.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
 xen/arch/x86/include/asm/iocap.h | 4 +++-
 xen/arch/x86/mm.c                | 3 +--
 xen/arch/x86/mm/p2m-pod.c        | 4 ++--
 xen/common/memory.c              | 2 +-
 xen/include/asm-generic/iocap.h  | 4 +++-
 5 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/xen/arch/x86/include/asm/iocap.h b/xen/arch/x86/include/asm/iocap.h
index 53d87ae8a334..61d026dbf5f6 100644
--- a/xen/arch/x86/include/asm/iocap.h
+++ b/xen/arch/x86/include/asm/iocap.h
@@ -15,10 +15,12 @@
 #define ioports_access_permitted(d, s, e)               \
     rangeset_contains_range((d)->arch.ioport_caps, s, e)
 
-#define cache_flush_permitted(d)                        \
+#define has_arch_io_resources(d)                        \
     (!rangeset_is_empty((d)->iomem_caps) ||             \
      !rangeset_is_empty((d)->arch.ioport_caps))
 
+#define cache_flush_permitted has_arch_io_resources
+
 static inline int ioports_permit_access(struct domain *d, unsigned long s,
                                         unsigned long e)
 {
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 38e214352201..59b60b1e62a7 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -172,8 +172,7 @@ static DEFINE_SPINLOCK(subpage_ro_lock);
 
 #define l1_disallow_mask(d)                                     \
     (((d) != dom_io) &&                                         \
-     (rangeset_is_empty((d)->iomem_caps) &&                     \
-      rangeset_is_empty((d)->arch.ioport_caps) &&               \
+     (!has_arch_io_resources(d) &&                              \
       !has_arch_pdevs(d) &&                                     \
       is_pv_domain(d)) ?                                        \
      L1_DISALLOW_MASK : (L1_DISALLOW_MASK & ~PAGE_CACHE_ATTRS))
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index df2a1cc0749b..05633fe2ac88 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -338,7 +338,7 @@ p2m_pod_set_mem_target(struct domain *d, unsigned long target)
 
     ASSERT( pod_target >= p2m->pod.count );
 
-    if ( has_arch_pdevs(d) || cache_flush_permitted(d) )
+    if ( has_arch_pdevs(d) || has_arch_io_resources(d) )
         ret = -ENOTEMPTY;
     else
         ret = p2m_pod_set_cache_target(p2m, pod_target, 1/*preemptible*/);
@@ -1395,7 +1395,7 @@ guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
     if ( !paging_mode_translate(d) )
         return -EINVAL;
 
-    if ( has_arch_pdevs(d) || cache_flush_permitted(d) )
+    if ( has_arch_pdevs(d) || has_arch_io_resources(d) )
         return -ENOTEMPTY;
 
     do {
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 8ca4e1a8425b..46620ed8253d 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -86,7 +86,7 @@ static unsigned int max_order(const struct domain *d)
     unsigned int order = domu_max_order;
 
 #ifdef CONFIG_HAS_PASSTHROUGH
-    if ( cache_flush_permitted(d) && order < ptdom_max_order )
+    if ( has_arch_io_resources(d) && order < ptdom_max_order )
         order = ptdom_max_order;
 #endif
 
diff --git a/xen/include/asm-generic/iocap.h b/xen/include/asm-generic/iocap.h
index dd7cb45488f7..664bbc8971fe 100644
--- a/xen/include/asm-generic/iocap.h
+++ b/xen/include/asm-generic/iocap.h
@@ -2,9 +2,11 @@
 #ifndef __ASM_GENERIC_IOCAP_H__
 #define __ASM_GENERIC_IOCAP_H__
 
-#define cache_flush_permitted(d)                        \
+#define has_arch_io_resources(d)                        \
     (!rangeset_is_empty((d)->iomem_caps))
 
+#define cache_flush_permitted has_arch_io_resources
+
 #endif /* __ASM_GENERIC_IOCAP_H__ */
 
 /*
-- 
2.48.1
On 06.05.2025 10:31, Roger Pau Monne wrote:
> To better describe the underlying implementation.  Define
> cache_flush_permitted() as an alias of has_arch_io_resources(), so that
> current users of cache_flush_permitted() are not effectively modified.
>
> With the introduction of the new handler, change some of the call sites of
> cache_flush_permitted() to instead use has_arch_io_resources() as such
> callers are not after whether cache flush is enabled, but rather whether
> the domain has any IO resources assigned.
>
> Take the opportunity to adjust l1_disallow_mask() to use the newly
> introduced has_arch_io_resources() macro.

While I'm happy with everything else here, to me it's at least on the
edge whether cache_flush_permitted() wouldn't be the better predicate
to use there, for this being about ...

> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -172,8 +172,7 @@ static DEFINE_SPINLOCK(subpage_ro_lock);
>
>  #define l1_disallow_mask(d)                                     \
>      (((d) != dom_io) &&                                         \
> -     (rangeset_is_empty((d)->iomem_caps) &&                     \
> -      rangeset_is_empty((d)->arch.ioport_caps) &&               \
> +     (!has_arch_io_resources(d) &&                              \
>        !has_arch_pdevs(d) &&                                     \
>        is_pv_domain(d)) ?                                        \
>       L1_DISALLOW_MASK : (L1_DISALLOW_MASK & ~PAGE_CACHE_ATTRS))

... cachability, which goes hand in hand with the ability to also
flush cache contents.

Tangentially - is it plausible for has_arch_io_resources() to return
false when has_arch_pdevs() returns true? Perhaps there are exotic
PCI devices (but non-bridges) which work with no BARs at all ...

Jan
On Mon, May 12, 2025 at 05:16:02PM +0200, Jan Beulich wrote:
> On 06.05.2025 10:31, Roger Pau Monne wrote:
> > [...]
>
> While I'm happy with everything else here, to me it's at least on the
> edge whether cache_flush_permitted() wouldn't be the better predicate
> to use there, for this being about ...
>
> > [...]
>
> ... cachability, which goes hand in hand with the ability to also
> flush cache contents.

Hm, I was on the edge here, in fact I've previously coded this using
cache_flush_permitted(), just to the change back to
has_arch_io_resources(). If you think cache_flush_permitted() is
better I'm fine with that.

> Tangentially - is it plausible for has_arch_io_resources() to return
> false when has_arch_pdevs() returns true? Perhaps there are exotic
> PCI devices (but non-bridges) which work with no BARs at all ...

I guess it's technically possible, albeit very unlikely? How would
the OS interact with such device then, exclusively with PCI config
space accesses?

I'm happy to just use cache_flush_permitted() which is likely more
correct given the context here.

Thanks, Roger.
On 15.05.2025 12:28, Roger Pau Monné wrote:
> On Mon, May 12, 2025 at 05:16:02PM +0200, Jan Beulich wrote:
>> [...]
>
> Hm, I was on the edge here, in fact I've previously coded this using
> cache_flush_permitted(), just to the change back to
> has_arch_io_resources(). If you think cache_flush_permitted() is
> better I'm fine with that.

I think that would be better here, yet as you say - it's not entirely
clear cut either way.

>> Tangentially - is it plausible for has_arch_io_resources() to return
>> false when has_arch_pdevs() returns true? Perhaps there are exotic
>> PCI devices (but non-bridges) which work with no BARs at all ...
>
> I guess it's technically possible, albeit very unlikely? How would
> the OS interact with such device then, exclusively with PCI config
> space accesses?

Yes, that's what I'd expect for such devices. Looking around, there
are numerous such devices (leaving aside bridges). Just that it looks
implausible to me that one would want to pass those through to a guest.

Jan
On Fri, May 16, 2025 at 09:07:43AM +0200, Jan Beulich wrote:
> On 15.05.2025 12:28, Roger Pau Monné wrote:
> > [...]
> > Hm, I was on the edge here, in fact I've previously coded this using
> > cache_flush_permitted(), just to the change back to
> > has_arch_io_resources(). If you think cache_flush_permitted() is
> > better I'm fine with that.
>
> I think that would be better here, yet as you say - it's not entirely
> clear cut either way.

I've reverted this chunk of the change and left the code as-is for the
time being.

> > I guess it's technically possible, albeit very unlikely? How would
> > the OS interact with such device then, exclusively with PCI config
> > space accesses?
>
> Yes, that's what I'd expect for such devices. Looking around, there
> are numerous such devices (leaving aside bridges). Just that it looks
> implausible to me that one would want to pass those through to a guest.

Well, we also need to consider dom0 here (either PV or PVH), which
will get those devices passed through. I assume those are mostly
system devices, and hence there's usually no interaction of the OS
with them.

I'm thinking that our definition of cache_flush_permitted() is not
fully accurate then, we would need to also account for any PCI devices
being assigned to the guest, even if those have no IO resources?

Thanks, Roger.
On 16.05.2025 10:02, Roger Pau Monné wrote:
> On Fri, May 16, 2025 at 09:07:43AM +0200, Jan Beulich wrote:
>> [...]
>> I think that would be better here, yet as you say - it's not entirely
>> clear cut either way.
>
> I've reverted this chunk of the change and left the code as-is for the
> time being.

Didn't we agree to use cache_flush_permitted() here instead?

> I'm thinking that our definition of cache_flush_permitted() is not
> fully accurate then, we would need to also account for any PCI devices
> being assigned to the guest, even if those have no IO resources?

I think so, yes.

Jan
On Fri, May 16, 2025 at 10:08:35AM +0200, Jan Beulich wrote:
> On 16.05.2025 10:02, Roger Pau Monné wrote:
> > [...]
> > I've reverted this chunk of the change and left the code as-is for the
> > time being.
>
> Didn't we agree to use cache_flush_permitted() here instead?

I think it would be a bit weird, if we want this to be a
non-functional change we would need to keep the has_arch_pdevs()
condition because cache_flush_permitted() doesn't take that into
account. Or we need to adjust cache_flush_permitted() to also take
has_arch_pdevs() into consideration.

Thanks, Roger.
On 16.05.2025 10:27, Roger Pau Monné wrote:
> On Fri, May 16, 2025 at 10:08:35AM +0200, Jan Beulich wrote:
>> [...]
>> Didn't we agree to use cache_flush_permitted() here instead?
>
> I think it would be a bit weird, if we want this to be a
> non-functional change we would need to keep the has_arch_pdevs()
> condition because cache_flush_permitted() doesn't take that into
> account. Or we need to adjust cache_flush_permitted() to also take
> has_arch_pdevs() into consideration.

Which is what you suggested elsewhere, or did I misunderstand that?

Jan
On Fri, May 16, 2025 at 10:36:19AM +0200, Jan Beulich wrote:
> On 16.05.2025 10:27, Roger Pau Monné wrote:
> > [...]
> > I think it would be a bit weird, if we want this to be a
> > non-functional change we would need to keep the has_arch_pdevs()
> > condition because cache_flush_permitted() doesn't take that into
> > account. Or we need to adjust cache_flush_permitted() to also take
> > has_arch_pdevs() into consideration.
>
> Which is what you suggested elsewhere, or did I misunderstand that?

Yes, I missed that you agreed to that then, sorry. To many messages
on the thread I'm afraid.

Thanks, Roger.
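
A minimal sketch of the follow-up discussed above - folding has_arch_pdevs()
into the cache flush predicate - assuming the alias introduced by this patch
is turned back into a real macro. The exact form and placement were not
settled in this thread, so the definition below is illustrative only:

/*
 * Illustrative sketch, not a committed change: allow cache flushes (and
 * cachability control) for domains that have any I/O resources or any
 * PCI device assigned, so that BAR-less devices are covered as well.
 */
#define cache_flush_permitted(d)                        \
    (has_arch_io_resources(d) || has_arch_pdevs(d))

With such a definition, l1_disallow_mask() could test
!cache_flush_permitted(d) alone instead of open-coding the rangeset and
has_arch_pdevs() checks, which is the direction the discussion above
converges on.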