For implementers, the fast_only bool indicates that the age information
should be harvested without slowing down other MMU operations, and
ideally without being slowed down by other MMU operations ourselves.
Usually this means that the implementation should be lockless.
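
As a purely illustrative sketch (not part of this patch; the foo_*
names are hypothetical), a driver whose secondary page tables are
protected by a spinlock could honor fast_only by refusing to contend
on that lock and instead reporting "not young":

	static int foo_test_young(struct mmu_notifier *mn, struct mm_struct *mm,
				  unsigned long address, bool fast_only)
	{
		struct foo_mmu *foo = container_of(mn, struct foo_mmu, mn);
		int young;

		if (fast_only) {
			/* Never block: if the lock is contended, give up. */
			if (!spin_trylock(&foo->lock))
				return 0;	/* report NOT young */
		} else {
			spin_lock(&foo->lock);
		}

		young = foo_pte_young(foo, address);
		spin_unlock(&foo->lock);
		return young;
	}

Such an implementation must also set has_fast_aging on its struct
mmu_notifier; otherwise the fast_only calls skip it entirely.
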
Also add mmu_notifier_test_young_fast_only() and
mmu_notifier_clear_young_fast_only() helpers that invoke these
notifiers with fast_only set.
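
A minimal caller-side sketch (hypothetical foo_age_one_pte(); not part
of this patch) showing how an aging pass that must not sleep could
combine the primary MMU's accessed bit with whatever the secondary
MMUs can report without blocking:

	static bool foo_age_one_pte(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *pte)
	{
		bool young = ptep_test_and_clear_young(vma, addr, pte);

		/*
		 * Only notifiers with has_fast_aging are consulted; the
		 * rest are skipped and contribute "not young".
		 */
		young |= mmu_notifier_clear_young_fast_only(vma->vm_mm, addr,
							    addr + PAGE_SIZE);
		return young;
	}
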
Signed-off-by: James Houghton <jthoughton@google.com>
---
include/linux/mmu_notifier.h | 61 ++++++++++++++++++++++++++++++++----
include/trace/events/kvm.h | 19 ++++++-----
mm/mmu_notifier.c | 18 ++++++++---
virt/kvm/kvm_main.c | 12 ++++---
4 files changed, 88 insertions(+), 22 deletions(-)
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 37643fa43687..7c17e2871c66 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -106,21 +106,38 @@ struct mmu_notifier_ops {
* clear_young is a lightweight version of clear_flush_young. Like the
* latter, it is supposed to test-and-clear the young/accessed bitflag
* in the secondary pte, but it may omit flushing the secondary tlb.
+ *
+ * The fast_only parameter indicates that this call should not block,
+ * and this function should not cause other MMU notifier calls to
+ * block. Usually this means that the implementation should be
+ * lockless.
+ *
+ * When called with fast_only, this notifier will be a no-op (and
+ * return that the range is NOT young), unless has_fast_aging is set
+ * on the struct mmu_notifier.
+ *
+ * When fast_only is true, if the implementer cannot determine that a
+ * range is young without blocking, it should return 0 (i.e., that
+ * the range is NOT young).
*/
int (*clear_young)(struct mmu_notifier *subscription,
struct mm_struct *mm,
unsigned long start,
- unsigned long end);
+ unsigned long end,
+ bool fast_only);
/*
* test_young is called to check the young/accessed bitflag in
* the secondary pte. This is used to know if the page is
* frequently used without actually clearing the flag or tearing
* down the secondary mapping on the page.
+ *
+ * The fast_only parameter has the same meaning as with clear_young.
*/
int (*test_young)(struct mmu_notifier *subscription,
struct mm_struct *mm,
- unsigned long address);
+ unsigned long address,
+ bool fast_only);
/*
* invalidate_range_start() and invalidate_range_end() must be
@@ -381,9 +398,11 @@ extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
unsigned long end);
extern int __mmu_notifier_clear_young(struct mm_struct *mm,
unsigned long start,
- unsigned long end);
+ unsigned long end,
+ bool fast_only);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
- unsigned long address);
+ unsigned long address,
+ bool fast_only);
extern bool __mm_has_fast_young_notifiers(struct mm_struct *mm);
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r);
@@ -418,7 +437,16 @@ static inline int mmu_notifier_clear_young(struct mm_struct *mm,
unsigned long end)
{
if (mm_has_notifiers(mm))
- return __mmu_notifier_clear_young(mm, start, end);
+ return __mmu_notifier_clear_young(mm, start, end, false);
+ return 0;
+}
+
+static inline int mmu_notifier_clear_young_fast_only(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ if (mm_has_notifiers(mm))
+ return __mmu_notifier_clear_young(mm, start, end, true);
return 0;
}
@@ -426,7 +454,15 @@ static inline int mmu_notifier_test_young(struct mm_struct *mm,
unsigned long address)
{
if (mm_has_notifiers(mm))
- return __mmu_notifier_test_young(mm, address);
+ return __mmu_notifier_test_young(mm, address, false);
+ return 0;
+}
+
+static inline int mmu_notifier_test_young_fast_only(struct mm_struct *mm,
+ unsigned long address)
+{
+ if (mm_has_notifiers(mm))
+ return __mmu_notifier_test_young(mm, address, true);
return 0;
}
@@ -622,12 +658,25 @@ static inline int mmu_notifier_clear_young(struct mm_struct *mm,
return 0;
}
+static inline int mmu_notifier_clear_young_fast_only(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ return 0;
+}
+
static inline int mmu_notifier_test_young(struct mm_struct *mm,
unsigned long address)
{
return 0;
}
+static inline int mmu_notifier_test_young_fast_only(struct mm_struct *mm,
+ unsigned long address)
+{
+ return 0;
+}
+
static inline bool mm_has_fast_young_notifiers(struct mm_struct *mm)
{
return 0;
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 74e40d5d4af4..6d9485cf3e51 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -457,36 +457,41 @@ TRACE_EVENT(kvm_unmap_hva_range,
);
TRACE_EVENT(kvm_age_hva,
- TP_PROTO(unsigned long start, unsigned long end),
- TP_ARGS(start, end),
+ TP_PROTO(unsigned long start, unsigned long end, bool fast_only),
+ TP_ARGS(start, end, fast_only),
TP_STRUCT__entry(
__field( unsigned long, start )
__field( unsigned long, end )
+ __field( bool, fast_only )
),
TP_fast_assign(
__entry->start = start;
__entry->end = end;
+ __entry->fast_only = fast_only;
),
- TP_printk("mmu notifier age hva: %#016lx -- %#016lx",
- __entry->start, __entry->end)
+ TP_printk("mmu notifier age hva: %#016lx -- %#016lx fast_only: %d",
+ __entry->start, __entry->end, __entry->fast_only)
);
TRACE_EVENT(kvm_test_age_hva,
- TP_PROTO(unsigned long hva),
- TP_ARGS(hva),
+ TP_PROTO(unsigned long hva, bool fast_only),
+ TP_ARGS(hva, fast_only),
TP_STRUCT__entry(
__field( unsigned long, hva )
+ __field( bool, fast_only )
),
TP_fast_assign(
__entry->hva = hva;
+ __entry->fast_only = fast_only;
),
- TP_printk("mmu notifier test age hva: %#016lx", __entry->hva)
+ TP_printk("mmu notifier test age hva: %#016lx fast_only: %d",
+ __entry->hva, __entry->fast_only)
);
#endif /* _TRACE_KVM_MAIN_H */
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index c405e5b072cf..f9ec810c8a1b 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -404,7 +404,8 @@ bool __mm_has_fast_young_notifiers(struct mm_struct *mm)
int __mmu_notifier_clear_young(struct mm_struct *mm,
unsigned long start,
- unsigned long end)
+ unsigned long end,
+ bool fast_only)
{
struct mmu_notifier *subscription;
int young = 0, id;
@@ -413,9 +414,13 @@ int __mmu_notifier_clear_young(struct mm_struct *mm,
hlist_for_each_entry_rcu(subscription,
&mm->notifier_subscriptions->list, hlist,
srcu_read_lock_held(&srcu)) {
+ if (fast_only && !subscription->has_fast_aging)
+ continue;
+
if (subscription->ops->clear_young)
young |= subscription->ops->clear_young(subscription,
- mm, start, end);
+ mm, start, end,
+ fast_only);
}
srcu_read_unlock(&srcu, id);
@@ -423,7 +428,8 @@ int __mmu_notifier_clear_young(struct mm_struct *mm,
}
int __mmu_notifier_test_young(struct mm_struct *mm,
- unsigned long address)
+ unsigned long address,
+ bool fast_only)
{
struct mmu_notifier *subscription;
int young = 0, id;
@@ -432,9 +438,13 @@ int __mmu_notifier_test_young(struct mm_struct *mm,
hlist_for_each_entry_rcu(subscription,
&mm->notifier_subscriptions->list, hlist,
srcu_read_lock_held(&srcu)) {
+ if (fast_only && !subscription->has_fast_aging)
+ continue;
+
if (subscription->ops->test_young) {
young = subscription->ops->test_young(subscription, mm,
- address);
+ address,
+ fast_only);
if (young)
break;
}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f6c369eccd2a..ec07caaed6b6 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -846,7 +846,7 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
IS_ENABLED(CONFIG_KVM_MMU_NOTIFIER_YOUNG_LOCKLESS),
};
- trace_kvm_age_hva(start, end);
+ trace_kvm_age_hva(start, end, false);
return kvm_handle_hva_range(kvm, &range).ret;
}
@@ -854,7 +854,8 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
- unsigned long end)
+ unsigned long end,
+ bool fast_only)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
const struct kvm_mmu_notifier_range range = {
@@ -868,7 +869,7 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
IS_ENABLED(CONFIG_KVM_MMU_NOTIFIER_YOUNG_LOCKLESS),
};
- trace_kvm_age_hva(start, end);
+ trace_kvm_age_hva(start, end, fast_only);
/*
* Even though we do not flush TLB, this will still adversely
@@ -888,7 +889,8 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
struct mm_struct *mm,
- unsigned long address)
+ unsigned long address,
+ bool fast_only)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
const struct kvm_mmu_notifier_range range = {
@@ -902,7 +904,7 @@ static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
IS_ENABLED(CONFIG_KVM_MMU_NOTIFIER_YOUNG_LOCKLESS),
};
- trace_kvm_test_age_hva(address);
+ trace_kvm_test_age_hva(address, fast_only);
return kvm_handle_hva_range(kvm, &range).ret;
}
--
2.46.0.792.g87dc391469-goog