Auxiliary clocks will have their vDSO data in a dedicated 'struct vdso_clock',
which needs to be synchronized independently.
Add a helper to synchronize a single vDSO clock.
Signed-off-by: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
---
include/vdso/helpers.h | 40 +++++++++++++++++++++++++++-------------
1 file changed, 27 insertions(+), 13 deletions(-)
diff --git a/include/vdso/helpers.h b/include/vdso/helpers.h
index 0a98fed550ba66a84a620fbbd6aee3e3029b4772..a5679f5efdfdcaaf6efd5f4a317d1f132c3dc617 100644
--- a/include/vdso/helpers.h
+++ b/include/vdso/helpers.h
@@ -28,32 +28,46 @@ static __always_inline u32 vdso_read_retry(const struct vdso_clock *vc,
return seq != start;
}
-static __always_inline void vdso_write_begin(struct vdso_time_data *vd)
+static __always_inline void vdso_write_begin_clock(struct vdso_clock *vc, bool last)
{
- struct vdso_clock *vc = vd->clock_data;
-
/*
* WRITE_ONCE() is required otherwise the compiler can validly tear
- * updates to vd[x].seq and it is possible that the value seen by the
+ * updates to vc->seq and it is possible that the value seen by the
* reader is inconsistent.
*/
- WRITE_ONCE(vc[CS_HRES_COARSE].seq, vc[CS_HRES_COARSE].seq + 1);
- WRITE_ONCE(vc[CS_RAW].seq, vc[CS_RAW].seq + 1);
- smp_wmb();
+ WRITE_ONCE(vc->seq, vc->seq + 1);
+
+ if (last)
+ smp_wmb();
}
-static __always_inline void vdso_write_end(struct vdso_time_data *vd)
+static __always_inline void vdso_write_end_clock(struct vdso_clock *vc, bool first)
{
- struct vdso_clock *vc = vd->clock_data;
+ if (first)
+ smp_wmb();
- smp_wmb();
/*
* WRITE_ONCE() is required otherwise the compiler can validly tear
- * updates to vd[x].seq and it is possible that the value seen by the
+ * updates to vc->seq and it is possible that the value seen by the
* reader is inconsistent.
*/
- WRITE_ONCE(vc[CS_HRES_COARSE].seq, vc[CS_HRES_COARSE].seq + 1);
- WRITE_ONCE(vc[CS_RAW].seq, vc[CS_RAW].seq + 1);
+ WRITE_ONCE(vc->seq, vc->seq + 1);
+}
+
+static __always_inline void vdso_write_begin(struct vdso_time_data *vd)
+{
+ struct vdso_clock *vc = vd->clock_data;
+
+ vdso_write_begin_clock(&vc[CS_HRES_COARSE], false);
+ vdso_write_begin_clock(&vc[CS_RAW], true);
+}
+
+static __always_inline void vdso_write_end(struct vdso_time_data *vd)
+{
+ struct vdso_clock *vc = vd->clock_data;
+
+ vdso_write_end_clock(&vc[CS_HRES_COARSE], true);
+ vdso_write_end_clock(&vc[CS_RAW], false);
}
#endif /* !__ASSEMBLY__ */
--
2.50.0
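For context, the read side that pairs with these writers is unchanged by the patch. The sketch below is not part of the patch; it only shows how a reader of a single vdso_clock typically loops over vdso_read_begin()/vdso_read_retry(). read_clock_snapshot(), struct clock_snapshot and the particular fields copied are illustrative placeholders, not an existing kernel API.

/*
 * Illustrative reader, not part of this patch: sample the sequence
 * count, copy the per-clock data, and retry if a concurrent writer
 * invalidated the snapshot in the meantime.
 */
struct clock_snapshot {
        u64     cycle_last;
        u32     mult;
        u32     shift;
};

static __always_inline void read_clock_snapshot(const struct vdso_clock *vc,
                                                struct clock_snapshot *snap)
{
        u32 seq;

        do {
                /* Waits until the sequence count is even, then smp_rmb() */
                seq = vdso_read_begin(vc);
                snap->cycle_last = vc->cycle_last;
                snap->mult       = vc->mult;
                snap->shift      = vc->shift;
                /* smp_rmb(), then compare against the start value */
        } while (unlikely(vdso_read_retry(vc, seq)));
}
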
The following commit has been merged into the timers/ptp branch of tip:
Commit-ID: ad64d71d7409a0602b50ee71c7f9663a3385c286
Gitweb: https://git.kernel.org/tip/ad64d71d7409a0602b50ee71c7f9663a3385c286
Author: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
AuthorDate: Tue, 01 Jul 2025 10:57:58 +02:00
Committer: Thomas Gleixner <tglx@linutronix.de>
CommitterDate: Wed, 09 Jul 2025 11:52:34 +02:00
vdso/helpers: Add helpers for seqlocks of single vdso_clock
Auxiliary clocks will have their vDSO data in a dedicated 'struct vdso_clock',
which needs to be synchronized independently.
Add a helper to synchronize a single vDSO clock.
[ tglx: Move the SMP memory barriers to the call sites and get rid of the
confusing first/last arguments and conditional barriers ]
Signed-off-by: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/all/20250701-vdso-auxclock-v1-4-df7d9f87b9b8@linutronix.de
---
include/vdso/helpers.h | 50 +++++++++++++++++++++++++++++++----------
1 file changed, 38 insertions(+), 12 deletions(-)
diff --git a/include/vdso/helpers.h b/include/vdso/helpers.h
index 0a98fed..1a5ee9d 100644
--- a/include/vdso/helpers.h
+++ b/include/vdso/helpers.h
@@ -28,17 +28,47 @@ static __always_inline u32 vdso_read_retry(const struct vdso_clock *vc,
return seq != start;
}
-static __always_inline void vdso_write_begin(struct vdso_time_data *vd)
+static __always_inline void vdso_write_seq_begin(struct vdso_clock *vc)
{
- struct vdso_clock *vc = vd->clock_data;
+ /*
+ * WRITE_ONCE() is required otherwise the compiler can validly tear
+ * updates to vc->seq and it is possible that the value seen by the
+ * reader is inconsistent.
+ */
+ WRITE_ONCE(vc->seq, vc->seq + 1);
+}
+static __always_inline void vdso_write_seq_end(struct vdso_clock *vc)
+{
/*
* WRITE_ONCE() is required otherwise the compiler can validly tear
- * updates to vd[x].seq and it is possible that the value seen by the
+ * updates to vc->seq and it is possible that the value seen by the
* reader is inconsistent.
*/
- WRITE_ONCE(vc[CS_HRES_COARSE].seq, vc[CS_HRES_COARSE].seq + 1);
- WRITE_ONCE(vc[CS_RAW].seq, vc[CS_RAW].seq + 1);
+ WRITE_ONCE(vc->seq, vc->seq + 1);
+}
+
+static __always_inline void vdso_write_begin_clock(struct vdso_clock *vc)
+{
+ vdso_write_seq_begin(vc);
+ /* Ensure the sequence invalidation is visible before data is modified */
+ smp_wmb();
+}
+
+static __always_inline void vdso_write_end_clock(struct vdso_clock *vc)
+{
+ /* Ensure the data update is visible before the sequence is set valid again */
+ smp_wmb();
+ vdso_write_seq_end(vc);
+}
+
+static __always_inline void vdso_write_begin(struct vdso_time_data *vd)
+{
+ struct vdso_clock *vc = vd->clock_data;
+
+ vdso_write_seq_begin(&vc[CS_HRES_COARSE]);
+ vdso_write_seq_begin(&vc[CS_RAW]);
+ /* Ensure the sequence invalidation is visible before data is modified */
smp_wmb();
}
@@ -46,14 +76,10 @@ static __always_inline void vdso_write_end(struct vdso_time_data *vd)
{
struct vdso_clock *vc = vd->clock_data;
+ /* Ensure the data update is visible before the sequence is set valid again */
smp_wmb();
- /*
- * WRITE_ONCE() is required otherwise the compiler can validly tear
- * updates to vd[x].seq and it is possible that the value seen by the
- * reader is inconsistent.
- */
- WRITE_ONCE(vc[CS_HRES_COARSE].seq, vc[CS_HRES_COARSE].seq + 1);
- WRITE_ONCE(vc[CS_RAW].seq, vc[CS_RAW].seq + 1);
+ vdso_write_seq_end(&vc[CS_HRES_COARSE]);
+ vdso_write_seq_end(&vc[CS_RAW]);
}
#endif /* !__ASSEMBLY__ */
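To illustrate the intended use, here is a rough sketch of how an update path for a single auxiliary clock could wrap its data update with the new per-clock helpers. update_vdso_aux_clock() and the particular fields written are hypothetical names chosen for the sketch; the real auxiliary-clock call sites are expected to land in later patches of the series.

/*
 * Sketch only, assuming a hypothetical update function: publish new
 * conversion data for one auxiliary clock under its own seqcount.
 */
static void update_vdso_aux_clock(struct vdso_clock *vc, u64 cycle_last,
                                  u32 mult, u32 shift)
{
        /* Make the sequence count odd (invalid); smp_wmb() orders it */
        vdso_write_begin_clock(vc);

        /* Readers now spin in vdso_read_begin() or retry afterwards */
        vc->cycle_last = cycle_last;
        vc->mult       = mult;
        vc->shift      = shift;

        /* smp_wmb(), then make the sequence count even (valid) again */
        vdso_write_end_clock(vc);
}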