[PATCH v2 1/3] printk: Introduce console_flush_one_record

Andrew Murray posted 3 patches 4 months, 1 week ago
There is a newer version of this series
Posted by Andrew Murray 4 months, 1 week ago
console_flush_all prints all remaining records to all usable consoles
whilst its caller holds console_lock. This can result in long wait
times for other console_lock waiters, especially where there is a
large volume of records or where the console is slow (e.g. serial).

Let's extract the parts of this function which print a single record
into a new function named console_flush_one_record. This can later
be used for functions that will release and reacquire console_lock
between records.
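
For illustration only (this sketch is not part of the patch, and it
glosses over the fact that console_unlock() itself flushes pending
records today), a later caller built on the new helper might look
roughly like the following; the name console_flush_yielding is made
up for this example:

static void console_flush_yielding(void)
{
	bool any_usable = false;
	bool handover = false;
	u64 next_seq = 0;
	bool progress;

	console_lock();
	for (;;) {
		/* Flush at most one record per usable console. */
		progress = console_flush_one_record(true, &next_seq,
						    &handover, &any_usable);

		/* On handover the console_lock is no longer ours. */
		if (handover)
			return;

		if (!progress)
			break;

		/* Yield the lock so other console_lock waiters can run. */
		console_unlock();
		cond_resched();
		console_lock();
	}
	console_unlock();
}

Returning immediately on @handover matters because the console_lock
has already been transferred to the waiter, so there is nothing left
to release.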

This commit should not change existing functionality.

Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Andrew Murray <amurray@thegoodpenguin.co.uk>
---
 kernel/printk/printk.c | 159 +++++++++++++++++++++++++++++++------------------
 1 file changed, 100 insertions(+), 59 deletions(-)

diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 0efbcdda9aaba9d8d877df5e4f1db002d3a596bc..060d4919de320fe21fd7aca73ba497e27c4ff334 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -3161,6 +3161,100 @@ static inline void printk_kthreads_check_locked(void) { }
 
 #endif /* CONFIG_PRINTK */
 
+
+/*
+ * Print out one record for each console.
+ *
+ * @do_cond_resched is set by the caller. It can be true only in schedulable
+ * context.
+ *
+ * @next_seq is set to the sequence number after the last available record.
+ * The value is valid only when this function returns true.
+ *
+ * @handover will be set to true if a printk waiter has taken over the
+ * console_lock, in which case the caller is no longer holding the
+ * console_lock. Otherwise it is set to false.
+ *
+ * @any_usable will be set to true if there are any usable consoles.
+ *
+ * Returns true when there was at least one usable console and a record was
+ * flushed. A returned false indicates there were no records to flush for any
+ * of the consoles. It may also indicate that there were no usable consoles,
+ * the context has been lost or there is a panic situation. Regardless of the
+ * reason, the caller should assume it is not useful to immediately try again.
+ *
+ * Requires the console_lock.
+ */
+static bool console_flush_one_record(bool do_cond_resched, u64 *next_seq, bool *handover,
+				     bool *any_usable)
+{
+	struct console_flush_type ft;
+	struct console *con;
+	bool any_progress;
+	int cookie;
+
+	any_progress = false;
+
+	printk_get_console_flush_type(&ft);
+
+	cookie = console_srcu_read_lock();
+	for_each_console_srcu(con) {
+		short flags = console_srcu_read_flags(con);
+		u64 printk_seq;
+		bool progress;
+
+		/*
+		 * console_flush_one_record() is only responsible for
+		 * nbcon consoles when the nbcon consoles cannot print via
+		 * their atomic or threaded flushing.
+		 */
+		if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
+			continue;
+
+		if (!console_is_usable(con, flags, !do_cond_resched))
+			continue;
+		*any_usable = true;
+
+		if (flags & CON_NBCON) {
+			progress = nbcon_legacy_emit_next_record(con, handover, cookie,
+								 !do_cond_resched);
+			printk_seq = nbcon_seq_read(con);
+		} else {
+			progress = console_emit_next_record(con, handover, cookie);
+			printk_seq = con->seq;
+		}
+
+		/*
+		 * If a handover has occurred, the SRCU read lock
+		 * is already released.
+		 */
+		if (*handover)
+			return false;
+
+		/* Track the next of the highest seq flushed. */
+		if (printk_seq > *next_seq)
+			*next_seq = printk_seq;
+
+		if (!progress)
+			continue;
+		any_progress = true;
+
+		/* Allow panic_cpu to take over the consoles safely. */
+		if (other_cpu_in_panic())
+			goto abandon;
+
+		if (do_cond_resched)
+			cond_resched();
+	}
+	console_srcu_read_unlock(cookie);
+
+	return any_progress;
+
+abandon:
+	console_srcu_read_unlock(cookie);
+	return false;
+}
+
 /*
  * Print out all remaining records to all consoles.
  *
@@ -3186,77 +3280,24 @@ static inline void printk_kthreads_check_locked(void) { }
  */
 static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
 {
-	struct console_flush_type ft;
 	bool any_usable = false;
-	struct console *con;
 	bool any_progress;
-	int cookie;
 
 	*next_seq = 0;
 	*handover = false;
 
 	do {
-		any_progress = false;
-
-		printk_get_console_flush_type(&ft);
-
-		cookie = console_srcu_read_lock();
-		for_each_console_srcu(con) {
-			short flags = console_srcu_read_flags(con);
-			u64 printk_seq;
-			bool progress;
+		any_progress = console_flush_one_record(do_cond_resched, next_seq, handover,
+							&any_usable);
 
-			/*
-			 * console_flush_all() is only responsible for nbcon
-			 * consoles when the nbcon consoles cannot print via
-			 * their atomic or threaded flushing.
-			 */
-			if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
-				continue;
-
-			if (!console_is_usable(con, flags, !do_cond_resched))
-				continue;
-			any_usable = true;
-
-			if (flags & CON_NBCON) {
-				progress = nbcon_legacy_emit_next_record(con, handover, cookie,
-									 !do_cond_resched);
-				printk_seq = nbcon_seq_read(con);
-			} else {
-				progress = console_emit_next_record(con, handover, cookie);
-				printk_seq = con->seq;
-			}
-
-			/*
-			 * If a handover has occurred, the SRCU read lock
-			 * is already released.
-			 */
-			if (*handover)
-				return false;
-
-			/* Track the next of the highest seq flushed. */
-			if (printk_seq > *next_seq)
-				*next_seq = printk_seq;
-
-			if (!progress)
-				continue;
-			any_progress = true;
-
-			/* Allow panic_cpu to take over the consoles safely. */
-			if (other_cpu_in_panic())
-				goto abandon;
+		if (*handover)
+			return false;
 
-			if (do_cond_resched)
-				cond_resched();
-		}
-		console_srcu_read_unlock(cookie);
+		if (other_cpu_in_panic())
+			return false;
 	} while (any_progress);
 
 	return any_usable;
-
-abandon:
-	console_srcu_read_unlock(cookie);
-	return false;
 }
 
 static void __console_flush_and_unlock(void)

-- 
2.34.1
Re: [PATCH v2 1/3] printk: Introduce console_flush_one_record
Posted by Petr Mladek 4 months ago
On Sat 2025-09-27 23:05:35, Andrew Murray wrote:
> console_flush_all prints all remaining records to all usable consoles
> whilst its caller holds console_lock. This can result in long wait
> times for other console_lock waiters, especially where there is a
> large volume of records or where the console is slow (e.g. serial).
> 
> Let's extract the parts of this function which print a single record
> into a new function named console_flush_one_record. This can later
> be used for functions that will release and reacquire console_lock
> between records.
> 
> This commit should not change existing functionality.
> 
> Reviewed-by: Petr Mladek <pmladek@suse.com>
> Signed-off-by: Andrew Murray <amurray@thegoodpenguin.co.uk>
> ---
>  kernel/printk/printk.c | 159 +++++++++++++++++++++++++++++++------------------
>  1 file changed, 100 insertions(+), 59 deletions(-)
> 
> diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
> index 0efbcdda9aaba9d8d877df5e4f1db002d3a596bc..060d4919de320fe21fd7aca73ba497e27c4ff334 100644
> --- a/kernel/printk/printk.c
> +++ b/kernel/printk/printk.c
> @@ -3161,6 +3161,100 @@ static inline void printk_kthreads_check_locked(void) { }
>  
>  #endif /* CONFIG_PRINTK */
>  
> +
> +/*
> + * Print out one record for each console.
> + *
> + * @do_cond_resched is set by the caller. It can be true only in schedulable
> + * context.
> + *
> + * @next_seq is set to the sequence number after the last available record.
> + * The value is valid only when this function returns true.

This actually is not true. This function tries to flush one record on all usable
consoles. And @next_seq is set to the highest already emitted sequence
number. But some consoles might be (far) behind.

A more precise description would be something like:

 * @next_seq is set to the sequence number after the last available record.
 * The value is valid only when there is at least one usable console
 * (@any_usable == true) and all usable consoles were flushed
 * (function returns false && @handover == false &&
 * other_cpu_in_panic() == false)

Huh, it is complicated. This is why I suggested changing the semantics
of @any_usable in the 2nd patch. It would be cleared when the emit
was interrupted (handover or other_cpu_in_panic()).

Best Regards,
Petr

> + * @handover will be set to true if a printk waiter has taken over the
> + * console_lock, in which case the caller is no longer holding the
> + * console_lock. Otherwise it is set to false.
> + *
> + * @any_usable will be set to true if there are any usable consoles.
> + *
> + * Returns true when there was at least one usable console and a record was
> + * flushed. A returned false indicates there were no records to flush for any
> + * of the consoles. It may also indicate that there were no usable consoles,
> + * the context has been lost or there is a panic situation. Regardless of the
> + * reason, the caller should assume it is not useful to immediately try again.
> + *
> + * Requires the console_lock.
> + */
> +static bool console_flush_one_record(bool do_cond_resched, u64 *next_seq, bool *handover,
> +				     bool *any_usable)
> +{
> +	struct console_flush_type ft;
> +	struct console *con;
> +	bool any_progress;
> +	int cookie;
> +
> +	any_progress = false;
> +
> +	printk_get_console_flush_type(&ft);
> +
> +	cookie = console_srcu_read_lock();
> +	for_each_console_srcu(con) {
> +		short flags = console_srcu_read_flags(con);
> +		u64 printk_seq;
> +		bool progress;
> +
> +		/*
> +		 * console_flush_one_record() is only responsible for
> +		 * nbcon consoles when the nbcon consoles cannot print via
> +		 * their atomic or threaded flushing.
> +		 */
> +		if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
> +			continue;
> +
> +		if (!console_is_usable(con, flags, !do_cond_resched))
> +			continue;
> +		*any_usable = true;
> +
> +		if (flags & CON_NBCON) {
> +			progress = nbcon_legacy_emit_next_record(con, handover, cookie,
> +								 !do_cond_resched);
> +			printk_seq = nbcon_seq_read(con);
> +		} else {
> +			progress = console_emit_next_record(con, handover, cookie);
> +			printk_seq = con->seq;
> +		}
> +
> +		/*
> +		 * If a handover has occurred, the SRCU read lock
> +		 * is already released.
> +		 */
> +		if (*handover)
> +			return false;
> +
> +		/* Track the next of the highest seq flushed. */
> +		if (printk_seq > *next_seq)
> +			*next_seq = printk_seq;
> +
> +		if (!progress)
> +			continue;
> +		any_progress = true;
> +
> +		/* Allow panic_cpu to take over the consoles safely. */
> +		if (other_cpu_in_panic())
> +			goto abandon;
> +
> +		if (do_cond_resched)
> +			cond_resched();
> +	}
> +	console_srcu_read_unlock(cookie);
> +
> +	return any_progress;
> +
> +abandon:
> +	console_srcu_read_unlock(cookie);
> +	return false;
> +}
> +
>  /*
>   * Print out all remaining records to all consoles.
>   *
> @@ -3186,77 +3280,24 @@ static inline void printk_kthreads_check_locked(void) { }
>   */
>  static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
>  {
> -	struct console_flush_type ft;
>  	bool any_usable = false;
> -	struct console *con;
>  	bool any_progress;
> -	int cookie;
>  
>  	*next_seq = 0;
>  	*handover = false;
>  
>  	do {
> -		any_progress = false;
> -
> -		printk_get_console_flush_type(&ft);
> -
> -		cookie = console_srcu_read_lock();
> -		for_each_console_srcu(con) {
> -			short flags = console_srcu_read_flags(con);
> -			u64 printk_seq;
> -			bool progress;
> +		any_progress = console_flush_one_record(do_cond_resched, next_seq, handover,
> +							&any_usable);
>  
> -			/*
> -			 * console_flush_all() is only responsible for nbcon
> -			 * consoles when the nbcon consoles cannot print via
> -			 * their atomic or threaded flushing.
> -			 */
> -			if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
> -				continue;
> -
> -			if (!console_is_usable(con, flags, !do_cond_resched))
> -				continue;
> -			any_usable = true;
> -
> -			if (flags & CON_NBCON) {
> -				progress = nbcon_legacy_emit_next_record(con, handover, cookie,
> -									 !do_cond_resched);
> -				printk_seq = nbcon_seq_read(con);
> -			} else {
> -				progress = console_emit_next_record(con, handover, cookie);
> -				printk_seq = con->seq;
> -			}
> -
> -			/*
> -			 * If a handover has occurred, the SRCU read lock
> -			 * is already released.
> -			 */
> -			if (*handover)
> -				return false;
> -
> -			/* Track the next of the highest seq flushed. */
> -			if (printk_seq > *next_seq)
> -				*next_seq = printk_seq;
> -
> -			if (!progress)
> -				continue;
> -			any_progress = true;
> -
> -			/* Allow panic_cpu to take over the consoles safely. */
> -			if (other_cpu_in_panic())
> -				goto abandon;
> +		if (*handover)
> +			return false;
>  
> -			if (do_cond_resched)
> -				cond_resched();
> -		}
> -		console_srcu_read_unlock(cookie);
> +		if (other_cpu_in_panic())
> +			return false;
>  	} while (any_progress);
>  
>  	return any_usable;
> -
> -abandon:
> -	console_srcu_read_unlock(cookie);
> -	return false;
>  }
>  
>  static void __console_flush_and_unlock(void)
> 
> -- 
> 2.34.1
Re: [PATCH v2 1/3] printk: Introduce console_flush_one_record
Posted by John Ogness 4 months, 1 week ago
On 2025-09-27, Andrew Murray <amurray@thegoodpenguin.co.uk> wrote:
> diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
> index 0efbcdda9aaba9d8d877df5e4f1db002d3a596bc..060d4919de320fe21fd7aca73ba497e27c4ff334 100644
> --- a/kernel/printk/printk.c
> +++ b/kernel/printk/printk.c
> @@ -3161,6 +3161,100 @@ static inline void printk_kthreads_check_locked(void) { }
>  
>  #endif /* CONFIG_PRINTK */
>  
> +
> +/*
> + * Print out one record for each console.
> + *
> + * @do_cond_resched is set by the caller. It can be true only in schedulable
> + * context.
> + *
> + * @next_seq is set to the sequence number after the last available record.
> + * The value is valid only when this function returns true.
> + *
> + * @handover will be set to true if a printk waiter has taken over the
> + * console_lock, in which case the caller is no longer holding the
> + * console_lock. Otherwise it is set to false.
> + *
> + * @any_usable will be set to true if there are any usable consoles.
> + *
> + * Returns true when there was at least one usable console and a record was
> + * flushed. A returned false indicates there were no records to flush for any
> + * of the consoles. It may also indicate that there were no usable consoles,
> + * the context has been lost or there is a panic situation. Regardless of the
> + * reason, the caller should assume it is not useful to immediately try again.
> + *
> + * Requires the console_lock.
> + */
> +static bool console_flush_one_record(bool do_cond_resched, u64 *next_seq, bool *handover,
> +				     bool *any_usable)
> +{
> +	struct console_flush_type ft;
> +	struct console *con;
> +	bool any_progress;
> +	int cookie;
> +
> +	any_progress = false;

I would let this be a definition initializer. And then place it sorted
by length:

	struct console_flush_type ft;
	bool any_progress = false;
	struct console *con;
	int cookie;

John
Re: [PATCH v2 1/3] printk: Introduce console_flush_one_record
Posted by Andrew Murray 4 months, 1 week ago
On Tue, 30 Sept 2025 at 13:54, John Ogness <john.ogness@linutronix.de> wrote:
>
> On 2025-09-27, Andrew Murray <amurray@thegoodpenguin.co.uk> wrote:
> > diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
> > index 0efbcdda9aaba9d8d877df5e4f1db002d3a596bc..060d4919de320fe21fd7aca73ba497e27c4ff334 100644
> > --- a/kernel/printk/printk.c
> > +++ b/kernel/printk/printk.c
> > @@ -3161,6 +3161,100 @@ static inline void printk_kthreads_check_locked(void) { }
> >
> >  #endif /* CONFIG_PRINTK */
> >
> > +
> > +/*
> > + * Print out one record for each console.
> > + *
> > + * @do_cond_resched is set by the caller. It can be true only in schedulable
> > + * context.
> > + *
> > + * @next_seq is set to the sequence number after the last available record.
> > + * The value is valid only when this function returns true.
> > + *
> > + * @handover will be set to true if a printk waiter has taken over the
> > + * console_lock, in which case the caller is no longer holding the
> > + * console_lock. Otherwise it is set to false.
> > + *
> > + * @any_usable will be set to true if there are any usable consoles.
> > + *
> > + * Returns true when there was at least one usable console and a record was
> > + * flushed. A returned false indicates there were no records to flush for any
> > + * of the consoles. It may also indicate that there were no usable consoles,
> > + * the context has been lost or there is a panic situation. Regardless of the
> > + * reason, the caller should assume it is not useful to immediately try again.
> > + *
> > + * Requires the console_lock.
> > + */
> > +static bool console_flush_one_record(bool do_cond_resched, u64 *next_seq, bool *handover,
> > +                                  bool *any_usable)
> > +{
> > +     struct console_flush_type ft;
> > +     struct console *con;
> > +     bool any_progress;
> > +     int cookie;
> > +
> > +     any_progress = false;
>
> I would let this be a definition initializer. And then place it sorted
> by length:
>
>         struct console_flush_type ft;
>         bool any_progress = false;
>         struct console *con;
>         int cookie;
>
> John

Sure, I'll update it on the next iteration.

Thanks,

Andrew Murray