[PATCH] drivers/char/random.c: Clean up style issues
Fix various style issues reported by checkpatch.pl:
- Fix indentation and alignment in conditional statements
- Remove multiple blank lines
- Add missing braces to if statements
- Fix include path from asm/ to linux/ (NOTE(review): checkpatch's asm-to-linux
  mapping covers <asm/io.h> -> <linux/io.h>, but <linux/processor.h> is NOT a
  drop-in replacement for <asm/processor.h> — cpu_relax() and other arch
  definitions live in the asm header. This substitution is the likely cause of
  the sparc build warning reported below and should be dropped from this patch.)
- Replace BUG_ON with WARN_ON_ONCE where appropriate
- Add comments to spinlock_t definitions
- Use sizeof(*ptr) instead of sizeof(struct name)
- Fix const qualifier for ctl_table
This patch addresses all checkpatch.pl errors and warnings.
NOTE(review): several hunks are not behavior-preserving and should be split
out or dropped before calling this a pure style cleanup:
- dropping KERN_NOTICE from printk_deferred() in warn_unseeded_randomness()
  changes the message's log level;
- replacing BUG_ON(random_data_len > 32) with WARN_ON_ONCE() plus a silent
  early return in crng_fast_key_erasure()/crng_make_state() leaves the
  caller's chacha_state and random_data buffers uninitialized — worse than
  the original hard stop;
- the asm/ -> linux/ include changes are not equivalent for <asm/processor.h>
  (see sparc build report below);
- smatch additionally flags inconsistent indenting at the two new
  WARN_ON_ONCE() sites (drivers/char/random.c:325, :349).
Signed-off-by: Osama Abdelkader <osama.abdelkader@gmail.com>
---
drivers/char/random.c | 74 +++++++++++++++++++++++++------------------
1 file changed, 44 insertions(+), 30 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index b8b24b6ed3fe..94c1168fd75e 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -62,10 +62,10 @@
#include <vdso/vsyscall.h>
#endif
#include <asm/archrandom.h>
-#include <asm/processor.h>
+#include <linux/processor.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
-#include <asm/io.h>
+#include <linux/io.h>
/*********************************************************************
*
@@ -163,16 +163,19 @@ int __cold execute_with_initialized_rng(struct notifier_block *nb)
if (crng_ready())
nb->notifier_call(nb, 0, NULL);
else
- ret = raw_notifier_chain_register((struct raw_notifier_head *)&random_ready_notifier.head, nb);
+ ret = raw_notifier_chain_register((struct raw_notifier_head *)
+ &random_ready_notifier.head,
+ nb);
spin_unlock_irqrestore(&random_ready_notifier.lock, flags);
return ret;
}
#define warn_unseeded_randomness() \
- if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
- printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
- __func__, (void *)_RET_IP_, crng_init)
-
+ do { \
+ if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
+ printk_deferred("random: %s called from %pS with crng_init=%d\n", \
+ __func__, (void *)_RET_IP_, crng_init); \
+ } while (0)
/*********************************************************************
*
@@ -211,7 +214,7 @@ enum {
static struct {
u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
unsigned long generation;
- spinlock_t lock;
+ spinlock_t lock; /* Protects base_crng state */
} base_crng = {
.lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
};
@@ -238,11 +241,12 @@ static unsigned int crng_reseed_interval(void)
if (unlikely(READ_ONCE(early_boot))) {
time64_t uptime = ktime_get_seconds();
+
if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
WRITE_ONCE(early_boot, false);
else
return max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
- (unsigned int)uptime / 2 * HZ);
+ (unsigned int)uptime / 2 * HZ);
}
return CRNG_RESEED_INTERVAL;
}
@@ -318,8 +322,9 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
{
u8 first_block[CHACHA_BLOCK_SIZE];
- BUG_ON(random_data_len > 32);
-
+ WARN_ON_ONCE(random_data_len > 32);
+ if (random_data_len > 32)
+ return;
chacha_init_consts(chacha_state);
memcpy(&chacha_state->x[4], key, CHACHA_KEY_SIZE);
memset(&chacha_state->x[12], 0, sizeof(u32) * 4);
@@ -341,8 +346,9 @@ static void crng_make_state(struct chacha_state *chacha_state,
unsigned long flags;
struct crng *crng;
- BUG_ON(random_data_len > 32);
-
+ WARN_ON_ONCE(random_data_len > 32);
+ if (random_data_len > 32)
+ return;
/*
* For the fast path, we check whether we're ready, unlocked first, and
* then re-check once locked later. In the case where we're really not
@@ -582,6 +588,7 @@ u32 __get_random_u32_below(u32 ceil)
mult = (u64)ceil * rand;
if (unlikely((u32)mult < ceil)) {
u32 bound = -ceil % ceil;
+
while (unlikely((u32)mult < bound))
mult = (u64)ceil * get_random_u32();
}
@@ -610,7 +617,6 @@ int __cold random_prepare_cpu(unsigned int cpu)
}
#endif
-
/**********************************************************************
*
* Entropy accumulation and extraction routines.
@@ -637,7 +643,7 @@ enum {
static struct {
struct blake2s_state hash;
- spinlock_t lock;
+ spinlock_t lock; /* Protects input_pool state */
unsigned int init_bits;
} input_pool = {
.hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
@@ -720,7 +726,11 @@ static void extract_entropy(void *buf, size_t len)
memzero_explicit(&block, sizeof(block));
}
-#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)
+#define credit_init_bits(bits) \
+ do { \
+ if (!crng_ready()) \
+ _credit_init_bits(bits); \
+ } while (0)
static void __cold _credit_init_bits(size_t bits)
{
@@ -764,7 +774,6 @@ static void __cold _credit_init_bits(size_t bits)
}
}
-
/**********************************************************************
*
* Entropy collection routines.
@@ -773,7 +782,8 @@ static void __cold _credit_init_bits(size_t bits)
* the above entropy accumulation routines:
*
* void add_device_randomness(const void *buf, size_t len);
- * void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after);
+ * void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy,
+ * bool sleep_after);
* void add_bootloader_randomness(const void *buf, size_t len);
* void add_vmfork_randomness(const void *unique_vm_id, size_t len);
* void add_interrupt_randomness(int irq);
@@ -826,6 +836,7 @@ static int __init parse_trust_cpu(char *arg)
{
return kstrtobool(arg, &trust_cpu);
}
+
static int __init parse_trust_bootloader(char *arg)
{
return kstrtobool(arg, &trust_bootloader);
@@ -849,12 +860,15 @@ static int random_pm_notification(struct notifier_block *nb, unsigned long actio
_mix_pool_bytes(&entropy, sizeof(entropy));
spin_unlock_irqrestore(&input_pool.lock, flags);
- if (crng_ready() && (action == PM_RESTORE_PREPARE ||
- (action == PM_POST_SUSPEND && !IS_ENABLED(CONFIG_PM_AUTOSLEEP) &&
- !IS_ENABLED(CONFIG_PM_USERSPACE_AUTOSLEEP)))) {
+ if (crng_ready() &&
+ (action == PM_RESTORE_PREPARE ||
+ (action == PM_POST_SUSPEND &&
+ !IS_ENABLED(CONFIG_PM_AUTOSLEEP) &&
+ !IS_ENABLED(CONFIG_PM_USERSPACE_AUTOSLEEP)))) {
crng_reseed(NULL);
pr_notice("crng reseeded on system resumption\n");
}
+
return 0;
}
@@ -871,6 +885,7 @@ void __init random_init_early(const char *command_line)
#if defined(LATENT_ENTROPY_PLUGIN)
static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
+
_mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
#endif
@@ -928,8 +943,7 @@ void __init random_init(void)
WARN_ON(register_pm_notifier(&pm_notifier));
- WARN(!entropy, "Missing cycle counter and fallback timer; RNG "
- "entropy collection will consequently suffer.");
+ WARN(!entropy, "Missing cycle counter and fallback timer; RNG entropy collection will consequently suffer.");
}
/*
@@ -999,6 +1013,7 @@ void __cold add_vmfork_randomness(const void *unique_vm_id, size_t len)
}
blocking_notifier_call_chain(&vmfork_chain, 0, NULL);
}
+
#if IS_MODULE(CONFIG_VMGENID)
EXPORT_SYMBOL_GPL(add_vmfork_randomness);
#endif
@@ -1249,7 +1264,7 @@ void __cold rand_initialize_disk(struct gendisk *disk)
* If kzalloc returns null, we just won't use that entropy
* source.
*/
- state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state) {
state->last_time = INITIAL_JIFFIES;
disk->random = state;
@@ -1326,7 +1341,8 @@ static void __cold try_to_generate_entropy(void)
preempt_disable();
/* Only schedule callbacks on timer CPUs that are online. */
- cpumask_and(&timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
+ cpumask_and(&timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER),
+ cpu_online_mask);
num_cpus = cpumask_weight(&timer_cpus);
/* In very bizarre case of misconfiguration, fallback to all online. */
if (unlikely(num_cpus == 0)) {
@@ -1358,7 +1374,6 @@ static void __cold try_to_generate_entropy(void)
timer_destroy_on_stack(&stack->timer);
}
-
/**********************************************************************
*
* Userspace reader/writer interfaces.
@@ -1467,9 +1482,9 @@ static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
try_to_generate_entropy();
if (!crng_ready()) {
- if (!ratelimit_disable && maxwarn <= 0)
+ if (!ratelimit_disable && maxwarn <= 0) {
ratelimit_state_inc_miss(&urandom_warning);
- else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
+ } else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
--maxwarn;
pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
current->comm, iov_iter_count(iter));
@@ -1585,7 +1600,6 @@ const struct file_operations urandom_fops = {
.splice_write = iter_file_splice_write,
};
-
/********************************************************************
*
* Sysctl interface.
@@ -1635,7 +1649,7 @@ static int proc_do_uuid(const struct ctl_table *table, int write, void *buf,
{
u8 tmp_uuid[UUID_SIZE], *uuid;
char uuid_string[UUID_STRING_LEN + 1];
- struct ctl_table fake_table = {
+ const struct ctl_table fake_table = {
.data = uuid_string,
.maxlen = UUID_STRING_LEN
};
--
2.43.0
Hi Osama, kernel test robot noticed the following build warnings: [auto build test WARNING on v6.17-rc2] [also build test WARNING on linus/master] [cannot apply to crng-random/master next-20250822] [If your patch is applied to the wrong git tree, kindly drop us a note. And when submitting patch, we suggest to use '--base' as documented in https://git-scm.com/docs/git-format-patch#_base_tree_information] url: https://github.com/intel-lab-lkp/linux/commits/Osama-Abdelkader/drivers-char-random-c-Clean-up-style-issues/20250821-010651 base: v6.17-rc2 patch link: https://lore.kernel.org/r/20250820170359.78811-1-osama.abdelkader%40gmail.com patch subject: [PATCH] drivers/char/random.c: Clean up style issues config: sparc-randconfig-r071-20250825 (https://download.01.org/0day-ci/archive/20250825/202508251623.uUGghjhZ-lkp@intel.com/config) compiler: sparc-linux-gcc (GCC) 8.5.0 If you fix the issue in a separate patch/commit (i.e. not just a new version of the same patch/commit), kindly add following tags | Reported-by: kernel test robot <lkp@intel.com> | Closes: https://lore.kernel.org/oe-kbuild-all/202508251623.uUGghjhZ-lkp@intel.com/ smatch warnings: drivers/char/random.c:325 crng_fast_key_erasure() warn: inconsistent indenting drivers/char/random.c:349 crng_make_state() warn: inconsistent indenting vim +325 drivers/char/random.c 304 305 /* 306 * This generates a ChaCha block using the provided key, and then 307 * immediately overwrites that key with half the block. It returns 308 * the resultant ChaCha state to the user, along with the second 309 * half of the block containing 32 bytes of random data that may 310 * be used; random_data_len may not be greater than 32. 311 * 312 * The returned ChaCha state contains within it a copy of the old 313 * key value, at index 4, so the state should always be zeroed out 314 * immediately after using in order to maintain forward secrecy. 
315 * If the state cannot be erased in a timely manner, then it is 316 * safer to set the random_data parameter to &chacha_state->x[4] 317 * so that this function overwrites it before returning. 318 */ 319 static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE], 320 struct chacha_state *chacha_state, 321 u8 *random_data, size_t random_data_len) 322 { 323 u8 first_block[CHACHA_BLOCK_SIZE]; 324 > 325 WARN_ON_ONCE(random_data_len > 32); 326 if (random_data_len > 32) 327 return; 328 chacha_init_consts(chacha_state); 329 memcpy(&chacha_state->x[4], key, CHACHA_KEY_SIZE); 330 memset(&chacha_state->x[12], 0, sizeof(u32) * 4); 331 chacha20_block(chacha_state, first_block); 332 333 memcpy(key, first_block, CHACHA_KEY_SIZE); 334 memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len); 335 memzero_explicit(first_block, sizeof(first_block)); 336 } 337 338 /* 339 * This function returns a ChaCha state that you may use for generating 340 * random data. It also returns up to 32 bytes on its own of random data 341 * that may be used; random_data_len may not be greater than 32. 342 */ 343 static void crng_make_state(struct chacha_state *chacha_state, 344 u8 *random_data, size_t random_data_len) 345 { 346 unsigned long flags; 347 struct crng *crng; 348 > 349 WARN_ON_ONCE(random_data_len > 32); 350 if (random_data_len > 32) 351 return; 352 /* 353 * For the fast path, we check whether we're ready, unlocked first, and 354 * then re-check once locked later. In the case where we're really not 355 * ready, we do fast key erasure with the base_crng directly, extracting 356 * when crng_init is CRNG_EMPTY. 
357 */ 358 if (!crng_ready()) { 359 bool ready; 360 361 spin_lock_irqsave(&base_crng.lock, flags); 362 ready = crng_ready(); 363 if (!ready) { 364 if (crng_init == CRNG_EMPTY) 365 extract_entropy(base_crng.key, sizeof(base_crng.key)); 366 crng_fast_key_erasure(base_crng.key, chacha_state, 367 random_data, random_data_len); 368 } 369 spin_unlock_irqrestore(&base_crng.lock, flags); 370 if (!ready) 371 return; 372 } 373 374 local_lock_irqsave(&crngs.lock, flags); 375 crng = raw_cpu_ptr(&crngs); 376 377 /* 378 * If our per-cpu crng is older than the base_crng, then it means 379 * somebody reseeded the base_crng. In that case, we do fast key 380 * erasure on the base_crng, and use its output as the new key 381 * for our per-cpu crng. This brings us up to date with base_crng. 382 */ 383 if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) { 384 spin_lock(&base_crng.lock); 385 crng_fast_key_erasure(base_crng.key, chacha_state, 386 crng->key, sizeof(crng->key)); 387 crng->generation = base_crng.generation; 388 spin_unlock(&base_crng.lock); 389 } 390 391 /* 392 * Finally, when we've made it this far, our per-cpu crng has an up 393 * to date key, and we can do fast key erasure with it to produce 394 * some random data and a ChaCha state for the caller. All other 395 * branches of this function are "unlikely", so most of the time we 396 * should wind up here immediately. 397 */ 398 crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len); 399 local_unlock_irqrestore(&crngs.lock, flags); 400 } 401 -- 0-DAY CI Kernel Test Service https://github.com/intel/lkp-tests/wiki
© 2016 - 2025 Red Hat, Inc.