diff --git a/sys/kern/kern_entropy.c b/sys/kern/kern_entropy.c
index 41189552df3f..93b303faa7fa 100644
--- a/sys/kern/kern_entropy.c
+++ b/sys/kern/kern_entropy.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_entropy.c,v 1.60 2023/05/24 20:22:12 riastradh Exp $	*/
+/*	$NetBSD: kern_entropy.c,v 1.61 2023/05/24 20:22:23 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2019 The NetBSD Foundation, Inc.
@@ -75,7 +75,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.60 2023/05/24 20:22:12 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.61 2023/05/24 20:22:23 riastradh Exp $");
 
 #include <sys/param.h>
 #include <sys/types.h>
@@ -1910,6 +1910,14 @@ rnd_add_data(struct krndsource *rs, const void *buf, uint32_t len,
 		return;
 	}
 
+	/*
+	 * Hold up the reset xcall before it zeroes the entropy counts
+	 * on this CPU or globally.  Otherwise, we might leave some
+	 * nonzero entropy attributed to an untrusted source in the
+	 * event of a race with a change to flags.
+	 */
+	kpreempt_disable();
+
 	/* Load a snapshot of the flags.  Ioctl may change them under us.  */
 	flags = atomic_load_relaxed(&rs->flags);
 
@@ -1922,7 +1930,7 @@ rnd_add_data(struct krndsource *rs, const void *buf, uint32_t len,
 	if (!atomic_load_relaxed(&entropy_collection) ||
 	    ISSET(flags, RND_FLAG_NO_COLLECT) ||
 	    !ISSET(flags, RND_FLAG_COLLECT_VALUE|RND_FLAG_COLLECT_TIME))
-		return;
+		goto out;
 
 	/* If asked, ignore the estimate.  */
 	if (ISSET(flags, RND_FLAG_NO_ESTIMATE))
@@ -1939,6 +1947,9 @@ rnd_add_data(struct krndsource *rs, const void *buf, uint32_t len,
 		rnd_add_data_1(rs, &extra, sizeof extra, 0,
 		    RND_FLAG_COLLECT_TIME);
 	}
+
+out:	/* Allow concurrent changes to flags to finish.  */
+	kpreempt_enable();
 }
 
 static unsigned
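
Note on the synchronization pattern the patch relies on: a low-priority
xcall runs in thread context on each CPU in turn, so it cannot run on a
CPU while the current thread there has preemption disabled.  Bracketing
the entropy accounting in rnd_add_data() with kpreempt_disable() /
kpreempt_enable() therefore forces the reset xcall to observe either
all or none of that section.  Below is a minimal sketch of the idea,
not code from the patch: reset_xc() and example_reset() are
hypothetical names, while xcall(9) (xc_broadcast, xc_wait) and
kpreempt_disable(9) are the real NetBSD primitives.

	#include <sys/param.h>
	#include <sys/types.h>
	#include <sys/xcall.h>

	static void
	reset_xc(void *arg1, void *arg2)
	{

		/*
		 * Hypothetical reset handler.  Runs in thread context
		 * on every CPU; it cannot be scheduled on a CPU whose
		 * current thread has preemption disabled, so it never
		 * interleaves with a kpreempt_disable()ed section.
		 */
	}

	static void
	example_reset(void)
	{

		/* Broadcast the xcall and wait for every CPU to run it. */
		xc_wait(xc_broadcast(0, &reset_xc, NULL, NULL));
	}

This is why the patch changes the early `return` to `goto out`: every
exit path after kpreempt_disable() must pass through kpreempt_enable(),
or the xcall (and preemption on that CPU) would be blocked forever.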