Fixes and enhancements for polled entropy sources:
- Add explicit enable/disable hooks for callout-driven sources (be more power friendly).
- Make "skew" source polled so it runs only when there is entropy demand.
- Adjust entropy collection from polled sources so it's processed sooner.
parent d1bd22e812
commit 9b3a62bd20
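For orientation before the diffs: the driver-facing pattern these hooks enable is sketched below in C. It follows the same shape the "skew" source now uses in kern_rndq.c. The mydrv_* names, the RND_TYPE_ENV type, and the sampling callout are illustrative assumptions, not part of this commit, and the sampler that actually feeds rnd_add_data(9) is omitted.

/*
 * Hedged sketch only: a polled source registering both a "get" callback
 * and the new enable/disable hook.  The rnd(9) calls and flags
 * (rndsource_setcb, rndsource_setenable, RND_FLAG_HASCB,
 * RND_FLAG_HASENABLE) are the ones this commit adds or uses; everything
 * named mydrv_* is hypothetical.
 */
#include <sys/param.h>
#include <sys/callout.h>
#include <sys/rnd.h>

static krndsource_t mydrv_rndsource;
static struct callout mydrv_sample_callout;     /* init/setfunc done at attach time elsewhere */

/* rnd(9) wants more bytes from us: start sampling soon. */
static void
mydrv_rnd_get(size_t bytes, void *priv)
{
        krndsource_t *rs = priv;

        if (RND_ENABLED(rs))
                callout_schedule(&mydrv_sample_callout, 1);
}

/* Collection switched on/off (e.g. via rndctl): stop burning power when off. */
static void
mydrv_rnd_enable(krndsource_t *rs, bool enabled)
{
        if (!enabled)
                callout_stop(&mydrv_sample_callout);
}

static void
mydrv_rnd_attach(void)
{
        rndsource_setcb(&mydrv_rndsource, mydrv_rnd_get, &mydrv_rndsource);
        rndsource_setenable(&mydrv_rndsource, mydrv_rnd_enable);
        rnd_attach_source(&mydrv_rndsource, "mydrv", RND_TYPE_ENV,
            RND_FLAG_COLLECT_VALUE|RND_FLAG_ESTIMATE_VALUE|
            RND_FLAG_HASCB|RND_FLAG_HASENABLE);
}

Compare this with the rnd_skew_enable/rnd_skew_get pair added to kern_rndq.c below, which additionally bounds each polled measurement burst with a 30-second stop callout.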
doc/CHANGES
@@ -1,4 +1,4 @@
-# LIST OF CHANGES FROM LAST RELEASE: <$Revision: 1.2003 $>
+# LIST OF CHANGES FROM LAST RELEASE: <$Revision: 1.2004 $>
 #
 #
 # [Note: This file does not mention every change made to the NetBSD source tree.
@@ -83,3 +83,9 @@ Changes from NetBSD 7.0 to NetBSD 8.0:
         openpam(3): update to 20140912 (ourouparia) [christos 20141024]
         pppd(8): updated to version 2.4.7. [christos 20141025]
         acpi(4): Updated ACPICA to 20140926. [christos 20141025]
+        rnd(9): Add explicit enable/disable hooks for callout-driven
+                sources (be more power friendly). [tls 20141026]
+        rnd(9): Make "skew" source polled so it runs only when there
+                is entropy demand. [tls 20141026]
+        rnd(9): Adjust entropy collection from polled sources so it's
+                processed sooner. [tls 20141026]

rndpseudo.c
@@ -1,4 +1,4 @@
-/* $NetBSD: rndpseudo.c,v 1.22 2014/09/05 09:23:14 matt Exp $ */
+/* $NetBSD: rndpseudo.c,v 1.23 2014/10/26 18:22:32 tls Exp $ */
 
 /*-
  * Copyright (c) 1997-2013 The NetBSD Foundation, Inc.
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rndpseudo.c,v 1.22 2014/09/05 09:23:14 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rndpseudo.c,v 1.23 2014/10/26 18:22:32 tls Exp $");
 
 #if defined(_KERNEL_OPT)
 #include "opt_compat_netbsd.h"
@@ -526,6 +526,20 @@ krndsource_to_rndsource_est(krndsource_t *kr, rndsource_est_t *re)
         re->dv_total = kr->value_delta.outbits;
 }
 
+static void
+krs_setflags(krndsource_t *kr, uint32_t flags, uint32_t mask)
+{
+        uint32_t oflags = kr->flags;
+
+        kr->flags &= ~mask;
+        kr->flags |= (flags & mask);
+
+        if (oflags & RND_FLAG_HASENABLE &&
+            ((oflags & RND_FLAG_NO_COLLECT) != (flags & RND_FLAG_NO_COLLECT))) {
+                kr->enable(kr, !(flags & RND_FLAG_NO_COLLECT));
+        }
+}
+
 int
 rnd_ioctl(struct file *fp, u_long cmd, void *addr)
 {
@@ -536,7 +550,7 @@ rnd_ioctl(struct file *fp, u_long cmd, void *addr)
         rndstat_est_name_t *rsetnm;
         rndctl_t *rctl;
         rnddata_t *rnddata;
-        u_int32_t count, start;
+        uint32_t count, start;
         int ret = 0;
         int estimate_ok = 0, estimate = 0;
 
@@ -736,12 +750,9 @@ rnd_ioctl(struct file *fp, u_long cmd, void *addr)
                 if (rctl->type != 0xff) {
                         while (kr != NULL) {
                                 if (kr->type == rctl->type) {
-                                        kr->flags &= ~rctl->mask;
-
-                                        kr->flags |=
-                                            (rctl->flags & rctl->mask);
+                                        krs_setflags(kr,
+                                            rctl->flags, rctl->mask);
                                 }
-
                                 kr = kr->list.le_next;
                         }
                         mutex_spin_exit(&rndpool_mtx);
@@ -755,9 +766,7 @@ rnd_ioctl(struct file *fp, u_long cmd, void *addr)
                         if (strncmp(kr->name, rctl->name,
                             MIN(sizeof(kr->name),
                             sizeof(rctl->name))) == 0) {
-                                kr->flags &= ~rctl->mask;
-                                kr->flags |= (rctl->flags & rctl->mask);
-
+                                krs_setflags(kr, rctl->flags, rctl->mask);
                                 mutex_spin_exit(&rndpool_mtx);
                                 return (0);
                         }

kern_rndq.c
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_rndq.c,v 1.27 2014/08/11 14:07:55 riastradh Exp $ */
+/* $NetBSD: kern_rndq.c,v 1.28 2014/10/26 18:22:32 tls Exp $ */
 
 /*-
  * Copyright (c) 1997-2013 The NetBSD Foundation, Inc.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_rndq.c,v 1.27 2014/08/11 14:07:55 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_rndq.c,v 1.28 2014/10/26 18:22:32 tls Exp $");
 
 #include <sys/param.h>
 #include <sys/atomic.h>
@@ -160,7 +160,7 @@ static krndsource_t rnd_source_anonymous = {
 krndsource_t rnd_printf_source, rnd_autoconf_source;
 
 void *rnd_process, *rnd_wakeup;
-struct callout skew_callout;
+struct callout skew_callout, skew_stop_callout;
 
 void rnd_wakeup_readers(void);
 static inline uint32_t rnd_counter(void);
@@ -283,6 +283,8 @@ rnd_getmore(size_t byteswanted)
                 KASSERT(rs->getarg != NULL);
                 rs->get(byteswanted, rs->getarg);
 #ifdef RND_VERBOSE
+                rnd_printf("rnd: entropy estimate %zu bits\n",
+                           rndpool_get_entropy_count(&rnd_pool));
                 rnd_printf("rnd: asking source %s for %zu bytes\n",
                            rs->name, byteswanted);
 #endif
@@ -426,41 +428,80 @@ rnd_dv_estimate(krndsource_t *rs, uint32_t v)
 }
 
 #if defined(__HAVE_CPU_COUNTER)
+kmutex_t rnd_skew_mtx;
+
 static void rnd_skew(void *);
 
+static void
+rnd_skew_enable(krndsource_t *rs, bool enabled)
+{
+        mutex_spin_enter(&rnd_skew_mtx);
+        if (enabled) {
+                rnd_skew(rs);
+        } else {
+                callout_stop(&skew_callout);
+        }
+        mutex_spin_exit(&rnd_skew_mtx);
+}
+
+static void
+rnd_skew_stop(void *arg)
+{
+        mutex_spin_enter(&rnd_skew_mtx);
+        callout_stop(&skew_callout);
+        mutex_spin_exit(&rnd_skew_mtx);
+}
+
+static void
+rnd_skew_get(size_t bytes, void *priv)
+{
+        krndsource_t *skewsrcp = priv;
+
+        if (RND_ENABLED(skewsrcp)) {
+                /* Measure for 30s */
+                if (mutex_tryenter(&rnd_skew_mtx)) {
+                        callout_schedule(&skew_stop_callout, hz * 30);
+                        callout_schedule(&skew_callout, 1);
+                        mutex_spin_exit(&rnd_skew_mtx);
+                }
+        }
+}
+
 static void
 rnd_skew(void *arg)
 {
         static krndsource_t skewsrc;
         static int live, flipflop;
 
-        /*
-         * Only one instance of this callout will ever be scheduled
-         * at a time (it is only ever scheduled by itself).  So no
-         * locking is required here.
-         */
-
         /*
          * Even on systems with seemingly stable clocks, the
          * delta-time entropy estimator seems to think we get 1 bit here
-         * about every 2 calls.  That seems like too much.  Instead,
-         * we feed the rnd_counter() value to the value estimator as well,
-         * to take advantage of the additional LZ test on estimated values.
+         * about every 2 calls.
+         *
         */
         if (__predict_false(!live)) {
+                /* XXX must be spin, taken with rndpool_mtx held */
+                mutex_init(&rnd_skew_mtx, MUTEX_DEFAULT, IPL_VM);
+                rndsource_setcb(&skewsrc, rnd_skew_get, &skewsrc);
+                rndsource_setenable(&skewsrc, rnd_skew_enable);
                 rnd_attach_source(&skewsrc, "callout", RND_TYPE_SKEW,
                                   RND_FLAG_COLLECT_VALUE|
-                                  RND_FLAG_ESTIMATE_VALUE);
+                                  RND_FLAG_ESTIMATE_VALUE|
+                                  RND_FLAG_HASCB|RND_FLAG_HASENABLE);
                 live = 1;
                 return;
         }
 
+        mutex_spin_enter(&rnd_skew_mtx);
         flipflop = !flipflop;
 
-        if (flipflop) {
-                rnd_add_uint32(&skewsrc, rnd_counter());
-                callout_schedule(&skew_callout, hz / 10);
-        } else {
-                callout_schedule(&skew_callout, 1);
+        if (RND_ENABLED(&skewsrc)) {
+                if (flipflop) {
+                        rnd_add_uint32(&skewsrc, rnd_counter());
+                        callout_schedule(&skew_callout, hz / 10);
+                } else {
+                        callout_schedule(&skew_callout, 1);
+                }
         }
+        mutex_spin_exit(&rnd_skew_mtx);
 }
 #endif
 
@@ -527,7 +568,9 @@ rnd_init(void)
         */
 #if defined(__HAVE_CPU_COUNTER)
         callout_init(&skew_callout, CALLOUT_MPSAFE);
+        callout_init(&skew_stop_callout, CALLOUT_MPSAFE);
         callout_setfunc(&skew_callout, rnd_skew, NULL);
+        callout_setfunc(&skew_stop_callout, rnd_skew_stop, NULL);
         rnd_skew(NULL);
 #endif
 
@@ -835,15 +878,16 @@ rnd_add_data_ts(krndsource_t *rs, const void *const data, u_int32_t len,
         * is adding entropy at a rate of at least 1 bit every 10 seconds,
         * mark it as "fast" and add its samples in bulk.
         */
-        if (__predict_true(rs->flags & RND_FLAG_FAST)) {
+        if (__predict_true(rs->flags & RND_FLAG_FAST) ||
+            (todo >= RND_SAMPLE_COUNT)) {
                 sample_count = RND_SAMPLE_COUNT;
         } else {
-                if (!cold && rnd_initial_entropy) {
+                if (!(rs->flags & RND_FLAG_HASCB) &&
+                    !cold && rnd_initial_entropy) {
                         struct timeval upt;
 
                         getmicrouptime(&upt);
-                        if ((todo >= RND_SAMPLE_COUNT) ||
-                            (upt.tv_sec > 0 && rs->total > upt.tv_sec * 10) ||
+                        if ( (upt.tv_sec > 0 && rs->total > upt.tv_sec * 10) ||
                             (upt.tv_sec > 10 && rs->total > upt.tv_sec) ||
                             (upt.tv_sec > 100 &&
                              rs->total > upt.tv_sec / 10)) {
@@ -1083,10 +1127,10 @@ rnd_process_events(void)
                         wake++;
                 } else {
                         rnd_empty = 1;
-                        rnd_getmore((RND_POOLBITS - pool_entropy) / 8);
+                        rnd_getmore(howmany((RND_POOLBITS - pool_entropy), NBBY));
 #ifdef RND_VERBOSE
-                        rnd_printf("rnd: empty, asking for %d bits\n",
-                                   (int)((RND_POOLBITS - pool_entropy) / 8));
+                        rnd_printf("rnd: empty, asking for %d bytes\n",
+                                   (int)(howmany((RND_POOLBITS - pool_entropy), NBBY)));
 #endif
                 }
                 mutex_spin_exit(&rndpool_mtx);
@@ -1201,6 +1245,11 @@ rnd_extract_data_locked(void *p, u_int32_t len, u_int32_t flags)
 #endif
         entropy_count = rndpool_get_entropy_count(&rnd_pool);
         if (entropy_count < (RND_ENTROPY_THRESHOLD * 2 + len) * NBBY) {
+#ifdef RND_VERBOSE
+                rnd_printf("rnd: empty, asking for %d bytes\n",
+                           (int)(howmany((RND_POOLBITS - entropy_count),
+                                         NBBY)));
+#endif
                 rnd_getmore(howmany((RND_POOLBITS - entropy_count), NBBY));
         }
         return rndpool_extract_data(&rnd_pool, p, len, flags);

kern_rndsink.c
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_rndsink.c,v 1.9 2014/09/05 05:57:21 matt Exp $ */
+/* $NetBSD: kern_rndsink.c,v 1.10 2014/10/26 18:22:32 tls Exp $ */
 
 /*-
  * Copyright (c) 2013 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_rndsink.c,v 1.9 2014/09/05 05:57:21 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_rndsink.c,v 1.10 2014/10/26 18:22:32 tls Exp $");
 
 #include <sys/param.h>
 #include <sys/types.h>
@@ -216,15 +216,17 @@ rndsinks_enqueue(struct rndsink *rndsink)
         * XXX This should request only rndsink->rs_bytes bytes of
         * entropy, but that might get buffered up indefinitely because
         * kern_rndq has no bound on the duration before it will
-        * process queued entropy samples.  For now, request refilling
-        * the pool altogether so that the buffer will fill up and get
-        * processed.  Later, we ought to (a) bound the duration before
+        * process queued entropy samples.  To work around this, we are
+        * a little too incestuous with kern_rndq: we avoid marking polled
+        * sources "fast" there, and know here that for non-fast sources,
+        * that code will buffer two ints worth of data per source.
+        * Later, we ought to (a) bound the duration before
         * queued entropy samples get processed, and (b) add a target
         * or something -- as soon as we get that much from the entropy
         * sources, distribute it.
         */
         mutex_spin_enter(&rndpool_mtx);
-        rnd_getmore(RND_POOLBITS / NBBY);
+        rnd_getmore(MAX(rndsink->rsink_bytes, 2 * sizeof(uint32_t)));
         mutex_spin_exit(&rndpool_mtx);
 
         switch (rndsink->rsink_state) {

rnd.h
@@ -1,4 +1,4 @@
-/* $NetBSD: rnd.h,v 1.42 2014/09/05 05:52:27 matt Exp $ */
+/* $NetBSD: rnd.h,v 1.43 2014/10/26 18:22:32 tls Exp $ */
 
 /*-
  * Copyright (c) 1997 The NetBSD Foundation, Inc.
@@ -105,6 +105,7 @@ typedef struct {
 #define RND_FLAG_COLLECT_VALUE  0x00002000 /* use value as input */
 #define RND_FLAG_ESTIMATE_TIME  0x00004000 /* estimate entropy on time */
 #define RND_FLAG_ESTIMATE_VALUE 0x00008000 /* estimate entropy on value */
+#define RND_FLAG_HASENABLE      0x00010000 /* has enable/disable fns */
 #define RND_FLAG_DEFAULT        (RND_FLAG_COLLECT_VALUE|RND_FLAG_COLLECT_TIME|\
                                  RND_FLAG_ESTIMATE_TIME)
 
@@ -150,6 +151,7 @@ typedef struct krndsource {
         size_t test_cnt;                /* how much test data accumulated? */
         void (*get)(size_t, void *);    /* pool wants N bytes (badly) */
         void *getarg;                   /* argument to get-function */
+        void (*enable)(struct krndsource *, bool); /* turn on/off */
         rngtest_t *test;                /* test data for RNG type sources */
 } krndsource_t;
 
@@ -161,6 +163,12 @@ rndsource_setcb(struct krndsource *const rs, void (*const cb)(size_t, void *),
         rs->getarg = arg;
 }
 
+static inline void
+rndsource_setenable(struct krndsource *const rs, void *const cb)
+{
+        rs->enable = cb;
+}
+
 typedef struct {
         uint32_t cursor;                /* current add point in the pool */
         uint32_t rotate;                /* how many bits to rotate by */