/* $NetBSD: rnd.c,v 1.21 2000/07/02 21:04:33 sommerfeld Exp $ */

/*-
 * Copyright (c) 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Michael Graff <explorer@flame.org>.  This code uses ideas and
 * algorithms from the Linux driver written by Ted Ts'o.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/rnd.h>
#include <sys/vnode.h>
#include <sys/pool.h>

#ifdef __HAVE_CPU_COUNTER
#include <machine/rnd.h>
#endif

#ifdef RND_DEBUG
#define DPRINTF(l,x)      if (rnd_debug & (l)) printf x
int     rnd_debug = 0;
#else
#define DPRINTF(l,x)
#endif

#define RND_DEBUG_WRITE         0x0001
#define RND_DEBUG_READ          0x0002
#define RND_DEBUG_IOCTL         0x0004
#define RND_DEBUG_SNOOZE        0x0008

/*
 * Define RND_VERBOSE to report each device as it attaches as an
 * entropy source.
 */
/* #define RND_VERBOSE */

/*
 * Use the extraction time as a somewhat-random source
 */
#ifndef RND_USE_EXTRACT_TIME
#define RND_USE_EXTRACT_TIME 1
#endif

/*
 * The size of a temporary buffer, malloc()ed when needed, and used for
 * reading and writing data.
 */
#define RND_TEMP_BUFFER_SIZE 128

/*
 * This is a little bit of state information attached to each device that we
 * collect entropy from.  This is simply a collection buffer, and when it
 * is full it will be "detached" from the source and added to the entropy
 * pool after entropy is distilled as much as possible.
 */
#define RND_SAMPLE_COUNT        64      /* collect N samples, then compress */
typedef struct _rnd_sample_t {
        SIMPLEQ_ENTRY(_rnd_sample_t) next;
        rndsource_t     *source;
        int             cursor;
        int             entropy;
        u_int32_t       ts[RND_SAMPLE_COUNT];
        u_int32_t       values[RND_SAMPLE_COUNT];
} rnd_sample_t;

/*
 * The event queue.  Fields are altered at an interrupt level.
 * All accesses must be protected at splhigh().
 */
volatile int                    rnd_timeout_pending;
SIMPLEQ_HEAD(, _rnd_sample_t)   rnd_samples;

/*
 * our select/poll queue
 */
struct selinfo rnd_selq;

/*
 * Set when there are readers blocking on data from us
 */
#define RND_READWAITING 0x00000001
volatile u_int32_t rnd_status;

/*
 * Memory pool; accessed only at splhigh().
 */
struct pool rnd_mempool;

/*
 * Our random pool.  This is defined here rather than using the general
 * purpose one defined in rndpool.c.
 *
 * Samples are collected and queued at splhigh() into a separate queue
 * (rnd_samples, see above), and processed in a timeout routine; therefore,
 * all other accesses to the random pool must be at splsoftclock() as well.
 */
rndpool_t rnd_pool;

/*
 * This source is used to easily "remove" queue entries when the source
 * which actually generated the events is going away.
 */
static rndsource_t rnd_source_no_collect = {
        { 'N', 'o', 'C', 'o', 'l', 'l', 'e', 'c', 't', 0, 0, 0, 0, 0, 0, 0 },
        0, 0, 0, 0,
        RND_TYPE_UNKNOWN,
        (RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE | RND_TYPE_UNKNOWN),
        NULL
};

struct callout rnd_callout = CALLOUT_INITIALIZER;

void    rndattach __P((int));
int     rndopen __P((dev_t, int, int, struct proc *));
int     rndclose __P((dev_t, int, int, struct proc *));
int     rndread __P((dev_t, struct uio *, int));
int     rndwrite __P((dev_t, struct uio *, int));
int     rndioctl __P((dev_t, u_long, caddr_t, int, struct proc *));
int     rndpoll __P((dev_t, int, struct proc *));

static inline void      rnd_wakeup_readers(void);
static inline u_int32_t rnd_estimate_entropy(rndsource_t *, u_int32_t);
static inline u_int32_t rnd_counter(void);
static void             rnd_timeout(void *);

static int              rnd_ready = 0;
static int              rnd_have_entropy = 0;

LIST_HEAD(, __rndsource_element)        rnd_sources;

/*
 * Generate a 32-bit counter.  This should be more machine dependent,
 * using cycle counters and the like when possible.
 */
static inline u_int32_t
rnd_counter()
{
        struct timeval tv;

#ifdef __HAVE_CPU_COUNTER
        if (cpu_hascounter())
                return (cpu_counter() & 0xffffffff);
#endif
        microtime(&tv);

        return tv.tv_sec * 1000000 + tv.tv_usec;
}

/*
 * Check to see if there are readers waiting on us.  If so, kick them.
 *
 * Must be called at splsoftclock().
 */
static inline void
rnd_wakeup_readers()
{
        /*
         * If we have added new bits, and now have enough to do something,
         * wake up sleeping readers.
         */
        if (rndpool_get_entropy_count(&rnd_pool) > RND_ENTROPY_THRESHOLD * 8) {
                if (rnd_status & RND_READWAITING) {
                        DPRINTF(RND_DEBUG_SNOOZE,
                            ("waking up pending readers.\n"));
                        rnd_status &= ~RND_READWAITING;
                        wakeup(&rnd_selq);
                }
                selwakeup(&rnd_selq);

                /*
                 * Allow open of /dev/random now, too.
                 */
                rnd_have_entropy = 1;
        }
}

/*
 * Use the timing of the event to estimate the entropy gathered.
 * If all the differentials (first, second, and third) are non-zero, return
 * non-zero.  If any of these are zero, return zero.
 */
static inline u_int32_t
rnd_estimate_entropy(rs, t)
        rndsource_t *rs;
        u_int32_t t;
{
        int32_t delta;
        int32_t delta2;
        int32_t delta3;

        /*
         * If the time counter has overflowed, calculate the real difference.
         * If it has not, it is simpler.
         */
        if (t < rs->last_time)
                delta = UINT_MAX - rs->last_time + t;
        else
                delta = rs->last_time - t;

        if (delta < 0)
                delta = -delta;

        /*
         * Calculate the second and third order differentials
         */
        delta2 = rs->last_delta - delta;
        if (delta2 < 0)
                delta2 = -delta2;

        delta3 = rs->last_delta2 - delta2;
        if (delta3 < 0)
                delta3 = -delta3;

        rs->last_time = t;
        rs->last_delta = delta;
        rs->last_delta2 = delta2;

        /*
         * If any delta is 0, we got no entropy.  If all are non-zero, we
         * might have something.
         */
        if (delta == 0 || delta2 == 0 || delta3 == 0)
                return 0;

        return 1;
}
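
/*
 * Worked example (illustrative only; the numbers are made up): suppose one
 * source delivers samples at counter values 100, 180, 240 and 350.  The
 * first-order deltas are 80, 60 and 110.  For the sample at 350 the second
 * differential is |60 - 110| = 50 and, since the previous second
 * differential was |80 - 60| = 20, the third is |20 - 50| = 30.  All three
 * are non-zero, so that sample is credited one bit of entropy; a perfectly
 * regular source produces a zero differential and is credited nothing.
 */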

/*
 * attach the random device, and initialize the global random pool
 * for our use.
 */
void
rndattach(num)
        int num;
{

        rnd_init();
}

void
rnd_init(void)
{

        if (rnd_ready)
                return;

        LIST_INIT(&rnd_sources);
        SIMPLEQ_INIT(&rnd_samples);

        pool_init(&rnd_mempool, sizeof(rnd_sample_t), 0, 0, 0, "rndsample",
            0, NULL, NULL, 0);

        rndpool_init(&rnd_pool);

        rnd_ready = 1;

#ifdef RND_VERBOSE
        printf("Random device ready\n");
#endif
}

int
rndopen(dev, flags, ifmt, p)
        dev_t dev;
        int flags, ifmt;
        struct proc *p;
{

        if (rnd_ready == 0)
                return (ENXIO);

        if (minor(dev) == RND_DEV_URANDOM)
                return (0);

        /*
         * If this is the strong random device and we have not yet collected
         * any entropy, don't allow it to be opened.  This will prevent
         * waiting forever for something that just will not appear.
         */
        if (minor(dev) == RND_DEV_RANDOM) {
                if (rnd_have_entropy == 0)
                        return (ENXIO);
                else
                        return (0);
        }

        return (ENXIO);
}

int
rndclose(dev, flags, ifmt, p)
        dev_t dev;
        int flags, ifmt;
        struct proc *p;
{

        return (0);
}

int
rndread(dev, uio, ioflag)
        dev_t dev;
        struct uio *uio;
        int ioflag;
{
        int             ret;
        u_int32_t       nread;
        int             n;
        int             s;
        u_int8_t        *buf;
        u_int32_t       mode;
        u_int32_t       entcnt;

        DPRINTF(RND_DEBUG_READ,
            ("Random: Read of %d requested, flags 0x%08x\n",
            uio->uio_resid, ioflag));

        if (uio->uio_resid == 0)
                return (0);

        switch (minor(dev)) {
        case RND_DEV_RANDOM:
                mode = RND_EXTRACT_GOOD;
                break;
        case RND_DEV_URANDOM:
                mode = RND_EXTRACT_ANY;
                break;
        default:
                /* Can't happen, but this is cheap */
                return (ENXIO);
        }

        ret = 0;

        buf = malloc(RND_TEMP_BUFFER_SIZE, M_TEMP, M_WAITOK);

        while (uio->uio_resid > 0) {
                n = min(RND_TEMP_BUFFER_SIZE, uio->uio_resid);

                /*
                 * Make certain there is data available.  If there
                 * is, do the I/O even if it is partial.  If not,
                 * sleep unless the user has requested non-blocking
                 * I/O.
                 */
                for (;;) {
                        /*
                         * If not requesting strong randomness, we
                         * can always read.
                         */
                        if (mode == RND_EXTRACT_ANY)
                                break;

                        /*
                         * How much entropy do we have?  If it is enough for
                         * one hash, we can read.
                         */
                        s = splsoftclock();
                        entcnt = rndpool_get_entropy_count(&rnd_pool);
                        splx(s);
                        if (entcnt >= RND_ENTROPY_THRESHOLD * 8)
                                break;

                        /*
                         * Data is not available.
                         */
                        if (ioflag & IO_NDELAY) {
                                ret = EWOULDBLOCK;
                                goto out;
                        }

                        rnd_status |= RND_READWAITING;
                        ret = tsleep(&rnd_selq, PRIBIO|PCATCH,
                            "rndread", 0);

                        if (ret)
                                goto out;
                }

                nread = rnd_extract_data(buf, n, mode);

                /*
                 * copy (possibly partial) data to the user.
                 * If an error occurs, or this is a partial
                 * read, bail out.
                 */
                ret = uiomove((caddr_t)buf, nread, uio);
                if (ret != 0 || nread != n)
                        goto out;
        }

out:
        free(buf, M_TEMP);
        return (ret);
}
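
/*
 * Illustrative sketch (comment only, not part of the driver): the blocking
 * logic above, as seen from a hypothetical userland program.  A non-blocking
 * read of /dev/random fails with EWOULDBLOCK while the pool holds less than
 * RND_ENTROPY_THRESHOLD bytes of estimated entropy, whereas /dev/urandom
 * always returns data.  The usual <fcntl.h>, <unistd.h>, <errno.h> and
 * <stdio.h> includes and error handling are omitted:
 *
 *      int fd = open("/dev/random", O_RDONLY | O_NONBLOCK);
 *      char buf[16];
 *      ssize_t r = read(fd, buf, sizeof(buf));
 *      if (r == -1 && errno == EWOULDBLOCK)
 *              printf("entropy pool not ready yet\n");
 *      close(fd);
 */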

int
rndwrite(dev, uio, ioflag)
        dev_t dev;
        struct uio *uio;
        int ioflag;
{
        u_int8_t        *buf;
        int             ret;
        int             n;
        int             s;

        DPRINTF(RND_DEBUG_WRITE,
            ("Random: Write of %d requested\n", uio->uio_resid));

        if (uio->uio_resid == 0)
                return (0);

        ret = 0;

        buf = malloc(RND_TEMP_BUFFER_SIZE, M_TEMP, M_WAITOK);

        while (uio->uio_resid > 0) {
                n = min(RND_TEMP_BUFFER_SIZE, uio->uio_resid);

                ret = uiomove((caddr_t)buf, n, uio);
                if (ret != 0)
                        break;

                /*
                 * Mix in the bytes.
                 */
                s = splsoftclock();
                rndpool_add_data(&rnd_pool, buf, n, 0);
                splx(s);

                DPRINTF(RND_DEBUG_WRITE, ("Random: Copied in %d bytes\n", n));
        }

        free(buf, M_TEMP);
        return (ret);
}

int
rndioctl(dev, cmd, addr, flag, p)
        dev_t dev;
        u_long cmd;
        caddr_t addr;
        int flag;
        struct proc *p;
{
        int ret;
        rndsource_element_t *rse;
        rndstat_t *rst;
        rndstat_name_t *rstnm;
        rndctl_t *rctl;
        rnddata_t *rnddata;
        u_int32_t count;
        u_int32_t start;
        int s;

        ret = 0;

        switch (cmd) {

        /*
         * Handled in upper layer really, but we have to return zero
         * for it to be accepted by the upper layer.
         */
        case FIONBIO:
        case FIOASYNC:
                break;

        case RNDGETENTCNT:
                s = splsoftclock();
                *(u_int32_t *)addr = rndpool_get_entropy_count(&rnd_pool);
                splx(s);

                break;

        case RNDGETPOOLSTAT:
                if ((ret = suser(p->p_ucred, &p->p_acflag)) != 0)
                        return (ret);

                s = splsoftclock();
                rndpool_get_stats(&rnd_pool, addr, sizeof(rndpoolstat_t));
                splx(s);
                break;

        case RNDGETSRCNUM:
                if ((ret = suser(p->p_ucred, &p->p_acflag)) != 0)
                        return (ret);

                rst = (rndstat_t *)addr;

                if (rst->count == 0)
                        break;

                if (rst->count > RND_MAXSTATCOUNT)
                        return (EINVAL);

                /*
                 * find the starting source by running through the
                 * list of sources.
                 */
                rse = rnd_sources.lh_first;
                start = rst->start;
                while (rse != NULL && start >= 1) {
                        rse = rse->list.le_next;
                        start--;
                }

                /*
                 * Return up to as many structures as the user asked
                 * for.  If we run out of sources, a count of zero
                 * will be returned, without an error.
                 */
                for (count = 0 ; count < rst->count && rse != NULL ; count++) {
                        bcopy(&rse->data, &rst->source[count],
                            sizeof(rndsource_t));
                        /* Zero out information which may leak */
                        rst->source[count].last_time = 0;
                        rst->source[count].last_delta = 0;
                        rst->source[count].last_delta2 = 0;
                        rst->source[count].state = 0;
                        rse = rse->list.le_next;
                }

                rst->count = count;

                break;

        case RNDGETSRCNAME:
                if ((ret = suser(p->p_ucred, &p->p_acflag)) != 0)
                        return (ret);

                /*
                 * scan through the list, trying to find the name
                 */
                rstnm = (rndstat_name_t *)addr;
                rse = rnd_sources.lh_first;
                while (rse != NULL) {
                        if (strncmp(rse->data.name, rstnm->name, 16) == 0) {
                                bcopy(&rse->data, &rstnm->source,
                                    sizeof(rndsource_t));

                                return 0;
                        }
                        rse = rse->list.le_next;
                }

                ret = ENOENT;   /* name not found */

                break;

        case RNDCTL:
                if ((ret = suser(p->p_ucred, &p->p_acflag)) != 0)
                        return (ret);

                /*
                 * set flags to enable/disable entropy counting and/or
                 * collection
                 */
                rctl = (rndctl_t *)addr;
                rse = rnd_sources.lh_first;

                /*
                 * flags set apply to all sources of this type
                 */
                if (rctl->type != 0xff) {
                        while (rse != NULL) {
                                if (rse->data.type == rctl->type) {
                                        rse->data.flags &= ~rctl->mask;
                                        rse->data.flags |= (rctl->flags
                                            & rctl->mask);
                                }
                                rse = rse->list.le_next;
                        }

                        return 0;
                }

                /*
                 * scan through the list, trying to find the name
                 */
                while (rse != NULL) {
                        if (strncmp(rse->data.name, rctl->name, 16) == 0) {
                                rse->data.flags &= ~rctl->mask;
                                rse->data.flags |= (rctl->flags & rctl->mask);

                                return 0;
                        }
                        rse = rse->list.le_next;
                }

                ret = ENOENT;   /* name not found */

                break;

        case RNDADDDATA:
                if ((ret = suser(p->p_ucred, &p->p_acflag)) != 0)
                        return (ret);

                rnddata = (rnddata_t *)addr;

                s = splsoftclock();
                rndpool_add_data(&rnd_pool, rnddata->data, rnddata->len,
                    rnddata->entropy);

                rnd_wakeup_readers();
                splx(s);

                break;

        default:
                return (EINVAL);
        }

        return (ret);
}
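
/*
 * Illustrative sketch (comment only, not part of the driver): a hypothetical
 * userland query of the current entropy estimate through the RNDGETENTCNT
 * ioctl handled above; the count is returned in bits.  Needs <sys/rnd.h>,
 * <sys/ioctl.h>, <fcntl.h>; error handling omitted:
 *
 *      u_int32_t entcnt;
 *      int fd = open("/dev/urandom", O_RDONLY);
 *      if (ioctl(fd, RNDGETENTCNT, &entcnt) == 0)
 *              printf("%u bits of entropy in the pool\n", entcnt);
 *      close(fd);
 */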

int
rndpoll(dev, events, p)
        dev_t dev;
        int events;
        struct proc *p;
{
        int revents;
        int s;
        u_int32_t entcnt;

        /*
         * we are always writable
         */
        revents = events & (POLLOUT | POLLWRNORM);

        /*
         * Save some work if not checking for reads
         */
        if ((events & (POLLIN | POLLRDNORM)) == 0)
                return (revents);

        /*
         * If the minor device is not /dev/random, we are always readable.
         */
        if (minor(dev) != RND_DEV_RANDOM) {
                revents |= events & (POLLIN | POLLRDNORM);
                return (revents);
        }

        /*
         * make certain we have enough entropy to be readable
         */
        s = splsoftclock();
        entcnt = rndpool_get_entropy_count(&rnd_pool);
        splx(s);

        if (entcnt >= RND_ENTROPY_THRESHOLD * 8)
                revents |= events & (POLLIN | POLLRDNORM);
        else
                selrecord(p, &rnd_selq);

        return (revents);
}

static rnd_sample_t *
rnd_sample_allocate(rndsource_t *source)
{
        rnd_sample_t *c;
        int s;

        s = splhigh();
        c = pool_get(&rnd_mempool, PR_WAITOK);
        splx(s);
        if (c == NULL)
                return (NULL);

        c->source = source;
        c->cursor = 0;
        c->entropy = 0;

        return (c);
}

/*
 * Don't wait on allocation.  To be used in an interrupt context.
 */
static rnd_sample_t *
rnd_sample_allocate_isr(rndsource_t *source)
{
        rnd_sample_t *c;
        int s;

        s = splhigh();
        c = pool_get(&rnd_mempool, 0);
        splx(s);
        if (c == NULL)
                return (NULL);

        c->source = source;
        c->cursor = 0;
        c->entropy = 0;

        return (c);
}

static void
rnd_sample_free(rnd_sample_t *c)
{
        int s;

        memset(c, 0, sizeof(rnd_sample_t));
        s = splhigh();
        pool_put(&rnd_mempool, c);
        splx(s);
}

/*
 * add a source to our list of sources
 */
void
rnd_attach_source(rs, name, type, flags)
        rndsource_element_t *rs;
        char *name;
        u_int32_t type;
        u_int32_t flags;
{
        u_int32_t ts;

        ts = rnd_counter();

        strcpy(rs->data.name, name);
        rs->data.last_time = ts;
        rs->data.last_delta = 0;
        rs->data.last_delta2 = 0;
        rs->data.total = 0;

        /*
         * force network devices to not collect any entropy by
         * default
         */
        if (type == RND_TYPE_NET)
                flags |= (RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE);

        rs->data.type = type;
        rs->data.flags = flags;

        rs->data.state = rnd_sample_allocate(&rs->data);

        LIST_INSERT_HEAD(&rnd_sources, rs, list);

#ifdef RND_VERBOSE
        printf("%s: attached as an entropy source\n", rs->data.name);
#endif
}
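
/*
 * Illustrative sketch (comment only, not part of the driver): how a
 * hypothetical device driver "xyz0" might register itself as an entropy
 * source at attach time.  The softc and its field names are made up:
 *
 *      struct xyz_softc {
 *              struct device sc_dev;
 *              rndsource_element_t sc_rnd_source;
 *      };
 *
 *      rnd_attach_source(&sc->sc_rnd_source, sc->sc_dev.dv_xname,
 *          RND_TYPE_DISK, 0);
 *
 * Sources attached with RND_TYPE_NET are forced to the no-collect and
 * no-estimate flags above unless those flags are later cleared through
 * the RNDCTL ioctl.
 */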

/*
 * remove a source from our list of sources
 */
void
rnd_detach_source(rs)
        rndsource_element_t *rs;
{
        rnd_sample_t *sample;
        rndsource_t *source;
        int s;

        s = splhigh();

        LIST_REMOVE(rs, list);

        source = &rs->data;

        if (source->state) {
                rnd_sample_free(source->state);
                source->state = NULL;
        }

        /*
         * If there are samples queued up "remove" them from the sample queue
         * by setting the source to the no-collect pseudosource.
         */
        sample = SIMPLEQ_FIRST(&rnd_samples);
        while (sample != NULL) {
                if (sample->source == source)
                        sample->source = &rnd_source_no_collect;

                sample = SIMPLEQ_NEXT(sample, next);
        }

        splx(s);
}

/*
 * Add a value to the entropy pool.  If rs is NULL no entropy estimation
 * will be performed, otherwise it should point to the source-specific
 * source structure.
 */
void
rnd_add_uint32(rs, val)
        rndsource_element_t *rs;
        u_int32_t val;
{
        rndsource_t *rst;
        int s;
        rnd_sample_t *state;
        u_int32_t ts;

        /*
         * If we are not collecting any data at all, just return.
         */
        if (rs == NULL)
                return;

        rst = &rs->data;

        if (rst->flags & RND_FLAG_NO_COLLECT)
                return;

        /*
         * Sample the counter as soon as possible to avoid
         * entropy overestimation.
         */
        ts = rnd_counter();

        /*
         * If the sample buffer is NULL, try to allocate one here.  If this
         * fails, drop this sample.
         */
        state = rst->state;
        if (state == NULL) {
                state = rnd_sample_allocate_isr(rst);
                if (state == NULL)
                        return;
                rst->state = state;
        }

        /*
         * If we are estimating entropy on this source,
         * calculate differentials.
         */
        if ((rst->flags & RND_FLAG_NO_ESTIMATE) == 0)
                state->entropy += rnd_estimate_entropy(rst, ts);

        state->ts[state->cursor] = ts;
        state->values[state->cursor] = val;
        state->cursor++;

        /*
         * If the state arrays are not full, we're done.
         */
        if (state->cursor < RND_SAMPLE_COUNT)
                return;

        /*
         * State arrays are full.  Queue this chunk on the processing queue.
         */
        s = splhigh();
        SIMPLEQ_INSERT_HEAD(&rnd_samples, state, next);
        rst->state = NULL;

        /*
         * If the timeout isn't pending, have it run in the near future.
         */
        if (rnd_timeout_pending == 0) {
                rnd_timeout_pending = 1;
                callout_reset(&rnd_callout, 1, rnd_timeout, NULL);
        }
        splx(s);

        /*
         * To get here we have to have queued the state up, and therefore
         * we need a new state buffer.  If we can, allocate one now;
         * if we don't get it, it doesn't matter; we'll try again on
         * the next random event.
         */
        rst->state = rnd_sample_allocate_isr(rst);
}
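
/*
 * Illustrative sketch (comment only, not part of the driver): continuing
 * the hypothetical xyz0 example above, its interrupt handler could feed one
 * sample per interrupt.  Any 32-bit value related to the event will do for
 * "val"; the timing entropy itself is estimated from rnd_counter() inside
 * rnd_add_uint32().  xyz_read_status() and the softc layout are made up:
 *
 *      int
 *      xyz_intr(void *arg)
 *      {
 *              struct xyz_softc *sc = arg;
 *              u_int32_t status = xyz_read_status(sc);
 *
 *              rnd_add_uint32(&sc->sc_rnd_source, status);
 *              return (1);
 *      }
 */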

/*
 * timeout, run to process the events in the ring buffer.  Only one of these
 * can possibly be running at a time, run at splsoftclock().
 */
static void
rnd_timeout(arg)
        void *arg;
{
        rnd_sample_t *sample;
        rndsource_t *source;
        int s;
        u_int32_t entropy;

        /*
         * sample queue is protected at splhigh(); go there briefly to dequeue.
         */
        s = splhigh();
        rnd_timeout_pending = 0;

        sample = SIMPLEQ_FIRST(&rnd_samples);
        while (sample != NULL) {
                SIMPLEQ_REMOVE_HEAD(&rnd_samples, sample, next);
                splx(s);

                source = sample->source;

                /*
                 * We repeat this check here, since it is possible the source
                 * was disabled before we were called, but after the entry
                 * was queued.
                 */
                if ((source->flags & RND_FLAG_NO_COLLECT) == 0) {
                        rndpool_add_data(&rnd_pool, sample->values,
                            RND_SAMPLE_COUNT * 4, 0);

                        entropy = sample->entropy;
                        if (source->flags & RND_FLAG_NO_ESTIMATE)
                                entropy = 0;

                        rndpool_add_data(&rnd_pool, sample->ts,
                            RND_SAMPLE_COUNT * 4,
                            entropy);

                        source->total += sample->entropy;
                }

                rnd_sample_free(sample);

                /* Go back to splhigh to dequeue the next one.. */
                s = splhigh();
                sample = SIMPLEQ_FIRST(&rnd_samples);
        }
        splx(s);

        /*
         * wake up any potential readers waiting.
         */
        rnd_wakeup_readers();
}

int
rnd_extract_data(p, len, flags)
        void *p;
        u_int32_t len;
        u_int32_t flags;
{
        int s;
        int retval;

        s = splsoftclock();

        retval = rndpool_extract_data(&rnd_pool, p, len, flags);

        splx(s);

        return retval;
}
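
/*
 * Illustrative sketch (comment only, not part of the driver): an in-kernel
 * consumer can pull bytes straight from the pool.  A caller wanting
 * best-effort random bytes, for example to seed a hypothetical PRNG,
 * might do:
 *
 *      u_int8_t seed[16];
 *
 *      (void)rnd_extract_data(seed, sizeof(seed), RND_EXTRACT_ANY);
 *
 * With RND_EXTRACT_GOOD the pool may return fewer bytes than requested
 * (compare the partial-read handling in rndread() above), so the return
 * value must be checked.
 */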