Commit d1d9cfc3 authored by Linus Torvalds

Merge tag 'random_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random

Pull /dev/random changes from Ted Ts'o:
 "A number of cleanups plus support for the RDSEED instruction, which
  will be showing up in Intel Broadwell CPUs"

* tag 'random_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random:
  random: Add arch_has_random[_seed]()
  random: If we have arch_get_random_seed*(), try it before blocking
  random: Use arch_get_random_seed*() at init time and once a second
  x86, random: Enable the RDSEED instruction
  random: use the architectural HWRNG for the SHA's IV in extract_buf()
  random: clarify bits/bytes in wakeup thresholds
  random: entropy_bytes is actually bits
  random: simplify accounting code
  random: tighten bound on random_read_wakeup_thresh
  random: forget lock in lockless accounting
  random: simplify accounting logic
  random: fix comment on "account"
  random: simplify loop in random_read
  random: fix description of get_random_bytes
  random: fix comment on proc_do_uuid
  random: fix typos / spelling errors in comments
parents cda540ac 7b878d4b
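
For orientation before the diff (the first hunks touch the powerpc and x86 <asm/archrandom.h> headers; the bulk is drivers/char/random.c): the series introduces arch_has_random_seed() and arch_get_random_seed_long()/arch_get_random_seed_int() as the architecture hooks behind RDSEED. The sketch below is not part of the commit; it only illustrates how a kernel-side caller might use the new hooks, mirroring the arch_random_refill() helper added further down. The function name example_gather_seeds() is hypothetical.

#include <linux/random.h>	/* pulls in the arch hooks (or their stubs) */

/*
 * Hypothetical caller: try to fill up to nlongs words with RDSEED output.
 * The hooks return nonzero on success and 0 when no seed is available,
 * so a failed attempt is simply skipped rather than retried.
 */
static unsigned int example_gather_seeds(unsigned long *out, unsigned int nlongs)
{
	unsigned int i, n = 0;

	if (!arch_has_random_seed())	/* no RDSEED, or no arch support */
		return 0;

	for (i = 0; i < nlongs; i++)
		if (arch_get_random_seed_long(&out[n]))
			n++;

	return n;			/* number of words actually filled */
}
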
@@ -25,8 +25,26 @@ static inline int arch_get_random_int(unsigned int *v)
return rc;
}
static inline int arch_has_random(void)
{
return !!ppc_md.get_random_long;
}
int powernv_get_random_long(unsigned long *v);
static inline int arch_get_random_seed_long(unsigned long *v)
{
return 0;
}
static inline int arch_get_random_seed_int(unsigned int *v)
{
return 0;
}
static inline int arch_has_random_seed(void)
{
return 0;
}
#endif /* CONFIG_ARCH_RANDOM */
#endif /* _ASM_POWERPC_ARCHRANDOM_H */
/*
* This file is part of the Linux kernel.
*
* Copyright (c) 2011, Intel Corporation
* Copyright (c) 2011-2014, Intel Corporation
* Authors: Fenghua Yu <fenghua.yu@intel.com>,
* H. Peter Anvin <hpa@linux.intel.com>
*
@@ -31,10 +31,13 @@
#define RDRAND_RETRY_LOOPS 10
#define RDRAND_INT ".byte 0x0f,0xc7,0xf0"
#define RDSEED_INT ".byte 0x0f,0xc7,0xf8"
#ifdef CONFIG_X86_64
# define RDRAND_LONG ".byte 0x48,0x0f,0xc7,0xf0"
# define RDSEED_LONG ".byte 0x48,0x0f,0xc7,0xf8"
#else
# define RDRAND_LONG RDRAND_INT
# define RDSEED_LONG RDSEED_INT
#endif
#ifdef CONFIG_ARCH_RANDOM
@@ -53,6 +56,16 @@ static inline int rdrand_long(unsigned long *v)
return ok;
}
/* A single attempt at RDSEED */
static inline bool rdseed_long(unsigned long *v)
{
unsigned char ok;
asm volatile(RDSEED_LONG "\n\t"
"setc %0"
: "=qm" (ok), "=a" (*v));
return ok;
}
#define GET_RANDOM(name, type, rdrand, nop) \
static inline int name(type *v) \
{ \
@@ -70,18 +83,40 @@ static inline int name(type *v) \
return ok; \
}
#define GET_SEED(name, type, rdseed, nop) \
static inline int name(type *v) \
{ \
unsigned char ok; \
alternative_io("movb $0, %0\n\t" \
nop, \
rdseed "\n\t" \
"setc %0", \
X86_FEATURE_RDSEED, \
ASM_OUTPUT2("=q" (ok), "=a" (*v))); \
return ok; \
}
#ifdef CONFIG_X86_64
GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP5);
GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP4);
GET_SEED(arch_get_random_seed_long, unsigned long, RDSEED_LONG, ASM_NOP5);
GET_SEED(arch_get_random_seed_int, unsigned int, RDSEED_INT, ASM_NOP4);
#else
GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP3);
GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP3);
GET_SEED(arch_get_random_seed_long, unsigned long, RDSEED_LONG, ASM_NOP4);
GET_SEED(arch_get_random_seed_int, unsigned int, RDSEED_INT, ASM_NOP4);
#endif /* CONFIG_X86_64 */
#define arch_has_random() static_cpu_has(X86_FEATURE_RDRAND)
#define arch_has_random_seed() static_cpu_has(X86_FEATURE_RDSEED)
#else
static inline int rdrand_long(unsigned long *v)
@@ -89,6 +124,11 @@ static inline int rdrand_long(unsigned long *v)
return 0;
}
static inline bool rdseed_long(unsigned long *v)
{
return 0;
}
#endif /* CONFIG_ARCH_RANDOM */
extern void x86_init_rdrand(struct cpuinfo_x86 *c);
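
Before the drivers/char/random.c hunks, a note on the x86 macros above: GET_SEED() uses the kernel's alternatives mechanism, so the raw RDSEED opcode bytes are only patched in at boot on CPUs that advertise X86_FEATURE_RDSEED; on every other CPU the default "movb $0, %0" path runs and the helper reports failure. Roughly, and only as a sketch of the expansion (assuming the 64-bit definitions above), arch_get_random_seed_long() boils down to:

/*
 * Approximate expansion of
 * GET_SEED(arch_get_random_seed_long, unsigned long, RDSEED_LONG, ASM_NOP5)
 * on x86-64 -- shown for illustration, not text from the commit.
 */
static inline int arch_get_random_seed_long(unsigned long *v)
{
	unsigned char ok;

	alternative_io("movb $0, %0\n\t"	/* default: report failure */
		       ASM_NOP5,
		       RDSEED_LONG "\n\t"	/* patched in when RDSEED exists */
		       "setc %0",		/* CF=1: a seed landed in *v */
		       X86_FEATURE_RDSEED,
		       ASM_OUTPUT2("=q" (ok), "=a" (*v)));
	return ok;
}
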
@@ -295,17 +295,17 @@
* The minimum number of bits of entropy before we wake up a read on
* /dev/random. Should be enough to do a significant reseed.
*/
static int random_read_wakeup_thresh = 64;
static int random_read_wakeup_bits = 64;
/*
* If the entropy count falls under this number of bits, then we
* should wake up processes which are selecting or polling on write
* access to /dev/random.
*/
static int random_write_wakeup_thresh = 28 * OUTPUT_POOL_WORDS;
static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
/*
* The minimum number of seconds between urandom pool resending. We
* The minimum number of seconds between urandom pool reseeding. We
* do this to limit the amount of entropy that can be drained from the
* input pool even if there are heavy demands on /dev/urandom.
*/
@@ -322,7 +322,7 @@ static int random_min_urandom_seed = 60;
* Register. (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR
* generators. ACM Transactions on Modeling and Computer Simulation
* 2(3):179-194. Also see M. Matsumoto & Y. Kurita, 1994. Twisted
* GFSR generators II. ACM Transactions on Mdeling and Computer
* GFSR generators II. ACM Transactions on Modeling and Computer
* Simulation 4:254-266)
*
* Thanks to Colin Plumb for suggesting this.
@@ -666,10 +666,10 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
r->entropy_total, _RET_IP_);
if (r == &input_pool) {
int entropy_bytes = entropy_count >> ENTROPY_SHIFT;
int entropy_bits = entropy_count >> ENTROPY_SHIFT;
/* should we wake readers? */
if (entropy_bytes >= random_read_wakeup_thresh) {
if (entropy_bits >= random_read_wakeup_bits) {
wake_up_interruptible(&random_read_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
}
@@ -678,9 +678,9 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
* forth between them, until the output pools are 75%
* full.
*/
if (entropy_bytes > random_write_wakeup_thresh &&
if (entropy_bits > random_write_wakeup_bits &&
r->initialized &&
r->entropy_total >= 2*random_read_wakeup_thresh) {
r->entropy_total >= 2*random_read_wakeup_bits) {
static struct entropy_store *last = &blocking_pool;
struct entropy_store *other = &blocking_pool;
@@ -844,6 +844,8 @@ void add_interrupt_randomness(int irq, int irq_flags)
cycles_t cycles = random_get_entropy();
__u32 input[4], c_high, j_high;
__u64 ip;
unsigned long seed;
int credit;
c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
j_high = (sizeof(now) > 4) ? now >> 32 : 0;
@@ -862,20 +864,33 @@ void add_interrupt_randomness(int irq, int irq_flags)
r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
/*
* If we don't have a valid cycle counter, and we see
* back-to-back timer interrupts, then skip giving credit for
* any entropy.
* any entropy, otherwise credit 1 bit.
*/
credit = 1;
if (cycles == 0) {
if (irq_flags & __IRQF_TIMER) {
if (fast_pool->last_timer_intr)
return;
credit = 0;
fast_pool->last_timer_intr = 1;
} else
fast_pool->last_timer_intr = 0;
}
credit_entropy_bits(r, 1);
/*
* If we have architectural seed generator, produce a seed and
* add it to the pool. For the sake of paranoia count it as
* 50% entropic.
*/
if (arch_get_random_seed_long(&seed)) {
__mix_pool_bytes(r, &seed, sizeof(seed), NULL);
credit += sizeof(seed) * 4;
}
credit_entropy_bits(r, credit);
}
#ifdef CONFIG_BLOCK
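
A quick worked example of the "50% entropic" accounting in the hunk above: the seed contributes sizeof(seed) * 4 bits to the credit, i.e. exactly half of the bits that were just mixed into the pool.

/*
 * Worked arithmetic (illustrative, not part of the commit):
 *   64-bit kernel: sizeof(seed) == 8  ->  credit += 8 * 4 = 32 bits
 *                  for the 64 bits of RDSEED output mixed in;
 *   32-bit kernel: sizeof(seed) == 4  ->  credit += 4 * 4 = 16 bits
 *                  for the 32 bits mixed in.
 */
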
@@ -924,19 +939,19 @@ static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
__u32 tmp[OUTPUT_POOL_WORDS];
/* For /dev/random's pool, always leave two wakeup worth's BITS */
int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
/* For /dev/random's pool, always leave two wakeups' worth */
int rsvd_bytes = r->limit ? 0 : random_read_wakeup_bits / 4;
int bytes = nbytes;
/* pull at least as many as BYTES as wakeup BITS */
bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
/* pull at least as much as a wakeup */
bytes = max_t(int, bytes, random_read_wakeup_bits / 8);
/* but never more than the buffer size */
bytes = min_t(int, bytes, sizeof(tmp));
trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
bytes = extract_entropy(r->pull, tmp, bytes,
random_read_wakeup_thresh / 8, rsvd);
random_read_wakeup_bits / 8, rsvd_bytes);
mix_pool_bytes(r, tmp, bytes, NULL);
credit_entropy_bits(r, bytes*8);
}
@@ -952,35 +967,22 @@ static void push_to_pool(struct work_struct *work)
struct entropy_store *r = container_of(work, struct entropy_store,
push_work);
BUG_ON(!r);
_xfer_secondary_pool(r, random_read_wakeup_thresh/8);
_xfer_secondary_pool(r, random_read_wakeup_bits/8);
trace_push_to_pool(r->name, r->entropy_count >> ENTROPY_SHIFT,
r->pull->entropy_count >> ENTROPY_SHIFT);
}
/*
* These functions extracts randomness from the "entropy pool", and
* returns it in a buffer.
*
* The min parameter specifies the minimum amount we can pull before
* failing to avoid races that defeat catastrophic reseeding while the
* reserved parameter indicates how much entropy we must leave in the
* pool after each pull to avoid starving other readers.
*
* Note: extract_entropy() assumes that .poolwords is a multiple of 16 words.
* This function decides how many bytes to actually take from the
* given pool, and also debits the entropy count accordingly.
*/
static size_t account(struct entropy_store *r, size_t nbytes, int min,
int reserved)
{
unsigned long flags;
int wakeup_write = 0;
int have_bytes;
int entropy_count, orig;
size_t ibytes;
/* Hold lock while accounting */
spin_lock_irqsave(&r->lock, flags);
BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
/* Can we pull enough? */
@@ -988,29 +990,19 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
entropy_count = orig = ACCESS_ONCE(r->entropy_count);
have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
ibytes = nbytes;
if (have_bytes < min + reserved) {
/* If limited, never pull more than available */
if (r->limit)
ibytes = min_t(size_t, ibytes, have_bytes - reserved);
if (ibytes < min)
ibytes = 0;
} else {
/* If limited, never pull more than available */
if (r->limit && ibytes + reserved >= have_bytes)
ibytes = have_bytes - reserved;
if (have_bytes >= ibytes + reserved)
entropy_count -= ibytes << (ENTROPY_SHIFT + 3);
else
entropy_count = reserved << (ENTROPY_SHIFT + 3);
if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
goto retry;
if ((r->entropy_count >> ENTROPY_SHIFT)
< random_write_wakeup_thresh)
wakeup_write = 1;
}
spin_unlock_irqrestore(&r->lock, flags);
entropy_count = max_t(int, 0,
entropy_count - (ibytes << (ENTROPY_SHIFT + 3)));
if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
goto retry;
trace_debit_entropy(r->name, 8 * ibytes);
if (wakeup_write) {
if (ibytes &&
(r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) {
wake_up_interruptible(&random_write_wait);
kill_fasync(&fasync, SIGIO, POLL_OUT);
}
@@ -1018,6 +1010,12 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
return ibytes;
}
/*
* This function does the actual extraction for extract_entropy and
* extract_entropy_user.
*
* Note: we assume that .poolwords is a multiple of 16 words.
*/
static void extract_buf(struct entropy_store *r, __u8 *out)
{
int i;
@@ -1029,23 +1027,23 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
__u8 extract[64];
unsigned long flags;
/* Generate a hash across the pool, 16 words (512 bits) at a time */
sha_init(hash.w);
spin_lock_irqsave(&r->lock, flags);
for (i = 0; i < r->poolinfo->poolwords; i += 16)
sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
/*
* If we have a architectural hardware random number
* generator, mix that in, too.
* If we have an architectural hardware random number
* generator, use it for SHA's initial vector
*/
sha_init(hash.w);
for (i = 0; i < LONGS(20); i++) {
unsigned long v;
if (!arch_get_random_long(&v))
break;
hash.l[i] ^= v;
hash.l[i] = v;
}
/* Generate a hash across the pool, 16 words (512 bits) at a time */
spin_lock_irqsave(&r->lock, flags);
for (i = 0; i < r->poolinfo->poolwords; i += 16)
sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
/*
* We mix the hash back into the pool to prevent backtracking
* attacks (where the attacker knows the state of the pool
@@ -1079,6 +1077,15 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
memset(&hash, 0, sizeof(hash));
}
/*
* This function extracts randomness from the "entropy pool", and
* returns it in a buffer.
*
* The min parameter specifies the minimum amount we can pull before
* failing to avoid races that defeat catastrophic reseeding while the
* reserved parameter indicates how much entropy we must leave in the
* pool after each pull to avoid starving other readers.
*/
static ssize_t extract_entropy(struct entropy_store *r, void *buf,
size_t nbytes, int min, int reserved)
{
@@ -1129,6 +1136,10 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
return ret;
}
/*
* This function extracts randomness from the "entropy pool", and
* returns it in a userspace buffer.
*/
static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
size_t nbytes)
{
@@ -1170,8 +1181,9 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
/*
* This function is the exported kernel interface. It returns some
* number of good random numbers, suitable for key generation, seeding
* TCP sequence numbers, etc. It does not use the hw random number
* generator, if available; use get_random_bytes_arch() for that.
* TCP sequence numbers, etc. It does not rely on the hardware random
* number generator. For random bytes direct from the hardware RNG
* (when available), use get_random_bytes_arch().
*/
void get_random_bytes(void *buf, int nbytes)
{
@@ -1238,7 +1250,8 @@ static void init_std_data(struct entropy_store *r)
r->last_pulled = jiffies;
mix_pool_bytes(r, &now, sizeof(now), NULL);
for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
if (!arch_get_random_long(&rv))
if (!arch_get_random_seed_long(&rv) &&
!arch_get_random_long(&rv))
rv = random_get_entropy();
mix_pool_bytes(r, &rv, sizeof(rv), NULL);
}
@@ -1281,56 +1294,71 @@ void rand_initialize_disk(struct gendisk *disk)
}
#endif
static ssize_t
random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
/*
* Attempt an emergency refill using arch_get_random_seed_long().
*
* As with add_interrupt_randomness() be paranoid and only
* credit the output as 50% entropic.
*/
static int arch_random_refill(void)
{
ssize_t n, retval = 0, count = 0;
const unsigned int nlongs = 64; /* Arbitrary number */
unsigned int n = 0;
unsigned int i;
unsigned long buf[nlongs];
if (nbytes == 0)
if (!arch_has_random_seed())
return 0;
while (nbytes > 0) {
n = nbytes;
if (n > SEC_XFER_SIZE)
n = SEC_XFER_SIZE;
for (i = 0; i < nlongs; i++) {
if (arch_get_random_seed_long(&buf[n]))
n++;
}
n = extract_entropy_user(&blocking_pool, buf, n);
if (n) {
unsigned int rand_bytes = n * sizeof(unsigned long);
if (n < 0) {
retval = n;
break;
}
mix_pool_bytes(&input_pool, buf, rand_bytes, NULL);
credit_entropy_bits(&input_pool, rand_bytes*4);
}
return n;
}
static ssize_t
random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
ssize_t n;
if (nbytes == 0)
return 0;
nbytes = min_t(size_t, nbytes, SEC_XFER_SIZE);
while (1) {
n = extract_entropy_user(&blocking_pool, buf, nbytes);
if (n < 0)
return n;
trace_random_read(n*8, (nbytes-n)*8,
ENTROPY_BITS(&blocking_pool),
ENTROPY_BITS(&input_pool));
if (n > 0)
return n;
if (n == 0) {
if (file->f_flags & O_NONBLOCK) {
retval = -EAGAIN;
break;
}
wait_event_interruptible(random_read_wait,
ENTROPY_BITS(&input_pool) >=
random_read_wakeup_thresh);
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
}
/* Pool is (near) empty. Maybe wait and retry. */
/* First try an emergency refill */
if (arch_random_refill())
continue;
}
count += n;
buf += n;
nbytes -= n;
break; /* This break makes the device work */
/* like a named pipe */
}
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
return (count ? count : retval);
wait_event_interruptible(random_read_wait,
ENTROPY_BITS(&input_pool) >=
random_read_wakeup_bits);
if (signal_pending(current))
return -ERESTARTSYS;
}
}
static ssize_t
@@ -1358,9 +1386,9 @@ random_poll(struct file *file, poll_table * wait)
poll_wait(file, &random_read_wait, wait);
poll_wait(file, &random_write_wait, wait);
mask = 0;
if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_thresh)
if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
mask |= POLLIN | POLLRDNORM;
if (ENTROPY_BITS(&input_pool) < random_write_wakeup_thresh)
if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
mask |= POLLOUT | POLLWRNORM;
return mask;
}
@@ -1507,18 +1535,18 @@ EXPORT_SYMBOL(generate_random_uuid);
#include <linux/sysctl.h>
static int min_read_thresh = 8, min_write_thresh;
static int max_read_thresh = INPUT_POOL_WORDS * 32;
static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
static int max_write_thresh = INPUT_POOL_WORDS * 32;
static char sysctl_bootid[16];
/*
* These functions is used to return both the bootid UUID, and random
* This function is used to return both the bootid UUID, and random
* UUID. The difference is in whether table->data is NULL; if it is,
* then a new UUID is generated and returned to the user.
*
* If the user accesses this via the proc interface, it will be returned
* as an ASCII string in the standard UUID format. If accesses via the
* sysctl system call, it is returned as 16 bytes of binary data.
* If the user accesses this via the proc interface, the UUID will be
* returned as an ASCII string in the standard UUID format; if via the
* sysctl system call, as 16 bytes of binary data.
*/
static int proc_do_uuid(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -1583,7 +1611,7 @@ struct ctl_table random_table[] = {
},
{
.procname = "read_wakeup_threshold",
.data = &random_read_wakeup_thresh,
.data = &random_read_wakeup_bits,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
@@ -1592,7 +1620,7 @@ struct ctl_table random_table[] = {
},
{
.procname = "write_wakeup_threshold",
.data = &random_write_wakeup_thresh,
.data = &random_write_wakeup_bits,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
@@ -88,6 +88,22 @@ static inline int arch_get_random_int(unsigned int *v)
{
return 0;
}
static inline int arch_has_random(void)
{
return 0;
}
static inline int arch_get_random_seed_long(unsigned long *v)
{
return 0;
}
static inline int arch_get_random_seed_int(unsigned int *v)
{
return 0;
}
static inline int arch_has_random_seed(void)
{
return 0;
}
#endif
/* Pseudo random number generator from numerical recipes. */