  1. /*
  2.  * random.c -- A strong random number generator
  3.  *
  4.  * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
  5.  *
  6.  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.  All
  7.  * rights reserved.
  8.  *
  9.  * Redistribution and use in source and binary forms, with or without
  10.  * modification, are permitted provided that the following conditions
  11.  * are met:
  12.  * 1. Redistributions of source code must retain the above copyright
  13.  *    notice, and the entire permission notice in its entirety,
  14.  *    including the disclaimer of warranties.
  15.  * 2. Redistributions in binary form must reproduce the above copyright
  16.  *    notice, this list of conditions and the following disclaimer in the
  17.  *    documentation and/or other materials provided with the distribution.
  18.  * 3. The name of the author may not be used to endorse or promote
  19.  *    products derived from this software without specific prior
  20.  *    written permission.
  21.  *
  22.  * ALTERNATIVELY, this product may be distributed under the terms of
  23.  * the GNU General Public License, in which case the provisions of the GPL are
  24.  * required INSTEAD OF the above restrictions.  (This clause is
  25.  * necessary due to a potential bad interaction between the GPL and
  26.  * the restrictions contained in a BSD-style copyright.)
  27.  *
  28.  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
  29.  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  30.  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
  31.  * WHICH ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE
  32.  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  33.  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
  34.  * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  35.  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
  36.  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  37.  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  38.  * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
  39.  * DAMAGE.
  40.  */
  41.  
  42. /*
  43.  * (now, with legal B.S. out of the way.....)
  44.  *
  45.  * This routine gathers environmental noise from device drivers, etc.,
  46.  * and returns good random numbers, suitable for cryptographic use.
  47.  * Besides the obvious cryptographic uses, these numbers are also good
  48.  * for seeding TCP sequence numbers, and other places where it is
  49.  * desirable to have numbers which are not only random, but hard to
  50.  * predict by an attacker.
  51.  *
  52.  * Theory of operation
  53.  * ===================
  54.  *
  55.  * Computers are very predictable devices.  Hence it is extremely hard
  56.  * to produce truly random numbers on a computer --- as opposed to
  57.  * pseudo-random numbers, which can easily be generated by using an
  58.  * algorithm.  Unfortunately, it is very easy for attackers to guess
  59.  * the output sequence of a pseudo-random number generator, and for some
  60.  * applications this is not acceptable.  So instead, we must try to
  61.  * gather "environmental noise" from the computer's environment, which
  62.  * must be hard for outside attackers to observe, and use that to
  63.  * generate random numbers.  In a Unix environment, this is best done
  64.  * from inside the kernel.
  65.  *
  66.  * Sources of randomness from the environment include inter-keyboard
  67.  * timings, inter-interrupt timings from some interrupts, and other
  68.  * events which are both (a) non-deterministic and (b) hard for an
  69.  * outside observer to measure.  Randomness from these sources is
  70.  * added to an "entropy pool", which is mixed using a CRC-like function.
  71.  * This is not cryptographically strong, but it is adequate assuming
  72.  * the randomness is not chosen maliciously, and it is fast enough that
  73.  * the overhead of doing it on every interrupt is very reasonable.
  74.  * As random bytes are mixed into the entropy pool, the routines keep
  75.  * an *estimate* of how many bits of randomness have been stored into
  76.  * the random number generator's internal state.
  77.  *
  78.  * When random bytes are desired, they are obtained by taking the SHA
  79.  * hash of the contents of the "entropy pool".  The SHA hash avoids
  80.  * exposing the internal state of the entropy pool.  It is believed to
  81.  * be computationally infeasible to derive any useful information
  82.  * about the input of SHA from its output.  Even if it is possible to
  83.  * analyze SHA in some clever way, as long as the amount of data
  84.  * returned from the generator is less than the inherent entropy in
  85.  * the pool, the output data is totally unpredictable.  For this
  86.  * reason, the routine decreases its internal estimate of how many
  87.  * bits of "true randomness" are contained in the entropy pool as it
  88.  * outputs random numbers.
  89.  *
  90.  * If this estimate goes to zero, the routine can still generate
  91.  * random numbers; however, an attacker may (at least in theory) be
  92.  * able to infer the future output of the generator from prior
  93.  * outputs.  This requires successful cryptanalysis of SHA, which is
  94.  * not believed to be feasible, but there is a remote possibility.
  95.  * Nonetheless, these numbers should be useful for the vast majority
  96.  * of purposes.
  97.  *
  98.  * Exported interfaces ---- output
  99.  * ===============================
  100.  *
  101.  * There are three exported interfaces; the first is one designed to
  102.  * be used from within the kernel:
  103.  *
  104.  *  void get_random_bytes(void *buf, int nbytes);
  105.  *
  106.  * This interface will return the requested number of random bytes,
  107.  * and place it in the requested buffer.
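 *
 * As an illustrative sketch (not taken from this file; "session_key" is
 * just a name chosen for the example), an in-kernel caller might fill a
 * buffer like this:
 *
 *  u8 session_key[16];
 *
 *  get_random_bytes(session_key, sizeof(session_key));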
  108.  *
  109.  * The two other interfaces are two character devices /dev/random and
  110.  * /dev/urandom.  /dev/random is suitable for use when very high
  111.  * quality randomness is desired (for example, for key generation or
  112.  * one-time pads), as it will only return a maximum of the number of
  113.  * bits of randomness (as estimated by the random number generator)
  114.  * contained in the entropy pool.
  115.  *
  116.  * The /dev/urandom device does not have this limit, and will return
  117.  * as many bytes as are requested.  As more and more random bytes are
  118.  * requested without giving time for the entropy pool to recharge,
  119.  * this will result in random numbers that are merely cryptographically
  120.  * strong.  For many applications, however, this is acceptable.
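 *
 * A minimal user-space sketch of reading one of these devices
 * (illustrative only; error handling is trimmed, the 16-byte buffer is
 * arbitrary, and a robust reader should also cope with short reads):
 *
 *  #include <fcntl.h>
 *  #include <unistd.h>
 *
 *  unsigned char buf[16];
 *  int fd = open("/dev/urandom", O_RDONLY);
 *  if (fd >= 0) {
 *      read(fd, buf, sizeof(buf));
 *      close(fd);
 *  }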
  121.  *
  122.  * Exported interfaces ---- input
  123.  * ==============================
  124.  *
  125.  * The current exported interfaces for gathering environmental noise
  126.  * from the devices are:
  127.  *
  128.  *  void add_input_randomness(unsigned int type, unsigned int code,
  129.  *                                unsigned int value);
  130.  *  void add_interrupt_randomness(int irq);
  131.  *  void add_disk_randomness(struct gendisk *disk);
  132.  *
  133.  * add_input_randomness() uses the input layer interrupt timing, as well as
  134.  * the event type information from the hardware.
  135.  *
  136.  * add_interrupt_randomness() uses the inter-interrupt timing as random
  137.  * inputs to the entropy pool.  Note that not all interrupts are good
  138.  * sources of randomness!  For example, the timer interrupt is not a
  139.  * good choice, because the periodicity of the interrupts is too
  140.  * regular, and hence predictable to an attacker.  Network Interface
  141.  * Controller interrupts are a better measure, since the timing of the
  142.  * NIC interrupts is more unpredictable.
  143.  *
  144.  * add_disk_randomness() uses what amounts to the seek time of block
  145.  * layer request events, on a per-disk_devt basis, as input to the
  146.  * entropy pool. Note that high-speed solid state drives with very low
  147.  * seek times do not make for good sources of entropy, as their seek
  148.  * times are usually fairly consistent.
  149.  *
  150.  * All of these routines try to estimate how many bits of randomness a
  151.  * particular randomness source contributes.  They do this by tracking the
  152.  * first and second order deltas of the event timings.
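 *
 * A hedged worked example (timings invented for illustration): if an
 * event source fires at times 1000, 1100 and 1300 in some timer unit,
 * the first-order deltas are 100 and 200 and the second-order delta is
 * 100.  The smallest absolute delta is what finally gets credited, as
 * roughly min(fls(delta >> 1), 11) bits, so very regular timings, whose
 * higher-order deltas collapse towards zero, earn almost no credit.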
  153.  *
  154.  * Ensuring unpredictability at system startup
  155.  * ============================================
  156.  *
  157.  * When any operating system starts up, it will go through a sequence
  158.  * of actions that are fairly predictable by an adversary, especially
  159.  * if the start-up does not involve interaction with a human operator.
  160.  * This reduces the actual number of bits of unpredictability in the
  161.  * entropy pool below the value in entropy_count.  In order to
  162.  * counteract this effect, it helps to carry information in the
  163.  * entropy pool across shut-downs and start-ups.  To do this, put the
  164.  * following lines in an appropriate script which is run during the boot
  165.  * sequence:
  166.  *
  167.  *  echo "Initializing random number generator..."
  168.  *  random_seed=/var/run/random-seed
  169.  *  # Carry a random seed from start-up to start-up
  170.  *  # Load and then save the whole entropy pool
  171.  *  if [ -f $random_seed ]; then
  172.  *      cat $random_seed >/dev/urandom
  173.  *  else
  174.  *      touch $random_seed
  175.  *  fi
  176.  *  chmod 600 $random_seed
  177.  *  dd if=/dev/urandom of=$random_seed count=1 bs=512
  178.  *
  179.  * and the following lines in an appropriate script which is run as
  180.  * the system is shut down:
  181.  *
  182.  *  # Carry a random seed from shut-down to start-up
  183.  *  # Save the whole entropy pool
  184.  *  echo "Saving random seed..."
  185.  *  random_seed=/var/run/random-seed
  186.  *  touch $random_seed
  187.  *  chmod 600 $random_seed
  188.  *  dd if=/dev/urandom of=$random_seed count=1 bs=512
  189.  *
  190.  * For example, on most modern systems using the System V init
  191.  * scripts, such code fragments would be found in
  192.  * /etc/rc.d/init.d/random.  On older Linux systems, the correct script
  193.  * location might be in /etc/rcb.d/rc.local or /etc/rc.d/rc.0.
  194.  *
  195.  * Effectively, these commands cause the contents of the entropy pool
  196.  * to be saved at shut-down time and reloaded into the entropy pool at
  197.  * start-up.  (The 'dd' in the addition to the bootup script is to
  198.  * make sure that /etc/random-seed is different for every start-up,
  199.  * even if the system crashes without executing rc.0.)  Even with
  200.  * complete knowledge of the start-up activities, predicting the state
  201.  * of the entropy pool requires knowledge of the previous history of
  202.  * the system.
  203.  *
  204.  * Configuring the /dev/random driver under Linux
  205.  * ==============================================
  206.  *
  207.  * The /dev/random driver under Linux uses minor numbers 8 and 9 of
  208.  * the /dev/mem major number (#1).  So if your system does not have
  209.  * /dev/random and /dev/urandom created already, they can be created
  210.  * by using the commands:
  211.  *
  212.  *  mknod /dev/random c 1 8
  213.  *  mknod /dev/urandom c 1 9
  214.  *
  215.  * Acknowledgements:
  216.  * =================
  217.  *
  218.  * Ideas for constructing this random number generator were derived
  219.  * from Pretty Good Privacy's random number generator, and from private
  220.  * discussions with Phil Karn.  Colin Plumb provided a faster random
  221.  * number generator, which sped up the mixing function of the entropy
  222.  * pool, taken from PGPfone.  Dale Worley has also contributed many
  223.  * useful ideas and suggestions to improve this driver.
  224.  *
  225.  * Any flaws in the design are solely my responsibility, and should
  226.  * not be attributed to Phil, Colin, or any of the authors of PGP.
  227.  *
  228.  * Further background information on this topic may be obtained from
  229.  * RFC 1750, "Randomness Recommendations for Security", by Donald
  230.  * Eastlake, Steve Crocker, and Jeff Schiller.
  231.  */
  232.  
  233. #include <linux/utsname.h>
  234. #include <linux/module.h>
  235. #include <linux/kernel.h>
  236. #include <linux/major.h>
  237. #include <linux/string.h>
  238. #include <linux/fcntl.h>
  239. #include <linux/slab.h>
  240. #include <linux/random.h>
  241. #include <linux/poll.h>
  242. #include <linux/init.h>
  243. #include <linux/fs.h>
  244. #include <linux/genhd.h>
  245. #include <linux/interrupt.h>
  246. #include <linux/mm.h>
  247. #include <linux/spinlock.h>
  248. #include <linux/percpu.h>
  249. #include <linux/cryptohash.h>
  250. #include <linux/fips.h>
  251.  
  252. #ifdef CONFIG_GENERIC_HARDIRQS
  253. # include <linux/irq.h>
  254. #endif
  255.  
  256. #include <asm/processor.h>
  257. #include <asm/uaccess.h>
  258. #include <asm/irq.h>
  259. #include <asm/io.h>
  260.  
  261. /*
  262.  * Configuration information
  263.  */
  264. #define INPUT_POOL_WORDS 128
  265. #define OUTPUT_POOL_WORDS 32
  266. #define SEC_XFER_SIZE 512
  267. #define EXTRACT_SIZE 10
  268.  
  269. /*
  270.  * The minimum number of bits of entropy before we wake up a read on
  271.  * /dev/random.  Should be enough to do a significant reseed.
  272.  */
  273. static int random_read_wakeup_thresh = 64;
  274.  
  275. /*
  276.  * If the entropy count falls under this number of bits, then we
  277.  * should wake up processes which are selecting or polling on write
  278.  * access to /dev/random.
  279.  */
  280. static int random_write_wakeup_thresh = 128;
  281.  
  282. /*
  283.  * When the input pool goes over trickle_thresh, start dropping most
  284.  * samples to avoid wasting CPU time and reduce lock contention.
  285.  */
  286.  
  287. static int trickle_thresh __read_mostly = INPUT_POOL_WORDS * 28;
  288.  
  289. static DEFINE_PER_CPU(int, trickle_count);
  290.  
  291. /*
  292.  * A pool of size .poolwords is stirred with a primitive polynomial
  293.  * of degree .poolwords over GF(2).  The taps for various sizes are
  294.  * defined below.  They are chosen to be evenly spaced (minimum RMS
  295.  * distance from evenly spaced; the numbers in the comments are a
  296.  * scaled squared error sum) except for the last tap, which is 1 to
  297.  * get the twisting happening as fast as possible.
  298.  */
  299. static struct poolinfo {
  300.     int poolwords;
  301.     int tap1, tap2, tap3, tap4, tap5;
  302. } poolinfo_table[] = {
  303.     /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
  304.     { 128,  103,    76, 51, 25, 1 },
  305.     /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
  306.     { 32,   26, 20, 14, 7,  1 },
  307. #if 0
  308.     /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1  -- 115 */
  309.     { 2048, 1638,   1231,   819,    411,    1 },
  310.  
  311.     /* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 -- 290 */
  312.     { 1024, 817,    615,    412,    204,    1 },
  313.  
  314.     /* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 -- 115 */
  315.     { 1024, 819,    616,    410,    207,    2 },
  316.  
  317.     /* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */
  318.     { 512,  411,    308,    208,    104,    1 },
  319.  
  320.     /* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 -- 95 */
  321.     { 512,  409,    307,    206,    102,    2 },
  322.     /* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 -- 95 */
  323.     { 512,  409,    309,    205,    103,    2 },
  324.  
  325.     /* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 -- 125 */
  326.     { 256,  205,    155,    101,    52, 1 },
  327.  
  328.     /* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 -- 70 */
  329.     { 128,  103,    78, 51, 27, 2 },
  330.  
  331.     /* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 -- 15 */
  332.     { 64,   52, 39, 26, 14, 1 },
  333. #endif
  334. };
  335.  
  336. #define POOLBITS    poolwords*32
  337. #define POOLBYTES   poolwords*4
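/*
 * Note: these two helpers are plain textual macros; they are expanded
 * against a struct poolinfo, so r->poolinfo->POOLBITS reads as
 * r->poolinfo->poolwords*32 (pool size in bits) and POOLBYTES as
 * poolwords*4 (pool size in bytes).
 */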
  338.  
  339. /*
  340.  * For the purposes of better mixing, we use the CRC-32 polynomial as
  341.  * well to make a twisted Generalized Feedback Shift Register
  342.  *
  343.  * (See M. Matsumoto & Y. Kurita, 1992.  Twisted GFSR generators.  ACM
  344.  * Transactions on Modeling and Computer Simulation 2(3):179-194.
  345.  * Also see M. Matsumoto & Y. Kurita, 1994.  Twisted GFSR generators
  346.  * II.  ACM Transactions on Modeling and Computer Simulation 4:254-266)
  347.  *
  348.  * Thanks to Colin Plumb for suggesting this.
  349.  *
  350.  * We have not analyzed the resultant polynomial to prove it primitive;
  351.  * in fact it almost certainly isn't.  Nonetheless, the irreducible factors
  352.  * of a random large-degree polynomial over GF(2) are more than large enough
  353.  * that periodicity is not a concern.
  354.  *
  355.  * The input hash is much less sensitive than the output hash.  All
  356.  * that we want of it is that it be a good non-cryptographic hash;
  357.  * i.e. it not produce collisions when fed "random" data of the sort
  358.  * we expect to see.  As long as the pool state differs for different
  359.  * inputs, we have preserved the input entropy and done a good job.
  360.  * The fact that an intelligent attacker can construct inputs that
  361.  * will produce controlled alterations to the pool's state is not
  362.  * important because we don't consider such inputs to contribute any
  363.  * randomness.  The only property we need with respect to them is that
  364.  * the attacker can't increase his/her knowledge of the pool's state.
  365.  * Since all additions are reversible (knowing the final state and the
  366.  * input, you can reconstruct the initial state), if an attacker has
  367.  * any uncertainty about the initial state, he/she can only shuffle
  368.  * that uncertainty about, but never cause any collisions (which would
  369.  * decrease the uncertainty).
  370.  *
  371.  * The chosen system lets the state of the pool be (essentially) the input
  372.  * modulo the generator polynomial.  Now, for random primitive polynomials,
  373.  * this is a universal class of hash functions, meaning that the chance
  374.  * of a collision is limited by the attacker's knowledge of the generator
  375.  * polynomial, so if it is chosen at random, an attacker can never force
  376.  * a collision.  Here, we use a fixed polynomial, but we *can* assume that
  377.  * ###--> it is unknown to the processes generating the input entropy. <-###
  378.  * Because of this important property, this is a good, collision-resistant
  379.  * hash; hash collisions will occur no more often than chance.
  380.  */
  381.  
  382. /*
  383.  * Static global variables
  384.  */
  385. static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
  386. static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
  387. static struct fasync_struct *fasync;
  388.  
  389. #if 0
  390. static int debug;
  391. module_param(debug, bool, 0644);
  392. #define DEBUG_ENT(fmt, arg...) do { \
  393.     if (debug) \
  394.         printk(KERN_DEBUG "random %04d %04d %04d: " \
  395.         fmt,\
  396.         input_pool.entropy_count,\
  397.         blocking_pool.entropy_count,\
  398.         nonblocking_pool.entropy_count,\
  399.         ## arg); } while (0)
  400. #else
  401. #define DEBUG_ENT(fmt, arg...) do {} while (0)
  402. #endif
  403.  
  404. /**********************************************************************
  405.  *
  406.  * OS independent entropy store.   Here are the functions which handle
  407.  * storing entropy in an entropy pool.
  408.  *
  409.  **********************************************************************/
  410.  
  411. struct entropy_store;
  412. struct entropy_store {
  413.     /* read-only data: */
  414.     struct poolinfo *poolinfo;
  415.     __u32 *pool;
  416.     const char *name;
  417.     struct entropy_store *pull;
  418.     int limit;
  419.  
  420.     /* read-write data: */
  421.     spinlock_t lock;
  422.     unsigned add_ptr;
  423.     int entropy_count;
  424.     int input_rotate;
  425.     __u8 last_data[EXTRACT_SIZE];
  426. };
  427.  
  428. static __u32 input_pool_data[INPUT_POOL_WORDS];
  429. static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
  430. static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
  431.  
  432. static struct entropy_store input_pool = {
  433.     .poolinfo = &poolinfo_table[0],
  434.     .name = "input",
  435.     .limit = 1,
  436.     .lock = __SPIN_LOCK_UNLOCKED(&input_pool.lock),
  437.     .pool = input_pool_data
  438. };
  439.  
  440. static struct entropy_store blocking_pool = {
  441.     .poolinfo = &poolinfo_table[1],
  442.     .name = "blocking",
  443.     .limit = 1,
  444.     .pull = &input_pool,
  445.     .lock = __SPIN_LOCK_UNLOCKED(&blocking_pool.lock),
  446.     .pool = blocking_pool_data
  447. };
  448.  
  449. static struct entropy_store nonblocking_pool = {
  450.     .poolinfo = &poolinfo_table[1],
  451.     .name = "nonblocking",
  452.     .pull = &input_pool,
  453.     .lock = __SPIN_LOCK_UNLOCKED(&nonblocking_pool.lock),
  454.     .pool = nonblocking_pool_data
  455. };
  456.  
  457. /*
  458.  * This function adds bytes into the entropy "pool".  It does not
  459.  * update the entropy estimate.  The caller should call
  460.  * credit_entropy_bits if this is appropriate.
  461.  *
  462.  * The pool is stirred with a primitive polynomial of the appropriate
  463.  * degree, and then twisted.  We twist by three bits at a time because
  464.  * it's cheap to do so and helps slightly in the expected case where
  465.  * the entropy is concentrated in the low-order bits.
  466.  */
  467. static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
  468.                    int nbytes, __u8 out[64])
  469. {
  470.     static __u32 const twist_table[8] = {
  471.         0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
  472.         0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
  473.     unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
  474.     int input_rotate;
  475.     int wordmask = r->poolinfo->poolwords - 1;
  476.     const char *bytes = in;
  477.     __u32 w;
  478.     unsigned long flags;
  479.  
  480.     /* Taps are constant, so we can load them without holding r->lock.  */
  481.     tap1 = r->poolinfo->tap1;
  482.     tap2 = r->poolinfo->tap2;
  483.     tap3 = r->poolinfo->tap3;
  484.     tap4 = r->poolinfo->tap4;
  485.     tap5 = r->poolinfo->tap5;
  486.  
  487.     spin_lock_irqsave(&r->lock, flags);
  488.     input_rotate = r->input_rotate;
  489.     i = r->add_ptr;
  490.  
  491.     /* mix one byte at a time to simplify size handling and churn faster */
  492.     while (nbytes--) {
  493.         w = rol32(*bytes++, input_rotate & 31);
  494.         i = (i - 1) & wordmask;
  495.  
  496.         /* XOR in the various taps */
  497.         w ^= r->pool[i];
  498.         w ^= r->pool[(i + tap1) & wordmask];
  499.         w ^= r->pool[(i + tap2) & wordmask];
  500.         w ^= r->pool[(i + tap3) & wordmask];
  501.         w ^= r->pool[(i + tap4) & wordmask];
  502.         w ^= r->pool[(i + tap5) & wordmask];
  503.  
  504.         /* Mix the result back in with a twist */
  505.         r->pool[i] = (w >> 3) ^ twist_table[w & 7];
  506.  
  507.         /*
  508.          * Normally, we add 7 bits of rotation to the pool.
  509.          * At the beginning of the pool, add an extra 7 bits
  510.          * rotation, so that successive passes spread the
  511.          * input bits across the pool evenly.
  512.          */
  513.         input_rotate += i ? 7 : 14;
  514.     }
  515.  
  516.     r->input_rotate = input_rotate;
  517.     r->add_ptr = i;
  518.  
  519.     if (out)
  520.         for (j = 0; j < 16; j++)
  521.             ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
  522.  
  523.     spin_unlock_irqrestore(&r->lock, flags);
  524. }
  525.  
  526. static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
  527. {
  528.        mix_pool_bytes_extract(r, in, bytes, NULL);
  529. }
  530.  
  531. /*
  532.  * Credit (or debit) the entropy store with n bits of entropy
  533.  */
  534. static void credit_entropy_bits(struct entropy_store *r, int nbits)
  535. {
  536.     unsigned long flags;
  537.     int entropy_count;
  538.  
  539.     if (!nbits)
  540.         return;
  541.  
  542.     spin_lock_irqsave(&r->lock, flags);
  543.  
  544.     DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
  545.     entropy_count = r->entropy_count;
  546.     entropy_count += nbits;
  547.     if (entropy_count < 0) {
  548.         DEBUG_ENT("negative entropy/overflow\n");
  549.         entropy_count = 0;
  550.     } else if (entropy_count > r->poolinfo->POOLBITS)
  551.         entropy_count = r->poolinfo->POOLBITS;
  552.     r->entropy_count = entropy_count;
  553.  
  554.     /* should we wake readers? */
  555.     if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
  556.         wake_up_interruptible(&random_read_wait);
  557.         kill_fasync(&fasync, SIGIO, POLL_IN);
  558.     }
  559.     spin_unlock_irqrestore(&r->lock, flags);
  560. }
  561.  
  562. /*********************************************************************
  563.  *
  564.  * Entropy input management
  565.  *
  566.  *********************************************************************/
  567.  
  568. /* There is one of these per entropy source */
  569. struct timer_rand_state {
  570.     cycles_t last_time;
  571.     long last_delta, last_delta2;
  572.     unsigned dont_count_entropy:1;
  573. };
  574.  
  575. #ifndef CONFIG_GENERIC_HARDIRQS
  576.  
  577. static struct timer_rand_state *irq_timer_state[NR_IRQS];
  578.  
  579. static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
  580. {
  581.     return irq_timer_state[irq];
  582. }
  583.  
  584. static void set_timer_rand_state(unsigned int irq,
  585.                  struct timer_rand_state *state)
  586. {
  587.     irq_timer_state[irq] = state;
  588. }
  589.  
  590. #else
  591.  
  592. static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
  593. {
  594.     struct irq_desc *desc;
  595.  
  596.     desc = irq_to_desc(irq);
  597.  
  598.     return desc->timer_rand_state;
  599. }
  600.  
  601. static void set_timer_rand_state(unsigned int irq,
  602.                  struct timer_rand_state *state)
  603. {
  604.     struct irq_desc *desc;
  605.  
  606.     desc = irq_to_desc(irq);
  607.  
  608.     desc->timer_rand_state = state;
  609. }
  610. #endif
  611.  
  612. static struct timer_rand_state input_timer_state;
  613.  
  614. /*
  615.  * This function adds entropy to the entropy "pool" by using timing
  616.  * delays.  It uses the timer_rand_state structure to make an estimate
  617.  * of how many bits of entropy this call has added to the pool.
  618.  *
  619.  * The number "num" is also added to the pool - it should somehow describe
  620.  * the type of event which just happened.  This is currently 0-255 for
  621.  * keyboard scan codes, and 256 upwards for interrupts.
  622.  *
  623.  */
  624. static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
  625. {
  626.     struct {
  627.         cycles_t cycles;
  628.         long jiffies;
  629.         unsigned num;
  630.     } sample;
  631.     long delta, delta2, delta3;
  632.  
  633.     preempt_disable();
  634.     /* if over the trickle threshold, use only 1 in 4096 samples */
  635.     if (input_pool.entropy_count > trickle_thresh &&
  636.         ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff))
  637.         goto out;
  638.  
  639.     sample.jiffies = jiffies;
  640.     sample.cycles = get_cycles();
  641.     sample.num = num;
  642.     mix_pool_bytes(&input_pool, &sample, sizeof(sample));
  643.  
  644.     /*
  645.      * Calculate number of bits of randomness we probably added.
  646.      * We take into account the first, second and third-order deltas
  647.      * in order to make our estimate.
  648.      */
  649.  
  650.     if (!state->dont_count_entropy) {
  651.         delta = sample.jiffies - state->last_time;
  652.         state->last_time = sample.jiffies;
  653.  
  654.         delta2 = delta - state->last_delta;
  655.         state->last_delta = delta;
  656.  
  657.         delta3 = delta2 - state->last_delta2;
  658.         state->last_delta2 = delta2;
  659.  
  660.         if (delta < 0)
  661.             delta = -delta;
  662.         if (delta2 < 0)
  663.             delta2 = -delta2;
  664.         if (delta3 < 0)
  665.             delta3 = -delta3;
  666.         if (delta > delta2)
  667.             delta = delta2;
  668.         if (delta > delta3)
  669.             delta = delta3;
  670.  
  671.         /*
  672.          * delta is now minimum absolute delta.
  673.          * Round down by 1 bit on general principles,
  674.          * and limit entropy estimate to 12 bits.
  675.          */
  676.         credit_entropy_bits(&input_pool,
  677.                     min_t(int, fls(delta>>1), 11));
  678.     }
  679. out:
  680.     preempt_enable();
  681. }
  682.  
  683. void add_input_randomness(unsigned int type, unsigned int code,
  684.                  unsigned int value)
  685. {
  686.     static unsigned char last_value;
  687.  
  688.     /* ignore autorepeat and the like */
  689.     if (value == last_value)
  690.         return;
  691.  
  692.     DEBUG_ENT("input event\n");
  693.     last_value = value;
  694.     add_timer_randomness(&input_timer_state,
  695.                  (type << 4) ^ code ^ (code >> 4) ^ value);
  696. }
  697. EXPORT_SYMBOL_GPL(add_input_randomness);
  698.  
  699. void add_interrupt_randomness(int irq)
  700. {
  701.     struct timer_rand_state *state;
  702.  
  703.     state = get_timer_rand_state(irq);
  704.  
  705.     if (state == NULL)
  706.         return;
  707.  
  708.     DEBUG_ENT("irq event %d\n", irq);
  709.     add_timer_randomness(state, 0x100 + irq);
  710. }
  711.  
  712. #ifdef CONFIG_BLOCK
  713. void add_disk_randomness(struct gendisk *disk)
  714. {
  715.     if (!disk || !disk->random)
  716.         return;
  717.     /* first major is 1, so we get >= 0x200 here */
  718.     DEBUG_ENT("disk event %d:%d\n",
  719.           MAJOR(disk_devt(disk)), MINOR(disk_devt(disk)));
  720.  
  721.     add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
  722. }
  723. #endif
  724.  
  725. /*********************************************************************
  726.  *
  727.  * Entropy extraction routines
  728.  *
  729.  *********************************************************************/
  730.  
  731. static ssize_t extract_entropy(struct entropy_store *r, void *buf,
  732.                    size_t nbytes, int min, int rsvd);
  733.  
  734. /*
  735.  * This utility inline function is responsible for transferring entropy
  736.  * from the primary pool to the secondary extraction pool. We make
  737.  * sure we pull enough for a 'catastrophic reseed'.
  738.  */
  739. static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
  740. {
  741.     __u32 tmp[OUTPUT_POOL_WORDS];
  742.  
  743.     if (r->pull && r->entropy_count < nbytes * 8 &&
  744.         r->entropy_count < r->poolinfo->POOLBITS) {
  745.         /* If we're limited, always leave two wakeups' worth of bits */
  746.         int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
  747.         int bytes = nbytes;
  748.  
  749.         /* pull at least enough bytes to cover the wakeup threshold (in bits) */
  750.         bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
  751.         /* but never more than the buffer size */
  752.         bytes = min_t(int, bytes, sizeof(tmp));
  753.  
  754.         DEBUG_ENT("going to reseed %s with %d bits "
  755.               "(%d of %d requested)\n",
  756.               r->name, bytes * 8, nbytes * 8, r->entropy_count);
  757.  
  758.         bytes = extract_entropy(r->pull, tmp, bytes,
  759.                     random_read_wakeup_thresh / 8, rsvd);
  760.         mix_pool_bytes(r, tmp, bytes);
  761.         credit_entropy_bits(r, bytes*8);
  762.     }
  763. }
  764.  
  765. /*
  766.  * This function extracts randomness from the "entropy pool", and
  767.  * returns it in a buffer.
  768.  *
  769.  * The min parameter specifies the minimum amount we can pull before
  770.  * failing to avoid races that defeat catastrophic reseeding while the
  771.  * reserved parameter indicates how much entropy we must leave in the
  772.  * pool after each pull to avoid starving other readers.
  773.  *
  774.  * Note: extract_entropy() assumes that .poolwords is a multiple of 16 words.
  775.  */
  776.  
  777. static size_t account(struct entropy_store *r, size_t nbytes, int min,
  778.               int reserved)
  779. {
  780.     unsigned long flags;
  781.  
  782.     /* Hold lock while accounting */
  783.     spin_lock_irqsave(&r->lock, flags);
  784.  
  785.     BUG_ON(r->entropy_count > r->poolinfo->POOLBITS);
  786.     DEBUG_ENT("trying to extract %d bits from %s\n",
  787.           nbytes * 8, r->name);
  788.  
  789.     /* Can we pull enough? */
  790.     if (r->entropy_count / 8 < min + reserved) {
  791.         nbytes = 0;
  792.     } else {
  793.         /* If limited, never pull more than available */
  794.         if (r->limit && nbytes + reserved >= r->entropy_count / 8)
  795.             nbytes = r->entropy_count/8 - reserved;
  796.  
  797.         if (r->entropy_count / 8 >= nbytes + reserved)
  798.             r->entropy_count -= nbytes*8;
  799.         else
  800.             r->entropy_count = reserved;
  801.  
  802.         if (r->entropy_count < random_write_wakeup_thresh) {
  803.             wake_up_interruptible(&random_write_wait);
  804.             kill_fasync(&fasync, SIGIO, POLL_OUT);
  805.         }
  806.     }
  807.  
  808.     DEBUG_ENT("debiting %d entropy credits from %s%s\n",
  809.           nbytes * 8, r->name, r->limit ? "" : " (unlimited)");
  810.  
  811.     spin_unlock_irqrestore(&r->lock, flags);
  812.  
  813.     return nbytes;
  814. }
  815.  
  816. static void extract_buf(struct entropy_store *r, __u8 *out)
  817. {
  818.     int i;
  819.     __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
  820.     __u8 extract[64];
  821.  
  822.     /* Generate a hash across the pool, 16 words (512 bits) at a time */
  823.     sha_init(hash);
  824.     for (i = 0; i < r->poolinfo->poolwords; i += 16)
  825.         sha_transform(hash, (__u8 *)(r->pool + i), workspace);
  826.  
  827.     /*
  828.      * We mix the hash back into the pool to prevent backtracking
  829.      * attacks (where the attacker knows the state of the pool
  830.      * plus the current outputs, and attempts to find previous
  831.      * outputs), unless the hash function can be inverted. By
  832.      * mixing at least a SHA1 worth of hash data back, we make
  833.      * brute-forcing the feedback as hard as brute-forcing the
  834.      * hash.
  835.      */
  836.     mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
  837.  
  838.     /*
  839.      * To avoid duplicates, we atomically extract a portion of the
  840.      * pool while mixing, and hash one final time.
  841.      */
  842.     sha_transform(hash, extract, workspace);
  843.     memset(extract, 0, sizeof(extract));
  844.     memset(workspace, 0, sizeof(workspace));
  845.  
  846.     /*
  847.      * In case the hash function has some recognizable output
  848.      * pattern, we fold it in half. Thus, we always feed back
  849.      * twice as much data as we output.
  850.      */
  851.     hash[0] ^= hash[3];
  852.     hash[1] ^= hash[4];
  853.     hash[2] ^= rol32(hash[2], 16);
  854.     memcpy(out, hash, EXTRACT_SIZE);
  855.     memset(hash, 0, sizeof(hash));
  856. }
  857.  
  858. static ssize_t extract_entropy(struct entropy_store *r, void *buf,
  859.                    size_t nbytes, int min, int reserved)
  860. {
  861.     ssize_t ret = 0, i;
  862.     __u8 tmp[EXTRACT_SIZE];
  863.     unsigned long flags;
  864.  
  865.     xfer_secondary_pool(r, nbytes);
  866.     nbytes = account(r, nbytes, min, reserved);
  867.  
  868.     while (nbytes) {
  869.         extract_buf(r, tmp);
  870.  
  871.         if (fips_enabled) {
  872.             spin_lock_irqsave(&r->lock, flags);
  873.             if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
  874.                 panic("Hardware RNG duplicated output!\n");
  875.             memcpy(r->last_data, tmp, EXTRACT_SIZE);
  876.             spin_unlock_irqrestore(&r->lock, flags);
  877.         }
  878.         i = min_t(int, nbytes, EXTRACT_SIZE);
  879.         memcpy(buf, tmp, i);
  880.         nbytes -= i;
  881.         buf += i;
  882.         ret += i;
  883.     }
  884.  
  885.     /* Wipe data just returned from memory */
  886.     memset(tmp, 0, sizeof(tmp));
  887.  
  888.     return ret;
  889. }
  890.  
  891. static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
  892.                     size_t nbytes)
  893. {
  894.     ssize_t ret = 0, i;
  895.     __u8 tmp[EXTRACT_SIZE];
  896.  
  897.     xfer_secondary_pool(r, nbytes);
  898.     nbytes = account(r, nbytes, 0, 0);
  899.  
  900.     while (nbytes) {
  901.         if (need_resched()) {
  902.             if (signal_pending(current)) {
  903.                 if (ret == 0)
  904.                     ret = -ERESTARTSYS;
  905.                 break;
  906.             }
  907.             schedule();
  908.         }
  909.  
  910.         extract_buf(r, tmp);
  911.         i = min_t(int, nbytes, EXTRACT_SIZE);
  912.         if (copy_to_user(buf, tmp, i)) {
  913.             ret = -EFAULT;
  914.             break;
  915.         }
  916.  
  917.         nbytes -= i;
  918.         buf += i;
  919.         ret += i;
  920.     }
  921.  
  922.     /* Wipe data just returned from memory */
  923.     memset(tmp, 0, sizeof(tmp));
  924.  
  925.     return ret;
  926. }
  927.  
  928. /*
  929.  * This function is the exported kernel interface.  It returns some
  930.  * number of good random numbers, suitable for seeding TCP sequence
  931.  * numbers, etc.
  932.  */
  933. void get_random_bytes(void *buf, int nbytes)
  934. {
  935.     char *p = buf;
  936.  
  937.     while (nbytes) {
  938.         unsigned long v;
  939.         int chunk = min(nbytes, (int)sizeof(unsigned long));
  940.        
  941.         if (!arch_get_random_long(&v))
  942.             break;
  943.        
  944.         memcpy(p, &v, chunk);
  945.         p += chunk;
  946.         nbytes -= chunk;
  947.     }
  948.  
  949.     extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
  950. }
  951. EXPORT_SYMBOL(get_random_bytes);
  952.  
  953. /*
  954.  * init_std_data - initialize pool with system data
  955.  *
  956.  * @r: pool to initialize
  957.  *
  958.  * This function clears the pool's entropy count and mixes some system
  959.  * data into the pool to prepare it for use. The pool is not cleared
  960.  * as that can only decrease the entropy in the pool.
  961.  */
  962. static void init_std_data(struct entropy_store *r)
  963. {
  964.     ktime_t now;
  965.     unsigned long flags;
  966.  
  967.     spin_lock_irqsave(&r->lock, flags);
  968.     r->entropy_count = 0;
  969.     spin_unlock_irqrestore(&r->lock, flags);
  970.  
  971.     now = ktime_get_real();
  972.     mix_pool_bytes(r, &now, sizeof(now));
  973.     mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
  974. }
  975.  
  976. static int rand_initialize(void)
  977. {
  978.     init_std_data(&input_pool);
  979.     init_std_data(&blocking_pool);
  980.     init_std_data(&nonblocking_pool);
  981.     return 0;
  982. }
  983. module_init(rand_initialize);
  984.  
  985. void rand_initialize_irq(int irq)
  986. {
  987.     struct timer_rand_state *state;
  988.  
  989.     state = get_timer_rand_state(irq);
  990.  
  991.     if (state)
  992.         return;
  993.  
  994.     /*
  995.      * If kzalloc returns null, we just won't use that entropy
  996.      * source.
  997.      */
  998.     state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
  999.     if (state)
  1000.         set_timer_rand_state(irq, state);
  1001. }
  1002.  
  1003. #ifdef CONFIG_BLOCK
  1004. void rand_initialize_disk(struct gendisk *disk)
  1005. {
  1006.     struct timer_rand_state *state;
  1007.  
  1008.     /*
  1009.      * If kzalloc returns null, we just won't use that entropy
  1010.      * source.
  1011.      */
  1012.     state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
  1013.     if (state)
  1014.         disk->random = state;
  1015. }
  1016. #endif
  1017.  
  1018. static ssize_t
  1019. random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
  1020. {
  1021.     ssize_t n, retval = 0, count = 0;
  1022.  
  1023.     if (nbytes == 0)
  1024.         return 0;
  1025.  
  1026.     while (nbytes > 0) {
  1027.         n = nbytes;
  1028.         if (n > SEC_XFER_SIZE)
  1029.             n = SEC_XFER_SIZE;
  1030.  
  1031.         DEBUG_ENT("reading %d bits\n", n*8);
  1032.  
  1033.         n = extract_entropy_user(&blocking_pool, buf, n);
  1034.  
  1035.         DEBUG_ENT("read got %d bits (%d still needed)\n",
  1036.               n*8, (nbytes-n)*8);
  1037.  
  1038.         if (n == 0) {
  1039.             if (file->f_flags & O_NONBLOCK) {
  1040.                 retval = -EAGAIN;
  1041.                 break;
  1042.             }
  1043.  
  1044.             DEBUG_ENT("sleeping?\n");
  1045.  
  1046.             wait_event_interruptible(random_read_wait,
  1047.                 input_pool.entropy_count >=
  1048.                          random_read_wakeup_thresh);
  1049.  
  1050.             DEBUG_ENT("awake\n");
  1051.  
  1052.             if (signal_pending(current)) {
  1053.                 retval = -ERESTARTSYS;
  1054.                 break;
  1055.             }
  1056.  
  1057.             continue;
  1058.         }
  1059.  
  1060.         if (n < 0) {
  1061.             retval = n;
  1062.             break;
  1063.         }
  1064.         count += n;
  1065.         buf += n;
  1066.         nbytes -= n;
  1067.         break;      /* This break makes the device work */
  1068.                 /* like a named pipe */
  1069.     }
  1070.  
  1071.     return (count ? count : retval);
  1072. }
  1073.  
  1074. static ssize_t
  1075. urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
  1076. {
  1077.     return extract_entropy_user(&nonblocking_pool, buf, nbytes);
  1078. }
  1079.  
  1080. static unsigned int
  1081. random_poll(struct file *file, poll_table * wait)
  1082. {
  1083.     unsigned int mask;
  1084.  
  1085.     poll_wait(file, &random_read_wait, wait);
  1086.     poll_wait(file, &random_write_wait, wait);
  1087.     mask = 0;
  1088.     if (input_pool.entropy_count >= random_read_wakeup_thresh)
  1089.         mask |= POLLIN | POLLRDNORM;
  1090.     if (input_pool.entropy_count < random_write_wakeup_thresh)
  1091.         mask |= POLLOUT | POLLWRNORM;
  1092.     return mask;
  1093. }
  1094.  
  1095. static int
  1096. write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
  1097. {
  1098.     size_t bytes;
  1099.     __u32 buf[16];
  1100.     const char __user *p = buffer;
  1101.  
  1102.     while (count > 0) {
  1103.         bytes = min(count, sizeof(buf));
  1104.         if (copy_from_user(&buf, p, bytes))
  1105.             return -EFAULT;
  1106.  
  1107.         count -= bytes;
  1108.         p += bytes;
  1109.  
  1110.         mix_pool_bytes(r, buf, bytes);
  1111.         cond_resched();
  1112.     }
  1113.  
  1114.     return 0;
  1115. }
  1116.  
  1117. static ssize_t random_write(struct file *file, const char __user *buffer,
  1118.                 size_t count, loff_t *ppos)
  1119. {
  1120.     size_t ret;
  1121.  
  1122.     ret = write_pool(&blocking_pool, buffer, count);
  1123.     if (ret)
  1124.         return ret;
  1125.     ret = write_pool(&nonblocking_pool, buffer, count);
  1126.     if (ret)
  1127.         return ret;
  1128.  
  1129.     return (ssize_t)count;
  1130. }
  1131.  
  1132. static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
  1133. {
  1134.     int size, ent_count;
  1135.     int __user *p = (int __user *)arg;
  1136.     int retval;
  1137.  
  1138.     switch (cmd) {
  1139.     case RNDGETENTCNT:
  1140.         /* inherently racy, no point locking */
  1141.         if (put_user(input_pool.entropy_count, p))
  1142.             return -EFAULT;
  1143.         return 0;
  1144.     case RNDADDTOENTCNT:
  1145.         if (!capable(CAP_SYS_ADMIN))
  1146.             return -EPERM;
  1147.         if (get_user(ent_count, p))
  1148.             return -EFAULT;
  1149.         credit_entropy_bits(&input_pool, ent_count);
  1150.         return 0;
  1151.     case RNDADDENTROPY:
  1152.         if (!capable(CAP_SYS_ADMIN))
  1153.             return -EPERM;
  1154.         if (get_user(ent_count, p++))
  1155.             return -EFAULT;
  1156.         if (ent_count < 0)
  1157.             return -EINVAL;
  1158.         if (get_user(size, p++))
  1159.             return -EFAULT;
  1160.         retval = write_pool(&input_pool, (const char __user *)p,
  1161.                     size);
  1162.         if (retval < 0)
  1163.             return retval;
  1164.         credit_entropy_bits(&input_pool, ent_count);
  1165.         return 0;
  1166.     case RNDZAPENTCNT:
  1167.     case RNDCLEARPOOL:
  1168.         /* Clear the entropy pool counters. */
  1169.         if (!capable(CAP_SYS_ADMIN))
  1170.             return -EPERM;
  1171.         rand_initialize();
  1172.         return 0;
  1173.     default:
  1174.         return -EINVAL;
  1175.     }
  1176. }
  1177.  
  1178. static int random_fasync(int fd, struct file *filp, int on)
  1179. {
  1180.     return fasync_helper(fd, filp, on, &fasync);
  1181. }
  1182.  
  1183. const struct file_operations random_fops = {
  1184.     .read  = random_read,
  1185.     .write = random_write,
  1186.     .poll  = random_poll,
  1187.     .unlocked_ioctl = random_ioctl,
  1188.     .fasync = random_fasync,
  1189.     .llseek = noop_llseek,
  1190. };
  1191.  
  1192. const struct file_operations urandom_fops = {
  1193.     .read  = urandom_read,
  1194.     .write = random_write,
  1195.     .unlocked_ioctl = random_ioctl,
  1196.     .fasync = random_fasync,
  1197.     .llseek = noop_llseek,
  1198. };
  1199.  
  1200. /***************************************************************
  1201.  * Random UUID interface
  1202.  *
  1203.  * Used here for a Boot ID, but can be useful for other kernel
  1204.  * drivers.
  1205.  ***************************************************************/
  1206.  
  1207. /*
  1208.  * Generate random UUID
  1209.  */
  1210. void generate_random_uuid(unsigned char uuid_out[16])
  1211. {
  1212.     get_random_bytes(uuid_out, 16);
  1213.     /* Set UUID version to 4 --- truly random generation */
  1214.     uuid_out[6] = (uuid_out[6] & 0x0F) | 0x40;
  1215.     /* Set the UUID variant to DCE */
  1216.     uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80;
  1217. }
  1218. EXPORT_SYMBOL(generate_random_uuid);
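
/*
 * Illustrative sketch of a caller (not taken from this file; "uuid" is a
 * local buffer invented for the example):
 *
 *  unsigned char uuid[16];
 *
 *  generate_random_uuid(uuid);
 *  pr_info("generated id: %pU\n", uuid);
 */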
  1219.  
  1220. /********************************************************************
  1221.  *
  1222.  * Sysctl interface
  1223.  *
  1224.  ********************************************************************/
  1225.  
  1226. #ifdef CONFIG_SYSCTL
  1227.  
  1228. #include <linux/sysctl.h>
  1229.  
  1230. static int min_read_thresh = 8, min_write_thresh;
  1231. static int max_read_thresh = INPUT_POOL_WORDS * 32;
  1232. static int max_write_thresh = INPUT_POOL_WORDS * 32;
  1233. static char sysctl_bootid[16];
  1234.  
  1235. /*
  1236.  * This function is used to return both the bootid UUID and a random
  1237.  * UUID.  The difference is in whether table->data is NULL; if it is,
  1238.  * then a new UUID is generated and returned to the user.
  1239.  *
  1240.  * If the user accesses this via the proc interface, it will be returned
  1241.  * as an ASCII string in the standard UUID format.  If accessed via the
  1242.  * sysctl system call, it is returned as 16 bytes of binary data.
  1243.  */
  1244. static int proc_do_uuid(ctl_table *table, int write,
  1245.             void __user *buffer, size_t *lenp, loff_t *ppos)
  1246. {
  1247.     ctl_table fake_table;
  1248.     unsigned char buf[64], tmp_uuid[16], *uuid;
  1249.  
  1250.     uuid = table->data;
  1251.     if (!uuid) {
  1252.         uuid = tmp_uuid;
  1253.         uuid[8] = 0;
  1254.     }
  1255.     if (uuid[8] == 0)
  1256.         generate_random_uuid(uuid);
  1257.  
  1258.     sprintf(buf, "%pU", uuid);
  1259.  
  1260.     fake_table.data = buf;
  1261.     fake_table.maxlen = sizeof(buf);
  1262.  
  1263.     return proc_dostring(&fake_table, write, buffer, lenp, ppos);
  1264. }
  1265.  
  1266. static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
  1267. ctl_table random_table[] = {
  1268.     {
  1269.         .procname   = "poolsize",
  1270.         .data       = &sysctl_poolsize,
  1271.         .maxlen     = sizeof(int),
  1272.         .mode       = 0444,
  1273.         .proc_handler   = proc_dointvec,
  1274.     },
  1275.     {
  1276.         .procname   = "entropy_avail",
  1277.         .maxlen     = sizeof(int),
  1278.         .mode       = 0444,
  1279.         .proc_handler   = proc_dointvec,
  1280.         .data       = &input_pool.entropy_count,
  1281.     },
  1282.     {
  1283.         .procname   = "read_wakeup_threshold",
  1284.         .data       = &random_read_wakeup_thresh,
  1285.         .maxlen     = sizeof(int),
  1286.         .mode       = 0644,
  1287.         .proc_handler   = proc_dointvec_minmax,
  1288.         .extra1     = &min_read_thresh,
  1289.         .extra2     = &max_read_thresh,
  1290.     },
  1291.     {
  1292.         .procname   = "write_wakeup_threshold",
  1293.         .data       = &random_write_wakeup_thresh,
  1294.         .maxlen     = sizeof(int),
  1295.         .mode       = 0644,
  1296.         .proc_handler   = proc_dointvec_minmax,
  1297.         .extra1     = &min_write_thresh,
  1298.         .extra2     = &max_write_thresh,
  1299.     },
  1300.     {
  1301.         .procname   = "boot_id",
  1302.         .data       = &sysctl_bootid,
  1303.         .maxlen     = 16,
  1304.         .mode       = 0444,
  1305.         .proc_handler   = proc_do_uuid,
  1306.     },
  1307.     {
  1308.         .procname   = "uuid",
  1309.         .maxlen     = 16,
  1310.         .mode       = 0444,
  1311.         .proc_handler   = proc_do_uuid,
  1312.     },
  1313.     { }
  1314. };
  1315. #endif  /* CONFIG_SYSCTL */
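
/*
 * Usage note (a sketch of the usual setup, not something this file
 * registers itself): when CONFIG_SYSCTL is enabled this table is normally
 * exposed under /proc/sys/kernel/random/, so reading "entropy_avail"
 * reports input_pool.entropy_count and writing "write_wakeup_threshold"
 * adjusts random_write_wakeup_thresh.
 */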
  1316.  
  1317. static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
  1318.  
  1319. static int __init random_int_secret_init(void)
  1320. {
  1321.     get_random_bytes(random_int_secret, sizeof(random_int_secret));
  1322.     return 0;
  1323. }
  1324. late_initcall(random_int_secret_init);
  1325.  
  1326. /*
  1327.  * Get a random word for internal kernel use only. Similar to urandom but
  1328.  * with the goal of minimal entropy pool depletion. As a result, the random
  1329.  * value is not cryptographically secure but for several uses the cost of
  1330.  * depleting entropy is too high.
  1331.  */
  1332. DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
  1333. unsigned int get_random_int(void)
  1334. {
  1335.     __u32 *hash;
  1336.     unsigned int ret;
  1337.  
  1338.     if (arch_get_random_int(&ret))
  1339.         return ret;
  1340.  
  1341.     hash = get_cpu_var(get_random_int_hash);
  1342.  
  1343.     hash[0] += current->pid + jiffies + get_cycles();
  1344.     md5_transform(hash, random_int_secret);
  1345.     ret = hash[0];
  1346.     put_cpu_var(get_random_int_hash);
  1347.  
  1348.     return ret;
  1349. }
  1350.  
  1351. /*
  1352.  * randomize_range() returns a start address such that
  1353.  *
  1354.  *    [...... <range> .....]
  1355.  *  start                  end
  1356.  *
  1357.  * a <range> with size "len" starting at the return value is inside the
  1358.  * area defined by [start, end], but is otherwise randomized.
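 *
 * A hedged worked example (values invented for illustration): with
 * start = 0x10000, end = 0x30000 and len = 0x1000, range becomes
 * end - len - start = 0x1f000, so the page-aligned result falls within
 * [0x10000, 0x2f000] and the len-sized range still fits below end.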
  1359.  */
  1360. unsigned long
  1361. randomize_range(unsigned long start, unsigned long end, unsigned long len)
  1362. {
  1363.     unsigned long range = end - len - start;
  1364.  
  1365.     if (end <= start + len)
  1366.         return 0;
  1367.     return PAGE_ALIGN(get_random_int() % range + start);
  1368. }