cregit-Linux how code gets into the kernel

Release 4.11 drivers/char/hw_random/core.c

/*
 * hw_random/core.c: HWRNG core API
 *
 * Copyright 2006 Michael Buesch <m@bues.ch>
 * Copyright 2005 (c) MontaVista Software, Inc.
 *
 * Please read Documentation/hw_random.txt for details on use.
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>


#define RNG_MODULE_NAME		"hw_random"


/* Currently selected RNG provider; protected by rng_mutex. */
static struct hwrng *current_rng;

/* khwrngd task feeding the input pool, or NULL when not running. */
static struct task_struct *hwrng_fill;
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);

/* Bytes left over in rng_buffer from the last hardware read. */
static int data_avail;

/* Staging buffers: rng_buffer for /dev/hwrng reads, rng_fillbuf for khwrngd. */
static u8 *rng_buffer, *rng_fillbuf;

/* Entropy estimate (per mille) of the active device. */
static unsigned short current_quality;

static unsigned short default_quality; /* = 0; default to "off" */

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
		 "current hwrng entropy estimation per mill");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
		 "default entropy content of hwrng per mill");

static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static void start_khwrngd(void);

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait);


/*
 * Size of the RNG staging buffers: at least 32 bytes, rounded up to a
 * full cache line where the cache line is larger.
 */
static size_t rng_buffer_size(void)
{
	return (SMP_CACHE_BYTES < 32) ? 32 : SMP_CACHE_BYTES;
}

Contributors

PersonTokensPropCommitsCommitProp
Rusty Russell1058.82%133.33%
Ian Molton741.18%266.67%
Total17100.00%3100.00%


/*
 * Seed the system entropy pool with a small sample from @rng.
 * Reads at most 16 bytes (bounded by the staging buffer size) under
 * reading_mutex and mixes any bytes obtained into the pool.
 */
static void add_early_randomness(struct hwrng *rng)
{
	size_t sample_len = min_t(size_t, 16, rng_buffer_size());
	int got;

	mutex_lock(&reading_mutex);
	got = rng_get_data(rng, rng_buffer, sample_len, 1);
	mutex_unlock(&reading_mutex);

	if (got > 0)
		add_device_randomness(rng_buffer, got);
}

Contributors

PersonTokensPropCommitsCommitProp
Amit Shah3756.92%133.33%
Andrew Lutomirski1624.62%133.33%
Rusty Russell1218.46%133.33%
Total65100.00%3100.00%


/*
 * kref release callback: run the driver's optional cleanup hook, then
 * signal anyone blocked in hwrng_unregister() that teardown is done.
 */
static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	if (rng->cleanup)
		rng->cleanup(rng);

	complete(&rng->cleanup_done);
}

Contributors

PersonTokensPropCommitsCommitProp
Rusty Russell4593.75%266.67%
Herbert Xu36.25%133.33%
Total48100.00%3100.00%


/*
 * Make @rng the active device.  The replacement is initialized first so
 * the old device is only dropped once the new one is known-good.
 * Caller must hold rng_mutex.  Returns 0 or a negative errno.
 */
static int set_current_rng(struct hwrng *rng)
{
	int rc;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	rc = hwrng_init(rng);
	if (rc)
		return rc;

	drop_current_rng();
	current_rng = rng;
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu2450.00%266.67%
Rusty Russell2450.00%133.33%
Total48100.00%3100.00%


/*
 * Release the active device's reference and clear current_rng; the
 * final kref_put triggers cleanup_rng().  Caller must hold rng_mutex.
 */
static void drop_current_rng(void)
{
	BUG_ON(!mutex_is_locked(&rng_mutex));

	if (current_rng) {
		/* decrease last reference for triggering the cleanup */
		kref_put(&current_rng->ref, cleanup_rng);
		current_rng = NULL;
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Rusty Russell39100.00%1100.00%
Total39100.00%1100.00%

/* Returns ERR_PTR(), NULL or refcounted hwrng */
static struct hwrng *get_current_rng(void)
{
	struct hwrng *active;

	if (mutex_lock_interruptible(&rng_mutex))
		return ERR_PTR(-ERESTARTSYS);

	active = current_rng;
	if (active)
		kref_get(&active->ref);
	mutex_unlock(&rng_mutex);

	return active;
}

Contributors

PersonTokensPropCommitsCommitProp
Rusty Russell55100.00%1100.00%
Total55100.00%1100.00%


/*
 * Drop a reference obtained from get_current_rng().
 *
 * Hold rng_mutex here so we serialize in case they set_current_rng
 * on rng again immediately.
 */
static void put_rng(struct hwrng *rng)
{
	mutex_lock(&rng_mutex);
	if (rng)
		kref_put(&rng->ref, cleanup_rng);
	mutex_unlock(&rng_mutex);
}

Contributors

PersonTokensPropCommitsCommitProp
Rusty Russell38100.00%1100.00%
Total38100.00%1100.00%


/*
 * Bring @rng up for use as the active device.  If it is already live
 * (being re-selected) we just take another reference; otherwise run the
 * driver's init hook and arm the refcount/teardown completion.  Also
 * recomputes current_quality and starts/stops khwrngd accordingly.
 * Returns 0 or the driver's init error.
 */
static int hwrng_init(struct hwrng *rng)
{
	/* Already live? Just take a reference and skip driver init. */
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

	if (rng->init) {
		int ret;

		ret = rng->init(rng);
		if (ret)
			return ret;
	}

	/* First user: start refcounting and re-arm the teardown completion. */
	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
	add_early_randomness(rng);

	/* Per-mille entropy estimate; driver value wins over the module param. */
	current_quality = rng->quality ? : default_quality;
	if (current_quality > 1024)
		current_quality = 1024;

	/* Quality 0 means no entropy credit: no point running khwrngd. */
	if (current_quality == 0 && hwrng_fill)
		kthread_stop(hwrng_fill);
	if (current_quality > 0 && !hwrng_fill)
		start_khwrngd();

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Torsten Duwe3629.75%233.33%
Herbert Xu3125.62%116.67%
Michael Büsch2419.83%116.67%
Amit Shah2218.18%116.67%
Keith Packard86.61%116.67%
Total121100.00%6100.00%


/* chrdev open: /dev/hwrng is strictly read-only. */
static int rng_dev_open(struct inode *inode, struct file *filp)
{
	if (!(filp->f_mode & FMODE_READ))
		return -EINVAL;
	if (filp->f_mode & FMODE_WRITE)
		return -EINVAL;
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Michael Büsch48100.00%1100.00%
Total48100.00%1100.00%


/*
 * Pull up to @size bytes from @rng into @buffer, blocking if @wait.
 * Prefers the modern ->read() hook; otherwise falls back to the legacy
 * ->data_present()/->data_read() pair (which yields a u32 at a time).
 * Caller must hold reading_mutex.  Returns bytes read or a -errno.
 */
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait)
{
	int present;

	BUG_ON(!mutex_is_locked(&reading_mutex));

	if (rng->read)
		return rng->read(rng, (void *)buffer, size, wait);

	if (rng->data_present)
		present = rng->data_present(rng, wait);
	else
		present = 1;

	if (present)
		return rng->data_read(rng, (u32 *)buffer);

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Ian Molton9290.20%150.00%
Rusty Russell109.80%150.00%
Total102100.00%2100.00%


/*
 * read() for /dev/hwrng: copy hardware random bytes to userspace.
 * Refills rng_buffer from the current device when data_avail runs out,
 * honoring O_NONBLOCK, and drains the buffer from the tail so leftover
 * bytes survive across calls.  Returns bytes copied or a -errno.
 */
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;
	struct hwrng *rng;

	while (size) {
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
			goto out;
		}
		if (!rng) {
			err = -ENODEV;
			goto out;
		}

		if (mutex_lock_interruptible(&reading_mutex)) {
			err = -ERESTARTSYS;
			goto out_put;
		}
		if (!data_avail) {
			/* Buffer empty: ask the hardware for a fresh block. */
			bytes_read = rng_get_data(rng, rng_buffer,
				rng_buffer_size(),
				!(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock_reading;
			}
			data_avail = bytes_read;
		}

		if (!data_avail) {
			/* Still nothing; only an error for non-blocking readers. */
			if (filp->f_flags & O_NONBLOCK) {
				err = -EAGAIN;
				goto out_unlock_reading;
			}
		} else {
			len = data_avail;
			if (len > size)
				len = size;

			/* Consume from the tail of the staging buffer. */
			data_avail -= len;

			if (copy_to_user(buf + ret, rng_buffer + data_avail,
								len)) {
				err = -EFAULT;
				goto out_unlock_reading;
			}

			size -= len;
			ret += len;
		}

		mutex_unlock(&reading_mutex);
		put_rng(rng);

		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	/* Partial reads succeed; report the error only if nothing was copied. */
	return ret ? : err;

out_unlock_reading:
	mutex_unlock(&reading_mutex);
out_put:
	put_rng(rng);
	goto out;
}

Contributors

PersonTokensPropCommitsCommitProp
Michael Büsch11839.86%111.11%
Ian Molton9532.09%111.11%
Rusty Russell4515.20%333.33%
Jiri Slaby155.07%111.11%
Ralph Wuerthner144.73%111.11%
Herbert Xu82.70%111.11%
Patrick McHardy10.34%111.11%
Total296100.00%9100.00%

/* File operations for the /dev/hwrng character device. */
static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
};

/* Forward declaration; defined with the sysfs attributes below. */
static const struct attribute_group *rng_dev_groups[];

/* Misc device backing /dev/hwrng. */
static struct miscdevice rng_miscdev = {
	.minor		= HWRNG_MINOR,
	.name		= RNG_MODULE_NAME,
	.nodename	= "hwrng",
	.fops		= &rng_chrdev_ops,
	.groups		= rng_dev_groups,
};
/*
 * sysfs store for rng_current: writing a registered device's name
 * selects it as the active RNG.  Returns @len on success, -ENODEV if no
 * device matches, or the error from switching devices.
 */
static ssize_t hwrng_attr_current_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	struct hwrng *candidate;
	int err;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;

	err = -ENODEV;
	list_for_each_entry(candidate, &rng_list, list) {
		if (!sysfs_streq(candidate->name, buf))
			continue;
		/* Selecting the already-active device is a no-op. */
		err = (candidate == current_rng) ? 0 :
			set_current_rng(candidate);
		break;
	}
	mutex_unlock(&rng_mutex);

	return err ? : len;
}

Contributors

PersonTokensPropCommitsCommitProp
Michael Büsch9084.91%120.00%
Greg Kroah-Hartman76.60%120.00%
Herbert Xu54.72%120.00%
Rusty Russell32.83%120.00%
Lee Jones10.94%120.00%
Total106100.00%5100.00%


/* sysfs show for rng_current: name of the active RNG, or "none". */
static ssize_t hwrng_attr_current_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct hwrng *active = get_current_rng();
	ssize_t written;

	if (IS_ERR(active))
		return PTR_ERR(active);

	written = snprintf(buf, PAGE_SIZE, "%s\n",
			   active ? active->name : "none");
	put_rng(active);

	return written;
}

Contributors

PersonTokensPropCommitsCommitProp
Michael Büsch4460.27%133.33%
Rusty Russell2230.14%133.33%
Greg Kroah-Hartman79.59%133.33%
Total73100.00%3100.00%


/* sysfs show for rng_available: space-separated names of all RNGs. */
static ssize_t hwrng_attr_available_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct hwrng *entry;
	int err;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;

	buf[0] = '\0';
	list_for_each_entry(entry, &rng_list, list) {
		strlcat(buf, entry->name, PAGE_SIZE);
		strlcat(buf, " ", PAGE_SIZE);
	}
	strlcat(buf, "\n", PAGE_SIZE);
	mutex_unlock(&rng_mutex);

	return strlen(buf);
}

Contributors

PersonTokensPropCommitsCommitProp
Michael Büsch8886.27%133.33%
Greg Kroah-Hartman76.86%133.33%
Rickard Strandqvist76.86%133.33%
Total102100.00%3100.00%

/* sysfs attributes exposed under the misc device's class directory. */
static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
		   hwrng_attr_current_show,
		   hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
		   hwrng_attr_available_show,
		   NULL);

static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,
	NULL
};

ATTRIBUTE_GROUPS(rng_dev);
/* Tear down the /dev/hwrng character device. */
static void __exit unregister_miscdev(void)
{
	misc_deregister(&rng_miscdev);
}

Contributors

PersonTokensPropCommitsCommitProp
Michael Büsch1280.00%133.33%
Rafael J. Wysocki213.33%133.33%
Herbert Xu16.67%133.33%
Total15100.00%3100.00%


/* Create the /dev/hwrng character device.  Returns 0 or a -errno. */
static int __init register_miscdev(void)
{
	return misc_register(&rng_miscdev);
}

Contributors

PersonTokensPropCommitsCommitProp
Michael Büsch1487.50%133.33%
Herbert Xu16.25%133.33%
Takashi Iwai16.25%133.33%
Total16100.00%3100.00%


/*
 * khwrngd worker: continuously feed hardware randomness into the input
 * pool, crediting entropy bits according to current_quality (per mille
 * of bytes read).  Exits when stopped or when no device remains; backs
 * off 10s when the hardware has nothing to offer.
 */
static int hwrng_fillfn(void *unused)
{
	long rc;

	while (!kthread_should_stop()) {
		struct hwrng *rng;

		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
			break;
		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		mutex_unlock(&reading_mutex);
		put_rng(rng);
		if (rc <= 0) {
			pr_warn("hwrng: no data available\n");
			msleep_interruptible(10000);
			continue;
		}
		/* Outside lock, sure, but y'know: randomness. */
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   rc * current_quality * 8 >> 10);
	}
	/* NOTE(review): the thread clears hwrng_fill itself; racy vs
	 * kthread_stop() callers checking hwrng_fill — confirm. */
	hwrng_fill = NULL;
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Torsten Duwe8268.91%240.00%
Rusty Russell3529.41%240.00%
Stephen Boyd21.68%120.00%
Total119100.00%5100.00%


/* Launch the background entropy-feeding thread (khwrngd). */
static void start_khwrngd(void)
{
	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
	if (IS_ERR(hwrng_fill)) {
		pr_err("hwrng_fill thread creation failed");
		hwrng_fill = NULL;
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Torsten Duwe3594.59%150.00%
Martin Schwidefsky25.41%150.00%
Total37100.00%2100.00%


/*
 * Register a hardware RNG with the core.  The device must have a name
 * and at least one read hook.  The first device registered becomes the
 * active one.  Returns 0, -EINVAL on a malformed driver, -EEXIST on a
 * duplicate name, or the error from activating the device.
 */
int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *old_rng, *tmp;

	if (!rng->name || (!rng->data_read && !rng->read))
		goto out;

	mutex_lock(&rng_mutex);
	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}

	/* Start "completed" so unregister never blocks on a never-used rng. */
	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);

	old_rng = current_rng;
	err = 0;
	if (!old_rng) {
		err = set_current_rng(rng);
		if (err)
			goto out_unlock;
	}
	list_add_tail(&rng->list, &rng_list);

	if (old_rng && !rng->init) {
		/*
		 * Use a new device's input to add some randomness to
		 * the system. If this rng device isn't going to be
		 * used right away, its init function hasn't been
		 * called yet; so only use the randomness from devices
		 * that don't need an init callback.
		 */
		add_early_randomness(rng);
	}

out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Michael Büsch12572.67%112.50%
Herbert Xu179.88%225.00%
Amit Shah116.40%112.50%
Kees Cook63.49%112.50%
Ian Molton63.49%112.50%
Rusty Russell42.33%112.50%
Corentin Labbe31.74%112.50%
Total172100.00%8100.00%

EXPORT_SYMBOL_GPL(hwrng_register);
/*
 * Remove @rng from the pool.  If it was the active device, fall back to
 * the most recently registered remaining one.  Stops khwrngd when the
 * list becomes empty, then waits for any in-flight cleanup to finish
 * before returning (so the driver may free @rng).
 */
void hwrng_unregister(struct hwrng *rng)
{
	mutex_lock(&rng_mutex);

	list_del(&rng->list);
	if (current_rng == rng) {
		drop_current_rng();
		if (!list_empty(&rng_list)) {
			struct hwrng *tail;

			tail = list_entry(rng_list.prev, struct hwrng, list);

			set_current_rng(tail);
		}
	}

	if (list_empty(&rng_list)) {
		/* Drop the lock before stopping the thread: it may be
		 * blocked acquiring rng_mutex in get_current_rng(). */
		mutex_unlock(&rng_mutex);
		if (hwrng_fill)
			kthread_stop(hwrng_fill);
	} else
		mutex_unlock(&rng_mutex);

	wait_for_completion(&rng->cleanup_done);
}

Contributors

PersonTokensPropCommitsCommitProp
Michael Büsch7063.64%114.29%
Rusty Russell1917.27%228.57%
Torsten Duwe1110.00%114.29%
Amos Kong76.36%114.29%
Herbert Xu21.82%114.29%
Rafael J. Wysocki10.91%114.29%
Total110100.00%7100.00%

EXPORT_SYMBOL_GPL(hwrng_unregister);
/* devres destructor: unregister the managed hwrng stored in @res. */
static void devm_hwrng_release(struct device *dev, void *res)
{
	struct hwrng **rngp = res;

	hwrng_unregister(*rngp);
}

Contributors

PersonTokensPropCommitsCommitProp
Dmitry Torokhov27100.00%1100.00%
Total27100.00%1100.00%


/* devres match: true when the resource wraps the hwrng passed as @data. */
static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
	struct hwrng **rngp = res;

	if (WARN_ON(!rngp || !*rngp))
		return 0;

	return *rngp == data;
}

Contributors

PersonTokensPropCommitsCommitProp
Dmitry Torokhov48100.00%1100.00%
Total48100.00%1100.00%


/*
 * Managed hwrng_register(): @rng is unregistered automatically when
 * @dev is unbound.  Returns 0, -ENOMEM, or the registration error.
 */
int devm_hwrng_register(struct device *dev, struct hwrng *rng)
{
	struct hwrng **slot;
	int err;

	slot = devres_alloc(devm_hwrng_release, sizeof(*slot), GFP_KERNEL);
	if (!slot)
		return -ENOMEM;

	err = hwrng_register(rng);
	if (err) {
		devres_free(slot);
		return err;
	}

	*slot = rng;
	devres_add(dev, slot);
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Dmitry Torokhov84100.00%1100.00%
Total84100.00%1100.00%

EXPORT_SYMBOL_GPL(devm_hwrng_register);
/* Explicitly drop a managed hwrng before @dev is unbound. */
void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}

Contributors

PersonTokensPropCommitsCommitProp
Dmitry Torokhov26100.00%1100.00%
Total26100.00%1100.00%

EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
/*
 * Module init: allocate both staging buffers up front, then create
 * /dev/hwrng.  Unwinds fully on any failure.
 */
static int __init hwrng_modinit(void)
{
	int ret;

	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
	rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_buffer)
		return -ENOMEM;

	rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_fillbuf) {
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	ret = register_miscdev();
	if (ret)
		goto err_free_fillbuf;

	return 0;

err_free_fillbuf:
	kfree(rng_fillbuf);
err_free_buffer:
	kfree(rng_buffer);
	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
PrasannaKumar Muralidharan7284.71%150.00%
Herbert Xu1315.29%150.00%
Total85100.00%2100.00%


/*
 * Module exit: every provider must already be unregistered, so
 * current_rng is necessarily NULL here.  Free the staging buffers and
 * remove /dev/hwrng.
 */
static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	BUG_ON(current_rng);
	kfree(rng_buffer);
	kfree(rng_fillbuf);
	mutex_unlock(&rng_mutex);

	unregister_miscdev();
}

Contributors

PersonTokensPropCommitsCommitProp
Satoru Takeuchi3076.92%133.33%
Torsten Duwe512.82%133.33%
Herbert Xu410.26%133.33%
Total39100.00%3100.00%

module_init(hwrng_modinit);
module_exit(hwrng_modexit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");

Overall Contributors

PersonTokensPropCommitsCommitProp
Michael Büsch75933.33%12.17%
Rusty Russell37716.56%510.87%
Torsten Duwe22910.06%36.52%
Ian Molton2079.09%24.35%
Dmitry Torokhov1958.56%12.17%
Herbert Xu1325.80%613.04%
Amit Shah914.00%12.17%
PrasannaKumar Muralidharan723.16%12.17%
Takashi Iwai401.76%12.17%
Satoru Takeuchi341.49%12.17%
Greg Kroah-Hartman231.01%12.17%
Corentin Labbe170.75%48.70%
Andrew Lutomirski160.70%12.17%
Jiri Slaby150.66%12.17%
Ralph Wuerthner140.61%12.17%
Kees Cook80.35%12.17%
Keith Packard80.35%12.17%
Amos Kong70.31%12.17%
Rickard Strandqvist70.31%12.17%
Arnd Bergmann50.22%12.17%
Kay Sievers50.22%24.35%
Rafael J. Wysocki40.18%12.17%
Ingo Molnar30.13%12.17%
Stephen Boyd20.09%12.17%
Martin Schwidefsky20.09%12.17%
Linus Torvalds10.04%12.17%
Patrick McHardy10.04%12.17%
Lee Jones10.04%12.17%
Arjan van de Ven10.04%12.17%
Al Viro10.04%12.17%
Total2277100.00%46100.00%
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.