Release 4.11 init/initramfs.c
/*
* Many of the syscalls used in this file expect some of the arguments
* to be __user pointers not __kernel pointers. To limit the sparse
* noise, turn off sparse checking for this file.
*/
#ifdef __CHECKER__
#undef __CHECKER__
#warning "Sparse checking disabled for this file"
#endif
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/dirent.h>
#include <linux/syscalls.h>
#include <linux/utime.h>
#include <linux/file.h>
static ssize_t __init xwrite(int fd, const char *p, size_t count)
{
ssize_t out = 0;
/* sys_write only can write MAX_RW_COUNT aka 2G-4K bytes at most */
while (count) {
ssize_t rv = sys_write(fd, p, count);
if (rv < 0) {
if (rv == -EINTR || rv == -EAGAIN)
continue;
return out ? out : rv;
} else if (rv == 0)
break;
p += rv;
out += rv;
count -= rv;
}
return out;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yinghai Lu | 93 | 100.00% | 1 | 100.00% |
Total | 93 | 100.00% | 1 | 100.00% |
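The loop exists because sys_write() can return short counts (at most MAX_RW_COUNT per call, as the comment notes) or -EINTR/-EAGAIN, so a single call is not enough for a large image. A minimal user-space analogue of the same retry loop, using write(2) and errno rather than a negative return value (a sketch, not the kernel helper itself):

/*
 * User-space analogue of xwrite() above: keep writing until everything
 * is out, retry on EINTR/EAGAIN, and report a hard error only if
 * nothing was written yet.
 */
#include <errno.h>
#include <unistd.h>

static ssize_t xwrite_user(int fd, const char *p, size_t count)
{
	ssize_t out = 0;

	while (count) {
		ssize_t rv = write(fd, p, count);

		if (rv < 0) {
			if (errno == EINTR || errno == EAGAIN)
				continue;		/* transient: retry the same chunk */
			return out ? out : -1;	/* hard error before any progress */
		} else if (rv == 0)
			break;
		p += rv;
		out += rv;
		count -= rv;
	}
	return out;
}

int main(void)
{
	static const char msg[] = "hello from the retry loop\n";

	return xwrite_user(STDOUT_FILENO, msg, sizeof(msg) - 1) < 0;
}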
static __initdata char *message;
static void __init error(char *x)
{
if (!message)
message = x;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeff Garzik | 13 | 65.00% | 1 | 50.00% |
Andrew Morton | 7 | 35.00% | 1 | 50.00% |
Total | 20 | 100.00% | 2 | 100.00% |
/* link hash */
#define N_ALIGN(len) ((((len) + 1) & ~3) + 2)
static __initdata struct hash {
int ino, minor, major;
umode_t mode;
struct hash *next;
char name[N_ALIGN(PATH_MAX)];
} *head[32];
static inline int hash(int major, int minor, int ino)
{
unsigned long tmp = ino + minor + (major << 3);
tmp += tmp >> 5;
return tmp & 31;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeff Garzik | 41 | 100.00% | 1 | 100.00% |
Total | 41 | 100.00% | 1 | 100.00% |
static char __init *find_link(int major, int minor, int ino,
umode_t mode, char *name)
{
struct hash **p, *q;
for (p = head + hash(major, minor, ino); *p; p = &(*p)->next) {
if ((*p)->ino != ino)
continue;
if ((*p)->minor != minor)
continue;
if ((*p)->major != major)
continue;
if (((*p)->mode ^ mode) & S_IFMT)
continue;
return (*p)->name;
}
q = kmalloc(sizeof(struct hash), GFP_KERNEL);
if (!q)
panic("can't allocate link hash entry");
q->major = major;
q->minor = minor;
q->ino = ino;
q->mode = mode;
strcpy(q->name, name);
q->next = NULL;
*p = q;
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeff Garzik | 156 | 80.83% | 1 | 16.67% |
H. Peter Anvin | 28 | 14.51% | 1 | 16.67% |
Mark Huang | 4 | 2.07% | 1 | 16.67% |
Thomas Petazzoni | 3 | 1.55% | 1 | 16.67% |
Al Viro | 1 | 0.52% | 1 | 16.67% |
Andrew Morton | 1 | 0.52% | 1 | 16.67% |
Total | 193 | 100.00% | 6 | 100.00% |
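find_link() records the name of each entry it has not seen before and returns NULL; a later entry with the same (major, minor, ino) and file type gets the recorded name back, so the caller can hard-link to it instead of extracting the body again. The bucket index comes from hash() above; a stand-alone sketch of just that calculation, with made-up device and inode numbers:

/*
 * Sketch of the bucket selection only (not kernel code): the same
 * (ino + minor + (major << 3)) mix folded into one of 32 buckets.
 */
#include <stdio.h>

static unsigned int bucket(int major, int minor, int ino)
{
	unsigned long tmp = ino + minor + (major << 3);

	tmp += tmp >> 5;
	return tmp & 31;
}

int main(void)
{
	/* two hypothetical hard links to the same inode share a bucket */
	printf("%u %u\n", bucket(8, 1, 4242), bucket(8, 1, 4242));
	return 0;
}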
static void __init free_hash(void)
{
struct hash **p, *q;
for (p = head; p < head + 32; p++) {
while (*p) {
q = *p;
*p = q->next;
kfree(q);
}
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeff Garzik | 58 | 98.31% | 1 | 50.00% |
Thomas Petazzoni | 1 | 1.69% | 1 | 50.00% |
Total | 59 | 100.00% | 2 | 100.00% |
static long __init do_utime(char *filename, time_t mtime)
{
struct timespec t[2];
t[0].tv_sec = mtime;
t[0].tv_nsec = 0;
t[1].tv_sec = mtime;
t[1].tv_nsec = 0;
return do_utimes(AT_FDCWD, filename, t, AT_SYMLINK_NOFOLLOW);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nye Liu | 69 | 100.00% | 1 | 100.00% |
Total | 69 | 100.00% | 1 | 100.00% |
static __initdata LIST_HEAD(dir_list);
struct dir_entry {
struct list_head list;
char *name;
time_t mtime;
};
static void __init dir_add(const char *name, time_t mtime)
{
struct dir_entry *de = kmalloc(sizeof(struct dir_entry), GFP_KERNEL);
if (!de)
panic("can't allocate dir_entry buffer");
INIT_LIST_HEAD(&de->list);
de->name = kstrdup(name, GFP_KERNEL);
de->mtime = mtime;
list_add(&de->list, &dir_list);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nye Liu | 77 | 100.00% | 1 | 100.00% |
Total | 77 | 100.00% | 1 | 100.00% |
static void __init dir_utime(void)
{
struct dir_entry *de, *tmp;
list_for_each_entry_safe(de, tmp, &dir_list, list) {
list_del(&de->list);
do_utime(de->name, de->mtime);
kfree(de->name);
kfree(de);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nye Liu | 60 | 100.00% | 1 | 100.00% |
Total | 60 | 100.00% | 1 | 100.00% |
static __initdata time_t mtime;
/* cpio header parsing */
static __initdata unsigned long ino, major, minor, nlink;
static __initdata umode_t mode;
static __initdata unsigned long body_len, name_len;
static __initdata uid_t uid;
static __initdata gid_t gid;
static __initdata unsigned rdev;
static void __init parse_header(char *s)
{
unsigned long parsed[12];
char buf[9];
int i;
buf[8] = '\0';
for (i = 0, s += 6; i < 12; i++, s += 8) {
memcpy(buf, s, 8);
parsed[i] = simple_strtoul(buf, NULL, 16);
}
ino = parsed[0];
mode = parsed[1];
uid = parsed[2];
gid = parsed[3];
nlink = parsed[4];
mtime = parsed[5];
body_len = parsed[6];
major = parsed[7];
minor = parsed[8];
rdev = new_encode_dev(MKDEV(parsed[9], parsed[10]));
name_len = parsed[11];
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeff Garzik | 158 | 94.05% | 1 | 25.00% |
Nye Liu | 7 | 4.17% | 1 | 25.00% |
Al Viro | 3 | 1.79% | 2 | 50.00% |
Total | 168 | 100.00% | 4 | 100.00% |
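parse_header() steps past the six-character magic and then reads twelve fixed-width 8-hex-digit fields; in newc order these are ino, mode, uid, gid, nlink, mtime, filesize, devmajor, devminor, rdevmajor, rdevminor and namesize (the trailing checksum field is never read). A user-space sketch of the same windowed walk over a fabricated header string (all values made up for illustration):

/*
 * Stand-alone sketch of the field walk above: skip the 6-byte magic,
 * then read twelve 8-digit hex fields through an 8-character window.
 * The header string is fabricated, not taken from a real archive.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* magic + ino, mode, uid, gid, nlink, mtime, filesize, devmajor,
	 * devminor, rdevmajor, rdevminor, namesize (checksum omitted) */
	const char *s = "070701"
		"00000001" "000081a4" "00000000" "00000000"
		"00000001" "5cafe000" "00000000" "00000008"
		"00000001" "00000000" "00000000" "0000000c";
	unsigned long parsed[12];
	char buf[9];
	int i;

	buf[8] = '\0';
	for (i = 0, s += 6; i < 12; i++, s += 8) {
		memcpy(buf, s, 8);
		parsed[i] = strtoul(buf, NULL, 16);
	}
	printf("ino=%lu mode=%#lo namesize=%lu\n",
	       parsed[0], parsed[1], parsed[11]);
	return 0;
}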
/* FSM */
static __initdata enum state {
Start,
Collect,
GotHeader,
SkipIt,
GotName,
CopyFile,
GotSymlink,
Reset
} state, next_state;
static __initdata char *victim;
static unsigned long byte_count __initdata;
static __initdata loff_t this_header, next_header;
static inline void __init eat(unsigned n)
{
victim += n;
this_header += n;
byte_count -= n;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeff Garzik | 21 | 91.30% | 1 | 33.33% |
Mark D Rustad | 1 | 4.35% | 1 | 33.33% |
Al Viro | 1 | 4.35% | 1 | 33.33% |
Total | 23 | 100.00% | 3 | 100.00% |
static __initdata char *vcollected;
static __initdata char *collected;
static long remains __initdata;
static __initdata char *collect;
static void __init read_into(char *buf, unsigned size, enum state next)
{
if (byte_count >= size) {
collected = victim;
eat(size);
state = next;
} else {
collect = collected = buf;
remains = size;
next_state = next;
state = Collect;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeff Garzik | 59 | 98.33% | 1 | 50.00% |
Mark D Rustad | 1 | 1.67% | 1 | 50.00% |
Total | 60 | 100.00% | 2 | 100.00% |
static __initdata char *header_buf, *symlink_buf, *name_buf;
static int __init do_start(void)
{
read_into(header_buf, 110, GotHeader);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeff Garzik | 21 | 100.00% | 1 | 100.00% |
Total | 21 | 100.00% | 1 | 100.00% |
static int __init do_collect(void)
{
unsigned long n = remains;
if (byte_count < n)
n = byte_count;
memcpy(collect, victim, n);
eat(n);
collect += n;
if ((remains -= n) != 0)
return 1;
state = next_state;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeff Garzik | 56 | 88.89% | 1 | 25.00% |
Mika Kukkonen | 4 | 6.35% | 1 | 25.00% |
Mark D Rustad | 2 | 3.17% | 1 | 25.00% |
Yinghai Lu | 1 | 1.59% | 1 | 25.00% |
Total | 63 | 100.00% | 4 | 100.00% |
static int __init do_header(void)
{
if (memcmp(collected, "070707", 6)==0) {
error("incorrect cpio method used: use -H newc option");
return 1;
}
if (memcmp(collected, "070701", 6)) {
error("no cpio magic");
return 1;
}
parse_header(collected);
next_header = this_header + N_ALIGN(name_len) + body_len;
next_header = (next_header + 3) & ~3;
state = SkipIt;
if (name_len <= 0 || name_len > PATH_MAX)
return 0;
if (S_ISLNK(mode)) {
if (body_len > PATH_MAX)
return 0;
collect = collected = symlink_buf;
remains = N_ALIGN(name_len) + body_len;
next_state = GotSymlink;
state = Collect;
return 0;
}
if (S_ISREG(mode) || !body_len)
read_into(name_buf, N_ALIGN(name_len), GotName);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeff Garzik | 104 | 62.65% | 1 | 33.33% |
Andrew Morton | 39 | 23.49% | 1 | 33.33% |
Arjan van de Ven | 23 | 13.86% | 1 | 33.33% |
Total | 166 | 100.00% | 3 | 100.00% |
static int __init do_skip(void)
{
if (this_header + byte_count < next_header) {
eat(byte_count);
return 1;
} else {
eat(next_header - this_header);
state = next_state;
return 0;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeff Garzik | 41 | 93.18% | 1 | 33.33% |
Mark D Rustad | 2 | 4.55% | 1 | 33.33% |
Andrew Morton | 1 | 2.27% | 1 | 33.33% |
Total | 44 | 100.00% | 3 | 100.00% |
static int __init do_reset(void)
{
while (byte_count && *victim == '\0')
eat(1);
if (byte_count && (this_header & 3))
error("broken padding");
return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeff Garzik | 39 | 95.12% | 1 | 50.00% |
Mark D Rustad | 2 | 4.88% | 1 | 50.00% |
Total | 41 | 100.00% | 2 | 100.00% |
static int __init maybe_link(void)
{
if (nlink >= 2) {
char *old = find_link(major, minor, ino, mode, collected);
if (old)
return (sys_link(old, collected) < 0) ? -1 : 1;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeff Garzik | 56 | 96.55% | 1 | 50.00% |
H. Peter Anvin | 2 | 3.45% | 1 | 50.00% |
Total | 58 | 100.00% | 2 | 100.00% |
static void __init clean_path(char *path, umode_t fmode)
{
struct stat st;
if (!sys_newlstat(path, &st) && (st.st_mode ^ fmode) & S_IFMT) {
if (S_ISDIR(st.st_mode))
sys_rmdir(path);
else
sys_unlink(path);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
H. Peter Anvin | 58 | 95.08% | 1 | 33.33% |
Mark D Rustad | 2 | 3.28% | 1 | 33.33% |
Al Viro | 1 | 1.64% | 1 | 33.33% |
Total | 61 | 100.00% | 3 | 100.00% |
static __initdata int wfd;
static int __init do_name(void)
{
state = SkipIt;
next_state = Reset;
if (strcmp(collected, "TRAILER!!!") == 0) {
free_hash();
return 0;
}
clean_path(collected, mode);
if (S_ISREG(mode)) {
int ml = maybe_link();
if (ml >= 0) {
int openflags = O_WRONLY|O_CREAT;
if (ml != 1)
openflags |= O_TRUNC;
wfd = sys_open(collected, openflags, mode);
if (wfd >= 0) {
sys_fchown(wfd, uid, gid);
sys_fchmod(wfd, mode);
if (body_len)
sys_ftruncate(wfd, body_len);
vcollected = kstrdup(collected, GFP_KERNEL);
state = CopyFile;
}
}
} else if (S_ISDIR(mode)) {
sys_mkdir(collected, mode);
sys_chown(collected, uid, gid);
sys_chmod(collected, mode);
dir_add(collected, mtime);
} else if (S_ISBLK(mode) || S_ISCHR(mode) ||
S_ISFIFO(mode) || S_ISSOCK(mode)) {
if (maybe_link() == 0) {
sys_mknod(collected, mode, rdev);
sys_chown(collected, uid, gid);
sys_chmod(collected, mode);
do_utime(collected, mtime);
}
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeff Garzik | 168 | 66.93% | 1 | 12.50% |
H. Peter Anvin | 32 | 12.75% | 1 | 12.50% |
Nye Liu | 23 | 9.16% | 1 | 12.50% |
Anton Blanchard | 14 | 5.58% | 1 | 12.50% |
David Howells | 7 | 2.79% | 1 | 12.50% |
Randy Robertson | 4 | 1.59% | 1 | 12.50% |
Andrew Morton | 2 | 0.80% | 1 | 12.50% |
Milton D. Miller II | 1 | 0.40% | 1 | 12.50% |
Total | 251 | 100.00% | 8 | 100.00% |
static int __init do_copy(void)
{
if (byte_count >= body_len) {
if (xwrite(wfd, victim, body_len) != body_len)
error("write error");
sys_close(wfd);
do_utime(vcollected, mtime);
kfree(vcollected);
eat(body_len);
state = SkipIt;
return 0;
} else {
if (xwrite(wfd, victim, byte_count) != byte_count)
error("write error");
body_len -= byte_count;
eat(byte_count);
return 1;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeff Garzik | 61 | 62.89% | 1 | 20.00% |
David Engraf | 17 | 17.53% | 1 | 20.00% |
Nye Liu | 12 | 12.37% | 1 | 20.00% |
Mark D Rustad | 5 | 5.15% | 1 | 20.00% |
Yinghai Lu | 2 | 2.06% | 1 | 20.00% |
Total | 97 | 100.00% | 5 | 100.00% |
static int __init do_symlink(void)
{
collected[N_ALIGN(name_len) + body_len] = '\0';
clean_path(collected, 0);
sys_symlink(collected + N_ALIGN(name_len), collected);
sys_lchown(collected, uid, gid);
do_utime(collected, mtime);
state = SkipIt;
next_state = Reset;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeff Garzik | 52 | 77.61% | 1 | 25.00% |
H. Peter Anvin | 7 | 10.45% | 1 | 25.00% |
Nye Liu | 7 | 10.45% | 1 | 25.00% |
Milton D. Miller II | 1 | 1.49% | 1 | 25.00% |
Total | 67 | 100.00% | 4 | 100.00% |
static __initdata int (*actions[])(void) = {
[Start] = do_start,
[Collect] = do_collect,
[GotHeader] = do_header,
[SkipIt] = do_skip,
[GotName] = do_name,
[CopyFile] = do_copy,
[GotSymlink] = do_symlink,
[Reset] = do_reset,
};
static long __init write_buffer(char *buf, unsigned long len)
{
byte_count = len;
victim = buf;
while (!actions[state]())
;
return len - byte_count;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeff Garzik | 36 | 90.00% | 1 | 33.33% |
Yinghai Lu | 2 | 5.00% | 1 | 33.33% |
Mark D Rustad | 2 | 5.00% | 1 | 33.33% |
Total | 40 | 100.00% | 3 | 100.00% |
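write_buffer() points the machine at a chunk (victim, byte_count) and keeps calling the handler for the current state; a handler returns 0 to hand over to the next state and nonzero to stop processing the current chunk (typically because it needs more input), and the caller learns how much was consumed from len - byte_count. The dispatch itself is just a designated-initializer table indexed by the enum; a stand-alone sketch of that pattern, with made-up states and handlers:

/*
 * Sketch of the dispatch pattern used by write_buffer(): a function
 * pointer table indexed by an enum, driven until a handler asks to
 * stop. The states and handlers here are invented for illustration.
 */
#include <stdio.h>

enum state { A, B, Done };
static enum state state = A;

static int do_a(void) { puts("A"); state = B; return 0; }
static int do_b(void) { puts("B"); state = Done; return 0; }
static int do_done(void) { puts("done"); return 1; }	/* stop the loop */

static int (*actions[])(void) = {
	[A] = do_a,
	[B] = do_b,
	[Done] = do_done,
};

int main(void)
{
	while (!actions[state]())
		;
	return 0;
}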
static long __init flush_buffer(void *bufv, unsigned long len)
{
char *buf = (char *) bufv;
long written;
long origLen = len;
if (message)
return -1;
while ((written = write_buffer(buf, len)) < len && !message) {
char c = buf[written];
if (c == '0') {
buf += written;
len -= written;
state = Start;
} else if (c == 0) {
buf += written;
len -= written;
state = Reset;
} else
error("junk in compressed archive");
}
return origLen;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeff Garzik | 64 | 53.78% | 1 | 20.00% |
Alain Knaff | 23 | 19.33% | 1 | 20.00% |
Milton D. Miller II | 21 | 17.65% | 1 | 20.00% |
Andrew Morton | 7 | 5.88% | 1 | 20.00% |
Yinghai Lu | 4 | 3.36% | 1 | 20.00% |
Total | 119 | 100.00% | 5 | 100.00% |
static unsigned long my_inptr; /* index of next byte to be processed in inbuf */
#include <linux/decompress/generic.h>
static char * __init unpack_to_rootfs(char *buf, unsigned long len)
{
long written;
decompress_fn decompress;
const char *compress_name;
static __initdata char msg_buf[64];
header_buf = kmalloc(110, GFP_KERNEL);
symlink_buf = kmalloc(PATH_MAX + N_ALIGN(PATH_MAX) + 1, GFP_KERNEL);
name_buf = kmalloc(N_ALIGN(PATH_MAX), GFP_KERNEL);
if (!header_buf || !symlink_buf || !name_buf)
panic("can't allocate buffers");
state = Start;
this_header = 0;
message = NULL;
while (!message && len) {
loff_t saved_offset = this_header;
if (*buf == '0' && !(this_header & 3)) {
state = Start;
written = write_buffer(buf, len);
buf += written;
len -= written;
continue;
}
if (!*buf) {
buf++;
len--;
this_header++;
continue;
}
this_header = 0;
decompress = decompress_method(buf, len, &compress_name);
pr_debug("Detected %s compressed data\n", compress_name);
if (decompress) {
int res = decompress(buf, len, NULL, flush_buffer, NULL,
&my_inptr, error);
if (res)
error("decompressor failed");
} else if (compress_name) {
if (!message) {
snprintf(msg_buf, sizeof msg_buf,
"compression method %s not configured",
compress_name);
message = msg_buf;
}
} else
error("junk in compressed archive");
if (state != Reset)
error("junk in compressed archive");
this_header = saved_offset + my_inptr;
buf += my_inptr;
len -= my_inptr;
}
dir_utime();
kfree(name_buf);
kfree(symlink_buf);
kfree(header_buf);
return message;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeff Garzik | 171 | 54.98% | 1 | 8.33% |
H. Peter Anvin | 53 | 17.04% | 2 | 16.67% |
Alain Knaff | 28 | 9.00% | 1 | 8.33% |
Phillip Lougher | 18 | 5.79% | 2 | 16.67% |
Andrew Morton | 13 | 4.18% | 1 | 8.33% |
Thomas Petazzoni | 12 | 3.86% | 1 | 8.33% |
Daniel M. Weeks | 7 | 2.25% | 1 | 8.33% |
Yinghai Lu | 5 | 1.61% | 1 | 8.33% |
Nye Liu | 3 | 0.96% | 1 | 8.33% |
Adrian Bunk | 1 | 0.32% | 1 | 8.33% |
Total | 311 | 100.00% | 12 | 100.00% |
static int __initdata do_retain_initrd;
static int __init retain_initrd_param(char *str)
{
if (*str)
return 0;
do_retain_initrd = 1;
return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Michael Neuling | 26 | 100.00% | 1 | 100.00% |
Total | 26 | 100.00% | 1 | 100.00% |
__setup("retain_initrd", retain_initrd_param);
extern char __initramfs_start[];
extern unsigned long __initramfs_size;
#include <linux/initrd.h>
#include <linux/kexec.h>
static void __init free_initrd(void)
{
#ifdef CONFIG_KEXEC_CORE
unsigned long crashk_start = (unsigned long)__va(crashk_res.start);
unsigned long crashk_end = (unsigned long)__va(crashk_res.end);
#endif
if (do_retain_initrd)
goto skip;
#ifdef CONFIG_KEXEC_CORE
/*
* If the initrd region is overlapped with crashkernel reserved region,
* free only memory that is not part of crashkernel region.
*/
if (initrd_start < crashk_end && initrd_end > crashk_start) {
/*
* Initialize initrd memory region since the kexec boot does
* not do.
*/
memset((void *)initrd_start, 0, initrd_end - initrd_start);
if (initrd_start < crashk_start)
free_initrd_mem(initrd_start, crashk_start);
if (initrd_end > crashk_end)
free_initrd_mem(crashk_end, initrd_end);
} else
#endif
free_initrd_mem(initrd_start, initrd_end);
skip:
initrd_start = 0;
initrd_end = 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Haren Myneni | 90 | 69.77% | 1 | 25.00% |
Jan Beulich | 24 | 18.60% | 1 | 25.00% |
Michael Neuling | 13 | 10.08% | 1 | 25.00% |
Dave Young | 2 | 1.55% | 1 | 25.00% |
Total | 129 | 100.00% | 4 | 100.00% |
#ifdef CONFIG_BLK_DEV_RAM
#define BUF_SIZE 1024
static void __init clean_rootfs(void)
{
int fd;
void *buf;
struct linux_dirent64 *dirp;
int num;
fd = sys_open("/", O_RDONLY, 0);
WARN_ON(fd < 0);
if (fd < 0)
return;
buf = kzalloc(BUF_SIZE, GFP_KERNEL);
WARN_ON(!buf);
if (!buf) {
sys_close(fd);
return;
}
dirp = buf;
num = sys_getdents64(fd, dirp, BUF_SIZE);
while (num > 0) {
while (num > 0) {
struct stat st;
int ret;
ret = sys_newlstat(dirp->d_name, &st);
WARN_ON_ONCE(ret);
if (!ret) {
if (S_ISDIR(st.st_mode))
sys_rmdir(dirp->d_name);
else
sys_unlink(dirp->d_name);
}
num -= dirp->d_reclen;
dirp = (void *)dirp + dirp->d_reclen;
}
dirp = buf;
memset(buf, 0, BUF_SIZE);
num = sys_getdents64(fd, dirp, BUF_SIZE);
}
sys_close(fd);
kfree(buf);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Shaohua Li | 209 | 97.21% | 1 | 50.00% |
H Hartley Sweeten | 6 | 2.79% | 1 | 50.00% |
Total | 215 | 100.00% | 2 | 100.00% |
#endif
static int __init populate_rootfs(void)
{
char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
if (err)
panic("%s", err); /* Failed to decompress INTERNAL initramfs */
if (initrd_start) {
#ifdef CONFIG_BLK_DEV_RAM
int fd;
printk(KERN_INFO "Trying to unpack rootfs image as initramfs...\n");
err = unpack_to_rootfs((char *)initrd_start,
initrd_end - initrd_start);
if (!err) {
free_initrd();
goto done;
} else {
clean_rootfs();
unpack_to_rootfs(__initramfs_start, __initramfs_size);
}
printk(KERN_INFO "rootfs image is not initramfs (%s)"
"; looks like an initrd\n", err);
fd = sys_open("/initrd.image",
O_WRONLY|O_CREAT, 0700);
if (fd >= 0) {
ssize_t written = xwrite(fd, (char *)initrd_start,
initrd_end - initrd_start);
if (written != initrd_end - initrd_start)
pr_err("/initrd.image: incomplete write (%zd != %ld)\n",
written, initrd_end - initrd_start);
sys_close(fd);
free_initrd();
}
done:
#else
printk(KERN_INFO "Unpacking initramfs...\n");
err = unpack_to_rootfs((char *)initrd_start,
initrd_end - initrd_start);
if (err)
printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err);
free_initrd();
#endif
flush_delayed_fput();
/*
* Try loading default modules from initramfs. This gives
* us a chance to load before device_initcalls.
*/
load_default_modules();
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 95 | 43.58% | 1 | 5.88% |
Zdenek Pavlas | 38 | 17.43% | 1 | 5.88% |
Yinghai Lu | 23 | 10.55% | 1 | 5.88% |
Jeff Garzik | 13 | 5.96% | 1 | 5.88% |
David Shaohua Li | 12 | 5.50% | 1 | 5.88% |
Tejun Heo | 9 | 4.13% | 1 | 5.88% |
Linus Torvalds | 5 | 2.29% | 2 | 11.76% |
Jan Beulich | 4 | 1.83% | 1 | 5.88% |
H. Peter Anvin | 4 | 1.83% | 1 | 5.88% |
Éric Piel | 3 | 1.38% | 1 | 5.88% |
Simon Kitching | 3 | 1.38% | 1 | 5.88% |
Lokesh Vutla | 3 | 1.38% | 1 | 5.88% |
Hendrik Brueckner | 2 | 0.92% | 1 | 5.88% |
Tetsuo Handa | 2 | 0.92% | 1 | 5.88% |
Al Viro | 1 | 0.46% | 1 | 5.88% |
Jason Gunthorpe | 1 | 0.46% | 1 | 5.88% |
Total | 218 | 100.00% | 17 | 100.00% |
rootfs_initcall(populate_rootfs);
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeff Garzik | 1554 | 49.57% | 2 | 3.70% |
Nye Liu | 295 | 9.41% | 1 | 1.85% |
David Shaohua Li | 228 | 7.27% | 1 | 1.85% |
Andrew Morton | 193 | 6.16% | 3 | 5.56% |
H. Peter Anvin | 187 | 5.96% | 4 | 7.41% |
Yinghai Lu | 135 | 4.31% | 2 | 3.70% |
Haren Myneni | 93 | 2.97% | 1 | 1.85% |
Alain Knaff | 52 | 1.66% | 1 | 1.85% |
Michael Neuling | 51 | 1.63% | 1 | 1.85% |
Zdenek Pavlas | 48 | 1.53% | 2 | 3.70% |
Jan Beulich | 28 | 0.89% | 1 | 1.85% |
Arjan van de Ven | 23 | 0.73% | 1 | 1.85% |
Milton D. Miller II | 23 | 0.73% | 1 | 1.85% |
Art Haas | 22 | 0.70% | 1 | 1.85% |
Phillip Lougher | 18 | 0.57% | 2 | 3.70% |
H Hartley Sweeten | 18 | 0.57% | 2 | 3.70% |
Mark D Rustad | 18 | 0.57% | 1 | 1.85% |
Mark Huang | 17 | 0.54% | 1 | 1.85% |
David Engraf | 17 | 0.54% | 1 | 1.85% |
Thomas Petazzoni | 16 | 0.51% | 1 | 1.85% |
Anton Blanchard | 14 | 0.45% | 1 | 1.85% |
Al Viro | 10 | 0.32% | 5 | 9.26% |
Linus Torvalds | 10 | 0.32% | 2 | 3.70% |
Tejun Heo | 9 | 0.29% | 1 | 1.85% |
David Howells | 8 | 0.26% | 2 | 3.70% |
Hendrik Brueckner | 7 | 0.22% | 1 | 1.85% |
Daniel M. Weeks | 7 | 0.22% | 1 | 1.85% |
Lokesh Vutla | 6 | 0.19% | 1 | 1.85% |
Nikanth Karthikesan | 5 | 0.16% | 1 | 1.85% |
Mika Kukkonen | 4 | 0.13% | 1 | 1.85% |
Randy Robertson | 4 | 0.13% | 1 | 1.85% |
Éric Piel | 3 | 0.10% | 1 | 1.85% |
Christoph Hellwig | 3 | 0.10% | 1 | 1.85% |
Simon Kitching | 3 | 0.10% | 1 | 1.85% |
Dave Young | 2 | 0.06% | 1 | 1.85% |
Tetsuo Handa | 2 | 0.06% | 1 | 1.85% |
Adrian Bunk | 1 | 0.03% | 1 | 1.85% |
Jason Gunthorpe | 1 | 0.03% | 1 | 1.85% |
Total | 3135 | 100.00% | 54 | 100.00% |