cregit-Linux: how code gets into the kernel

Release 4.12 include/linux/swap.h

Directory: include/linux
#ifndef _LINUX_SWAP_H

#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <asm/page.h>

struct notifier_block;

struct bio;


#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH 64
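The SWAP_FLAG_* values above define the bit layout of the flags argument to swapon(2): the low 15 bits carry an optional priority (valid only when SWAP_FLAG_PREFER is set), and the higher bits are feature toggles. A minimal stand-alone sketch of the packing; the priority value 10 is an arbitrary example, and the real consumer of these bits is the kernel's swapon path:

#include <stdio.h>

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000	/* enable discard for swap */

int main(void)
{
	int prio = 10;	/* arbitrary example priority */

	/* Pack the priority and request discard, as a swapon() caller would. */
	int flags = SWAP_FLAG_PREFER |
		    ((prio << SWAP_FLAG_PRIO_SHIFT) & SWAP_FLAG_PRIO_MASK) |
		    SWAP_FLAG_DISCARD;

	/* Unpack again, as the kernel does when SWAP_FLAG_PREFER is set. */
	int unpacked = (flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;

	printf("flags=%#x prio=%d\n", flags, unpacked);	/* flags=0x1800a prio=10 */
	return 0;
}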


static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

Contributors

Person          Tokens  Prop     Commits  CommitProp
Andrew Morton   16      100.00%  1        100.00%
Total           16      100.00%  1        100.00%

/*
 * MAX_SWAPFILES defines the maximum number of swaptypes: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture packs
 * the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5

/*
 * Use some of the swap files numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  A list of swap extents maps the entire swapfile.  (Where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file.  Apart
 * from setup, they're handled identically.
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct list_head list;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};

/*
 * Max bad pages in the new format..
 */
#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
#define MAX_SWAP_BADPAGES \
	((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))

enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_DISCARDABLE = (1 << 2),	/* blkdev support discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* its a block device */
	SWP_FILE	= (1 << 7),	/* set after swap_activate success */
	SWP_AREA_DISCARD = (1 << 8),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 9),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 10),	/* no overwrite PG_writeback pages */
					/* add others here before... */
	SWP_SCANNING	= (1 << 11),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

#define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX	0x7f	/* Max count, in each swap_map continuation */
#define COUNT_CONTINUED	0x80	/* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */

/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space with SWAPFILE_CLUSTER pages long and naturally aligns in disk. All
 * free clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores next cluster if the cluster is free or cluster usage
 * counter otherwise. The flags field determines if a cluster is free. This is
 * protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
	spinlock_t lock;	/*
				 * Protect swap_cluster_info fields
				 * and swap_info_struct->swap_map
				 * elements correspond to the swap
				 * cluster
				 */
	unsigned int data:24;
	unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entry from
 * its own cluster and swapout sequentially. The purpose is to optimize swapout
 * throughput.
 */
struct percpu_cluster {
	struct swap_cluster_info index; /* Current cluster index */
	unsigned int next; /* Likely next allocation offset */
};

struct swap_cluster_list {
	struct swap_cluster_info head;
	struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	struct plist_node avail_list;	/* entry in swap_avail_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct swap_cluster_list free_clusters; /* free clusters list */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct swap_extent *curr_swap_extent;
	struct swap_extent first_swap_extent;
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc, free/discard cluster
					 * list. other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock. changing flags need
					 * hold this lock and swap_lock. If
					 * both locks need hold, hold swap_lock
					 * first.
					 */
	struct work_struct discard_work; /* discard worker */
	struct swap_cluster_list discard_clusters; /* discard clusters list */
};

/* linux/mm/workingset.c */
void *workingset_eviction(struct address_space *mapping, struct page *page);
bool workingset_refault(void *shadow);
void workingset_activation(struct page *page);
void workingset_update_node(struct radix_tree_node *node, void *private);

/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
extern unsigned long nr_free_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);

/* Definition of global_page_state not available yet */
#define nr_free_pages() global_page_state(NR_FREE_PAGES)

/* linux/mm/swap.c */
extern void lru_cache_add(struct page *);
extern void lru_cache_add_anon(struct page *page);
extern void lru_cache_add_file(struct page *page);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
			 struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);

extern void add_page_to_unevictable_list(struct page *page);

extern void lru_cache_add_active_or_unevictable(struct page *page,
						struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  bool may_swap);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;

#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
#else
#define node_reclaim_mode 0
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return 0;
}

Contributors

Person             Tokens  Prop    Commits  CommitProp
Christoph Lameter  19      86.36%  1        50.00%
Mel Gorman         3       13.64%  1        50.00%
Total              22      100.00% 2        100.00%
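The MAX_SWAPFILES_SHIFT comment above describes a 5-bit type / 27-bit offset split for swap entries on 32-bit pgoff_t architectures. The real packing lives in the architecture pte helpers and in the generic swp_entry()/swp_type()/swp_offset() helpers of linux/swapops.h; the following stand-alone sketch only mirrors that arithmetic for a 32-bit entry, to make the "27 bits of swapcache pages" claim concrete:

#include <stdio.h>

#define MAX_SWAPFILES_SHIFT	5

/* Illustrative only: type in the high 5 bits, offset in the low 27. */
static unsigned long swp_entry_demo(unsigned type, unsigned long offset)
{
	return ((unsigned long)type << (32 - MAX_SWAPFILES_SHIFT)) | offset;
}

int main(void)
{
	unsigned long e = swp_entry_demo(3, 12345);
	unsigned type = e >> (32 - MAX_SWAPFILES_SHIFT);
	unsigned long off = e & ((1UL << (32 - MAX_SWAPFILES_SHIFT)) - 1);

	/* 27 offset bits => at most 2^27 swapcache pages, per the comment above */
	printf("type=%u offset=%lu max_pages=%lu\n",
	       type, off, 1UL << (32 - MAX_SWAPFILES_SHIFT));
	return 0;
}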

#endif

extern int page_evictable(struct page *page);
extern void check_move_unevictable_pages(struct page **, int nr_pages);

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP

#include <linux/blk_types.h> /* for bio_end_io_t */

/* linux/mm/page_io.c */
extern int swap_readpage(struct page *);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
	bio_end_io_t end_write_func);
extern int swap_set_page_dirty(struct page *page);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

/* linux/mm/swap_state.c */
/* One swap address space for each 64M swap space */
#define SWAP_ADDRESS_SPACE_SHIFT	14
#define SWAP_ADDRESS_SPACE_PAGES	(1 << SWAP_ADDRESS_SPACE_SHIFT)
extern struct address_space *swapper_spaces[];
#define swap_address_space(entry)			    \
	(&swapper_spaces[swp_type(entry)][swp_offset(entry) \
		>> SWAP_ADDRESS_SPACE_SHIFT])
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *, struct list_head *list);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);
extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated);
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);

/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern bool has_usable_swap(void);

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}

Contributors

Person      Tokens  Prop     Commits  CommitProp
Shaohua Li  20      100.00%  1        100.00%
Total       20      100.00%  1        100.00%
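vm_swap_full() above encodes the "swap 50% full" heuristic: once more than half of all swap entries are in use, reclaim starts dropping swapcache more aggressively. A stand-alone sketch with made-up numbers, using plain longs in place of the kernel's atomic_long_t counter:

#include <stdio.h>

int main(void)
{
	long total_swap_pages = 1024;	/* hypothetical swap size in pages */
	long nr_swap_pages = 600;	/* free entries: 600*2 >= 1024 -> not "full" */

	printf("%d\n", nr_swap_pages * 2 < total_swap_pages);	/* 0 */

	nr_swap_pages = 400;		/* free entries: 400*2 < 1024 -> "full" */
	printf("%d\n", nr_swap_pages * 2 < total_swap_pages);	/* 1 */
	return 0;
}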


static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

Contributors

Person      Tokens  Prop     Commits  CommitProp
Shaohua Li  16      100.00%  1        100.00%
Total       16      100.00%  1        100.00%
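The swap_address_space() macro defined above shards the swapcache: one address_space per 64M of swap, indexed by the swap offset shifted down by SWAP_ADDRESS_SPACE_SHIFT. A stand-alone check of that arithmetic (the 4KiB page size is an assumption; it is architecture-dependent):

#include <stdio.h>

/* One swap address space per 64M of swap: 2^14 pages * 4KiB = 64MiB. */
#define SWAP_ADDRESS_SPACE_SHIFT	14

int main(void)
{
	unsigned long page_size = 4096;		/* assumed, arch-dependent */
	unsigned long pages = 1UL << SWAP_ADDRESS_SPACE_SHIFT;

	printf("%lu MiB per address space\n",
	       pages * page_size >> 20);	/* 64 */

	/* Which address space does swap offset 100000 land in? */
	printf("index %lu\n", 100000UL >> SWAP_ADDRESS_SPACE_SHIFT);	/* 6 */
	return 0;
}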

extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[]);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern bool reuse_swap_page(struct page *, int *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);

#else /* CONFIG_SWAP */

#define swap_address_space(entry)		(NULL)
#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc can not include linux/pagemap.h in this file
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr), false);
static inline void show_swap_cache_info(void)
{
}

Contributors

Person             Tokens  Prop    Commits  CommitProp
Con Kolivas        7       87.50%  1        50.00%
Christoph Hellwig  1       12.50%  1        50.00%
Total              8       100.00% 2        100.00%

#define free_swap_and_cache(swp)	is_migration_entry(swp)
#define swapcache_prepare(swp)		is_migration_entry(swp)
static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

Contributors

Person        Tokens  Prop     Commits  CommitProp
Hugh Dickins  16      100.00%  1        100.00%
Total         16      100.00%  1        100.00%
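For context on this stub: the real add_swap_count_continuation() (declared under CONFIG_SWAP above) exists because each swap_map entry is a single byte. Per the SWAP_MAP_* macros earlier in the file, the low bits hold the duplication count up to SWAP_MAP_MAX, SWAP_HAS_CACHE flags a swapcache page, and COUNT_CONTINUED marks counts that spill into a continuation page. A simplified stand-alone sketch of that byte layout (swapfile.c's swap_count() is the authoritative decoder):

#include <stdio.h>

#define SWAP_MAP_MAX	0x3e	/* max duplication count in the first byte */
#define SWAP_HAS_CACHE	0x40	/* page is in the swapcache */
#define COUNT_CONTINUED	0x80	/* count continues in a continuation page */

int main(void)
{
	/* A hypothetical swap_map byte: count 3, page also in swapcache. */
	unsigned char ent = 3 | SWAP_HAS_CACHE;

	unsigned char count = ent & ~SWAP_HAS_CACHE;	/* as swap_count() does */
	printf("count=%u cached=%d continued=%d\n",
	       count, !!(ent & SWAP_HAS_CACHE), !!(ent & COUNT_CONTINUED));
	return 0;
}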


static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

Contributors

Person        Tokens  Prop     Commits  CommitProp
Hugh Dickins  9       100.00%  1        100.00%
Total         9       100.00%  1        100.00%


static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

Contributors

Person             Tokens  Prop    Commits  CommitProp
Kamezawa Hiroyuki  7       53.85%  2        66.67%
Hugh Dickins       6       46.15%  1        33.33%
Total              13      100.00% 3        100.00%


static inline void swap_free(swp_entry_t swp)
{
}

Contributors

Person             Tokens  Prop    Commits  CommitProp
Con Kolivas        5       55.56%  1        50.00%
Christoph Hellwig  4       44.44%  1        50.00%
Total              9       100.00% 2        100.00%


static inline void swapcache_free(swp_entry_t swp)
{
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Kamezawa Hiroyuki  9       100.00%  1        100.00%
Total              9       100.00%  1        100.00%


static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	return NULL;
}

Contributors

Person             Tokens  Prop    Commits  CommitProp
Con Kolivas        16      59.26%  1        20.00%
Andrew Morton      4       14.81%  1        20.00%
Hugh Dickins       4       14.81%  2        40.00%
Christoph Hellwig  3       11.11%  1        20.00%
Total              27      100.00% 5        100.00%


static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

Contributors

Person        Tokens  Prop     Commits  CommitProp
Hugh Dickins  20      100.00%  1        100.00%
Total         20      100.00%  1        100.00%


static inline struct page *lookup_swap_cache(swp_entry_t swp)
{
	return NULL;
}

Contributors

Person             Tokens  Prop    Commits  CommitProp
Con Kolivas        11      73.33%  1        50.00%
Christoph Hellwig  4       26.67%  1        50.00%
Total              15      100.00% 2        100.00%


static inline int add_to_swap(struct page *page, struct list_head *list)
{
	return 0;
}

Contributors

Person        Tokens  Prop    Commits  CommitProp
Hugh Dickins  15      75.00%  1        50.00%
Shaohua Li    5       25.00%  1        50.00%
Total         20      100.00% 2        100.00%


static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
							gfp_t gfp_mask)
{
	return -1;
}

Contributors

Person             Tokens  Prop    Commits  CommitProp
Con Kolivas        14      63.64%  1        33.33%
Christoph Hellwig  4       18.18%  1        33.33%
Hugh Dickins       4       18.18%  1        33.33%
Total              22      100.00% 3        100.00%


static inline void __delete_from_swap_cache(struct page *page)
{
}

Contributors

Person             Tokens  Prop    Commits  CommitProp
Con Kolivas        8       72.73%  1        50.00%
Christoph Hellwig  3       27.27%  1        50.00%
Total              11      100.00% 2        100.00%


static inline void delete_from_swap_cache(struct page *page)
{
}

Contributors

Person             Tokens  Prop    Commits  CommitProp
Con Kolivas        8       72.73%  1        50.00%
Christoph Hellwig  3       27.27%  1        50.00%
Total              11      100.00% 2        100.00%


static inline int page_swapcount(struct page *page)
{
	return 0;
}

Contributors

Person        Tokens  Prop    Commits  CommitProp
Hugh Dickins  13      86.67%  1        50.00%
Tim Chen      2       13.33%  1        50.00%
Total         15      100.00% 2        100.00%


static inline int __swp_swapcount(swp_entry_t entry)
{
	return 0;
}

Contributors

Person        Tokens  Prop    Commits  CommitProp
Tim Chen      11      84.62%  1        50.00%
Hugh Dickins  2       15.38%  1        50.00%
Total         13      100.00% 2        100.00%


static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

Contributors

Person       Tokens  Prop     Commits  CommitProp
MinChan Kim  13      100.00%  1        100.00%
Total        13      100.00%  1        100.00%

#define reuse_swap_page(page, total_mapcount) \
	(page_trans_huge_mapcount(page, total_mapcount) == 1)
static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

Contributors

Person        Tokens  Prop    Commits  CommitProp
Rik Van Riel  14      93.33%  1        50.00%
Hugh Dickins  1       6.67%   1        50.00%
Total         15      100.00% 2        100.00%


static inline swp_entry_t get_swap_page(void)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Christoph Hellwig  21      100.00%  1        100.00%
Total              21      100.00%  1        100.00%

#endif /* CONFIG_SWAP */

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return vm_swappiness;

	/* root ? */
	if (mem_cgroup_disabled() || !memcg->css.parent)
		return vm_swappiness;

	return memcg->swappiness;
}

Contributors

Person            Tokens  Prop    Commits  CommitProp
Vladimir Davydov  33      75.00%  1        50.00%
Johannes Weiner   11      25.00%  1        50.00%
Total             44      100.00% 2        100.00%

#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}

Contributors

Person            Tokens  Prop     Commits  CommitProp
Vladimir Davydov  15      100.00%  1        100.00%
Total             15      100.00%  1        100.00%

#endif

#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
extern void mem_cgroup_uncharge_swap(swp_entry_t entry);
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct page *page);
#else
static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
}

Contributors

Person            Tokens  Prop     Commits  CommitProp
Vladimir Davydov  14      100.00%  1        100.00%
Total             14      100.00%  1        100.00%


static inline int mem_cgroup_try_charge_swap(struct page *page,
					     swp_entry_t entry)
{
	return 0;
}

Contributors

Person            Tokens  Prop     Commits  CommitProp
Vladimir Davydov  18      100.00%  1        100.00%
Total             18      100.00%  1        100.00%


static inline void mem_cgroup_uncharge_swap(swp_entry_t entry)
{
}

Contributors

Person            Tokens  Prop     Commits  CommitProp
Vladimir Davydov  9       100.00%  1        100.00%
Total             9       100.00%  1        100.00%


static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

Contributors

Person            Tokens  Prop     Commits  CommitProp
Vladimir Davydov  16      100.00%  1        100.00%
Total             16      100.00%  1        100.00%


static inline bool mem_cgroup_swap_full(struct page *page)
{
	return vm_swap_full();
}

Contributors

Person            Tokens  Prop     Commits  CommitProp
Vladimir Davydov  16      100.00%  1        100.00%
Total             16      100.00%  1        100.00%

#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */

Overall Contributors

Person                    Tokens  Prop    Commits  CommitProp
Hugh Dickins              318     14.17%  28       16.00%
Linus Torvalds (pre-git)  224     9.98%   20       11.43%
Vladimir Davydov          189     8.42%   3        1.71%
Andrew Morton             166     7.40%   19       10.86%
Christoph Hellwig         160     7.13%   4        2.29%
Shaohua Li                149     6.64%   7        4.00%
Johannes Weiner           117     5.21%   8        4.57%
Mel Gorman                106     4.72%   12       6.86%
Christoph Lameter         87      3.88%   7        4.00%
Con Kolivas               69      3.07%   1        0.57%
Huang Ying                57      2.54%   3        1.71%
Tim Chen                  56      2.50%   4        2.29%
MinChan Kim               51      2.27%   6        3.43%
Seth Jennings             40      1.78%   2        1.14%
Kamezawa Hiroyuki         37      1.65%   3        1.71%
Andi Kleen                36      1.60%   1        0.57%
Lee Schermerhorn          35      1.56%   5        2.86%
Rafael J. Wysocki         33      1.47%   4        2.29%
Linus Torvalds            31      1.38%   6        3.43%
Rafael Aquini             31      1.38%   1        0.57%
Dmitry Safonov            24      1.07%   1        0.57%
Andrea Arcangeli          22      0.98%   2        1.14%
Harvey Harrison           20      0.89%   1        0.57%
Rik Van Riel              20      0.89%   1        0.57%
Andreas Dilger            19      0.85%   1        0.57%
Jeff Garzik               19      0.85%   1        0.57%
Balbir Singh              17      0.76%   1        0.57%
Dan Magenheimer           15      0.67%   1        0.57%
Jianyu Zhan               13      0.58%   1        0.57%
Dan Streetman             10      0.45%   2        1.14%
Nitin Gupta               9       0.40%   1        0.57%
David Rientjes            8       0.36%   1        0.57%
Konstantin Khlebnikov     8       0.36%   1        0.57%
Yasunori Goto             8       0.36%   1        0.57%
JoonSoo Kim               7       0.31%   1        0.57%
Dave Jones                6       0.27%   1        0.57%
Nicholas Piggin           6       0.27%   1        0.57%
Andy Whitcroft            4       0.18%   1        0.57%
Hideo Aoki                4       0.18%   1        0.57%
Kirill A. Shutemov        3       0.13%   3        1.71%
Martin Schwidefsky        3       0.13%   1        0.57%
Zhang Yanfei              3       0.13%   2        1.14%
Al Viro                   1       0.04%   1        0.57%
Chris Metcalf             1       0.04%   1        0.57%
Miklos Szeredi            1       0.04%   1        0.57%
Arun Sharma               1       0.04%   1        0.57%
Total                     2244    100.00% 175      100.00%
Created with cregit.