/* Release 4.14 tools/vm/page-types.c */
/*
* page-types: Tool for querying page flags
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; version 2.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should find a copy of v2 of the GNU General Public License somewhere on
* your Linux system; if not, write to the Free Software Foundation, Inc., 59
* Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*
* Copyright (C) 2009 Intel corporation
*
* Authors: Wu Fengguang <fengguang.wu@intel.com>
*/
#define _FILE_OFFSET_BITS 64
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdint.h>
#include <stdarg.h>
#include <string.h>
#include <getopt.h>
#include <limits.h>
#include <assert.h>
#include <ftw.h>
#include <time.h>
#include <setjmp.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/fcntl.h>
#include <sys/mount.h>
#include <sys/statfs.h>
#include <sys/mman.h>
#include "../../include/uapi/linux/magic.h"
#include "../../include/uapi/linux/kernel-page-flags.h"
#include <api/fs/fs.h>
#ifndef MAX_PATH
# define MAX_PATH 256
#endif
#ifndef STR
# define _STR(x) #x
# define STR(x) _STR(x)
#endif
/*
* pagemap kernel ABI bits
*/
#define PM_ENTRY_BYTES 8
#define PM_PFRAME_BITS 55
#define PM_PFRAME_MASK ((1LL << PM_PFRAME_BITS) - 1)
#define PM_PFRAME(x) ((x) & PM_PFRAME_MASK)
#define MAX_SWAPFILES_SHIFT 5
#define PM_SWAP_OFFSET(x) (((x) & PM_PFRAME_MASK) >> MAX_SWAPFILES_SHIFT)
#define PM_SOFT_DIRTY (1ULL << 55)
#define PM_MMAP_EXCLUSIVE (1ULL << 56)
#define PM_FILE (1ULL << 61)
#define PM_SWAP (1ULL << 62)
#define PM_PRESENT (1ULL << 63)
/*
* kernel page flags
*/
#define KPF_BYTES 8
#define PROC_KPAGEFLAGS "/proc/kpageflags"
#define PROC_KPAGECGROUP "/proc/kpagecgroup"
/* [32-] kernel hacking assistances */
#define KPF_RESERVED 32
#define KPF_MLOCKED 33
#define KPF_MAPPEDTODISK 34
#define KPF_PRIVATE 35
#define KPF_PRIVATE_2 36
#define KPF_OWNER_PRIVATE 37
#define KPF_ARCH 38
#define KPF_UNCACHED 39
#define KPF_SOFTDIRTY 40
/* [48-] take some arbitrary free slots for expanding overloaded flags
* not part of kernel API
*/
#define KPF_READAHEAD 48
#define KPF_SLOB_FREE 49
#define KPF_SLUB_FROZEN 50
#define KPF_SLUB_DEBUG 51
#define KPF_FILE 61
#define KPF_SWAP 62
#define KPF_MMAP_EXCLUSIVE 63
#define KPF_ALL_BITS ((uint64_t)~0ULL)
#define KPF_HACKERS_BITS (0xffffULL << 32)
#define KPF_OVERLOADED_BITS (0xffffULL << 48)
#define BIT(name) (1ULL << KPF_##name)
#define BITS_COMPOUND (BIT(COMPOUND_HEAD) | BIT(COMPOUND_TAIL))
static const char * const page_flag_names[] = {
[KPF_LOCKED] = "L:locked",
[KPF_ERROR] = "E:error",
[KPF_REFERENCED] = "R:referenced",
[KPF_UPTODATE] = "U:uptodate",
[KPF_DIRTY] = "D:dirty",
[KPF_LRU] = "l:lru",
[KPF_ACTIVE] = "A:active",
[KPF_SLAB] = "S:slab",
[KPF_WRITEBACK] = "W:writeback",
[KPF_RECLAIM] = "I:reclaim",
[KPF_BUDDY] = "B:buddy",
[KPF_MMAP] = "M:mmap",
[KPF_ANON] = "a:anonymous",
[KPF_SWAPCACHE] = "s:swapcache",
[KPF_SWAPBACKED] = "b:swapbacked",
[KPF_COMPOUND_HEAD] = "H:compound_head",
[KPF_COMPOUND_TAIL] = "T:compound_tail",
[KPF_HUGE] = "G:huge",
[KPF_UNEVICTABLE] = "u:unevictable",
[KPF_HWPOISON] = "X:hwpoison",
[KPF_NOPAGE] = "n:nopage",
[KPF_KSM] = "x:ksm",
[KPF_THP] = "t:thp",
[KPF_BALLOON] = "o:balloon",
[KPF_ZERO_PAGE] = "z:zero_page",
[KPF_IDLE] = "i:idle_page",
[KPF_RESERVED] = "r:reserved",
[KPF_MLOCKED] = "m:mlocked",
[KPF_MAPPEDTODISK] = "d:mappedtodisk",
[KPF_PRIVATE] = "P:private",
[KPF_PRIVATE_2] = "p:private_2",
[KPF_OWNER_PRIVATE] = "O:owner_private",
[KPF_ARCH] = "h:arch",
[KPF_UNCACHED] = "c:uncached",
[KPF_SOFTDIRTY] = "f:softdirty",
[KPF_READAHEAD] = "I:readahead",
[KPF_SLOB_FREE] = "P:slob_free",
[KPF_SLUB_FROZEN] = "A:slub_frozen",
[KPF_SLUB_DEBUG] = "E:slub_debug",
[KPF_FILE] = "F:file",
[KPF_SWAP] = "w:swap",
[KPF_MMAP_EXCLUSIVE] = "1:mmap_exclusive",
};
static const char * const debugfs_known_mountpoints[] = {
"/sys/kernel/debug",
"/debug",
0,
};
/*
* data structures
*/
static int opt_raw;
/* for kernel developers */
static int opt_list;
/* list pages (in ranges) */
static int opt_no_summary;
/* don't show summary */
static pid_t opt_pid;
/* process to walk */
const char * opt_file;
/* file or directory path */
static uint64_t opt_cgroup;
/* cgroup inode */
static int opt_list_cgroup;
/* list page cgroup */
#define MAX_ADDR_RANGES 1024
static int nr_addr_ranges;
static unsigned long opt_offset[MAX_ADDR_RANGES];
static unsigned long opt_size[MAX_ADDR_RANGES];
#define MAX_VMAS 10240
static int nr_vmas;
static unsigned long pg_start[MAX_VMAS];
static unsigned long pg_end[MAX_VMAS];
#define MAX_BIT_FILTERS 64
static int nr_bit_filters;
static uint64_t opt_mask[MAX_BIT_FILTERS];
static uint64_t opt_bits[MAX_BIT_FILTERS];
static int page_size;
static int pagemap_fd;
static int kpageflags_fd;
static int kpagecgroup_fd = -1;
static int opt_hwpoison;
static int opt_unpoison;
static const char *hwpoison_debug_fs;
static int hwpoison_inject_fd;
static int hwpoison_forget_fd;
#define HASH_SHIFT 13
#define HASH_SIZE (1 << HASH_SHIFT)
#define HASH_MASK (HASH_SIZE - 1)
#define HASH_KEY(flags) (flags & HASH_MASK)
static unsigned long total_pages;
static unsigned long nr_pages[HASH_SIZE];
static uint64_t page_flags[HASH_SIZE];
/*
* helper functions
*/
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define min_t(type, x, y) ({ \
type __min1 = (x); \
type __min2 = (y); \
__min1 < __min2 ? __min1 : __min2; })
#define max_t(type, x, y) ({ \
type __max1 = (x); \
type __max2 = (y); \
__max1 > __max2 ? __max1 : __max2; })
/* Convert a page count to megabytes using the runtime page size. */
static unsigned long pages2mb(unsigned long pages)
{
	unsigned long bytes = pages * page_size;

	return bytes >> 20;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Fengguang Wu | 19 | 95.00% | 1 | 50.00% |
Ladinu Chandrasinghe | 1 | 5.00% | 1 | 50.00% |
Total | 20 | 100.00% | 2 | 100.00% |
/* Print a printf-style message to stderr and terminate the program. */
static void fatal(const char *x, ...)
{
	va_list args;

	va_start(args, x);
	vfprintf(stderr, x, args);
	va_end(args);

	exit(EXIT_FAILURE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Fengguang Wu | 41 | 97.62% | 1 | 50.00% |
Ladinu Chandrasinghe | 1 | 2.38% | 1 | 50.00% |
Total | 42 | 100.00% | 2 | 100.00% |
/* open(2) wrapper: on failure report the path via perror() and abort. */
static int checked_open(const char *pathname, int flags)
{
	int fd = open(pathname, flags);

	if (fd >= 0)
		return fd;

	perror(pathname);
	exit(EXIT_FAILURE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Fengguang Wu | 44 | 97.78% | 1 | 50.00% |
Josh Triplett | 1 | 2.22% | 1 | 50.00% |
Total | 45 | 100.00% | 2 | 100.00% |
/*
* pagemap/kpageflags routines
*/
/*
 * Read "count" 64-bit entries starting at entry "index" from "fd".
 *
 * Returns the number of whole entries actually read (may be short at
 * EOF).  Aborts on a read error, on a torn (non-multiple-of-8) read,
 * and when the byte offset would overflow before the off_t cast.
 * "name" is used only for error reporting.
 */
static unsigned long do_u64_read(int fd, char *name,
				 uint64_t *buf,
				 unsigned long index,
				 unsigned long count)
{
	long bytes;

	/* index * 8 must not wrap in unsigned long arithmetic */
	if (index > ULONG_MAX / 8)
		fatal("index overflow: %lu\n", index);

	bytes = pread(fd, buf, count * 8, (off_t)index * 8);
	if (bytes < 0) {
		perror(name);
		exit(EXIT_FAILURE);
	}
	/* a partial entry means the file is corrupt or the ABI changed */
	if (bytes % 8)
		fatal("partial read: %ld bytes\n", bytes);	/* %ld: bytes is long */

	return bytes / 8;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Fengguang Wu | 92 | 92.00% | 1 | 50.00% |
Konstantin Khlebnikov | 8 | 8.00% | 1 | 50.00% |
Total | 100 | 100.00% | 2 | 100.00% |
/* Fetch kernel page-flag words for "pages" frames starting at PFN "index". */
static unsigned long kpageflags_read(uint64_t *buf,
				     unsigned long index,
				     unsigned long pages)
{
	unsigned long nread;

	nread = do_u64_read(kpageflags_fd, PROC_KPAGEFLAGS, buf, index, pages);
	return nread;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Fengguang Wu | 33 | 100.00% | 1 | 100.00% |
Total | 33 | 100.00% | 1 | 100.00% |
static unsigned long kpagecgroup_read(uint64_t *buf,
unsigned long index,
unsigned long pages)
{
if (kpagecgroup_fd < 0)
return pages;
return do_u64_read(kpagecgroup_fd, PROC_KPAGEFLAGS, buf, index, pages);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Konstantin Khlebnikov | 42 | 100.00% | 1 | 100.00% |
Total | 42 | 100.00% | 1 | 100.00% |
/* Fetch pagemap entries for "pages" virtual pages starting at "index". */
static unsigned long pagemap_read(uint64_t *buf,
				  unsigned long index,
				  unsigned long pages)
{
	unsigned long nread;

	nread = do_u64_read(pagemap_fd, "/proc/pid/pagemap", buf, index, pages);
	return nread;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Fengguang Wu | 33 | 100.00% | 1 | 100.00% |
Total | 33 | 100.00% | 1 | 100.00% |
/* Extract the PFN from a pagemap entry; 0 when the page is not present. */
static unsigned long pagemap_pfn(uint64_t val)
{
	return (val & PM_PRESENT) ? PM_PFRAME(val) : 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Fengguang Wu | 35 | 100.00% | 1 | 100.00% |
Total | 35 | 100.00% | 1 | 100.00% |
/* Extract the swap offset from a pagemap entry; 0 for non-swap entries. */
static unsigned long pagemap_swap_offset(uint64_t val)
{
	unsigned long offset = 0;

	if (val & PM_SWAP)
		offset = PM_SWAP_OFFSET(val);
	return offset;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Naoya Horiguchi | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
/*
* page flag names
*/
/*
 * Render "flags" as a fixed-width string of one-character mnemonics:
 * one output char per named flag, the flag's short name when set and
 * '_' when clear.  A set bit without a name aborts the program.
 *
 * Returns a pointer to a static buffer overwritten by each call.
 */
static char *page_flag_name(uint64_t flags)
{
	static char buf[65];
	int present;
	size_t i, j;

	for (i = 0, j = 0; i < ARRAY_SIZE(page_flag_names); i++) {
		present = (flags >> i) & 1;
		if (!page_flag_names[i]) {
			if (present)
				fatal("unknown flag bit %zu\n", i);	/* %zu: i is size_t */
			continue;
		}
		buf[j++] = present ? page_flag_names[i][0] : '_';
	}
	buf[j] = '\0';	/* terminate explicitly; don't rely on static zero-init */

	return buf;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Fengguang Wu | 97 | 97.00% | 1 | 25.00% |
Ulrich Drepper | 1 | 1.00% | 1 | 25.00% |
Ladinu Chandrasinghe | 1 | 1.00% | 1 | 25.00% |
André Goddard Rosa | 1 | 1.00% | 1 | 25.00% |
Total | 100 | 100.00% | 4 | 100.00% |
/*
 * Render "flags" as a comma-separated list of long flag names
 * (e.g. "lru,active").  Returns a pointer to a static buffer
 * overwritten by each call.
 */
static char *page_flag_longname(uint64_t flags)
{
	static char buf[1024];
	size_t i, n;

	for (i = 0, n = 0; i < ARRAY_SIZE(page_flag_names); i++) {
		if (!page_flag_names[i])
			continue;
		if ((flags >> i) & 1) {
			/* "+ 2" skips the "X:" mnemonic prefix */
			n += snprintf(buf + n, sizeof(buf) - n, "%s,",
				      page_flag_names[i] + 2);
			/*
			 * snprintf returns the untruncated length; clamp n so
			 * "buf + n" and "sizeof(buf) - n" stay in range even
			 * if the output was truncated.
			 */
			if (n >= sizeof(buf))
				n = sizeof(buf) - 1;
		}
	}
	if (n)
		n--;	/* strip the trailing comma */
	buf[n] = '\0';

	return buf;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Fengguang Wu | 103 | 98.10% | 1 | 33.33% |
Ulrich Drepper | 1 | 0.95% | 1 | 33.33% |
Ladinu Chandrasinghe | 1 | 0.95% | 1 | 33.33% |
Total | 105 | 100.00% | 3 | 100.00% |
/*
* page list and summary
*/
/*
 * Coalesce consecutive pages with identical flags and cgroup into one
 * output line.  State lives in function statics: a call whose page does
 * not contiguously extend the pending run flushes that run and starts a
 * new one.  Calling with size == 0 (see flush_page_range()) forces a
 * final flush.
 */
static void show_page_range(unsigned long voffset, unsigned long offset,
		unsigned long size, uint64_t flags, uint64_t cgroup)
{
	static uint64_t flags0;		/* flags of the pending run */
	static uint64_t cgroup0;	/* cgroup of the pending run */
	static unsigned long voff;	/* first virtual page of the run */
	static unsigned long index;	/* first physical page of the run */
	static unsigned long count;	/* pages accumulated in the run */

	/* does this page contiguously extend the pending run? */
	if (flags == flags0 && cgroup == cgroup0 && offset == index + count &&
	    size && voffset == voff + count) {
		count += size;
		return;
	}

	/* flush the pending run, if any */
	if (count) {
		if (opt_pid)
			printf("%lx\t", voff);
		if (opt_file)
			printf("%lu\t", voff);
		if (opt_list_cgroup)
			printf("@%llu\t", (unsigned long long)cgroup0);
		printf("%lx\t%lx\t%s\n",
				index, count, page_flag_name(flags0));
	}

	/* start a new run with the current page */
	flags0 = flags;
	cgroup0= cgroup;
	index = offset;
	voff = voffset;
	count = size;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Fengguang Wu | 102 | 65.38% | 3 | 50.00% |
Konstantin Khlebnikov | 53 | 33.97% | 2 | 33.33% |
Ladinu Chandrasinghe | 1 | 0.64% | 1 | 16.67% |
Total | 156 | 100.00% | 6 | 100.00% |
/* Emit any pending coalesced range and reset the range accumulator. */
static void flush_page_range(void)
{
	show_page_range(0, 0, 0, 0, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Konstantin Khlebnikov | 19 | 90.48% | 2 | 66.67% |
Fengguang Wu | 2 | 9.52% | 1 | 33.33% |
Total | 21 | 100.00% | 3 | 100.00% |
/*
 * Print one page as a single line: optional virtual offset (hex for pid
 * walks, decimal for file walks), optional cgroup inode, then the
 * physical offset and the symbolic flag string.
 */
static void show_page(unsigned long voffset, unsigned long offset,
		uint64_t flags, uint64_t cgroup)
{
	if (opt_pid)
		printf("%lx\t", voffset);
	if (opt_file)
		printf("%lu\t", voffset);
	if (opt_list_cgroup)
		printf("@%llu\t", (unsigned long long)cgroup);
	printf("%lx\t%s\n", offset, page_flag_name(flags));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Fengguang Wu | 39 | 55.71% | 3 | 50.00% |
Konstantin Khlebnikov | 30 | 42.86% | 2 | 33.33% |
Ladinu Chandrasinghe | 1 | 1.43% | 1 | 16.67% |
Total | 70 | 100.00% | 6 | 100.00% |
/*
 * Print the per-flag-combination histogram accumulated in nr_pages[]
 * and page_flags[], followed by a grand total line.
 */
static void show_summary(void)
{
	size_t i;

	/* table header */
	printf(" flags\tpage-count MB"
		" symbolic-flags\t\t\tlong-symbolic-flags\n");

	for (i = 0; i < ARRAY_SIZE(nr_pages); i++) {
		/* only slots that were actually populated by hash_slot() */
		if (nr_pages[i])
			printf("0x%016llx\t%10lu %8lu %s\t%s\n",
				(unsigned long long)page_flags[i],
				nr_pages[i],
				pages2mb(nr_pages[i]),
				page_flag_name(page_flags[i]),
				page_flag_longname(page_flags[i]));
	}

	printf(" total\t%10lu %8lu\n",
			total_pages, pages2mb(total_pages));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Fengguang Wu | 96 | 97.96% | 1 | 33.33% |
Ulrich Drepper | 1 | 1.02% | 1 | 33.33% |
Ladinu Chandrasinghe | 1 | 1.02% | 1 | 33.33% |
Total | 98 | 100.00% | 3 | 100.00% |
/*
* page flag filters
*/
/*
 * Apply the user-specified bit filters (opt_mask[]/opt_bits[]) to
 * "flags".  Returns 1 when the page passes every filter, 0 otherwise.
 */
static int bit_mask_ok(uint64_t flags)
{
	int i;

	for (i = 0; i < nr_bit_filters; i++) {
		uint64_t masked = flags & opt_mask[i];

		if (opt_bits[i] == KPF_ALL_BITS) {
			/* "any" filter: at least one masked bit must be set */
			if (!masked)
				return 0;
		} else if (masked != opt_bits[i]) {
			/* exact-match filter */
			return 0;
		}
	}

	return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Fengguang Wu | 78 | 98.73% | 1 | 50.00% |
Ladinu Chandrasinghe | 1 | 1.27% | 1 | 50.00% |
Total | 79 | 100.00% | 2 | 100.00% |
/*
 * In raw mode, decode flag bits that the kernel overloads with several
 * meanings into distinct pseudo-flags (bits 48+, not part of the kernel
 * ABI), and fold relevant pagemap-entry bits ("pme") into the flags word.
 */
static uint64_t expand_overloaded_flags(uint64_t flags, uint64_t pme)
{
	/* SLOB/SLUB overload several page flags */
	if (flags & BIT(SLAB)) {
		/* each XOR clears the overloaded bit and sets its alias */
		if (flags & BIT(PRIVATE))
			flags ^= BIT(PRIVATE) | BIT(SLOB_FREE);
		if (flags & BIT(ACTIVE))
			flags ^= BIT(ACTIVE) | BIT(SLUB_FROZEN);
		if (flags & BIT(ERROR))
			flags ^= BIT(ERROR) | BIT(SLUB_DEBUG);
	}
	/* PG_reclaim is overloaded as PG_readahead in the read path */
	if ((flags & (BIT(RECLAIM) | BIT(WRITEBACK))) == BIT(RECLAIM))
		flags ^= BIT(RECLAIM) | BIT(READAHEAD);

	/* import per-mapping state from the pagemap entry */
	if (pme & PM_SOFT_DIRTY)
		flags |= BIT(SOFTDIRTY);
	if (pme & PM_FILE)
		flags |= BIT(FILE);
	if (pme & PM_SWAP)
		flags |= BIT(SWAP);
	if (pme & PM_MMAP_EXCLUSIVE)
		flags |= BIT(MMAP_EXCLUSIVE);

	return flags;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Fengguang Wu | 122 | 68.54% | 1 | 20.00% |
Naoya Horiguchi | 29 | 16.29% | 2 | 40.00% |
Konstantin Khlebnikov | 26 | 14.61% | 1 | 20.00% |
Ladinu Chandrasinghe | 1 | 0.56% | 1 | 20.00% |
Total | 178 | 100.00% | 5 | 100.00% |
/*
 * Reduce raw kpageflags to the user-visible set: drop the kernel-hacking
 * bits and hide compound head/tail on non-hugeTLB compound pages.
 */
static uint64_t well_known_flags(uint64_t flags)
{
	uint64_t ret = flags & ~KPF_HACKERS_BITS;

	/* only hugeTLB pages keep their compound head/tail markers */
	if (!(ret & BIT(HUGE)))
		ret &= ~BITS_COMPOUND;

	return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Fengguang Wu | 41 | 97.62% | 1 | 50.00% |
Ladinu Chandrasinghe | 1 | 2.38% | 1 | 50.00% |
Total | 42 | 100.00% | 2 | 100.00% |
/*
 * Post-process a raw kpageflags word: expand overloaded bits in raw
 * mode, otherwise reduce to the well-known user-visible flags.
 */
static uint64_t kpageflags_flags(uint64_t flags, uint64_t pme)
{
	return opt_raw ? expand_overloaded_flags(flags, pme)
		       : well_known_flags(flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Fengguang Wu | 26 | 72.22% | 1 | 50.00% |
Naoya Horiguchi | 10 | 27.78% | 1 | 50.00% |
Total | 36 | 100.00% | 2 | 100.00% |
/*
* page actions
*/
/*
 * Mount debugfs (if not already mounted) and open the hwpoison
 * corrupt-pfn / unpoison-pfn control files requested by the command
 * line options.  Aborts if debugfs cannot be mounted.
 */
static void prepare_hwpoison_fd(void)
{
	char path[MAX_PATH + 1];

	hwpoison_debug_fs = debugfs__mount();
	if (!hwpoison_debug_fs) {
		perror("mount debugfs");
		exit(EXIT_FAILURE);
	}

	if (opt_hwpoison && !hwpoison_inject_fd) {
		snprintf(path, MAX_PATH, "%s/hwpoison/corrupt-pfn",
			 hwpoison_debug_fs);
		hwpoison_inject_fd = checked_open(path, O_WRONLY);
	}

	if (opt_unpoison && !hwpoison_forget_fd) {
		snprintf(path, MAX_PATH, "%s/hwpoison/unpoison-pfn",
			 hwpoison_debug_fs);
		hwpoison_forget_fd = checked_open(path, O_WRONLY);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Fengguang Wu | 51 | 53.12% | 1 | 25.00% |
Chen Gong | 35 | 36.46% | 1 | 25.00% |
Borislav Petkov | 8 | 8.33% | 1 | 25.00% |
Arnaldo Carvalho de Melo | 2 | 2.08% | 1 | 25.00% |
Total | 96 | 100.00% | 4 | 100.00% |
/*
 * Ask the kernel to hwpoison the page at PFN "offset" by writing it to
 * debugfs corrupt-pfn.  Returns 0 on success, negative on write error.
 */
static int hwpoison_page(unsigned long offset)
{
	char line[100];
	int n;

	n = sprintf(line, "0x%lx\n", offset);
	n = write(hwpoison_inject_fd, line, n);
	if (n >= 0)
		return 0;

	perror("hwpoison inject");
	return n;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Fengguang Wu | 60 | 100.00% | 1 | 100.00% |
Total | 60 | 100.00% | 1 | 100.00% |
/*
 * Ask the kernel to unpoison the page at PFN "offset" by writing it to
 * debugfs unpoison-pfn.  Returns 0 on success, negative on write error.
 */
static int unpoison_page(unsigned long offset)
{
	char line[100];
	int n;

	n = sprintf(line, "0x%lx\n", offset);
	n = write(hwpoison_forget_fd, line, n);
	if (n >= 0)
		return 0;

	perror("hwpoison forget");
	return n;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Fengguang Wu | 60 | 100.00% | 1 | 100.00% |
Total | 60 | 100.00% | 1 | 100.00% |
/*
* page frame walker
*/
/*
 * Map a flags value to its slot in page_flags[]/nr_pages[] using open
 * addressing with linear probing, inserting the flags value on first
 * sight.  Aborts when the table is full.
 */
static size_t hash_slot(uint64_t flags)
{
	size_t k = HASH_KEY(flags);
	size_t i;

	/* Explicitly reserve slot 0 for flags 0: the following logic
	 * cannot distinguish an unoccupied slot from slot (flags==0).
	 */
	if (flags == 0)
		return 0;

	/* search through the remaining (HASH_SIZE-1) slots */
	for (i = 1; i < ARRAY_SIZE(page_flags); i++, k++) {
		/* wrap the probe index to slot 1 (slot 0 is reserved) */
		if (!k || k >= ARRAY_SIZE(page_flags))
			k = 1;
		if (page_flags[k] == 0) {	/* empty slot: claim it */
			page_flags[k] = flags;
			return k;
		}
		if (page_flags[k] == flags)	/* existing entry */
			return k;
	}

	fatal("hash table full: bump up HASH_SHIFT?\n");
	exit(EXIT_FAILURE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Fengguang Wu | 107 | 96.40% | 1 | 33.33% |
Ulrich Drepper | 3 | 2.70% | 1 | 33.33% |
Ladinu Chandrasinghe | 1 | 0.90% | 1 | 33.33% |
Total | 111 | 100.00% | 3 | 100.00% |
/*
 * Account one page: post-process its flags, apply the bit and cgroup
 * filters, perform any requested hwpoison action, emit the listing
 * output if requested, and update the summary histogram.
 */
static void add_page(unsigned long voffset, unsigned long offset,
		uint64_t flags, uint64_t cgroup, uint64_t pme)
{
	flags = kpageflags_flags(flags, pme);

	if (!bit_mask_ok(flags))
		return;

	if (opt_cgroup && cgroup != (uint64_t)opt_cgroup)
		return;

	/* poison/unpoison only pages that survived the filters above */
	if (opt_hwpoison)
		hwpoison_page(offset);
	if (opt_unpoison)
		unpoison_page(offset);

	if (opt_list == 1)	/* coalesced ranges */
		show_page_range(voffset, offset, 1, flags, cgroup);
	else if (opt_list == 2)	/* one line per page */
		show_page(voffset, offset, flags, cgroup);

	nr_pages[hash_slot(flags)]++;
	total_pages++;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Fengguang Wu | 93 | 77.50% | 4 | 50.00% |
Konstantin Khlebnikov | 21 | 17.50% | 2 | 25.00% |
Naoya Horiguchi | 5 | 4.17% | 1 | 12.50% |
Ladinu Chandrasinghe | 1 | 0.83% | 1 | 12.50% |
Total | 120 | 100.00% | 8 | 100.00% |
#define KPAGEFLAGS_BATCH (64 << 10)
/* 64k pages */
/*
 * Walk "count" physical pages starting at PFN "index", mapped at
 * virtual page "voffset" (when walking a task), reading flags in
 * batches of KPAGEFLAGS_BATCH and accounting each page via add_page().
 * "pme" is the pagemap entry the PFN came from (0 when scanning all of
 * physical memory).
 */
static void walk_pfn(unsigned long voffset,
		unsigned long index,
		unsigned long count,
		uint64_t pme)
{
	uint64_t buf[KPAGEFLAGS_BATCH];
	uint64_t cgi[KPAGEFLAGS_BATCH];
	unsigned long batch;
	unsigned long pages;
	unsigned long i;

	/*
	 * kpagecgroup_read() reads only if kpagecgroup were opened, but
	 * /proc/kpagecgroup might even not exist, so it's better to fill
	 * them with zeros here.
	 */
	if (count == 1)
		cgi[0] = 0;
	else
		memset(cgi, 0, sizeof cgi);

	while (count) {
		batch = min_t(unsigned long, count, KPAGEFLAGS_BATCH);
		pages = kpageflags_read(buf, index, batch);
		if (pages == 0)	/* short read: PFN beyond end of memory */
			break;

		if (kpagecgroup_read(cgi, index, pages) != pages)
			fatal("kpagecgroup returned fewer pages than expected");

		for (i = 0; i < pages; i++)
			add_page(voffset + i, index + i, buf[i], cgi[i], pme);

		index += pages;
		count -= pages;
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Fengguang Wu | 106 | 63.47% | 3 | 37.50% |
Konstantin Khlebnikov | 39 | 23.35% | 1 | 12.50% |
Naoya Horiguchi | 20 | 11.98% | 2 | 25.00% |
Ulrich Drepper | 1 | 0.60% | 1 | 12.50% |
Ladinu Chandrasinghe | 1 | 0.60% | 1 | 12.50% |
Total | 167 | 100.00% | 8 | 100.00% |
/*
 * Account one swapped-out page from a pagemap entry.  Swap entries
 * carry no PFN, so only the pme-derived flags apply; cgroup filtering
 * always excludes them (no page frame, hence no cgroup).
 */
static void walk_swap(unsigned long voffset, uint64_t pme)
{
	uint64_t flags = kpageflags_flags(0, pme);
	unsigned long offset;

	if (!bit_mask_ok(flags))
		return;
	if (opt_cgroup)
		return;

	offset = pagemap_swap_offset(pme);
	if (opt_list == 1)
		show_page_range(voffset, offset, 1, flags, 0);
	else if (opt_list == 2)
		show_page(voffset, offset, flags, 0);

	nr_pages[hash_slot(flags)]++;
	total_pages++;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Naoya Horiguchi | 83 | 90.22% | 1 | 50.00% |
Konstantin Khlebnikov | 9 | 9.78% | 1 | 50.00% |
Total | 92 | 100.00% | 2 | 100.00% |
#define PAGEMAP_BATCH (64 << 10)
/*
 * Walk "count" virtual pages starting at page index "index" through
 * /proc/pid/pagemap, in batches of PAGEMAP_BATCH, dispatching present
 * pages to walk_pfn() and swapped pages to walk_swap().
 */
static void walk_vma(unsigned long index, unsigned long count)
{
	uint64_t buf[PAGEMAP_BATCH];
	unsigned long chunk;
	unsigned long got;
	unsigned long pfn;
	unsigned long i;

	while (count) {
		chunk = min_t(unsigned long, count, PAGEMAP_BATCH);
		got = pagemap_read(buf, index, chunk);
		if (!got)
			break;

		for (i = 0; i < got; i++) {
			pfn = pagemap_pfn(buf[i]);
			if (pfn)
				walk_pfn(index + i, pfn, 1, buf[i]);
			if (buf[i] & PM_SWAP)
				walk_swap(index + i, buf[i]);
		}

		index += got;
		count -= got;
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Fengguang Wu | 118 | 81.38% | 4 | 57.14% |
Naoya Horiguchi | 26 | 17.93% | 2 | 28.57% |
Ladinu Chandrasinghe | 1 | 0.69% | 1 | 14.29% |
Total | 145 | 100.00% | 7 | 100.00% |
/*
 * Walk the virtual page range [index, index + count) of the target
 * task, intersecting it with the task's VMA list (pg_start[]/pg_end[],
 * sorted ascending) and walking each overlapping piece via walk_vma().
 */
static void walk_task(unsigned long index, unsigned long count)
{
	const unsigned long end = index + count;
	unsigned long start;
	int i = 0;

	while (index < end) {
		/* skip VMAs that end at or before the current position */
		while (pg_end[i] <= index)
			if (++i >= nr_vmas)
				return;
		/* remaining VMAs all start beyond the requested range */
		if (pg_start[i] >= end)
			return;

		/* clip the VMA to the requested range and advance past it */
		start = max_t(unsigned long, pg_start[i], index);
		index = min_t(unsigned long, pg_end[i], end);
		assert(start < index);
		walk_vma(start, index - start);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
|