Release 4.10 fs/proc/task_mmu.c
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <asm/elf.h>
#include <linux/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
unsigned long text, lib, swap, ptes, pmds, anon, file, shmem;
unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
anon = get_mm_counter(mm, MM_ANONPAGES);
file = get_mm_counter(mm, MM_FILEPAGES);
shmem = get_mm_counter(mm, MM_SHMEMPAGES);
/*
* Note: to minimize their overhead, mm maintains hiwater_vm and
* hiwater_rss only when about to *lower* total_vm or rss. Any
* collector of these hiwater stats must therefore get total_vm
* and rss too, which will usually be the higher. Barriers? not
* worth the effort, such snapshots can always be inconsistent.
*/
hiwater_vm = total_vm = mm->total_vm;
if (hiwater_vm < mm->hiwater_vm)
hiwater_vm = mm->hiwater_vm;
hiwater_rss = total_rss = anon + file + shmem;
if (hiwater_rss < mm->hiwater_rss)
hiwater_rss = mm->hiwater_rss;
text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
swap = get_mm_counter(mm, MM_SWAPENTS);
ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes);
pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm);
seq_printf(m,
"VmPeak:\t%8lu kB\n"
"VmSize:\t%8lu kB\n"
"VmLck:\t%8lu kB\n"
"VmPin:\t%8lu kB\n"
"VmHWM:\t%8lu kB\n"
"VmRSS:\t%8lu kB\n"
"RssAnon:\t%8lu kB\n"
"RssFile:\t%8lu kB\n"
"RssShmem:\t%8lu kB\n"
"VmData:\t%8lu kB\n"
"VmStk:\t%8lu kB\n"
"VmExe:\t%8lu kB\n"
"VmLib:\t%8lu kB\n"
"VmPTE:\t%8lu kB\n"
"VmPMD:\t%8lu kB\n"
"VmSwap:\t%8lu kB\n",
hiwater_vm << (PAGE_SHIFT-10),
total_vm << (PAGE_SHIFT-10),
mm->locked_vm << (PAGE_SHIFT-10),
mm->pinned_vm << (PAGE_SHIFT-10),
hiwater_rss << (PAGE_SHIFT-10),
total_rss << (PAGE_SHIFT-10),
anon << (PAGE_SHIFT-10),
file << (PAGE_SHIFT-10),
shmem << (PAGE_SHIFT-10),
mm->data_vm << (PAGE_SHIFT-10),
mm->stack_vm << (PAGE_SHIFT-10), text, lib,
ptes >> 10,
pmds >> 10,
swap << (PAGE_SHIFT-10));
hugetlb_report_usage(m, mm);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| hugh dickins | 71 | 20.94% | 1 | 8.33% |
| christoph hellwig | 66 | 19.47% | 1 | 8.33% |
| jerome marchand | 65 | 19.17% | 1 | 8.33% |
| william lee irwin iii | 47 | 13.86% | 3 | 25.00% |
| kirill a. shutemov | 41 | 12.09% | 1 | 8.33% |
| kamezawa hiroyuki | 20 | 5.90% | 1 | 8.33% |
| christoph lameter | 11 | 3.24% | 1 | 8.33% |
| eric w. biederman | 8 | 2.36% | 1 | 8.33% |
| naoya horiguchi | 7 | 2.06% | 1 | 8.33% |
| konstantin khlebnikov | 3 | 0.88% | 1 | 8.33% |
| Total | 339 | 100.00% | 12 | 100.00% |
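The Vm* values above are reported in kB: task_mem() keeps its counters in pages and shifts by (PAGE_SHIFT - 10) at print time, which is the same as multiplying by PAGE_SIZE / 1024. A minimal userspace sketch of that conversion (assuming 4K pages; the counter value is made up, not taken from a real mm):

#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT 12   /* assumption: 4K pages */

int main(void)
{
	unsigned long rss_pages = 1500;                         /* hypothetical RSS in pages */
	unsigned long rss_kb = rss_pages << (EXAMPLE_PAGE_SHIFT - 10);

	printf("VmRSS:\t%8lu kB\n", rss_kb);                    /* 1500 pages -> 6000 kB */
	return 0;
}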
unsigned long task_vsize(struct mm_struct *mm)
{
return PAGE_SIZE * mm->total_vm;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph hellwig | 14 | 77.78% | 1 | 50.00% |
| andrew morton | 4 | 22.22% | 1 | 50.00% |
| Total | 18 | 100.00% | 2 | 100.00% |
unsigned long task_statm(struct mm_struct *mm,
unsigned long *shared, unsigned long *text,
unsigned long *data, unsigned long *resident)
{
*shared = get_mm_counter(mm, MM_FILEPAGES) +
get_mm_counter(mm, MM_SHMEMPAGES);
*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
>> PAGE_SHIFT;
*data = mm->data_vm + mm->stack_vm;
*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
return mm->total_vm;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph hellwig | 39 | 39.39% | 1 | 10.00% |
| william lee irwin iii | 25 | 25.25% | 2 | 20.00% |
| alexey dobriyan | 10 | 10.10% | 1 | 10.00% |
| hugh dickins | 9 | 9.09% | 2 | 20.00% |
| jerome marchand | 7 | 7.07% | 1 | 10.00% |
| christoph lameter | 4 | 4.04% | 1 | 10.00% |
| konstantin khlebnikov | 3 | 3.03% | 1 | 10.00% |
| kamezawa hiroyuki | 2 | 2.02% | 1 | 10.00% |
| Total | 99 | 100.00% | 10 | 100.00% |
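task_statm() backs /proc/PID/statm, which reports everything in pages rather than kB. A minimal userspace sketch of consuming it (the seven-field "size resident shared text lib data dt" layout follows the proc(5) convention; variable names are illustrative):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned long size, resident, shared, text, lib, data, dt;
	FILE *f = fopen("/proc/self/statm", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%lu %lu %lu %lu %lu %lu %lu",
		   &size, &resident, &shared, &text, &lib, &data, &dt) != 7) {
		fclose(f);
		return 1;
	}
	fclose(f);

	/* Convert pages to kB, the same scaling /proc/PID/status applies. */
	printf("VmSize: %lu kB, VmRSS: %lu kB\n",
	       size * (sysconf(_SC_PAGESIZE) / 1024),
	       resident * (sysconf(_SC_PAGESIZE) / 1024));
	return 0;
}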
#ifdef CONFIG_NUMA
/*
* Save get_task_policy() for show_numa_map().
*/
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
struct task_struct *task = priv->task;
task_lock(task);
priv->task_mempolicy = get_task_policy(task);
mpol_get(priv->task_mempolicy);
task_unlock(task);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| kamezawa hiroyuki | 43 | 93.48% | 1 | 50.00% |
| oleg nesterov | 3 | 6.52% | 1 | 50.00% |
| Total | 46 | 100.00% | 2 | 100.00% |
static void release_task_mempolicy(struct proc_maps_private *priv)
{
mpol_put(priv->task_mempolicy);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| kamezawa hiroyuki | 18 | 100.00% | 1 | 100.00% |
| Total | 18 | 100.00% | 1 | 100.00% |
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| kamezawa hiroyuki | 10 | 100.00% | 1 | 100.00% |
| Total | 10 | 100.00% | 1 | 100.00% |
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| kamezawa hiroyuki | 10 | 100.00% | 1 | 100.00% |
| Total | 10 | 100.00% | 1 | 100.00% |
#endif
static void vma_stop(struct proc_maps_private *priv)
{
struct mm_struct *mm = priv->mm;
release_task_mempolicy(priv);
up_read(&mm->mmap_sem);
mmput(mm);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| matt mackall | 30 | 78.95% | 2 | 40.00% |
| kamezawa hiroyuki | 5 | 13.16% | 1 | 20.00% |
| oleg nesterov | 2 | 5.26% | 1 | 20.00% |
| mauricio lin | 1 | 2.63% | 1 | 20.00% |
| Total | 38 | 100.00% | 5 | 100.00% |
static struct vm_area_struct *
m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
if (vma == priv->tail_vma)
return NULL;
return vma->vm_next ?: priv->tail_vma;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| oleg nesterov | 39 | 100.00% | 1 | 100.00% |
| Total | 39 | 100.00% | 1 | 100.00% |
static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
{
if (m->count < m->size) /* vma is copied successfully */
m->version = m_next_vma(m->private, vma) ? vma->vm_end : -1UL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| oleg nesterov | 46 | 97.87% | 1 | 50.00% |
| robert ho | 1 | 2.13% | 1 | 50.00% |
| Total | 47 | 100.00% | 2 | 100.00% |
static void *m_start(struct seq_file *m, loff_t *ppos)
{
struct proc_maps_private *priv = m->private;
unsigned long last_addr = m->version;
struct mm_struct *mm;
struct vm_area_struct *vma;
unsigned int pos = *ppos;
/* See m_cache_vma(). Zero at the start or after lseek. */
if (last_addr == -1UL)
return NULL;
priv->task = get_proc_task(priv->inode);
if (!priv->task)
return ERR_PTR(-ESRCH);
mm = priv->mm;
if (!mm || !atomic_inc_not_zero(&mm->mm_users))
return NULL;
down_read(&mm->mmap_sem);
hold_task_mempolicy(priv);
priv->tail_vma = get_gate_vma(mm);
if (last_addr) {
vma = find_vma(mm, last_addr - 1);
if (vma && vma->vm_start <= last_addr)
vma = m_next_vma(priv, vma);
if (vma)
return vma;
}
m->version = 0;
if (pos < mm->map_count) {
for (vma = mm->mmap; pos; pos--) {
m->version = vma->vm_start;
vma = vma->vm_next;
}
return vma;
}
/* we do not bother to update m->version in this case */
if (pos == mm->map_count && priv->tail_vma)
return priv->tail_vma;
vma_stop(priv);
return NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| oleg nesterov | 119 | 47.98% | 7 | 38.89% |
| matt mackall | 49 | 19.76% | 1 | 5.56% |
| al viro | 19 | 7.66% | 2 | 11.11% |
| christoph hellwig | 18 | 7.26% | 1 | 5.56% |
| ingo molnar | 17 | 6.85% | 2 | 11.11% |
| robert ho | 12 | 4.84% | 1 | 5.56% |
| prasanna meda | 7 | 2.82% | 1 | 5.56% |
| kamezawa hiroyuki | 3 | 1.21% | 1 | 5.56% |
| eric w. biederman | 2 | 0.81% | 1 | 5.56% |
| mauricio lin | 2 | 0.81% | 1 | 5.56% |
| Total | 248 | 100.00% | 18 | 100.00% |
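m_cache_vma() stores the end address of the last VMA that fit into the seq_file buffer in m->version, and m_start() uses it (via find_vma()) to resume the walk without rescanning the VMA list from the head. Any reader that drains /proc/PID/maps in small chunks exercises that path; a minimal userspace sketch (the buffer size is arbitrary, chosen small on purpose):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[256];               /* deliberately small: forces many read() calls */
	ssize_t n;
	int fd = open("/proc/self/maps", O_RDONLY);

	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
	return 0;
}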
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
struct proc_maps_private *priv = m->private;
struct vm_area_struct *next;
(*pos)++;
next = m_next_vma(priv, v);
if (!next)
vma_stop(priv);
return next;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| matt mackall | 26 | 41.94% | 2 | 28.57% |
| oleg nesterov | 23 | 37.10% | 2 | 28.57% |
| david rientjes | 7 | 11.29% | 1 | 14.29% |
| mauricio lin | 4 | 6.45% | 1 | 14.29% |
| christoph hellwig | 2 | 3.23% | 1 | 14.29% |
| Total | 62 | 100.00% | 7 | 100.00% |
static void m_stop(struct seq_file *m, void *v)
{
struct proc_maps_private *priv = m->private;
if (!IS_ERR_OR_NULL(v))
vma_stop(priv);
if (priv->task) {
put_task_struct(priv->task);
priv->task = NULL;
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| matt mackall | 35 | 60.34% | 1 | 16.67% |
| oleg nesterov | 10 | 17.24% | 2 | 33.33% |
| linus torvalds | 6 | 10.34% | 1 | 16.67% |
| mauricio lin | 4 | 6.90% | 1 | 16.67% |
| prasanna meda | 3 | 5.17% | 1 | 16.67% |
| Total | 58 | 100.00% | 6 | 100.00% |
static int proc_maps_open(struct inode *inode, struct file *file,
const struct seq_operations *ops, int psize)
{
struct proc_maps_private *priv = __seq_open_private(file, ops, psize);
if (!priv)
return -ENOMEM;
priv->inode = inode;
priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
if (IS_ERR(priv->mm)) {
int err = PTR_ERR(priv->mm);
seq_release_private(inode, file);
return err;
}
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| oleg nesterov | 62 | 62.63% | 3 | 33.33% |
| matt mackall | 30 | 30.30% | 1 | 11.11% |
| christoph hellwig | 3 | 3.03% | 1 | 11.11% |
| jan engelhardt | 1 | 1.01% | 1 | 11.11% |
| prasanna meda | 1 | 1.01% | 1 | 11.11% |
| david rientjes | 1 | 1.01% | 1 | 11.11% |
| nick piggin | 1 | 1.01% | 1 | 11.11% |
| Total | 99 | 100.00% | 9 | 100.00% |
static int proc_map_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = file->private_data;
struct proc_maps_private *priv = seq->private;
if (priv->mm)
mmdrop(priv->mm);
return seq_release_private(inode, file);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| oleg nesterov | 55 | 100.00% | 1 | 100.00% |
| Total | 55 | 100.00% | 1 | 100.00% |
static int do_maps_open(struct inode *inode, struct file *file,
const struct seq_operations *ops)
{
return proc_maps_open(inode, file, ops,
sizeof(struct proc_maps_private));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| oleg nesterov | 30 | 78.95% | 1 | 25.00% |
| mauricio lin | 4 | 10.53% | 1 | 25.00% |
| matt mackall | 4 | 10.53% | 2 | 50.00% |
| Total | 38 | 100.00% | 4 | 100.00% |
/*
* Indicate if the VMA is a stack for the given task; for
* /proc/PID/maps that is the stack of the main task.
*/
static int is_stack(struct proc_maps_private *priv,
struct vm_area_struct *vma)
{
/*
* We make no effort to guess what a given thread considers to be
* its "stack". It's not even well-defined for programs written
* in languages like Go.
*/
return vma->vm_start <= vma->vm_mm->start_stack &&
vma->vm_end >= vma->vm_mm->start_stack;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| johannes weiner | 21 | 55.26% | 1 | 33.33% |
| oleg nesterov | 15 | 39.47% | 1 | 33.33% |
| andy lutomirski | 2 | 5.26% | 1 | 33.33% |
| Total | 38 | 100.00% | 3 | 100.00% |
static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
struct mm_struct *mm = vma->vm_mm;
struct file *file = vma->vm_file;
struct proc_maps_private *priv = m->private;
vm_flags_t flags = vma->vm_flags;
unsigned long ino = 0;
unsigned long long pgoff = 0;
unsigned long start, end;
dev_t dev = 0;
const char *name = NULL;
if (file) {
struct inode *inode = file_inode(vma->vm_file);
dev = inode->i_sb->s_dev;
ino = inode->i_ino;
pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
}
/* We don't show the stack guard page in /proc/maps */
start = vma->vm_start;
if (stack_guard_page_start(vma, start))
start += PAGE_SIZE;
end = vma->vm_end;
if (stack_guard_page_end(vma, end))
end -= PAGE_SIZE;
seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
start,
end,
flags & VM_READ ? 'r' : '-',
flags & VM_WRITE ? 'w' : '-',
flags & VM_EXEC ? 'x' : '-',
flags & VM_MAYSHARE ? 's' : 'p',
pgoff,
MAJOR(dev), MINOR(dev), ino);
/*
* Print the dentry name for named mappings, and a
* special [heap] marker for the heap:
*/
if (file) {
seq_pad(m, ' ');
seq_file_path(m, file, "\n");
goto done;
}
if (vma->vm_ops && vma->vm_ops->name) {
name = vma->vm_ops->name(vma);
if (name)
goto done;
}
name = arch_vma_name(vma);
if (!name) {
if (!mm) {
name = "[vdso]";
goto done;
}
if (vma->vm_start <= mm->brk &&
vma->vm_end >= mm->start_brk) {
name = "[heap]";
goto done;
}
if (is_stack(priv, vma))
name = "[stack]";
}
done:
if (name) {
seq_pad(m, ' ');
seq_puts(m, name);
}
seq_putc(m, '\n');
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| matt mackall | 180 | 45.34% | 4 | 20.00% |
| siddhesh poyarekar | 42 | 10.58% | 1 | 5.00% |
| andy lutomirski | 32 | 8.06% | 1 | 5.00% |
| david rientjes | 23 | 5.79% | 1 | 5.00% |
| tetsuo handa | 22 | 5.54% | 1 | 5.00% |
| mikulas patocka | 22 | 5.54% | 1 | 5.00% |
| kamezawa hiroyuki | 21 | 5.29% | 1 | 5.00% |
| linus torvalds | 18 | 4.53% | 1 | 5.00% |
| mauricio lin | 14 | 3.53% | 1 | 5.00% |
| stefan bader | 7 | 1.76% | 1 | 5.00% |
| joe korty | 5 | 1.26% | 1 | 5.00% |
| johannes weiner | 3 | 0.76% | 1 | 5.00% |
| al viro | 3 | 0.76% | 1 | 5.00% |
| aaro koskinen | 2 | 0.50% | 1 | 5.00% |
| kosaki motohiro | 1 | 0.25% | 1 | 5.00% |
| miklos szeredi | 1 | 0.25% | 1 | 5.00% |
| oleg nesterov | 1 | 0.25% | 1 | 5.00% |
| Total | 397 | 100.00% | 20 | 100.00% |
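The seq_printf() format above fixes the layout of each /proc/PID/maps line: start-end, permissions, offset, device, inode, then an optional padded name. A minimal userspace sketch that parses those fields back out (field and variable names are illustrative, not kernel API):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/self/maps", "r");
	char line[512];

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		unsigned long start, end, ino;
		unsigned long long pgoff;
		unsigned int dev_major, dev_minor;
		char perms[5], name[256] = "";

		/* Mirrors "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu " plus the name. */
		if (sscanf(line, "%lx-%lx %4s %llx %x:%x %lu %255[^\n]",
			   &start, &end, perms, &pgoff, &dev_major, &dev_minor,
			   &ino, name) >= 7)
			printf("%08lx-%08lx %s %08llx %02x:%02x %lu %s\n",
			       start, end, perms, pgoff, dev_major, dev_minor, ino, name);
	}
	fclose(f);
	return 0;
}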
static int show_map(struct seq_file *m, void *v, int is_pid)
{
show_map_vma(m, v, is_pid);
m_cache_vma(m, v);
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| joe korty | 20 | 54.05% | 1 | 16.67% |
| oleg nesterov | 8 | 21.62% | 2 | 33.33% |
| siddhesh poyarekar | 5 | 13.51% | 1 | 16.67% |
| matt mackall | 4 | 10.81% | 2 | 33.33% |
| Total | 37 | 100.00% | 6 | 100.00% |
static int show_pid_map(struct seq_file *m, void *v)
{
return show_map(m, v, 1);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| siddhesh poyarekar | 25 | 100.00% | 1 | 100.00% |
| Total | 25 | 100.00% | 1 | 100.00% |
static int show_tid_map(struct seq_file *m, void *v)
{
return show_map(m, v, 0);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| siddhesh poyarekar | 25 | 100.00% | 1 | 100.00% |
| Total | 25 | 100.00% | 1 | 100.00% |
static const struct seq_operations proc_pid_maps_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
.show = show_pid_map
};
static const struct seq_operations proc_tid_maps_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
.show = show_tid_map
};
static int pid_maps_open(struct inode *inode, struct file *file)
{
return do_maps_open(inode, file, &proc_pid_maps_op);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| matt mackall | 25 | 92.59% | 2 | 50.00% |
| siddhesh poyarekar | 1 | 3.70% | 1 | 25.00% |
| david rientjes | 1 | 3.70% | 1 | 25.00% |
| Total | 27 | 100.00% | 4 | 100.00% |
static int tid_maps_open(struct inode *inode, struct file *file)
{
return do_maps_open(inode, file, &proc_tid_maps_op);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| siddhesh poyarekar | 27 | 100.00% | 1 | 100.00% |
| Total | 27 | 100.00% | 1 | 100.00% |
const struct file_operations proc_pid_maps_operations = {
.open = pid_maps_open,
.read = seq_read,
.llseek = seq_lseek,
.release = proc_map_release,
};
const struct file_operations proc_tid_maps_operations = {
.open = tid_maps_open,
.read = seq_read,
.llseek = seq_lseek,
.release = proc_map_release,
};
/*
* Proportional Set Size(PSS): my share of RSS.
*
* PSS of a process is the count of pages it has in memory, where each
* page is divided by the number of processes sharing it. So if a
* process has 1000 pages all to itself, and 1000 shared with one other
* process, its PSS will be 1500.
*
* To keep (accumulated) division errors low, we adopt a 64bit
* fixed-point pss counter: (pss >> PSS_SHIFT) is the real byte count.
*
* A shift of 12 before division means (assuming 4K page size):
* - 1M 3-user-pages add up to 8KB errors;
* - supports mapcount up to 2^24, or 16M;
* - supports PSS up to 2^52 bytes, or 4PB.
*/
#define PSS_SHIFT 12
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
unsigned long resident;
unsigned long shared_clean;
unsigned long shared_dirty;
unsigned long private_clean;
unsigned long private_dirty;
unsigned long referenced;
unsigned long anonymous;
unsigned long anonymous_thp;
unsigned long shmem_thp;
unsigned long swap;
unsigned long shared_hugetlb;
unsigned long private_hugetlb;
u64 pss;
u64 swap_pss;
bool check_shmem_swap;
};
static void smaps_account(struct mem_size_stats *mss, struct page *page,
bool compound, bool young, bool dirty)
{
int i, nr = compound ? 1 << compound_order(page) : 1;
unsigned long size = nr * PAGE_SIZE;
if (PageAnon(page))
mss->anonymous += size;
mss->resident += size;
/* Accumulate the size in pages that have been accessed. */
if (young || page_is_young(page) || PageReferenced(page))
mss->referenced += size;
/*
* page_count(page) == 1 guarantees the page is mapped exactly once.
* If any subpage of the compound page is mapped with a PTE, it would elevate
* page_count().
*/
if (page_count(page) == 1) {
if (dirty || PageDirty(page))
mss->private_dirty += size;
else
mss->private_clean += size;
mss->pss += (u64)size << PSS_SHIFT;
return;
}
for (i = 0; i < nr; i++, page++) {
int mapcount = page_mapcount(page);
if (mapcount >= 2) {
if (dirty || PageDirty(page))
mss->shared_dirty += PAGE_SIZE;
else
mss->shared_clean += PAGE_SIZE;
mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
} else {
if (dirty || PageDirty(page))
mss->private_dirty += PAGE_SIZE;
else
mss->private_clean += PAGE_SIZE;
mss->pss += PAGE_SIZE << PSS_SHIFT;
}
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| kirill a. shutemov | 231 | 97.88% | 3 | 75.00% |
| vladimir davydov | 5 | 2.12% | 1 | 25.00% |
| Total | 236 | 100.00% | 4 | 100.00% |
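smaps_account() implements the fixed-point scheme described in the PSS comment above: each page contributes (size << PSS_SHIFT) / mapcount, and only the accumulated sum is shifted back down when it is reported. A minimal userspace sketch of that arithmetic (4K pages and the per-page map counts are assumptions for illustration):

#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SIZE 4096ULL    /* assumption: 4K pages */
#define EX_PSS_SHIFT 12         /* mirrors PSS_SHIFT above */

int main(void)
{
	/* hypothetical per-page map counts: one private page, the rest shared */
	int mapcounts[] = { 1, 2, 3, 3, 3 };
	uint64_t pss = 0;

	for (unsigned i = 0; i < sizeof(mapcounts) / sizeof(mapcounts[0]); i++)
		pss += (EX_PAGE_SIZE << EX_PSS_SHIFT) / (unsigned)mapcounts[i];

	/* Shift back to bytes only once, at reporting time, keeping per-page
	 * division error below one fixed-point unit each. */
	printf("Pss: %llu bytes\n", (unsigned long long)(pss >> EX_PSS_SHIFT));
	return 0;
}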
#ifdef CONFIG_SHMEM
static int smaps_pte_hole(unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
struct mem_size_stats *mss = walk->private;
mss->swap += shmem_partial_swap_usage(
walk->vma->vm_file->f_mapping, addr, end);
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| vlastimil babka | 50 | 100.00% | 2 | 100.00% |
| Total | 50 | 100.00% | 2 | 100.00% |
#endif
static void smaps_pte_entry(pte_t *pte, unsigned long addr,
struct mm_walk *walk)
{
struct mem_size_stats *mss = walk->private;
struct vm_area_struct *vma = walk->vma;
struct page *page = NULL;
if (pte_present(*pte)) {
page = vm_normal_page(vma, addr, *pte);
} else if (is_swap_pte(*pte)) {
swp_entry_t swpent = pte_to_swp_entry(*pte);
if (!non_swap_entry(swpent)) {
int mapcount;
mss->swap += PAGE_SIZE;
mapcount = swp_swapcount(swpent);
if (mapcount >= 2) {
u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;
do_div(pss_delta, mapcount);
mss->swap_pss += pss_delta;
} else {
mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
}
} else if (is_migration_entry(swpent))
page = migration_entry_to_page(swpent);
} else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
&& pte_none(*pte))) {
page = find_get_entry(vma->vm_file->f_mapping,
linear_page_index(vma, addr));
if (!page)
return;
if (radix_tree_exceptional_entry(page))
mss->swap += PAGE_SIZE;
else
put_page(page);
return;
}
if (!page)
return;
smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| vlastimil babka | 66 | 24.81% | 2 | 13.33% |
| minchan kim | 58 | 21.80% | 2 | 13.33% |
| konstantin khlebnikov | 42 | 15.79% | 1 | 6.67% |
| kirill a. shutemov | 28 | 10.53% | 3 | 20.00% |
| matt mackall | 23 | 8.65% | 1 | 6.67% |
| mauricio lin | 19 | 7.14% | 1 | 6.67% |
| peter zijlstra | 12 | 4.51% | 1 | 6.67% |
| dave hansen | 9 | 3.38% | 2 | 13.33% |
| eric w. biederman | 8 | 3.01% | 1 | 6.67% |
| naoya horiguchi | 1 | 0.38% | 1 | 6.67% |
| Total | 266 | 100.00% | 15 | 100.00% |
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
struct mm_walk *walk)
{
struct mem_size_stats *mss = walk->private;
struct vm_area_struct *vma = walk->vma;
struct page *page;
/* FOLL_DUMP will return -EFAULT on huge zero page */
page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
if (IS_ERR_OR_NULL(page))
return;
if (PageAnon(page))
mss->anonymous_thp += HPAGE_PMD_SIZE;
else if (PageSwapBacked(page))
mss->shmem_thp += HPAGE_PMD_SIZE;
else if (is_zone_device_page(page))
/* pass */;
else
VM_BUG_ON_PAGE(1, page);
smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| kirill a. shutemov | 96 | 73.85% | 3 | 30.00% |
| matt mackall | 15 | 11.54% | 1 | 10.00% |
| dan williams | 10 | 7.69% | 1 | 10.00% |
| kosaki motohiro | 5 | 3.85% | 1 | 10.00% |
| mauricio lin | 1 | 0.77% | 1 | 10.00% |
| christoph hellwig | 1 | 0.77% | 1 | 10.00% |
| naoya horiguchi | 1 | 0.77% | 1 | 10.00% |
| eric w. biederman | 1 | 0.77% | 1 | 10.00% |
| Total | 130 | 100.00% | 10 | 100.00% |
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
struct mm_walk *walk)
{
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| kirill a. shutemov | 18 | 100.00% | 1 | 100.00% |
| Total | 18 | 100.00% | 1 | 100.00% |
#endif
static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
struct vm_area_struct *vma = walk->vma;
pte_t *pte;
spinlock_t *ptl;
ptl = pmd_trans_huge_lock(pmd, vma);
if (ptl) {
smaps_pmd_entry(pmd, addr, walk);