Contributors: 28
Author Tokens Token Proportion Commits Commit Proportion
Andrew Morton 119 29.82% 6 13.04%
Dave Hansen 47 11.78% 1 2.17%
Jan Kara 44 11.03% 3 6.52%
Christoph Hellwig 42 10.53% 1 2.17%
David Chinner 22 5.51% 2 4.35%
Linus Torvalds (pre-git) 21 5.26% 7 15.22%
Kaixiong Yu 17 4.26% 1 2.17%
Jean Noel Cordenner 16 4.01% 1 2.17%
Al Viro 12 3.01% 2 4.35%
Petr Holasek 12 3.01% 1 2.17%
Linus Torvalds 7 1.75% 2 4.35%
Johannes Weiner 6 1.50% 1 2.17%
andrew.yang 6 1.50% 1 2.17%
David Howells 3 0.75% 1 2.17%
Alexei Starovoitov 3 0.75% 1 2.17%
Joel Granados 3 0.75% 3 6.52%
Eric Sandeen 3 0.75% 1 2.17%
Hideaki Yoshifuji / 吉藤英明 2 0.50% 1 2.17%
Arnaldo Carvalho de Melo 2 0.50% 1 2.17%
Eric Dumazet 2 0.50% 1 2.17%
Dmitry Safonov 2 0.50% 1 2.17%
Nicholas Piggin 2 0.50% 1 2.17%
nixiaoming 1 0.25% 1 2.17%
Andi Kleen 1 0.25% 1 2.17%
Luis R. Rodriguez 1 0.25% 1 2.17%
Greg Kroah-Hartman 1 0.25% 1 2.17%
Joe Perches 1 0.25% 1 2.17%
Mike Waychison 1 0.25% 1 2.17%
Total 399 46


// SPDX-License-Identifier: GPL-2.0
/*
 * Implement the manual drop-all-pagecache function
 */

#include <linux/pagemap.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/sysctl.h>
#include <linux/gfp.h>
#include <linux/swap.h>
#include "internal.h"

/* A global variable is a bit ugly, but it keeps the code simple */
static int sysctl_drop_caches;

/*
 * Drop clean, unused page cache pages for every inode on @sb.
 *
 * Called via iterate_supers() once per mounted superblock; @unused is the
 * opaque iterate_supers() argument and is ignored here.
 */
static void drop_pagecache_sb(struct super_block *sb, void *unused)
{
	struct inode *inode, *toput_inode = NULL;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		/*
		 * We must skip inodes in unusual state. We may also skip
		 * inodes without pages but we deliberately won't in case
		 * we need to reschedule to avoid softlockups.
		 */
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping_empty(inode->i_mapping) && !need_resched())) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		/*
		 * Pin the inode so it (and our position in s_inodes) stays
		 * valid while both locks are dropped below.
		 */
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		/*
		 * Release the previously pinned inode only now, outside the
		 * locks; the reference on the current inode keeps our list
		 * cursor valid until we re-take s_inode_list_lock.
		 */
		iput(toput_inode);
		toput_inode = inode;

		cond_resched();
		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
	/* Drop the reference on the last inode processed (may be NULL). */
	iput(toput_inode);
}

/*
 * sysctl handler for vm.drop_caches.
 *
 * On a write, the value (range-checked by proc_dointvec_minmax() against
 * the table's extra1/extra2 bounds) is interpreted as a bitmask:
 *   bit 0 (1): drop clean page cache across all superblocks
 *   bit 1 (2): reclaim slab objects
 *   bit 2 (4): suppress the informational log line from now on
 *
 * Returns 0 on success or the error from proc_dointvec_minmax().
 */
static int drop_caches_sysctl_handler(const struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	static int stfu;
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (ret)
		return ret;
	if (!write)
		return 0;

	if (sysctl_drop_caches & 1) {
		/* Flush per-CPU LRU batches first so they are droppable. */
		lru_add_drain_all();
		iterate_supers(drop_pagecache_sb, NULL);
		count_vm_event(DROP_PAGECACHE);
	}
	if (sysctl_drop_caches & 2) {
		drop_slab();
		count_vm_event(DROP_SLAB);
	}
	if (!stfu) {
		pr_info("%s (%d): drop_caches: %d\n",
			current->comm, task_pid_nr(current),
			sysctl_drop_caches);
	}
	stfu |= sysctl_drop_caches & 4;

	return 0;
}

/*
 * vm.drop_caches sysctl definition: write-only (mode 0200), values
 * constrained to [SYSCTL_ONE, SYSCTL_FOUR] by proc_dointvec_minmax().
 */
static const struct ctl_table drop_caches_table[] = {
	{
		.procname	= "drop_caches",
		.data		= &sysctl_drop_caches,
		.maxlen		= sizeof(int),
		.mode		= 0200,		/* root write-only */
		.proc_handler	= drop_caches_sysctl_handler,
		.extra1		= SYSCTL_ONE,	/* minimum accepted value */
		.extra2		= SYSCTL_FOUR,	/* maximum accepted value */
	},
};

/* Register vm.drop_caches under /proc/sys/vm at fs initcall time. */
static int __init init_vm_drop_caches_sysctls(void)
{
	register_sysctl_init("vm", drop_caches_table);
	return 0;
}
fs_initcall(init_vm_drop_caches_sysctls);