Contributors: 31

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Andrew Morton | 201 | 25.94% | 16 | 27.12% |
| Fengguang Wu | 84 | 10.84% | 1 | 1.69% |
| Heiko Carstens | 76 | 9.81% | 2 | 3.39% |
| Amir Goldstein | 74 | 9.55% | 2 | 3.39% |
| Linus Torvalds | 59 | 7.61% | 1 | 1.69% |
| Al Viro | 40 | 5.16% | 4 | 6.78% |
| Dominik Brodowski | 27 | 3.48% | 1 | 1.69% |
| Oleg Drokin | 25 | 3.23% | 1 | 1.69% |
| Matthew Wilcox | 25 | 3.23% | 3 | 5.08% |
| Mel Gorman | 23 | 2.97% | 2 | 3.39% |
| Masatake YAMATO | 22 | 2.84% | 1 | 1.69% |
| Guo Ren | 20 | 2.58% | 1 | 1.69% |
| Linus Torvalds (pre-git) | 12 | 1.55% | 5 | 8.47% |
| shidao.ytt | 9 | 1.16% | 1 | 1.69% |
| Anton Blanchard | 8 | 1.03% | 2 | 3.39% |
| Shawn Bohrer | 7 | 0.90% | 1 | 1.69% |
| Andrey Ryabinin | 7 | 0.90% | 1 | 1.69% |
| Shakeel Butt | 7 | 0.90% | 1 | 1.69% |
| Valentine Barshak | 7 | 0.90% | 1 | 1.69% |
| Johannes Weiner | 6 | 0.77% | 1 | 1.69% |
| Kirill A. Shutemov | 5 | 0.65% | 1 | 1.69% |
| Yu Zhao | 5 | 0.65% | 1 | 1.69% |
| Jan Kara | 5 | 0.65% | 1 | 1.69% |
| Christoph Hellwig | 4 | 0.52% | 1 | 1.69% |
| Yafang Shao | 4 | 0.52% | 1 | 1.69% |
| Arnd Bergmann | 4 | 0.52% | 1 | 1.69% |
| Carsten Otte | 4 | 0.52% | 1 | 1.69% |
| Amit Arora | 2 | 0.26% | 1 | 1.69% |
| Brian Foster | 1 | 0.13% | 1 | 1.69% |
| Greg Kroah-Hartman | 1 | 0.13% | 1 | 1.69% |
| Francois Cami | 1 | 0.13% | 1 | 1.69% |
| Total | 775 | | 59 | |

// SPDX-License-Identifier: GPL-2.0
/*
* mm/fadvise.c
*
* Copyright (C) 2002, Linus Torvalds
*
* 11Jan2003 Andrew Morton
* Initial version.
*/
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/fadvise.h>
#include <linux/writeback.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <asm/unistd.h>
#include "internal.h"
/*
* POSIX_FADV_WILLNEED could set PG_Referenced, and POSIX_FADV_NOREUSE could
* deactivate the pages and clear PG_Referenced.
*/
int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
{
struct inode *inode;
struct address_space *mapping;
struct backing_dev_info *bdi;
loff_t endbyte; /* inclusive */
pgoff_t start_index;
pgoff_t end_index;
unsigned long nrpages;
inode = file_inode(file);
if (S_ISFIFO(inode->i_mode))
return -ESPIPE;
mapping = file->f_mapping;
if (!mapping || len < 0)
return -EINVAL;
bdi = inode_to_bdi(mapping->host);
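/*
 * DAX files and files without a real backing device have no pagecache or
 * readahead state to manage, so the advice is accepted but has no effect.
 */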
if (IS_DAX(inode) || (bdi == &noop_backing_dev_info)) {
switch (advice) {
case POSIX_FADV_NORMAL:
case POSIX_FADV_RANDOM:
case POSIX_FADV_SEQUENTIAL:
case POSIX_FADV_WILLNEED:
case POSIX_FADV_NOREUSE:
case POSIX_FADV_DONTNEED:
/* no bad return value, but ignore advice */
break;
default:
return -EINVAL;
}
return 0;
}
/*
* Careful about overflows. Len == 0 means "as much as possible". Use
* unsigned math because signed overflows are undefined and UBSan
* complains.
*/
endbyte = (u64)offset + (u64)len;
if (!len || endbyte < len)
endbyte = LLONG_MAX;
else
endbyte--; /* inclusive */
switch (advice) {
case POSIX_FADV_NORMAL:
file->f_ra.ra_pages = bdi->ra_pages;
spin_lock(&file->f_lock);
file->f_mode &= ~(FMODE_RANDOM | FMODE_NOREUSE);
spin_unlock(&file->f_lock);
break;
case POSIX_FADV_RANDOM:
spin_lock(&file->f_lock);
file->f_mode |= FMODE_RANDOM;
spin_unlock(&file->f_lock);
break;
case POSIX_FADV_SEQUENTIAL:
file->f_ra.ra_pages = bdi->ra_pages * 2;
spin_lock(&file->f_lock);
file->f_mode &= ~FMODE_RANDOM;
spin_unlock(&file->f_lock);
break;
case POSIX_FADV_WILLNEED:
/* First and last PARTIAL page! */
start_index = offset >> PAGE_SHIFT;
end_index = endbyte >> PAGE_SHIFT;
/* Careful about overflow on the "+1" */
nrpages = end_index - start_index + 1;
if (!nrpages)
nrpages = ~0UL;
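/*
 * Kick off readahead for the range; this submits the I/O but does not
 * wait for the pages to actually be read in.
 */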
force_page_cache_readahead(mapping, file, start_index, nrpages);
break;
case POSIX_FADV_NOREUSE:
spin_lock(&file->f_lock);
file->f_mode |= FMODE_NOREUSE;
spin_unlock(&file->f_lock);
break;
case POSIX_FADV_DONTNEED:
__filemap_fdatawrite_range(mapping, offset, endbyte,
WB_SYNC_NONE);
/*
* First and last FULL page! Partial pages are deliberately
* preserved on the expectation that it is better to preserve
* needed memory than to discard unneeded memory.
*/
start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
end_index = (endbyte >> PAGE_SHIFT);
/*
* The page at end_index will be inclusively discarded by
* invalidate_mapping_pages(), so subtracting 1 from
* end_index means we will skip the last page. But if endbyte
* is page aligned or is at the end of file, we should not skip
* that page - discarding the last page is safe enough.
*/
if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK &&
endbyte != inode->i_size - 1) {
/* First page is tricky as 0 - 1 = -1, but pgoff_t
* is unsigned, so the end_index >= start_index
* check below would be true and we'll discard the whole
* file cache which is not what was asked.
*/
if (end_index == 0)
break;
end_index--;
}
if (end_index >= start_index) {
unsigned long nr_failed = 0;
/*
* It's common to FADV_DONTNEED right after
* the read or write that instantiates the
* pages, in which case there will be some
* sitting on the local LRU cache. Try to
* avoid the expensive remote drain and the
* second cache tree walk below by flushing
* them out right away.
*/
lru_add_drain();
mapping_try_invalidate(mapping, start_index, end_index,
&nr_failed);
/*
* The failures may be due to the folio being
* in the LRU cache of a remote CPU. Drain all
* caches and try again.
*/
if (nr_failed) {
lru_add_drain_all();
invalidate_mapping_pages(mapping, start_index,
end_index);
}
}
break;
default:
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL(generic_fadvise);
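/*
 * vfs_fadvise() gives a filesystem the chance to handle the advice with its
 * own ->fadvise() file operation before falling back to generic_fadvise().
 */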
int vfs_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
{
if (file->f_op->fadvise)
return file->f_op->fadvise(file, offset, len, advice);
return generic_fadvise(file, offset, len, advice);
}
EXPORT_SYMBOL(vfs_fadvise);
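/*
 * Syscall plumbing, built only when CONFIG_ADVISE_SYSCALLS is enabled:
 * fadvise64_64 is the canonical entry point; the fadvise64 and compat
 * variants below simply forward their arguments to ksys_fadvise64_64().
 */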
#ifdef CONFIG_ADVISE_SYSCALLS
int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
{
CLASS(fd, f)(fd);
if (fd_empty(f))
return -EBADF;
return vfs_fadvise(fd_file(f), offset, len, advice);
}
SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
{
return ksys_fadvise64_64(fd, offset, len, advice);
}
#ifdef __ARCH_WANT_SYS_FADVISE64
SYSCALL_DEFINE4(fadvise64, int, fd, loff_t, offset, size_t, len, int, advice)
{
return ksys_fadvise64_64(fd, offset, len, advice);
}
#endif
#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_FADVISE64_64)
COMPAT_SYSCALL_DEFINE6(fadvise64_64, int, fd, compat_arg_u64_dual(offset),
compat_arg_u64_dual(len), int, advice)
{
return ksys_fadvise64_64(fd, compat_arg_u64_glue(offset),
compat_arg_u64_glue(len), advice);
}
#endif
#endif
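
For context, here is a minimal userspace sketch (not part of the kernel source; the file name data.tmp and the 4 KiB write size are arbitrary placeholders) showing how the path above is exercised: the program writes and syncs a file, then calls posix_fadvise(2) with POSIX_FADV_DONTNEED, which reaches generic_fadvise() through the syscall definitions at the end of this file.

#define _XOPEN_SOURCE 600   /* expose posix_fadvise() */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    int fd = open("data.tmp", O_RDWR | O_CREAT, 0644);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    /* Populate the page cache with one page of data. */
    char buf[4096];
    memset(buf, 'x', sizeof(buf));
    if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
        perror("write");
        return 1;
    }
    fsync(fd);  /* dirty pages cannot be dropped until they are written back */

    /*
     * len == 0 means "to the end of the file", matching the
     * "Len == 0 means as much as possible" handling above.
     * posix_fadvise() returns an errno value directly, not -1.
     */
    int err = posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
    if (err)
        fprintf(stderr, "posix_fadvise: %s\n", strerror(err));

    close(fd);
    return err ? 1 : 0;
}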