Release 4.13 ipc/shm.c
/*
* linux/ipc/shm.c
* Copyright (C) 1992, 1993 Krishna Balasubramanian
* Many improvements/fixes by Bruno Haible.
* Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
* Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
*
* /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
* BIGMEM support, Andrea Arcangeli <andrea@suse.de>
* SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
* HIGHMEM support, Ingo Molnar <mingo@redhat.com>
* Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
* Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
* Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
*
* support for audit of ipc object properties and permission changes
* Dustin Kirkland <dustin.kirkland@us.ibm.com>
*
* namespaces support
* OpenVZ, SWsoft Inc.
* Pavel Emelianov <xemul@openvz.org>
*
* Better ipc lock (kern_ipc_perm.lock) handling
* Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
*/
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>
#include <linux/uaccess.h>
#include "util.h"
struct shm_file_data {
int id;
struct ipc_namespace *ns;
struct file *file;
const struct vm_operations_struct *vm_ops;
};
#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;
#define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS])
#define shm_unlock(shp) \
ipc_unlock(&(shp)->shm_perm)
static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif
void shm_init_ns(struct ipc_namespace *ns)
{
ns->shm_ctlmax = SHMMAX;
ns->shm_ctlall = SHMALL;
ns->shm_ctlmni = SHMMNI;
ns->shm_rmid_forced = 0;
ns->shm_tot = 0;
ipc_init_ids(&shm_ids(ns));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kirill Korotaev | 31 | 63.27% | 1 | 20.00% |
Vasiliy Kulikov | 6 | 12.24% | 1 | 20.00% |
Linus Torvalds (pre-git) | 6 | 12.24% | 1 | 20.00% |
Pierre Peiffer | 3 | 6.12% | 1 | 20.00% |
WANG Cong | 3 | 6.12% | 1 | 20.00% |
Total | 49 | 100.00% | 5 | 100.00% |
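shm_init_ns() seeds each new IPC namespace with the compile-time defaults (SHMMAX, SHMALL, SHMMNI); per the header comment, Christoph Rohland later made them sysctl'able. A minimal userspace sketch, not part of shm.c, that reads the resulting procfs knobs (standard paths; an absent file is simply skipped):

/* Illustrative only: dump the per-namespace shm limits seeded above. */
#include <stdio.h>

int main(void)
{
	const char *paths[] = {
		"/proc/sys/kernel/shmmax",          /* ns->shm_ctlmax, bytes */
		"/proc/sys/kernel/shmall",          /* ns->shm_ctlall, pages */
		"/proc/sys/kernel/shmmni",          /* ns->shm_ctlmni, max segments */
		"/proc/sys/kernel/shm_rmid_forced", /* ns->shm_rmid_forced */
	};
	unsigned long long val;

	for (unsigned i = 0; i < sizeof(paths) / sizeof(paths[0]); i++) {
		FILE *f = fopen(paths[i], "r");

		if (!f)
			continue; /* knob not exposed here; skip */
		if (fscanf(f, "%llu", &val) == 1)
			printf("%-35s %llu\n", paths[i], val);
		fclose(f);
	}
	return 0;
}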
/*
* Called with shm_ids.rwsem (writer) and the shp structure locked.
* Only shm_ids.rwsem remains locked on exit.
*/
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
struct shmid_kernel *shp;
shp = container_of(ipcp, struct shmid_kernel, shm_perm);
if (shp->shm_nattch) {
shp->shm_perm.mode |= SHM_DEST;
/* Do not find it any more */
shp->shm_perm.key = IPC_PRIVATE;
shm_unlock(shp);
} else
shm_destroy(ns, shp);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kirill Korotaev | 51 | 71.83% | 1 | 33.33% |
Pierre Peiffer | 19 | 26.76% | 1 | 33.33% |
Linus Torvalds (pre-git) | 1 | 1.41% | 1 | 33.33% |
Total | 71 | 100.00% | 3 | 100.00% |
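do_shm_rmid() is what gives IPC_RMID its deferred semantics: while attaches remain, the segment is only marked SHM_DEST and its key is retired to IPC_PRIVATE so new lookups cannot find it; actual destruction happens at the last detach. A small userspace sketch of that behaviour, assuming only the standard SysV shm API:

/* Illustrative only: IPC_RMID with one attach outstanding sets SHM_DEST;
 * the mapping stays valid until shmdt(), which triggers shm_destroy(). */
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	char *p = shmat(id, NULL, 0);

	if (id < 0 || p == (char *)-1)
		return 1;
	shmctl(id, IPC_RMID, NULL); /* shm_nattch == 1: only marks SHM_DEST */
	strcpy(p, "still mapped after IPC_RMID");
	puts(p);                    /* the attach is still usable */
	shmdt(p);                   /* last detach: segment freed here */
	return 0;
}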
#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kirill Korotaev | 17 | 47.22% | 1 | 20.00% |
Serge E. Hallyn | 13 | 36.11% | 1 | 20.00% |
Pierre Peiffer | 3 | 8.33% | 2 | 40.00% |
Nadia Derbey | 3 | 8.33% | 1 | 20.00% |
Total | 36 | 100.00% | 5 | 100.00% |
#endif
static int __init ipc_ns_init(void)
{
shm_init_ns(&init_ipc_ns);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 9 | 50.00% | 2 | 40.00% |
Linus Torvalds | 7 | 38.89% | 1 | 20.00% |
Kirill Korotaev | 1 | 5.56% | 1 | 20.00% |
Pierre Peiffer | 1 | 5.56% | 1 | 20.00% |
Total | 18 | 100.00% | 5 | 100.00% |
pure_initcall(ipc_ns_init);
void __init shm_init(void)
{
ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
IPC_SHM_IDS, sysvipc_shm_proc_show);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Helge Deller | 12 | 40.00% | 1 | 20.00% |
Linus Torvalds (pre-git) | 8 | 26.67% | 1 | 20.00% |
Linus Torvalds | 7 | 23.33% | 1 | 20.00% |
Mike Waychison | 2 | 6.67% | 1 | 20.00% |
Kirill Korotaev | 1 | 3.33% | 1 | 20.00% |
Total | 30 | 100.00% | 5 | 100.00% |
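The interface registered here is an ordinary seq_file, so the table (one line per segment, columns as in the header string above) can be read like any other proc file. A trivial sketch:

/* Illustrative only: dump the /proc/sysvipc/shm table that shm_init()
 * registers via ipc_init_proc_interface(). */
#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/sysvipc/shm", "r");

	if (!f) {
		perror("/proc/sysvipc/shm");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}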
static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
if (IS_ERR(ipcp))
return ERR_CAST(ipcp);
return container_of(ipcp, struct shmid_kernel, shm_perm);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Davidlohr Bueso A | 57 | 100.00% | 2 | 100.00% |
Total | 57 | 100.00% | 2 | 100.00% |
static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);
if (IS_ERR(ipcp))
return ERR_CAST(ipcp);
return container_of(ipcp, struct shmid_kernel, shm_perm);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Davidlohr Bueso A | 57 | 100.00% | 1 | 100.00% |
Total | 57 | 100.00% | 1 | 100.00% |
/*
* shm_lock_(check_) routines are called in the paths where the rwsem
* is not necessarily held.
*/
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
/*
* Callers of shm_lock() must validate the status of the returned ipc
* object pointer (as returned by ipc_lock()), and error out as
* appropriate.
*/
if (IS_ERR(ipcp))
return (void *)ipcp;
return container_of(ipcp, struct shmid_kernel, shm_perm);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nadia Derbey | 36 | 61.02% | 2 | 28.57% |
Kirill A. Shutemov | 11 | 18.64% | 1 | 14.29% |
Pierre Peiffer | 4 | 6.78% | 1 | 14.29% |
Kirill Korotaev | 4 | 6.78% | 1 | 14.29% |
Linus Torvalds (pre-git) | 4 | 6.78% | 2 | 28.57% |
Total | 59 | 100.00% | 7 | 100.00% |
static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
rcu_read_lock();
ipc_lock_object(&ipcp->shm_perm);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Vasiliy Kulikov | 22 | 95.65% | 1 | 50.00% |
Davidlohr Bueso A | 1 | 4.35% | 1 | 50.00% |
Total | 23 | 100.00% | 2 | 100.00% |
static void shm_rcu_free(struct rcu_head *head)
{
struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
rcu);
struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
shm_perm);
security_shm_free(shp);
kvfree(shp);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Davidlohr Bueso A | 39 | 76.47% | 1 | 25.00% |
Manfred Spraul | 10 | 19.61% | 1 | 25.00% |
Kees Cook | 2 | 3.92% | 2 | 50.00% |
Total | 51 | 100.00% | 4 | 100.00% |
static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
list_del(&s->shm_clist);
ipc_rmid(&shm_ids(ns), &s->shm_perm);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 13 | 33.33% | 3 | 50.00% |
Nadia Derbey | 10 | 25.64% | 1 | 16.67% |
Jack Miller | 8 | 20.51% | 1 | 16.67% |
Kirill Korotaev | 8 | 20.51% | 1 | 16.67% |
Total | 39 | 100.00% | 6 | 100.00% |
static int __shm_open(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
struct shmid_kernel *shp;
shp = shm_lock(sfd->ns, sfd->id);
if (IS_ERR(shp))
return PTR_ERR(shp);
shp->shm_atim = get_seconds();
shp->shm_lprid = task_tgid_vnr(current);
shp->shm_nattch++;
shm_unlock(shp);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 35 | 38.89% | 5 | 41.67% |
Eric W. Biedermann | 25 | 27.78% | 1 | 8.33% |
Kirill A. Shutemov | 19 | 21.11% | 1 | 8.33% |
Kirill Korotaev | 4 | 4.44% | 1 | 8.33% |
Pavel Emelyanov | 3 | 3.33% | 1 | 8.33% |
Andi Kleen | 2 | 2.22% | 1 | 8.33% |
Andrew Morton | 1 | 1.11% | 1 | 8.33% |
Eric Sesterhenn / Snakebyte | 1 | 1.11% | 1 | 8.33% |
Total | 90 | 100.00% | 12 | 100.00% |
/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
int err = __shm_open(vma);
/*
* We raced in the idr lookup or with shm_destroy().
* Either way, the ID is busted.
*/
WARN_ON_ONCE(err);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kirill A. Shutemov | 24 | 96.00% | 1 | 50.00% |
Linus Torvalds (pre-git) | 1 | 4.00% | 1 | 50.00% |
Total | 25 | 100.00% | 2 | 100.00% |
/*
* shm_destroy - free the struct shmid_kernel
*
* @ns: namespace
* @shp: struct to free
*
* It has to be called with shp and shm_ids.rwsem (writer) locked,
* but returns with shp unlocked and freed.
*/
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
struct file *shm_file;
shm_file = shp->shm_file;
shp->shm_file = NULL;
ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
shm_rmid(ns, shp);
shm_unlock(shp);
if (!is_file_hugepages(shm_file))
shmem_lock(shm_file, 0, shp->mlock_user);
else if (shp->mlock_user)
user_shm_unlock(i_size_read(file_inode(shm_file)),
shp->mlock_user);
fput(shm_file);
ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 39 | 33.33% | 5 | 29.41% |
Greg Thelen | 17 | 14.53% | 1 | 5.88% |
Rik Van Riel | 13 | 11.11% | 1 | 5.88% |
Hugh Dickins | 12 | 10.26% | 2 | 11.76% |
Kirill Korotaev | 9 | 7.69% | 1 | 5.88% |
Andrew Morton | 8 | 6.84% | 1 | 5.88% |
Linus Torvalds | 7 | 5.98% | 1 | 5.88% |
Manfred Spraul | 4 | 3.42% | 2 | 11.76% |
Al Viro | 3 | 2.56% | 1 | 5.88% |
Dave Hansen | 3 | 2.56% | 1 | 5.88% |
Davidlohr Bueso A | 2 | 1.71% | 1 | 5.88% |
Total | 117 | 100.00% | 17 | 100.00% |
/*
* shm_may_destroy - identifies whether shm segment should be destroyed now
*
* Returns true if and only if there are no active users of the segment and
* one of the following is true:
*
* 1) shmctl(id, IPC_RMID, NULL) was called for this shp
*
* 2) sysctl kernel.shm_rmid_forced is set to 1.
*/
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
return (shp->shm_nattch == 0) &&
(ns->shm_rmid_forced ||
(shp->shm_perm.mode & SHM_DEST));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Vasiliy Kulikov | 41 | 100.00% | 1 | 100.00% |
Total | 41 | 100.00% | 1 | 100.00% |
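The shm_nattch counter that shm_may_destroy() tests is visible from userspace via IPC_STAT, which makes the predicate easy to observe. A sketch, again using only the standard API:

/* Illustrative only: watch shm_nattch move across shmat()/shmdt(); once
 * it is 0, IPC_RMID destroys the segment immediately (case 1 above). */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	void *p = shmat(id, NULL, 0);
	struct shmid_ds ds;

	shmctl(id, IPC_STAT, &ds);
	printf("nattch after shmat: %lu\n", (unsigned long)ds.shm_nattch);
	shmdt(p);
	shmctl(id, IPC_STAT, &ds);
	printf("nattch after shmdt: %lu\n", (unsigned long)ds.shm_nattch);
	shmctl(id, IPC_RMID, NULL); /* nattch == 0: destroyed right away */
	return 0;
}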
/*
* remove the attach descriptor vma.
* free memory for segment if it is marked destroyed.
* The descriptor has already been removed from the current->mm->mmap list
* and will later be kfree()d.
*/
static void shm_close(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
struct shmid_kernel *shp;
struct ipc_namespace *ns = sfd->ns;
down_write(&shm_ids(ns).rwsem);
/* remove from the list of attaches of the shm segment */
shp = shm_lock(ns, sfd->id);
/*
* We raced in the idr lookup or with shm_destroy().
* Either way, the ID is busted.
*/
if (WARN_ON_ONCE(IS_ERR(shp)))
goto done; /* no-op */
shp->shm_lprid = task_tgid_vnr(current);
shp->shm_dtim = get_seconds();
shp->shm_nattch--;
if (shm_may_destroy(ns, shp))
shm_destroy(ns, shp);
else
shm_unlock(shp);
done:
up_write(&shm_ids(ns).rwsem);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 55 | 39.86% | 5 | 33.33% |
Vasiliy Kulikov | 31 | 22.46% | 1 | 6.67% |
Kirill A. Shutemov | 17 | 12.32% | 1 | 6.67% |
Eric W. Biedermann | 15 | 10.87% | 1 | 6.67% |
Kirill Korotaev | 10 | 7.25% | 1 | 6.67% |
Pavel Emelyanov | 3 | 2.17% | 1 | 6.67% |
Davidlohr Bueso A | 2 | 1.45% | 1 | 6.67% |
Nadia Derbey | 2 | 1.45% | 2 | 13.33% |
Andi Kleen | 2 | 1.45% | 1 | 6.67% |
Eric Sesterhenn / Snakebyte | 1 | 0.72% | 1 | 6.67% |
Total | 138 | 100.00% | 15 | 100.00% |
/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
struct ipc_namespace *ns = data;
struct kern_ipc_perm *ipcp = p;
struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
/*
* We want to destroy segments without users and with already
* exited originating process.
*
* As shp->* are changed under rwsem, it's safe to skip shp locking.
*/
if (shp->shm_creator != NULL)
return 0;
if (shm_may_destroy(ns, shp)) {
shm_lock_by_ptr(shp);
shm_destroy(ns, shp);
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Vasiliy Kulikov | 74 | 88.10% | 3 | 37.50% |
Linus Torvalds (pre-git) | 8 | 9.52% | 3 | 37.50% |
Davidlohr Bueso A | 1 | 1.19% | 1 | 12.50% |
Andrew Morton | 1 | 1.19% | 1 | 12.50% |
Total | 84 | 100.00% | 8 | 100.00% |
void shm_destroy_orphaned(struct ipc_namespace *ns)
{
down_write(&shm_ids(ns).rwsem);
if (shm_ids(ns).in_use)
idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
up_write(&shm_ids(ns).rwsem);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Vasiliy Kulikov | 55 | 96.49% | 2 | 66.67% |
Davidlohr Bueso A | 2 | 3.51% | 1 | 33.33% |
Total | 57 | 100.00% | 3 | 100.00% |
/* Locking assumes this will only be called with task == current */
void exit_shm(struct task_struct *task)
{
struct ipc_namespace *ns = task->nsproxy->ipc_ns;
struct shmid_kernel *shp, *n;
if (list_empty(&task->sysvshm.shm_clist))
return;
/*
* If kernel.shm_rmid_forced is not set then only keep track of
* which shmids are orphaned, so that a later set of the sysctl
* can clean them up.
*/
if (!ns->shm_rmid_forced) {
down_read(&shm_ids(ns).rwsem);
list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
shp->shm_creator = NULL;
/*
* Only under read lock but we are only called on current
* so no entry on the list will be shared.
*/
list_del(&task->sysvshm.shm_clist);
up_read(&shm_ids(ns).rwsem);
return;
}
/*
* Destroy all already created segments, that were not yet mapped,
* and mark any mapped as orphan to cover the sysctl toggling.
* Destroy is skipped if shm_may_destroy() returns false.
*/
down_write(&shm_ids(ns).rwsem);
list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
shp->shm_creator = NULL;
if (shm_may_destroy(ns, shp)) {
shm_lock_by_ptr(shp);
shm_destroy(ns, shp);
}
}
/* Remove the list head from any segments still attached. */
list_del(&task->sysvshm.shm_clist);
up_write(&shm_ids(ns).rwsem);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jack Miller | 117 | 66.86% | 2 | 18.18% |
Vasiliy Kulikov | 45 | 25.71% | 3 | 27.27% |
Linus Torvalds (pre-git) | 7 | 4.00% | 3 | 27.27% |
Kirill Korotaev | 3 | 1.71% | 1 | 9.09% |
Davidlohr Bueso A | 2 | 1.14% | 1 | 9.09% |
Nadia Derbey | 1 | 0.57% | 1 | 9.09% |
Total | 175 | 100.00% | 11 | 100.00% |
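The effect of exit_shm() can also be seen from userspace: a process that creates a segment and exits without removing it either leaves an orphan (shm_rmid_forced = 0, shm_creator merely cleared) or has it reaped on the spot (shm_rmid_forced = 1). A hedged sketch; which branch you see depends on the sysctl, and the parent cleans up in the surviving case:

/* Illustrative only: the child is the shm_creator; its exit runs
 * exit_shm(), and the sysctl decides the segment's fate. */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int fds[2], id;

	if (pipe(fds))
		return 1;
	if (fork() == 0) { /* child: creates, never attaches, exits */
		id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
		if (write(fds[1], &id, sizeof(id)) != sizeof(id))
			_exit(1);
		_exit(0); /* exit_shm() runs here */
	}
	if (read(fds[0], &id, sizeof(id)) != sizeof(id))
		return 1;
	wait(NULL);
	if (shmctl(id, IPC_RMID, NULL) == 0)
		puts("segment survived creator exit (shm_rmid_forced = 0)");
	else
		puts("segment reaped at creator exit (shm_rmid_forced = 1)");
	return 0;
}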
static int shm_fault(struct vm_fault *vmf)
{
struct file *file = vmf->vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
return sfd->vm_ops->fault(vmf);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric W. Biedermann | 19 | 45.24% | 1 | 8.33% |
Linus Torvalds (pre-git) | 11 | 26.19% | 4 | 33.33% |
Nicholas Piggin | 6 | 14.29% | 2 | 16.67% |
Dave Jiang | 2 | 4.76% | 1 | 8.33% |
Hugh Dickins | 1 | 2.38% | 1 | 8.33% |
Adrian Bunk | 1 | 2.38% | 1 | 8.33% |
David Howells | 1 | 2.38% | 1 | 8.33% |
Kirill Korotaev | 1 | 2.38% | 1 | 8.33% |
Total | 42 | 100.00% | 12 | 100.00% |
#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
struct file *file = vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
int err = 0;
if (sfd->vm_ops->set_policy)
err = sfd->vm_ops->set_policy(vma, new);
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric W. Biedermann | 47 | 73.44% | 1 | 33.33% |
Kirill Korotaev | 16 | 25.00% | 1 | 33.33% |
Adrian Bunk | 1 | 1.56% | 1 | 33.33% |
Total | 64 | 100.00% | 3 | 100.00% |
static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
unsigned long addr)
{
struct file *file = vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
struct mempolicy *pol = NULL;
if (sfd->vm_ops->get_policy)
pol = sfd->vm_ops->get_policy(vma, addr);
else if (vma->vm_policy)
pol = vma->vm_policy;
return pol;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric W. Biedermann | 69 | 86.25% | 1 | 25.00% |
Adam Litke | 7 | 8.75% | 1 | 25.00% |
Kirill Korotaev | 3 | 3.75% | 1 | 25.00% |
Adrian Bunk | 1 | 1.25% | 1 | 25.00% |
Total | 80 | 100.00% | 4 | 100.00% |
#endif
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
struct shm_file_data *sfd = shm_file_data(file);
int ret;
/*
* In case of remap_file_pages() emulation, the file can represent
* removed IPC ID: propagate shm_lock() error to caller.
*/
ret = __shm_open(vma);
if (ret)
return ret;
ret = call_mmap(sfd->file, vma);
if (ret) {
shm_close(vma);
return ret;
}
sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
WARN_ON(!sfd->vm_ops->fault);
#endif
vma->vm_ops = &shm_vm_ops;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric W. Biedermann | 61 | 59.80% | 1 | 14.29% |
Kirill A. Shutemov | 23 | 22.55% | 1 | 14.29% |
Nicholas Piggin | 9 | 8.82% | 1 | 14.29% |
David Howells | 5 | 4.90% | 1 | 14.29% |
Kirill Korotaev | 2 | 1.96% | 1 | 14.29% |
Davidlohr Bueso A | 1 | 0.98% | 1 | 14.29% |
Miklos Szeredi | 1 | 0.98% | 1 | 14.29% |
Total | 102 | 100.00% | 7 | 100.00% |
static int shm_release(struct inode *ino, struct file *file)
{
struct shm_file_data *sfd = shm_file_data(file);
put_ipc_ns(sfd->ns);
shm_file_data(file) = NULL;
kfree(sfd);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric W. Biedermann | 48 | 100.00% | 1 | 100.00% |
Total | 48 | 100.00% | 1 | 100.00% |
static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
struct shm_file_data *sfd = shm_file_data(file);
if (!sfd->file->f_op->fsync)
return -EINVAL;
return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Adam Litke | 39 | 60.00% | 1 | 25.00% |
Josef Bacik | 10 | 15.38% | 1 | 25.00% |
Christoph Hellwig | 9 | 13.85% | 1 | 25.00% |
Jeff Layton | 7 | 10.77% | 1 | 25.00% |
Total | 65 | 100.00% | 4 | 100.00% |
static long shm_fallocate(struct file *file, int mode, loff_t offset,
loff_t len)
{
struct shm_file_data *sfd = shm_file_data(file);
if (!sfd->file->f_op->fallocate)
return -EOPNOTSUPP;
return sfd->file->f_op->fallocate(file, mode, offset, len);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Will Deacon | 63 | 100.00% | 1 | 100.00% |
Total | 63 | 100.00% | 1 | 100.00% |
static unsigned long shm_get_unmapped_area(struct file *file,
unsigned long addr, unsigned long len, unsigned long pgoff,
unsigned long flags)
{
struct shm_file_data *sfd = shm_file_data(file);
return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
pgoff, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric W. Biedermann | 54 | 90.00% | 1 | 50.00% |
Al Viro | 6 | 10.00% | 1 | 50.00% |
Total | 60 | 100.00% | 2 | 100.00% |
static const struct file_operations shm_file_operations = {
.mmap = shm_mmap,
.fsync = shm_fsync,
.release = shm_release,
.get_unmapped_area = shm_get_unmapped_area,
.llseek = noop_llseek,
.fallocate = shm_fallocate,
};
/*
* shm_file_operations_huge is now identical to shm_file_operations,
* but we keep it distinct for the sake of is_file_shm_hugepages().
*/
static const struct file_operations shm_file_operations_huge = {
.mmap = shm_mmap,
.fsync = shm_fsync,
.release = shm_release,
.get_unmapped_area = shm_get_unmapped_area,
.llseek = noop_llseek,
.fallocate = shm_fallocate,
};
bool is_file_shm_hugepages(struct file *file)
{
return file->f_op == &shm_file_operations_huge;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Al Viro | 17 | 94.44% | 1 | 50.00% |
Yaowei Bai | 1 | 5.56% | 1 | 50.00% |
Total | 18 | 100.00% | 2 | 100.00% |
static const struct vm_operations_struct shm_vm_ops = {
.open = shm_open, /* callback for a new vm-area open */
.close = shm_close, /* callback for when the vm-area is released */
.fault = shm_fault,
#if defined(CONFIG_NUMA)
.set_policy = shm_set_policy,
.get_policy = shm_get_policy,
#endif
};
/**
* newseg - Create a new shared memory segment
* @ns: namespace
* @params: ptr to the structure that contains key, size and shmflg
*
* Called with shm_ids.rwsem held as a writer.
*/
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
key_t key = params->key;
int shmflg = params->flg;
size_t size = params->u.size;
int error;
struct shmid_kernel *shp;
size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
struct file *file;
char name[13];
vm_flags_t acctflag = 0;
if (size < SHMMIN || size > ns->shm_ctlmax)
return -EINVAL;
if (numpages << PAGE_SHIFT < size)
return -ENOSPC;
if (ns->shm_tot + numpages < ns->shm_tot ||
ns->shm_tot + numpages > ns->shm_ctlall)
return -ENOSPC;
shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
if (unlikely(!shp))
return -ENOMEM;
shp->shm_perm.key = key;
shp->shm_perm.mode = (shmflg & S_IRWXUGO);
shp->mlock_user = NULL;
shp->shm_perm.security = NULL;
error = security_shm_alloc(shp);
if (error) {
kvfree(shp);
return error;
}
sprintf(name, "SYSV%08x", key);
if (shmflg & SHM_HUGETLB) {
struct hstate *hs;
size_t hugesize;
hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
if (!hs) {
error = -EINVAL;
goto no_file;
}
hugesize = ALIGN(size, huge_page_size(hs));
/* hugetlb_file_setup applies strict accounting */
if (shmflg & SHM_NORESERVE)
acctflag = VM_NORESERVE;
file = hugetlb_file_setup(name, hugesize, acctflag,
&shp->mlock_user, HUGETLB_SHMFS_INODE,
(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
} else {
/*
* Do not allow no accounting for OVERCOMMIT_NEVER, even
* if it's asked for.
*/
if ((shmflg & SHM_NORESERVE) &&
sysctl_overcommit_memory != OVERCOMMIT_NEVER)
acctflag = VM_NORESERVE;
file = shmem_kernel_file_setup(name, size, acctflag);
}
error = PTR_ERR(file);
if (IS_ERR(file))
goto no_file;
shp->shm_cprid = task_tgid_vnr(current);
shp->shm_lprid = 0;
shp->shm_atim = shp->shm_dtim = 0;
shp->shm_ctim = get_seconds();
shp->shm_segsz = size;
shp->shm_nattch = 0;
shp->shm_file = file;
shp->shm_creator = current;
error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
if (error < 0)
goto no_id;
list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
/*
* shmid gets reported as "inode#" in /proc/pid/maps.
* proc-ps tools use this. Changing this will break them.
*/
file_inode(file)->i_ino = shp->shm_perm.id;
ns->shm_tot += numpages;
error = shp->shm_perm.id;
ipc_unlock_object(&shp->shm_perm);
rcu_read_unlock();
return error;
no_id:
if (is_file_hugepages(file) && shp->mlock_user)
user_shm_unlock(size, shp->mlock_user);
fput(file);
no_file:
call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
return error;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 174 | 31.87% | 9 | 17.31% |
Nadia Derbey | 34 | 6.23% | 2 | 3.85% |
Manfred Spraul | 31 | 5.68% | 5 | 9.62% |
Linus Torvalds | 28 | 5.13% | 2 | 3.85% |
Badari Pulavarty | 25 | 4.58% | 2 | 3.85% |
Hugh Dickins | 24 | 4.40% | 3 | 5.77% |
Stephen D. Smalley | 24 | 4.40% | 2 | 3.85% |
Naoya Horiguchi | 24 | 4.40% | 1 | 1.92% |
Andrew Morton | 23 | 4.21% | 4 | 7.69% |
Greg Kroah-Hartman | 22 | 4.03% | 3 | 5.77% |
Li Zefan | 18 | 3.30% | 1 | 1.92% |
Mel Gorman | 17 | 3.11% | 1 | 1.92% |
Jack Miller | 15 | 2.75% | 1 | 1.92% |
Kees Cook | 14 | 2.56% | 1 | 1.92% |
Kirill Korotaev | 13 | 2.38% | 1 | 1.92% |
Eric W. Biedermann | 12 | 2.20% | 1 | 1.92% |
Rik Van Riel | 12 | 2.20% | 1 | 1.92% |
Andi Kleen | 10 | 1.83% | 2 | 3.85% |
Davidlohr Bueso A | 9 | 1.65% | 3 | 5.77% |
Vasiliy Kulikov | 6 | 1.10% | 1 | 1.92% |
Al Viro | 3 | 0.55% | 1 | 1.92% |
Pavel Emelyanov | 3 | 0.55% | 1 | 1.92% |
Eric B Munson | 2 | 0.37% | 1 | 1.92% |
Robin Holt | 1 | 0.18% | 1 | 1.92% |
Motohiro Kosaki | 1 | 0.18% | 1 | 1.92% |
Guy Streeter | 1 | 0.18% | 1 | 1.92% |
Total | 546 | 100.00% | 52 | 100.00% |
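Two details of newseg() are worth seeing from the caller's side: shm_segsz records the byte size exactly as requested (only the ns->shm_tot accounting rounds up to whole pages), and SHM_HUGETLB routes the segment through hugetlb_file_setup() instead of shmem. A sketch under those assumptions; the SHM_HUGETLB fallback define is taken from <linux/shm.h>, and the hugetlb call typically needs reserved huge pages plus CAP_IPC_LOCK or membership in vm.hugetlb_shm_group:

/* Illustrative only: observe the unrounded shm_segsz and try a
 * hugetlb-backed segment. */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

#ifndef SHM_HUGETLB
#define SHM_HUGETLB 04000 /* assumption: value from <linux/shm.h> */
#endif

int main(void)
{
	struct shmid_ds ds;
	int id = shmget(IPC_PRIVATE, 1000, IPC_CREAT | 0600);

	if (id >= 0) {
		shmctl(id, IPC_STAT, &ds);
		/* prints 1000: shp->shm_segsz = size, not numpages << PAGE_SHIFT */
		printf("requested 1000 bytes, shm_segsz = %zu\n",
		       (size_t)ds.shm_segsz);
		shmctl(id, IPC_RMID, NULL);
	}
	id = shmget(IPC_PRIVATE, 2 * 1024 * 1024,
		    IPC_CREAT | SHM_HUGETLB | 0600);
	if (id < 0)
		perror("shmget(SHM_HUGETLB)"); /* ENOMEM without reserved pages */
	else
		shmctl(id, IPC_RMID, NULL);
	return 0;
}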
/*
* Called with shm_ids.rwsem and ipcp locked.
*/
static inline int