cregit-Linux: how code gets into the kernel

Release 4.12 include/linux/fs.h

Directory: include/linux
#ifndef _LINUX_FS_H

#define _LINUX_FS_H

#include <linux/linkage.h>
#include <linux/wait.h>
#include <linux/kdev_t.h>
#include <linux/dcache.h>
#include <linux/path.h>
#include <linux/stat.h>
#include <linux/cache.h>
#include <linux/list.h>
#include <linux/list_lru.h>
#include <linux/llist.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/pid.h>
#include <linux/bug.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/capability.h>
#include <linux/semaphore.h>
#include <linux/fiemap.h>
#include <linux/rculist_bl.h>
#include <linux/atomic.h>
#include <linux/shrinker.h>
#include <linux/migrate_mode.h>
#include <linux/uidgid.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/workqueue.h>
#include <linux/delayed_call.h>

#include <asm/byteorder.h>
#include <uapi/linux/fs.h>

struct backing_dev_info;
struct bdi_writeback;
struct bio;
struct export_operations;
struct hd_geometry;
struct iovec;
struct kiocb;
struct kobject;
struct pipe_inode_info;
struct poll_table_struct;
struct kstatfs;
struct vm_area_struct;
struct vfsmount;
struct cred;
struct swap_info_struct;
struct seq_file;
struct workqueue_struct;
struct iov_iter;
struct fscrypt_info;
struct fscrypt_operations;

extern void __init inode_init(void);
extern void __init inode_init_early(void);
extern void __init files_init(void);
extern void __init files_maxfiles_init(void);

extern struct files_stat_struct files_stat;
extern unsigned long get_max_files(void);
extern unsigned int sysctl_nr_open;
extern struct inodes_stat_t inodes_stat;
extern int leases_enable, lease_break_time;
extern int sysctl_protected_symlinks;
extern int sysctl_protected_hardlinks;

struct buffer_head;

typedef int (get_block_t)(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create);

typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
			ssize_t bytes, void *private);


#define MAY_EXEC		0x00000001

#define MAY_WRITE		0x00000002

#define MAY_READ		0x00000004

#define MAY_APPEND		0x00000008

#define MAY_ACCESS		0x00000010

#define MAY_OPEN		0x00000020

#define MAY_CHDIR		0x00000040
/* called from RCU mode, don't block */

#define MAY_NOT_BLOCK		0x00000080

/*
 * flags in file.f_mode.  Note that FMODE_READ and FMODE_WRITE must correspond
 * to O_WRONLY and O_RDWR via the strange trick in __dentry_open()
 */

/* file is open for reading */

#define FMODE_READ		((__force fmode_t)0x1)
/* file is open for writing */

#define FMODE_WRITE		((__force fmode_t)0x2)
/* file is seekable */

#define FMODE_LSEEK		((__force fmode_t)0x4)
/* file can be accessed using pread */

#define FMODE_PREAD		((__force fmode_t)0x8)
/* file can be accessed using pwrite */

#define FMODE_PWRITE		((__force fmode_t)0x10)
/* File is opened for execution with sys_execve / sys_uselib */

#define FMODE_EXEC		((__force fmode_t)0x20)
/* File is opened with O_NDELAY (only set for block devices) */

#define FMODE_NDELAY		((__force fmode_t)0x40)
/* File is opened with O_EXCL (only set for block devices) */

#define FMODE_EXCL		((__force fmode_t)0x80)
/* File is opened using open(.., 3, ..) and is writeable only for ioctls
   (special hack for floppy.c) */

#define FMODE_WRITE_IOCTL	((__force fmode_t)0x100)
/* 32bit hashes as llseek() offset (for directories) */

#define FMODE_32BITHASH         ((__force fmode_t)0x200)
/* 64bit hashes as llseek() offset (for directories) */

#define FMODE_64BITHASH         ((__force fmode_t)0x400)

/*
 * Don't update ctime and mtime.
 *
 * Currently a special hack for the XFS open_by_handle ioctl, but we'll
 * hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon.
 */

#define FMODE_NOCMTIME		((__force fmode_t)0x800)

/* Expect random access pattern */

#define FMODE_RANDOM		((__force fmode_t)0x1000)

/* File is huge (eg. /dev/kmem): treat loff_t as unsigned */

#define FMODE_UNSIGNED_OFFSET	((__force fmode_t)0x2000)

/* File is opened with O_PATH; almost nothing can be done with it */

#define FMODE_PATH		((__force fmode_t)0x4000)

/* File needs atomic accesses to f_pos */

#define FMODE_ATOMIC_POS	((__force fmode_t)0x8000)
/* Write access to underlying fs */

#define FMODE_WRITER		((__force fmode_t)0x10000)
/* Has read method(s) */

#define FMODE_CAN_READ          ((__force fmode_t)0x20000)
/* Has write method(s) */

#define FMODE_CAN_WRITE         ((__force fmode_t)0x40000)

/* File was opened by fanotify and shouldn't generate fanotify events */

#define FMODE_NONOTIFY		((__force fmode_t)0x4000000)

/*
 * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
 * that indicates that they should check the contents of the iovec are
 * valid, but not check the memory that the iovec elements
 * point to.
 */

#define CHECK_IOVEC_ONLY -1

/*
 * Attribute flags.  These should be or-ed together to figure out what
 * has been changed!
 */

#define ATTR_MODE	(1 << 0)

#define ATTR_UID	(1 << 1)

#define ATTR_GID	(1 << 2)

#define ATTR_SIZE	(1 << 3)

#define ATTR_ATIME	(1 << 4)

#define ATTR_MTIME	(1 << 5)

#define ATTR_CTIME	(1 << 6)

#define ATTR_ATIME_SET	(1 << 7)

#define ATTR_MTIME_SET	(1 << 8)

#define ATTR_FORCE	(1 << 9) 
/* Not itself an attribute change, but forces the change to be applied */

#define ATTR_ATTR_FLAG	(1 << 10)

#define ATTR_KILL_SUID	(1 << 11)

#define ATTR_KILL_SGID	(1 << 12)

#define ATTR_FILE	(1 << 13)

#define ATTR_KILL_PRIV	(1 << 14)

#define ATTR_OPEN	(1 << 15) 
/* Truncating from open(O_TRUNC) */

#define ATTR_TIMES_SET	(1 << 16)

#define ATTR_TOUCH	(1 << 17)

/*
 * Whiteout is represented by a char device.  The following constants define the
 * mode and device number to use.
 */

#define WHITEOUT_MODE 0

#define WHITEOUT_DEV 0

/*
 * This is the Inode Attributes structure, used for notify_change().  It
 * uses the above definitions as flags, to know which values have changed.
 * Also, in this manner, a Filesystem can look at only the values it cares
 * about.  Basically, these are the attributes that the VFS layer can
 * request to change from the FS layer.
 *
 * Derek Atkins <warlord@MIT.EDU> 94-10-20
 */

struct iattr {
	
unsigned int	ia_valid;
	
umode_t		ia_mode;
	
kuid_t		ia_uid;
	
kgid_t		ia_gid;
	
loff_t		ia_size;
	
struct timespec	ia_atime;
	
struct timespec	ia_mtime;
	
struct timespec	ia_ctime;

	/*
         * Not an attribute, but an auxiliary info for filesystems wanting to
         * implement an ftruncate() like method.  NOTE: filesystem should
         * check for (ia_valid & ATTR_FILE), and not for (ia_file != NULL).
         */
	
struct file	*ia_file;
};

/*
 * Includes for diskquotas.
 */
#include <linux/quota.h>

/*
 * Maximum number of layers of fs stack.  Needs to be limited to
 * prevent kernel stack overflow
 */

#define FILESYSTEM_MAX_STACK_DEPTH 2

/** 
 * enum positive_aop_returns - aop return codes with specific semantics
 *
 * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has
 *                          completed, that the page is still locked, and
 *                          should be considered active.  The VM uses this hint
 *                          to return the page to the active list -- it won't
 *                          be a candidate for writeback again in the near
 *                          future.  Other callers must be careful to unlock
 *                          the page if they get this return.  Returned by
 *                          writepage(); 
 *
 * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has
 *                      unlocked it and the page might have been truncated.
 *                      The caller should back up to acquiring a new page and
 *                      trying again.  The aop will be taking reasonable
 *                      precautions not to livelock.  If the caller held a page
 *                      reference, it should drop it before retrying.  Returned
 *                      by readpage().
 *
 * address_space_operation functions return these large constants to indicate
 * special semantics to the caller.  These are much larger than the bytes in a
 * page to allow for functions that return the number of bytes operated on in a
 * given page.
 */


enum positive_aop_returns {
	
AOP_WRITEPAGE_ACTIVATE	= 0x80000,
	
AOP_TRUNCATED_PAGE	= 0x80001,
};


#define AOP_FLAG_CONT_EXPAND		0x0001 
/* called from cont_expand */

#define AOP_FLAG_NOFS			0x0002 
/* used by filesystem to direct
                                                * helper code (eg buffer layer)
                                                * to clear GFP_FS from alloc */

/*
 * oh the beauties of C type declarations.
 */
struct page;
struct address_space;
struct writeback_control;


#define IOCB_EVENTFD		(1 << 0)

#define IOCB_APPEND		(1 << 1)

#define IOCB_DIRECT		(1 << 2)

#define IOCB_HIPRI		(1 << 3)

#define IOCB_DSYNC		(1 << 4)

#define IOCB_SYNC		(1 << 5)

#define IOCB_WRITE		(1 << 6)


struct kiocb {
	
struct file		*ki_filp;
	
loff_t			ki_pos;
	
void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
	
void			*private;
	
int			ki_flags;
};


static inline bool is_sync_kiocb(struct kiocb *kiocb) { return kiocb->ki_complete == NULL; }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig19100.00%1100.00%
Total19100.00%1100.00%

static inline int iocb_flags(struct file *file);
static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) { *kiocb = (struct kiocb) { .ki_filp = filp, .ki_flags = iocb_flags(filp), }; }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig3179.49%150.00%
Al Viro820.51%150.00%
Total39100.00%2100.00%

/* * "descriptor" for what we're up to with a read. * This allows us to use the same read code yet * have multiple different users of the data that * we read from a file. * * The simplest case just copies the data to user * mode. */ typedef struct { size_t written; size_t count; union { char __user *buf; void *data; } arg; int error; } read_descriptor_t; typedef int (*read_actor_t)(read_descriptor_t *, struct page *, unsigned long, unsigned long); struct address_space_operations { int (*writepage)(struct page *page, struct writeback_control *wbc); int (*readpage)(struct file *, struct page *); /* Write back some dirty pages from this mapping. */ int (*writepages)(struct address_space *, struct writeback_control *); /* Set a page dirty. Return true if this dirtied it */ int (*set_page_dirty)(struct page *page); int (*readpages)(struct file *filp, struct address_space *mapping, struct list_head *pages, unsigned nr_pages); int (*write_begin)(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata); int (*write_end)(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata); /* Unfortunately this kludge is needed for FIBMAP. Don't use it */ sector_t (*bmap)(struct address_space *, sector_t); void (*invalidatepage) (struct page *, unsigned int, unsigned int); int (*releasepage) (struct page *, gfp_t); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter); /* * migrate the contents of a page to the specified target. If * migrate_mode is MIGRATE_ASYNC, it must not block. 
*/ int (*migratepage) (struct address_space *, struct page *, struct page *, enum migrate_mode); bool (*isolate_page)(struct page *, isolate_mode_t); void (*putback_page)(struct page *); int (*launder_page) (struct page *); int (*is_partially_uptodate) (struct page *, unsigned long, unsigned long); void (*is_dirty_writeback) (struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); /* swapfile support */ int (*swap_activate)(struct swap_info_struct *sis, struct file *file, sector_t *span); void (*swap_deactivate)(struct file *file); }; extern const struct address_space_operations empty_aops; /* * pagecache_write_begin/pagecache_write_end must be used by general code * to write into the pagecache. */ int pagecache_write_begin(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata); int pagecache_write_end(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata); struct address_space { struct inode *host; /* owner: inode, block_device */ struct radix_tree_root page_tree; /* radix tree of all pages */ spinlock_t tree_lock; /* and lock protecting it */ atomic_t i_mmap_writable;/* count VM_SHARED mappings */ struct rb_root i_mmap; /* tree of private and shared mappings */ struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */ /* Protected by tree_lock together with the radix tree */ unsigned long nrpages; /* number of total pages */ /* number of shadow or DAX exceptional entries */ unsigned long nrexceptional; pgoff_t writeback_index;/* writeback starts here */ const struct address_space_operations *a_ops; /* methods */ unsigned long flags; /* error bits */ spinlock_t private_lock; /* for use by the address_space */ gfp_t gfp_mask; /* implicit gfp mask for allocations */ struct list_head private_list; /* ditto */ void *private_data; /* ditto */ } __attribute__((aligned(sizeof(long)))); /* 
* On most architectures that alignment is already the case; but * must be enforced here for CRIS, to let the least significant bit * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON. */ struct request_queue; struct block_device { dev_t bd_dev; /* not a kdev_t - it's a search key */ int bd_openers; struct inode * bd_inode; /* will die */ struct super_block * bd_super; struct mutex bd_mutex; /* open/close mutex */ void * bd_claiming; void * bd_holder; int bd_holders; bool bd_write_holder; #ifdef CONFIG_SYSFS struct list_head bd_holder_disks; #endif struct block_device * bd_contains; unsigned bd_block_size; struct hd_struct * bd_part; /* number of times partitions within this device have been opened. */ unsigned bd_part_count; int bd_invalidated; struct gendisk * bd_disk; struct request_queue * bd_queue; struct backing_dev_info *bd_bdi; struct list_head bd_list; /* * Private data. You must have bd_claim'ed the block_device * to use this. NOTE: bd_claim allows an owner to claim * the same device multiple times, the owner must take special * care to not mess up bd_private for that case. */ unsigned long bd_private; /* The counter of freeze processes */ int bd_fsfreeze_count; /* Mutex for freeze */ struct mutex bd_fsfreeze_mutex; }; /* * Radix-tree tags, for tagging dirty and writeback pages within the pagecache * radix trees */ #define PAGECACHE_TAG_DIRTY 0 #define PAGECACHE_TAG_WRITEBACK 1 #define PAGECACHE_TAG_TOWRITE 2 int mapping_tagged(struct address_space *mapping, int tag);
static inline void i_mmap_lock_write(struct address_space *mapping) { down_write(&mapping->i_mmap_rwsem); }

Contributors

PersonTokensPropCommitsCommitProp
Davidlohr Bueso A20100.00%2100.00%
Total20100.00%2100.00%


static inline void i_mmap_unlock_write(struct address_space *mapping) { up_write(&mapping->i_mmap_rwsem); }

Contributors

PersonTokensPropCommitsCommitProp
Davidlohr Bueso A20100.00%2100.00%
Total20100.00%2100.00%


static inline void i_mmap_lock_read(struct address_space *mapping) { down_read(&mapping->i_mmap_rwsem); }

Contributors

PersonTokensPropCommitsCommitProp
Davidlohr Bueso A20100.00%1100.00%
Total20100.00%1100.00%


static inline void i_mmap_unlock_read(struct address_space *mapping) { up_read(&mapping->i_mmap_rwsem); }

Contributors

PersonTokensPropCommitsCommitProp
Davidlohr Bueso A20100.00%1100.00%
Total20100.00%1100.00%

/* * Might pages of this file be mapped into userspace? */
static inline int mapping_mapped(struct address_space *mapping) { return !RB_EMPTY_ROOT(&mapping->i_mmap); }

Contributors

PersonTokensPropCommitsCommitProp
Andrew Morton2195.45%150.00%
Michel Lespinasse14.55%150.00%
Total22100.00%2100.00%

/* * Might pages of this file have been modified in userspace? * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff * marks vma as VM_SHARED if it is shared, and the file was opened for * writing i.e. vma may be mprotected writable even if now readonly. * * If i_mmap_writable is negative, no new writable mappings are allowed. You * can only deny writable mappings, if none exists right now. */
static inline int mapping_writably_mapped(struct address_space *mapping) { return atomic_read(&mapping->i_mmap_writable) > 0; }

Contributors

PersonTokensPropCommitsCommitProp
Andrew Morton1252.17%150.00%
David Herrmann1147.83%150.00%
Total23100.00%2100.00%


static inline int mapping_map_writable(struct address_space *mapping) { return atomic_inc_unless_negative(&mapping->i_mmap_writable) ? 0 : -EPERM; }

Contributors

PersonTokensPropCommitsCommitProp
David Herrmann2076.92%133.33%
Andrew Morton623.08%266.67%
Total26100.00%3100.00%


static inline void mapping_unmap_writable(struct address_space *mapping) { atomic_dec(&mapping->i_mmap_writable); }

Contributors

PersonTokensPropCommitsCommitProp
David Herrmann20100.00%1100.00%
Total20100.00%1100.00%


static inline int mapping_deny_writable(struct address_space *mapping) { return atomic_dec_unless_positive(&mapping->i_mmap_writable) ? 0 : -EBUSY; }

Contributors

PersonTokensPropCommitsCommitProp
David Herrmann26100.00%1100.00%
Total26100.00%1100.00%


static inline void mapping_allow_writable(struct address_space *mapping) { atomic_inc(&mapping->i_mmap_writable); }

Contributors

PersonTokensPropCommitsCommitProp
David Herrmann20100.00%1100.00%
Total20100.00%1100.00%

/* * Use sequence counter to get consistent i_size on 32-bit processors. */ #if BITS_PER_LONG==32 && defined(CONFIG_SMP) #include <linux/seqlock.h> #define __NEED_I_SIZE_ORDERED #define i_size_ordered_init(inode) seqcount_init(&inode->i_size_seqcount) #else #define i_size_ordered_init(inode) do { } while (0) #endif struct posix_acl; #define ACL_NOT_CACHED ((void *)(-1)) #define ACL_DONT_CACHE ((void *)(-3))
static inline struct posix_acl * uncached_acl_sentinel(struct task_struct *task) { return (void *)task + 1; }

Contributors

PersonTokensPropCommitsCommitProp
Andreas Gruenbacher23100.00%1100.00%
Total23100.00%1100.00%


static inline bool is_uncached_acl(struct posix_acl *acl) { return (long)acl & 1; }

Contributors

PersonTokensPropCommitsCommitProp
Andreas Gruenbacher20100.00%1100.00%
Total20100.00%1100.00%

#define IOP_FASTPERM 0x0001 #define IOP_LOOKUP 0x0002 #define IOP_NOFOLLOW 0x0004 #define IOP_XATTR 0x0008 #define IOP_DEFAULT_READLINK 0x0010 struct fsnotify_mark_connector; /* * Keep mostly read-only and often accessed (especially for * the RCU path lookup and 'stat' data) fields at the beginning * of the 'struct inode' */ struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; #ifdef CONFIG_FS_POSIX_ACL struct posix_acl *i_acl; struct posix_acl *i_default_acl; #endif const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; #ifdef CONFIG_SECURITY void *i_security; #endif /* Stat data, not accessed from path walking */ unsigned long i_ino; /* * Filesystems may only read i_nlink directly. They shall use the * following functions for modification: * * (set|clear|inc|drop)_nlink * inode_(inc|dec)_link_count */ union { const unsigned int i_nlink; unsigned int __i_nlink; }; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; #ifdef __NEED_I_SIZE_ORDERED seqcount_t i_size_seqcount; #endif /* Misc */ unsigned long i_state; struct rw_semaphore i_rwsem; unsigned long dirtied_when; /* jiffies of first dirtying */ unsigned long dirtied_time_when; struct hlist_node i_hash; struct list_head i_io_list; /* backing dev IO list */ #ifdef CONFIG_CGROUP_WRITEBACK struct bdi_writeback *i_wb; /* the associated cgroup wb */ /* foreign inode detection, see wbc_detach_inode() */ int i_wb_frn_winner; u16 i_wb_frn_avg_time; u16 i_wb_frn_history; #endif struct list_head i_lru; /* inode LRU list */ struct list_head i_sb_list; struct list_head i_wb_list; /* backing dev writeback list */ union { struct hlist_head i_dentry; struct rcu_head i_rcu; }; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; #ifdef 
CONFIG_IMA atomic_t i_readcount; /* struct files open RO */ #endif const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ struct file_lock_context *i_flctx; struct address_space i_data; struct list_head i_devices; union { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; unsigned i_dir_seq; }; __u32 i_generation; #ifdef CONFIG_FSNOTIFY __u32 i_fsnotify_mask; /* all events this inode cares about */ struct fsnotify_mark_connector __rcu *i_fsnotify_marks; #endif #if IS_ENABLED(CONFIG_FS_ENCRYPTION) struct fscrypt_info *i_crypt_info; #endif void *i_private; /* fs or device private pointer */ };
static inline unsigned int i_blocksize(const struct inode *node) { return (1 << node->i_blkbits); }

Contributors

PersonTokensPropCommitsCommitProp
Fabian Frederick23100.00%1100.00%
Total23100.00%1100.00%


static inline int inode_unhashed(struct inode *inode) { return hlist_unhashed(&inode->i_hash); }

Contributors

PersonTokensPropCommitsCommitProp
Al Viro21100.00%1100.00%
Total21100.00%1100.00%

/* * inode->i_mutex nesting subclasses for the lock validator: * * 0: the object of the current VFS operation * 1: parent * 2: child/target * 3: xattr * 4: second non-directory * 5: second parent (when locking independent directories in rename) * * I_MUTEX_NONDIR2 is for certain operations (such as rename) which lock two * non-directories at once. * * The locking order between these classes is * parent[2] -> child -> grandchild -> normal -> xattr -> second non-directory */ enum inode_i_mutex_lock_class { I_MUTEX_NORMAL, I_MUTEX_PARENT, I_MUTEX_CHILD, I_MUTEX_XATTR, I_MUTEX_NONDIR2, I_MUTEX_PARENT2, };
static inline void inode_lock(struct inode *inode) { down_write(&inode->i_rwsem); }

Contributors

PersonTokensPropCommitsCommitProp
Al Viro20100.00%2100.00%
Total20100.00%2100.00%


static inline void inode_unlock(struct inode *inode) { up_write(&inode->i_rwsem); }

Contributors

PersonTokensPropCommitsCommitProp
Al Viro20100.00%2100.00%
Total20100.00%2100.00%


static inline void inode_lock_shared(struct inode *inode) { down_read(&inode->i_rwsem); }

Contributors

PersonTokensPropCommitsCommitProp
Al Viro20100.00%2100.00%
Total20100.00%2100.00%


static inline void inode_unlock_shared(struct inode *inode) { up_read(&inode->i_rwsem); }

Contributors

PersonTokensPropCommitsCommitProp
Al Viro20100.00%2100.00%
Total20100.00%2100.00%


static inline int inode_trylock(struct inode *inode) { return down_write_trylock(&inode->i_rwsem); }

Contributors

PersonTokensPropCommitsCommitProp
Al Viro21100.00%2100.00%
Total21100.00%2100.00%


static inline int inode_trylock_shared(struct inode *inode) { return down_read_trylock(&inode->i_rwsem); }

Contributors

PersonTokensPropCommitsCommitProp
Al Viro21100.00%2100.00%
Total21100.00%2100.00%


static inline int inode_is_locked(struct inode *inode) { return rwsem_is_locked(&inode->i_rwsem); }

Contributors

PersonTokensPropCommitsCommitProp
Al Viro21100.00%2100.00%
Total21100.00%2100.00%


static inline void inode_lock_nested(struct inode *inode, unsigned subclass) { down_write_nested(&inode->i_rwsem, subclass); }

Contributors

PersonTokensPropCommitsCommitProp
Al Viro25100.00%2100.00%
Total25100.00%2100.00%

void lock_two_nondirectories(struct inode *, struct inode*); void unlock_two_nondirectories(struct inode *, struct inode*); /* * NOTE: in a 32bit arch with a preemptable kernel and * an UP compile the i_size_read/write must be atomic * with respect to the local cpu (unlike with preempt disabled), * but they don't need to be atomic with respect to other cpus like in * true SMP (so they need either to either locally disable irq around * the read or for example on x86 they can be still implemented as a * cmpxchg8b without the need of the lock prefix). For SMP compiles * and 64bit archs it makes no difference if preempt is enabled or not. */
static inline loff_t i_size_read(const struct inode *inode) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) loff_t i_size; unsigned int seq; do { seq = read_seqcount_begin(&inode->i_size_seqcount); i_size = inode->i_size; } while (read_seqcount_retry(&inode->i_size_seqcount, seq)); return i_size; #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT) loff_t i_size; preempt_disable(); i_size = inode->i_size; preempt_enable(); return i_size; #else return inode->i_size; #endif }

Contributors

PersonTokensPropCommitsCommitProp
Andrew Morton10199.02%150.00%
Jan Engelhardt10.98%150.00%
Total102100.00%2100.00%

/* * NOTE: unlike i_size_read(), i_size_write() does need locking around it * (normally i_mutex), otherwise on 32bit/SMP an update of i_size_seqcount * can be lost, resulting in subsequent i_size_read() calls spinning forever. */
static inline void i_size_write(struct inode *inode, loff_t i_size) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) preempt_disable(); write_seqcount_begin(&inode->i_size_seqcount); inode->i_size = i_size; write_seqcount_end(&inode->i_size_seqcount); preempt_enable(); #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT) preempt_disable(); inode->i_size = i_size; preempt_enable(); #else inode->i_size = i_size; #endif }

Contributors

PersonTokensPropCommitsCommitProp
Andrew Morton7992.94%150.00%
Fan Du67.06%150.00%
Total85100.00%2100.00%


static inline unsigned iminor(const struct inode *inode) { return MINOR(inode->i_rdev); }

Contributors

PersonTokensPropCommitsCommitProp
Al Viro2095.24%266.67%
Jan Engelhardt14.76%133.33%
Total21100.00%3100.00%


static inline unsigned imajor(const struct inode *inode) { return MAJOR(inode->i_rdev); }

Contributors

PersonTokensPropCommitsCommitProp
Al Viro2095.24%266.67%
Jan Engelhardt14.76%133.33%
Total21100.00%3100.00%

extern struct block_device *I_BDEV(struct inode *inode); struct fown_struct { rwlock_t lock; /* protects pid, uid, euid fields */ struct pid *pid; /* pid or -pgrp where SIGIO should be sent */ enum pid_type pid_type; /* Kind of process group SIGIO should be sent to */ kuid_t uid, euid; /* uid/euid of process setting the owner */ int signum; /* posix.1b rt signal to be delivered on IO */ }; /* * Track a single file's readahead state */ struct file_ra_state { pgoff_t start; /* where readahead started */ unsigned int size; /* # of readahead pages */ unsigned int async_size; /* do asynchronous readahead when there are only # of pages ahead */ unsigned int ra_pages; /* Maximum readahead window */