Author | Tokens | Token Proportion | Commits | Commit Proportion |
---|---|---|---|---|
Trond Myklebust | 553 | 47.43% | 50 | 51.55% |
Linus Torvalds (pre-git) | 247 | 21.18% | 12 | 12.37% |
Weston Andros Adamson | 146 | 12.52% | 14 | 14.43% |
Anna Schumaker | 77 | 6.60% | 3 | 3.09% |
Fred Isaman | 47 | 4.03% | 6 | 6.19% |
Linus Torvalds | 37 | 3.17% | 1 | 1.03% |
Chuck Lever | 16 | 1.37% | 2 | 2.06% |
Benjamin Coddington | 13 | 1.11% | 1 | 1.03% |
Benny Halevy | 9 | 0.77% | 2 | 2.06% |
Dave Wysochanski | 9 | 0.77% | 1 | 1.03% |
Tom Haynes | 5 | 0.43% | 1 | 1.03% |
Kirill A. Shutemov | 3 | 0.26% | 2 | 2.06% |
Mel Gorman | 3 | 0.26% | 1 | 1.03% |
Greg Kroah-Hartman | 1 | 0.09% | 1 | 1.03% |
Total | 1166 | 100.00% | 97 | 100.00% |
```c
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/include/linux/nfs_page.h
 *
 * Copyright (C) 2000 Trond Myklebust
 *
 * NFS page cache wrapper.
 */
#ifndef _LINUX_NFS_PAGE_H
#define _LINUX_NFS_PAGE_H

#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/sunrpc/auth.h>
#include <linux/nfs_xdr.h>
#include <linux/kref.h>

/*
 * Valid flags for a dirty buffer
 */
enum {
	PG_BUSY = 0,		/* nfs_{un}lock_request */
	PG_MAPPED,		/* page private set for buffered io */
	PG_FOLIO,		/* Tracking a folio (unset for O_DIRECT) */
	PG_CLEAN,		/* write succeeded */
	PG_COMMIT_TO_DS,	/* used by pnfs layouts */
	PG_INODE_REF,		/* extra ref held by inode when in writeback */
	PG_HEADLOCK,		/* page group lock of wb_head */
	PG_TEARDOWN,		/* page group sync for destroy */
	PG_UNLOCKPAGE,		/* page group sync bit in read path */
	PG_UPTODATE,		/* page group sync bit in read path */
	PG_WB_END,		/* page group sync bit in write path */
	PG_REMOVE,		/* page group sync bit in write path */
	PG_CONTENDED1,		/* Is someone waiting for a lock? */
	PG_CONTENDED2,		/* Is someone waiting for a lock? */
};

struct nfs_inode;
struct nfs_page {
	struct list_head	wb_list;	/* Defines state of page: */
	union {
		struct page	*wb_page;	/* page to read in/write out */
		struct folio	*wb_folio;
	};
	struct nfs_lock_context	*wb_lock_context;	/* lock context info */
	pgoff_t			wb_index;	/* Offset >> PAGE_SHIFT */
	unsigned int		wb_offset,	/* Offset & ~PAGE_MASK */
				wb_pgbase,	/* Start of page data */
				wb_bytes;	/* Length of request */
	struct kref		wb_kref;	/* reference count */
	unsigned long		wb_flags;
	struct nfs_write_verifier	wb_verf;	/* Commit cookie */
	struct nfs_page		*wb_this_page;	/* list of reqs for this page */
	struct nfs_page		*wb_head;	/* head pointer for req list */
	unsigned short		wb_nio;		/* Number of I/O attempts */
};

struct nfs_pgio_mirror;
struct nfs_pageio_descriptor;

struct nfs_pageio_ops {
	void	(*pg_init)(struct nfs_pageio_descriptor *, struct nfs_page *);
	size_t	(*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *,
			   struct nfs_page *);
	int	(*pg_doio)(struct nfs_pageio_descriptor *);
	unsigned int	(*pg_get_mirror_count)(struct nfs_pageio_descriptor *,
					       struct nfs_page *);
	void	(*pg_cleanup)(struct nfs_pageio_descriptor *);
	struct nfs_pgio_mirror *
		(*pg_get_mirror)(struct nfs_pageio_descriptor *, u32);
	u32	(*pg_set_mirror)(struct nfs_pageio_descriptor *, u32);
};

struct nfs_rw_ops {
	struct nfs_pgio_header *(*rw_alloc_header)(void);
	void (*rw_free_header)(struct nfs_pgio_header *);
	int  (*rw_done)(struct rpc_task *, struct nfs_pgio_header *,
			struct inode *);
	void (*rw_result)(struct rpc_task *, struct nfs_pgio_header *);
	void (*rw_initiate)(struct nfs_pgio_header *, struct rpc_message *,
			    const struct nfs_rpc_ops *,
			    struct rpc_task_setup *, int);
};

struct nfs_pgio_mirror {
	struct list_head	pg_list;
	unsigned long		pg_bytes_written;
	size_t			pg_count;
	size_t			pg_bsize;
	unsigned int		pg_base;
	unsigned char		pg_recoalesce : 1;
};

struct nfs_pageio_descriptor {
	struct inode		*pg_inode;
	const struct nfs_pageio_ops *pg_ops;
	const struct nfs_rw_ops *pg_rw_ops;
	int			pg_ioflags;
	int			pg_error;
	const struct rpc_call_ops *pg_rpc_callops;
	const struct nfs_pgio_completion_ops *pg_completion_ops;
	struct pnfs_layout_segment *pg_lseg;
	struct nfs_io_completion *pg_io_completion;
	struct nfs_direct_req	*pg_dreq;
#ifdef CONFIG_NFS_FSCACHE
	void			*pg_netfs;
#endif
	unsigned int		pg_bsize;	/* default bsize for mirrors */

	u32			pg_mirror_count;
	struct nfs_pgio_mirror	*pg_mirrors;
	struct nfs_pgio_mirror	pg_mirrors_static[1];
	struct nfs_pgio_mirror	*pg_mirrors_dynamic;
	u32			pg_mirror_idx;	/* current mirror */
	unsigned short		pg_maxretrans;
	unsigned char		pg_moreio : 1;
};
```
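The `wb_head`/`wb_this_page` pointers link the subrequests covering a single page or folio into a page group; in the NFS client the subrequest list is circular, so a walk terminates when it returns to the head. As an illustration only (the helper name `nfs_page_group_count` is hypothetical and not part of this header), a traversal might look like the sketch below, performed while holding the group lock:

```c
/*
 * Hypothetical helper, for illustration only (assumes a kernel build context
 * with linux/nfs_page.h included): count the subrequests in a page group by
 * following the circular wb_this_page links from the group head. A real
 * caller would hold the group lock, e.g. via nfs_page_group_lock(), for the
 * duration of the walk.
 */
static unsigned int nfs_page_group_count(struct nfs_page *head)
{
	struct nfs_page *req = head;
	unsigned int count = 0;

	do {
		count++;
		req = req->wb_this_page;
	} while (req != head);

	return count;
}
```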
```c
/* arbitrarily selected limit to number of mirrors */
#define NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX	16

#define NFS_WBACK_BUSY(req)	(test_bit(PG_BUSY,&(req)->wb_flags))

extern struct nfs_page *nfs_page_create_from_page(struct nfs_open_context *ctx,
						  struct page *page,
						  unsigned int pgbase,
						  loff_t offset,
						  unsigned int count);
extern struct nfs_page *nfs_page_create_from_folio(struct nfs_open_context *ctx,
						   struct folio *folio,
						   unsigned int offset,
						   unsigned int count);
extern void nfs_release_request(struct nfs_page *);

extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
			    struct inode *inode,
			    const struct nfs_pageio_ops *pg_ops,
			    const struct nfs_pgio_completion_ops *compl_ops,
			    const struct nfs_rw_ops *rw_ops,
			    size_t bsize,
			    int how);
extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *,
				  struct nfs_page *);
extern int nfs_pageio_resend(struct nfs_pageio_descriptor *,
			     struct nfs_pgio_header *);
extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc);
extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t);
extern size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
				  struct nfs_page *prev,
				  struct nfs_page *req);
extern int nfs_wait_on_request(struct nfs_page *);
extern void nfs_unlock_request(struct nfs_page *req);
extern void nfs_unlock_and_release_request(struct nfs_page *);
extern struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req);
extern int nfs_page_group_lock_subrequests(struct nfs_page *head);
extern void nfs_join_page_group(struct nfs_page *head,
				struct nfs_commit_info *cinfo,
				struct inode *inode);
extern int nfs_page_group_lock(struct nfs_page *);
extern void nfs_page_group_unlock(struct nfs_page *);
extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
extern int nfs_page_set_headlock(struct nfs_page *req);
extern void nfs_page_clear_headlock(struct nfs_page *req);
extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);

/**
 * nfs_page_to_folio - Retrieve a struct folio for the request
 * @req: pointer to a struct nfs_page
 *
 * If a folio was assigned to @req, then return it, otherwise return NULL.
 */
static inline struct folio *nfs_page_to_folio(const struct nfs_page *req)
{
	if (test_bit(PG_FOLIO, &req->wb_flags))
		return req->wb_folio;
	return NULL;
}
```
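The declarations above form the request-coalescing pipeline: a descriptor is initialised with a set of ops tables, requests are fed into it one at a time, and completing the descriptor flushes whatever has been coalesced. The following is a rough sketch, not code from this header or from the NFS client, of how a caller might drive it; `example_flush_list` and the `example_*_ops` tables are placeholder names for whatever the read or write path would normally supply.

```c
/*
 * Illustrative sketch only (assumes a kernel build context with
 * linux/nfs_page.h included and caller-provided ops tables): run a list of
 * requests through the coalescing pipeline.
 */
static int example_flush_list(struct inode *inode, struct list_head *reqs,
			      size_t bsize)
{
	struct nfs_pageio_descriptor desc;
	struct nfs_page *req;

	nfs_pageio_init(&desc, inode, &example_pg_ops, &example_compl_ops,
			&example_rw_ops, bsize, 0);

	while (!list_empty(reqs)) {
		req = list_first_entry(reqs, struct nfs_page, wb_list);
		/* On success the request moves onto the descriptor's
		 * internal mirror list; on failure pg_error says why. */
		if (!nfs_pageio_add_request(&desc, req))
			break;
	}

	nfs_pageio_complete(&desc);	/* send out anything still coalesced */
	return desc.pg_error;
}
```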
```c
/**
 * nfs_page_to_page - Retrieve a struct page for the request
 * @req: pointer to a struct nfs_page
 * @pgbase: folio byte offset
 *
 * Return the page containing the byte that is at offset @pgbase relative
 * to the start of the folio.
 * Note: The request starts at offset @req->wb_pgbase.
 */
static inline struct page *nfs_page_to_page(const struct nfs_page *req,
					    size_t pgbase)
{
	struct folio *folio = nfs_page_to_folio(req);

	if (folio == NULL)
		return req->wb_page;
	return folio_page(folio, pgbase >> PAGE_SHIFT);
}

/**
 * nfs_page_to_inode - Retrieve an inode for the request
 * @req: pointer to a struct nfs_page
 */
static inline struct inode *nfs_page_to_inode(const struct nfs_page *req)
{
	struct folio *folio = nfs_page_to_folio(req);

	if (folio == NULL)
		return page_file_mapping(req->wb_page)->host;
	return folio_file_mapping(folio)->host;
}

/**
 * nfs_page_max_length - Retrieve the maximum possible length for a request
 * @req: pointer to a struct nfs_page
 *
 * Returns the maximum possible length of a request
 */
static inline size_t nfs_page_max_length(const struct nfs_page *req)
{
	struct folio *folio = nfs_page_to_folio(req);

	if (folio == NULL)
		return PAGE_SIZE;
	return folio_size(folio);
}

/*
 * Lock the page of an asynchronous request
 */
static inline int
nfs_lock_request(struct nfs_page *req)
{
	return !test_and_set_bit(PG_BUSY, &req->wb_flags);
}

/**
 * nfs_list_add_request - Insert a request into a list
 * @req: request
 * @head: head of list into which to insert the request.
 */
static inline void
nfs_list_add_request(struct nfs_page *req, struct list_head *head)
{
	list_add_tail(&req->wb_list, head);
}

/**
 * nfs_list_move_request - Move a request to a new list
 * @req: request
 * @head: head of list into which to insert the request.
 */
static inline void
nfs_list_move_request(struct nfs_page *req, struct list_head *head)
{
	list_move_tail(&req->wb_list, head);
}

/**
 * nfs_list_remove_request - Remove a request from its wb_list
 * @req: request
 */
static inline void
nfs_list_remove_request(struct nfs_page *req)
{
	if (list_empty(&req->wb_list))
		return;
	list_del_init(&req->wb_list);
}

static inline struct nfs_page *
nfs_list_entry(struct list_head *head)
{
	return list_entry(head, struct nfs_page, wb_list);
}

static inline loff_t req_offset(const struct nfs_page *req)
{
	return (((loff_t)req->wb_index) << PAGE_SHIFT) + req->wb_offset;
}

static inline struct nfs_open_context *
nfs_req_openctx(struct nfs_page *req)
{
	return req->wb_lock_context->open_context;
}

#endif /* _LINUX_NFS_PAGE_H */
```
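The `req_offset()` helper above converts a request's page index and intra-page offset back into an absolute file offset. The following standalone user-space model (not kernel code; `MODEL_PAGE_SHIFT` of 12 is an assumption matching 4 KiB pages) simply demonstrates the arithmetic:

```c
/* Standalone model of the req_offset() arithmetic; builds with any C compiler. */
#include <stdio.h>
#include <stdint.h>

#define MODEL_PAGE_SHIFT 12	/* assumes 4 KiB pages */

static int64_t model_req_offset(uint64_t wb_index, unsigned int wb_offset)
{
	/* file offset = page index * page size + offset within the page */
	return ((int64_t)wb_index << MODEL_PAGE_SHIFT) + wb_offset;
}

int main(void)
{
	/* A request starting 512 bytes into page 3 begins at file offset 12800. */
	printf("%lld\n", (long long)model_req_offset(3, 512));
	return 0;
}
```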
Created with Cregit http://github.com/cregit/cregit
Version 2.0-RC1