fs/nfs/read.c (Linux release 4.11)
/*
* linux/fs/nfs/read.c
*
* Block I/O for NFS
*
* Partial copy of Linus' read cache modifications to fs/nfs/file.c
* modified for async RPC by okir@monad.swb.de
*/
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;
static struct kmem_cache *nfs_rdata_cachep;
static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
return kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Trond Myklebust | 12 | 66.67% | 5 | 55.56% |
Fred Isaman | 3 | 16.67% | 2 | 22.22% |
Anna Schumaker | 2 | 11.11% | 1 | 11.11% |
Weston Andros Adamson | 1 | 5.56% | 1 | 11.11% |
Total | 18 | 100.00% | 9 | 100.00% |
static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
kmem_cache_free(nfs_rdata_cachep, rhdr);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Trond Myklebust | 10 | 55.56% | 2 | 40.00% |
Fred Isaman | 4 | 22.22% | 1 | 20.00% |
Anna Schumaker | 3 | 16.67% | 1 | 20.00% |
Weston Andros Adamson | 1 | 5.56% | 1 | 20.00% |
Total | 18 | 100.00% | 5 | 100.00% |
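The allocator hands back a zeroed header from the dedicated slab cache and the free path returns it; a minimal sketch of the pairing (in practice both are reached indirectly through nfs_rw_read_ops at the bottom of this file, not called by hand):

struct nfs_pgio_header *hdr;

hdr = nfs_readhdr_alloc();
if (hdr == NULL)
        return -ENOMEM;         /* kmem_cache_zalloc() returns NULL on failure */
/* ... set up and submit the read ... */
nfs_readhdr_free(hdr);          /* back to nfs_rdata_cachep */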
static
int nfs_return_empty_page(struct page *page)
{
zero_user(page, 0, PAGE_SIZE);
SetPageUptodate(page);
unlock_page(page);
return 0;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Trond Myklebust | 31 | 93.94% | 1 | 33.33% |
Christoph Lameter | 1 | 3.03% | 1 | 33.33% |
Kirill A. Shutemov | 1 | 3.03% | 1 | 33.33% |
Total | 33 | 100.00% | 3 | 100.00% |
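A page lying wholly beyond end-of-file needs no RPC at all; a hypothetical illustration of when this fast path fires:

/* Hypothetical illustration: with i_size == 3000 and PAGE_SIZE == 4096,
 * nfs_page_length() returns 3000 for page index 0 but 0 for page index 1.
 * A read of page 1 therefore never goes to the wire: the page is
 * zero-filled, marked uptodate, and unlocked locally. */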
void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
struct inode *inode, bool force_mds,
const struct nfs_pgio_completion_ops *compl_ops)
{
struct nfs_server *server = NFS_SERVER(inode);
const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;
#ifdef CONFIG_NFS_V4_1
if (server->pnfs_curr_ld && !force_mds)
pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
server->rsize, 0);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Christoph Hellwig | 45 | 52.94% | 1 | 14.29% |
Fred Isaman | 28 | 32.94% | 2 | 28.57% |
Trond Myklebust | 7 | 8.24% | 1 | 14.29% |
Anna Schumaker | 4 | 4.71% | 2 | 28.57% |
Bryan Schumaker | 1 | 1.18% | 1 | 14.29% |
Total | 85 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);
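The descriptor initialized here drives every read in this file. A condensed sketch of the canonical calling sequence, mirroring nfs_readpage_async() below (request creation and error handling elided):

struct nfs_pageio_descriptor pgio;

nfs_pageio_init_read(&pgio, inode, false, &nfs_async_read_completion_ops);
if (!nfs_pageio_add_request(&pgio, req)) {      /* coalescing failed */
        nfs_list_remove_request(req);
        nfs_readpage_release(req);
}
nfs_pageio_complete(&pgio);                     /* flush whatever was queued */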
void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
struct nfs_pgio_mirror *mirror;
if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
pgio->pg_ops->pg_cleanup(pgio);
pgio->pg_ops = &nfs_pgio_rw_ops;
/* read path should never have more than one mirror */
WARN_ON_ONCE(pgio->pg_mirror_count != 1);
mirror = &pgio->pg_mirrors[0];
mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Weston Andros Adamson | 25 | 32.89% | 1 | 16.67% |
Kinglong Mee | 21 | 27.63% | 1 | 16.67% |
Fred Isaman | 15 | 19.74% | 1 | 16.67% |
Trond Myklebust | 14 | 18.42% | 2 | 33.33% |
Anna Schumaker | 1 | 1.32% | 1 | 16.67% |
Total | 76 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
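This reset is the pNFS escape hatch: when a layout driver cannot, or should no longer, serve a read, the descriptor is pointed back at the metadata server. A hedged sketch of how the pNFS core uses it (cf. pnfs_read_through_mds() in fs/nfs/pnfs.c; the exact resend logic varies by driver):

nfs_pageio_reset_read_mds(pgio);        /* swap pg_ops back to nfs_pgio_rw_ops */
/* ... requeue the failed requests on the descriptor and resend them,
 * this time as ordinary NFS READs to the MDS ... */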
static void nfs_readpage_release(struct nfs_page *req)
{
struct inode *inode = d_inode(req->wb_context->dentry);
dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
(unsigned long long)NFS_FILEID(inode), req->wb_bytes,
(long long)req_offset(req));
if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
if (PageUptodate(req->wb_page))
nfs_readpage_to_fscache(inode, req->wb_page, 0);
unlock_page(req->wb_page);
}
nfs_release_request(req);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Peng Tao | 102 | 100.00% | 1 | 100.00% |
Total | 102 | 100.00% | 1 | 100.00% |
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
struct page *page)
{
struct nfs_page *new;
unsigned int len;
struct nfs_pageio_descriptor pgio;
struct nfs_pgio_mirror *pgm;
len = nfs_page_length(page);
if (len == 0)
return nfs_return_empty_page(page);
new = nfs_create_request(ctx, page, NULL, 0, len);
if (IS_ERR(new)) {
unlock_page(page);
return PTR_ERR(new);
}
if (len < PAGE_SIZE)
zero_user_segment(page, len, PAGE_SIZE);
nfs_pageio_init_read(&pgio, inode, false,
&nfs_async_read_completion_ops);
if (!nfs_pageio_add_request(&pgio, new)) {
nfs_list_remove_request(new);
nfs_readpage_release(new);
}
nfs_pageio_complete(&pgio);
/* It doesn't make sense to do mirrored reads! */
WARN_ON_ONCE(pgio.pg_mirror_count != 1);
pgm = &pgio.pg_mirrors[0];
NFS_I(inode)->read_io += pgm->pg_bytes_written;
return pgio.pg_error < 0 ? pgio.pg_error : 0;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Trond Myklebust | 54 | 27.98% | 6 | 30.00% |
Linus Torvalds (pre-git) | 44 | 22.80% | 2 | 10.00% |
Weston Andros Adamson | 29 | 15.03% | 2 | 10.00% |
Peng Tao | 25 | 12.95% | 2 | 10.00% |
Fred Isaman | 16 | 8.29% | 2 | 10.00% |
Linus Torvalds | 10 | 5.18% | 2 | 10.00% |
Andy Adamson | 9 | 4.66% | 1 | 5.00% |
Christoph Hellwig | 3 | 1.55% | 1 | 5.00% |
Kirill A. Shutemov | 2 | 1.04% | 1 | 5.00% |
Christoph Lameter | 1 | 0.52% | 1 | 5.00% |
Total | 193 | 100.00% | 20 | 100.00% |
static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
SetPageUptodate(req->wb_page);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Weston Andros Adamson | 27 | 100.00% | 1 | 100.00% |
Total | 27 | 100.00% | 1 | 100.00% |
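Subpage requests make PageUptodate a group decision; a hypothetical example of what this synchronization buys:

/* Hypothetical example: one 4096-byte page covered by two 2048-byte
 * subrequests in the same page group.  Each completion calls
 * nfs_page_group_set_uptodate(); nfs_page_group_sync_on_bit() returns
 * true only for the last member to set PG_UPTODATE, so
 * SetPageUptodate() runs exactly once, after both halves have arrived. */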
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
unsigned long bytes = 0;
if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
goto out;
while (!list_empty(&hdr->pages)) {
struct nfs_page *req = nfs_list_entry(hdr->pages.next);
struct page *page = req->wb_page;
unsigned long start = req->wb_pgbase;
unsigned long end = req->wb_pgbase + req->wb_bytes;
if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
/* note: regions of the page not covered by a
* request are zeroed in nfs_readpage_async /
* readpage_async_filler */
if (bytes > hdr->good_bytes) {
/* nothing in this request was good, so zero
* the full extent of the request */
zero_user_segment(page, start, end);
} else if (hdr->good_bytes - bytes < req->wb_bytes) {
/* part of this request has good bytes, but
* not all. zero the bad bytes */
start += hdr->good_bytes - bytes;
WARN_ON(start < req->wb_pgbase);
zero_user_segment(page, start, end);
}
}
bytes += req->wb_bytes;
if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
if (bytes <= hdr->good_bytes)
nfs_page_group_set_uptodate(req);
} else
nfs_page_group_set_uptodate(req);
nfs_list_remove_request(req);
nfs_readpage_release(req);
}
out:
hdr->release(hdr);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Fred Isaman | 138 | 61.88% | 2 | 20.00% |
Weston Andros Adamson | 56 | 25.11% | 2 | 20.00% |
Trond Myklebust | 29 | 13.00% | 6 | 60.00% |
Total | 223 | 100.00% | 10 | 100.00% |
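The zeroing arithmetic is easiest to follow with numbers; a hypothetical worked example:

/* Hypothetical example: NFS_IOHDR_EOF is set, hdr->good_bytes == 6000,
 * and hdr->pages holds three full-page requests (wb_pgbase == 0,
 * wb_bytes == 4096 each):
 *
 *   req 1: bytes == 0;    6000 - 0    >= 4096 -> data left intact
 *   req 2: bytes == 4096; 6000 - 4096 == 1904 -> zero_user_segment(page, 1904, 4096)
 *   req 3: bytes == 8192  >  6000             -> zero_user_segment(page, 0, 4096)
 *
 * All three requests are still marked uptodate because NFS_IOHDR_ERROR
 * is not set; the zeroed tail is simply the region past the server's EOF. */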
static void nfs_initiate_read(struct nfs_pgio_header *hdr,
struct rpc_message *msg,
const struct nfs_rpc_ops *rpc_ops,
struct rpc_task_setup *task_setup_data, int how)
{
struct inode *inode = hdr->inode;
int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
task_setup_data->flags |= swap_flags;
rpc_ops->read_setup(hdr, msg);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Fred Isaman | 32 | 48.48% | 1 | 11.11% |
Andy Adamson | 10 | 15.15% | 2 | 22.22% |
Anna Schumaker | 9 | 13.64% | 1 | 11.11% |
Tom Haynes | 7 | 10.61% | 1 | 11.11% |
Trond Myklebust | 4 | 6.06% | 3 | 33.33% |
Weston Andros Adamson | 4 | 6.06% | 1 | 11.11% |
Total | 66 | 100.00% | 9 | 100.00% |
static void
nfs_async_read_error(struct list_head *head)
{
struct nfs_page *req;
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
nfs_readpage_release(req);
}
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Linus Torvalds (pre-git) | 39 | 86.67% | 1 | 25.00% |
Trond Myklebust | 5 | 11.11% | 2 | 50.00% |
Fred Isaman | 1 | 2.22% | 1 | 25.00% |
Total | 45 | 100.00% | 4 | 100.00% |
static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
.error_cleanup = nfs_async_read_error,
.completion = nfs_read_completion,
};
/*
* This is the callback from RPC telling us whether a reply was
* received or some error occurred (timeout or socket shutdown).
*/
static int nfs_readpage_done(struct rpc_task *task,
struct nfs_pgio_header *hdr,
struct inode *inode)
{
int status = NFS_PROTO(inode)->read_done(task, hdr);
if (status != 0)
return status;
nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);
if (task->tk_status == -ESTALE) {
set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
nfs_mark_for_revalidate(inode);
}
return 0;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Trond Myklebust | 72 | 80.00% | 1 | 20.00% |
Anna Schumaker | 7 | 7.78% | 1 | 20.00% |
Fred Isaman | 4 | 4.44% | 1 | 20.00% |
Weston Andros Adamson | 4 | 4.44% | 1 | 20.00% |
Benny Halevy | 3 | 3.33% | 1 | 20.00% |
Total | 90 | 100.00% | 5 | 100.00% |
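The -ESTALE case poisons the inode so that later reads fail fast instead of retrying:

/* Once NFS_INO_STALE is set, the NFS_STALE() checks at the top of
 * nfs_readpage() and nfs_readpages() below return -ESTALE immediately,
 * and nfs_mark_for_revalidate() forces a fresh attribute fetch before
 * the inode is trusted again. */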
static void nfs_readpage_retry(struct rpc_task *task,
struct nfs_pgio_header *hdr)
{
struct nfs_pgio_args *argp = &hdr->args;
struct nfs_pgio_res *resp = &hdr->res;
/* This is a short read! */
nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
/* Has the server at least made some progress? */
if (resp->count == 0) {
nfs_set_pgio_error(hdr, -EIO, argp->offset);
return;
}
/* For non rpc-based layout drivers, retry-through-MDS */
if (!task->tk_ops) {
hdr->pnfs_error = -EAGAIN;
return;
}
/* Yes, so retry the read at the end of the hdr */
hdr->mds_offset += resp->count;
argp->offset += resp->count;
argp->pgbase += resp->count;
argp->count -= resp->count;
rpc_restart_call_prepare(task);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Trond Myklebust | 75 | 59.52% | 4 | 33.33% |
Kinglong Mee | 18 | 14.29% | 1 | 8.33% |
Fred Isaman | 14 | 11.11% | 2 | 16.67% |
Andy Adamson | 9 | 7.14% | 2 | 16.67% |
Weston Andros Adamson | 8 | 6.35% | 1 | 8.33% |
Anna Schumaker | 2 | 1.59% | 2 | 16.67% |
Total | 126 | 100.00% | 12 | 100.00% |
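The window adjustment for a short read is plain offset arithmetic; a hypothetical worked example:

/* Hypothetical example: a 16384-byte READ at offset 0 to which the
 * server returns only 4096 bytes with eof unset:
 *
 *   before: offset == 0,    pgbase == 0,    count == 16384
 *   after:  offset == 4096, pgbase == 4096, count == 12288
 *
 * rpc_restart_call_prepare() then reissues the RPC for the remaining
 * 12288 bytes; mds_offset advances the same way for pNFS bookkeeping. */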
static void nfs_readpage_result(struct rpc_task *task,
struct nfs_pgio_header *hdr)
{
if (hdr->res.eof) {
loff_t bound;
bound = hdr->args.offset + hdr->res.count;
spin_lock(&hdr->lock);
if (bound < hdr->io_start + hdr->good_bytes) {
set_bit(NFS_IOHDR_EOF, &hdr->flags);
clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
hdr->good_bytes = bound - hdr->io_start;
}
spin_unlock(&hdr->lock);
} else if (hdr->res.count < hdr->args.count)
nfs_readpage_retry(task, hdr);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Fred Isaman | 57 | 45.60% | 1 | 6.25% |
Trond Myklebust | 50 | 40.00% | 8 | 50.00% |
Weston Andros Adamson | 8 | 6.40% | 1 | 6.25% |
Andy Adamson | 3 | 2.40% | 1 | 6.25% |
Linus Torvalds (pre-git) | 3 | 2.40% | 1 | 6.25% |
Kinglong Mee | 1 | 0.80% | 1 | 6.25% |
Anna Schumaker | 1 | 0.80% | 1 | 6.25% |
Linus Torvalds | 1 | 0.80% | 1 | 6.25% |
Bryan Schumaker | 1 | 0.80% | 1 | 6.25% |
Total | 125 | 100.00% | 16 | 100.00% |
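Concretely, the EOF clamp works like this (a hypothetical example):

/* Hypothetical example: hdr->io_start == 0 and hdr->good_bytes == 8192,
 * but the server replies count == 5000 at offset 0 with eof set.
 * bound == 5000 < 8192, so good_bytes is clipped to 5000, NFS_IOHDR_EOF
 * is set, and NFS_IOHDR_ERROR is cleared; nfs_read_completion() above
 * then zeroes everything past byte 5000. */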
/*
* Read a page over NFS.
* We read the page synchronously in the following case:
* - The error flag is set for this page. This happens only when a
* previous async read operation failed.
*/
int nfs_readpage(struct file *file, struct page *page)
{
struct nfs_open_context *ctx;
struct inode *inode = page_file_mapping(page)->host;
int error;
dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
page, PAGE_SIZE, page_index(page));
nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
nfs_add_stats(inode, NFSIOS_READPAGES, 1);
/*
* Try to flush any pending writes to the file..
*
* NOTE! Because we own the page lock, there cannot
* be any new pending writes generated at this point
* for this page (other pages can be written to).
*/
error = nfs_wb_page(inode, page);
if (error)
goto out_unlock;
if (PageUptodate(page))
goto out_unlock;
error = -ESTALE;
if (NFS_STALE(inode))
goto out_unlock;
if (file == NULL) {
error = -EBADF;
ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
if (ctx == NULL)
goto out_unlock;
} else
ctx = get_nfs_open_context(nfs_file_open_context(file));
if (!IS_SYNC(inode)) {
error = nfs_readpage_from_fscache(ctx, inode, page);
if (error == 0)
goto out;
}
error = nfs_readpage_async(ctx, inode, page);
out:
put_nfs_open_context(ctx);
return error;
out_unlock:
unlock_page(page);
return error;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Trond Myklebust | 127 | 59.91% | 9 | 52.94% |
David Howells | 32 | 15.09% | 1 | 5.88% |
Linus Torvalds (pre-git) | 24 | 11.32% | 1 | 5.88% |
Chuck Lever | 13 | 6.13% | 1 | 5.88% |
Linus Torvalds | 6 | 2.83% | 1 | 5.88% |
Mel Gorman | 5 | 2.36% | 1 | 5.88% |
Nicolas Iooss | 3 | 1.42% | 1 | 5.88% |
Huang Ying | 1 | 0.47% | 1 | 5.88% |
Kirill A. Shutemov | 1 | 0.47% | 1 | 5.88% |
Total | 212 | 100.00% | 17 | 100.00% |
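nfs_readpage() is not called directly: the VFS reaches it through the NFS address-space operations. A sketch of the wiring, abridged from nfs_file_aops in fs/nfs/file.c:

const struct address_space_operations nfs_file_aops = {
        .readpage       = nfs_readpage,
        .readpages      = nfs_readpages,
        /* ... writepage, write_begin, direct_IO, ... */
};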
struct nfs_readdesc {
struct nfs_pageio_descriptor *pgio;
struct nfs_open_context *ctx;
};
static int
readpage_async_filler(void *data, struct page *page)
{
struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
struct nfs_page *new;
unsigned int len;
int error;
len = nfs_page_length(page);
if (len == 0)
return nfs_return_empty_page(page);
new = nfs_create_request(desc->ctx, page, NULL, 0, len);
if (IS_ERR(new))
goto out_error;
if (len < PAGE_SIZE)
zero_user_segment(page, len, PAGE_SIZE);
if (!nfs_pageio_add_request(desc->pgio, new)) {
nfs_list_remove_request(new);
nfs_readpage_release(new);
error = desc->pgio->pg_error;
goto out;
}
return 0;
out_error:
error = PTR_ERR(new);
unlock_page(page);
out:
return error;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Trond Myklebust | 116 | 73.89% | 6 | 37.50% |
Fred Isaman | 15 | 9.55% | 1 | 6.25% |
Linus Torvalds (pre-git) | 11 | 7.01% | 5 | 31.25% |
Peng Tao | 10 | 6.37% | 1 | 6.25% |
Weston Andros Adamson | 2 | 1.27% | 1 | 6.25% |
Kirill A. Shutemov | 2 | 1.27% | 1 | 6.25% |
Christoph Lameter | 1 | 0.64% | 1 | 6.25% |
Total | 157 | 100.00% | 16 | 100.00% |
int nfs_readpages(struct file *filp, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
struct nfs_pageio_descriptor pgio;
struct nfs_pgio_mirror *pgm;
struct nfs_readdesc desc = {
.pgio = &pgio,
};
struct inode *inode = mapping->host;
unsigned long npages;
int ret = -ESTALE;
dprintk("NFS: nfs_readpages (%s/%Lu %d)\n",
inode->i_sb->s_id,
(unsigned long long)NFS_FILEID(inode),
nr_pages);
nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);
if (NFS_STALE(inode))
goto out;
if (filp == NULL) {
desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
if (desc.ctx == NULL)
return -EBADF;
} else
desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));
/* attempt to read as many of the pages as possible from the cache
* - this returns -ENOBUFS immediately if the cookie is negative
*/
ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
pages, &nr_pages);
if (ret == 0)
goto read_complete; /* all pages were read */
nfs_pageio_init_read(&pgio, inode, false,
&nfs_async_read_completion_ops);
ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
nfs_pageio_complete(&pgio);
/* It doesn't make sense to do mirrored reads! */
WARN_ON_ONCE(pgio.pg_mirror_count != 1);
pgm = &pgio.pg_mirrors[0];
NFS_I(inode)->read_io += pgm->pg_bytes_written;
npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >>
PAGE_SHIFT;
nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
put_nfs_open_context(desc.ctx);
out:
return ret;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Trond Myklebust | 159 | 56.99% | 8 | 33.33% |
David Howells | 31 | 11.11% | 1 | 4.17% |
Weston Andros Adamson | 29 | 10.39% | 1 | 4.17% |
Linus Torvalds (pre-git) | 26 | 9.32% | 8 | 33.33% |
Chuck Lever | 15 | 5.38% | 1 | 4.17% |
Andy Adamson | 9 | 3.23% | 1 | 4.17% |
Christoph Hellwig | 3 | 1.08% | 1 | 4.17% |
Fred Isaman | 3 | 1.08% | 1 | 4.17% |
Kirill A. Shutemov | 2 | 0.72% | 1 | 4.17% |
Niels de Vos | 2 | 0.72% | 1 | 4.17% |
Total | 279 | 100.00% | 24 | 100.00% |
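read_cache_pages() (mm/readahead.c) supplies the loop that feeds readpage_async_filler(): it inserts each readahead page into the page cache, locks it, and invokes the filler. Roughly:

/* For each page on the readahead list, read_cache_pages() does
 * approximately:
 *
 *     add_to_page_cache_lru(page, mapping, page->index, gfp);
 *     ret = readpage_async_filler(&desc, page);
 *
 * and stops, invalidating the remaining pages, as soon as the filler
 * returns non-zero (for example when pg_error is set on the descriptor). */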
int __init nfs_init_readpagecache(void)
{
nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
sizeof(struct nfs_pgio_header),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (nfs_rdata_cachep == NULL)
return -ENOMEM;
return 0;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Linus Torvalds (pre-git) | 38 | 95.00% | 3 | 60.00% |
Weston Andros Adamson | 1 | 2.50% | 1 | 20.00% |
David Howells | 1 | 2.50% | 1 | 20.00% |
Total | 40 | 100.00% | 5 | 100.00% |
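The cache is created once at module load and destroyed at unload; a hedged sketch of the pairing, roughly as init_nfs_fs()/exit_nfs_fs() in fs/nfs/inode.c do it:

int err = nfs_init_readpagecache();
if (err)
        goto out;               /* propagate -ENOMEM */
/* ... register the filesystem, set up the other NFS caches ... */
nfs_destroy_readpagecache();    /* module unload path */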
void nfs_destroy_readpagecache(void)
{
kmem_cache_destroy(nfs_rdata_cachep);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Linus Torvalds (pre-git) | 12 | 100.00% | 2 | 100.00% |
Total | 12 | 100.00% | 2 | 100.00% |
static const struct nfs_rw_ops nfs_rw_read_ops = {
.rw_mode = FMODE_READ,
.rw_alloc_header = nfs_readhdr_alloc,
.rw_free_header = nfs_readhdr_free,
.rw_done = nfs_readpage_done,
.rw_result = nfs_readpage_result,
.rw_initiate = nfs_initiate_read,
};
Overall Contributors
Person | Tokens | Token % | Commits | Commit % |
Trond Myklebust | 790 | 37.91% | 38 | 38.78% |
Fred Isaman | 358 | 17.18% | 6 | 6.12% |
Linus Torvalds (pre-git) | 238 | 11.42% | 10 | 10.20% |
Weston Andros Adamson | 195 | 9.36% | 6 | 6.12% |
Peng Tao | 137 | 6.57% | 2 | 2.04% |
Anna Schumaker | 73 | 3.50% | 7 | 7.14% |
David Howells | 66 | 3.17% | 2 | 2.04% |
Christoph Hellwig | 54 | 2.59% | 1 | 1.02% |
Andy Adamson | 43 | 2.06% | 5 | 5.10% |
Kinglong Mee | 40 | 1.92% | 2 | 2.04% |
Chuck Lever | 28 | 1.34% | 1 | 1.02% |
Linus Torvalds | 18 | 0.86% | 4 | 4.08% |
Kirill A. Shutemov | 8 | 0.38% | 1 | 1.02% |
Bryan Schumaker | 7 | 0.34% | 3 | 3.06% |
Tom Haynes | 7 | 0.34% | 1 | 1.02% |
Benny Halevy | 5 | 0.24% | 2 | 2.04% |
Christoph Lameter | 5 | 0.24% | 2 | 2.04% |
Mel Gorman | 5 | 0.24% | 1 | 1.02% |
Nicolas Iooss | 3 | 0.14% | 1 | 1.02% |
Niels de Vos | 2 | 0.10% | 1 | 1.02% |
Dave Jones | 1 | 0.05% | 1 | 1.02% |
Huang Ying | 1 | 0.05% | 1 | 1.02% |
Total | 2084 | 100.00% | 98 | 100.00% |