Release 4.7 drivers/staging/lustre/lnet/libcfs/tracefile.c
  
  
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/tracefile.c
 *
 * Author: Zach Brown <zab@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_LNET
#define LUSTRE_TRACEFILE_PRIVATE
#include "tracefile.h"
#include "../../include/linux/libcfs/libcfs.h"
/* XXX move things up to the top, comment */
union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned;
char cfs_tracefile[TRACEFILE_NAME_SIZE];
long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
static struct tracefiled_ctl trace_tctl;
static DEFINE_MUTEX(cfs_trace_thread_mutex);
static int thread_running;
static atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
struct page_collection {
	struct list_head	pc_pages;
	/*
	 * if this flag is set, collect_pages() will spill both
	 * ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise,
	 * only ->tcd_pages are spilled.
	 */
	int			pc_want_daemon_pages;
};
struct tracefiled_ctl {
	struct completion	tctl_start;
	struct completion	tctl_stop;
	wait_queue_head_t	tctl_waitq;
	pid_t			tctl_pid;
	atomic_t		tctl_shutdown;
};
/*
 * small data-structure for each page owned by tracefiled.
 */
struct cfs_trace_page {
	/*
	 * page itself
	 */
	struct page		*page;
	/*
	 * linkage into one of the lists in trace_data_union or
	 * page_collection
	 */
	struct list_head	linkage;
	/*
	 * number of bytes used within this page
	 */
	unsigned int		used;
	/*
	 * cpu that owns this page
	 */
	unsigned short		cpu;
	/*
	 * type(context) of this page
	 */
	unsigned short		type;
};
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
					 struct cfs_trace_cpu_data *tcd);
static inline struct cfs_trace_page *
cfs_tage_from_list(struct list_head *list)
{
	return list_entry(list, struct cfs_trace_page, linkage);
}
static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
{
	struct page	    *page;
	struct cfs_trace_page *tage;
	/* My caller is trying to free memory */
	if (!in_interrupt() && memory_pressure_get())
		return NULL;
	/*
         * Don't spam console with allocation failures: they will be reported
         * by upper layer anyway.
         */
	gfp |= __GFP_NOWARN;
	page = alloc_page(gfp);
	if (!page)
		return NULL;
	tage = kmalloc(sizeof(*tage), gfp);
	if (!tage) {
		__free_page(page);
		return NULL;
	}
	tage->page = page;
	atomic_inc(&cfs_tage_allocated);
	return tage;
}
static void cfs_tage_free(struct cfs_trace_page *tage)
{
	__free_page(tage->page);
	kfree(tage);
	atomic_dec(&cfs_tage_allocated);
}
static void cfs_tage_to_tail(struct cfs_trace_page *tage,
			     struct list_head *queue)
{
	list_move_tail(&tage->linkage, queue);
}
int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp,
			   struct list_head *stock)
{
	int i;
	/*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */
	for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES; ++i) {
		struct cfs_trace_page *tage;
		tage = cfs_tage_alloc(gfp);
		if (!tage)
			break;
		list_add_tail(&tage->linkage, stock);
	}
	return i;
}
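/*
 * Illustrative sketch (not part of the original file): how a caller
 * might top up a private stock list with GFP_KERNEL pages and release
 * whatever it does not hand over to the tcd.  The cfs_example_* name is
 * a hypothetical label for illustration only.
 */
static void cfs_example_refill_and_drain(struct cfs_trace_cpu_data *tcd)
{
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	LIST_HEAD(stock);

	/* allocate up to TCD_STOCK_PAGES - tcd_cur_stock_pages new pages */
	cfs_trace_refill_stock(tcd, GFP_KERNEL, &stock);

	/* free any page still sitting on the private list */
	list_for_each_entry_safe(tage, tmp, &stock, linkage) {
		list_del_init(&tage->linkage);
		cfs_tage_free(tage);
	}
}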
/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *
cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
{
	struct cfs_trace_page *tage;
	if (tcd->tcd_cur_pages > 0) {
		__LASSERT(!list_empty(&tcd->tcd_pages));
		tage = cfs_tage_from_list(tcd->tcd_pages.prev);
		if (tage->used + len <= PAGE_SIZE)
			return tage;
	}
	if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
		if (tcd->tcd_cur_stock_pages > 0) {
			tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev);
			--tcd->tcd_cur_stock_pages;
			list_del_init(&tage->linkage);
		} else {
			tage = cfs_tage_alloc(GFP_ATOMIC);
			if (unlikely(!tage)) {
				if ((!memory_pressure_get() ||
				     in_interrupt()) && printk_ratelimit())
					printk(KERN_WARNING
					       "cannot allocate a tage (%ld)\n",
					       tcd->tcd_cur_pages);
				return NULL;
			}
		}
		tage->used = 0;
		tage->cpu = smp_processor_id();
		tage->type = tcd->tcd_type;
		list_add_tail(&tage->linkage, &tcd->tcd_pages);
		tcd->tcd_cur_pages++;
		if (tcd->tcd_cur_pages > 8 && thread_running) {
			struct tracefiled_ctl *tctl = &trace_tctl;
			/*
                         * wake up tracefiled to process some pages.
                         */
			wake_up(&tctl->tctl_waitq);
		}
		return tage;
	}
	return NULL;
}
static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
{
	int pgcount = tcd->tcd_cur_pages / 10;
	struct page_collection pc;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	/*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */
	if (printk_ratelimit())
		printk(KERN_WARNING "debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n",
		       pgcount + 1, tcd->tcd_cur_pages);
	INIT_LIST_HEAD(&pc.pc_pages);
	list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
		if (pgcount-- == 0)
			break;
		list_move_tail(&tage->linkage, &pc.pc_pages);
		tcd->tcd_cur_pages--;
	}
	put_pages_on_tcd_daemon_list(&pc, tcd);
}
/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
						 unsigned long len)
{
	struct cfs_trace_page *tage;
	/*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */
	if (len > PAGE_SIZE) {
		pr_err("cowardly refusing to write %lu bytes in a page\n", len);
		return NULL;
	}
	tage = cfs_trace_get_tage_try(tcd, len);
	if (tage)
		return tage;
	if (thread_running)
		cfs_tcd_shrink(tcd);
	if (tcd->tcd_cur_pages > 0) {
		tage = cfs_tage_from_list(tcd->tcd_pages.next);
		tage->used = 0;
		cfs_tage_to_tail(tage, &tcd->tcd_pages);
	}
	return tage;
}
int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
		     const char *format, ...)
{
	va_list args;
	int     rc;
	va_start(args, format);
	rc = libcfs_debug_vmsg2(msgdata, format, args, NULL);
	va_end(args);
	return rc;
}
EXPORT_SYMBOL(libcfs_debug_msg);
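/*
 * Illustrative sketch (not part of the original file): this is roughly
 * how the CDEBUG()-style macros in the libcfs headers reach
 * libcfs_debug_msg().  LIBCFS_DEBUG_MSG_DATA_DECL() is assumed here to
 * declare and fill a struct libcfs_debug_msg_data with the current
 * file, function and line for the given mask.
 */
static void cfs_example_warn(int rc)
{
	LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_WARNING, NULL);

	libcfs_debug_msg(&msgdata, "example operation failed: rc = %d\n", rc);
}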
int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
		       const char *format1, va_list args,
		       const char *format2, ...)
{
	struct cfs_trace_cpu_data *tcd = NULL;
	struct ptldebug_header     header = {0};
	struct cfs_trace_page     *tage;
	/* string_buf is used only if tcd != NULL, and is always set then */
	char		      *string_buf = NULL;
	char		      *debug_buf;
	int			known_size;
	int			needed = 85; /* average message length */
	int			max_nob;
	va_list		    ap;
	int			depth;
	int			i;
	int			remain;
	int			mask = msgdata->msg_mask;
	const char		*file = kbasename(msgdata->msg_file);
	struct cfs_debug_limit_state   *cdls = msgdata->msg_cdls;
	tcd = cfs_trace_get_tcd();
	/* cfs_trace_get_tcd() grabs a lock, which disables preemption and
         * pins us to a particular CPU.  This avoids an smp_processor_id()
         * warning on Linux when debugging is enabled.
         */
	cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK());
	if (!tcd)		/* arch may not log in IRQ context */
		goto console;
	if (tcd->tcd_cur_pages == 0)
		header.ph_flags |= PH_FLAG_FIRST_RECORD;
	if (tcd->tcd_shutting_down) {
		cfs_trace_put_tcd(tcd);
		tcd = NULL;
		goto console;
	}
	depth = __current_nesting_level();
	known_size = strlen(file) + 1 + depth;
	if (msgdata->msg_fn)
		known_size += strlen(msgdata->msg_fn) + 1;
	if (libcfs_debug_binary)
		known_size += sizeof(header);
	/*
	 * Loop at most twice: vsnprintf() returns the real size required
	 * for the output _without_ the terminating NUL, so if 'needed'
	 * turns out to be too small for this format we retry once with a
	 * fresh page.
	 */
	for (i = 0; i < 2; i++) {
		tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
		if (!tage) {
			if (needed + known_size > PAGE_SIZE)
				mask |= D_ERROR;
			cfs_trace_put_tcd(tcd);
			tcd = NULL;
			goto console;
		}
		string_buf = (char *)page_address(tage->page) +
					tage->used + known_size;
		max_nob = PAGE_SIZE - tage->used - known_size;
		if (max_nob <= 0) {
			printk(KERN_EMERG "negative max_nob: %d\n",
			       max_nob);
			mask |= D_ERROR;
			cfs_trace_put_tcd(tcd);
			tcd = NULL;
			goto console;
		}
		needed = 0;
		if (format1) {
			va_copy(ap, args);
			needed = vsnprintf(string_buf, max_nob, format1, ap);
			va_end(ap);
		}
		if (format2) {
			remain = max_nob - needed;
			if (remain < 0)
				remain = 0;
			va_start(ap, format2);
			needed += vsnprintf(string_buf + needed, remain,
					    format2, ap);
			va_end(ap);
		}
		if (needed < max_nob) /* well. printing ok.. */
			break;
	}
	if (*(string_buf + needed - 1) != '\n')
		printk(KERN_INFO "format at %s:%d:%s doesn't end in newline\n",
		       file, msgdata->msg_line, msgdata->msg_fn);
	header.ph_len = known_size + needed;
	debug_buf = (char *)page_address(tage->page) + tage->used;
	if (libcfs_debug_binary) {
		memcpy(debug_buf, &header, sizeof(header));
		tage->used += sizeof(header);
		debug_buf += sizeof(header);
	}
	/* indent message according to the nesting level */
	while (depth-- > 0) {
		*(debug_buf++) = '.';
		++tage->used;
	}
	strcpy(debug_buf, file);
	tage->used += strlen(file) + 1;
	debug_buf += strlen(file) + 1;
	if (msgdata->msg_fn) {
		strcpy(debug_buf, msgdata->msg_fn);
		tage->used += strlen(msgdata->msg_fn) + 1;
		debug_buf += strlen(msgdata->msg_fn) + 1;
	}
	__LASSERT(debug_buf == string_buf);
	tage->used += needed;
	__LASSERT(tage->used <= PAGE_SIZE);
console:
	if ((mask & libcfs_printk) == 0) {
		/* no console output requested */
		if (tcd)
			cfs_trace_put_tcd(tcd);
		return 1;
	}
	if (cdls) {
		if (libcfs_console_ratelimit &&
		    cdls->cdls_next != 0 &&     /* not first time ever */
		    !cfs_time_after(cfs_time_current(), cdls->cdls_next)) {
			/* skipping a console message */
			cdls->cdls_count++;
			if (tcd)
				cfs_trace_put_tcd(tcd);
			return 1;
		}
		if (cfs_time_after(cfs_time_current(),
				   cdls->cdls_next + libcfs_console_max_delay +
				   cfs_time_seconds(10))) {
			/* last timeout was a long time ago */
			cdls->cdls_delay /= libcfs_console_backoff * 4;
		} else {
			cdls->cdls_delay *= libcfs_console_backoff;
		}
		if (cdls->cdls_delay < libcfs_console_min_delay)
			cdls->cdls_delay = libcfs_console_min_delay;
		else if (cdls->cdls_delay > libcfs_console_max_delay)
			cdls->cdls_delay = libcfs_console_max_delay;
		/* ensure cdls_next is never zero after it's been seen */
		cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1;
	}
	if (tcd) {
		cfs_print_to_console(&header, mask, string_buf, needed, file,
				     msgdata->msg_fn);
		cfs_trace_put_tcd(tcd);
	} else {
		string_buf = cfs_trace_get_console_buffer();
		needed = 0;
		if (format1) {
			va_copy(ap, args);
			needed = vsnprintf(string_buf,
					   CFS_TRACE_CONSOLE_BUFFER_SIZE,
					   format1, ap);
			va_end(ap);
		}
		if (format2) {
			remain = CFS_TRACE_CONSOLE_BUFFER_SIZE - needed;
			if (remain > 0) {
				va_start(ap, format2);
				needed += vsnprintf(string_buf + needed, remain,
						    format2, ap);
				va_end(ap);
			}
		}
		cfs_print_to_console(&header, mask,
				     string_buf, needed, file, msgdata->msg_fn);
		put_cpu();
	}
	if (cdls && cdls->cdls_count != 0) {
		string_buf = cfs_trace_get_console_buffer();
		needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
				  "Skipped %d previous similar message%s\n",
				  cdls->cdls_count,
				  (cdls->cdls_count > 1) ? "s" : "");
		cfs_print_to_console(&header, mask,
				     string_buf, needed, file, msgdata->msg_fn);
		put_cpu();
		cdls->cdls_count = 0;
	}
	return 0;
}
EXPORT_SYMBOL(libcfs_debug_vmsg2);
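/*
 * Worked example of the console ratelimit in libcfs_debug_vmsg2() above
 * (assuming the default libcfs_console_backoff of 2): every message that
 * reaches the console multiplies cdls_delay by the backoff factor,
 * clamped to [libcfs_console_min_delay, libcfs_console_max_delay], and
 * messages arriving before cdls_next are only counted in cdls_count.
 * Once the console has been quiet for max_delay + 10 s, cdls_delay is
 * divided by backoff * 4 so a new burst prints promptly again.  The
 * "| 1" when computing cdls_next keeps it non-zero, so "cdls_next != 0"
 * reliably means "seen before" even if the time value would be zero.
 */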
void
cfs_trace_assertion_failed(const char *str,
			   struct libcfs_debug_msg_data *msgdata)
{
	struct ptldebug_header hdr;
	libcfs_panic_in_progress = 1;
	libcfs_catastrophe = 1;
	mb();
	cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());
	cfs_print_to_console(&hdr, D_EMERG, str, strlen(str),
			     msgdata->msg_file, msgdata->msg_fn);
	panic("Lustre debug assertion failure\n");
	/* not reached */
}
static void
panic_collect_pages(struct page_collection *pc)
{
	/* Do the collect_pages job on a single CPU: assumes that all other
         * CPUs have been stopped during a panic.  If this isn't true for some
         * arch, this will have to be implemented separately in each arch.
         */
	int			i;
	int			j;
	struct cfs_trace_cpu_data *tcd;
	INIT_LIST_HEAD(&pc->pc_pages);
	cfs_tcd_for_each(tcd, i, j) {
		list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
		tcd->tcd_cur_pages = 0;
		if (pc->pc_want_daemon_pages) {
			list_splice_init(&tcd->tcd_daemon_pages, &pc->pc_pages);
			tcd->tcd_cur_daemon_pages = 0;
		}
	}
}
static void collect_pages_on_all_cpus(struct page_collection *pc)
{
	struct cfs_trace_cpu_data *tcd;
	int i, cpu;
	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
			list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
			tcd->tcd_cur_pages = 0;
			if (pc->pc_want_daemon_pages) {
				list_splice_init(&tcd->tcd_daemon_pages,
						 &pc->pc_pages);
				tcd->tcd_cur_daemon_pages = 0;
			}
		}
	}
}
static void collect_pages(struct page_collection *pc)
{
	INIT_LIST_HEAD(&pc->pc_pages);
	if (libcfs_panic_in_progress)
		panic_collect_pages(pc);
	else
		collect_pages_on_all_cpus(pc);
}
static void put_pages_back_on_all_cpus(struct page_collection *pc)
{
	struct cfs_trace_cpu_data *tcd;
	struct list_head *cur_head;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	int i, cpu;
	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
			cur_head = tcd->tcd_pages.next;
			list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
						 linkage) {
				__LASSERT_TAGE_INVARIANT(tage);
				if (tage->cpu != cpu || tage->type != i)
					continue;
				cfs_tage_to_tail(tage, cur_head);
				tcd->tcd_cur_pages++;
			}
		}
	}
}
static void put_pages_back(struct page_collection *pc)
{
	if (!libcfs_panic_in_progress)
		put_pages_back_on_all_cpus(pc);
}
/* Add pages to a per-cpu debug daemon ringbuffer.  This buffer makes sure that
 * we have a good amount of data at all times for dumping during an LBUG, even
 * if we have been steadily writing (and otherwise discarding) pages via the
 * debug daemon.
 */
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
					 struct cfs_trace_cpu_data *tcd)
{
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
		__LASSERT_TAGE_INVARIANT(tage);
		if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
			continue;
		cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages);
		tcd->tcd_cur_daemon_pages++;
		if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
			struct cfs_trace_page *victim;
			__LASSERT(!list_empty(&tcd->tcd_daemon_pages));
			victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next);
			__LASSERT_TAGE_INVARIANT(victim);
			list_del(&victim->linkage);
			cfs_tage_free(victim);
			tcd->tcd_cur_daemon_pages--;
		}
	}
}
static void put_pages_on_daemon_list(struct page_collection *pc)
{
	struct cfs_trace_cpu_data *tcd;
	int i, cpu;
	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu)
			put_pages_on_tcd_daemon_list(pc, tcd);
	}
}
void cfs_trace_debug_print(void)
{
	struct page_collection pc;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	pc.pc_want_daemon_pages = 1;
	collect_pages(&pc);
	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
		char *p, *file, *fn;
		struct page *page;
		__LASSERT_TAGE_INVARIANT(tage);
		page = tage->page;
		p = page_address(page);
		while (p < ((char *)page_address(page) + tage->used)) {
			struct ptldebug_header *hdr;
			int len;
			hdr = (void *)p;
			p += sizeof(*hdr);
			file = p;
			p += strlen(file) + 1;
			fn = p;
			p += strlen(fn) + 1;
			len = hdr->ph_len - (int)(p - (char *)hdr);
			cfs_print_to_console(hdr, D_EMERG, p, len, file, fn);
			p += len;
		}
		list_del(&tage->linkage);
		cfs_tage_free(tage);
	}
}
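/*
 * Note on the record layout parsed above (it mirrors the writer side in
 * libcfs_debug_vmsg2(), which the parser here assumes ran with
 * libcfs_debug_binary enabled): each record on a trace page is a struct
 * ptldebug_header, immediately followed by the NUL-terminated source
 * file name, then the NUL-terminated function name, and finally
 * ph_len minus those prefixes bytes of formatted message text.
 */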
int cfs_tracefile_dump_all_pages(char *filename)
{
	struct page_collection	pc;
	struct file		*filp;
	struct cfs_trace_page	*tage;
	struct cfs_trace_page	*tmp;
	char			*buf;
	mm_segment_t __oldfs;
	int rc;
	cfs_tracefile_write_lock();
	filp = filp_open(filename, O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE,
			 0600);
	if (IS_ERR(filp)) {
		rc = PTR_ERR(filp);
		filp = NULL;
		pr_err("LustreError: can't open %s for dump: rc %d\n",
		       filename, rc);
		goto out;
	}
	pc.pc_want_daemon_pages = 1;
	collect_pages(&pc);
	if (list_empty(&pc.pc_pages)) {
		rc = 0;
		goto close;
	}
	__oldfs = get_fs();
	set_fs(get_ds());
	/* ok, for now, just write the pages.  in the future we'll be building
         * iobufs with the pages and calling generic_direct_IO
         */
	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
		__LASSERT_TAGE_INVARIANT(tage);
		buf = kmap(tage->page);
		rc = vfs_write(filp, (__force const char __user *)buf,
			       tage->used, &filp->f_pos);
		kunmap(tage->page);
		if (rc != (int)tage->used) {
			printk(KERN_WARNING "wanted to write %u but wrote %d\n",
			       tage->used, rc);
			put_pages_back(&pc);
			__LASSERT(list_empty(&pc.pc_pages));
			break;
		}
		list_del(&tage->linkage);
		cfs_tage_free(tage);
	}
	set_fs(__oldfs);
	rc = vfs_fsync(filp, 1);
	if (rc)
		pr_err("sync returns %d\n", rc);
close:
	filp_close(filp, NULL);
out:
	cfs_tracefile_write_unlock();
	return rc;
}
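/*
 * Illustrative sketch (not part of the original file): dumping all
 * collected pages to a file.  The path below is a hypothetical example;
 * note that the function opens with O_CREAT | O_EXCL and therefore
 * refuses to overwrite an existing file.
 */
static int cfs_example_dump(void)
{
	static char path[] = "/tmp/lustre-debug.log";

	return cfs_tracefile_dump_all_pages(path);
}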
void cfs_trace_flush_pages(void)
{
	struct page_collection pc;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	pc.pc_want_daemon_pages = 1;
	collect_pages(&pc);
	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
		__LASSERT_TAGE_INVARIANT(tage);
		list_del(&tage->linkage);
		cfs_tage_free(tage);
	}
}
int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
			    const char __user *usr_buffer, int usr_buffer_nob)
{
	int    nob;
	if (usr_buffer_nob > knl_buffer_nob)
		return -EOVERFLOW;
	if (copy_from_user((void *)knl_buffer,
			   usr_buffer, usr_buffer_nob))
		return -EFAULT;
	nob = strnlen(knl_buffer, usr_buffer_nob);
	while (nob-- >= 0)		      /* strip trailing whitespace */
		if (!isspace(knl_buffer[nob]))
			break;
	if (nob < 0)			    /* empty string */
		return -EINVAL;
	if (nob == knl_buffer_nob)	      /* no space to terminate */
		return -EOVERFLOW;
	knl_buffer[nob + 1] = 0;		/* terminate */
	return 0;
}
EXPORT_SYMBOL(cfs_trace_copyin_string);
int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
			     const char *knl_buffer, char *append)
{
	/*
         * NB if 'append' != NULL, it's a single character to append to the
         * copied out string - usually "\n" or "" (i.e. a terminating zero byte)
         */
	int   nob = strlen(knl_buffer);
	if (nob > usr_buffer_nob)
		nob = usr_buffer_nob;
	if (copy_to_user(usr_buffer, knl_buffer, nob))
		return -EFAULT;
	if (append && nob < usr_buffer_nob) {
		if (copy_to_user(usr_buffer + nob, append, 1))
			return -EFAULT;
		nob++;
	}
	return nob;
}
EXPORT_SYMBOL(cfs_trace_copyout_string);
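/*
 * Illustrative sketch (not part of the original file): a round trip
 * through the two copy helpers, e.g. from an ioctl handler.  The buffer
 * size and the appended "\n" are illustrative assumptions.
 */
static int cfs_example_echo(char __user *ubuf, int unob)
{
	char knl[80];
	int rc;

	rc = cfs_trace_copyin_string(knl, sizeof(knl), ubuf, unob);
	if (rc != 0)
		return rc;	/* -EOVERFLOW, -EFAULT or -EINVAL */

	/* copy the trimmed string back, appending a newline if room */
	return cfs_trace_copyout_string(ubuf, unob, knl, "\n");
}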
int cfs_trace_allocate_string_buffer(char **str, int nob)
{
	if (nob > 2 * PAGE_SIZE)	    /* string must be "sensible" */
		return -EINVAL;
	*str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);
	if (!*str)
		return -ENOMEM;
	return 0;
}
int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob)
{
	char	 *str;
	int	   rc;
	rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
	if (rc != 0)
		return rc;
	rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
				     usr_str, usr_str_nob);
	if (rc != 0)
		goto out;
	if (str[0] != '/') {
		rc = -EINVAL;
		goto out;
	}
	rc = cfs_tracefile_dump_all_pages(str);
out:
	kfree(str);
	return rc;
}
int cfs_trace_daemon_command(char *str)
{
	int       rc = 0;
	cfs_tracefile_write_lock();
	if (strcmp(str, "stop") == 0) {
		cfs_tracefile_write_unlock();
		cfs_trace_stop_thread();
		cfs_tracefile_write_lock();
		memset(cfs_tracefile, 0, sizeof(cfs_tracefile));
	} else if (strncmp(str, "size=", 5) == 0) {
		unsigned long tmp;
		rc = kstrtoul(str + 5, 10, &tmp);
		if (!rc) {
			if (tmp < 10 || tmp > 20480)
				cfs_tracefile_size = CFS_TRACEFILE_SIZE;
			else
				cfs_tracefile_size = tmp << 20;
		}
	} else if (strlen(str) >= sizeof(cfs_tracefile)) {
		rc = -ENAMETOOLONG;
	} else if (str[0] != '/') {
		rc = -EINVAL;
	} else {
		strcpy(cfs_tracefile, str);
		printk(KERN_INFO
		       "Lustre: debug daemon will attempt to start writing to %s (%lukB max)\n",
		       cfs_tracefile,
		       (long)(cfs_tracefile_size >> 10));
		cfs_trace_start_thread();
	}
	cfs_tracefile_write_unlock();
	return rc;
}
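/*
 * Illustrative sketch (not part of the original file): driving the
 * debug daemon with the three command forms parsed above.  The file
 * name is a hypothetical example; sizes outside 10..20480 MB fall back
 * to CFS_TRACEFILE_SIZE.
 */
static int cfs_example_drive_daemon(void)
{
	static char size_cmd[]  = "size=64";	/* 64 MB ring file */
	static char start_cmd[] = "/tmp/lustre-trace.log";
	static char stop_cmd[]  = "stop";
	int rc;

	rc = cfs_trace_daemon_command(size_cmd);
	if (rc == 0)
		rc = cfs_trace_daemon_command(start_cmd);
	if (rc == 0)
		rc = cfs_trace_daemon_command(stop_cmd);
	return rc;
}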
int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob)
{
	char *str;
	int   rc;
	rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
	if (rc != 0)
		return rc;
	rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
				     usr_str, usr_str_nob);
	if (rc == 0)
		rc = cfs_trace_daemon_command(str);
	kfree(str);
	return rc;
}
int cfs_trace_set_debug_mb(int mb)
{
	int i;
	int j;
	int pages;
	int limit = cfs_trace_max_debug_mb();
	struct cfs_trace_cpu_data *tcd;
	if (mb < num_possible_cpus()) {
		printk(KERN_WARNING
		       "Lustre: %d MB is too small for debug buffer size, setting it to %d MB.\n",
		       mb, num_possible_cpus());
		mb = num_possible_cpus();
	}
	if (mb > limit) {
		printk(KERN_WARNING
		       "Lustre: %d MB is too large for debug buffer size, setting it to %d MB.\n",
		       mb, limit);
		mb = limit;
	}
	mb /= num_possible_cpus();
	pages = mb << (20 - PAGE_SHIFT);
	cfs_tracefile_write_lock();
	cfs_tcd_for_each(tcd, i, j)
		tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;
	cfs_tracefile_write_unlock();
	return 0;
}
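/*
 * Worked example of the sizing arithmetic above, assuming 4 possible
 * CPUs and 4 KiB pages (PAGE_SHIFT = 12): a request of 256 MB becomes
 * mb = 256 / 4 = 64 MB per CPU, pages = 64 << (20 - 12) = 16384 pages,
 * which are then divided among the per-CPU contexts according to each
 * tcd_pages_factor percentage.
 */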
int cfs_trace_get_debug_mb(void)
{
	int i;
	int j;
	struct cfs_trace_cpu_data *tcd;
	int total_pages = 0;
	cfs_tracefile_read_lock();
	cfs_tcd_for_each(tcd, i, j)
		total_pages += tcd->tcd_max_pages;
	cfs_tracefile_read_unlock();
	return (total_pages >> (20 - PAGE_SHIFT)) + 1;
}
static int tracefiled(void *arg)
{
	struct page_collection pc;
	struct tracefiled_ctl *tctl = arg;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	mm_segment_t __oldfs;
	struct file *filp;
	char *buf;
	int last_loop = 0;
	int rc;
	/* we're started late enough that we pick up init's fs context */
	/* this is so broken in uml?  what on earth is going on? */
	complete(&tctl->tctl_start);
	while (1) {
		wait_queue_t __wait;
		pc.pc_want_daemon_pages = 0;
		collect_pages(&pc);
		if (list_empty(&pc.pc_pages))
			goto end_loop;
		filp = NULL;
		cfs_tracefile_read_lock();
		if (cfs_tracefile[0] != 0) {
			filp = filp_open(cfs_tracefile,
					 O_CREAT | O_RDWR | O_LARGEFILE,
					 0600);
			if (IS_ERR(filp)) {
				rc = PTR_ERR(filp);
				filp = NULL;
				printk(KERN_WARNING "couldn't open %s: %d\n",
				       cfs_tracefile, rc);
			}
		}
		cfs_tracefile_read_unlock();
		if (!filp) {
			put_pages_on_daemon_list(&pc);
			__LASSERT(list_empty(&pc.pc_pages));
			goto end_loop;
		}
		__oldfs = get_fs();
		set_fs(get_ds());
		list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
			static loff_t f_pos;
			__LASSERT_TAGE_INVARIANT(tage);
			if (f_pos >= (off_t)cfs_tracefile_size)
				f_pos = 0;
			else if (f_pos > i_size_read(file_inode(filp)))
				f_pos = i_size_read(file_inode(filp));
			buf = kmap(tage->page);
			rc = vfs_write(filp, (__force const char __user *)buf,
				       tage->used, &f_pos);
			kunmap(tage->page);
			if (rc != (int)tage->used) {
				printk(KERN_WARNING "wanted to write %u but wrote %d\n",
				       tage->used, rc);
				put_pages_back(&pc);
				__LASSERT(list_empty(&pc.pc_pages));
				break;
			}
		}
		set_fs(__oldfs);
		filp_close(filp, NULL);
		put_pages_on_daemon_list(&pc);
		if (!list_empty(&pc.pc_pages)) {
			int i;
			printk(KERN_ALERT "Lustre: trace pages aren't empty\n");
			pr_err("total cpus(%d): ", num_possible_cpus());
			for (i = 0; i < num_possible_cpus(); i++)
				if (cpu_online(i))
					pr_cont("%d(on) ", i);
				else
					pr_cont("%d(off) ", i);
			pr_cont("\n");
			i = 0;
			list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
						 linkage)
				pr_err("page %d belongs to cpu %d\n",
				       ++i, tage->cpu);
			pr_err("There are %d pages unwritten\n", i);
		}
		__LASSERT(list_empty(&pc.pc_pages));
end_loop:
		if (atomic_read(&tctl->tctl_shutdown)) {
			if (last_loop == 0) {
				last_loop = 1;
				continue;
			} else {
				break;
			}
		}
		init_waitqueue_entry(&__wait, current);
		add_wait_queue(&tctl->tctl_waitq, &__wait);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1));
		remove_wait_queue(&tctl->tctl_waitq, &__wait);
	}
	complete(&tctl->tctl_stop);
	return 0;
}
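/*
 * Note (not part of the original file): the static f_pos above makes
 * the output file a ring buffer: once the write offset reaches
 * cfs_tracefile_size it wraps to 0, so the oldest records are
 * overwritten in place rather than growing the file without bound.
 */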
int cfs_trace_start_thread(void)
{
	struct tracefiled_ctl *tctl = &trace_tctl;
	struct task_struct *task;
	int rc = 0;
	mutex_lock(&cfs_trace_thread_mutex);
	if (thread_running)
		goto out;
	init_completion(&tctl->tctl_start);
	init_completion(&tctl->tctl_stop);
	init_waitqueue_head(&tctl->tctl_waitq);
	atomic_set(&tctl->tctl_shutdown, 0);
	task = kthread_run(tracefiled, tctl, "ktracefiled");
	if (IS_ERR(task)) {
		rc = PTR_ERR(task);
		goto out;
	}
	wait_for_completion(&tctl->tctl_start);
	thread_running = 1;
out:
	mutex_unlock(&cfs_trace_thread_mutex);
	return rc;
}
void cfs_trace_stop_thread(void)
{
	struct tracefiled_ctl *tctl = &trace_tctl;
	mutex_lock(&cfs_trace_thread_mutex);
	if (thread_running) {
		printk(KERN_INFO
		       "Lustre: shutting down debug daemon thread...\n");
		atomic_set(&tctl->tctl_shutdown, 1);
		wait_for_completion(&tctl->tctl_stop);
		thread_running = 0;
	}
	mutex_unlock(&cfs_trace_thread_mutex);
}
int cfs_tracefile_init(int max_pages)
{
	struct cfs_trace_cpu_data *tcd;
	int		    i;
	int		    j;
	int		    rc;
	int		    factor;
	rc = cfs_tracefile_init_arch();
	if (rc != 0)
		return rc;
	cfs_tcd_for_each(tcd, i, j) {
		/* tcd_pages_factor is initialized in cfs_tracefile_init_arch(). */
		factor = tcd->tcd_pages_factor;
		INIT_LIST_HEAD(&tcd->tcd_pages);
		INIT_LIST_HEAD(&tcd->tcd_stock_pages);
		INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
		tcd->tcd_cur_pages = 0;
		tcd->tcd_cur_stock_pages = 0;
		tcd->tcd_cur_daemon_pages = 0;
		tcd->tcd_max_pages = (max_pages * factor) / 100;
		LASSERT(tcd->tcd_max_pages > 0);
		tcd->tcd_shutting_down = 0;
	}
	return 0;
}
static void trace_cleanup_on_all_cpus(void)
{
	struct cfs_trace_cpu_data *tcd;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	int i, cpu;
	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
			tcd->tcd_shutting_down = 1;
			list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
						 linkage) {
				__LASSERT_TAGE_INVARIANT(tage);
				list_del(&tage->linkage);
				cfs_tage_free(tage);
			}
			tcd->tcd_cur_pages = 0;
		}
	}
}
static void cfs_trace_cleanup(void)
{
	struct page_collection pc;
	INIT_LIST_HEAD(&pc.pc_pages);
	trace_cleanup_on_all_cpus();
	cfs_tracefile_fini_arch();
}
void cfs_tracefile_exit(void)
{
	cfs_trace_stop_thread();
	cfs_trace_cleanup();
}
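/*
 * Illustrative sketch (not part of the original file): how module glue
 * might pair cfs_tracefile_init() with cfs_tracefile_exit().  The page
 * budget of 1024 is a hypothetical example value.
 */
static int __init cfs_example_init(void)
{
	/* budget of 1024 pages, split across CPUs by tcd_pages_factor */
	return cfs_tracefile_init(1024);
}

static void __exit cfs_example_exit(void)
{
	cfs_tracefile_exit();	/* stops the daemon and frees all pages */
}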