Linux release 4.7: drivers/infiniband/hw/mlx4/mr.c

/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/slab.h>
#include <rdma/ib_user_verbs.h>
#include "mlx4_ib.h"
static u32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX4_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX4_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX4_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX4_PERM_LOCAL_WRITE  : 0) |
	       (acc & IB_ACCESS_MW_BIND	      ? MLX4_PERM_BIND_MW      : 0) |
	       MLX4_PERM_LOCAL_READ;
}
Contributors
| Person | Tokens | Proportion | Commits | Commit Proportion |
| roland dreier | 52 | 83.87% | 1 | 50.00% |
| shani michaelli | 10 | 16.13% | 1 | 50.00% |
| Total | 62 | 100.00% | 2 | 100.00% |
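
As a quick illustration of the flag translation above, the following standalone userspace sketch mirrors convert_access(), with the IB_ACCESS_* and MLX4_PERM_* values hard-coded as assumptions copied from the 4.7-era rdma/ib_verbs.h and mlx4 headers; double-check them against your tree before relying on the exact bit positions.

/* Illustrative sketch only; not part of mr.c. Constants are assumptions
 * copied from the 4.7-era headers. */
#include <stdio.h>

/* Assumed ib_verbs.h access flags. */
#define IB_ACCESS_LOCAL_WRITE   (1 << 0)
#define IB_ACCESS_REMOTE_WRITE  (1 << 1)
#define IB_ACCESS_REMOTE_READ   (1 << 2)
#define IB_ACCESS_REMOTE_ATOMIC (1 << 3)
#define IB_ACCESS_MW_BIND       (1 << 4)

/* Assumed mlx4 MPT permission bits. */
#define MLX4_PERM_LOCAL_READ    (1 << 10)
#define MLX4_PERM_LOCAL_WRITE   (1 << 11)
#define MLX4_PERM_REMOTE_READ   (1 << 12)
#define MLX4_PERM_REMOTE_WRITE  (1 << 13)
#define MLX4_PERM_ATOMIC        (1 << 14)
#define MLX4_PERM_BIND_MW       (1 << 15)

/* Same shape as convert_access(): local read is always granted. */
static unsigned int convert_access_sketch(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX4_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX4_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX4_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX4_PERM_LOCAL_WRITE  : 0) |
	       (acc & IB_ACCESS_MW_BIND       ? MLX4_PERM_BIND_MW      : 0) |
	       MLX4_PERM_LOCAL_READ;
}

int main(void)
{
	int acc = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	/* Prints 0x1c00: LOCAL_READ | LOCAL_WRITE | REMOTE_READ. */
	printf("0x%x\n", convert_access_sketch(acc));
	return 0;
}
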
static enum mlx4_mw_type to_mlx4_type(enum ib_mw_type type)
{
	switch (type) {
	case IB_MW_TYPE_1:	return MLX4_MW_TYPE_1;
	case IB_MW_TYPE_2:	return MLX4_MW_TYPE_2;
	default:		return -1;
	}
}
Contributors
| Person | Tokens | Proportion | Commits | Commit Proportion |
| shani michaelli | 34 | 100.00% | 1 | 100.00% |
| Total | 34 | 100.00% | 1 | 100.00% |
struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx4_ib_mr *mr;
	int err;
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);
	err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0,
			    ~0ull, convert_access(acc), 0, 0, &mr->mmr);
	if (err)
		goto err_free;
	err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
	if (err)
		goto err_mr;
	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;
	return &mr->ibmr;
err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}
Contributors
| Person | Tokens | Proportion | Commits | Commit Proportion |
| roland dreier | 179 | 96.24% | 1 | 33.33% |
| sagi grimberg | 4 | 2.15% | 1 | 33.33% |
| shani michaelli | 3 | 1.61% | 1 | 33.33% |
| Total | 186 | 100.00% | 3 | 100.00% |
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
			   struct ib_umem *umem)
{
	u64 *pages;
	int i, k, entry;
	int n;
	int len;
	int err = 0;
	struct scatterlist *sg;
	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;
	i = n = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> mtt->page_shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = sg_dma_address(sg) +
				umem->page_size * k;
			/*
			 * Be friendly to mlx4_write_mtt() and
			 * pass it chunks of appropriate size.
			 */
			if (i == PAGE_SIZE / sizeof (u64)) {
				err = mlx4_write_mtt(dev->dev, mtt, n,
						     i, pages);
				if (err)
					goto out;
				n += i;
				i = 0;
			}
		}
	}
	if (i)
		err = mlx4_write_mtt(dev->dev, mtt, n, i, pages);
out:
	free_page((unsigned long) pages);
	return err;
}
Contributors
| Person | Tokens | Proportion | Commits | Commit Proportion |
| roland dreier | 193 | 92.34% | 1 | 50.00% |
| yishai hadas | 16 | 7.66% | 1 | 50.00% |
| Total | 209 | 100.00% | 2 | 100.00% |
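
The chunking in mlx4_ib_umem_write_mtt() above bounds each mlx4_write_mtt() call to however many 64-bit MTT entries fit in the single scratch page it allocates. A minimal standalone sketch of that bound follows; the 4 KiB PAGE_SIZE is an assumption here, while the driver uses the kernel's actual PAGE_SIZE.

/* Illustrative sketch only; not part of mr.c. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned long page_size = 4096;	/* assumed PAGE_SIZE */
	const unsigned long chunk = page_size / sizeof(uint64_t);

	/* One scratch page holds this many MTT entries, so the loop above
	 * flushes to mlx4_write_mtt() every 512 entries on 4 KiB pages. */
	printf("entries per mlx4_write_mtt() chunk: %lu\n", chunk);
	return 0;
}
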
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mr *mr;
	int shift;
	int err;
	int n;
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);
	/* Force registering the memory as writable. */
	/* Used for memory re-registration. The HCA protects the access. */
	mr->umem = ib_umem_get(pd->uobject->context, start, length,
			       access_flags | IB_ACCESS_LOCAL_WRITE, 0);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		goto err_free;
	}
	n = ib_umem_page_count(mr->umem);
	shift = ilog2(mr->umem->page_size);
	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
			    convert_access(access_flags), n, shift, &mr->mmr);
	if (err)
		goto err_umem;
	err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
	if (err)
		goto err_mr;
	err = mlx4_mr_enable(dev->dev, &mr->mmr);
	if (err)
		goto err_mr;
	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	return &mr->ibmr;
err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
err_umem:
	ib_umem_release(mr->umem);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}
Contributors
| Person | Tokens | Proportion | Commits | Commit Proportion |
| roland dreier | 290 | 95.71% | 1 | 20.00% |
| sagi grimberg | 4 | 1.32% | 1 | 20.00% |
| matan barak | 4 | 1.32% | 1 | 20.00% |
| shani michaelli | 3 | 0.99% | 1 | 20.00% |
| arthur kepner | 2 | 0.66% | 1 | 20.00% |
| Total | 303 | 100.00% | 5 | 100.00% |
int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
			  u64 start, u64 length, u64 virt_addr,
			  int mr_access_flags, struct ib_pd *pd,
			  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(mr->device);
	struct mlx4_ib_mr *mmr = to_mmr(mr);
	struct mlx4_mpt_entry *mpt_entry;
	struct mlx4_mpt_entry **pmpt_entry = &mpt_entry;
	int err;
	/* Since we synchronize this call and mlx4_ib_dereg_mr via uverbs,
	 * we assume that the calls can't run concurrently. Otherwise, a
	 * race exists.
	 */
	err =  mlx4_mr_hw_get_mpt(dev->dev, &mmr->mmr, &pmpt_entry);
	if (err)
		return err;
	if (flags & IB_MR_REREG_PD) {
		err = mlx4_mr_hw_change_pd(dev->dev, *pmpt_entry,
					   to_mpd(pd)->pdn);
		if (err)
			goto release_mpt_entry;
	}
	if (flags & IB_MR_REREG_ACCESS) {
		err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
					       convert_access(mr_access_flags));
		if (err)
			goto release_mpt_entry;
	}
	if (flags & IB_MR_REREG_TRANS) {
		int shift;
		int n;
		mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
		ib_umem_release(mmr->umem);
		mmr->umem = ib_umem_get(mr->uobject->context, start, length,
					mr_access_flags |
					IB_ACCESS_LOCAL_WRITE,
					0);
		if (IS_ERR(mmr->umem)) {
			err = PTR_ERR(mmr->umem);
			/* Prevent mlx4_ib_dereg_mr from freeing an invalid pointer */
			mmr->umem = NULL;
			goto release_mpt_entry;
		}
		n = ib_umem_page_count(mmr->umem);
		shift = ilog2(mmr->umem->page_size);
		err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
					      virt_addr, length, n, shift,
					      *pmpt_entry);
		if (err) {
			ib_umem_release(mmr->umem);
			goto release_mpt_entry;
		}
		mmr->mmr.iova       = virt_addr;
		mmr->mmr.size       = length;
		err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
		if (err) {
			mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
			ib_umem_release(mmr->umem);
			goto release_mpt_entry;
		}
	}
	/* If we couldn't transfer the MR to the HCA, just remember to
	 * return a failure. But dereg_mr will free the resources.
	 */
	err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);
	if (!err && flags & IB_MR_REREG_ACCESS)
		mmr->mmr.access = mr_access_flags;
release_mpt_entry:
	mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);
	return err;
}
Contributors
| Person | Tokens | Proportion | Commits | Commit Proportion |
| matan barak | 422 | 100.00% | 2 | 100.00% |
| Total | 422 | 100.00% | 2 | 100.00% |
static int
mlx4_alloc_priv_pages(struct ib_device *device,
		      struct mlx4_ib_mr *mr,
		      int max_pages)
{
	int ret;
	/* Ensure that size is aligned to DMA cacheline
	 * requirements.
	 * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
	 * so page_map_size will never cross PAGE_SIZE.
	 */
	mr->page_map_size = roundup(max_pages * sizeof(u64),
				    MLX4_MR_PAGES_ALIGN);
	/* Prevent cross page boundary allocation. */
	mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
	if (!mr->pages)
		return -ENOMEM;
	mr->page_map = dma_map_single(device->dma_device, mr->pages,
				      mr->page_map_size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dma_device, mr->page_map)) {
		ret = -ENOMEM;
		goto err;
	}
	return 0;
err:
	free_page((unsigned long)mr->pages);
	return ret;
}
Contributors
| Person | Tokens | Proportion | Commits | Commit Proportion |
| sagi grimberg | 101 | 79.53% | 1 | 50.00% |
| chuck lever | 26 | 20.47% | 1 | 50.00% |
| Total | 127 | 100.00% | 2 | 100.00% |
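
The comment in mlx4_alloc_priv_pages() asserts that page_map_size can never cross PAGE_SIZE because max_pages is capped at MLX4_MAX_FAST_REG_PAGES. Below is a small standalone check of that claim, with MLX4_MAX_FAST_REG_PAGES (511) and MLX4_MR_PAGES_ALIGN (64) taken as assumptions from the 4.7-era mlx4 headers.

/* Illustrative sketch only; not part of mr.c. Constants are assumptions
 * copied from the 4.7-era mlx4 headers. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MLX4_MAX_FAST_REG_PAGES	511
#define MLX4_MR_PAGES_ALIGN	64
#define ROUNDUP(x, a)		((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
	size_t worst = ROUNDUP(MLX4_MAX_FAST_REG_PAGES * sizeof(uint64_t),
			       MLX4_MR_PAGES_ALIGN);

	/* 511 * 8 = 4088, rounded up to a multiple of 64 gives 4096:
	 * exactly one 4 KiB page, so a single get_zeroed_page() allocation
	 * suffices and the page list never straddles a page boundary. */
	printf("worst-case page_map_size = %zu\n", worst);
	assert(worst <= 4096);
	return 0;
}
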
static void
mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
{
	if (mr->pages) {
		struct ib_device *device = mr->ibmr.device;
		dma_unmap_single(device->dma_device, mr->page_map,
				 mr->page_map_size, DMA_TO_DEVICE);
		free_page((unsigned long)mr->pages);
		mr->pages = NULL;
	}
}
Contributors
| Person | Tokens | Proportion | Commits | Commit Proportion |
| sagi grimberg | 55 | 85.94% | 1 | 50.00% |
| chuck lever | 9 | 14.06% | 1 | 50.00% |
| Total | 64 | 100.00% | 2 | 100.00% |
int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);
	int ret;
	mlx4_free_priv_pages(mr);
	ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
	if (ret)
		return ret;
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);
	return 0;
}
Contributors
| Person | Tokens | Proportion | Commits | Commit Proportion |
| roland dreier | 58 | 77.33% | 1 | 33.33% |
| shani michaelli | 12 | 16.00% | 1 | 33.33% |
| sagi grimberg | 5 | 6.67% | 1 | 33.33% |
| Total | 75 | 100.00% | 3 | 100.00% |
struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mw *mw;
	int err;
	mw = kmalloc(sizeof(*mw), GFP_KERNEL);
	if (!mw)
		return ERR_PTR(-ENOMEM);
	err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
			    to_mlx4_type(type), &mw->mmw);
	if (err)
		goto err_free;
	err = mlx4_mw_enable(dev->dev, &mw->mmw);
	if (err)
		goto err_mw;
	mw->ibmw.rkey = mw->mmw.key;
	return &mw->ibmw;
err_mw:
	mlx4_mw_free(dev->dev, &mw->mmw);
err_free:
	kfree(mw);
	return ERR_PTR(err);
}
Contributors
| Person | Tokens | Proportion | Commits | Commit Proportion |
| shani michaelli | 160 | 96.97% | 1 | 50.00% |
| matan barak | 5 | 3.03% | 1 | 50.00% |
| Total | 165 | 100.00% | 2 | 100.00% |
int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
{
	struct mlx4_ib_mw *mw = to_mmw(ibmw);
	mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
	kfree(mw);
	return 0;
}
Contributors
| Person | Tokens | Proportion | Commits | Commit Proportion |
| shani michaelli | 45 | 100.00% | 1 | 100.00% |
| Total | 45 | 100.00% | 1 | 100.00% |
struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mr *mr;
	int err;
	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > MLX4_MAX_FAST_REG_PAGES)
		return ERR_PTR(-EINVAL);
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);
	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
			    max_num_sg, 0, &mr->mmr);
	if (err)
		goto err_free;
	err = mlx4_alloc_priv_pages(pd->device, mr, max_num_sg);
	if (err)
		goto err_free_mr;
	mr->max_pages = max_num_sg;
	err = mlx4_mr_enable(dev->dev, &mr->mmr);
	if (err)
		goto err_free_pl;
	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;
	return &mr->ibmr;
err_free_pl:
	mlx4_free_priv_pages(mr);
err_free_mr:
	(void) mlx4_mr_free(dev->dev, &mr->mmr);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}
Contributors
| Person | Tokens | Proportion | Commits | Commit Proportion |
| roland dreier | 142 | 60.94% | 1 | 16.67% |
| sagi grimberg | 64 | 27.47% | 2 | 33.33% |
| vladimir sokolovsky | 24 | 10.30% | 2 | 33.33% |
| shani michaelli | 3 | 1.29% | 1 | 16.67% |
| Total | 233 | 100.00% | 6 | 100.00% |
struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
				 struct ib_fmr_attr *fmr_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_fmr *fmr;
	int err = -ENOMEM;
	fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
	if (!fmr)
		return ERR_PTR(-ENOMEM);
	err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc),
			     fmr_attr->max_pages, fmr_attr->max_maps,
			     fmr_attr->page_shift, &fmr->mfmr);
	if (err)
		goto err_free;
	err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
	if (err)
		goto err_mr;
	fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;
	return &fmr->ibfmr;
err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);
err_free:
	kfree(fmr);
	return ERR_PTR(err);
}
Contributors
| Person | Tokens | Proportion | Commits | Commit Proportion |
| jack morgenstein | 197 | 98.50% | 2 | 66.67% |
| shani michaelli | 3 | 1.50% | 1 | 33.33% |
| Total | 200 | 100.00% | 3 | 100.00% |
int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		      int npages, u64 iova)
{
	struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
	struct mlx4_ib_dev *dev = to_mdev(ifmr->ibfmr.device);
	return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
				 &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
}
Contributors
| Person | Tokens | Proportion | Commits | Commit Proportion |
| jack morgenstein | 77 | 100.00% | 1 | 100.00% |
| Total | 77 | 100.00% | 1 | 100.00% |
int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *ibfmr;
	int err;
	struct mlx4_dev *mdev = NULL;
	list_for_each_entry(ibfmr, fmr_list, list) {
		if (mdev && to_mdev(ibfmr->device)->dev != mdev)
			return -EINVAL;
		mdev = to_mdev(ibfmr->device)->dev;
	}
	if (!mdev)
		return 0;
	list_for_each_entry(ibfmr, fmr_list, list) {
		struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
		mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
	}
	/*
	 * Make sure all MPT status updates are visible before issuing
	 * SYNC_TPT firmware command.
	 */
	wmb();
	err = mlx4_SYNC_TPT(mdev);
	if (err)
		pr_warn("SYNC_TPT error %d when unmapping FMRs\n", err);
	return 0;
}
Contributors
| Person | Tokens | Proportion | Commits | Commit Proportion |
| jack morgenstein | 141 | 98.60% | 1 | 50.00% |
| shlomo pongratz | 2 | 1.40% | 1 | 50.00% |
| Total | 143 | 100.00% | 2 | 100.00% |
int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
{
	struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
	struct mlx4_ib_dev *dev = to_mdev(ibfmr->device);
	int err;
	err = mlx4_fmr_free(dev->dev, &ifmr->mfmr);
	if (!err)
		kfree(ifmr);
	return err;
}
Contributors
| Person | Tokens | Proportion | Commits | Commit Proportion |
| jack morgenstein | 62 | 100.00% | 1 | 100.00% |
| Total | 62 | 100.00% | 1 | 100.00% |
static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);
	if (unlikely(mr->npages == mr->max_pages))
		return -ENOMEM;
	mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT);
	return 0;
}
Contributors
| Person | Tokens | Proportion | Commits | Commit Proportion |
| sagi grimberg | 61 | 100.00% | 1 | 100.00% |
| Total | 61 | 100.00% | 1 | 100.00% |
int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);
	int rc;
	mr->npages = 0;
	ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
				   mr->page_map_size, DMA_TO_DEVICE);
	rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);
	ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
				      mr->page_map_size, DMA_TO_DEVICE);
	return rc;
}
Contributors
| Person | Tokens | Proportion | Commits | Commit Proportion |
| sagi grimberg | 85 | 90.43% | 1 | 25.00% |
| christoph hellwig | 6 | 6.38% | 1 | 25.00% |
| chuck lever | 2 | 2.13% | 1 | 25.00% |
| bart van assche | 1 | 1.06% | 1 | 25.00% |
| Total | 94 | 100.00% | 4 | 100.00% |
Overall Contributors
| Person | Tokens | Proportion | Commits | Commit Proportion |
| roland dreier | 917 | 35.65% | 2 | 9.52% |
| jack morgenstein | 478 | 18.58% | 3 | 14.29% |
| matan barak | 434 | 16.87% | 3 | 14.29% |
| sagi grimberg | 379 | 14.74% | 2 | 9.52% |
| shani michaelli | 273 | 10.61% | 2 | 9.52% |
| chuck lever | 37 | 1.44% | 1 | 4.76% |
| vladimir sokolovsky | 24 | 0.93% | 2 | 9.52% |
| yishai hadas | 16 | 0.62% | 1 | 4.76% |
| christoph hellwig | 6 | 0.23% | 1 | 4.76% |
| tejun heo | 3 | 0.12% | 1 | 4.76% |
| shlomo pongratz | 2 | 0.08% | 1 | 4.76% |
| arthur kepner | 2 | 0.08% | 1 | 4.76% |
| bart van assche | 1 | 0.04% | 1 | 4.76% |
| Total | 2572 | 100.00% | 21 | 100.00% |
  