Release 4.15 drivers/net/ethernet/mellanox/mlx5/core/dev.c
  
  
  
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
static LIST_HEAD(intf_list);
static LIST_HEAD(mlx5_dev_list);
/* intf dev list mutex */
static DEFINE_MUTEX(mlx5_intf_mutex);
struct mlx5_device_context {
	struct list_head	list;
	struct mlx5_interface  *intf;
	void		       *context;
	unsigned long		state;
};
struct mlx5_delayed_event {
	struct list_head	list;
	struct mlx5_core_dev	*dev;
	enum mlx5_dev_event	event;
	unsigned long		param;
};
enum {
	MLX5_INTERFACE_ADDED,
	MLX5_INTERFACE_ATTACHED,
};
static void add_delayed_event(struct mlx5_priv *priv,
			      struct mlx5_core_dev *dev,
			      enum mlx5_dev_event event,
			      unsigned long param)
{
	struct mlx5_delayed_event *delayed_event;
	delayed_event = kzalloc(sizeof(*delayed_event), GFP_ATOMIC);
	if (!delayed_event) {
		mlx5_core_err(dev, "event %d is missed\n", event);
		return;
	}
	mlx5_core_dbg(dev, "Accumulating event %d\n", event);
	delayed_event->dev = dev;
	delayed_event->event = event;
	delayed_event->param = param;
	list_add_tail(&delayed_event->list, &priv->waiting_events_list);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Erez Shitrit | 99 | 100.00% | 1 | 100.00% | 
| Total | 99 | 100.00% | 1 | 100.00% | 
static void delayed_event_release(struct mlx5_device_context *dev_ctx,
				  struct mlx5_priv *priv)
{
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
	struct mlx5_delayed_event *de;
	struct mlx5_delayed_event *n;
	struct list_head temp;
	INIT_LIST_HEAD(&temp);
	spin_lock_irq(&priv->ctx_lock);
	priv->is_accum_events = false;
	list_splice_init(&priv->waiting_events_list, &temp);
	if (!dev_ctx->context)
		goto out;
	list_for_each_entry_safe(de, n, &temp, list)
		dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);
out:
	spin_unlock_irq(&priv->ctx_lock);
	list_for_each_entry_safe(de, n, &temp, list) {
		list_del(&de->list);
		kfree(de);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Huy Nguyen | 77 | 50.66% | 2 | 66.67% | 
| Erez Shitrit | 75 | 49.34% | 1 | 33.33% | 
| Total | 152 | 100.00% | 3 | 100.00% | 
/* Accumulate events that may arrive after mlx5_ib has called
 * ib_register_device(), until that interface is added to the events list.
 */
static void delayed_event_start(struct mlx5_priv *priv)
{
	spin_lock_irq(&priv->ctx_lock);
	priv->is_accum_events = true;
	spin_unlock_irq(&priv->ctx_lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Erez Shitrit | 31 | 93.94% | 1 | 50.00% | 
| Huy Nguyen | 2 | 6.06% | 1 | 50.00% | 
| Total | 33 | 100.00% | 2 | 100.00% | 
void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
	if (!mlx5_lag_intf_add(intf, priv))
		return;
	dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);
	if (!dev_ctx)
		return;
	dev_ctx->intf = intf;
	delayed_event_start(priv);
	dev_ctx->context = intf->add(dev);
	set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
	if (intf->attach)
		set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
	if (dev_ctx->context) {
		spin_lock_irq(&priv->ctx_lock);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
		if (dev_ctx->intf->pfault) {
			if (priv->pfault) {
				mlx5_core_err(dev, "multiple page fault handlers not supported");
			} else {
				priv->pfault_ctx = dev_ctx->context;
				priv->pfault = dev_ctx->intf->pfault;
			}
		}
#endif
		spin_unlock_irq(&priv->ctx_lock);
	}
	delayed_event_release(dev_ctx, priv);
	if (!dev_ctx->context)
		kfree(dev_ctx);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mohamad Haj Yahia | 149 | 67.73% | 1 | 25.00% | 
| Artemy Kovalyov | 51 | 23.18% | 1 | 25.00% | 
| Huy Nguyen | 15 | 6.82% | 1 | 25.00% | 
| Erez Shitrit | 5 | 2.27% | 1 | 25.00% | 
| Total | 220 | 100.00% | 4 | 100.00% | 
static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf,
						   struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf == intf)
			return dev_ctx;
	return NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mohamad Haj Yahia | 45 | 100.00% | 1 | 100.00% | 
| Total | 45 | 100.00% | 1 | 100.00% | 
void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
	dev_ctx = mlx5_get_device(intf, priv);
	if (!dev_ctx)
		return;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	spin_lock_irq(&priv->ctx_lock);
	if (priv->pfault == dev_ctx->intf->pfault)
		priv->pfault = NULL;
	spin_unlock_irq(&priv->ctx_lock);
	synchronize_srcu(&priv->pfault_srcu);
#endif
	spin_lock_irq(&priv->ctx_lock);
	list_del(&dev_ctx->list);
	spin_unlock_irq(&priv->ctx_lock);
	if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
		intf->remove(dev, dev_ctx->context);
	kfree(dev_ctx);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mohamad Haj Yahia | 102 | 68.46% | 1 | 50.00% | 
| Artemy Kovalyov | 47 | 31.54% | 1 | 50.00% | 
| Total | 149 | 100.00% | 2 | 100.00% | 
static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
	dev_ctx = mlx5_get_device(intf, priv);
	if (!dev_ctx)
		return;
	delayed_event_start(priv);
	if (intf->attach) {
		if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
			goto out;
		intf->attach(dev, dev_ctx->context);
		set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
	} else {
		if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
			goto out;
		dev_ctx->context = intf->add(dev);
		set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
	}
out:
	delayed_event_release(dev_ctx, priv);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mohamad Haj Yahia | 128 | 86.49% | 1 | 50.00% | 
| Huy Nguyen | 20 | 13.51% | 1 | 50.00% | 
| Total | 148 | 100.00% | 2 | 100.00% | 
void mlx5_attach_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;
	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_attach_interface(intf, priv);
	mutex_unlock(&mlx5_intf_mutex);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mohamad Haj Yahia | 52 | 100.00% | 1 | 100.00% | 
| Total | 52 | 100.00% | 1 | 100.00% | 
static void mlx5_detach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
	dev_ctx = mlx5_get_device(intf, priv);
	if (!dev_ctx)
		return;
	if (intf->detach) {
		if (!test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
			return;
		intf->detach(dev, dev_ctx->context);
		clear_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
	} else {
		if (!test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
			return;
		intf->remove(dev, dev_ctx->context);
		clear_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mohamad Haj Yahia | 132 | 100.00% | 1 | 100.00% | 
| Total | 132 | 100.00% | 1 | 100.00% | 
void mlx5_detach_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;
	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_detach_interface(intf, priv);
	mutex_unlock(&mlx5_intf_mutex);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mohamad Haj Yahia | 52 | 100.00% | 1 | 100.00% | 
| Total | 52 | 100.00% | 1 | 100.00% | 
bool mlx5_device_registered(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv;
	bool found = false;
	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
		if (priv == &dev->priv)
			found = true;
	mutex_unlock(&mlx5_intf_mutex);
	return found;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mohamad Haj Yahia | 56 | 100.00% | 1 | 100.00% | 
| Total | 56 | 100.00% | 1 | 100.00% | 
int mlx5_register_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;
	mutex_lock(&mlx5_intf_mutex);
	list_add_tail(&priv->dev_list, &mlx5_dev_list);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&mlx5_intf_mutex);
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mohamad Haj Yahia | 66 | 100.00% | 1 | 100.00% | 
| Total | 66 | 100.00% | 1 | 100.00% | 
void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;
	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_remove_device(intf, priv);
	list_del(&priv->dev_list);
	mutex_unlock(&mlx5_intf_mutex);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mohamad Haj Yahia | 60 | 100.00% | 1 | 100.00% | 
| Total | 60 | 100.00% | 1 | 100.00% | 
int mlx5_register_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;
	if (!intf->add || !intf->remove)
		return -EINVAL;
	mutex_lock(&mlx5_intf_mutex);
	list_add_tail(&intf->list, &intf_list);
	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&mlx5_intf_mutex);
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mohamad Haj Yahia | 72 | 100.00% | 1 | 100.00% | 
| Total | 72 | 100.00% | 1 | 100.00% | 
EXPORT_SYMBOL(mlx5_register_interface);
void mlx5_unregister_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;
	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
		mlx5_remove_device(intf, priv);
	list_del(&intf->list);
	mutex_unlock(&mlx5_intf_mutex);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mohamad Haj Yahia | 50 | 100.00% | 1 | 100.00% | 
| Total | 50 | 100.00% | 1 | 100.00% | 
EXPORT_SYMBOL(mlx5_unregister_interface);
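For orientation, a client such as mlx5_ib or mlx5e plugs into this file by filling a struct mlx5_interface and calling mlx5_register_interface(), which walks mlx5_dev_list and invokes add() for every core device already probed, while mlx5_register_device() performs the mirror walk for devices probed later. The sketch below is illustrative only: the my_* callbacks and the module wrappers are hypothetical, while the registration calls and the add/remove/event signatures are the ones this file uses.

```c
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mlx5/driver.h>

/* Hypothetical client sketch; the my_* names are illustrative, not mlx5 API. */
static void *my_add(struct mlx5_core_dev *dev)
{
	/* Return per-device state; the core hands it back to remove()/event(). */
	return kzalloc(sizeof(unsigned long), GFP_KERNEL);
}

static void my_remove(struct mlx5_core_dev *dev, void *context)
{
	kfree(context);
}

static void my_event(struct mlx5_core_dev *dev, void *context,
		     enum mlx5_dev_event event, unsigned long param)
{
	/* Invoked from mlx5_core_event() under priv->ctx_lock: must not sleep. */
}

static struct mlx5_interface my_intf = {
	.add    = my_add,
	.remove = my_remove,
	.event  = my_event,
};

static int __init my_init(void)
{
	return mlx5_register_interface(&my_intf);
}

static void __exit my_exit(void)
{
	mlx5_unregister_interface(&my_intf);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("Dual BSD/GPL");
```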
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
{
	struct mlx5_priv *priv = &mdev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;
	void *result = NULL;
	spin_lock_irqsave(&priv->ctx_lock, flags);
	list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
		if ((dev_ctx->intf->protocol == protocol) &&
		    dev_ctx->intf->get_dev) {
			result = dev_ctx->intf->get_dev(dev_ctx->context);
			break;
		}
	spin_unlock_irqrestore(&priv->ctx_lock, flags);
	return result;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mohamad Haj Yahia | 104 | 100.00% | 1 | 100.00% | 
| Total | 104 | 100.00% | 1 | 100.00% | 
EXPORT_SYMBOL(mlx5_get_protocol_dev);
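As a usage note, the protocol lookup above is how one registered interface reaches another interface's per-device object on the same mlx5_core_dev. A minimal sketch follows, assuming the Ethernet interface has registered with .protocol = MLX5_INTERFACE_PROTOCOL_ETH and a get_dev callback that returns its net_device (as mlx5e does); example_get_eth_netdev is a hypothetical helper, not part of the core API.

```c
#include <linux/netdevice.h>
#include <linux/mlx5/driver.h>

/* Hypothetical helper: returns NULL if no ETH interface is currently bound
 * to mdev, or if that interface did not provide a get_dev callback.
 */
static struct net_device *example_get_eth_netdev(struct mlx5_core_dev *mdev)
{
	return mlx5_get_protocol_dev(mdev, MLX5_INTERFACE_PROTOCOL_ETH);
}
```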
/* Must be called with intf_mutex held */
void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
{
	struct mlx5_interface *intf;
	list_for_each_entry(intf, &intf_list, list)
		if (intf->protocol == protocol) {
			mlx5_add_device(intf, &dev->priv);
			break;
		}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mohamad Haj Yahia | 47 | 100.00% | 1 | 100.00% | 
| Total | 47 | 100.00% | 1 | 100.00% | 
/* Must be called with intf_mutex held */
void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
{
	struct mlx5_interface *intf;
	list_for_each_entry(intf, &intf_list, list)
		if (intf->protocol == protocol) {
			mlx5_remove_device(intf, &dev->priv);
			break;
		}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mohamad Haj Yahia | 47 | 100.00% | 1 | 100.00% | 
| Total | 47 | 100.00% | 1 | 100.00% | 
static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
{
	return (u16)((dev->pdev->bus->number << 8) |
		     PCI_SLOT(dev->pdev->devfn));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mohamad Haj Yahia | 38 | 100.00% | 1 | 100.00% | 
| Total | 38 | 100.00% | 1 | 100.00% | 
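A quick worked example of the ID above (values are illustrative): two physical functions of one dual-port adapter, say 81:00.0 and 81:00.1, differ only in the PCI function number, which PCI_SLOT() discards, so both yield the same 16-bit ID and mlx5_get_next_phys_dev() below can match them as siblings (e.g. for LAG pairing).

```c
/* Illustrative values: 81:00.0 and 81:00.1 share bus 0x81 and slot 0x00. */
u16 id_fn0 = (u16)((0x81 << 8) | PCI_SLOT(PCI_DEVFN(0x00, 0)));	/* 0x8100 */
u16 id_fn1 = (u16)((0x81 << 8) | PCI_SLOT(PCI_DEVFN(0x00, 1)));	/* 0x8100 */
```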
/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
	u16 pci_id = mlx5_gen_pci_id(dev);
	struct mlx5_core_dev *res = NULL;
	struct mlx5_core_dev *tmp_dev;
	struct mlx5_priv *priv;
	list_for_each_entry(priv, &mlx5_dev_list, dev_list) {
		tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
		if ((dev != tmp_dev) && (mlx5_gen_pci_id(tmp_dev) == pci_id)) {
			res = tmp_dev;
			break;
		}
	}
	return res;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mohamad Haj Yahia | 86 | 100.00% | 1 | 100.00% | 
| Total | 86 | 100.00% | 1 | 100.00% | 
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
		     unsigned long param)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;
	spin_lock_irqsave(&priv->ctx_lock, flags);
	if (priv->is_accum_events)
		add_delayed_event(priv, dev, event, param);
	/* After mlx5_detach_device(), dev_ctx->intf is still set and dev_ctx is
	 * still on priv->ctx_list. In that case, only notify the dev_ctx if its
	 * ADDED or ATTACHED bit is set.
	 */
	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->event &&
		    (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state) ||
		     test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)))
			dev_ctx->intf->event(dev, dev_ctx->context, event, param);
	spin_unlock_irqrestore(&priv->ctx_lock, flags);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mohamad Haj Yahia | 90 | 69.23% | 1 | 33.33% | 
| Huy Nguyen | 23 | 17.69% | 1 | 33.33% | 
| Erez Shitrit | 17 | 13.08% | 1 | 33.33% | 
| Total | 130 | 100.00% | 3 | 100.00% | 
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_core_page_fault(struct mlx5_core_dev *dev,
			  struct mlx5_pagefault *pfault)
{
	struct mlx5_priv *priv = &dev->priv;
	int srcu_idx;
	srcu_idx = srcu_read_lock(&priv->pfault_srcu);
	if (priv->pfault)
		priv->pfault(dev, priv->pfault_ctx, pfault);
	srcu_read_unlock(&priv->pfault_srcu, srcu_idx);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Artemy Kovalyov | 67 | 100.00% | 1 | 100.00% | 
| Total | 67 | 100.00% | 1 | 100.00% | 
#endif
void mlx5_dev_list_lock(void)
{
	mutex_lock(&mlx5_intf_mutex);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mohamad Haj Yahia | 13 | 100.00% | 1 | 100.00% | 
| Total | 13 | 100.00% | 1 | 100.00% | 
void mlx5_dev_list_unlock(void)
{
	mutex_unlock(&mlx5_intf_mutex);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mohamad Haj Yahia | 13 | 100.00% | 1 | 100.00% | 
| Total | 13 | 100.00% | 1 | 100.00% | 
int mlx5_dev_list_trylock(void)
{
	return mutex_trylock(&mlx5_intf_mutex);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mohamad Haj Yahia | 14 | 100.00% | 1 | 100.00% | 
| Total | 14 | 100.00% | 1 | 100.00% | 
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mohamad Haj Yahia | 1489 | 72.78% | 1 | 20.00% | 
| Erez Shitrit | 249 | 12.17% | 1 | 20.00% | 
| Artemy Kovalyov | 170 | 8.31% | 1 | 20.00% | 
| Huy Nguyen | 138 | 6.74% | 2 | 40.00% | 
| Total | 2046 | 100.00% | 5 | 100.00% | 