
Linux 4.14: drivers/rapidio/devices/rio_mport_cdev.c

/*
 * RapidIO mport character device
 *
 * Copyright 2014-2015 Integrated Device Technology, Inc.
 *    Alexandre Bounine <alexandre.bounine@idt.com>
 * Copyright 2014-2015 Prodrive Technologies
 *    Andre van Herk <andre.van.herk@prodrive-technologies.com>
 *    Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>
 * Copyright (C) 2014 Texas Instruments Incorporated
 *    Aurelien Jacquiot <a-jacquiot@ti.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cdev.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/kfifo.h>

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>

#include <linux/dma-mapping.h>
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
#include <linux/dmaengine.h>
#endif

#include <linux/rio.h>
#include <linux/rio_ids.h>
#include <linux/rio_drv.h>
#include <linux/rio_mport_cdev.h>

#include "../rio.h"


#define DRV_NAME	"rio_mport"

#define DRV_PREFIX	DRV_NAME ": "

#define DEV_NAME	"rio_mport"

#define DRV_VERSION     "1.0.0"

/* Debug output filtering masks */
enum {
	DBG_NONE	= 0,
	DBG_INIT	= BIT(0), /* driver init */
	DBG_EXIT	= BIT(1), /* driver exit */
	DBG_MPORT	= BIT(2), /* mport add/remove */
	DBG_RDEV	= BIT(3), /* RapidIO device add/remove */
	DBG_DMA		= BIT(4), /* DMA transfer messages */
	DBG_MMAP	= BIT(5), /* mapping messages */
	DBG_IBW		= BIT(6), /* inbound window */
	DBG_EVENT	= BIT(7), /* event handling messages */
	DBG_OBW		= BIT(8), /* outbound window messages */
	DBG_DBELL	= BIT(9), /* doorbell messages */
	DBG_ALL		= ~0,
};

#ifdef DEBUG

#define rmcd_debug(level, fmt, arg...)		\
	do {                                    \
                if (DBG_##level & dbg_level)    \
                        pr_debug(DRV_PREFIX "%s: " fmt "\n", __func__, ##arg); \
        } while (0)
#else

#define rmcd_debug(level, fmt, arg...) \
		no_printk(KERN_DEBUG pr_fmt(DRV_PREFIX fmt "\n"), ##arg)
#endif


#define rmcd_warn(fmt, arg...) \
	pr_warn(DRV_PREFIX "%s WARNING " fmt "\n", __func__, ##arg)


#define rmcd_error(fmt, arg...) \
	pr_err(DRV_PREFIX "%s ERROR " fmt "\n", __func__, ##arg)

MODULE_AUTHOR("Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>");
MODULE_AUTHOR("Aurelien Jacquiot <a-jacquiot@ti.com>");
MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>");
MODULE_AUTHOR("Andre van Herk <andre.van.herk@prodrive-technologies.com>");
MODULE_DESCRIPTION("RapidIO mport character device driver");
MODULE_LICENSE("GPL");

MODULE_VERSION(DRV_VERSION);


static int dma_timeout = 3000; /* DMA transfer timeout in msec */
module_param(dma_timeout, int, S_IRUGO);
MODULE_PARM_DESC(dma_timeout, "DMA Transfer Timeout in msec (default: 3000)");

#ifdef DEBUG

static u32 dbg_level = DBG_NONE;
module_param(dbg_level, uint, S_IWUSR | S_IWGRP | S_IRUGO);
MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
#endif
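/*
 * Usage sketch (assuming the module is built as rio_mport_cdev and DEBUG is
 * enabled): DBG_DMA | DBG_MMAP output can be selected at runtime with
 *   echo 0x30 > /sys/module/rio_mport_cdev/parameters/dbg_level
 * or at load time with "modprobe rio_mport_cdev dbg_level=0x30".
 */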

/*
 * An internal DMA coherent buffer
 */

struct mport_dma_buf {
	void		*ib_base;
	dma_addr_t	ib_phys;
	u32		ib_size;
	u64		ib_rio_base;
	bool		ib_map;
	struct file	*filp;
};

/*
 * Internal memory mapping structure
 */

enum rio_mport_map_dir {
	MAP_INBOUND,
	MAP_OUTBOUND,
	MAP_DMA,
};


struct rio_mport_mapping {
	struct list_head node;
	struct mport_dev *md;
	enum rio_mport_map_dir dir;
	u16 rioid;
	u64 rio_addr;
	dma_addr_t phys_addr; /* for mmap */
	void *virt_addr; /* kernel address, for dma_free_coherent */
	u64 size;
	struct kref ref; /* refcount of vmas sharing the mapping */
	struct file *filp;
};


struct rio_mport_dma_map {
	int valid;
	u64 length;
	void *vaddr;
	dma_addr_t paddr;
};


#define MPORT_MAX_DMA_BUFS	16

#define MPORT_EVENT_DEPTH	10

/*
 * mport_dev  driver-specific structure that represents mport device
 * @active    mport device status flag
 * @node      list node to maintain list of registered mports
 * @cdev      character device
 * @dev       associated device object
 * @mport     associated subsystem's master port device object
 * @buf_mutex lock for buffer handling
 * @file_mutex lock for open files list
 * @file_list  list of open files on given mport
 * @properties properties of this mport
 * @doorbells  list of inbound doorbell filters
 * @db_lock    lock for doorbell filter list
 * @portwrites queue of inbound portwrites
 * @pw_lock    lock for port write queue
 * @mappings   queue for memory mappings
 * @dma_chan   DMA channels associated with this device
 * @dma_ref    reference counter for the default DMA channel
 * @comp       completion used during DMA channel release
 */

struct mport_dev {
	atomic_t		active;
	struct list_head	node;
	struct cdev		cdev;
	struct device		dev;
	struct rio_mport	*mport;
	struct mutex		buf_mutex;
	struct mutex		file_mutex;
	struct list_head	file_list;
	struct rio_mport_properties	properties;
	struct list_head		doorbells;
	spinlock_t			db_lock;
	struct list_head		portwrites;
	spinlock_t			pw_lock;
	struct list_head	mappings;
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct dma_chan *dma_chan;
	struct kref	dma_ref;
	struct completion comp;
#endif
};

/*
 * mport_cdev_priv - data structure specific to individual file object
 *                   associated with an open device
 * @md            master port character device object
 * @async_queue   asynchronous notification queue
 * @list          file objects tracking list
 * @db_filters    inbound doorbell filters for this descriptor
 * @pw_filters    portwrite filters for this descriptor
 * @event_fifo    event fifo for this descriptor
 * @event_rx_wait wait queue for this descriptor
 * @fifo_lock     lock for event_fifo
 * @event_mask    event mask for this descriptor
 * @dmach         DMA engine channel allocated for specific file object
 */

struct mport_cdev_priv {
	struct mport_dev	*md;
	struct fasync_struct	*async_queue;
	struct list_head	list;
	struct list_head	db_filters;
	struct list_head	pw_filters;
	struct kfifo		event_fifo;
	wait_queue_head_t	event_rx_wait;
	spinlock_t		fifo_lock;
	u32			event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct dma_chan		*dmach;
	struct list_head	async_list;
	struct list_head	pend_list;
	spinlock_t		req_lock;
	struct mutex		dma_lock;
	struct kref		dma_ref;
	struct completion	comp;
#endif
};

/*
 * rio_mport_pw_filter - structure to describe a portwrite filter
 * @md_node   node in mport device's list
 * @priv_node node in private file object's list
 * @priv      reference to private data
 * @filter    actual portwrite filter
 */

struct rio_mport_pw_filter {
	struct list_head md_node;
	struct list_head priv_node;
	struct mport_cdev_priv *priv;
	struct rio_pw_filter filter;
};

/*
 * rio_mport_db_filter - structure to describe a doorbell filter
 * @data_node reference to device node
 * @priv_node node in private data
 * @priv      reference to private data
 * @filter    actual doorbell filter
 */

struct rio_mport_db_filter {
	struct list_head data_node;
	struct list_head priv_node;
	struct mport_cdev_priv *priv;
	struct rio_doorbell_filter filter;
};

static LIST_HEAD(mport_devs);
static DEFINE_MUTEX(mport_devs_lock);

#if (0) /* used by commented out portion of poll function : FIXME */
static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
#endif


static struct class *dev_class;
static dev_t dev_number;

static struct workqueue_struct *dma_wq;

static void mport_release_mapping(struct kref *ref);


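/*
 * rio_mport_maint_rd() - read from local or remote maintenance space
 * @priv: driver private data
 * @arg: user-space buffer describing the maintenance transaction
 * @local: non-zero to access the local mport registers
 */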
static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
			      int local)
{
	struct rio_mport *mport = priv->md->mport;
	struct rio_mport_maint_io maint_io;
	u32 *buffer;
	u32 offset;
	size_t length;
	int ret, i;

	if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
		return -EFAULT;

	if ((maint_io.offset % 4) ||
	    (maint_io.length == 0) || (maint_io.length % 4) ||
	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
		return -EINVAL;

	buffer = vmalloc(maint_io.length);
	if (buffer == NULL)
		return -ENOMEM;
	length = maint_io.length/sizeof(u32);
	offset = maint_io.offset;

	for (i = 0; i < length; i++) {
		if (local)
			ret = __rio_local_read_config_32(mport,
				offset, &buffer[i]);
		else
			ret = rio_mport_read_config_32(mport, maint_io.rioid,
				maint_io.hopcount, offset, &buffer[i]);
		if (ret)
			goto out;

		offset += 4;
	}

	if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer,
				   buffer, maint_io.length)))
		ret = -EFAULT;
out:
	vfree(buffer);
	return ret;
}


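/*
 * rio_mport_maint_wr() - write to local or remote maintenance space
 * @priv: driver private data
 * @arg: user-space buffer describing the maintenance transaction
 * @local: non-zero to access the local mport registers
 */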
static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
			      int local)
{
	struct rio_mport *mport = priv->md->mport;
	struct rio_mport_maint_io maint_io;
	u32 *buffer;
	u32 offset;
	size_t length;
	int ret = -EINVAL, i;

	if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
		return -EFAULT;

	if ((maint_io.offset % 4) ||
	    (maint_io.length == 0) || (maint_io.length % 4) ||
	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
		return -EINVAL;

	buffer = vmalloc(maint_io.length);
	if (buffer == NULL)
		return -ENOMEM;
	length = maint_io.length;

	if (unlikely(copy_from_user(buffer,
			(void __user *)(uintptr_t)maint_io.buffer, length))) {
		ret = -EFAULT;
		goto out;
	}

	offset = maint_io.offset;
	length /= sizeof(u32);

	for (i = 0; i < length; i++) {
		if (local)
			ret = __rio_local_write_config_32(mport,
							  offset, buffer[i]);
		else
			ret = rio_mport_write_config_32(mport, maint_io.rioid,
							maint_io.hopcount,
							offset, buffer[i]);
		if (ret)
			goto out;

		offset += 4;
	}

out:
	vfree(buffer);
	return ret;
}

/*
 * Inbound/outbound memory mapping functions
 */
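/*
 * rio_mport_create_outbound_mapping() - map a RapidIO address range through
 * an outbound window and track it on the mport's mapping list
 * @md: mport device
 * @filp: file object that owns the mapping
 * @rioid: destination RapidIO device ID
 * @raddr: RapidIO base address of the window
 * @size: window size
 * @paddr: returned local physical address backing the window
 */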
static int
rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
				  u16 rioid, u64 raddr, u32 size,
				  dma_addr_t *paddr)
{
	struct rio_mport *mport = md->mport;
	struct rio_mport_mapping *map;
	int ret;

	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size);

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	ret = rio_map_outb_region(mport, rioid, raddr, size, 0, paddr);
	if (ret < 0)
		goto err_map_outb;

	map->dir = MAP_OUTBOUND;
	map->rioid = rioid;
	map->rio_addr = raddr;
	map->size = size;
	map->phys_addr = *paddr;
	map->filp = filp;
	map->md = md;
	kref_init(&map->ref);
	list_add_tail(&map->node, &md->mappings);
	return 0;
err_map_outb:
	kfree(map);
	return ret;
}


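/*
 * rio_mport_get_outbound_mapping() - reuse an existing outbound mapping that
 * matches the request exactly, fail with -EBUSY on a partial overlap, or
 * create a new mapping otherwise.
 */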
static int
rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,
			       u16 rioid, u64 raddr, u32 size,
			       dma_addr_t *paddr)
{
	struct rio_mport_mapping *map;
	int err = -ENOMEM;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (map->dir != MAP_OUTBOUND)
			continue;
		if (rioid == map->rioid &&
		    raddr == map->rio_addr && size == map->size) {
			*paddr = map->phys_addr;
			err = 0;
			break;
		} else if (rioid == map->rioid &&
			   raddr < (map->rio_addr + map->size - 1) &&
			   (raddr + size) > map->rio_addr) {
			err = -EBUSY;
			break;
		}
	}

	/* If not found, create new */
	if (err == -ENOMEM)
		err = rio_mport_create_outbound_mapping(md, filp, rioid, raddr,
						size, paddr);
	mutex_unlock(&md->buf_mutex);
	return err;
}


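/*
 * rio_mport_obw_map() - map an OutBound Window and return its physical
 * address handle to user space
 * @filp: file object
 * @arg: pointer to a struct rio_mmap in user space
 */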
static int rio_mport_obw_map(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *data = priv->md;
	struct rio_mmap map;
	dma_addr_t paddr;
	int ret;

	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
		return -EFAULT;

	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx",
		   map.rioid, map.rio_addr, map.length);

	ret = rio_mport_get_outbound_mapping(data, filp, map.rioid,
					     map.rio_addr, map.length, &paddr);
	if (ret < 0) {
		rmcd_error("Failed to set OBW err= %d", ret);
		return ret;
	}

	map.handle = paddr;

	if (unlikely(copy_to_user(arg, &map, sizeof(map))))
		return -EFAULT;

	return 0;
}

/*
 * rio_mport_obw_free() - unmap an OutBound Window from RapidIO address space
 *
 * @filp: file object
 * @arg: buffer handle returned by allocation routine
 */
static int rio_mport_obw_free(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	struct rio_mport_mapping *map, *_map;

	if (!md->mport->ops->unmap_outb)
		return -EPROTONOSUPPORT;

	if (copy_from_user(&handle, arg, sizeof(handle)))
		return -EFAULT;

	rmcd_debug(OBW, "h=0x%llx", handle);

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_OUTBOUND && map->phys_addr == handle) {
			if (map->filp == filp) {
				rmcd_debug(OBW, "kref_put h=0x%llx", handle);
				map->filp = NULL;
				kref_put(&map->ref, mport_release_mapping);
			}
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	return 0;
}

/*
 * maint_hdid_set() - Set the host Device ID
 * @priv: driver private data
 * @arg: Device Id
 */
static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	u16 hdid;

	if (copy_from_user(&hdid, arg, sizeof(hdid)))
		return -EFAULT;

	md->mport->host_deviceid = hdid;
	md->properties.hdid = hdid;
	rio_local_set_device_id(md->mport, hdid);

	rmcd_debug(MPORT, "Set host device Id to %d", hdid);

	return 0;
}

/*
 * maint_comptag_set() - Set the host Component Tag
 * @priv: driver private data
 * @arg: Component Tag
 */
static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	u32 comptag;

	if (copy_from_user(&comptag, arg, sizeof(comptag)))
		return -EFAULT;

	rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);

	rmcd_debug(MPORT, "Set host Component Tag to %d", comptag);

	return 0;
}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE

struct mport_dma_req {
	struct list_head node;
	struct file *filp;
	struct mport_cdev_priv *priv;
	enum rio_transfer_sync sync;
	struct sg_table sgt;
	struct page **page_list;	/* pinned user pages, if any */
	unsigned int nr_pages;
	struct rio_mport_mapping *map;	/* kernel buffer mapping, if any */
	struct dma_chan *dmach;
	enum dma_data_direction dir;
	dma_cookie_t cookie;
	enum dma_status status;
	struct completion req_comp;	/* signaled from the DMA callback */
};

struct mport_faf_work {
	struct work_struct work;
	struct mport_dma_req *req;
};
static void mport_release_def_dma(struct kref *dma_ref)
{
	struct mport_dev *md =
			container_of(dma_ref, struct mport_dev, dma_ref);

	rmcd_debug(EXIT, "DMA_%d", md->dma_chan->chan_id);
	rio_release_dma(md->dma_chan);
	md->dma_chan = NULL;
}

static void mport_release_dma(struct kref *dma_ref)
{
	struct mport_cdev_priv *priv =
			container_of(dma_ref, struct mport_cdev_priv, dma_ref);

	rmcd_debug(EXIT, "DMA_%d", priv->dmach->chan_id);
	complete(&priv->comp);
}


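/*
 * dma_req_free() - release all resources attached to a DMA request:
 * unmap the scatterlist, release pinned pages, drop the kernel buffer
 * mapping reference and the DMA channel reference.
 */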
static void dma_req_free(struct mport_dma_req *req)
{
	struct mport_cdev_priv *priv = req->priv;
	unsigned int i;

	dma_unmap_sg(req->dmach->device->dev,
		     req->sgt.sgl, req->sgt.nents, req->dir);
	sg_free_table(&req->sgt);
	if (req->page_list) {
		for (i = 0; i < req->nr_pages; i++)
			put_page(req->page_list[i]);
		kfree(req->page_list);
	}

	if (req->map) {
		mutex_lock(&req->map->md->buf_mutex);
		kref_put(&req->map->ref, mport_release_mapping);
		mutex_unlock(&req->map->md->buf_mutex);
	}

	kref_put(&priv->dma_ref, mport_release_dma);

	kfree(req);
}


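/* DMA completion callback: record the transfer status and wake the waiter */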
static void dma_xfer_callback(void *param)
{
	struct mport_dma_req *req = (struct mport_dma_req *)param;
	struct mport_cdev_priv *priv = req->priv;

	req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
					       NULL, NULL);
	complete(&req->req_comp);
}

static void dma_faf_cleanup(struct work_struct *_work)
{
	struct mport_faf_work *work = container_of(_work,
						struct mport_faf_work, work);
	struct mport_dma_req *req = work->req;

	dma_req_free(req);
	kfree(work);
}


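/*
 * dma_faf_callback() - completion callback for "fire and forget" transfers.
 * Cleanup is deferred to a workqueue because dma_req_free() sleeps
 * (mutex_lock, dma_unmap_sg) and this callback may run in atomic context.
 */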
static void dma_faf_callback(void *param)
{
	struct mport_dma_req *req = (struct mport_dma_req *)param;
	struct mport_faf_work *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return;

	INIT_WORK(&work->work, dma_faf_cleanup);
	work->req = req;
	queue_work(dma_wq, &work->work);
}

/*
 * prep_dma_xfer() - Configure and send request to DMAengine to prepare DMA
 * transfer object.
 * Returns pointer to DMA transaction descriptor allocated by DMA driver on
 * success or ERR_PTR (and/or NULL) if failed. Caller must check returned
 * non-NULL pointer using IS_ERR macro.
 */
static struct dma_async_tx_descriptor
*prep_dma_xfer(struct dma_chan *chan, struct rio_transfer_io *transfer,
	struct sg_table *sgt, int nents, enum dma_transfer_direction dir,
	enum dma_ctrl_flags flags)
{
	struct rio_dma_data tx_data;

	tx_data.sg = sgt->sgl;
	tx_data.sg_len = nents;
	tx_data.rio_addr_u = 0;
	tx_data.rio_addr = transfer->rio_addr;
	if (dir == DMA_MEM_TO_DEV) {
		switch (transfer->method) {
		case RIO_EXCHANGE_NWRITE:
			tx_data.wr_type = RDW_ALL_NWRITE;
			break;
		case RIO_EXCHANGE_NWRITE_R_ALL:
			tx_data.wr_type = RDW_ALL_NWRITE_R;
			break;
		case RIO_EXCHANGE_NWRITE_R:
			tx_data.wr_type = RDW_LAST_NWRITE_R;
			break;
		case RIO_EXCHANGE_DEFAULT:
			tx_data.wr_type = RDW_DEFAULT;
			break;
		default:
			return ERR_PTR(-EINVAL);
		}
	}

	return rio_dma_prep_xfer(chan, transfer->rioid, &tx_data, dir, flags);
}

/*
 * Request a DMA channel associated with this mport device.
 * Try to request a DMA channel for every new process that opened the given
 * mport. If a new DMA channel is not available, use the default channel,
 * which is the first DMA channel opened on the mport device.
 */
static int get_dma_channel(struct mport_cdev_priv *priv)
{
	mutex_lock(&priv->dma_lock);
	if (!priv->dmach) {
		priv->dmach = rio_request_mport_dma(priv->md->mport);
		if (!priv->dmach) {
			/* Use default DMA channel if available */
			if (priv->md->dma_chan) {
				priv->dmach = priv->md->dma_chan;
				kref_get(&priv->md->dma_ref);
			} else {
				rmcd_error("Failed to get DMA channel");
				mutex_unlock(&priv->dma_lock);
				return -ENODEV;
			}
		} else if (!priv->md->dma_chan) {
			/* Register default DMA channel if we do not have one */
			priv->md->dma_chan = priv->dmach;
			kref_init(&priv->md->dma_ref);
			rmcd_debug(DMA, "Register DMA_chan %d as default",
				   priv->dmach->chan_id);
		}

		kref_init(&priv->dma_ref);
		init_completion(&priv->comp);
	}

	kref_get(&priv->dma_ref);
	mutex_unlock(&priv->dma_lock);
	return 0;
}

static void put_dma_channel(struct mport_cdev_priv *priv)
{
	kref_put(&priv->dma_ref, mport_release_dma);
}

/*
 * DMA transfer functions
 */
static int do_dma_request(struct mport_dma_req *req,
			  struct rio_transfer_io *xfer,
			  enum rio_transfer_sync sync, int nents)
{
	struct mport_cdev_priv *priv;
	struct sg_table *sgt;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	unsigned long tmo = msecs_to_jiffies(dma_timeout);
	enum dma_transfer_direction dir;
	long wret;
	int ret = 0;

	priv = req->priv;
	sgt = &req