/* Release 4.11 drivers/scsi/mvumi.c */
/*
* Marvell UMI driver
*
* Copyright 2011 Marvell. <jyli@marvell.com>
*
* This file is licensed under GPLv2.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; version 2 of the
* License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/blkdev.h>
#include <linux/io.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_eh.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>
#include "mvumi.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("jyli@marvell.com");
MODULE_DESCRIPTION("Marvell UMI Driver");
static const struct pci_device_id mvumi_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, mvumi_pci_table);
/*
 * tag_init - reset a tag stack so that every tag in [0, size) is free.
 * @st:   tag stack to (re)initialize; its capacity must equal @size
 * @size: number of tags the stack was created to hold
 *
 * Pushes the tags in descending order so tag 0 sits on top and is
 * handed out first.
 */
static void tag_init(struct mvumi_tag *st, unsigned short size)
{
	unsigned short slot = 0, tag = size;

	/* The tag stack is fixed-capacity; reject mismatched sizes. */
	BUG_ON(size != st->size);

	st->top = size;
	while (tag--)
		st->stack[slot++] = tag;
}
/*
 * Contributors
 * Person      | Tokens | Prop    | Commits | CommitProp |
 * Jianyun Li  | 60     | 100.00% | 1       | 100.00%    |
 * Total       | 60     | 100.00% | 1       | 100.00%    |
 */
/*
 * tag_get_one - pop a free tag off the stack.
 * @mhba: adapter soft state (unused here; kept for API symmetry)
 * @st:   tag stack to take the tag from
 *
 * The caller must ensure a free tag exists; popping an empty stack
 * triggers BUG_ON().
 */
static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
{
	unsigned short tag;

	BUG_ON(st->top <= 0);
	st->top--;
	tag = st->stack[st->top];
	return tag;
}
/*
 * Contributors
 * Person      | Tokens | Prop    | Commits | CommitProp |
 * Jianyun Li  | 37     | 100.00% | 1       | 100.00%    |
 * Total       | 37     | 100.00% | 1       | 100.00%    |
 */
/*
 * tag_release_one - push a tag back onto the free stack.
 * @mhba: adapter soft state (unused here; kept for API symmetry)
 * @st:   tag stack to return the tag to
 * @tag:  tag being freed
 */
static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
						unsigned short tag)
{
	/* An overflowing stack would mean a tag was released twice. */
	BUG_ON(st->top >= st->size);
	st->stack[st->top] = tag;
	st->top++;
}
/*
 * Contributors
 * Person      | Tokens | Prop    | Commits | CommitProp |
 * Jianyun Li  | 43     | 100.00% | 1       | 100.00%    |
 * Total       | 43     | 100.00% | 1       | 100.00%    |
 */
/*
 * tag_is_empty - check whether the tag stack has no free tags left.
 * @st: tag stack to inspect
 *
 * Return: true when every tag is in use, false otherwise.
 */
static bool tag_is_empty(struct mvumi_tag *st)
{
	/* A bool function should return the predicate directly instead of
	 * the integer literals 1/0 through an if/else. */
	return st->top == 0;
}
/*
 * Contributors
 * Person      | Tokens | Prop    | Commits | CommitProp |
 * Jianyun Li  | 26     | 100.00% | 1       | 100.00%    |
 * Total       | 26     | 100.00% | 1       | 100.00%    |
 */
/*
 * mvumi_unmap_pci_addr - undo mvumi_map_pci_addr().
 * @dev:        PCI device whose BARs were mapped
 * @addr_array: per-BAR array of mapped virtual addresses (or NULL)
 *
 * Safe to call on a partially-filled array: only memory BARs that were
 * actually mapped are unmapped.
 */
static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
{
	int bar;

	for (bar = 0; bar < MAX_BASE_ADDRESS; bar++) {
		if (!(pci_resource_flags(dev, bar) & IORESOURCE_MEM))
			continue;
		if (addr_array[bar])
			pci_iounmap(dev, addr_array[bar]);
	}
}
/*
 * Contributors
 * Person      | Tokens | Prop    | Commits | CommitProp |
 * Jianyun Li  | 60     | 100.00% | 1       | 100.00%    |
 * Total       | 60     | 100.00% | 1       | 100.00%    |
 */
static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
{
int i;
for (i = 0; i < MAX_BASE_ADDRESS; i++) {
if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
addr_array[i] = pci_iomap(dev, i, 0);
if (!addr_array[i]) {
dev_err(&dev->dev, "failed to map Bar[%d]\n",
i);
mvumi_unmap_pci_addr(dev, addr_array);
return -ENOMEM;
}
} else
addr_array[i] = NULL;
dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]);
}
return 0;
}
/*
 * Contributors
 * Person      | Tokens | Prop    | Commits | CommitProp |
 * Jianyun Li  | 122    | 100.00% | 1       | 100.00%    |
 * Total       | 122    | 100.00% | 1       | 100.00%    |
 */
/*
 * mvumi_alloc_mem_resource - allocate a tracked memory resource.
 * @mhba: adapter soft state
 * @type: RESOURCE_CACHED_MEMORY (kzalloc) or RESOURCE_UNCACHED_MEMORY
 *        (DMA-coherent, size rounded up to 8 bytes)
 * @size: requested size in bytes
 *
 * On success the resource is appended to mhba->res_list so that
 * mvumi_release_mem_resource() can free it later.
 *
 * Return: the new resource descriptor, or NULL on failure.
 */
static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
				enum resource_type type, unsigned int size)
{
	struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC);

	if (!res) {
		dev_err(&mhba->pdev->dev,
			"Failed to allocate memory for resource manager.\n");
		return NULL;
	}

	if (type == RESOURCE_CACHED_MEMORY) {
		res->virt_addr = kzalloc(size, GFP_ATOMIC);
		if (!res->virt_addr) {
			dev_err(&mhba->pdev->dev,
				"unable to allocate memory,size = %d.\n", size);
			goto free_res;
		}
	} else if (type == RESOURCE_UNCACHED_MEMORY) {
		/* Keep DMA buffers 8-byte aligned in size. */
		size = round_up(size, 8);
		res->virt_addr = pci_zalloc_consistent(mhba->pdev, size,
							&res->bus_addr);
		if (!res->virt_addr) {
			dev_err(&mhba->pdev->dev,
				"unable to allocate consistent mem,"
				"size = %d.\n", size);
			goto free_res;
		}
	} else {
		dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
		goto free_res;
	}

	res->type = type;
	res->size = size;
	INIT_LIST_HEAD(&res->entry);
	list_add_tail(&res->entry, &mhba->res_list);

	return res;

free_res:
	kfree(res);
	return NULL;
}
/*
 * Contributors
 * Person        | Tokens | Prop    | Commits | CommitProp |
 * Jianyun Li    | 229    | 98.28%  | 1       | 25.00%     |
 * Shun Fu       | 2      | 0.86%   | 1       | 25.00%     |
 * Joe Perches   | 1      | 0.43%   | 1       | 25.00%     |
 * Masanari Iida | 1      | 0.43%   | 1       | 25.00%     |
 * Total         | 233    | 100.00% | 4       | 100.00%    |
 */
/*
 * mvumi_release_mem_resource - free every resource on mhba->res_list.
 * @mhba: adapter soft state
 *
 * Frees each tracked allocation according to its type, unlinks and
 * frees its descriptor, then clears the MVUMI_FW_ALLOC flag.
 */
static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
{
	struct mvumi_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
		if (res->type == RESOURCE_UNCACHED_MEMORY)
			pci_free_consistent(mhba->pdev, res->size,
					res->virt_addr, res->bus_addr);
		else if (res->type == RESOURCE_CACHED_MEMORY)
			kfree(res->virt_addr);
		else
			dev_err(&mhba->pdev->dev,
				"unknown resource type %d\n", res->type);
		list_del(&res->entry);
		kfree(res);
	}
	mhba->fw_flag &= ~MVUMI_FW_ALLOC;
}
/*
 * Contributors
 * Person      | Tokens | Prop    | Commits | CommitProp |
 * Jianyun Li  | 111    | 100.00% | 1       | 100.00%    |
 * Total       | 111    | 100.00% | 1       | 100.00%    |
 */
/**
 * mvumi_make_sgl - Prepares SGL
 * @mhba: Adapter soft state
 * @scmd: SCSI command from the mid-layer
 * @sgl_p: SGL to be filled in
 * @sg_count: returns the number of SG elements used
 *
 * DMA-maps the command's data and builds the firmware scatter-gather
 * list at @sgl_p.  Addresses and sizes are stored little-endian; the
 * last element is marked with the controller's end-of-table flag.
 *
 * If successful, this function returns 0. otherwise, it returns -1.
 */
static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
					void *sgl_p, unsigned char *sg_count)
{
	struct scatterlist *sg;
	struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
	unsigned int i;
	unsigned int sgnum = scsi_sg_count(scmd);
	dma_addr_t busaddr;
	if (sgnum) {
		/* Scatter-gather path: map the whole sglist in one call. */
		sg = scsi_sglist(scmd);
		*sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
				(int) scmd->sc_data_direction);
		if (*sg_count > mhba->max_sge) {
			dev_err(&mhba->pdev->dev, "sg count[0x%x] is bigger "
					"than max sg[0x%x].\n",
					*sg_count, mhba->max_sge);
			/* NOTE(review): the sglist stays mapped on this
			 * error path - verify the caller unmaps it. */
			return -1;
		}
		for (i = 0; i < *sg_count; i++) {
			busaddr = sg_dma_address(&sg[i]);
			m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
			m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
			m_sg->flags = 0;
			sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
			/* Mark the final descriptor as end-of-table. */
			if ((i + 1) == *sg_count)
				m_sg->flags |= 1U << mhba->eot_flag;
			sgd_inc(mhba, m_sg);
		}
	} else {
		/* No sglist: map the flat buffer (if any) as one element;
		 * a zero-length transfer gets a zero DMA handle. */
		scmd->SCp.dma_handle = scsi_bufflen(scmd) ?
			pci_map_single(mhba->pdev, scsi_sglist(scmd),
				scsi_bufflen(scmd),
				(int) scmd->sc_data_direction)
			: 0;
		busaddr = scmd->SCp.dma_handle;
		m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
		m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
		m_sg->flags = 1U << mhba->eot_flag;
		sgd_setsz(mhba, m_sg, cpu_to_le32(scsi_bufflen(scmd)));
		*sg_count = 1;
	}
	return 0;
}
/*
 * Contributors
 * Person      | Tokens | Prop    | Commits | CommitProp |
 * Jianyun Li  | 309    | 91.96%  | 1       | 50.00%     |
 * Shun Fu     | 27     | 8.04%   | 1       | 50.00%     |
 * Total       | 336    | 100.00% | 2       | 100.00%    |
 */
/*
 * mvumi_internal_cmd_sgl - attach a DMA data buffer to an internal cmd.
 * @mhba: adapter soft state
 * @cmd:  internal command whose frame payload receives the single SGL
 * @size: data buffer size in bytes; 0 means no buffer is needed
 *
 * Allocates a zeroed DMA-coherent buffer and describes it with one
 * end-of-table SG descriptor inside the command frame.
 *
 * Return: 0 on success (or @size == 0), -1 on allocation failure.
 */
static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
						unsigned int size)
{
	dma_addr_t dma_handle;
	void *buf;
	struct mvumi_sgl *sgd;

	if (!size)
		return 0;

	buf = pci_zalloc_consistent(mhba->pdev, size, &dma_handle);
	if (!buf)
		return -1;

	cmd->data_buf = buf;
	cmd->frame->sg_counts = 1;

	sgd = (struct mvumi_sgl *) &cmd->frame->payload[0];
	sgd->baseaddr_l = cpu_to_le32(lower_32_bits(dma_handle));
	sgd->baseaddr_h = cpu_to_le32(upper_32_bits(dma_handle));
	sgd->flags = 1U << mhba->eot_flag;
	sgd_setsz(mhba, sgd, cpu_to_le32(size));

	return 0;
}
/*
 * Contributors
 * Person      | Tokens | Prop    | Commits | CommitProp |
 * Jianyun Li  | 132    | 91.67%  | 1       | 33.33%     |
 * Shun Fu     | 11     | 7.64%   | 1       | 33.33%     |
 * Joe Perches | 1      | 0.69%   | 1       | 33.33%     |
 * Total       | 144    | 100.00% | 3       | 100.00%    |
 */
/*
 * mvumi_create_internal_cmd - build a driver-private command.
 * @mhba:     adapter soft state
 * @buf_size: size of the optional DMA data buffer; 0 for none
 *
 * Allocates the command structure, its DMA-coherent firmware frame and,
 * when @buf_size is non-zero, a data buffer via
 * mvumi_internal_cmd_sgl().
 *
 * Return: the new command, or NULL on failure (all partial allocations
 * are released).
 */
static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
						unsigned int buf_size)
{
	struct mvumi_cmd *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd) {
		dev_err(&mhba->pdev->dev, "failed to create a internal cmd\n");
		return NULL;
	}
	INIT_LIST_HEAD(&cmd->queue_pointer);

	cmd->frame = pci_alloc_consistent(mhba->pdev,
			mhba->ib_max_size, &cmd->frame_phys);
	if (!cmd->frame) {
		dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
			" frame,size = %d.\n", mhba->ib_max_size);
		goto free_cmd;
	}

	if (!buf_size) {
		cmd->frame->sg_counts = 0;
		return cmd;
	}

	if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
		dev_err(&mhba->pdev->dev, "failed to allocate memory"
					" for internal frame\n");
		goto free_frame;
	}
	return cmd;

free_frame:
	pci_free_consistent(mhba->pdev, mhba->ib_max_size,
				cmd->frame, cmd->frame_phys);
free_cmd:
	kfree(cmd);
	return NULL;
}
/*
 * Contributors
 * Person      | Tokens | Prop    | Commits | CommitProp |
 * Jianyun Li  | 168    | 88.42%  | 1       | 50.00%     |
 * Shun Fu     | 22     | 11.58%  | 1       | 50.00%     |
 * Total       | 190    | 100.00% | 2       | 100.00%    |
 */
/*
 * mvumi_delete_internal_cmd - free a command created by
 * mvumi_create_internal_cmd(), including its data buffer (if any) and
 * its firmware frame.  Tolerates NULL @cmd or a missing frame.
 */
static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	struct mvumi_sgl *m_sg;
	unsigned int size;
	dma_addr_t phy_addr;
	if (cmd && cmd->frame) {
		if (cmd->frame->sg_counts) {
			/* Recover buffer size and bus address from the single
			 * SG descriptor stored in the frame payload. */
			m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
			sgd_getsz(mhba, m_sg, size);
			/* The double <<16 forms the high 32 bits while
			 * avoiding a plain <<32 (UB when dma_addr_t is 32-bit).
			 * NOTE(review): baseaddr_l/h were stored via
			 * cpu_to_le32() but are read back without
			 * le32_to_cpu() - verify on big-endian hosts. */
			phy_addr = (dma_addr_t) m_sg->baseaddr_l |
				(dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
			pci_free_consistent(mhba->pdev, size, cmd->data_buf,
								phy_addr);
		}
		pci_free_consistent(mhba->pdev, mhba->ib_max_size,
					cmd->frame, cmd->frame_phys);
		kfree(cmd);
	}
}
/*
 * Contributors
 * Person      | Tokens | Prop    | Commits | CommitProp |
 * Jianyun Li  | 118    | 86.13%  | 1       | 50.00%     |
 * Shun Fu     | 19     | 13.87%  | 1       | 50.00%     |
 * Total       | 137    | 100.00% | 2       | 100.00%    |
 */
/**
 * mvumi_get_cmd - Get a command from the free pool
 * @mhba: Adapter soft state
 *
 * Returns a free command from the pool, or NULL (after logging a
 * warning) when the pool is exhausted.
 */
static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd = NULL;

	if (likely(!list_empty(&mhba->cmd_pool))) {
		/* list_first_entry() is the idiomatic spelling of
		 * list_entry(head->next, ...). */
		cmd = list_first_entry(&mhba->cmd_pool,
					struct mvumi_cmd, queue_pointer);
		list_del_init(&cmd->queue_pointer);
	} else
		dev_warn(&mhba->pdev->dev, "command pool is empty!\n");

	return cmd;
}
/*
 * Contributors
 * Person      | Tokens | Prop    | Commits | CommitProp |
 * Jianyun Li  | 79     | 100.00% | 1       | 100.00%    |
 * Total       | 79     | 100.00% | 1       | 100.00%    |
 */
/**
 * mvumi_return_cmd - Return a cmd to free command pool
 * @mhba: Adapter soft state
 * @cmd: Command packet to be returned to free command pool
 */
static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	/* Drop the stale scsi_cmnd reference before recycling the slot. */
	cmd->scmd = NULL;
	list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
}
/*
 * Contributors
 * Person      | Tokens | Prop    | Commits | CommitProp |
 * Jianyun Li  | 36     | 100.00% | 1       | 100.00%    |
 * Total       | 36     | 100.00% | 1       | 100.00%    |
 */
/**
* mvumi_free_cmds - Free all the cmds in the free cmd pool
* @mhba: Adapter soft state
*/
static void mvumi_free_cmds(struct mvumi_hba *mhba)
{
struct mvumi_cmd *cmd;
while (!list_empty(&mhba->cmd_pool)) {
cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
queue_pointer);
list_del(&cmd->queue_pointer);
if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
kfree(cmd->frame);
kfree(cmd);
}
}
/*
 * Contributors
 * Person      | Tokens | Prop    | Commits | CommitProp |
 * Jianyun Li  | 64     | 85.33%  | 1       | 50.00%     |
 * Shun Fu     | 11     | 14.67%  | 1       | 50.00%     |
 * Total       | 75     | 100.00% | 2       | 100.00%    |
 */
/**
 * mvumi_alloc_cmds - Allocates the command packets
 * @mhba: Adapter soft state
 *
 * Builds mhba->max_io command packets and places them on the free
 * pool.  Frames either point into the shared ib_frame region
 * (HS_CAPABILITY_SUPPORT_DYN_SRC) or are kzalloc'ed individually.
 *
 * Return: 0 on success; -ENOMEM on failure, after releasing every
 * command allocated so far.
 */
static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd;
	int i;

	for (i = 0; i < mhba->max_io; i++) {
		cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
		if (!cmd)
			goto err_exit;

		INIT_LIST_HEAD(&cmd->queue_pointer);
		list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);

		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
			/* Carve this command's frame out of the shared
			 * ib_frame DMA area. */
			cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
			cmd->frame_phys = mhba->ib_frame_phys
						+ i * mhba->ib_max_size;
		} else
			cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
		if (!cmd->frame)
			goto err_exit;
	}
	return 0;

err_exit:
	dev_err(&mhba->pdev->dev,
			"failed to allocate memory for cmd[0x%x].\n", i);
	/* mvumi_free_cmds() performs exactly the unwind we need. */
	mvumi_free_cmds(mhba);
	return -ENOMEM;
}
/*
 * Contributors
 * Person      | Tokens | Prop    | Commits | CommitProp |
 * Jianyun Li  | 172    | 77.48%  | 1       | 50.00%     |
 * Shun Fu     | 50     | 22.52%  | 1       | 50.00%     |
 * Total       | 222    | 100.00% | 2       | 100.00%    |
 */
/*
 * mvumi_check_ib_list_9143 - count free inbound-list slots (MV9143).
 * @mhba: adapter soft state
 *
 * Return: number of commands that may still be posted, or 0 when the
 * inbound list or the firmware has no room.
 */
static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
{
	unsigned int ib_rp_reg;
	struct mvumi_hw_regs *regs = mhba->regs;
	ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);

	/* The list is full when the firmware read pointer sits on our
	 * current slot but its toggle bit differs (the writer has lapped
	 * the reader). */
	if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) ==
			(mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
			((ib_rp_reg & regs->cl_pointer_toggle)
			!= (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
		dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
		return 0;
	}
	if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
		dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
		return 0;
	} else {
		return mhba->max_io - atomic_read(&mhba->fw_outstanding);
	}
}
/*
 * Contributors
 * Person      | Tokens | Prop    | Commits | CommitProp |
 * Jianyun Li  | 83     | 57.24%  | 1       | 50.00%     |
 * Shun Fu     | 62     | 42.76%  | 1       | 50.00%     |
 * Total       | 145    | 100.00% | 2       | 100.00%    |
 */
/*
 * mvumi_check_ib_list_9580 - count free inbound-list slots (MV9580).
 * @mhba: adapter soft state
 *
 * Return: free-slot count read from the inbound shadow register, or 0
 * when the firmware is (nearly) saturated or the shadow reads 0xffff.
 */
static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
{
	unsigned int free_slots;

	/* Keep one command slot in reserve. */
	if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
		return 0;

	free_slots = ioread32(mhba->ib_shadow);
	return (free_slots == 0xffff) ? 0 : free_slots;
}
/*
 * Contributors
 * Person      | Tokens | Prop    | Commits | CommitProp |
 * Shun Fu     | 58     | 100.00% | 1       | 100.00%    |
 * Total       | 58     | 100.00% | 1       | 100.00%    |
 */
/*
 * mvumi_get_ib_list_entry - claim the next inbound-list slot.
 * @mhba:     adapter soft state
 * @ib_entry: out: set to the claimed slot inside mhba->ib_list
 *
 * Advances ib_cur_slot (flipping the toggle bit on wrap-around) and
 * increments fw_outstanding for the command about to be posted.
 */
static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
{
	unsigned int cur_ib_entry;
	cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
	cur_ib_entry++;
	if (cur_ib_entry >= mhba->list_num_io) {
		/* Wrapped: flip the toggle bit so the firmware can tell a
		 * full list from an empty one. */
		cur_ib_entry -= mhba->list_num_io;
		mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
	}
	mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
	mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
		/* Dynamic-source lists hold fixed-size descriptors rather
		 * than full-size frames. */
		*ib_entry = mhba->ib_list + cur_ib_entry *
				sizeof(struct mvumi_dyn_list_entry);
	} else {
		*ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
	}
	atomic_inc(&mhba->fw_outstanding);
}
/*
 * Contributors
 * Person      | Tokens | Prop    | Commits | CommitProp |
 * Shun Fu     | 99     | 73.33%  | 1       | 50.00%     |
 * Jianyun Li  | 36     | 26.67%  | 1       | 50.00%     |
 * Total       | 135    | 100.00% | 2       | 100.00%    |
 */
/*
 * mvumi_send_ib_list_entry - hand the current inbound slot to firmware.
 * @mhba: adapter soft state
 *
 * Writes the invalid marker to the inbound shadow register, then
 * publishes the updated write pointer.  NOTE(review): the write order
 * appears to be part of the hardware handshake - confirm against the
 * UMI programming spec before reordering.
 */
static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
{
	iowrite32(0xffff, mhba->ib_shadow);
	iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
}
/*
 * Contributors
 * Person      | Tokens | Prop    | Commits | CommitProp |
 * Jianyun Li  | 29     | 87.88%  | 1       | 50.00%     |
 * Shun Fu     | 4      | 12.12%  | 1       | 50.00%     |
 * Total       | 33     | 100.00% | 2       | 100.00%    |
 */
/*
 * mvumi_check_ob_frame - re-read and sanity-check an outbound frame.
 * @mhba:         adapter soft state
 * @cur_obf:      outbound-list slot index to validate
 * @p_outb_frame: scratch pointer; re-pointed at the slot's frame
 *
 * Used when a frame initially looked inconsistent: waits briefly, then
 * re-validates the tag and (when enabled) the request id.
 *
 * Return: 0 when the frame is usable, -1 when it is invalid.
 */
static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
		unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
{
	unsigned short tag, request_id;
	/* Give the firmware a moment to finish writing the entry. */
	udelay(1);
	p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
	request_id = p_outb_frame->request_id;
	tag = p_outb_frame->tag;
	/* NOTE(review): tag_init() issues tags 0..size-1, so this bound
	 * check looks off by one (> vs >=) - confirm against the sizing
	 * of tag_cmd[]. */
	if (tag > mhba->tag_pool.size) {
		dev_err(&mhba->pdev->dev, "ob frame data error\n");
		return -1;
	}
	if (mhba->tag_cmd[tag] == NULL) {
		dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
		return -1;
	} else if (mhba->tag_cmd[tag]->request_id != request_id &&
			mhba->request_id_enabled) {
		dev_err(&mhba->pdev->dev, "request ID from FW:0x%x,"
			"cmd request ID:0x%x\n", request_id,
			mhba->tag_cmd[tag]->request_id);
		return -1;
	}
	return 0;
}
/*
 * Contributors
 * Person      | Tokens | Prop    | Commits | CommitProp |
 * Jianyun Li  | 165    | 100.00% | 1       | 100.00%    |
 * Total       | 165    | 100.00% | 1       | 100.00%    |
 */
/*
 * mvumi_check_ob_list_9143 - outbound slot range to consume (MV9143).
 * @mhba:           adapter soft state
 * @cur_obf:        out: current outbound slot index
 * @assign_obf_end: out: end index; may exceed list_num_io when the
 *                  hardware writer wrapped past the driver's reader
 *
 * Return: always 0.
 */
static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
			unsigned int *cur_obf, unsigned int *assign_obf_end)
{
	unsigned int ob_write, ob_write_shadow;
	struct mvumi_hw_regs *regs = mhba->regs;
	/* Spin until the copy pointer and its shadow agree, i.e. the
	 * hardware has finished updating the outbound list. */
	do {
		ob_write = ioread32(regs->outb_copy_pointer);
		ob_write_shadow = ioread32(mhba->ob_shadow);
	} while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow);
	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
	/* A toggle-bit mismatch means the writer wrapped; unwrap by
	 * extending the end index one full list length. */
	if ((ob_write & regs->cl_pointer_toggle) !=
			(mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
		*assign_obf_end += mhba->list_num_io;
	}
	return 0;
}
/*
 * Contributors
 * Person      | Tokens | Prop    | Commits | CommitProp |
 * Jianyun Li  | 74     | 58.73%  | 1       | 50.00%     |
 * Shun Fu     | 52     | 41.27%  | 1       | 50.00%     |
 * Total       | 126    | 100.00% | 2       | 100.00%    |
 */
/*
 * mvumi_check_ob_list_9580 - outbound slot range to consume (MV9580).
 * @mhba:           adapter soft state
 * @cur_obf:        out: current outbound slot index
 * @assign_obf_end: out: end index; may exceed list_num_io on wrap
 *
 * Return: 0 when there are entries to process, -1 when the list is
 * empty.
 */
static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
			unsigned int *cur_obf, unsigned int *assign_obf_end)
{
	unsigned int ob_write;
	struct mvumi_hw_regs *regs = mhba->regs;
	/* NOTE(review): the first read's value is immediately overwritten;
	 * it is presumably a required dummy read of outb_read_pointer -
	 * confirm against the 9580 programming manual before removing. */
	ob_write = ioread32(regs->outb_read_pointer);
	ob_write = ioread32(regs->outb_copy_pointer);
	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
	if (*assign_obf_end < *cur_obf)
		*assign_obf_end += mhba->list_num_io;
	else if (*assign_obf_end == *cur_obf)
		return -1;
	return 0;
}
/*
 * Contributors
 * Person      | Tokens | Prop    | Commits | CommitProp |
 * Shun Fu     | 107    | 100.00% | 1       | 100.00%    |
 * Total       | 107    | 100.00% | 1       | 100.00%    |
 */
static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
{
unsigned int cur_obf, assign_obf_end, i;
struct mvumi_ob_data *ob_data;
struct mvumi_rsp_frame *p_outb_frame;
struct mvumi_hw_regs *regs = mhba->regs;
if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
return;
for (i = (assign_obf_end - cur_obf); i != 0; i--) {
cur_obf++;
if (cur_obf >= mhba->list_num_io) {
cur_obf -= mhba->list_num_io;
mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
}
p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
/* Copy pointer may point to entry in outbound list
* before entry has valid data
*/
if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
mhba->tag_cmd[p_outb_frame->tag] == NULL ||
p_outb_frame->request_id !=
mhba->tag_cmd[p_outb_frame->tag]->request_id))
if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
continue;
if (!list_empty(&mhba->ob_data_list)) {
ob_data = (struct mvumi_ob_data *)
list_first_entry(&mhba->ob_data_list,
struct mvumi_ob_data, list);
list_del_init(&ob_data->list);
} else {
ob_data = NULL;
if (cur_obf == 0) {
cur_obf = mhba->list_num_io - 1;
mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
} else
cur_obf -= 1;
break;
}
memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
p_outb_frame->tag = 0xff;
list_add_tail(&ob_data->list, &mhba->free_ob_list);
}
mhba->ob_cur_slot &= ~