Release 4.18 drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
#include "ixgbe.h"
#include "ixgbe_mbx.h"
/**
 * ixgbe_read_mbx - Reads a message from the mailbox
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer
 * @mbx_id: id of mailbox to read
 *
 * returns SUCCESS if it successfully read message from buffer
 **/
s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;

	if (!mbx->ops)
		return IXGBE_ERR_MBX;

	/* never read more words than the mailbox holds */
	if (size > mbx->size)
		size = mbx->size;

	return mbx->ops->read(hw, msg, size, mbx_id);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Greg Rose | 65 | 91.55% | 1 | 33.33% |
Mark D Rustad | 6 | 8.45% | 2 | 66.67% |
Total | 71 | 100.00% | 3 | 100.00% |
/**
 * ixgbe_write_mbx - Write a message to the mailbox
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer
 * @mbx_id: id of mailbox to write
 *
 * returns SUCCESS if it successfully copied message into the buffer
 **/
s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;

	/* reject oversized messages and unconfigured mailboxes */
	if (size > mbx->size || !mbx->ops)
		return IXGBE_ERR_MBX;

	return mbx->ops->write(hw, msg, size, mbx_id);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Greg Rose | 59 | 88.06% | 1 | 33.33% |
Mark D Rustad | 8 | 11.94% | 2 | 66.67% |
Total | 67 | 100.00% | 3 | 100.00% |
/**
 * ixgbe_check_for_msg - checks to see if someone sent us mail
 * @hw: pointer to the HW structure
 * @mbx_id: id of mailbox to check
 *
 * returns SUCCESS if the Status bit was found or else ERR_MBX
 **/
s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;

	/* delegate to the MAC-specific implementation, if any */
	return mbx->ops ? mbx->ops->check_for_msg(hw, mbx_id) : IXGBE_ERR_MBX;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Greg Rose | 39 | 86.67% | 1 | 33.33% |
Mark D Rustad | 6 | 13.33% | 2 | 66.67% |
Total | 45 | 100.00% | 3 | 100.00% |
/**
 * ixgbe_check_for_ack - checks to see if someone sent us ACK
 * @hw: pointer to the HW structure
 * @mbx_id: id of mailbox to check
 *
 * returns SUCCESS if the Status bit was found or else ERR_MBX
 **/
s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;

	/* delegate to the MAC-specific implementation, if any */
	return mbx->ops ? mbx->ops->check_for_ack(hw, mbx_id) : IXGBE_ERR_MBX;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Greg Rose | 39 | 86.67% | 1 | 33.33% |
Mark D Rustad | 6 | 13.33% | 2 | 66.67% |
Total | 45 | 100.00% | 3 | 100.00% |
/**
 * ixgbe_check_for_rst - checks to see if other side has reset
 * @hw: pointer to the HW structure
 * @mbx_id: id of mailbox to check
 *
 * returns SUCCESS if the Status bit was found or else ERR_MBX
 **/
s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;

	/* delegate to the MAC-specific implementation, if any */
	return mbx->ops ? mbx->ops->check_for_rst(hw, mbx_id) : IXGBE_ERR_MBX;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Greg Rose | 39 | 86.67% | 1 | 33.33% |
Mark D Rustad | 6 | 13.33% | 2 | 66.67% |
Total | 45 | 100.00% | 3 | 100.00% |
/**
 * ixgbe_poll_for_msg - Wait for message notification
 * @hw: pointer to the HW structure
 * @mbx_id: id of mailbox to write
 *
 * returns SUCCESS if it successfully received a message notification
 **/
static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	int countdown = mbx->timeout;

	/* a zero timeout means polling is not configured for this mailbox */
	if (!countdown || !mbx->ops)
		return IXGBE_ERR_MBX;

	for (;;) {
		if (!mbx->ops->check_for_msg(hw, mbx_id))
			return 0;
		if (!--countdown)
			return IXGBE_ERR_MBX;
		/* back off before re-checking */
		udelay(mbx->usec_delay);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Greg Rose | 73 | 91.25% | 1 | 33.33% |
Mark D Rustad | 7 | 8.75% | 2 | 66.67% |
Total | 80 | 100.00% | 3 | 100.00% |
/**
 * ixgbe_poll_for_ack - Wait for message acknowledgement
 * @hw: pointer to the HW structure
 * @mbx_id: id of mailbox to write
 *
 * returns SUCCESS if it successfully received a message acknowledgement
 **/
static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	int countdown = mbx->timeout;

	/* a zero timeout means polling is not configured for this mailbox */
	if (!countdown || !mbx->ops)
		return IXGBE_ERR_MBX;

	for (;;) {
		if (!mbx->ops->check_for_ack(hw, mbx_id))
			return 0;
		if (!--countdown)
			return IXGBE_ERR_MBX;
		/* back off before re-checking */
		udelay(mbx->usec_delay);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Greg Rose | 73 | 91.25% | 1 | 33.33% |
Mark D Rustad | 7 | 8.75% | 2 | 66.67% |
Total | 80 | 100.00% | 3 | 100.00% |
/**
 * ixgbe_read_posted_mbx - Wait for message notification and receive message
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer
 * @mbx_id: id of mailbox to write
 *
 * returns SUCCESS if it successfully received a message notification and
 * copied it into the receive buffer.
 **/
static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
				 u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	s32 err;

	if (!mbx->ops)
		return IXGBE_ERR_MBX;

	/* block until a message shows up or the poll times out */
	err = ixgbe_poll_for_msg(hw, mbx_id);
	if (err)
		return err;

	/* a message is waiting; pull it out of the mailbox */
	return mbx->ops->read(hw, msg, size, mbx_id);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Greg Rose | 68 | 88.31% | 1 | 25.00% |
Mark D Rustad | 8 | 10.39% | 2 | 50.00% |
Emil Tantilov | 1 | 1.30% | 1 | 25.00% |
Total | 77 | 100.00% | 4 | 100.00% |
/**
 * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer
 * @mbx_id: id of mailbox to write
 *
 * returns SUCCESS if it successfully copied message into the buffer and
 * received an ack to that message within delay * timeout period
 **/
static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
				  u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	s32 err;

	/* exit if either we can't write or there isn't a defined timeout */
	if (!mbx->ops || !mbx->timeout)
		return IXGBE_ERR_MBX;

	err = mbx->ops->write(hw, msg, size, mbx_id);
	if (err)
		return err;

	/* message sent; now wait for the other side to ack it */
	return ixgbe_poll_for_ack(hw, mbx_id);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Greg Rose | 75 | 89.29% | 1 | 25.00% |
Mark D Rustad | 8 | 9.52% | 2 | 50.00% |
Emil Tantilov | 1 | 1.19% | 1 | 25.00% |
Total | 84 | 100.00% | 4 | 100.00% |
/**
 * ixgbe_check_for_bit_pf - test and clear bits in the PF mailbox ICR
 * @hw: pointer to the HW structure
 * @mask: bit(s) to look for in the MBVFICR register
 * @index: MBVFICR register index
 *
 * returns SUCCESS if any bit in @mask is set (and acknowledges it by
 * writing the mask back), otherwise IXGBE_ERR_MBX
 **/
static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
{
	u32 pending = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));

	if (!(pending & mask))
		return IXGBE_ERR_MBX;

	/* clear the indication by writing the mask back */
	IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Greg Rose | 52 | 92.86% | 1 | 50.00% |
Mark D Rustad | 4 | 7.14% | 1 | 50.00% |
Total | 56 | 100.00% | 2 | 100.00% |
/**
 * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
 * @hw: pointer to the HW structure
 * @vf_number: the VF index
 *
 * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
 **/
static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
{
	s32 index = IXGBE_MBVFICR_INDEX(vf_number);
	u32 vf_bit = vf_number % 16;

	if (ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
				   index))
		return IXGBE_ERR_MBX;

	hw->mbx.stats.reqs++;
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Greg Rose | 56 | 93.33% | 1 | 50.00% |
Mark D Rustad | 4 | 6.67% | 1 | 50.00% |
Total | 60 | 100.00% | 2 | 100.00% |
/**
 * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
 * @hw: pointer to the HW structure
 * @vf_number: the VF index
 *
 * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
 **/
static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
{
	s32 index = IXGBE_MBVFICR_INDEX(vf_number);
	u32 vf_bit = vf_number % 16;

	if (ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
				   index))
		return IXGBE_ERR_MBX;

	hw->mbx.stats.acks++;
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Greg Rose | 56 | 93.33% | 1 | 50.00% |
Mark D Rustad | 4 | 6.67% | 1 | 50.00% |
Total | 60 | 100.00% | 2 | 100.00% |
/**
 * ixgbe_check_for_rst_pf - checks to see if the VF has reset
 * @hw: pointer to the HW structure
 * @vf_number: the VF index
 *
 * returns SUCCESS if the Status bit was found or else ERR_MBX
 **/
static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
{
	u32 reg_offset = (vf_number < 32) ? 0 : 1;
	u32 vf_shift = vf_number % 32;
	u32 vflre = 0;

	/* the VFLR indication register differs per MAC generation */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
		break;
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
		break;
	default:
		/* unknown MAC: vflre stays 0, so we report no reset below */
		break;
	}

	if (!(vflre & BIT(vf_shift)))
		return IXGBE_ERR_MBX;

	/* acknowledge the reset indication and count it */
	IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), BIT(vf_shift));
	hw->mbx.stats.rsts++;
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Greg Rose | 102 | 76.69% | 2 | 28.57% |
Donald Skidmore | 18 | 13.53% | 2 | 28.57% |
Mark D Rustad | 7 | 5.26% | 2 | 28.57% |
Jacob E Keller | 6 | 4.51% | 1 | 14.29% |
Total | 133 | 100.00% | 7 | 100.00% |
/**
 * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
 * @hw: pointer to the HW structure
 * @vf_number: the VF index
 *
 * return SUCCESS if we obtained the mailbox lock
 **/
static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
{
	u32 pfmb;

	/* request ownership of the buffer by setting the PFU bit */
	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);

	/* read back to verify we actually obtained the lock */
	pfmb = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
	if (!(pfmb & IXGBE_PFMAILBOX_PFU))
		return IXGBE_ERR_MBX;

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Greg Rose | 52 | 94.55% | 1 | 50.00% |
Mark D Rustad | 3 | 5.45% | 1 | 50.00% |
Total | 55 | 100.00% | 2 | 100.00% |
/**
 * ixgbe_write_mbx_pf - Places a message in the mailbox
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer
 * @vf_number: the VF index
 *
 * returns SUCCESS if it successfully copied message into the buffer
 **/
static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
			      u16 vf_number)
{
	s32 err;
	u16 word;

	/* lock the mailbox to prevent pf/vf race condition */
	err = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
	if (err)
		return err;

	/* drop any stale request/ack indications before overwriting buffer */
	ixgbe_check_for_msg_pf(hw, vf_number);
	ixgbe_check_for_ack_pf(hw, vf_number);

	/* copy the message into mailbox memory, one 32-bit word at a time */
	for (word = 0; word < size; word++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), word,
				      msg[word]);

	/* Interrupt VF to tell it a message has been sent and release buffer*/
	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);

	/* update stats */
	hw->mbx.stats.msgs_tx++;
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Greg Rose | 112 | 96.55% | 1 | 50.00% |
Mark D Rustad | 4 | 3.45% | 1 | 50.00% |
Total | 116 | 100.00% | 2 | 100.00% |
/**
 * ixgbe_read_mbx_pf - Read a message from the mailbox
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer
 * @vf_number: the VF index
 *
 * This function copies a message from the mailbox buffer to the caller's
 * memory buffer. The presumption is that the caller knows that there was
 * a message due to a VF request so no polling for message is needed.
 **/
static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
			     u16 vf_number)
{
	s32 err;
	u16 word;

	/* lock the mailbox to prevent pf/vf race condition */
	err = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
	if (err)
		return err;

	/* copy mailbox memory into the caller's buffer, word by word */
	for (word = 0; word < size; word++)
		msg[word] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number),
						 word);

	/* Acknowledge the message and release buffer */
	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);

	/* update stats */
	hw->mbx.stats.msgs_rx++;
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Greg Rose | 97 | 96.04% | 1 | 50.00% |
Mark D Rustad | 4 | 3.96% | 1 | 50.00% |
Total | 101 | 100.00% | 2 | 100.00% |
#ifdef CONFIG_PCI_IOV
/**
 * ixgbe_init_mbx_params_pf - set initial values for pf mailbox
 * @hw: pointer to the HW structure
 *
 * Initializes the hw->mbx struct to correct values for pf mailbox
 */
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;

	/* only MACs with a PF/VF mailbox are initialized here */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		break;
	default:
		return;
	}

	/* zero timeout/delay: posted operations bail out immediately */
	mbx->timeout = 0;
	mbx->usec_delay = 0;

	mbx->stats.msgs_tx = 0;
	mbx->stats.msgs_rx = 0;
	mbx->stats.reqs = 0;
	mbx->stats.acks = 0;
	mbx->stats.rsts = 0;

	mbx->size = IXGBE_VFMAILBOX_SIZE;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Greg Rose | 77 | 63.64% | 1 | 16.67% |
Donald Skidmore | 18 | 14.88% | 2 | 33.33% |
Emil Tantilov | 17 | 14.05% | 1 | 16.67% |
Mark D Rustad | 8 | 6.61% | 1 | 16.67% |
Andy Gospodarek | 1 | 0.83% | 1 | 16.67% |
Total | 121 | 100.00% | 6 | 100.00% |
#endif /* CONFIG_PCI_IOV */
/* PF-side implementations of the mailbox operations vector */
const struct ixgbe_mbx_operations mbx_ops_generic = {
	.read = ixgbe_read_mbx_pf,
	.write = ixgbe_write_mbx_pf,
	.read_posted = ixgbe_read_posted_mbx,
	.write_posted = ixgbe_write_posted_mbx,
	.check_for_msg = ixgbe_check_for_msg_pf,
	.check_for_ack = ixgbe_check_for_ack_pf,
	.check_for_rst = ixgbe_check_for_rst_pf,
};
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Greg Rose | 1200 | 87.34% | 2 | 11.76% |
Mark D Rustad | 102 | 7.42% | 5 | 29.41% |
Donald Skidmore | 43 | 3.13% | 4 | 23.53% |
Emil Tantilov | 19 | 1.38% | 2 | 11.76% |
Jacob E Keller | 6 | 0.44% | 1 | 5.88% |
Jeff Kirsher | 2 | 0.15% | 1 | 5.88% |
Andy Gospodarek | 1 | 0.07% | 1 | 5.88% |
Joe Perches | 1 | 0.07% | 1 | 5.88% |
Total | 1374 | 100.00% | 17 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.