Release 4.11 drivers/net/usb/asix_common.c
/*
 * ASIX AX8817X based USB 2.0 Ethernet Devices
 * Copyright (C) 2003-2006 David Hollis <dhollis@davehollis.com>
 * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
 * Copyright (C) 2006 James Painter <jamie.painter@iname.com>
 * Copyright (c) 2002-2003 TiVo Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "asix.h"
int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
                  u16 size, void *data, int in_pm)
{
        int ret;
        int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);

        BUG_ON(!dev);

        if (!in_pm)
                fn = usbnet_read_cmd;
        else
                fn = usbnet_read_cmd_nopm;

        ret = fn(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                 value, index, data, size);

        if (unlikely(ret < 0))
                netdev_warn(dev->net, "Failed to read reg index 0x%04x: %d\n",
                            index, ret);

        return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Robert Foss | 65 | 52.42% | 1 | 14.29% |
David Brownell | 39 | 31.45% | 1 | 14.29% |
Al Viro | 9 | 7.26% | 1 | 14.29% |
Ming Lei | 4 | 3.23% | 1 | 14.29% |
Russ Dill | 4 | 3.23% | 1 | 14.29% |
David T. Hollis | 3 | 2.42% | 2 | 28.57% |
Total | 124 | 100.00% | 7 | 100.00% |
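Usage note (not part of the file): the in_pm flag is what lets the same accessor be called from suspend/resume handlers, where only the _nopm usbnet variants are allowed. A hedged sketch of a typical 16-bit register read through this helper, mirroring asix_read_rx_ctl() further down:

/* Illustration only: in_pm = 0 routes through usbnet_read_cmd(); a caller in a
 * suspend/resume path would pass in_pm = 1 to use usbnet_read_cmd_nopm() instead.
 */
__le16 v;
int ret = asix_read_cmd(dev, AX_CMD_READ_RX_CTL, 0, 0, 2, &v, in_pm);

if (ret < 0)
        return ret;             /* control transfer failed */
return le16_to_cpu(v);          /* convert wire order to CPU order */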
int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
                   u16 size, void *data, int in_pm)
{
        int ret;
        int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);

        BUG_ON(!dev);

        if (!in_pm)
                fn = usbnet_write_cmd;
        else
                fn = usbnet_write_cmd_nopm;

        ret = fn(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                 value, index, data, size);

        if (unlikely(ret < 0))
                netdev_warn(dev->net, "Failed to write reg index 0x%04x: %d\n",
                            index, ret);

        return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Robert Foss | 78 | 62.40% | 1 | 25.00% |
David Brownell | 44 | 35.20% | 1 | 25.00% |
Ming Lei | 2 | 1.60% | 1 | 25.00% |
David T. Hollis | 1 | 0.80% | 1 | 25.00% |
Total | 125 | 100.00% | 4 | 100.00% |
void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
                          u16 size, void *data)
{
        usbnet_write_cmd_async(dev, cmd,
                               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                               value, index, data, size);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David T. Hollis | 43 | 91.49% | 2 | 50.00% |
Ming Lei | 3 | 6.38% | 1 | 25.00% |
Christian Riesch | 1 | 2.13% | 1 | 25.00% |
Total | 47 | 100.00% | 4 | 100.00% |
int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
                           struct asix_rx_fixup_info *rx)
{
        int offset = 0;
        u16 size;

        /* When an Ethernet frame spans multiple URB socket buffers,
         * do a sanity test for the Data header synchronisation.
         * Attempt to detect the situation of the previous socket buffer having
         * been truncated or a socket buffer was missing. These situations
         * cause a discontinuity in the data stream and therefore need to avoid
         * appending bad data to the end of the current netdev socket buffer.
         * Also avoid unnecessarily discarding a good current netdev socket
         * buffer.
         */
        if (rx->remaining && (rx->remaining + sizeof(u32) <= skb->len)) {
                offset = ((rx->remaining + 1) & 0xfffe);
                rx->header = get_unaligned_le32(skb->data + offset);
                offset = 0;

                size = (u16)(rx->header & 0x7ff);
                if (size != ((~rx->header >> 16) & 0x7ff)) {
                        netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n",
                                   rx->remaining);
                        if (rx->ax_skb) {
                                kfree_skb(rx->ax_skb);
                                rx->ax_skb = NULL;
                                /* Discard the incomplete netdev Ethernet frame
                                 * and assume the Data header is at the start of
                                 * the current URB socket buffer.
                                 */
                        }
                        rx->remaining = 0;
                }
        }

        while (offset + sizeof(u16) <= skb->len) {
                u16 copy_length;
                unsigned char *data;

                if (!rx->remaining) {
                        if (skb->len - offset == sizeof(u16)) {
                                rx->header = get_unaligned_le16(
                                                skb->data + offset);
                                rx->split_head = true;
                                offset += sizeof(u16);
                                break;
                        }

                        if (rx->split_head == true) {
                                rx->header |= (get_unaligned_le16(
                                                skb->data + offset) << 16);
                                rx->split_head = false;
                                offset += sizeof(u16);
                        } else {
                                rx->header = get_unaligned_le32(skb->data +
                                                                offset);
                                offset += sizeof(u32);
                        }

                        /* take frame length from Data header 32-bit word */
                        size = (u16)(rx->header & 0x7ff);
                        if (size != ((~rx->header >> 16) & 0x7ff)) {
                                netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n",
                                           rx->header, offset);
                                return 0;
                        }
                        if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) {
                                netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
                                           size);
                                return 0;
                        }

                        /* Sometimes may fail to get a netdev socket buffer but
                         * continue to process the URB socket buffer so that
                         * synchronisation of the Ethernet frame Data header
                         * word is maintained.
                         */
                        rx->ax_skb = netdev_alloc_skb_ip_align(dev->net, size);

                        rx->remaining = size;
                }

                if (rx->remaining > skb->len - offset) {
                        copy_length = skb->len - offset;
                        rx->remaining -= copy_length;
                } else {
                        copy_length = rx->remaining;
                        rx->remaining = 0;
                }

                if (rx->ax_skb) {
                        data = skb_put(rx->ax_skb, copy_length);
                        memcpy(data, skb->data + offset, copy_length);
                        if (!rx->remaining)
                                usbnet_skb_return(dev, rx->ax_skb);
                }

                offset += (copy_length + 1) & 0xfffe;
        }

        if (skb->len != offset) {
                netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n",
                           skb->len, offset);
                return 0;
        }
        return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dean Jenkins | 190 | 36.12% | 5 | 29.41% |
Lucas Stach | 155 | 29.47% | 1 | 5.88% |
David T. Hollis | 100 | 19.01% | 2 | 11.76% |
Eric Dumazet | 50 | 9.51% | 2 | 11.76% |
Joe Perches | 9 | 1.71% | 1 | 5.88% |
Jussi Kivilinna | 7 | 1.33% | 1 | 5.88% |
Neil Jones | 5 | 0.95% | 1 | 5.88% |
Holger Eitzenberger | 4 | 0.76% | 1 | 5.88% |
Marek Vašut | 3 | 0.57% | 1 | 5.88% |
Arnaldo Carvalho de Melo | 2 | 0.38% | 1 | 5.88% |
Stephen Hemminger | 1 | 0.19% | 1 | 5.88% |
Total | 526 | 100.00% | 17 | 100.00% |
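The resynchronisation logic above relies on the layout of the 32-bit Data header: the frame length sits in bits 0-10 and its bitwise complement in bits 16-26, so a corrupted stream fails the size check. A minimal, standalone sketch (not part of the driver; values are made up) that packs a header this way and reruns the same test:

#include <stdint.h>
#include <stdio.h>

/* Pack a header as <~length:11> in bits 16-26 and <length:11> in bits 0-10. */
static uint32_t pack_rx_header(uint16_t len)
{
        return ((uint32_t)(~len & 0x7ff) << 16) | (len & 0x7ff);
}

/* Same sanity test as asix_rx_fixup_internal() applies to rx->header. */
static int rx_header_ok(uint32_t header)
{
        uint16_t size = header & 0x7ff;

        return size == ((~header >> 16) & 0x7ff);
}

int main(void)
{
        uint32_t good = pack_rx_header(92);     /* 0x07a3005c */
        uint32_t bad  = good ^ 0x1;             /* flip one length bit */

        printf("good: 0x%08x -> %d\n", good, rx_header_ok(good));      /* prints 1 */
        printf("bad:  0x%08x -> %d\n", bad, rx_header_ok(bad));        /* prints 0 */
        return 0;
}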
int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb)
{
        struct asix_common_private *dp = dev->driver_priv;
        struct asix_rx_fixup_info *rx = &dp->rx_fixup_info;

        return asix_rx_fixup_internal(dev, skb, rx);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Lucas Stach | 44 | 100.00% | 1 | 100.00% |
Total | 44 | 100.00% | 1 | 100.00% |
struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
                              gfp_t flags)
{
        int padlen;
        int headroom = skb_headroom(skb);
        int tailroom = skb_tailroom(skb);
        u32 packet_len;
        u32 padbytes = 0xffff0000;

        padlen = ((skb->len + 4) & (dev->maxpacket - 1)) ? 0 : 4;

        /* We need to push 4 bytes in front of frame (packet_len)
         * and maybe add 4 bytes after the end (if padlen is 4)
         *
         * Avoid skb_copy_expand() expensive call, using following rules :
         * - We are allowed to push 4 bytes in headroom if skb_header_cloned()
         *   is false (and if we have 4 bytes of headroom)
         * - We are allowed to put 4 bytes at tail if skb_cloned()
         *   is false (and if we have 4 bytes of tailroom)
         *
         * TCP packets for example are cloned, but skb_header_release()
         * was called in tcp stack, allowing us to use headroom for our needs.
         */
        if (!skb_header_cloned(skb) &&
            !(padlen && skb_cloned(skb)) &&
            headroom + tailroom >= 4 + padlen) {
                /* following should not happen, but better be safe */
                if (headroom < 4 ||
                    tailroom < padlen) {
                        skb->data = memmove(skb->head + 4, skb->data, skb->len);
                        skb_set_tail_pointer(skb, skb->len);
                }
        } else {
                struct sk_buff *skb2;

                skb2 = skb_copy_expand(skb, 4, padlen, flags);
                dev_kfree_skb_any(skb);
                skb = skb2;
                if (!skb)
                        return NULL;
        }

        packet_len = ((skb->len ^ 0x0000ffff) << 16) + skb->len;
        skb_push(skb, 4);
        cpu_to_le32s(&packet_len);
        skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));

        if (padlen) {
                cpu_to_le32s(&padbytes);
                memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
                skb_put(skb, sizeof(padbytes));
        }

        usbnet_set_skb_tx_stats(skb, 1, 0);
        return skb;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David T. Hollis | 158 | 57.45% | 2 | 22.22% |
David Brownell | 74 | 26.91% | 1 | 11.11% |
Eric Dumazet | 17 | 6.18% | 1 | 11.11% |
Ben Hutchings | 9 | 3.27% | 2 | 22.22% |
Ingo van Lil | 9 | 3.27% | 1 | 11.11% |
Arnaldo Carvalho de Melo | 8 | 2.91% | 2 | 22.22% |
Total | 275 | 100.00% | 9 | 100.00% |
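The 4-byte header pushed in front of each frame stores the frame length in the low 16 bits and its ones' complement in the high 16 bits, and a trailing 0xffff0000 marker word is appended whenever len + 4 lands exactly on a multiple of the endpoint packet size (presumably so the bulk transfer never ends on a wMaxPacketSize boundary). A standalone sketch of the same arithmetic, with made-up example values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t len = 60;                                      /* example frame length */
        uint32_t packet_len = ((len ^ 0x0000ffff) << 16) + len; /* same formula as asix_tx_fixup() */

        printf("header for %u-byte frame: 0x%08x\n", len, packet_len);  /* 0xffc3003c */

        /* padlen logic from above, assuming a hypothetical 512-byte bulk endpoint */
        uint32_t maxpacket = 512;
        uint32_t padlen = ((len + 4) & (maxpacket - 1)) ? 0 : 4;

        printf("padlen: %u\n", padlen);         /* 0: 64 is not a multiple of 512 */
        return 0;
}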
int asix_set_sw_mii(struct usbnet *dev, int in_pm)
{
        int ret;

        ret = asix_write_cmd(dev, AX_CMD_SET_SW_MII, 0x0000, 0, 0, NULL, in_pm);
        if (ret < 0)
                netdev_err(dev->net, "Failed to enable software MII access\n");
        return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David T. Hollis | 31 | 58.49% | 1 | 25.00% |
David Brownell | 13 | 24.53% | 1 | 25.00% |
Robert Foss | 5 | 9.43% | 1 | 25.00% |
Joe Perches | 4 | 7.55% | 1 | 25.00% |
Total | 53 | 100.00% | 4 | 100.00% |
int asix_set_hw_mii(struct usbnet *dev, int in_pm)
{
        int ret;

        ret = asix_write_cmd(dev, AX_CMD_SET_HW_MII, 0x0000, 0, 0, NULL, in_pm);
        if (ret < 0)
                netdev_err(dev->net, "Failed to enable hardware MII access\n");
        return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Brownell | 23 | 43.40% | 1 | 25.00% |
David T. Hollis | 21 | 39.62% | 1 | 25.00% |
Robert Foss | 5 | 9.43% | 1 | 25.00% |
Joe Perches | 4 | 7.55% | 1 | 25.00% |
Total | 53 | 100.00% | 4 | 100.00% |
int asix_read_phy_addr(struct usbnet *dev, int internal)
{
        int offset = (internal ? 1 : 0);
        u8 buf[2];
        int ret = asix_read_cmd(dev, AX_CMD_READ_PHY_ID, 0, 0, 2, buf, 0);

        netdev_dbg(dev->net, "asix_get_phy_addr()\n");

        if (ret < 0) {
                netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret);
                goto out;
        }
        netdev_dbg(dev->net, "asix_get_phy_addr() returning 0x%04x\n",
                   *((__le16 *)buf));
        ret = buf[offset];

out:
        return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David T. Hollis | 40 | 36.04% | 1 | 16.67% |
Al Viro | 21 | 18.92% | 1 | 16.67% |
David Brownell | 20 | 18.02% | 1 | 16.67% |
Christian Riesch | 16 | 14.41% | 1 | 16.67% |
Joe Perches | 12 | 10.81% | 1 | 16.67% |
Robert Foss | 2 | 1.80% | 1 | 16.67% |
Total | 111 | 100.00% | 6 | 100.00% |
int asix_get_phy_addr(struct usbnet *dev)
{
        /* return the address of the internal phy */
        return asix_read_phy_addr(dev, 1);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christian Riesch | 19 | 100.00% | 1 | 100.00% |
Total | 19 | 100.00% | 1 | 100.00% |
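Usage note (illustration only): AX_CMD_READ_PHY_ID returns two bytes, and the internal flag simply selects which byte is reported, buf[1] for the internal PHY and buf[0] for the external one. A hedged sketch of both calls, using only the helpers defined above:

int external_phy = asix_read_phy_addr(dev, 0);  /* buf[0] */
int internal_phy = asix_read_phy_addr(dev, 1);  /* buf[1], what asix_get_phy_addr() returns */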
int asix_sw_reset(struct usbnet *dev, u8 flags, int in_pm)
{
        int ret;

        ret = asix_write_cmd(dev, AX_CMD_SW_RESET, flags, 0, 0, NULL, in_pm);
        if (ret < 0)
                netdev_err(dev->net, "Failed to send software reset: %02x\n", ret);

        return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David T. Hollis | 35 | 60.34% | 1 | 25.00% |
David Brownell | 14 | 24.14% | 1 | 25.00% |
Robert Foss | 5 | 8.62% | 1 | 25.00% |
Joe Perches | 4 | 6.90% | 1 | 25.00% |
Total | 58 | 100.00% | 4 | 100.00% |
u16 asix_read_rx_ctl(struct usbnet *dev, int in_pm)
{
        __le16 v;
        int ret = asix_read_cmd(dev, AX_CMD_READ_RX_CTL, 0, 0, 2, &v, in_pm);

        if (ret < 0) {
                netdev_err(dev->net, "Error reading RX_CTL register: %02x\n", ret);
                goto out;
        }
        ret = le16_to_cpu(v);
out:
        return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David T. Hollis | 25 | 35.21% | 1 | 20.00% |
David Brownell | 23 | 32.39% | 1 | 20.00% |
Al Viro | 14 | 19.72% | 1 | 20.00% |
Robert Foss | 5 | 7.04% | 1 | 20.00% |
Joe Perches | 4 | 5.63% | 1 | 20.00% |
Total | 71 | 100.00% | 5 | 100.00% |
int asix_write_rx_ctl(struct usbnet *dev, u16 mode, int in_pm)
{
        int ret;

        netdev_dbg(dev->net, "asix_write_rx_ctl() - mode = 0x%04x\n", mode);
        ret = asix_write_cmd(dev, AX_CMD_WRITE_RX_CTL, mode, 0, 0, NULL, in_pm);
        if (ret < 0)
                netdev_err(dev->net, "Failed to write RX_CTL mode to 0x%04x: %02x\n",
                           mode, ret);

        return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David T. Hollis | 40 | 56.34% | 2 | 40.00% |
David Brownell | 18 | 25.35% | 1 | 20.00% |
Joe Perches | 8 | 11.27% | 1 | 20.00% |
Robert Foss | 5 | 7.04% | 1 | 20.00% |
Total | 71 | 100.00% | 5 | 100.00% |
u16 asix_read_medium_status(struct usbnet *dev, int in_pm)
{
        __le16 v;
        int ret = asix_read_cmd(dev, AX_CMD_READ_MEDIUM_STATUS,
                                0, 0, 2, &v, in_pm);

        if (ret < 0) {
                netdev_err(dev->net, "Error reading Medium Status register: %02x\n",
                           ret);
                return ret;     /* TODO: callers not checking for error ret */
        }

        return le16_to_cpu(v);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Brownell | 21 | 31.82% | 1 | 14.29% |
David T. Hollis | 19 | 28.79% | 2 | 28.57% |
Al Viro | 12 | 18.18% | 1 | 14.29% |
Robert Foss | 5 | 7.58% | 1 | 14.29% |
Grant Grundler | 5 | 7.58% | 1 | 14.29% |
Joe Perches | 4 | 6.06% | 1 | 14.29% |
Total | 66 | 100.00% | 7 | 100.00% |
int asix_write_medium_mode(struct usbnet *dev, u16 mode, int in_pm)
{
        int ret;

        netdev_dbg(dev->net, "asix_write_medium_mode() - mode = 0x%04x\n", mode);
        ret = asix_write_cmd(dev, AX_CMD_WRITE_MEDIUM_MODE,
                             mode, 0, 0, NULL, in_pm);
        if (ret < 0)
                netdev_err(dev->net, "Failed to write Medium Mode mode to 0x%04x: %02x\n",
                           mode, ret);

        return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David T. Hollis | 32 | 45.07% | 2 | 40.00% |
David Brownell | 26 | 36.62% | 1 | 20.00% |
Joe Perches | 8 | 11.27% | 1 | 20.00% |
Robert Foss | 5 | 7.04% | 1 | 20.00% |
Total | 71 | 100.00% | 5 | 100.00% |
int asix_write_gpio(struct usbnet *dev, u16 value, int sleep, int in_pm)
{
        int ret;

        netdev_dbg(dev->net, "asix_write_gpio() - value = 0x%04x\n", value);
        ret = asix_write_cmd(dev, AX_CMD_WRITE_GPIOS, value, 0, 0, NULL, in_pm);
        if (ret < 0)
                netdev_err(dev->net, "Failed to write GPIO value 0x%04x: %02x\n",
                           value, ret);
        if (sleep)
                msleep(sleep);

        return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David T. Hollis | 61 | 73.49% | 1 | 25.00% |
David Brownell | 9 | 10.84% | 1 | 25.00% |
Joe Perches | 8 | 9.64% | 1 | 25.00% |
Robert Foss | 5 | 6.02% | 1 | 25.00% |
Total | 83 | 100.00% | 4 | 100.00% |
/*
* AX88772 & AX88178 have a 16-bit RX_CTL value
*/
void asix_set_multicast(struct net_device *net)
{
        struct usbnet *dev = netdev_priv(net);
        struct asix_data *data = (struct asix_data *)&dev->data;
        u16 rx_ctl = AX_DEFAULT_RX_CTL;

        if (net->flags & IFF_PROMISC) {
                rx_ctl |= AX_RX_CTL_PRO;
        } else if (net->flags & IFF_ALLMULTI ||
                   netdev_mc_count(net) > AX_MAX_MCAST) {
                rx_ctl |= AX_RX_CTL_AMALL;
        } else if (netdev_mc_empty(net)) {
                /* just broadcast and directed */
        } else {
                /* We use the 20 byte dev->data
                 * for our 8 byte filter buffer
                 * to avoid allocating memory that
                 * is tricky to free later */
                struct netdev_hw_addr *ha;
                u32 crc_bits;

                memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);

                /* Build the multicast hash filter. */
                netdev_for_each_mc_addr(ha, net) {
                        crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
                        data->multi_filter[crc_bits >> 3] |=
                                1 << (crc_bits & 7);
                }

                asix_write_cmd_async(dev, AX_CMD_WRITE_MULTI_FILTER, 0, 0,
                                     AX_MCAST_FILTER_SIZE, data->multi_filter);

                rx_ctl |= AX_RX_CTL_AM;
        }

        asix_write_cmd_async(dev, AX_CMD_WRITE_RX_CTL, rx_ctl, 0, 0, NULL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David T. Hollis | 123 | 66.49% | 1 | 20.00% |
David Brownell | 46 | 24.86% | 1 | 20.00% |
Jiri Pirko | 16 | 8.65% | 3 | 60.00% |
Total | 185 | 100.00% | 5 | 100.00% |
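The hash filter above is indexed by the top six bits of the Ethernet CRC of each multicast address: bits 5-3 of that index select one of the eight filter bytes (AX_MCAST_FILTER_SIZE) and bits 2-0 the bit within it. A standalone sketch of that mapping; the CRC value is made up purely for illustration, standing in for an ether_crc() result:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t multi_filter[8] = { 0 };
        uint32_t crc = 0xd4f2e1c0;              /* pretend ether_crc() result */
        uint32_t crc_bits = crc >> 26;          /* 6-bit index, 0..63 */

        /* same bit-set operation as asix_set_multicast() */
        multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7);

        printf("index %u -> byte %u, bit %u\n",
               crc_bits, crc_bits >> 3, crc_bits & 7);
        return 0;
}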
int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
{
        struct usbnet *dev = netdev_priv(netdev);
        __le16 res;
        u8 smsr;
        int i = 0;
        int ret;

        mutex_lock(&dev->phy_mutex);
        do {
                ret = asix_set_sw_mii(dev, 0);
                if (ret == -ENODEV || ret == -ETIMEDOUT)
                        break;
                usleep_range(1000, 1100);
                ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
                                    0, 0, 1, &smsr, 0);
        } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
        if (ret == -ENODEV || ret == -ETIMEDOUT) {
                mutex_unlock(&dev->phy_mutex);
                return ret;
        }

        asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id,
                      (__u16)loc, 2, &res, 0);
        asix_set_hw_mii(dev, 0);
        mutex_unlock(&dev->phy_mutex);

        netdev_dbg(dev->net, "asix_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
                   phy_id, loc, le16_to_cpu(res));

        return le16_to_cpu(res);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Robert Foss | 101 | 48.33% | 2 | 25.00% |
David T. Hollis | 48 | 22.97% | 1 | 12.50% |
David Brownell | 29 | 13.88% | 1 | 12.50% |
Arnd Bergmann | 16 | 7.66% | 1 | 12.50% |
Guenter Roeck | 10 | 4.78% | 1 | 12.50% |
Joe Perches | 4 | 1.91% | 1 | 12.50% |
Al Viro | 1 | 0.48% | 1 | 12.50% |
Total | 209 | 100.00% | 8 | 100.00% |
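Usage note (illustration only): a caller such as an ethtool or mii_if_info hook would read a standard PHY register through the helper above roughly as follows. MII_BMSR and BMSR_LSTATUS are the usual definitions from <linux/mii.h>; error handling is omitted in this hedged sketch:

int phy_id = asix_get_phy_addr(dev);                    /* internal PHY address */
int bmsr = asix_mdio_read(dev->net, phy_id, MII_BMSR);  /* basic mode status register */
bool link_up = bmsr >= 0 && (bmsr & BMSR_LSTATUS);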
void asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val)
{
        struct usbnet *dev = netdev_priv(netdev);
        __le16 res = cpu_to_le16(val);
        u8 smsr;
        int i = 0;
        int ret;

        netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
                   phy_id, loc, val);

        mutex_lock(&dev->phy_mutex);
        do {
                ret = asix_set_sw_mii(dev, 0);
                if (ret == -ENODEV)
                        break;
                usleep_range(1000, 1100);
                ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
                                    0, 0, 1, &smsr, 0);
        } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
        if (ret == -ENODEV) {
                mutex_unlock(&dev->phy_mutex);
                return;
        }

        asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id,
                       (__u16)loc, 2, &res, 0);
        asix_set_hw_mii(dev, 0);
        mutex_unlock(&dev->phy_mutex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Robert Foss | 99 | 50.51% | 2 | 25.00% |
David T. Hollis | 48 | 24.49% | 1 | 12.50% |
David Brownell | 27 | 13.78% | 1 | 12.50% |
Arnd Bergmann | 16 | 8.16% | 1 | 12.50% |
Joe Perches | 4 | 2.04% | 1 | 12.50% |
Al Viro | 1 | 0.51% | 1 | 12.50% |
Christian Riesch | 1 | 0.51% | 1 | 12.50% |
Total | 196 | 100.00% | 8 | 100.00% |
int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc)
{
        struct usbnet *dev = netdev_priv(netdev);
        __le16 res;
        u8 smsr;
        int i = 0;
        int ret;

        mutex_lock(&dev->phy_mutex);
        do {
                ret = asix_set_sw_mii(dev, 1);
                if (ret == -ENODEV || ret == -ETIMEDOUT)
                        break;
                usleep_range(1000, 1100);
                ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
                                    0, 0, 1, &smsr, 1);
        } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
        if (ret == -ENODEV || ret == -ETIMEDOUT) {
                mutex_unlock(&dev->phy_mutex);
                return ret;
        }

        asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id,
                      (__u16)loc, 2, &res, 1);
        asix_set_hw_mii(dev, 1);
        mutex_unlock(&dev->phy_mutex);

        netdev_dbg(dev->net, "asix_mdio_read_nopm() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
                   phy_id, loc, le16_to_cpu(res));

        return le16_to_cpu(res);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Robert Foss | 194 | 92.82% | 2 | 33.33% |
Guenter Roeck | 10 | 4.78% | 1 | 16.67% |
David Brownell | 2 | 0.96% | 1 | 16.67% |
David T. Hollis | 2 | 0.96% | 1 | 16.67% |
Christian Riesch | 1 | 0.48% | 1 | 16.67% |
Total | 209 | 100.00% | 6 | 100.00% |
void
asix_mdio_write_nopm(struct net_device *netdev, int phy_id, int loc, int val)
{
        struct usbnet *dev = netdev_priv(netdev);
        __le16 res = cpu_to_le16(val);
        u8 smsr;
        int i = 0;
        int ret;

        netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
                   phy_id, loc, val);

        mutex_lock(&dev->phy_mutex);
        do {
                ret = asix_set_sw_mii(dev, 1);
                if (ret == -ENODEV)
                        break;
                usleep_range(1000, 1100);
                ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
                                    0, 0, 1, &smsr, 1);
        } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
        if (ret == -ENODEV) {
                mutex_unlock(&dev->phy_mutex);
                return;
        }

        asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id,
                       (__u16)loc, 2, &res, 1);
        asix_set_hw_mii(dev, 1);
        mutex_unlock(&dev->phy_mutex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Robert Foss | 196 | 100.00% | 2 | 100.00% |
Total | 196 | 100.00% | 2 | 100.00% |
void asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
{
        struct usbnet *dev = netdev_priv(net);
        u8 opt;

        if (asix_read_cmd(dev, AX_CMD_READ_MONITOR_MODE,
                          0, 0, 1, &opt, 0) < 0) {
                wolinfo->supported = 0;
                wolinfo->wolopts = 0;
                return;
        }
        wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
        wolinfo->wolopts = 0;
        if (opt & AX_MONITOR_LINK)
                wolinfo->wolopts |= WAKE_PHY;
        if (opt & AX_MONITOR_MAGIC)
                wolinfo->wolopts |= WAKE_MAGIC;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David T. Hollis | 36 | 34.95% | 2 | 40.00% |
David Brownell | 28 | 27.18% | 1 | 20.00% |
Allan Chou | 24 | 23.30% | 1 | 20.00% |
Robert Foss | 15 | 14.56% | 1 | 20.00% |
Total | 103 | 100.00% | 5 | 100.00% |
int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
{
        struct usbnet *dev = netdev_priv(net);
        u8 opt = 0;

        if (wolinfo->wolopts & WAKE_PHY)
                opt |= AX_MONITOR_LINK;
        if (wolinfo->wolopts & WAKE_MAGIC)
                opt |= AX_MONITOR_MAGIC;

        if (asix_write_cmd(dev, AX_CMD_WRITE_MONITOR_MODE,
                           opt, 0, 0, NULL, 0) < 0)
                return -EINVAL;

        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David T. Hollis | 48 | 58.54% | 2 | 33.33% |
David Brownell | 30 | 36.59% | 1 | 16.67% |
Robert Foss | 2 | 2.44% | 1 | 16.67% |
Al Viro | 1 | 1.22% | 1 | 16.67% |
Christian Riesch | 1 | 1.22% | 1 | 16.67% |
Total | 82 | 100.00% | 6 | 100.00% |
int asix_get_eeprom_len(struct net_device *net)
{
        return AX_EEPROM_LEN;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David T. Hollis | 12 | 92.31% | 1 | 50.00% |
Christian Riesch | 1 | 7.69% | 1 | 50.00% |
Total | 13 | 100.00% | 2 | 100.00% |
int asix_get_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
                    u8 *data)
{
        struct usbnet *dev = netdev_priv(net);
        u16 *eeprom_buff;
        int first_word, last_word;
        int i;

        if (eeprom->len == 0)
                return -EINVAL;

        eeprom->magic = AX_EEPROM_MAGIC;

        first_word = eeprom->offset >> 1;
        last_word = (eeprom->offset + eeprom->len - 1) >> 1;

        eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1),
                              GFP_KERNEL);
        if (!eeprom_buff)
                return -ENOMEM;

        /* ax8817x returns 2 bytes from eeprom on read */
        for (i = first_word; i <= last_word; i++) {
                if (asix_read_cmd(dev, AX_CMD_READ_EEPROM, i, 0, 2,
                                  &eeprom_buff[i - first_word], 0) < 0) {
                        kfree(eeprom_buff);
                        return -EIO;
                }
        }

        memcpy(data, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);

        kfree(eeprom_buff);
        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christian Riesch | 104 | 52.79% | 1 | 33.33% |
David T. Hollis | 91 | 46.19% | 1 | 33.33% |
Robert Foss | 2 | 1.02% | 1 | 33.33% |
Total | 197 | 100.00% | 3 | 100.00% |
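Because the chip is read two EEPROM bytes at a time, the byte-oriented ethtool request is first widened to a range of 16-bit words, and an odd offset is handled by copying out of the word buffer starting at byte 1. A standalone sketch of that mapping, with arbitrary example values:

#include <stdio.h>

int main(void)
{
        int offset = 5, len = 6;                        /* request covers bytes 5..10 */
        int first_word = offset >> 1;                   /* word 2 (bytes 4-5) */
        int last_word = (offset + len - 1) >> 1;        /* word 5 (bytes 10-11) */
        int words = last_word - first_word + 1;         /* 4 words = 8 bytes read */

        printf("words %d..%d (%d total), copy starts at byte %d of the buffer\n",
               first_word, last_word, words, offset & 1);
        return 0;
}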
int asix_set_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
                    u8 *data)
{
        struct usbnet *dev = netdev_priv(net);
        u16 *eeprom_buff;
        int first_word, last_word;
        int i;
        int ret;

        netdev_dbg(net, "write EEPROM len %d, offset %d, magic 0x%x\n",
                   eeprom->len, eeprom->offset, eeprom->magic);

        if (eeprom->len == 0)
                return -EINVAL;

        if (eeprom->magic != AX_EEPROM_MAGIC)
                return -EINVAL;

        first_word = eeprom->offset >> 1;
        last_word = (eeprom->offset + eeprom->len - 1) >> 1;

        eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1),
                              GFP_KERNEL);
        if (!eeprom_buff)
                return -ENOMEM;

        /* align data to 16 bit boundaries, read the missing data from
           the EEPROM */
        if (eeprom->offset & 1) {
                ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, first_word, 0, 2,
                                    &eeprom_buff[0], 0);
                if (ret < 0) {
                        netdev_err(net, "Failed to read EEPROM at offset 0x%02x.\n", first_word);
                        goto free;
                }
        }

        if ((eeprom->offset + eeprom->len) & 1) {
                ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, last_word, 0, 2,
                                    &eeprom_buff[last_word - first_word], 0);
                if (ret < 0) {
                        netdev_err(net, "Failed to read EEPROM at offset 0x%02x.\n", last_word);
                        goto free;
                }
        }

        memcpy((u8 *)eeprom_buff + (eeprom->offset & 1), data, eeprom->len);

        /* write data to EEPROM */
        ret = asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0x0000, 0, 0, NULL, 0);
        if (ret < 0) {
                netdev_err(net, "Failed to enable EEPROM write\n");
                goto free;
        }
        msleep(20);

        for (i = first_word; i <= last_word; i++) {
                netdev_dbg(net, "write to EEPROM at offset 0x%02x, data 0x%04x\n",
                           i, eeprom_buff[i - first_word]);
                ret = asix_write_cmd(dev, AX_CMD_WRITE_EEPROM, i,
                                     eeprom_buff[i - first_word], 0, NULL, 0);
                if (ret < 0) {
                        netdev_err(net, "Failed to write EEPROM at offset 0x%02x.\n",
                                   i);
                        goto free;
                }
                msleep(20);
        }

        ret = asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0x0000, 0, 0, NULL, 0);
        if (ret < 0) {
                netdev_err(net, "Failed to disable EEPROM write\n");
                goto free;
        }

        ret = 0;
free:
        kfree(eeprom_buff);
        return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christian Riesch | 442 | 97.79% | 1 | 50.00% |
Robert Foss | 10 | 2.21% | 1 | 50.00% |
Total | 452 | 100.00% | 2 | 100.00% |
void asix_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
{
        /* Inherit standard device info */
        usbnet_get_drvinfo(net, info);
        strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
        strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David T. Hollis | 48 | 87.27% | 1 | 33.33% |
Jiri Pirko | 6 | 10.91% | 1 | 33.33% |
Grant Grundler | 1 | 1.82% | 1 | 33.33% |
Total | 55 | 100.00% | 3 | 100.00% |
int asix_set_mac_address(struct net_device *net, void *p)
{
        struct usbnet *dev = netdev_priv(net);
        struct asix_data *data = (struct asix_data *)&dev->data;
        struct sockaddr *addr = p;

        if (netif_running(net))
                return -EBUSY;
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);

        /* We use the 20 byte dev->data
         * for our 6 byte mac buffer
         * to avoid allocating memory that
         * is tricky to free later */
        memcpy(data->mac_addr, addr->sa_data, ETH_ALEN);
        asix_write_cmd_async(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
                             data->mac_addr);

        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jussi Kivilinna | 118 | 100.00% | 1 | 100.00% |
Total | 118 | 100.00% | 1 | 100.00% |
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
David T. Hollis | 1066 | 27.93% | 3 | 6.98% |
Robert Foss | 804 | 21.06% | 2 | 4.65% |
Christian Riesch | 587 | 15.38% | 4 | 9.30% |
David Brownell | 488 | 12.78% | 1 | 2.33% |
Lucas Stach | 199 | 5.21% | 1 | 2.33% |
Dean Jenkins | 190 | 4.98% | 5 | 11.63% |
Jussi Kivilinna | 125 | 3.27% | 2 | 4.65% |
Joe Perches | 73 | 1.91% | 1 | 2.33% |
Eric Dumazet | 67 | 1.76% | 3 | 6.98% |
Al Viro | 59 | 1.55% | 1 | 2.33% |
Arnd Bergmann | 32 | 0.84% | 1 | 2.33% |
Allan Chou | 24 | 0.63% | 1 | 2.33% |
Jiri Pirko | 22 | 0.58% | 4 | 9.30% |
Guenter Roeck | 20 | 0.52% | 1 | 2.33% |
Arnaldo Carvalho de Melo | 10 | 0.26% | 2 | 4.65% |
Ingo van Lil | 9 | 0.24% | 1 | 2.33% |
Ben Hutchings | 9 | 0.24% | 2 | 4.65% |
Ming Lei | 9 | 0.24% | 1 | 2.33% |
Grant Grundler | 6 | 0.16% | 1 | 2.33% |
Neil Jones | 5 | 0.13% | 1 | 2.33% |
Holger Eitzenberger | 4 | 0.10% | 1 | 2.33% |
Russ Dill | 4 | 0.10% | 1 | 2.33% |
Marek Vašut | 3 | 0.08% | 1 | 2.33% |
Stephen Hemminger | 1 | 0.03% | 1 | 2.33% |
Jeff Kirsher | 1 | 0.03% | 1 | 2.33% |
Total | 3817 | 100.00% | 43 | 100.00% |