Release 4.11 drivers/net/wireless/ti/wlcore/rx.c
/*
 * This file is part of wl1271
 *
 * Copyright (C) 2009 Nokia Corporation
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */
#include <linux/gfp.h>
#include <linux/sched.h>
#include "wlcore.h"
#include "debug.h"
#include "acx.h"
#include "rx.h"
#include "tx.h"
#include "io.h"
#include "hw_ops.h"
/*
 * TODO: this is here just for now, it must be removed when the data
 * operations are in place.
 */
#include "../wl12xx/reg.h"
static u32 wlcore_rx_get_buf_size(struct wl1271 *wl,
				  u32 rx_pkt_desc)
{
	if (wl->quirks & WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN)
		return (rx_pkt_desc & ALIGNED_RX_BUF_SIZE_MASK) >>
		       ALIGNED_RX_BUF_SIZE_SHIFT;

	return (rx_pkt_desc & RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV;
}
Contributors
Person | Tokens | Proportion | Commits | Commit Proportion |
Arik Nemtsov | 22 | 55.00% | 1 | 50.00% |
Luciano Coelho | 18 | 45.00% | 1 | 50.00% |
Total | 40 | 100.00% | 2 | 100.00% |
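The descriptor word packs the buffer length into a bit field, so the driver recovers it with a mask and shift; the quirk merely selects which field layout applies. Below is a minimal userspace sketch of the same pattern. The DEMO_* mask and shift values are invented for illustration and are not the real ALIGNED_RX_BUF_SIZE_* / RX_BUF_SIZE_* definitions from the wl12xx headers.

#include <stdint.h>
#include <stdio.h>

#define DEMO_RX_BUF_SIZE_MASK	0x000fff00u	/* assumed field at bits 8..19 */
#define DEMO_RX_BUF_SIZE_SHIFT	8

static uint32_t demo_rx_get_buf_size(uint32_t rx_pkt_desc)
{
	/* isolate the length field, then move it down to bit 0 */
	return (rx_pkt_desc & DEMO_RX_BUF_SIZE_MASK) >> DEMO_RX_BUF_SIZE_SHIFT;
}

int main(void)
{
	uint32_t desc = 0x0005dc00u;	/* 1500-byte length packed at bits 8..19 */

	printf("buf size: %u\n", (unsigned)demo_rx_get_buf_size(desc));	/* 1500 */
	return 0;
}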
static u32 wlcore_rx_get_align_buf_size(struct wl1271 *wl, u32 pkt_len)
{
	if (wl->quirks & WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN)
		return ALIGN(pkt_len, WL12XX_BUS_BLOCK_SIZE);

	return pkt_len;
}
Contributors
Person | Tokens | Proportion | Commits | Commit Proportion |
Arik Nemtsov | 33 | 100.00% | 1 | 100.00% |
Total | 33 | 100.00% | 1 | 100.00% |
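When the block-size quirk is set, every packet read over the bus must occupy a whole number of bus blocks, which is what ALIGN() provides: it rounds a length up to the next multiple of a power-of-two alignment. A self-contained sketch of that rounding follows; the 256-byte block is an assumption for illustration, not necessarily the real WL12XX_BUS_BLOCK_SIZE.

#include <stdio.h>

/* same arithmetic as the kernel's ALIGN(): round x up to the next
 * multiple of a, where a must be a power of two */
#define DEMO_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int block = 256;	/* assumed bus block size */

	printf("%u -> %u\n", 700u, DEMO_ALIGN(700u, block));	/* 700 -> 768 */
	printf("%u -> %u\n", 768u, DEMO_ALIGN(768u, block));	/* 768 -> 768 */
	return 0;
}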
static void wl1271_rx_status(struct wl1271 *wl,
			     struct wl1271_rx_descriptor *desc,
			     struct ieee80211_rx_status *status,
			     u8 beacon)
{
	memset(status, 0, sizeof(struct ieee80211_rx_status));

	if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == WL1271_RX_DESC_BAND_BG)
		status->band = NL80211_BAND_2GHZ;
	else
		status->band = NL80211_BAND_5GHZ;

	status->rate_idx = wlcore_rate_to_idx(wl, desc->rate, status->band);

	/* 11n support */
	if (desc->rate <= wl->hw_min_ht_rate)
		status->flag |= RX_FLAG_HT;

	/*
	 * Read the signal level and antenna diversity indication.
	 * The msb in the signal level is always set as it is a
	 * negative number.
	 * The antenna indication is the msb of the rssi.
	 */
	status->signal = ((desc->rssi & RSSI_LEVEL_BITMASK) | BIT(7));
	status->antenna = ((desc->rssi & ANT_DIVERSITY_BITMASK) >> 7);

	/*
	 * FIXME: In wl1251, the SNR should be divided by two. In wl1271 we
	 * need to divide by two for now, but TI has been discussing
	 * changing it. This needs to be rechecked.
	 */
	wl->noise = desc->rssi - (desc->snr >> 1);

	status->freq = ieee80211_channel_to_frequency(desc->channel,
						      status->band);

	if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) {
		u8 desc_err_code = desc->status & WL1271_RX_DESC_STATUS_MASK;

		status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED |
				RX_FLAG_DECRYPTED;

		if (unlikely(desc_err_code & WL1271_RX_DESC_MIC_FAIL)) {
			status->flag |= RX_FLAG_MMIC_ERROR;
			wl1271_warning("Michael MIC error. Desc: 0x%x",
				       desc_err_code);
		}
	}

	if (beacon)
		wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel,
						status->band);
}
Contributors
Person | Tokens | Proportion | Commits | Commit Proportion |
Luciano Coelho | 91 | 38.40% | 1 | 6.25% |
Guy Mishol | 28 | 11.81% | 2 | 12.50% |
Arik Nemtsov | 26 | 10.97% | 3 | 18.75% |
Teemu Paasikivi | 25 | 10.55% | 3 | 18.75% |
Victor Goldenshtein | 20 | 8.44% | 1 | 6.25% |
John W. Linville | 17 | 7.17% | 1 | 6.25% |
Shahar Levi | 14 | 5.91% | 1 | 6.25% |
Juuso Oikarinen | 13 | 5.49% | 2 | 12.50% |
Johannes Berg | 2 | 0.84% | 1 | 6.25% |
Bruno Randolf | 1 | 0.42% | 1 | 6.25% |
Total | 237 | 100.00% | 16 | 100.00% |
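The rssi byte does double duty: the low seven bits carry the signal level and the msb is the antenna-diversity indication, while the stored level is a negative dBm value whose sign bit is re-asserted with BIT(7). A userspace sketch of that decode, assuming RSSI_LEVEL_BITMASK is 0x7f and ANT_DIVERSITY_BITMASK is 0x80 (plausible from the code above, not verified against the headers):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t raw_rssi = 0xb6;	/* example descriptor byte */

	/* low 7 bits are the level; forcing bit 7 makes the value a
	 * proper negative number when read back as a signed byte */
	int8_t signal = (int8_t)((raw_rssi & 0x7f) | 0x80);

	/* the msb of the raw byte is the antenna indication */
	int antenna = (raw_rssi & 0x80) >> 7;

	printf("signal %d dBm, antenna %d\n", signal, antenna);	/* -74 dBm, 1 */
	return 0;
}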
static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
				 enum wl_rx_buf_align rx_align, u8 *hlid)
{
	struct wl1271_rx_descriptor *desc;
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	u8 *buf;
	u8 beacon = 0;
	u8 is_data = 0;
	u8 reserved = 0, offset_to_data = 0;
	u16 seq_num;
	u32 pkt_data_len;

	/*
	 * In PLT mode we seem to get frames and mac80211 warns about them;
	 * work around this by not retrieving them at all.
	 */
	if (unlikely(wl->plt))
		return -EINVAL;

	pkt_data_len = wlcore_hw_get_rx_packet_len(wl, data, length);
	if (!pkt_data_len) {
		wl1271_error("Invalid packet arrived from HW. length %d",
			     length);
		return -EINVAL;
	}

	if (rx_align == WLCORE_RX_BUF_UNALIGNED)
		reserved = RX_BUF_ALIGN;
	else if (rx_align == WLCORE_RX_BUF_PADDED)
		offset_to_data = RX_BUF_ALIGN;

	/* the data read starts with the descriptor */
	desc = (struct wl1271_rx_descriptor *) data;

	if (desc->packet_class == WL12XX_RX_CLASS_LOGGER) {
		size_t len = length - sizeof(*desc);
		wl12xx_copy_fwlog(wl, data + sizeof(*desc), len);
		return 0;
	}

	/* discard corrupted packets */
	if (desc->status & WL1271_RX_DESC_DECRYPT_FAIL) {
		hdr = (void *)(data + sizeof(*desc) + offset_to_data);
		wl1271_warning("corrupted packet in RX: status: 0x%x len: %d",
			       desc->status & WL1271_RX_DESC_STATUS_MASK,
			       pkt_data_len);
		wl1271_dump((DEBUG_RX|DEBUG_CMD), "PKT: ", data + sizeof(*desc),
			    min(pkt_data_len,
				ieee80211_hdrlen(hdr->frame_control)));
		return -EINVAL;
	}

	/* skb length not including rx descriptor */
	skb = __dev_alloc_skb(pkt_data_len + reserved, GFP_KERNEL);
	if (!skb) {
		wl1271_error("Couldn't allocate RX frame");
		return -ENOMEM;
	}

	/* reserve the unaligned payload (if any) */
	skb_reserve(skb, reserved);

	buf = skb_put(skb, pkt_data_len);

	/*
	 * Copy the packet from the aggregation buffer into the skb, stripping
	 * the rx descriptor and taking care of payload alignment. For
	 * unaligned packets, copying at an offset of 2 bytes guarantees that
	 * the IP header payload is aligned to 4 bytes.
	 */
	memcpy(buf, data + sizeof(*desc), pkt_data_len);
	if (rx_align == WLCORE_RX_BUF_PADDED)
		skb_pull(skb, RX_BUF_ALIGN);

	*hlid = desc->hlid;

	hdr = (struct ieee80211_hdr *)skb->data;
	if (ieee80211_is_beacon(hdr->frame_control))
		beacon = 1;
	if (ieee80211_is_data_present(hdr->frame_control))
		is_data = 1;

	wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
	wlcore_hw_set_rx_csum(wl, desc, skb);

	seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
	wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d hlid %d", skb,
		     skb->len - desc->pad_len,
		     beacon ? "beacon" : "",
		     seq_num, *hlid);

	skb_queue_tail(&wl->deferred_rx_queue, skb);
	queue_work(wl->freezable_wq, &wl->netstack_work);

	return is_data;
}
Contributors
Person | Tokens | Proportion | Commits | Commit Proportion |
Arik Nemtsov | 171 | 36.46% | 5 | 23.81% |
Luciano Coelho | 116 | 24.73% | 1 | 4.76% |
Eliad Peller | 77 | 16.42% | 8 | 38.10% |
Ido Yariv | 69 | 14.71% | 3 | 14.29% |
Shahar Levi | 22 | 4.69% | 1 | 4.76% |
Kalle Valo | 9 | 1.92% | 1 | 4.76% |
Luis R. Rodriguez | 3 | 0.64% | 1 | 4.76% |
Eyal Shapira | 2 | 0.43% | 1 | 4.76% |
Total | 469 | 100.00% | 21 | 100.00% |
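The 2-byte shuffle around RX_BUF_ALIGN is the usual NET_IP_ALIGN-style trick: an 802.11 header whose length is 2 (mod 4), such as a 26-byte QoS data header, would otherwise leave the IP payload on a half-word boundary, so reserving 2 bytes of headroom before the copy pushes the payload back onto a 4-byte boundary. A standalone sketch of that arithmetic; the buffer sizes and header length are illustrative assumptions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_RX_BUF_ALIGN	2
#define DEMO_HDR_LEN		26	/* e.g. a QoS data header: 2 (mod 4) */

int main(void)
{
	_Alignas(4) uint8_t buf[80];	/* stand-in for the skb data area */
	uint8_t frame[64] = { 0 };	/* pretend wire frame */

	/* reserve 2 bytes of headroom, then copy: mirrors
	 * skb_reserve(skb, reserved) + memcpy() in the driver */
	memcpy(buf + DEMO_RX_BUF_ALIGN, frame, sizeof(frame));

	uintptr_t payload = (uintptr_t)(buf + DEMO_RX_BUF_ALIGN + DEMO_HDR_LEN);
	printf("payload offset mod 4 = %lu\n",
	       (unsigned long)(payload % 4));	/* prints 0: aligned */
	return 0;
}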
int wlcore_rx(struct wl1271 *wl, struct wl_fw_status *status)
{
	unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0};
	u32 buf_size;
	u32 fw_rx_counter = status->fw_rx_counter % wl->num_rx_desc;
	u32 drv_rx_counter = wl->rx_counter % wl->num_rx_desc;
	u32 rx_counter;
	u32 pkt_len, align_pkt_len;
	u32 pkt_offset, des;
	u8 hlid;
	enum wl_rx_buf_align rx_align;
	int ret = 0;

	/* update rates per link */
	hlid = status->counters.hlid;
	if (hlid < WLCORE_MAX_LINKS)
		wl->links[hlid].fw_rate_mbps =
				status->counters.tx_last_rate_mbps;

	while (drv_rx_counter != fw_rx_counter) {
		buf_size = 0;
		rx_counter = drv_rx_counter;
		while (rx_counter != fw_rx_counter) {
			des = le32_to_cpu(status->rx_pkt_descs[rx_counter]);
			pkt_len = wlcore_rx_get_buf_size(wl, des);
			align_pkt_len = wlcore_rx_get_align_buf_size(wl,
								     pkt_len);
			if (buf_size + align_pkt_len > wl->aggr_buf_size)
				break;
			buf_size += align_pkt_len;
			rx_counter++;
			rx_counter %= wl->num_rx_desc;
		}

		if (buf_size == 0) {
			wl1271_warning("received empty data");
			break;
		}

		/* Read all available packets at once */
		des = le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]);
		ret = wlcore_hw_prepare_read(wl, des, buf_size);
		if (ret < 0)
			goto out;

		ret = wlcore_read_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
				       buf_size, true);
		if (ret < 0)
			goto out;

		/* Split data into separate packets */
		pkt_offset = 0;
		while (pkt_offset < buf_size) {
			des = le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]);
			pkt_len = wlcore_rx_get_buf_size(wl, des);
			rx_align = wlcore_hw_get_rx_buf_align(wl, des);

			/*
			 * The handle-data call can only fail in memory-outage
			 * conditions; in that case the received frame will
			 * just be dropped.
			 */
			if (wl1271_rx_handle_data(wl,
						  wl->aggr_buf + pkt_offset,
						  pkt_len, rx_align,
						  &hlid) == 1) {
				if (hlid < wl->num_links)
					__set_bit(hlid, active_hlids);
				else
					WARN(1,
					     "hlid (%d) exceeded MAX_LINKS\n",
					     hlid);
			}

			wl->rx_counter++;
			drv_rx_counter++;
			drv_rx_counter %= wl->num_rx_desc;
			pkt_offset += wlcore_rx_get_align_buf_size(wl, pkt_len);
		}
	}

	/*
	 * Write the driver's packet counter to the FW. This is only required
	 * for older hardware revisions.
	 */
	if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
		ret = wlcore_write32(wl, WL12XX_REG_RX_DRIVER_COUNTER,
				     wl->rx_counter);
		if (ret < 0)
			goto out;
	}

	wl12xx_rearm_rx_streaming(wl, active_hlids);

out:
	return ret;
}
Contributors
Person | Tokens | Proportion | Commits | Commit Proportion |
Ido Yariv | 141 | 33.49% | 5 | 20.83% |
Luciano Coelho | 113 | 26.84% | 6 | 25.00% |
Arik Nemtsov | 77 | 18.29% | 3 | 12.50% |
Eliad Peller | 48 | 11.40% | 5 | 20.83% |
Maxim Altshul | 30 | 7.13% | 1 | 4.17% |
Shahar Levi | 7 | 1.66% | 1 | 4.17% |
Igal Chernobelsky | 3 | 0.71% | 1 | 4.17% |
Juuso Oikarinen | 2 | 0.48% | 2 | 8.33% |
Total | 421 | 100.00% | 24 | 100.00% |
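The outer loop batches as many pending descriptors as will fit in the aggregation buffer, pulls them in a single bus read, and only then splits the buffer into individual frames; drv_rx_counter chases fw_rx_counter modulo the ring size. A userspace sketch of just that counter-and-budget arithmetic, with every size invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define NUM_RX_DESC	16
#define AGGR_BUF_SIZE	4096

/* stand-in for wlcore_rx_get_align_buf_size(); 256-byte blocks assumed */
static uint32_t demo_align(uint32_t len)
{
	return (len + 255) & ~255u;
}

int main(void)
{
	uint32_t pkt_len[NUM_RX_DESC] = { 1500, 1500, 800, 60, 1500, 300 };
	uint32_t drv = 0, fw = 6;	/* pretend counters, already mod NUM_RX_DESC */

	while (drv != fw) {
		uint32_t buf_size = 0, rx = drv;

		/* budget packets into one aggregation-buffer-sized read */
		while (rx != fw) {
			uint32_t apl = demo_align(pkt_len[rx]);

			if (buf_size + apl > AGGR_BUF_SIZE)
				break;
			buf_size += apl;
			rx = (rx + 1) % NUM_RX_DESC;
		}

		if (buf_size == 0)
			break;	/* mirrors the "received empty data" bail-out */

		printf("one bus read: %u bytes, descs %u..%u\n",
		       buf_size, drv, (rx + NUM_RX_DESC - 1) % NUM_RX_DESC);
		drv = rx;	/* the split loop advances drv per packet */
	}
	return 0;
}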
#ifdef CONFIG_PM
int wl1271_rx_filter_enable(struct wl1271 *wl,
			    int index, bool enable,
			    struct wl12xx_rx_filter *filter)
{
	int ret;

	if (!!test_bit(index, wl->rx_filter_enabled) == enable) {
		wl1271_warning("Request to enable an already "
			       "enabled rx filter %d", index);
		return 0;
	}

	ret = wl1271_acx_set_rx_filter(wl, index, enable, filter);
	if (ret) {
		wl1271_error("Failed to %s rx data filter %d (err=%d)",
			     enable ? "enable" : "disable", index, ret);
		return ret;
	}

	if (enable)
		__set_bit(index, wl->rx_filter_enabled);
	else
		__clear_bit(index, wl->rx_filter_enabled);

	return 0;
}
Contributors
Person | Tokens | Proportion | Commits | Commit Proportion |
Eyal Shapira | 90 | 78.26% | 1 | 50.00% |
Nadim Zubidat | 25 | 21.74% | 1 | 50.00% |
Total | 115 | 100.00% | 2 | 100.00% |
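The rx_filter_enabled bitmap mirrors which hardware filter slots are programmed, so a request that matches the current state returns early without issuing an ACX command. A userspace sketch of that bookkeeping, using plain bit operations in place of test_bit()/__set_bit():

#include <stdbool.h>
#include <stdio.h>

static unsigned long filter_enabled;	/* one bit per filter slot */

static int demo_filter_enable(int index, bool enable)
{
	bool cur = !!(filter_enabled & (1UL << index));

	if (cur == enable) {
		printf("filter %d already %s, skipping\n",
		       index, enable ? "enabled" : "disabled");
		return 0;
	}

	/* ... an ACX command to the firmware would go here ... */

	if (enable)
		filter_enabled |= 1UL << index;
	else
		filter_enabled &= ~(1UL << index);
	return 0;
}

int main(void)
{
	demo_filter_enable(3, true);	/* programs the firmware */
	demo_filter_enable(3, true);	/* skipped: state already matches */
	return 0;
}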
int wl1271_rx_filter_clear_all(struct wl1271 *wl)
{
	int i, ret = 0;

	for (i = 0; i < WL1271_MAX_RX_FILTERS; i++) {
		if (!test_bit(i, wl->rx_filter_enabled))
			continue;
		ret = wl1271_rx_filter_enable(wl, i, 0, NULL);
		if (ret)
			goto out;
	}

out:
	return ret;
}
Contributors
Person | Tokens | Proportion | Commits | Commit Proportion |
Eyal Shapira | 46 | 65.71% | 1 | 33.33% |
Arik Nemtsov | 19 | 27.14% | 1 | 33.33% |
Nadim Zubidat | 5 | 7.14% | 1 | 33.33% |
Total | 70 | 100.00% | 3 | 100.00% |
#endif /* CONFIG_PM */
Overall Contributors
Person | Tokens | Proportion | Commits | Commit Proportion |
Luciano Coelho | 360 | 25.30% | 8 | 13.33% |
Arik Nemtsov | 351 | 24.67% | 9 | 15.00% |
Ido Yariv | 213 | 14.97% | 7 | 11.67% |
Eyal Shapira | 144 | 10.12% | 3 | 5.00% |
Eliad Peller | 127 | 8.92% | 11 | 18.33% |
Shahar Levi | 44 | 3.09% | 3 | 5.00% |
Nadim Zubidat | 30 | 2.11% | 1 | 1.67% |
Maxim Altshul | 30 | 2.11% | 1 | 1.67% |
Guy Mishol | 28 | 1.97% | 2 | 3.33% |
Teemu Paasikivi | 25 | 1.76% | 3 | 5.00% |
Victor Goldenshtein | 20 | 1.41% | 1 | 1.67% |
John W. Linville | 17 | 1.19% | 1 | 1.67% |
Juuso Oikarinen | 15 | 1.05% | 4 | 6.67% |
Kalle Valo | 9 | 0.63% | 1 | 1.67% |
Luis R. Rodriguez | 3 | 0.21% | 1 | 1.67% |
Igal Chernobelsky | 3 | 0.21% | 1 | 1.67% |
Johannes Berg | 2 | 0.14% | 1 | 1.67% |
Bruno Randolf | 1 | 0.07% | 1 | 1.67% |
Tejun Heo | 1 | 0.07% | 1 | 1.67% |
Total | 1423 | 100.00% | 60 | 100.00% |