Release 4.13 drivers/net/wireless/ti/wlcore/tx.c
  
  
  
/*
 * This file is part of wl1271
 *
 * Copyright (C) 2009 Nokia Corporation
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/spinlock.h>
#include "wlcore.h"
#include "debug.h"
#include "io.h"
#include "ps.h"
#include "tx.h"
#include "event.h"
#include "hw_ops.h"
/*
 * TODO: this is here just for now, it must be removed when the data
 * operations are in place.
 */
#include "../wl12xx/reg.h"
static int wl1271_set_default_wep_key(struct wl1271 *wl,
				      struct wl12xx_vif *wlvif, u8 id)
{
	int ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
	if (is_ap)
		ret = wl12xx_cmd_set_default_wep_key(wl, id,
						     wlvif->ap.bcast_hlid);
	else
		ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid);
	if (ret < 0)
		return ret;
	wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Arik Nemtsov | 73 | 79.35% | 2 | 33.33% | 
| Eliad Peller | 19 | 20.65% | 4 | 66.67% | 
| Total | 92 | 100.00% | 6 | 100.00% | 
static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
{
	int id;
	id = find_first_zero_bit(wl->tx_frames_map, wl->num_tx_desc);
	if (id >= wl->num_tx_desc)
		return -EBUSY;
	__set_bit(id, wl->tx_frames_map);
	wl->tx_frames[id] = skb;
	wl->tx_frames_cnt++;
	return id;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Ido Yariv | 40 | 57.14% | 1 | 33.33% | 
| Luciano Coelho | 24 | 34.29% | 1 | 33.33% | 
| Arik Nemtsov | 6 | 8.57% | 1 | 33.33% | 
| Total | 70 | 100.00% | 3 | 100.00% | 
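wl1271_alloc_tx_id() and wl1271_free_tx_id() together form a bitmap ID allocator over wl->tx_frames_map: the lowest clear bit becomes the descriptor id, and freeing clears it again. Below is a minimal userspace sketch of that pattern; the 32-entry size, names, and helpers are illustrative stand-ins, not driver code.

#include <stdio.h>

#define NUM_DESC 32                       /* stand-in for wl->num_tx_desc */
static unsigned long frames_map;          /* one bit per in-flight frame id */

static int alloc_id(void)
{
	for (int id = 0; id < NUM_DESC; id++) {       /* find_first_zero_bit */
		if (!(frames_map & (1UL << id))) {
			frames_map |= 1UL << id;      /* __set_bit */
			return id;
		}
	}
	return -1;                                    /* all ids busy (-EBUSY) */
}

static void free_id(int id)
{
	frames_map &= ~(1UL << id);                   /* clear the id bit */
}

int main(void)
{
	int a = alloc_id(), b = alloc_id();
	printf("allocated ids %d and %d\n", a, b);    /* 0 and 1 */
	free_id(a);
	printf("lowest free id is reused: %d\n", alloc_id()); /* 0 */
	return 0;
}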
void wl1271_free_tx_id(struct wl1271 *wl, int id)
{
	if (__test_and_clear_bit(id, wl->tx_frames_map)) {
		if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc))
			clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
		wl->tx_frames[id] = NULL;
		wl->tx_frames_cnt--;
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Ido Yariv | 45 | 71.43% | 2 | 40.00% | 
| Luciano Coelho | 11 | 17.46% | 1 | 20.00% | 
| Juuso Oikarinen | 4 | 6.35% | 1 | 20.00% | 
| Arik Nemtsov | 3 | 4.76% | 1 | 20.00% | 
| Total | 63 | 100.00% | 5 | 100.00% | 
EXPORT_SYMBOL(wl1271_free_tx_id);
static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
						 struct wl12xx_vif *wlvif,
						 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	hdr = (struct ieee80211_hdr *)(skb->data +
				       sizeof(struct wl1271_tx_hw_descr));
	if (!ieee80211_is_auth(hdr->frame_control))
		return;
	/*
         * add the station to the known list before transmitting the
         * authentication response. this way it won't get de-authed by FW
         * when transmitting too soon.
         */
	wl1271_acx_set_inconnection_sta(wl, wlvif, hdr->addr1);
	/*
         * ROC for 1 second on the AP channel for completing the connection.
         * Note the ROC will be continued by the update_sta_state callbacks
         * once the station reaches the associated state.
         */
	wlcore_update_inconn_sta(wl, wlvif, NULL, true);
	wlvif->pending_auth_reply_time = jiffies;
	cancel_delayed_work(&wlvif->pending_auth_complete_work);
	ieee80211_queue_delayed_work(wl->hw,
				&wlvif->pending_auth_complete_work,
				msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Arik Nemtsov | 109 | 98.20% | 2 | 66.67% | 
| Eliad Peller | 2 | 1.80% | 1 | 33.33% | 
| Total | 111 | 100.00% | 3 | 100.00% | 
static void wl1271_tx_regulate_link(struct wl1271 *wl,
				    struct wl12xx_vif *wlvif,
				    u8 hlid)
{
	bool fw_ps;
	u8 tx_pkts;
	if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
		return;
	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
	tx_pkts = wl->links[hlid].allocated_pkts;
	/*
         * if in FW PS and there is enough data in FW we can put the link
         * into high-level PS and clean out its TX queues.
         * Make an exception if this is the only connected link. In this
         * case FW-memory congestion is less of a problem.
         * Note that a single connected STA means 2*ap_count + 1 active links,
         * since we must account for the global and broadcast AP links
         * for each AP. The "fw_ps" check assures us the other link is a STA
         * connected to the AP. Otherwise the FW would not set the PSM bit.
         */
	if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
	    tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Arik Nemtsov | 75 | 76.53% | 5 | 62.50% | 
| Eliad Peller | 23 | 23.47% | 3 | 37.50% | 
| Total | 98 | 100.00% | 8 | 100.00% | 
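A worked instance of the threshold above: with a single AP interface (ap_count = 1), the global and broadcast AP links already account for two active links, so one connected STA gives active_link_count = 3 = 2*ap_count + 1. The strict > comparison therefore only passes once a second peer exists, which is exactly the "only connected link" exception described in the comment.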
bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
{
	return wl->dummy_packet == skb;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Eliad Peller | 22 | 100.00% | 1 | 100.00% | 
| Total | 22 | 100.00% | 1 | 100.00% | 
EXPORT_SYMBOL(wl12xx_is_dummy_packet);
static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				struct sk_buff *skb, struct ieee80211_sta *sta)
{
	if (sta) {
		struct wl1271_station *wl_sta;
		wl_sta = (struct wl1271_station *)sta->drv_priv;
		return wl_sta->hlid;
	} else {
		struct ieee80211_hdr *hdr;
		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
			return wl->system_hlid;
		hdr = (struct ieee80211_hdr *)skb->data;
		if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
			return wlvif->ap.bcast_hlid;
		else
			return wlvif->ap.global_hlid;
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Arik Nemtsov | 74 | 64.35% | 3 | 42.86% | 
| Eliad Peller | 41 | 35.65% | 4 | 57.14% | 
| Total | 115 | 100.00% | 7 | 100.00% | 
u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
		      struct sk_buff *skb, struct ieee80211_sta *sta)
{
	struct ieee80211_tx_info *control;
	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);
	control = IEEE80211_SKB_CB(skb);
	if (control->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
		wl1271_debug(DEBUG_TX, "tx offchannel");
		return wlvif->dev_hlid;
	}
	return wlvif->sta.hlid;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Eliad Peller | 79 | 91.86% | 6 | 85.71% | 
| Arik Nemtsov | 7 | 8.14% | 1 | 14.29% | 
| Total | 86 | 100.00% | 7 | 100.00% | 
unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
					  unsigned int packet_length)
{
	if ((wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) ||
	    !(wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN))
		return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
	else
		return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Ido Yariv | 36 | 70.59% | 1 | 25.00% | 
| Ido Reis | 13 | 25.49% | 1 | 25.00% | 
| Arik Nemtsov | 2 | 3.92% | 2 | 50.00% | 
| Total | 51 | 100.00% | 4 | 100.00% | 
EXPORT_SYMBOL(wlcore_calc_packet_alignment);
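wlcore_calc_packet_alignment() rounds a packet length up to either a word boundary or a bus block. A standalone sketch of the rounding, assuming the usual power-of-two ALIGN() semantics and taking 4 and 256 as stand-ins for WL1271_TX_ALIGN_TO and WL12XX_BUS_BLOCK_SIZE (the 256-byte case matches the "wl128x with SDIO" note later in this file):

#include <stdio.h>

/* power-of-two round-up, matching the kernel ALIGN() behavior */
#define ALIGN_POW2(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned int len = 123;                        /* arbitrary packet length */
	printf("word-aligned:  %lu\n", ALIGN_POW2(len, 4));    /* 124 */
	printf("block-aligned: %lu\n", ALIGN_POW2(len, 256));  /* 256 */
	return 0;
}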
static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      struct sk_buff *skb, u32 extra, u32 buf_offset,
			      u8 hlid, bool is_gem)
{
	struct wl1271_tx_hw_descr *desc;
	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
	u32 total_blocks;
	int id, ret = -EBUSY, ac;
	u32 spare_blocks;
	if (buf_offset + total_len > wl->aggr_buf_size)
		return -EAGAIN;
	spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem);
	/* allocate free identifier for the packet */
	id = wl1271_alloc_tx_id(wl, skb);
	if (id < 0)
		return id;
	total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);
	if (total_blocks <= wl->tx_blocks_available) {
		desc = skb_push(skb, total_len - skb->len);
		wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks,
					     spare_blocks);
		desc->id = id;
		wl->tx_blocks_available -= total_blocks;
		wl->tx_allocated_blocks += total_blocks;
		/*
                 * If the FW was empty before, arm the Tx watchdog. Also do
                 * this on the first Tx after resume, as we always cancel the
                 * watchdog on suspend.
                 */
		if (wl->tx_allocated_blocks == total_blocks ||
		    test_and_clear_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags))
			wl12xx_rearm_tx_watchdog_locked(wl);
		ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		wl->tx_allocated_pkts[ac]++;
		if (test_bit(hlid, wl->links_map))
			wl->links[hlid].allocated_pkts++;
		ret = 0;
		wl1271_debug(DEBUG_TX,
			     "tx_allocate: size: %d, blocks: %d, id: %d",
			     total_len, total_blocks, id);
	} else {
		wl1271_free_tx_id(wl, id);
	}
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Luciano Coelho | 132 | 49.44% | 2 | 8.70% | 
| Arik Nemtsov | 88 | 32.96% | 10 | 43.48% | 
| Ido Yariv | 25 | 9.36% | 4 | 17.39% | 
| Eliad Peller | 11 | 4.12% | 3 | 13.04% | 
| Shahar Levi | 5 | 1.87% | 2 | 8.70% | 
| Juuso Oikarinen | 3 | 1.12% | 1 | 4.35% | 
| Igal Chernobelsky | 3 | 1.12% | 1 | 4.35% | 
| Total | 267 | 100.00% | 23 | 100.00% | 
static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			       struct sk_buff *skb, u32 extra,
			       struct ieee80211_tx_info *control, u8 hlid)
{
	struct timespec ts;
	struct wl1271_tx_hw_descr *desc;
	int ac, rate_idx;
	s64 hosttime;
	u16 tx_attr = 0;
	__le16 frame_control;
	struct ieee80211_hdr *hdr;
	u8 *frame_start;
	bool is_dummy;
	desc = (struct wl1271_tx_hw_descr *) skb->data;
	frame_start = (u8 *)(desc + 1);
	hdr = (struct ieee80211_hdr *)(frame_start + extra);
	frame_control = hdr->frame_control;
	/* relocate space for security header */
	if (extra) {
		int hdrlen = ieee80211_hdrlen(frame_control);
		memmove(frame_start, hdr, hdrlen);
		skb_set_network_header(skb, skb_network_offset(skb) + extra);
	}
	/* configure packet life time */
	getnstimeofday(&ts);
	hosttime = (timespec_to_ns(&ts) >> 10);
	desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
	is_dummy = wl12xx_is_dummy_packet(wl, skb);
	if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
		desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
	else
		desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);
	/* queue */
	ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
	desc->tid = skb->priority;
	if (is_dummy) {
		/*
                 * FW expects the dummy packet to have an invalid session id -
                 * any session id that is different from the one set in the join
                 */
		tx_attr = (SESSION_COUNTER_INVALID <<
			   TX_HW_ATTR_OFST_SESSION_COUNTER) &
			   TX_HW_ATTR_SESSION_COUNTER;
		tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
	} else if (wlvif) {
		u8 session_id = wl->session_ids[hlid];
		if ((wl->quirks & WLCORE_QUIRK_AP_ZERO_SESSION_ID) &&
		    (wlvif->bss_type == BSS_TYPE_AP_BSS))
			session_id = 0;
		/* configure the tx attributes */
		tx_attr = session_id << TX_HW_ATTR_OFST_SESSION_COUNTER;
	}
	desc->hlid = hlid;
	if (is_dummy || !wlvif)
		rate_idx = 0;
	else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
		/*
                 * if the packets are data packets
                 * send them with AP rate policies (EAPOLs are an exception),
                 * otherwise use default basic rates
                 */
		if (skb->protocol == cpu_to_be16(ETH_P_PAE))
			rate_idx = wlvif->sta.basic_rate_idx;
		else if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
			rate_idx = wlvif->sta.p2p_rate_idx;
		else if (ieee80211_is_data(frame_control))
			rate_idx = wlvif->sta.ap_rate_idx;
		else
			rate_idx = wlvif->sta.basic_rate_idx;
	} else {
		if (hlid == wlvif->ap.global_hlid)
			rate_idx = wlvif->ap.mgmt_rate_idx;
		else if (hlid == wlvif->ap.bcast_hlid ||
			 skb->protocol == cpu_to_be16(ETH_P_PAE) ||
			 !ieee80211_is_data(frame_control))
			/*
                         * send non-data, bcast and EAPOLs using the
                         * min basic rate
                         */
			rate_idx = wlvif->ap.bcast_rate_idx;
		else
			rate_idx = wlvif->ap.ucast_rate_idx[ac];
	}
	tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
	/* for WEP shared auth - no fw encryption is needed */
	if (ieee80211_is_auth(frame_control) &&
	    ieee80211_has_protected(frame_control))
		tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;
	/* send EAPOL frames as voice */
	if (control->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)
		tx_attr |= TX_HW_ATTR_EAPOL_FRAME;
	desc->tx_attr = cpu_to_le16(tx_attr);
	wlcore_hw_set_tx_desc_csum(wl, desc, skb);
	wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Eliad Peller | 169 | 31.95% | 12 | 38.71% | 
| Arik Nemtsov | 142 | 26.84% | 7 | 22.58% | 
| Luciano Coelho | 71 | 13.42% | 2 | 6.45% | 
| Juuso Oikarinen | 49 | 9.26% | 2 | 6.45% | 
| Igal Chernobelsky | 37 | 6.99% | 2 | 6.45% | 
| Shahar Levi | 35 | 6.62% | 2 | 6.45% | 
| Eyal Shapira | 12 | 2.27% | 1 | 3.23% | 
| Kalle Valo | 11 | 2.08% | 1 | 3.23% | 
| John W. Linville | 2 | 0.38% | 1 | 3.23% | 
| Ido Yariv | 1 | 0.19% | 1 | 3.23% | 
| Total | 529 | 100.00% | 31 | 100.00% | 
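One detail worth noting in wl1271_tx_fill_hdr(): shifting the nanosecond timestamp right by 10 divides it by 1024, a cheap approximation of microseconds that runs about 2.4% slow compared with a true divide by 1000. Since the firmware only ever sees the difference against wl->time_offset, which is maintained in the same units, the relative lifetime bookkeeping is consistent.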
/* caller must hold wl->mutex */
static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   struct sk_buff *skb, u32 buf_offset, u8 hlid)
{
	struct ieee80211_tx_info *info;
	u32 extra = 0;
	int ret = 0;
	u32 total_len;
	bool is_dummy;
	bool is_gem = false;
	if (!skb) {
		wl1271_error("discarding null skb");
		return -EINVAL;
	}
	if (hlid == WL12XX_INVALID_LINK_ID) {
		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
		return -EINVAL;
	}
	info = IEEE80211_SKB_CB(skb);
	is_dummy = wl12xx_is_dummy_packet(wl, skb);
	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
		extra = WL1271_EXTRA_SPACE_TKIP;
	if (info->control.hw_key) {
		bool is_wep;
		u8 idx = info->control.hw_key->hw_key_idx;
		u32 cipher = info->control.hw_key->cipher;
		is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
			 (cipher == WLAN_CIPHER_SUITE_WEP104);
		if (WARN_ON(is_wep && wlvif && wlvif->default_key != idx)) {
			ret = wl1271_set_default_wep_key(wl, wlvif, idx);
			if (ret < 0)
				return ret;
			wlvif->default_key = idx;
		}
		is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
	}
	ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
				 is_gem);
	if (ret < 0)
		return ret;
	wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);
	if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
		wl1271_tx_ap_update_inconnection_sta(wl, wlvif, skb);
		wl1271_tx_regulate_link(wl, wlvif, hlid);
	}
	/*
         * The length of each packet is stored in terms of
         * words. Thus, we must pad the skb data to make sure its
         * length is aligned.  The number of padding bytes is computed
         * and set in wl1271_tx_fill_hdr.
         * In special cases, we want to align to a specific block size
         * (eg. for wl128x with SDIO we align to 256).
         */
	total_len = wlcore_calc_packet_alignment(wl, skb->len);
	memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
	memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
	/* Revert side effects in the dummy packet skb, so it can be reused */
	if (is_dummy)
		skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
	return total_len;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Luciano Coelho | 140 | 36.94% | 1 | 3.70% | 
| Arik Nemtsov | 119 | 31.40% | 11 | 40.74% | 
| Ido Yariv | 61 | 16.09% | 3 | 11.11% | 
| Eliad Peller | 37 | 9.76% | 6 | 22.22% | 
| Eyal Shapira | 7 | 1.85% | 1 | 3.70% | 
| Juuso Oikarinen | 5 | 1.32% | 1 | 3.70% | 
| Shahar Levi | 5 | 1.32% | 1 | 3.70% | 
| Johannes Berg | 2 | 0.53% | 1 | 3.70% | 
| Victor Goldenshtein | 2 | 0.53% | 1 | 3.70% | 
| Yoni Divinsky | 1 | 0.26% | 1 | 3.70% | 
| Total | 379 | 100.00% | 27 | 100.00% | 
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
				enum nl80211_band rate_band)
{
	struct ieee80211_supported_band *band;
	u32 enabled_rates = 0;
	int bit;
	band = wl->hw->wiphy->bands[rate_band];
	for (bit = 0; bit < band->n_bitrates; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= band->bitrates[bit].hw_value;
		rate_set >>= 1;
	}
	/* MCS rates indication are on bits 16 - 31 */
	rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;
	for (bit = 0; bit < 16; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
		rate_set >>= 1;
	}
	return enabled_rates;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Juuso Oikarinen | 79 | 62.70% | 1 | 20.00% | 
| Shahar Levi | 40 | 31.75% | 1 | 20.00% | 
| Eliad Peller | 4 | 3.17% | 1 | 20.00% | 
| Arik Nemtsov | 2 | 1.59% | 1 | 20.00% | 
| Johannes Berg | 1 | 0.79% | 1 | 20.00% | 
| Total | 126 | 100.00% | 5 | 100.00% | 
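wl1271_tx_enabled_rates_get() expands a mac80211 rate bitmap into the firmware's rate representation by walking it bit by bit against the band table, then repeating the walk over 16 MCS bits starting at HW_HT_RATES_OFFSET. A reduced sketch of the first loop, using an invented four-entry hw_value table rather than the real CONF_HW_BIT_RATE_* values:

#include <stdio.h>

/* invented entries standing in for band->bitrates[bit].hw_value */
static const unsigned int hw_value[4] = { 0x10, 0x20, 0x40, 0x80 };

static unsigned int enabled_rates_get(unsigned int rate_set)
{
	unsigned int enabled = 0;

	for (int bit = 0; bit < 4; bit++) {   /* one rate_set bit per entry */
		if (rate_set & 0x1)
			enabled |= hw_value[bit];
		rate_set >>= 1;
	}
	return enabled;
}

int main(void)
{
	/* bits 0 and 2 set -> entries 0 and 2 -> 0x10 | 0x40 */
	printf("0x%x\n", enabled_rates_get(0x5));     /* prints 0x50 */
	return 0;
}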
void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
{
	int i;
	struct wl12xx_vif *wlvif;
	wl12xx_for_each_wlvif(wl, wlvif) {
		for (i = 0; i < NUM_TX_QUEUES; i++) {
			if (wlcore_is_queue_stopped_by_reason(wl, wlvif, i,
					WLCORE_QUEUE_STOP_REASON_WATERMARK) &&
			    wlvif->tx_queue_count[i] <=
					WL1271_TX_QUEUE_LOW_WATERMARK)
				/* firmware buffer has space, restart queues */
				wlcore_wake_queue(wl, wlvif, i,
					WLCORE_QUEUE_STOP_REASON_WATERMARK);
		}
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Arik Nemtsov | 49 | 65.33% | 5 | 83.33% | 
| Ido Yariv | 26 | 34.67% | 1 | 16.67% | 
| Total | 75 | 100.00% | 6 | 100.00% | 
static int wlcore_select_ac(struct wl1271 *wl)
{
	int i, q = -1, ac;
	u32 min_pkts = 0xffffffff;
	/*
         * Find a non-empty ac where:
         * 1. There are packets to transmit
         * 2. The FW has the least allocated blocks
         *
         * We prioritize the ACs according to VO>VI>BE>BK
         */
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		ac = wl1271_tx_get_queue(i);
		if (wl->tx_queue_count[ac] &&
		    wl->tx_allocated_pkts[ac] < min_pkts) {
			q = ac;
			min_pkts = wl->tx_allocated_pkts[q];
		}
	}
	return q;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Arik Nemtsov | 65 | 76.47% | 3 | 75.00% | 
| Juuso Oikarinen | 20 | 23.53% | 1 | 25.00% | 
| Total | 85 | 100.00% | 4 | 100.00% | 
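Two properties of wlcore_select_ac() fall out of the strict < against min_pkts: an AC is only selected if it has queued traffic, and ties between equally loaded ACs go to whichever wl1271_tx_get_queue() maps earlier in the scan, which is how the VO > VI > BE > BK ordering from the comment is enforced without an explicit priority field.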
static struct sk_buff *wlcore_lnk_dequeue(struct wl1271 *wl,
					  struct wl1271_link *lnk, u8 q)
{
	struct sk_buff *skb;
	unsigned long flags;
	skb = skb_dequeue(&lnk->tx_queue[q]);
	if (skb) {
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		if (lnk->wlvif) {
			WARN_ON_ONCE(lnk->wlvif->tx_queue_count[q] <= 0);
			lnk->wlvif->tx_queue_count[q]--;
		}
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}
	return skb;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Arik Nemtsov | 76 | 61.29% | 5 | 71.43% | 
| Juuso Oikarinen | 42 | 33.87% | 1 | 14.29% | 
| Eliad Peller | 6 | 4.84% | 1 | 14.29% | 
| Total | 124 | 100.00% | 7 | 100.00% | 
static struct sk_buff *wlcore_lnk_dequeue_high_prio(struct wl1271 *wl,
						    u8 hlid, u8 ac,
						    u8 *low_prio_hlid)
{
	struct wl1271_link *lnk = &wl->links[hlid];
	if (!wlcore_hw_lnk_high_prio(wl, hlid, lnk)) {
		if (*low_prio_hlid == WL12XX_INVALID_LINK_ID &&
		    !skb_queue_empty(&lnk->tx_queue[ac]) &&
		    wlcore_hw_lnk_low_prio(wl, hlid, lnk))
			/* we found the first non-empty low priority queue */
			*low_prio_hlid = hlid;
		return NULL;
	}
	return wlcore_lnk_dequeue(wl, lnk, ac);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Arik Nemtsov | 96 | 98.97% | 3 | 75.00% | 
| Juuso Oikarinen | 1 | 1.03% | 1 | 25.00% | 
| Total | 97 | 100.00% | 4 | 100.00% | 
static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl,
						    struct wl12xx_vif *wlvif,
						    u8 ac, u8 *hlid,
						    u8 *low_prio_hlid)
{
	struct sk_buff *skb = NULL;
	int i, h, start_hlid;
	/* start from the link after the last one */
	start_hlid = (wlvif->last_tx_hlid + 1) % wl->num_links;
	/* dequeue according to AC, round robin on each link */
	for (i = 0; i < wl->num_links; i++) {
		h = (start_hlid + i) % wl->num_links;
		/* only consider connected stations */
		if (!test_bit(h, wlvif->links_map))
			continue;
		skb = wlcore_lnk_dequeue_high_prio(wl, h, ac,
						   low_prio_hlid);
		if (!skb)
			continue;
		wlvif->last_tx_hlid = h;
		break;
	}
	if (!skb)
		wlvif->last_tx_hlid = 0;
	*hlid = wlvif->last_tx_hlid;
	return skb;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Arik Nemtsov | 111 | 74.50% | 4 | 44.44% | 
| Eliad Peller | 25 | 16.78% | 4 | 44.44% | 
| Juuso Oikarinen | 13 | 8.72% | 1 | 11.11% | 
| Total | 149 | 100.00% | 9 | 100.00% | 
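The round robin above starts one past the last served link and wraps with a modulo, so every link gets a turn before any link is served twice. A tiny self-contained sketch of that scan (the queue contents and the four-link size are invented):

#include <stdio.h>

#define NUM_LINKS 4                       /* invented, stands in for wl->num_links */

int main(void)
{
	int queued[NUM_LINKS] = { 0, 3, 0, 1 };   /* fake per-link packet counts */
	int last_tx_hlid = 1;                     /* link served last time */
	int start = (last_tx_hlid + 1) % NUM_LINKS;

	for (int i = 0; i < NUM_LINKS; i++) {
		int h = (start + i) % NUM_LINKS;
		if (queued[h]) {                  /* first non-empty link wins */
			printf("serve link %d\n", h);   /* link 3, not link 1 */
			last_tx_hlid = h;
			break;
		}
	}
	return 0;
}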
static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
{
	unsigned long flags;
	struct wl12xx_vif *wlvif = wl->last_wlvif;
	struct sk_buff *skb = NULL;
	int ac;
	u8 low_prio_hlid = WL12XX_INVALID_LINK_ID;
	ac = wlcore_select_ac(wl);
	if (ac < 0)
		goto out;
	/* continue from last wlvif (round robin) */
	if (wlvif) {
		wl12xx_for_each_wlvif_continue(wl, wlvif) {
			if (!wlvif->tx_queue_count[ac])
				continue;
			skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
							   &low_prio_hlid);
			if (!skb)
				continue;
			wl->last_wlvif = wlvif;
			break;
		}
	}
	/* dequeue from the system HLID before restarting the wlvif list */
	if (!skb) {
		skb = wlcore_lnk_dequeue_high_prio(wl, wl->system_hlid,
						   ac, &low_prio_hlid);
		if (skb) {
			*hlid = wl->system_hlid;
			wl->last_wlvif = NULL;
		}
	}
	/* Do a new pass over the wlvif list. But no need to continue
         * after last_wlvif. The previous pass should have found it. */
	if (!skb) {
		wl12xx_for_each_wlvif(wl, wlvif) {
			if (!wlvif->tx_queue_count[ac])
				goto next;
			skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
							   &low_prio_hlid);
			if (skb) {
				wl->last_wlvif = wlvif;
				break;
			}
next:
			if (wlvif == wl->last_wlvif)
				break;
		}
	}
	/* no high priority skbs found - but maybe a low priority one? */
	if (!skb && low_prio_hlid != WL12XX_INVALID_LINK_ID) {
		struct wl1271_link *lnk = &wl->links[low_prio_hlid];
		skb = wlcore_lnk_dequeue(wl, lnk, ac);
		WARN_ON(!skb); /* we checked this before */
		*hlid = low_prio_hlid;
		/* ensure proper round robin in the vif/link levels */
		wl->last_wlvif = lnk->wlvif;
		if (lnk->wlvif)
			lnk->wlvif->last_tx_hlid = low_prio_hlid;
	}
out:
	if (!skb &&
	    test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
		int q;
		skb = wl->dummy_packet;
		*hlid = wl->system_hlid;
		q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}
	return skb;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Arik Nemtsov | 249 | 64.34% | 7 | 58.33% | 
| Eliad Peller | 76 | 19.64% | 4 | 33.33% | 
| Ido Yariv | 62 | 16.02% | 1 | 8.33% | 
| Total | 387 | 100.00% | 12 | 100.00% | 
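Taken together, wl1271_skb_dequeue() consults its sources in a fixed order: (1) continue the vif round robin from wl->last_wlvif, (2) the system HLID, (3) a fresh pass over the whole wlvif list up to last_wlvif, (4) the first non-empty low-priority link recorded along the way, and (5) the pending dummy packet. last_wlvif and last_tx_hlid are only advanced when a source actually yields an skb, which keeps the round robin fair at both the vif and the link level.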
static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				  struct sk_buff *skb, u8 hlid)
{
	unsigned long flags;
	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
	if (wl12xx_is_dummy_packet(wl, skb)) {
		set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	} else {
		skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
		/* make sure we dequeue the same packet next time */
		wlvif->last_tx_hlid = (hlid + wl->num_links - 1) %
				      wl->num_links;
	}
	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->tx_queue_count[q]++;
	if (wlvif)
		wlvif->tx_queue_count[q]++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Arik Nemtsov | 108 | 77.14% | 4 | 44.44% | 
| Ido Yariv | 19 | 13.57% | 1 | 11.11% | 
| Eliad Peller | 13 | 9.29% | 4 | 44.44% | 
| Total | 140 | 100.00% | 9 | 100.00% | 
static bool wl1271_tx_is_data_present(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
	return ieee80211_is_data_present(hdr->frame_control);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Eliad Peller | 35 | 100.00% | 1 | 100.00% | 
| Total | 35 | 100.00% | 1 | 100.00% | 
void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
{
	struct wl12xx_vif *wlvif;
	u32 timeout;
	u8 hlid;
	if (!wl->conf.rx_streaming.interval)
		return;
	if (!wl->conf.rx_streaming.always &&
	    !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))
		return;
	timeout = wl->conf.rx_streaming.duration;
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		bool found = false;
		for_each_set_bit(hlid, active_hlids, wl->num_links) {
			if (test_bit(hlid, wlvif->links_map)) {
				found = true;
				break;
			}
		}
		if (!found)
			continue;
		/* enable rx streaming */
		if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
			ieee80211_queue_work(wl->hw,
					     &wlvif->rx_streaming_enable_work);
		mod_timer(&wlvif->rx_streaming_timer,
			  jiffies + msecs_to_jiffies(timeout));
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Eliad Peller | 159 | 100.00% | 3 | 100.00% | 
| Total | 159 | 100.00% | 3 | 100.00% | 
/*
 * Returns failure values only in case of failed bus ops within this function.
 * wl1271_prepare_tx_frame retvals won't be returned in order to avoid
 * triggering recovery by higher layers when not necessary.
 * If a FW command fails within wl1271_prepare_tx_frame, a recovery
 * will be queued in wl1271_cmd_send. -EAGAIN/-EBUSY from prepare_tx_frame
 * can occur and are legitimate, so don't propagate them. -EINVAL will emit a
 * WARNING within prepare_tx_frame code, but there's nothing we should do
 * about those either.
 */
int wlcore_tx_work_locked(struct wl1271 *wl)
{
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	struct wl1271_tx_hw_descr *desc;
	u32 buf_offset = 0, last_len = 0;
	bool sent_packets = false;
	unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0};
	int ret = 0;
	int bus_ret = 0;
	u8 hlid;
	if (unlikely(wl->state != WLCORE_STATE_ON))
		return 0;
	while ((skb = wl1271_skb_dequeue(wl, &hlid))) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		bool has_data = false;
		wlvif = NULL;
		if (!wl12xx_is_dummy_packet(wl, skb))
			wlvif = wl12xx_vif_to_data(info->control.vif);
		else
			hlid = wl->system_hlid;
		has_data = wlvif && wl1271_tx_is_data_present(skb);
		ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset,
					      hlid);
		if (ret == -EAGAIN) {
			/*
                         * Aggregation buffer is full.
                         * Flush buffer and try again.
                         */
			wl1271_skb_queue_head(wl, wlvif, skb, hlid);
			buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
							    last_len);
			bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA,
					     wl->aggr_buf, buf_offset, true);
			if (bus_ret < 0)
				goto out;
			sent_packets = true;
			buf_offset = 0;
			continue;
		} else if (ret == -EBUSY) {
			/*
                         * Firmware buffer is full.
                         * Queue back last skb, and stop aggregating.
                         */
			wl1271_skb_queue_head(wl, wlvif, skb, hlid);
			/* No work left, avoid scheduling redundant tx work */
			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
			goto out_ack;
		} else if (ret < 0) {
			if (wl12xx_is_dummy_packet(wl, skb))
				/*
                                 * fw still expects dummy packet,
                                 * so re-enqueue it
                                 */
				wl1271_skb_queue_head(wl, wlvif, skb, hlid);
			else
				ieee80211_free_txskb(wl->hw, skb);
			goto out_ack;
		}
		last_len = ret;
		buf_offset += last_len;
		wl->tx_packets_count++;
		if (has_data) {
			desc = (struct wl1271_tx_hw_descr *) skb->data;
			__set_bit(desc->hlid, active_hlids);
		}
	}
out_ack:
	if (buf_offset) {
		buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, last_len);
		bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
					     buf_offset, true);
		if (bus_ret < 0)
			goto out;
		sent_packets = true;
	}
	if (sent_packets) {
		/*
                 * Interrupt the firmware with the new packets. This is only
                 * required for older hardware revisions
                 */
		if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
			bus_ret = wlcore_write32(wl, WL12XX_HOST_WR_ACCESS,
					     wl->tx_packets_count);
			if (bus_ret < 0)
				goto out;
		}
		wl1271_handle_tx_low_watermark(wl);
	}
	wl12xx_rearm_rx_streaming(wl, active_hlids);
out:
	return bus_ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Ido Yariv | 151 | 33.04% | 8 | 30.77% | 
| Eliad Peller | 138 | 30.20% | 8 | 30.77% | 
| Luciano Coelho | 81 | 17.72% | 3 | 11.54% | 
| Ido Reis | 31 | 6.78% | 1 | 3.85% | 
| Arik Nemtsov | 22 | 4.81% | 2 | 7.69% | 
| Juuso Oikarinen | 22 | 4.81% | 3 | 11.54% | 
| Eyal Shapira | 12 | 2.63% | 1 | 3.85% | 
| Total | 457 | 100.00% | 26 | 100.00% | 
void wl1271_tx_work(struct work_struct *work)
{
	struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
	int ret;
	mutex_lock(&wl->mutex);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;
	ret = wlcore_tx_work_locked(wl);
	if (ret < 0) {
		wl12xx_queue_recovery_work(wl);
		goto out;
	}
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Ido Yariv | 55 | 61.11% | 2 | 40.00% | 
| Eliad Peller | 26 | 28.89% | 2 | 40.00% | 
| Luciano Coelho | 9 | 10.00% | 1 | 20.00% | 
| Total | 90 | 100.00% | 5 | 100.00% | 
static u8 wl1271_tx_get_rate_flags(u8 rate_class_index)
{
	u8 flags = 0;
	/*
         * TODO: use wl12xx constants when this code is moved to wl12xx, as
         * only it uses Tx-completion.
         */
	if (rate_class_index <= 8)
		flags |= IEEE80211_TX_RC_MCS;
	/*
         * TODO: use wl12xx constants when this code is moved to wl12xx, as
         * only it uses Tx-completion.
         */
	if (rate_class_index == 0)
		flags |= IEEE80211_TX_RC_SHORT_GI;
	return flags;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Pontus Fuchs | 35 | 89.74% | 2 | 66.67% | 
| Arik Nemtsov | 4 | 10.26% | 1 | 33.33% | 
| Total | 39 | 100.00% | 3 | 100.00% | 
static void wl1271_tx_complete_packet(struct wl1271 *wl,
				      struct wl1271_tx_hw_res_descr *result)
{
	struct ieee80211_tx_info *info;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	int id = result->id;
	int rate = -1;
	u8 rate_flags = 0;
	u8 retries = 0;
	/* check for id legality */
	if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
		wl1271_warning("TX result illegal id: %d", id);
		return;
	}
	skb = wl->tx_frames[id];
	info = IEEE80211_SKB_CB(skb);
	if (wl12xx_is_dummy_packet(wl, skb)) {
		wl1271_free_tx_id(wl, id);
		return;
	}
	/* info->control is valid as long as we don't update info->status */
	vif = info->control.vif;
	wlvif = wl12xx_vif_to_data(vif);
	/* update the TX status info */
	if (result->status == TX_SUCCESS) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
			info->flags |= IEEE80211_TX_STAT_ACK;
		rate = wlcore_rate_to_idx(wl, result->rate_class_index,
					  wlvif->band);
		rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index);
		retries = result->ack_failures;
	} else if (result->status == TX_RETRY_EXCEEDED) {
		wl->stats.excessive_retries++;
		retries = result->ack_failures;
	}
	info->status.rates[0].idx = rate;
	info->status.rates[0].count = retries;
	info->status.rates[0].flags = rate_flags;
	info->status.ack_signal = -1;
	wl->stats.retry_count += result->ack_failures;
	/* remove private header from packet */
	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
	/* remove TKIP header space if present */
	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
			hdrlen);
		skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
	}
	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
		     " status 0x%x",
		     result->id, skb, result->ack_failures,
		     result->rate_class_index, result->status);
	/* return the packet to the stack */
	skb_queue_tail(&wl->deferred_tx_queue, skb);
	queue_work(wl->freezable_wq, &wl->netstack_work);
	wl1271_free_tx_id(wl, result->id);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Luciano Coelho | 177 | 43.17% | 1 | 5.26% | 
| Juuso Oikarinen | 132 | 32.20% | 4 | 21.05% | 
| Eliad Peller | 31 | 7.56% | 4 | 21.05% | 
| Ido Yariv | 22 | 5.37% | 3 | 15.79% | 
| Pontus Fuchs | 15 | 3.66% | 1 | 5.26% | 
| Arik Nemtsov | 14 | 3.41% | 3 | 15.79% | 
| Shahar Levi | 14 | 3.41% | 1 | 5.26% | 
| Teemu Paasikivi | 3 | 0.73% | 1 | 5.26% | 
| Johannes Berg | 2 | 0.49% | 1 | 5.26% | 
| Total | 410 | 100.00% | 19 | 100.00% | 
/* Called upon reception of a TX complete interrupt */
int wlcore_tx_complete(struct wl1271 *wl)
{
	struct wl1271_acx_mem_map *memmap = wl->target_mem_map;
	u32 count, fw_counter;
	u32 i;
	int ret;
	/* read the tx results from the chipset */
	ret = wlcore_read(wl, le32_to_cpu(memmap->tx_result),
			  wl->tx_res_if, sizeof(*wl->tx_res_if), false);
	if (ret < 0)
		goto out;
	fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);
	/* write host counter to chipset (to ack) */
	ret = wlcore_write32(wl, le32_to_cpu(memmap->tx_result) +
			     offsetof(struct wl1271_tx_hw_res_if,
				      tx_result_host_counter), fw_counter);
	if (ret < 0)
		goto out;
	count = fw_counter - wl->tx_results_count;
	wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
	/* verify that the result buffer is not getting overrun */
	if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
		wl1271_warning("TX result overflow from chipset: %d", count);
	/* process the results */
	for (i = 0; i < count; i++) {
		struct wl1271_tx_hw_res_descr *result;
		u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;
		/* process the packet */
		result = &(wl->tx_res_if->tx_results_queue[offset]);
		wl1271_tx_complete_packet(wl, result);
		wl->tx_results_count++;
	}
out:
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Luciano Coelho | 114 | 54.55% | 2 | 28.57% | 
| Juuso Oikarinen | 61 | 29.19% | 3 | 42.86% | 
| Ido Yariv | 34 | 16.27% | 2 | 28.57% | 
| Total | 209 | 100.00% | 7 | 100.00% | 
EXPORT_SYMBOL(wlcore_tx_complete);
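The completion path relies on free-running counters rather than explicit ring pointers: the firmware counter minus wl->tx_results_count gives the number of new results, and the host counter masked with TX_HW_RESULT_QUEUE_LEN_MASK indexes the ring. A compact sketch of that bookkeeping, with invented counter values and the 16-slot ring size taken as an assumption:

#include <stdio.h>

#define QUEUE_LEN      16                 /* assumed TX_HW_RESULT_QUEUE_LEN */
#define QUEUE_LEN_MASK (QUEUE_LEN - 1)

int main(void)
{
	unsigned int fw_counter = 21;         /* pretend value read from the chip */
	unsigned int results_count = 18;      /* host-side free-running counter */
	unsigned int count = fw_counter - results_count;   /* 3 new results */

	for (unsigned int i = 0; i < count; i++) {
		unsigned int offset = results_count & QUEUE_LEN_MASK;
		printf("process result slot %u\n", offset);    /* 2, 3, 4 */
		results_count++;
	}
	return 0;
}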
void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
{
	struct sk_buff *skb;
	int i;
	unsigned long flags;
	struct ieee80211_tx_info *info;
	int total[NUM_TX_QUEUES];
	struct wl1271_link *lnk = &wl->links[hlid];
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		total[i] = 0;
		while ((skb = skb_dequeue(&lnk->tx_queue[i]))) {
			wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);
			if (!wl12xx_is_dummy_packet(wl, skb)) {
				info = IEEE80211_SKB_CB(skb);
				info->status.rates[0].idx = -1;
				info->status.rates[0].count = 0;
				ieee80211_tx_status_ni(wl->hw, skb);
			}
			total[i]++;
		}
	}
	spin_lock_irqsave(&wl->wl_lock, flags);
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->tx_queue_count[i] -= total[i];
		if (lnk->wlvif)
			lnk->wlvif->tx_queue_count[i] -= total[i];
	}
	spin_unlock_irqrestore(&wl->wl_lock, flags);
	wl1271_handle_tx_low_watermark(wl);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Arik Nemtsov | 231 | 99.57% | 5 | 83.33% | 
| Eliad Peller | 1 | 0.43% | 1 | 16.67% | 
| Total | 232 | 100.00% | 6 | 100.00% | 
/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int i;
	/* TX failure */
	for_each_set_bit(i, wlvif->links_map, wl->num_links) {
		if (wlvif->bss_type == BSS_TYPE_AP_BSS &&
		    i != wlvif->ap.bcast_hlid && i != wlvif->ap.global_hlid) {
			/* this calls wl12xx_free_link */
			wl1271_free_sta(wl, wlvif, i);
		} else {
			u8 hlid = i;
			wl12xx_free_link(wl, wlvif, &hlid);
		}
	}
	wlvif->last_tx_hlid = 0;
	for (i = 0; i < NUM_TX_QUEUES; i++)
		wlvif->tx_queue_count[i] = 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Arik Nemtsov | 80 | 72.07% | 7 | 53.85% | 
| Eliad Peller | 21 | 18.92% | 5 | 38.46% | 
| Luciano Coelho | 10 | 9.01% | 1 | 7.69% | 
| Total | 111 | 100.00% | 13 | 100.00% | 
/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset(struct wl1271 *wl)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;
	/* only reset the queues if something bad happened */
	if (wl1271_tx_total_queue_count(wl) != 0) {
		for (i = 0; i < wl->num_links; i++)
			wl1271_tx_reset_link_queues(wl, i);
		for (i = 0; i < NUM_TX_QUEUES; i++)
			wl->tx_queue_count[i] = 0;
	}
	/*
         * Make sure the driver is at a consistent state, in case this
         * function is called from a context other than interface removal.
         * This call will always wake the TX queues.
         */
	wl1271_handle_tx_low_watermark(wl);
	for (i = 0; i < wl->num_tx_desc; i++) {
		if (wl->tx_frames[i] == NULL)
			continue;
		skb = wl->tx_frames[i];
		wl1271_free_tx_id(wl, i);
		wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
		if (!wl12xx_is_dummy_packet(wl, skb)) {
			/*
                         * Remove private headers before passing the skb to
                         * mac80211
                         */
			info = IEEE80211_SKB_CB(skb);
			skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
			if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
			    info->control.hw_key &&
			    info->control.hw_key->cipher ==
			    WLAN_CIPHER_SUITE_TKIP) {
				int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
				memmove(skb->data + WL1271_EXTRA_SPACE_TKIP,
					skb->data, hdrlen);
				skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
			}
			info->status.rates[0].idx = -1;
			info->status.rates[0].count = 0;
			ieee80211_tx_status_ni(wl->hw, skb);
		}
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Arik Nemtsov | 96 | 36.36% | 8 | 36.36% | 
| Ido Yariv | 76 | 28.79% | 4 | 18.18% | 
| Luciano Coelho | 40 | 15.15% | 1 | 4.55% | 
| Eliad Peller | 24 | 9.09% | 4 | 18.18% | 
| Juuso Oikarinen | 19 | 7.20% | 4 | 18.18% | 
| Shahar Levi | 9 | 3.41% | 1 | 4.55% | 
| Total | 264 | 100.00% | 22 | 100.00% | 
#define WL1271_TX_FLUSH_TIMEOUT 500000
/* caller must *NOT* hold wl->mutex */
void wl1271_tx_flush(struct wl1271 *wl)
{
	unsigned long timeout, start_time;
	int i;
	start_time = jiffies;
	timeout = start_time + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
	/* only one flush should be in progress, for consistent queue state */
	mutex_lock(&wl->flush_mutex);
	mutex_lock(&wl->mutex);
	if (wl->tx_frames_cnt == 0 && wl1271_tx_total_queue_count(wl) == 0) {
		mutex_unlock(&wl->mutex);
		goto out;
	}
	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
	while (!time_after(jiffies, timeout)) {
		wl1271_debug(DEBUG_MAC80211, "flushing tx buffer: %d %d",
			     wl->tx_frames_cnt,
			     wl1271_tx_total_queue_count(wl));
		/* force Tx and give the driver some time to flush data */
		mutex_unlock(&wl->mutex);
		if (wl1271_tx_total_queue_count(wl))
			wl1271_tx_work(&wl->tx_work);
		msleep(20);
		mutex_lock(&wl->mutex);
		if ((wl->tx_frames_cnt == 0) &&
		    (wl1271_tx_total_queue_count(wl) == 0)) {
			wl1271_debug(DEBUG_MAC80211, "tx flush took %d ms",
				     jiffies_to_msecs(jiffies - start_time));
			goto out_wake;
		}
	}
	wl1271_warning("Unable to flush all TX buffers, "
		       "timed out (timeout %d ms",
		       WL1271_TX_FLUSH_TIMEOUT / 1000);
	/* forcibly flush all Tx buffers on our queues */
	for (i = 0; i < wl->num_links; i++)
		wl1271_tx_reset_link_queues(wl, i);
out_wake:
	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
	mutex_unlock(&wl->mutex);
out:
	mutex_unlock(&wl->flush_mutex);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Arik Nemtsov | 179 | 72.47% | 6 | 60.00% | 
| Juuso Oikarinen | 64 | 25.91% | 2 | 20.00% | 
| Eliad Peller | 3 | 1.21% | 1 | 10.00% | 
| Luciano Coelho | 1 | 0.40% | 1 | 10.00% | 
| Total | 247 | 100.00% | 10 | 100.00% | 
EXPORT_SYMBOL_GPL(wl1271_tx_flush);
u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
{
	if (WARN_ON(!rate_set))
		return 0;
	return BIT(__ffs(rate_set));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Arik Nemtsov | 18 | 54.55% | 1 | 50.00% | 
| Eliad Peller | 15 | 45.45% | 1 | 50.00% | 
| Total | 33 | 100.00% | 2 | 100.00% | 
EXPORT_SYMBOL_GPL(wl1271_tx_min_rate_get);
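As a worked example: for rate_set = 0b0110 the lowest set bit is bit 1, so __ffs() returns 1 and wl1271_tx_min_rate_get() yields BIT(1) = 0b0010, i.e. the slowest rate still enabled in the set.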
void wlcore_stop_queue_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      u8 queue, enum wlcore_queue_stop_reason reason)
{
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
	bool stopped = !!wl->queue_stop_reasons[hwq];
	/* queue should not be stopped for this reason */
	WARN_ON_ONCE(test_and_set_bit(reason, &wl->queue_stop_reasons[hwq]));
	if (stopped)
		return;
	ieee80211_stop_queue(wl->hw, hwq);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Arik Nemtsov | 75 | 100.00% | 2 | 100.00% | 
| Total | 75 | 100.00% | 2 | 100.00% | 
void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
		       enum wlcore_queue_stop_reason reason)
{
	unsigned long flags;
	spin_lock_irqsave(&wl->wl_lock, flags);
	wlcore_stop_queue_locked(wl, wlvif, queue, reason);
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Arik Nemtsov | 57 | 100.00% | 2 | 100.00% | 
| Total | 57 | 100.00% | 2 | 100.00% | 
void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
		       enum wlcore_queue_stop_reason reason)
{
	unsigned long flags;
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
	spin_lock_irqsave(&wl->wl_lock, flags);
	/* queue should not be clear for this reason */
	WARN_ON_ONCE(!test_and_clear_bit(reason, &wl->queue_stop_reasons[hwq]));
	if (wl->queue_stop_reasons[hwq])
		goto out;
	ieee80211_wake_queue(wl->hw, hwq);
out:
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Arik Nemtsov | 97 | 100.00% | 2 | 100.00% | 
| Total | 97 | 100.00% | 2 | 100.00% | 
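The stop/wake pair above implements a per-queue reason bitmask: a hardware queue stays stopped while any reason bit is set, and mac80211 is only notified on the transitions between "no reasons" and "some reason". A compact userspace sketch of that pattern, with invented reason names and a single queue for brevity:

#include <stdio.h>

enum { REASON_WATERMARK, REASON_FLUSH };  /* invented reason names */

static unsigned long stop_reasons;        /* one hw queue, for simplicity */

static void stop_queue(int reason)
{
	int was_stopped = stop_reasons != 0;

	stop_reasons |= 1UL << reason;
	if (!was_stopped)
		printf("ieee80211_stop_queue()\n");    /* first reason stops it */
}

static void wake_queue(int reason)
{
	stop_reasons &= ~(1UL << reason);
	if (!stop_reasons)
		printf("ieee80211_wake_queue()\n");    /* last reason wakes it */
}

int main(void)
{
	stop_queue(REASON_WATERMARK);
	stop_queue(REASON_FLUSH);      /* already stopped: no mac80211 call */
	wake_queue(REASON_WATERMARK);  /* still stopped by FLUSH */
	wake_queue(REASON_FLUSH);      /* now the queue wakes */
	return 0;
}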
void wlcore_stop_queues(struct wl1271 *wl,
			enum wlcore_queue_stop_reason reason)
{
	int i;
	unsigned long flags;
	spin_lock_irqsave(&wl->wl_lock, flags);
	/* mark all possible queues as stopped */
	for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
		WARN_ON_ONCE(test_and_set_bit(reason,
					      &wl->queue_stop_reasons[i]));
	/* use the global version to make sure all vifs in mac80211,
         * even ones we don't know about, are stopped.
         */
	ieee80211_stop_queues(wl->hw);
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Arik Nemtsov | 81 | 100.00% | 3 | 100.00% | 
| Total | 81 | 100.00% | 3 | 100.00% | 
void wlcore_wake_queues(struct wl1271 *wl,
			enum wlcore_queue_stop_reason reason)
{
	int i;
	unsigned long flags;
	spin_lock_irqsave(&wl->wl_lock, flags);
	/* mark all possible queues as awake */
	for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
		WARN_ON_ONCE(!test_and_clear_bit(reason,
						 &wl->queue_stop_reasons[i]));
	/* use the global version to make sure all vifs in mac80211,
         * even ones we don't know about, are woken up.
         */
	ieee80211_wake_queues(wl->hw);
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Arik Nemtsov | 82 | 100.00% | 2 | 100.00% | 
| Total | 82 | 100.00% | 2 | 100.00% | 
bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl,
				       struct wl12xx_vif *wlvif, u8 queue,
				       enum wlcore_queue_stop_reason reason)
{
	unsigned long flags;
	bool stopped;
	spin_lock_irqsave(&wl->wl_lock, flags);
	stopped = wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, queue,
							   reason);
	spin_unlock_irqrestore(&wl->wl_lock, flags);
	return stopped;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Arik Nemtsov | 65 | 100.00% | 3 | 100.00% | 
| Total | 65 | 100.00% | 3 | 100.00% | 
bool wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl,
				       struct wl12xx_vif *wlvif, u8 queue,
				       enum wlcore_queue_stop_reason reason)
{
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
	assert_spin_locked(&wl->wl_lock);
	return test_bit(reason, &wl->queue_stop_reasons[hwq]);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Arik Nemtsov | 54 | 100.00% | 4 | 100.00% | 
| Total | 54 | 100.00% | 4 | 100.00% | 
bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				    u8 queue)
{
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
	assert_spin_locked(&wl->wl_lock);
	return !!wl->queue_stop_reasons[hwq];
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Arik Nemtsov | 46 | 100.00% | 4 | 100.00% | 
| Total | 46 | 100.00% | 4 | 100.00% | 
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Arik Nemtsov | 2691 | 44.95% | 60 | 38.46% | 
| Eliad Peller | 986 | 16.47% | 40 | 25.64% | 
| Luciano Coelho | 836 | 13.96% | 7 | 4.49% | 
| Ido Yariv | 654 | 10.92% | 16 | 10.26% | 
| Juuso Oikarinen | 519 | 8.67% | 12 | 7.69% | 
| Shahar Levi | 109 | 1.82% | 5 | 3.21% | 
| Pontus Fuchs | 50 | 0.84% | 2 | 1.28% | 
| Ido Reis | 44 | 0.73% | 1 | 0.64% | 
| Igal Chernobelsky | 40 | 0.67% | 3 | 1.92% | 
| Eyal Shapira | 32 | 0.53% | 2 | 1.28% | 
| Kalle Valo | 11 | 0.18% | 1 | 0.64% | 
| Teemu Paasikivi | 5 | 0.08% | 2 | 1.28% | 
| Johannes Berg | 5 | 0.08% | 2 | 1.28% | 
| Victor Goldenshtein | 2 | 0.03% | 1 | 0.64% | 
| John W. Linville | 2 | 0.03% | 1 | 0.64% | 
| Yoni Divinsky | 1 | 0.02% | 1 | 0.64% | 
| Total | 5987 | 100.00% | 156 | 100.00% | 