Release 4.11 drivers/net/wireless/ti/wlcore/event.c
/*
* This file is part of wl1271
*
* Copyright (C) 2008-2009 Nokia Corporation
*
* Contact: Luciano Coelho <luciano.coelho@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
*/
#include "wlcore.h"
#include "debug.h"
#include "io.h"
#include "event.h"
#include "ps.h"
#include "scan.h"
#include "wl12xx_80211.h"
#include "hw_ops.h"
#define WL18XX_LOGGER_SDIO_BUFF_MAX (0x1020)
#define WL18XX_DATA_RAM_BASE_ADDRESS (0x20000000)
#define WL18XX_LOGGER_SDIO_BUFF_ADDR (0x40159c)
#define WL18XX_LOGGER_BUFF_OFFSET (sizeof(struct fw_logger_information))
#define WL18XX_LOGGER_READ_POINT_OFFSET (12)
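/*
 * The FW logger shares a ring buffer with the host: a struct
 * fw_logger_information header followed by the log data itself.
 * The read/write pointers in that header are firmware-internal
 * addresses (based at WL18XX_DATA_RAM_BASE_ADDRESS), so they are
 * rebased below before being used as offsets into the buffer that
 * is read out over the bus.
 */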
int wlcore_event_fw_logger(struct wl1271 *wl)
{
int ret;
struct fw_logger_information fw_log;
u8 *buffer;
u32 internal_fw_addrbase = WL18XX_DATA_RAM_BASE_ADDRESS;
u32 addr = WL18XX_LOGGER_SDIO_BUFF_ADDR;
u32 end_buff_addr = WL18XX_LOGGER_SDIO_BUFF_ADDR +
WL18XX_LOGGER_BUFF_OFFSET;
u32 available_len;
u32 actual_len;
u32 clear_addr;
size_t len;
u32 start_loc;
buffer = kzalloc(WL18XX_LOGGER_SDIO_BUFF_MAX, GFP_KERNEL);
if (!buffer) {
wl1271_error("Fail to allocate fw logger memory");
fw_log.actual_buff_size = cpu_to_le32(0);
goto out;
}
ret = wlcore_read(wl, addr, buffer, WL18XX_LOGGER_SDIO_BUFF_MAX,
false);
if (ret < 0) {
wl1271_error("Fail to read logger buffer, error_id = %d",
ret);
fw_log.actual_buff_size = cpu_to_le32(0);
goto free_out;
}
memcpy(&fw_log, buffer, sizeof(fw_log));
actual_len = le32_to_cpu(fw_log.actual_buff_size);
if (actual_len == 0)
goto free_out;
start_loc = (le32_to_cpu(fw_log.buff_read_ptr) -
internal_fw_addrbase) - addr;
end_buff_addr += le32_to_cpu(fw_log.max_buff_size);
available_len = end_buff_addr -
(le32_to_cpu(fw_log.buff_read_ptr) -
internal_fw_addrbase);
actual_len = min(actual_len, available_len);
len = actual_len;
wl12xx_copy_fwlog(wl, &buffer[start_loc], len);
clear_addr = addr + start_loc + le32_to_cpu(fw_log.actual_buff_size) +
internal_fw_addrbase;
len = le32_to_cpu(fw_log.actual_buff_size) - len;
if (len) {
wl12xx_copy_fwlog(wl,
&buffer[WL18XX_LOGGER_BUFF_OFFSET],
len);
clear_addr = addr + WL18XX_LOGGER_BUFF_OFFSET + len +
internal_fw_addrbase;
}
/* sanity check: the computed clear address must match the FW write pointer */
if (clear_addr != le32_to_cpu(fw_log.buff_write_ptr)) {
wl1271_error("clear address 0x%x does not match write pointer 0x%x",
clear_addr, le32_to_cpu(fw_log.buff_write_ptr));
}
/* advance the FW read pointer so the FW knows the buffer was consumed */
ret = wlcore_write32(wl, addr + WL18XX_LOGGER_READ_POINT_OFFSET,
fw_log.buff_write_ptr);
if (ret < 0)
wl1271_error("Failed to update logger read pointer, error %d",
ret);
free_out:
kfree(buffer);
out:
return le32_to_cpu(fw_log.actual_buff_size);
}
EXPORT_SYMBOL_GPL(wlcore_event_fw_logger);
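/*
 * Example (sketch): this helper is meant to be called from a
 * chip-specific mailbox handler when the FW-logger bit is set in the
 * event vector. The flag name below follows the wl18xx convention and
 * is illustrative of the dispatch, not part of this file:
 *
 *	if (vector & FW_LOGGER_INDICATION)
 *		wlcore_event_fw_logger(wl);
 */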
void wlcore_event_rssi_trigger(struct wl1271 *wl, s8 *metric_arr)
{
struct wl12xx_vif *wlvif;
struct ieee80211_vif *vif;
enum nl80211_cqm_rssi_threshold_event event;
s8 metric = metric_arr[0];
wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric);
/* TODO: check actual multi-role support */
wl12xx_for_each_wlvif_sta(wl, wlvif) {
if (metric <= wlvif->rssi_thold)
event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
else
event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
vif = wl12xx_wlvif_to_vif(wlvif);
if (event != wlvif->last_rssi_event)
ieee80211_cqm_rssi_notify(vif, event, metric,
GFP_KERNEL);
wlvif->last_rssi_event = event;
}
}
EXPORT_SYMBOL_GPL(wlcore_event_rssi_trigger);
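/*
 * Example (sketch): the metric array comes straight out of the event
 * mailbox; a chip-specific handler would forward it roughly as below.
 * The event flag and mailbox field names follow the wl18xx layout and
 * should be treated as illustrative:
 *
 *	if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID)
 *		wlcore_event_rssi_trigger(wl, mbox->rssi_snr_trigger_metric);
 */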
static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
u8 hlid = wlvif->sta.hlid;
if (!wl->links[hlid].ba_bitmap)
return;
ieee80211_stop_rx_ba_session(vif, wl->links[hlid].ba_bitmap,
vif->bss_conf.bssid);
} else {
u8 hlid;
struct wl1271_link *lnk;
for_each_set_bit(hlid, wlvif->ap.sta_hlid_map,
wl->num_links) {
lnk = &wl->links[hlid];
if (!lnk->ba_bitmap)
continue;
ieee80211_stop_rx_ba_session(vif,
lnk->ba_bitmap,
lnk->addr);
}
}
}
void wlcore_event_soft_gemini_sense(struct wl1271 *wl, u8 enable)
{
struct wl12xx_vif *wlvif;
if (enable) {
set_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
} else {
clear_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
wl12xx_for_each_wlvif_sta(wl, wlvif) {
wl1271_recalc_rx_streaming(wl, wlvif);
}
}
}
EXPORT_SYMBOL_GPL(wlcore_event_soft_gemini_sense);
void wlcore_event_sched_scan_completed(struct wl1271 *wl,
u8 status)
{
wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_COMPLETE_EVENT (status 0x%0x)",
status);
if (wl->sched_vif) {
ieee80211_sched_scan_stopped(wl->hw);
wl->sched_vif = NULL;
}
}
EXPORT_SYMBOL_GPL(wlcore_event_sched_scan_completed);
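/*
 * Example (sketch): invoked by the chip-specific handler when the
 * firmware signals that a periodic (sched) scan has stopped. The flag
 * name and status value here are illustrative:
 *
 *	if (vector & PERIODIC_SCAN_COMPLETE_EVENT_ID)
 *		wlcore_event_sched_scan_completed(wl, 1);
 */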
void wlcore_event_ba_rx_constraint(struct wl1271 *wl,
unsigned long roles_bitmap,
unsigned long allowed_bitmap)
{
struct wl12xx_vif *wlvif;
wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx allowed=0x%lx",
__func__, roles_bitmap, allowed_bitmap);
wl12xx_for_each_wlvif(wl, wlvif) {
if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
!test_bit(wlvif->role_id, &roles_bitmap))
continue;
wlvif->ba_allowed = !!test_bit(wlvif->role_id,
&allowed_bitmap);
if (!wlvif->ba_allowed)
wl1271_stop_ba_event(wl, wlvif);
}
}
EXPORT_SYMBOL_GPL(wlcore_event_ba_rx_constraint);
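/*
 * Example (sketch): the role and "allowed" bitmaps arrive as
 * little-endian fields in the event mailbox and need converting before
 * the call. Field names here follow the wl18xx layout and are
 * illustrative:
 *
 *	if (vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID)
 *		wlcore_event_ba_rx_constraint(wl,
 *			le16_to_cpu(mbox->rx_ba_role_id_bitmap),
 *			le16_to_cpu(mbox->rx_ba_allowed_bitmap));
 */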
void wlcore_event_channel_switch(struct wl1271 *wl,
unsigned long roles_bitmap,
bool success)
{
struct wl12xx_vif *wlvif;
struct ieee80211_vif *vif;
wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx success=%d",
__func__, roles_bitmap, success);
wl12xx_for_each_wlvif(wl, wlvif) {
if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
!test_bit(wlvif->role_id, &roles_bitmap))
continue;
if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS,
&wlvif->flags))
continue;
vif = wl12xx_wlvif_to_vif(wlvif);
if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
ieee80211_chswitch_done(vif, success);
cancel_delayed_work(&wlvif->channel_switch_work);
} else {
set_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags);
ieee80211_csa_finish(vif);
}
}
}
EXPORT_SYMBOL_GPL(wlcore_event_channel_switch);
void wlcore_event_dummy_packet(struct wl1271 *wl)
{
if (wl->plt) {
wl1271_info("Got DUMMY_PACKET event in PLT mode. FW bug, ignoring.");
return;
}
wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
wl1271_tx_dummy_packet(wl);
}
EXPORT_SYMBOL_GPL(wlcore_event_dummy_packet);
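/*
 * For every link set in sta_bitmap, find the AP vif that owns it and
 * report a low-ack condition to mac80211. mac80211 in turn raises a
 * CQM packet-loss event, giving the AP daemon a chance to disconnect
 * the unresponsive station.
 */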
static void wlcore_disconnect_sta(struct wl1271 *wl, unsigned long sta_bitmap)
{
u32 num_packets = wl->conf.tx.max_tx_retries;
struct wl12xx_vif *wlvif;
struct ieee80211_vif *vif;
struct ieee80211_sta *sta;
const u8 *addr;
int h;
for_each_set_bit(h, &sta_bitmap, wl->num_links) {
bool found = false;
/* find the ap vif connected to this sta */
wl12xx_for_each_wlvif_ap(wl, wlvif) {
if (!test_bit(h, wlvif->ap.sta_hlid_map))
continue;
found = true;
break;
}
if (!found)
continue;
vif = wl12xx_wlvif_to_vif(wlvif);
addr = wl->links[h].addr;
rcu_read_lock();
sta = ieee80211_find_sta(vif, addr);
if (sta) {
wl1271_debug(DEBUG_EVENT, "remove sta %d", h);
ieee80211_report_low_ack(sta, num_packets);
}
rcu_read_unlock();
}
}
void wlcore_event_max_tx_failure(struct wl1271 *wl, unsigned long sta_bitmap)
{
wl1271_debug(DEBUG_EVENT, "MAX_TX_FAILURE_EVENT_ID");
wlcore_disconnect_sta(wl, sta_bitmap);
}
EXPORT_SYMBOL_GPL(wlcore_event_max_tx_failure);
void wlcore_event_inactive_sta(struct wl1271 *wl, unsigned long sta_bitmap)
{
wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID");
wlcore_disconnect_sta(wl, sta_bitmap);
}
EXPORT_SYMBOL_GPL(wlcore_event_inactive_sta);
void wlcore_event_roc_complete(struct wl1271 *wl)
{
wl1271_debug(DEBUG_EVENT, "REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID");
if (wl->roc_vif)
ieee80211_ready_on_channel(wl->hw);
}
EXPORT_SYMBOL_GPL(wlcore_event_roc_complete);
void wlcore_event_beacon_loss(struct wl1271 *wl, unsigned long roles_bitmap)
{
/*
* We are a HW_MONITOR device. On beacon loss, queue the
* connection-loss work; it is cancelled on a REGAINED event.
*/
struct wl12xx_vif *wlvif;
struct ieee80211_vif *vif;
int delay = wl->conf.conn.synch_fail_thold *
wl->conf.conn.bss_lose_timeout;
wl1271_info("Beacon loss detected. roles:0x%lx", roles_bitmap);
wl12xx_for_each_wlvif_sta(wl, wlvif) {
if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
!test_bit(wlvif->role_id, &roles_bitmap))
continue;
vif = wl12xx_wlvif_to_vif(wlvif);
/* don't attempt roaming in case of p2p */
if (wlvif->p2p) {
ieee80211_connection_loss(vif);
continue;
}
/*
* If the work is already queued, keep its original deadline;
* we don't want to push the connection-loss indication any
* further out.
*/
ieee80211_queue_delayed_work(wl->hw,
&wlvif->connection_loss_work,
msecs_to_jiffies(delay));
ieee80211_cqm_beacon_loss_notify(vif, GFP_KERNEL);
}
}
EXPORT_SYMBOL_GPL(wlcore_event_beacon_loss);
int wl1271_event_unmask(struct wl1271 *wl)
{
int ret;
wl1271_debug(DEBUG_EVENT, "unmasking event_mask 0x%x", wl->event_mask);
ret = wl1271_acx_event_mbox_mask(wl, ~(wl->event_mask));
if (ret < 0)
return ret;
return 0;
}
int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
{
int ret;
wl1271_debug(DEBUG_EVENT, "EVENT on mbox %d", mbox_num);
if (mbox_num > 1)
return -EINVAL;
/* first we read the mbox descriptor */
ret = wlcore_read(wl, wl->mbox_ptr[mbox_num], wl->mbox,
wl->mbox_size, false);
if (ret < 0)
return ret;
/* process the descriptor */
ret = wl->ops->process_mailbox_events(wl);
if (ret < 0)
return ret;
/*
* TODO: we just need this because one bit is in a different
* place. Is there any better way?
*/
ret = wl->ops->ack_event(wl);
return ret;
}
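/*
 * Example (sketch): the wlcore IRQ worker calls this when the event
 * interrupt bits are set, roughly as follows:
 *
 *	if (intr & WL1271_ACX_INTR_EVENT_A)
 *		ret = wl1271_event_handle(wl, 0);
 *	else if (intr & WL1271_ACX_INTR_EVENT_B)
 *		ret = wl1271_event_handle(wl, 1);
 */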