#include <linux/etherdevice.h>
#include "iwl-trans.h"
#include "iwl-modparams.h"
#include "dev.h"
#include "agn.h"
#include "calib.h"
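
/*
 * iwl_connection_init_rx_config - reset a context's staging RXON
 *
 * Clear the staging RXON and fill in the device type, filter and
 * preamble flags that match the interface type bound to the context
 * (or the "unused" device type when no interface is bound), then set
 * the current channel and band and wildcard HT basic rates.
 */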
void iwl_connection_init_rx_config(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
memset(&ctx->staging, 0, sizeof(ctx->staging));
if (!ctx->vif) {
ctx->staging.dev_type = ctx->unused_devtype;
} else
switch (ctx->vif->type) {
case NL80211_IFTYPE_AP:
ctx->staging.dev_type = ctx->ap_devtype;
break;
case NL80211_IFTYPE_STATION:
ctx->staging.dev_type = ctx->station_devtype;
ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
break;
case NL80211_IFTYPE_ADHOC:
ctx->staging.dev_type = ctx->ibss_devtype;
ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
RXON_FILTER_ACCEPT_GRP_MSK;
break;
case NL80211_IFTYPE_MONITOR:
ctx->staging.dev_type = RXON_DEV_TYPE_SNIFFER;
break;
default:
IWL_ERR(priv, "Unsupported interface type %d\n",
ctx->vif->type);
break;
}
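/* short-preamble handling below is intentionally compiled out */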
#if 0
if (!hw_to_local(priv->hw)->short_preamble)
ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
else
ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif
ctx->staging.channel =
cpu_to_le16(priv->hw->conf.chandef.chan->hw_value);
priv->band = priv->hw->conf.chandef.chan->band;
iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
RXON_FLG_CHANNEL_MODE_PURE_40);
if (ctx->vif)
memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff;
}
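
/*
 * iwlagn_disable_bss - drop the association on the BSS context
 *
 * Send the BSS RXON with RXON_FILTER_ASSOC_MSK cleared so the firmware
 * disassociates, then restore the caller's filter_flags in the command
 * buffer.
 */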
static int iwlagn_disable_bss(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct iwl_rxon_cmd *send)
{
__le32 old_filter = send->filter_flags;
int ret;
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
0, sizeof(*send), send);
send->filter_flags = old_filter;
if (ret)
IWL_DEBUG_QUIET_RFKILL(priv,
"Error clearing ASSOC_MSK on BSS (%d)\n", ret);
return ret;
}
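
/*
 * iwlagn_disable_pan - deactivate the PAN context
 *
 * Send the PAN RXON with the ASSOC filter cleared and the device type
 * temporarily switched to RXON_DEV_TYPE_P2P, then wait (about one
 * second) for the firmware's REPLY_WIPAN_DEACTIVATION_COMPLETE
 * notification.
 */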
static int iwlagn_disable_pan(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct iwl_rxon_cmd *send)
{
struct iwl_notification_wait disable_wait;
__le32 old_filter = send->filter_flags;
u8 old_dev_type = send->dev_type;
int ret;
static const u16 deactivate_cmd[] = {
REPLY_WIPAN_DEACTIVATION_COMPLETE
};
iwl_init_notification_wait(&priv->notif_wait, &disable_wait,
deactivate_cmd, ARRAY_SIZE(deactivate_cmd),
NULL, NULL);
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
send->dev_type = RXON_DEV_TYPE_P2P;
ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
0, sizeof(*send), send);
send->filter_flags = old_filter;
send->dev_type = old_dev_type;
if (ret) {
IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
iwl_remove_notification(&priv->notif_wait, &disable_wait);
} else {
ret = iwl_wait_notification(&priv->notif_wait,
&disable_wait, HZ);
if (ret)
IWL_ERR(priv, "Timed out waiting for PAN disable\n");
}
return ret;
}
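
/*
 * iwlagn_disconn_pan - send the PAN RXON with the ASSOC filter cleared;
 * unlike iwlagn_disable_pan() this keeps the device type and does not
 * wait for a deactivation notification.
 */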
static int iwlagn_disconn_pan(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct iwl_rxon_cmd *send)
{
__le32 old_filter = send->filter_flags;
int ret;
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0,
sizeof(*send), send);
send->filter_flags = old_filter;
return ret;
}
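
/*
 * iwlagn_update_qos - push the context's EDCA/QoS parameters to the
 * firmware, flagging EDCA updates and HT (TGn) support as appropriate.
 * Does nothing for inactive contexts.
 */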
static void iwlagn_update_qos(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
int ret;
if (!ctx->is_active)
return;
ctx->qos_data.def_qos_parm.qos_flags = 0;
if (ctx->qos_data.qos_active)
ctx->qos_data.def_qos_parm.qos_flags |=
QOS_PARAM_FLG_UPDATE_EDCA_MSK;
if (ctx->ht.enabled)
ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
IWL_DEBUG_INFO(priv, "send QoS cmd with QoS active=%d FLAGS=0x%X\n",
ctx->qos_data.qos_active,
ctx->qos_data.def_qos_parm.qos_flags);
ret = iwl_dvm_send_cmd_pdu(priv, ctx->qos_cmd, 0,
sizeof(struct iwl_qosparam_cmd),
&ctx->qos_data.def_qos_parm);
if (ret)
IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n");
}
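
/*
 * iwlagn_update_beacon - fetch a fresh beacon from mac80211 and hand it
 * to the firmware through the beacon command.
 */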
static int iwlagn_update_beacon(struct iwl_priv *priv,
struct ieee80211_vif *vif)
{
lockdep_assert_held(&priv->mutex);
dev_kfree_skb(priv->beacon_skb);
priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif, 0);
if (!priv->beacon_skb)
return -ENOMEM;
return iwlagn_send_beacon_cmd(priv);
}
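
/*
 * iwlagn_send_rxon_assoc - send the lightweight RXON_ASSOC command
 *
 * Only the "hot" RXON fields (flags, filter flags, basic rates, RX
 * chain and acquisition data) are sent, asynchronously; if none of
 * them differ from the active RXON the command is skipped.
 */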
static int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
int ret = 0;
struct iwl_rxon_assoc_cmd rxon_assoc;
const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
const struct iwl_rxon_cmd *rxon2 = &ctx->active;
if ((rxon1->flags == rxon2->flags) &&
(rxon1->filter_flags == rxon2->filter_flags) &&
(rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
(rxon1->ofdm_ht_single_stream_basic_rates ==
rxon2->ofdm_ht_single_stream_basic_rates) &&
(rxon1->ofdm_ht_dual_stream_basic_rates ==
rxon2->ofdm_ht_dual_stream_basic_rates) &&
(rxon1->ofdm_ht_triple_stream_basic_rates ==
rxon2->ofdm_ht_triple_stream_basic_rates) &&
(rxon1->acquisition_data == rxon2->acquisition_data) &&
(rxon1->rx_chain == rxon2->rx_chain) &&
(rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
return 0;
}
rxon_assoc.flags = ctx->staging.flags;
rxon_assoc.filter_flags = ctx->staging.filter_flags;
rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
rxon_assoc.reserved1 = 0;
rxon_assoc.reserved2 = 0;
rxon_assoc.reserved3 = 0;
rxon_assoc.ofdm_ht_single_stream_basic_rates =
ctx->staging.ofdm_ht_single_stream_basic_rates;
rxon_assoc.ofdm_ht_dual_stream_basic_rates =
ctx->staging.ofdm_ht_dual_stream_basic_rates;
rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
rxon_assoc.ofdm_ht_triple_stream_basic_rates =
ctx->staging.ofdm_ht_triple_stream_basic_rates;
rxon_assoc.acquisition_data = ctx->staging.acquisition_data;
ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_assoc_cmd,
CMD_ASYNC, sizeof(rxon_assoc), &rxon_assoc);
return ret;
}
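
/*
 * iwl_adjust_beacon_interval - clamp a beacon interval for the uCode
 *
 * If the requested interval exceeds max_beacon_val, divide it down so
 * the device can still wake on a sub-multiple of the peer's beacons;
 * fall back to the default interval when none was given.
 */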
static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
{
u16 new_val;
u16 beacon_factor;
if (!beacon_val)
return DEFAULT_BEACON_INTERVAL;
beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
new_val = beacon_val / beacon_factor;
if (!new_val)
new_val = max_beacon_val;
return new_val;
}
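
/*
 * iwl_send_rxon_timing - program beacon timing for a context
 *
 * Fill in the timestamp, listen interval, DTIM period and beacon
 * interval and send the RXON timing command.
 */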
static int iwl_send_rxon_timing(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
u64 tsf;
s32 interval_tm, rem;
struct ieee80211_conf *conf = &priv->hw->conf;
u16 beacon_int;
struct ieee80211_vif *vif = ctx->vif;
lockdep_assert_held(&priv->mutex);
memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
beacon_int = vif ? vif->bss_conf.beacon_int : 0;
ctx->timing.atim_window = 0;
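	/*
	 * Keep the BSS and PAN beacon intervals in step when both
	 * contexts are in use; iwlagn_set_pan_params() splits the
	 * available time based on these values.
	 */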
if (ctx->ctxid == IWL_RXON_CTX_PAN &&
(!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) &&
iwl_is_associated(priv, IWL_RXON_CTX_BSS) &&
priv->contexts[IWL_RXON_CTX_BSS].vif &&
priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) {
ctx->timing.beacon_interval =
priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval;
beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
} else if (ctx->ctxid == IWL_RXON_CTX_BSS &&
iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
priv->contexts[IWL_RXON_CTX_PAN].vif &&
priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int &&
(!iwl_is_associated_ctx(ctx) || !ctx->vif ||
!ctx->vif->bss_conf.beacon_int)) {
ctx->timing.beacon_interval =
priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval;
beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
} else {
beacon_int = iwl_adjust_beacon_interval(beacon_int,
IWL_MAX_UCODE_BEACON_INTERVAL * TIME_UNIT);
ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
}
ctx->beacon_int = beacon_int;
tsf = priv->timestamp;
interval_tm = beacon_int * TIME_UNIT;
rem = do_div(tsf, interval_tm);
ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
IWL_DEBUG_ASSOC(priv,
"beacon interval %d beacon timer %d atim window %d\n",
le16_to_cpu(ctx->timing.beacon_interval),
le32_to_cpu(ctx->timing.beacon_init_val),
le16_to_cpu(ctx->timing.atim_window));
return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
0, sizeof(ctx->timing), &ctx->timing);
}
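
/*
 * iwlagn_rxon_disconn - take a context out of association
 *
 * Disassociate the BSS or PAN context, then clear the firmware station
 * table, re-add the broadcast station, restore driver-known stations
 * and default WEP keys, and finally mark the staging RXON as active.
 */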
static int iwlagn_rxon_disconn(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
int ret;
struct iwl_rxon_cmd *active = (void *)&ctx->active;
if (ctx->ctxid == IWL_RXON_CTX_BSS) {
ret = iwlagn_disable_bss(priv, ctx, &ctx->staging);
} else {
ret = iwlagn_disable_pan(priv, ctx, &ctx->staging);
if (ret)
return ret;
if (ctx->vif) {
ret = iwl_send_rxon_timing(priv, ctx);
if (ret) {
IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
return ret;
}
ret = iwlagn_disconn_pan(priv, ctx, &ctx->staging);
}
}
if (ret)
return ret;
iwl_clear_ucode_stations(priv, ctx);
iwl_update_bcast_station(priv, ctx);
iwl_restore_stations(priv, ctx);
ret = iwl_restore_default_wep_keys(priv, ctx);
if (ret) {
IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
return ret;
}
memcpy(active, &ctx->staging, sizeof(*active));
return 0;
}
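
/*
 * iwl_set_tx_power - set the user TX power limit (dBm)
 *
 * Reject values outside the supported range, defer the update while
 * scanning or while an RXON change is pending (unless forced), and
 * roll the limit back if the firmware command fails.
 */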
static int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
{
int ret;
s8 prev_tx_power;
bool defer;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
if (priv->calib_disabled & IWL_TX_POWER_CALIB_DISABLED)
return 0;
lockdep_assert_held(&priv->mutex);
if (priv->tx_power_user_lmt == tx_power && !force)
return 0;
if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
IWL_WARN(priv,
"Requested user TXPOWER %d below lower limit %d.\n",
tx_power,
IWLAGN_TX_POWER_TARGET_POWER_MIN);
return -EINVAL;
}
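	/* max_tx_pwr_half_dbm is in half-dBm units, hence the division by 2 */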
if (tx_power > DIV_ROUND_UP(priv->nvm_data->max_tx_pwr_half_dbm, 2)) {
IWL_WARN(priv,
"Requested user TXPOWER %d above upper limit %d.\n",
tx_power, DIV_ROUND_UP(priv->nvm_data->max_tx_pwr_half_dbm, 2));
return -EINVAL;
}
if (!iwl_is_ready_rf(priv))
return -EIO;
priv->tx_power_next = tx_power;
defer = test_bit(STATUS_SCANNING, &priv->status) ||
memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
if (defer && !force) {
IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
return 0;
}
prev_tx_power = priv->tx_power_user_lmt;
priv->tx_power_user_lmt = tx_power;
ret = iwlagn_send_tx_power(priv);
if (ret) {
priv->tx_power_user_lmt = prev_tx_power;
priv->tx_power_next = prev_tx_power;
}
return ret;
}
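
/*
 * iwlagn_rxon_connect - program a new association
 *
 * Send timing (BSS context only), update QoS, send the AP beacon if
 * needed, commit the staging RXON, then refresh the IBSS beacon,
 * sensitivity calibration and TX power.
 */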
static int iwlagn_rxon_connect(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
int ret;
struct iwl_rxon_cmd *active = (void *)&ctx->active;
if (ctx->ctxid == IWL_RXON_CTX_BSS) {
ret = iwl_send_rxon_timing(priv, ctx);
if (ret) {
IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
return ret;
}
}
iwlagn_update_qos(priv, ctx);
if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_AP)) {
ret = iwlagn_update_beacon(priv, ctx->vif);
if (ret) {
IWL_ERR(priv,
"Error sending required beacon (%d)!\n",
ret);
return ret;
}
}
priv->start_calib = 0;
ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0,
sizeof(struct iwl_rxon_cmd), &ctx->staging);
if (ret) {
IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
return ret;
}
memcpy(active, &ctx->staging, sizeof(*active));
if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_ADHOC))
if (iwlagn_update_beacon(priv, ctx->vif))
IWL_ERR(priv, "Error sending IBSS beacon\n");
iwl_init_sensitivity(priv);
ret = iwl_set_tx_power(priv, priv->tx_power_next, true);
if (ret) {
IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
return ret;
}
return 0;
}
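
/*
 * iwlagn_set_pan_params - program the WIPAN time-slot parameters
 *
 * With both contexts in use, the beacon interval is split between the
 * BSS (slot 0) and PAN (slot 1) slots, biased towards whichever side
 * is still connecting, or towards the BSS while a scan is running.
 * With only a PAN interface, slot 1 covers (at least) the PAN DTIM
 * interval.
 */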
int iwlagn_set_pan_params(struct iwl_priv *priv)
{
struct iwl_wipan_params_cmd cmd;
struct iwl_rxon_context *ctx_bss, *ctx_pan;
int slot0 = 300, slot1 = 0;
int ret;
if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS))
return 0;
BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
lockdep_assert_held(&priv->mutex);
ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS];
ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN];
if (!ctx_pan->is_active)
return 0;
memset(&cmd, 0, sizeof(cmd));
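	/* two time slots: slot 0 for the BSS context, slot 1 for PAN */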
cmd.num_slots = 2;
cmd.slots[0].type = 0;
cmd.slots[1].type = 1;
if (ctx_bss->vif && ctx_pan->vif) {
int bcnint = ctx_pan->beacon_int;
int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;
cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE);
if (ctx_pan->vif->type == NL80211_IFTYPE_AP &&
bcnint &&
bcnint != ctx_bss->beacon_int) {
IWL_ERR(priv,
"beacon intervals don't match (%d, %d)\n",
ctx_bss->beacon_int, ctx_pan->beacon_int);
} else
bcnint = max_t(int, bcnint,
ctx_bss->beacon_int);
if (!bcnint)
bcnint = DEFAULT_BEACON_INTERVAL;
slot0 = bcnint / 2;
slot1 = bcnint - slot0;
if (test_bit(STATUS_SCAN_HW, &priv->status) ||
(!ctx_bss->vif->cfg.idle &&
!ctx_bss->vif->cfg.assoc)) {
slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
slot1 = IWL_MIN_SLOT_TIME;
} else if (!ctx_pan->vif->cfg.idle &&
!ctx_pan->vif->cfg.assoc) {
slot1 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
slot0 = IWL_MIN_SLOT_TIME;
}
} else if (ctx_pan->vif) {
slot0 = 0;
slot1 = max_t(int, 1, ctx_pan->vif->bss_conf.dtim_period) *
ctx_pan->beacon_int;
slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);
if (test_bit(STATUS_SCAN_HW, &priv->status)) {
slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
slot1 = IWL_MIN_SLOT_TIME;
}
}
cmd.slots[0].width = cpu_to_le16(slot0);
cmd.slots[1].width = cpu_to_le16(slot1);
ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, 0,
sizeof(cmd), &cmd);
if (ret)
IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);
return ret;
}
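
/*
 * _iwl_set_rxon_ht - translate the HT configuration into RXON flags
 *
 * Set the HT protection mode and the channel-mode/control-channel
 * flags for HT40 (when allowed); fall back to legacy channel mode
 * otherwise, or clear the HT flags entirely when HT is disabled.
 */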
static void _iwl_set_rxon_ht(struct iwl_priv *priv,
struct iwl_ht_config *ht_conf,
struct iwl_rxon_context *ctx)
{
struct iwl_rxon_cmd *rxon = &ctx->staging;
if (!ctx->ht.enabled) {
rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
RXON_FLG_HT40_PROT_MSK |
RXON_FLG_HT_PROT_MSK);
return;
}
rxon->flags |= cpu_to_le32(ctx->ht.protection <<
RXON_FLG_HT_OPERATING_MODE_POS);
rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
if (iwl_is_ht40_tx_allowed(priv, ctx, NULL)) {
if (ctx->ht.protection ==
IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
switch (ctx->ht.extension_chan_offset) {
case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
rxon->flags &=
~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
break;
case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
rxon->flags |=
RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
break;
}
} else {
switch (ctx->ht.extension_chan_offset) {
case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
rxon->flags &=
~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
break;
case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
break;
case IEEE80211_HT_PARAM_CHA_SEC_NONE:
default:
IWL_ERR(priv,
"invalid extension channel offset\n");
break;
}
}
} else {
rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
}
iwlagn_set_rxon_chain(priv, ctx);
IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode: 0x%X "
"extension channel offset 0x%x\n",
le32_to_cpu(rxon->flags), ctx->ht.protection,
ctx->ht.extension_chan_offset);
}
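
/* iwl_set_rxon_ht - apply the HT configuration to every context */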
void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
{
struct iwl_rxon_context *ctx;
for_each_context(priv, ctx)
_iwl_set_rxon_ht(priv, ht_conf, ctx);
}
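
/*
 * iwl_set_rxon_channel - switch a context's staging RXON to a new
 * channel, updating the band flag; no-op if nothing changed.
 */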
void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
struct iwl_rxon_context *ctx)
{
enum nl80211_band band = ch->band;
u16 channel = ch->hw_value;
if ((le16_to_cpu(ctx->staging.channel) == channel) &&
(priv->band == band))
return;
ctx->staging.channel = cpu_to_le16(channel);
if (band == NL80211_BAND_5GHZ)
ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
else
ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
priv->band = band;
IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
}
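
/*
 * iwl_set_flags_for_band - set band-dependent RXON flags
 *
 * 5 GHz forces short slot and clears the CCK and auto-detect flags;
 * 2.4 GHz enables auto-detect, clears CCK and honours the interface's
 * short-slot setting.
 */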
void iwl_set_flags_for_band(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
enum nl80211_band band,
struct ieee80211_vif *vif)
{
if (band == NL80211_BAND_5GHZ) {
ctx->staging.flags &=
~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
| RXON_FLG_CCK_MSK);
ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
} else {
if (vif && vif->bss_conf.use_short_slot)
ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
else
ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
}
}
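
/*
 * iwl_set_rxon_hwcrypto - enable or disable hardware decryption by
 * toggling RXON_FILTER_DIS_DECRYPT_MSK in the staging RXON.
 */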
static void iwl_set_rxon_hwcrypto(struct iwl_priv *priv,
struct iwl_rxon_context *ctx, int hw_decrypt)
{
struct iwl_rxon_cmd *rxon = &ctx->staging;
if (hw_decrypt)
rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
else
rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
}
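
/*
 * iwl_check_rxon_cmd - validate the staging RXON
 *
 * Each violated constraint (band vs. slot/CCK/radar flags, multicast
 * addresses, missing basic rates, AID range, conflicting flags, zero
 * channel) sets a bit in 'errors'; warn and return -EINVAL if any
 * check failed.
 */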
static int iwl_check_rxon_cmd(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
struct iwl_rxon_cmd *rxon = &ctx->staging;
u32 errors = 0;
if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
IWL_WARN(priv, "check 2.4G: wrong narrow\n");
errors |= BIT(0);
}
if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
IWL_WARN(priv, "check 2.4G: wrong radar\n");
errors |= BIT(1);
}
} else {
if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
IWL_WARN(priv, "check 5.2G: not short slot!\n");
errors |= BIT(2);
}
if (rxon->flags & RXON_FLG_CCK_MSK) {
IWL_WARN(priv, "check 5.2G: CCK!\n");
errors |= BIT(3);
}
}
if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
IWL_WARN(priv, "mac/bssid mcast!\n");
errors |= BIT(4);
}
if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
(rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
IWL_WARN(priv, "neither 1 nor 6 are basic\n");
errors |= BIT(5);
}
if (le16_to_cpu(rxon->assoc_id) > 2007) {
IWL_WARN(priv, "aid > 2007\n");
errors |= BIT(6);
}
if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
== (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
IWL_WARN(priv, "CCK and short slot\n");
errors |= BIT(7);
}
if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
== (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
IWL_WARN(priv, "CCK and auto detect\n");
errors |= BIT(8);
}
if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
RXON_FLG_TGG_PROTECT_MSK)) ==
RXON_FLG_TGG_PROTECT_MSK) {
IWL_WARN(priv, "TGg but no auto-detect\n");
errors |= BIT(9);
}
if (rxon->channel == 0) {
IWL_WARN(priv, "zero channel is invalid\n");
errors |= BIT(10);
}
WARN(errors, "Invalid RXON (%#x), channel %d",
errors, le16_to_cpu(rxon->channel));
return errors ? -EINVAL : 0;
}
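
/*
 * iwl_full_rxon_required - decide between a full RXON and RXON_ASSOC
 *
 * Return 1 if the differences between the staging and active RXON
 * (addresses, device type, channel, band, HT basic rates or
 * association state) require a full RXON command.
 */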
static int iwl_full_rxon_required(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
const struct iwl_rxon_cmd *staging = &ctx->staging;
const struct iwl_rxon_cmd *active = &ctx->active;
#define CHK(cond) \
if ((cond)) { \
IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \
return 1; \
}
#define CHK_NEQ(c1, c2) \
if ((c1) != (c2)) { \
IWL_DEBUG_INFO(priv, "need full RXON - " \
#c1 " != " #c2 " - %d != %d\n", \
(c1), (c2)); \
return 1; \
}
CHK(!iwl_is_associated_ctx(ctx));
CHK(!ether_addr_equal(staging->bssid_addr, active->bssid_addr));
CHK(!ether_addr_equal(staging->node_addr, active->node_addr));
CHK(!ether_addr_equal(staging->wlap_bssid_addr,
active->wlap_bssid_addr));
CHK_NEQ(staging->dev_type, active->dev_type);
CHK_NEQ(staging->channel, active->channel);
CHK_NEQ(staging->air_propagation, active->air_propagation);
CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
active->ofdm_ht_single_stream_basic_rates);
CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
active->ofdm_ht_dual_stream_basic_rates);
CHK_NEQ(staging->ofdm_ht_triple_stream_basic_rates,
active->ofdm_ht_triple_stream_basic_rates);
CHK_NEQ(staging->assoc_id, active->assoc_id);
CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
active->flags & RXON_FLG_BAND_24G_MSK);
CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
active->filter_flags & RXON_FILTER_ASSOC_MSK);
#undef CHK
#undef CHK_NEQ
return 0;
}
#ifdef CONFIG_IWLWIFI_DEBUG
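/* iwl_print_rx_config_cmd - dump a context's staging RXON to the debug log */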
void iwl_print_rx_config_cmd(struct iwl_priv *priv,
enum iwl_rxon_context_id ctxid)
{
struct iwl_rxon_context *ctx = &priv->contexts[ctxid];
struct iwl_rxon_cmd *rxon = &ctx->staging;
IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
le16_to_cpu(rxon->channel));
IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n",
le32_to_cpu(rxon->flags));
IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
le32_to_cpu(rxon->filter_flags));
IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
rxon->ofdm_basic_rates);
IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
rxon->cck_basic_rates);
IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
le16_to_cpu(rxon->assoc_id));
}
#endif
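
/*
 * iwl_calc_basic_rates - derive the CCK and OFDM basic-rate bitmaps
 * from the interface's basic_rates, then add the mandatory low rates.
 */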
static void iwl_calc_basic_rates(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
int lowest_present_ofdm = 100;
int lowest_present_cck = 100;
u8 cck = 0;
u8 ofdm = 0;
if (ctx->vif) {
struct ieee80211_supported_band *sband;
unsigned long basic = ctx->vif->bss_conf.basic_rates;
int i;
sband = priv->hw->wiphy->bands[priv->hw->conf.chandef.chan->band];
for_each_set_bit(i, &basic, BITS_PER_LONG) {
int hw = sband->bitrates[i].hw_value;
if (hw >= IWL_FIRST_OFDM_RATE) {
ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE);
if (lowest_present_ofdm > hw)
lowest_present_ofdm = hw;
} else {
BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
cck |= BIT(hw);
if (lowest_present_cck > hw)
lowest_present_cck = hw;
}
}
}
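	/*
	 * Always include the mandatory low rates: OFDM 6M and CCK 1M,
	 * plus any mandatory rate (24/12M, 11/5.5/2M) that lies below
	 * the lowest configured basic rate.
	 */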
if (IWL_RATE_24M_INDEX < lowest_present_ofdm)
ofdm |= IWL_RATE_24M_MASK >> IWL_FIRST_OFDM_RATE;
if (IWL_RATE_12M_INDEX < lowest_present_ofdm)
ofdm |= IWL_RATE_12M_MASK >> IWL_FIRST_OFDM_RATE;
ofdm |= IWL_RATE_6M_MASK >> IWL_FIRST_OFDM_RATE;
if (IWL_RATE_11M_INDEX < lowest_present_cck)
cck |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE;
if (IWL_RATE_5M_INDEX < lowest_present_cck)
cck |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE;
if (IWL_RATE_2M_INDEX < lowest_present_cck)
cck |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE;
cck |= IWL_RATE_1M_MASK >> IWL_FIRST_CCK_RATE;
IWL_DEBUG_RATE(priv, "Set basic rates cck:0x%.2x ofdm:0x%.2x\n",
cck, ofdm);
ctx->staging.cck_basic_rates = cck;
ctx->staging.ofdm_basic_rates = ofdm;
}
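
/*
 * iwlagn_commit_rxon - validate and program the staging RXON
 *
 * Recalculate basic rates, sanity-check the command and abort a
 * pending channel switch if the channel changed.  Use the lightweight
 * RXON_ASSOC path when possible; otherwise disassociate, reprogram the
 * PAN parameters and, if the new RXON carries the ASSOC filter,
 * reconnect.
 */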
int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
struct iwl_rxon_cmd *active = (void *)&ctx->active;
bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
int ret;
lockdep_assert_held(&priv->mutex);
if (!iwl_is_alive(priv))
return -EBUSY;
BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
if (!ctx->is_active)
return 0;
ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
iwl_calc_basic_rates(priv, ctx);
if (!priv->hw_params.use_rts_for_aggregation)
ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
else
ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
iwl_print_rx_config_cmd(priv, ctx->ctxid);
ret = iwl_check_rxon_cmd(priv, ctx);
if (ret) {
IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
return -EINVAL;
}
if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
(priv->switch_channel != ctx->staging.channel)) {
IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
le16_to_cpu(priv->switch_channel));
iwl_chswitch_done(priv, false);
}
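	/*
	 * If only the "hot" parameters changed, the lightweight
	 * RXON_ASSOC command is enough and no disassociation is needed.
	 */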
if (!iwl_full_rxon_required(priv, ctx)) {
ret = iwlagn_send_rxon_assoc(priv, ctx);
if (ret) {
IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
return ret;
}
memcpy(active, &ctx->staging, sizeof(*active));
iwl_set_tx_power(priv, priv->tx_power_next, false);
iwl_power_update_mode(priv, true);
return 0;
}
iwl_set_rxon_hwcrypto(priv, ctx, !iwlwifi_mod_params.swcrypto);
IWL_DEBUG_INFO(priv,
"Going to commit RXON\n"
" * with%s RXON_FILTER_ASSOC_MSK\n"
" * channel = %d\n"
" * bssid = %pM\n",
(new_assoc ? "" : "out"),
le16_to_cpu(ctx->staging.channel),
ctx->staging.bssid_addr);
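	/*
	 * A full RXON change requires tearing the connection down
	 * first; afterwards reprogram the PAN parameters and, if the
	 * new RXON carries the ASSOC filter, bring the connection back
	 * up.
	 */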
ret = iwlagn_rxon_disconn(priv, ctx);
if (ret)
return ret;
ret = iwlagn_set_pan_params(priv);
if (ret)
return ret;
if (new_assoc)
return iwlagn_rxon_connect(priv, ctx);
return 0;
}
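
/*
 * iwlagn_config_ht40 - record the HT40 extension-channel offset and
 * the 40 MHz flag from the current mac80211 channel configuration.
 */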
void iwlagn_config_ht40(struct ieee80211_conf *conf,
struct iwl_rxon_context *ctx)
{
if (conf_is_ht40_minus(conf)) {
ctx->ht.extension_chan_offset =
IEEE80211_HT_PARAM_CHA_SEC_BELOW;
ctx->ht.is_40mhz = true;
} else if (conf_is_ht40_plus(conf)) {
ctx->ht.extension_chan_offset =
IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
ctx->ht.is_40mhz = true;
} else {
ctx->ht.extension_chan_offset =
IEEE80211_HT_PARAM_CHA_SEC_NONE;
ctx->ht.is_40mhz = false;
}
}
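
/*
 * iwlagn_mac_config - mac80211 config callback
 *
 * Handle SMPS, channel, HT, power-save and TX-power changes (skipped
 * while scanning or when the device is not ready) and commit every
 * context whose staging RXON now differs from the active one.
 */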
int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_rxon_context *ctx;
struct ieee80211_conf *conf = &hw->conf;
struct ieee80211_channel *channel = conf->chandef.chan;
int ret = 0;
IWL_DEBUG_MAC80211(priv, "enter: changed %#x\n", changed);
mutex_lock(&priv->mutex);
if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
goto out;
}
if (!iwl_is_ready(priv)) {
IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
goto out;
}
if (changed & (IEEE80211_CONF_CHANGE_SMPS |
IEEE80211_CONF_CHANGE_CHANNEL)) {
priv->current_ht_config.smps = conf->smps_mode;
for_each_context(priv, ctx)
iwlagn_set_rxon_chain(priv, ctx);
}
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
for_each_context(priv, ctx) {
if (ctx->ht.enabled != conf_is_ht(conf))
ctx->ht.enabled = conf_is_ht(conf);
if (ctx->ht.enabled) {
if (!ctx->ht.is_40mhz ||
!iwl_is_associated_ctx(ctx))
iwlagn_config_ht40(conf, ctx);
} else
ctx->ht.is_40mhz = false;
ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
if (le16_to_cpu(ctx->staging.channel) !=
channel->hw_value)
ctx->staging.flags = 0;
iwl_set_rxon_channel(priv, channel, ctx);
iwl_set_rxon_ht(priv, &priv->current_ht_config);
iwl_set_flags_for_band(priv, ctx, channel->band,
ctx->vif);
}
iwl_update_bcast_stations(priv);
}
if (changed & (IEEE80211_CONF_CHANGE_PS |
IEEE80211_CONF_CHANGE_IDLE)) {
ret = iwl_power_update_mode(priv, false);
if (ret)
IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
priv->tx_power_user_lmt, conf->power_level);
iwl_set_tx_power(priv, conf->power_level, false);
}
for_each_context(priv, ctx) {
if (!memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
continue;
iwlagn_commit_rxon(priv, ctx);
}
out:
mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return ret;
}
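
/*
 * iwlagn_check_needed_chains - decide whether multiple RX chains are
 * needed for this context
 *
 * A station interface needs them only if the AP advertises more than
 * one spatial stream; IBSS never does, everything else does.  The
 * result also takes the other context into account and is reflected
 * in ht_conf->single_chain_sufficient.
 */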
static void iwlagn_check_needed_chains(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct ieee80211_bss_conf *bss_conf)
{
struct ieee80211_vif *vif = ctx->vif;
struct iwl_rxon_context *tmp;
struct ieee80211_sta *sta;
struct iwl_ht_config *ht_conf = &priv->current_ht_config;
struct ieee80211_sta_ht_cap *ht_cap;
bool need_multiple;
lockdep_assert_held(&priv->mutex);
switch (vif->type) {
case NL80211_IFTYPE_STATION:
rcu_read_lock();
sta = ieee80211_find_sta(vif, bss_conf->bssid);
if (!sta) {
need_multiple = false;
rcu_read_unlock();
break;
}
ht_cap = &sta->deflink.ht_cap;
need_multiple = true;
if (ht_cap->mcs.rx_mask[1] == 0 &&
ht_cap->mcs.rx_mask[2] == 0) {
need_multiple = false;
} else if (!(ht_cap->mcs.tx_params &
IEEE80211_HT_MCS_TX_DEFINED)) {
need_multiple = false;
} else if (ht_cap->mcs.tx_params &
IEEE80211_HT_MCS_TX_RX_DIFF) {
int maxstreams;
maxstreams = (ht_cap->mcs.tx_params &
IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK);
maxstreams >>=
IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
maxstreams += 1;
if (maxstreams <= 1)
need_multiple = false;
}
rcu_read_unlock();
break;
case NL80211_IFTYPE_ADHOC:
need_multiple = false;
break;
default:
need_multiple = true;
break;
}
ctx->ht_need_multiple_chains = need_multiple;
if (!need_multiple) {
for_each_context(priv, tmp) {
if (!tmp->vif)
continue;
if (tmp->ht_need_multiple_chains) {
need_multiple = true;
break;
}
}
}
ht_conf->single_chain_sufficient = !need_multiple;
}
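
/*
 * iwlagn_chain_noise_reset - restart chain-noise calibration
 *
 * Once associated and with the calibration state still ALIVE, zero the
 * accumulated noise/signal data and send the PHY calibration reset
 * command to the firmware.
 */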
static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
{
struct iwl_chain_noise_data *data = &priv->chain_noise_data;
int ret;
if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)
return;
if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
iwl_is_any_associated(priv)) {
struct iwl_calib_chain_noise_reset_cmd cmd;
data->chain_noise_a = 0;
data->chain_noise_b = 0;
data->chain_noise_c = 0;
data->chain_signal_a = 0;
data->chain_signal_b = 0;
data->chain_signal_c = 0;
data->beacon_count = 0;
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr,
priv->phy_calib_chain_noise_reset_cmd);
ret = iwl_dvm_send_cmd_pdu(priv,
REPLY_PHY_CALIBRATION_CMD,
0, sizeof(cmd), &cmd);
if (ret)
IWL_ERR(priv,
"Could not send REPLY_PHY_CALIBRATION_CMD\n");
data->state = IWL_CHAIN_NOISE_ACCUMULATE;
IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
}
}
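
/*
 * iwlagn_bss_info_changed - mac80211 BSS info callback
 *
 * Update QoS, association state, HT and CTS protection, BSSID and
 * beacon-related filter flags in the staging RXON, commit it when
 * something changed (or the beacon interval did), and kick off
 * calibration after a new association.
 */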
void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u64 changes)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
int ret;
bool force = false;
mutex_lock(&priv->mutex);
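	/*
	 * Going idle means the passive-no-RX transmit workaround is no
	 * longer needed, so lift it.
	 */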
if (changes & BSS_CHANGED_IDLE && vif->cfg.idle) {
iwlagn_lift_passive_no_rx(priv);
}
if (unlikely(!iwl_is_ready(priv))) {
IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
mutex_unlock(&priv->mutex);
return;
}
if (unlikely(!ctx->vif)) {
IWL_DEBUG_MAC80211(priv, "leave - vif is NULL\n");
mutex_unlock(&priv->mutex);
return;
}
if (changes & BSS_CHANGED_BEACON_INT)
force = true;
if (changes & BSS_CHANGED_QOS) {
ctx->qos_data.qos_active = bss_conf->qos;
iwlagn_update_qos(priv, ctx);
}
ctx->staging.assoc_id = cpu_to_le16(vif->cfg.aid);
if (vif->bss_conf.use_short_preamble)
ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
else
ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
if (changes & BSS_CHANGED_ASSOC) {
if (vif->cfg.assoc) {
priv->timestamp = bss_conf->sync_tsf;
ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
} else {
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
if (ctx->ctxid == IWL_RXON_CTX_BSS)
priv->have_rekey_data = false;
}
iwlagn_bt_coex_rssi_monitor(priv);
}
if (ctx->ht.enabled) {
ctx->ht.protection = bss_conf->ht_operation_mode &
IEEE80211_HT_OP_MODE_PROTECTION;
ctx->ht.non_gf_sta_present = !!(bss_conf->ht_operation_mode &
IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
iwlagn_check_needed_chains(priv, ctx, bss_conf);
iwl_set_rxon_ht(priv, &priv->current_ht_config);
}
iwlagn_set_rxon_chain(priv, ctx);
if (bss_conf->use_cts_prot && (priv->band != NL80211_BAND_5GHZ))
ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
else
ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
if (bss_conf->use_cts_prot)
ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
else
ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC) {
if (vif->bss_conf.enable_beacon) {
ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
priv->beacon_ctx = ctx;
} else {
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
priv->beacon_ctx = NULL;
}
}
if (vif->type == NL80211_IFTYPE_STATION) {
if (!vif->cfg.assoc)
ctx->staging.filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
else
ctx->staging.filter_flags &=
~RXON_FILTER_BCON_AWARE_MSK;
}
if (force || memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
iwlagn_commit_rxon(priv, ctx);
if (changes & BSS_CHANGED_ASSOC && vif->cfg.assoc) {
if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
iwl_power_update_mode(priv, false);
iwlagn_chain_noise_reset(priv);
priv->start_calib = 1;
}
if (changes & BSS_CHANGED_IBSS) {
ret = iwlagn_manage_ibss_station(priv, vif,
vif->cfg.ibss_joined);
if (ret)
IWL_ERR(priv, "failed to %s IBSS station %pM\n",
vif->cfg.ibss_joined ? "add" : "remove",
bss_conf->bssid);
}
if (changes & BSS_CHANGED_BEACON && priv->beacon_ctx == ctx) {
if (iwlagn_update_beacon(priv, vif))
IWL_ERR(priv, "Error updating beacon\n");
}
mutex_unlock(&priv->mutex);
}
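
/*
 * iwlagn_post_scan - after a scan completes, restore power and TX
 * power settings and commit any RXON changes that were deferred while
 * the scan was running.
 */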
void iwlagn_post_scan(struct iwl_priv *priv)
{
struct iwl_rxon_context *ctx;
iwl_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
iwl_set_tx_power(priv, priv->tx_power_next, false);
for_each_context(priv, ctx)
if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
iwlagn_commit_rxon(priv, ctx);
iwlagn_set_pan_params(priv);
}