Pileus Git - ~andy/linux/blob - drivers/net/wireless/iwlwifi/iwl-core.c
iwlagn: leave notification waits on firmware errors
[~andy/linux] / drivers / net / wireless / iwlwifi / iwl-core.c
1 /******************************************************************************
2  *
3  * GPL LICENSE SUMMARY
4  *
5  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of version 2 of the GNU General Public License as
9  * published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful, but
12  * WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19  * USA
20  *
21  * The full GNU General Public License is included in this distribution
22  * in the file called LICENSE.GPL.
23  *
24  * Contact Information:
25  *  Intel Linux Wireless <ilw@linux.intel.com>
26  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27  *****************************************************************************/
28
29 #include <linux/kernel.h>
30 #include <linux/module.h>
31 #include <linux/etherdevice.h>
32 #include <linux/sched.h>
33 #include <linux/slab.h>
34 #include <net/mac80211.h>
35
36 #include "iwl-eeprom.h"
37 #include "iwl-dev.h" /* FIXME: remove */
38 #include "iwl-debug.h"
39 #include "iwl-core.h"
40 #include "iwl-io.h"
41 #include "iwl-power.h"
42 #include "iwl-sta.h"
43 #include "iwl-helpers.h"
44
45
46 /*
47  * set bt_coex_active to true, uCode will do kill/defer
48  * every time the priority line is asserted (BT is sending signals on the
49  * priority line in the PCIx).
50  * set bt_coex_active to false, uCode will ignore the BT activity and
51  * perform the normal operation
52  *
53  * User might experience transmit issue on some platform due to WiFi/BT
54  * co-exist problem. The possible behaviors are:
55  *   Able to scan and finding all the available AP
56  *   Not able to associate with any AP
57  * On those platforms, WiFi communication can be restored by set
58  * "bt_coex_active" module parameter to "false"
59  *
60  * default: bt_coex_active = true (BT_COEX_ENABLE)
61  */
62 bool bt_coex_active = true;
63 module_param(bt_coex_active, bool, S_IRUGO);
64 MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
65
66 u32 iwl_debug_level;
67
68 const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
69
70 #define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
71 #define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
72 static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
73                               struct ieee80211_sta_ht_cap *ht_info,
74                               enum ieee80211_band band)
75 {
76         u16 max_bit_rate = 0;
77         u8 rx_chains_num = priv->hw_params.rx_chains_num;
78         u8 tx_chains_num = priv->hw_params.tx_chains_num;
79
80         ht_info->cap = 0;
81         memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
82
83         ht_info->ht_supported = true;
84
85         if (priv->cfg->ht_params &&
86             priv->cfg->ht_params->ht_greenfield_support)
87                 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
88         ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
89         max_bit_rate = MAX_BIT_RATE_20_MHZ;
90         if (priv->hw_params.ht40_channel & BIT(band)) {
91                 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
92                 ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
93                 ht_info->mcs.rx_mask[4] = 0x01;
94                 max_bit_rate = MAX_BIT_RATE_40_MHZ;
95         }
96
97         if (priv->cfg->mod_params->amsdu_size_8K)
98                 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
99
100         ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
101         if (priv->cfg->bt_params && priv->cfg->bt_params->ampdu_factor)
102                 ht_info->ampdu_factor = priv->cfg->bt_params->ampdu_factor;
103         ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
104         if (priv->cfg->bt_params && priv->cfg->bt_params->ampdu_density)
105                 ht_info->ampdu_density = priv->cfg->bt_params->ampdu_density;
106
107         ht_info->mcs.rx_mask[0] = 0xFF;
108         if (rx_chains_num >= 2)
109                 ht_info->mcs.rx_mask[1] = 0xFF;
110         if (rx_chains_num >= 3)
111                 ht_info->mcs.rx_mask[2] = 0xFF;
112
113         /* Highest supported Rx data rate */
114         max_bit_rate *= rx_chains_num;
115         WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
116         ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
117
118         /* Tx MCS capabilities */
119         ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
120         if (tx_chains_num != rx_chains_num) {
121                 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
122                 ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
123                                 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
124         }
125 }
126
127 /**
128  * iwlcore_init_geos - Initialize mac80211's geo/channel info based from eeprom
129  */
130 int iwlcore_init_geos(struct iwl_priv *priv)
131 {
132         struct iwl_channel_info *ch;
133         struct ieee80211_supported_band *sband;
134         struct ieee80211_channel *channels;
135         struct ieee80211_channel *geo_ch;
136         struct ieee80211_rate *rates;
137         int i = 0;
138
139         if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
140             priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
141                 IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
142                 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
143                 return 0;
144         }
145
146         channels = kzalloc(sizeof(struct ieee80211_channel) *
147                            priv->channel_count, GFP_KERNEL);
148         if (!channels)
149                 return -ENOMEM;
150
151         rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
152                         GFP_KERNEL);
153         if (!rates) {
154                 kfree(channels);
155                 return -ENOMEM;
156         }
157
158         /* 5.2GHz channels start after the 2.4GHz channels */
159         sband = &priv->bands[IEEE80211_BAND_5GHZ];
160         sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
161         /* just OFDM */
162         sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
163         sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
164
165         if (priv->cfg->sku & IWL_SKU_N)
166                 iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
167                                          IEEE80211_BAND_5GHZ);
168
169         sband = &priv->bands[IEEE80211_BAND_2GHZ];
170         sband->channels = channels;
171         /* OFDM & CCK */
172         sband->bitrates = rates;
173         sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
174
175         if (priv->cfg->sku & IWL_SKU_N)
176                 iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
177                                          IEEE80211_BAND_2GHZ);
178
179         priv->ieee_channels = channels;
180         priv->ieee_rates = rates;
181
182         for (i = 0;  i < priv->channel_count; i++) {
183                 ch = &priv->channel_info[i];
184
185                 /* FIXME: might be removed if scan is OK */
186                 if (!is_channel_valid(ch))
187                         continue;
188
189                 sband =  &priv->bands[ch->band];
190
191                 geo_ch = &sband->channels[sband->n_channels++];
192
193                 geo_ch->center_freq =
194                         ieee80211_channel_to_frequency(ch->channel, ch->band);
195                 geo_ch->max_power = ch->max_power_avg;
196                 geo_ch->max_antenna_gain = 0xff;
197                 geo_ch->hw_value = ch->channel;
198
199                 if (is_channel_valid(ch)) {
200                         if (!(ch->flags & EEPROM_CHANNEL_IBSS))
201                                 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
202
203                         if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
204                                 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
205
206                         if (ch->flags & EEPROM_CHANNEL_RADAR)
207                                 geo_ch->flags |= IEEE80211_CHAN_RADAR;
208
209                         geo_ch->flags |= ch->ht40_extension_channel;
210
211                         if (ch->max_power_avg > priv->tx_power_device_lmt)
212                                 priv->tx_power_device_lmt = ch->max_power_avg;
213                 } else {
214                         geo_ch->flags |= IEEE80211_CHAN_DISABLED;
215                 }
216
217                 IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
218                                 ch->channel, geo_ch->center_freq,
219                                 is_channel_a_band(ch) ?  "5.2" : "2.4",
220                                 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
221                                 "restricted" : "valid",
222                                  geo_ch->flags);
223         }
224
225         if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
226              priv->cfg->sku & IWL_SKU_A) {
227                 IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
228                         "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
229                            priv->pci_dev->device,
230                            priv->pci_dev->subsystem_device);
231                 priv->cfg->sku &= ~IWL_SKU_A;
232         }
233
234         IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
235                    priv->bands[IEEE80211_BAND_2GHZ].n_channels,
236                    priv->bands[IEEE80211_BAND_5GHZ].n_channels);
237
238         set_bit(STATUS_GEO_CONFIGURED, &priv->status);
239
240         return 0;
241 }
242
243 /*
244  * iwlcore_free_geos - undo allocations in iwlcore_init_geos
245  */
246 void iwlcore_free_geos(struct iwl_priv *priv)
247 {
248         kfree(priv->ieee_channels);
249         kfree(priv->ieee_rates);
250         clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
251 }
252
253 static bool iwl_is_channel_extension(struct iwl_priv *priv,
254                                      enum ieee80211_band band,
255                                      u16 channel, u8 extension_chan_offset)
256 {
257         const struct iwl_channel_info *ch_info;
258
259         ch_info = iwl_get_channel_info(priv, band, channel);
260         if (!is_channel_valid(ch_info))
261                 return false;
262
263         if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
264                 return !(ch_info->ht40_extension_channel &
265                                         IEEE80211_CHAN_NO_HT40PLUS);
266         else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
267                 return !(ch_info->ht40_extension_channel &
268                                         IEEE80211_CHAN_NO_HT40MINUS);
269
270         return false;
271 }
272
273 bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
274                             struct iwl_rxon_context *ctx,
275                             struct ieee80211_sta_ht_cap *ht_cap)
276 {
277         if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
278                 return false;
279
280         /*
281          * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
282          * the bit will not set if it is pure 40MHz case
283          */
284         if (ht_cap && !ht_cap->ht_supported)
285                 return false;
286
287 #ifdef CONFIG_IWLWIFI_DEBUGFS
288         if (priv->disable_ht40)
289                 return false;
290 #endif
291
292         return iwl_is_channel_extension(priv, priv->band,
293                         le16_to_cpu(ctx->staging.channel),
294                         ctx->ht.extension_chan_offset);
295 }
296
297 static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
298 {
299         u16 new_val;
300         u16 beacon_factor;
301
302         /*
303          * If mac80211 hasn't given us a beacon interval, program
304          * the default into the device (not checking this here
305          * would cause the adjustment below to return the maximum
306          * value, which may break PAN.)
307          */
308         if (!beacon_val)
309                 return DEFAULT_BEACON_INTERVAL;
310
311         /*
312          * If the beacon interval we obtained from the peer
313          * is too large, we'll have to wake up more often
314          * (and in IBSS case, we'll beacon too much)
315          *
316          * For example, if max_beacon_val is 4096, and the
317          * requested beacon interval is 7000, we'll have to
318          * use 3500 to be able to wake up on the beacons.
319          *
320          * This could badly influence beacon detection stats.
321          */
322
323         beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
324         new_val = beacon_val / beacon_factor;
325
326         if (!new_val)
327                 new_val = max_beacon_val;
328
329         return new_val;
330 }
331
332 int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
333 {
334         u64 tsf;
335         s32 interval_tm, rem;
336         struct ieee80211_conf *conf = NULL;
337         u16 beacon_int;
338         struct ieee80211_vif *vif = ctx->vif;
339
340         conf = ieee80211_get_hw_conf(priv->hw);
341
342         lockdep_assert_held(&priv->mutex);
343
344         memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
345
346         ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
347         ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
348
349         beacon_int = vif ? vif->bss_conf.beacon_int : 0;
350
351         /*
352          * TODO: For IBSS we need to get atim_window from mac80211,
353          *       for now just always use 0
354          */
355         ctx->timing.atim_window = 0;
356
357         if (ctx->ctxid == IWL_RXON_CTX_PAN &&
358             (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) &&
359             iwl_is_associated(priv, IWL_RXON_CTX_BSS) &&
360             priv->contexts[IWL_RXON_CTX_BSS].vif &&
361             priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) {
362                 ctx->timing.beacon_interval =
363                         priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval;
364                 beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
365         } else if (ctx->ctxid == IWL_RXON_CTX_BSS &&
366                    iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
367                    priv->contexts[IWL_RXON_CTX_PAN].vif &&
368                    priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int &&
369                    (!iwl_is_associated_ctx(ctx) || !ctx->vif ||
370                     !ctx->vif->bss_conf.beacon_int)) {
371                 ctx->timing.beacon_interval =
372                         priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval;
373                 beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
374         } else {
375                 beacon_int = iwl_adjust_beacon_interval(beacon_int,
376                                 priv->hw_params.max_beacon_itrvl * TIME_UNIT);
377                 ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
378         }
379
380         tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */
381         interval_tm = beacon_int * TIME_UNIT;
382         rem = do_div(tsf, interval_tm);
383         ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
384
385         ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
386
387         IWL_DEBUG_ASSOC(priv,
388                         "beacon interval %d beacon timer %d beacon tim %d\n",
389                         le16_to_cpu(ctx->timing.beacon_interval),
390                         le32_to_cpu(ctx->timing.beacon_init_val),
391                         le16_to_cpu(ctx->timing.atim_window));
392
393         return iwl_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
394                                 sizeof(ctx->timing), &ctx->timing);
395 }
396
397 void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
398                            int hw_decrypt)
399 {
400         struct iwl_rxon_cmd *rxon = &ctx->staging;
401
402         if (hw_decrypt)
403                 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
404         else
405                 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
406
407 }
408
409 /* validate RXON structure is valid */
410 int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
411 {
412         struct iwl_rxon_cmd *rxon = &ctx->staging;
413         bool error = false;
414
415         if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
416                 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
417                         IWL_WARN(priv, "check 2.4G: wrong narrow\n");
418                         error = true;
419                 }
420                 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
421                         IWL_WARN(priv, "check 2.4G: wrong radar\n");
422                         error = true;
423                 }
424         } else {
425                 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
426                         IWL_WARN(priv, "check 5.2G: not short slot!\n");
427                         error = true;
428                 }
429                 if (rxon->flags & RXON_FLG_CCK_MSK) {
430                         IWL_WARN(priv, "check 5.2G: CCK!\n");
431                         error = true;
432                 }
433         }
434         if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
435                 IWL_WARN(priv, "mac/bssid mcast!\n");
436                 error = true;
437         }
438
439         /* make sure basic rates 6Mbps and 1Mbps are supported */
440         if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
441             (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
442                 IWL_WARN(priv, "neither 1 nor 6 are basic\n");
443                 error = true;
444         }
445
446         if (le16_to_cpu(rxon->assoc_id) > 2007) {
447                 IWL_WARN(priv, "aid > 2007\n");
448                 error = true;
449         }
450
451         if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
452                         == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
453                 IWL_WARN(priv, "CCK and short slot\n");
454                 error = true;
455         }
456
457         if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
458                         == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
459                 IWL_WARN(priv, "CCK and auto detect");
460                 error = true;
461         }
462
463         if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
464                             RXON_FLG_TGG_PROTECT_MSK)) ==
465                             RXON_FLG_TGG_PROTECT_MSK) {
466                 IWL_WARN(priv, "TGg but no auto-detect\n");
467                 error = true;
468         }
469
470         if (error)
471                 IWL_WARN(priv, "Tuning to channel %d\n",
472                             le16_to_cpu(rxon->channel));
473
474         if (error) {
475                 IWL_ERR(priv, "Invalid RXON\n");
476                 return -EINVAL;
477         }
478         return 0;
479 }
480
481 /**
482  * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
483  * @priv: staging_rxon is compared to active_rxon
484  *
485  * If the RXON structure is changing enough to require a new tune,
486  * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
487  * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
488  */
489 int iwl_full_rxon_required(struct iwl_priv *priv,
490                            struct iwl_rxon_context *ctx)
491 {
492         const struct iwl_rxon_cmd *staging = &ctx->staging;
493         const struct iwl_rxon_cmd *active = &ctx->active;
494
495 #define CHK(cond)                                                       \
496         if ((cond)) {                                                   \
497                 IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n");   \
498                 return 1;                                               \
499         }
500
501 #define CHK_NEQ(c1, c2)                                         \
502         if ((c1) != (c2)) {                                     \
503                 IWL_DEBUG_INFO(priv, "need full RXON - "        \
504                                #c1 " != " #c2 " - %d != %d\n",  \
505                                (c1), (c2));                     \
506                 return 1;                                       \
507         }
508
509         /* These items are only settable from the full RXON command */
510         CHK(!iwl_is_associated_ctx(ctx));
511         CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
512         CHK(compare_ether_addr(staging->node_addr, active->node_addr));
513         CHK(compare_ether_addr(staging->wlap_bssid_addr,
514                                 active->wlap_bssid_addr));
515         CHK_NEQ(staging->dev_type, active->dev_type);
516         CHK_NEQ(staging->channel, active->channel);
517         CHK_NEQ(staging->air_propagation, active->air_propagation);
518         CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
519                 active->ofdm_ht_single_stream_basic_rates);
520         CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
521                 active->ofdm_ht_dual_stream_basic_rates);
522         CHK_NEQ(staging->ofdm_ht_triple_stream_basic_rates,
523                 active->ofdm_ht_triple_stream_basic_rates);
524         CHK_NEQ(staging->assoc_id, active->assoc_id);
525
526         /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
527          * be updated with the RXON_ASSOC command -- however only some
528          * flag transitions are allowed using RXON_ASSOC */
529
530         /* Check if we are not switching bands */
531         CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
532                 active->flags & RXON_FLG_BAND_24G_MSK);
533
534         /* Check if we are switching association toggle */
535         CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
536                 active->filter_flags & RXON_FILTER_ASSOC_MSK);
537
538 #undef CHK
539 #undef CHK_NEQ
540
541         return 0;
542 }
543
544 u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv,
545                             struct iwl_rxon_context *ctx)
546 {
547         /*
548          * Assign the lowest rate -- should really get this from
549          * the beacon skb from mac80211.
550          */
551         if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
552                 return IWL_RATE_1M_PLCP;
553         else
554                 return IWL_RATE_6M_PLCP;
555 }
556
557 static void _iwl_set_rxon_ht(struct iwl_priv *priv,
558                              struct iwl_ht_config *ht_conf,
559                              struct iwl_rxon_context *ctx)
560 {
561         struct iwl_rxon_cmd *rxon = &ctx->staging;
562
563         if (!ctx->ht.enabled) {
564                 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
565                         RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
566                         RXON_FLG_HT40_PROT_MSK |
567                         RXON_FLG_HT_PROT_MSK);
568                 return;
569         }
570
571         /* FIXME: if the definition of ht.protection changed, the "translation"
572          * will be needed for rxon->flags
573          */
574         rxon->flags |= cpu_to_le32(ctx->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);
575
576         /* Set up channel bandwidth:
577          * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
578         /* clear the HT channel mode before set the mode */
579         rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
580                          RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
581         if (iwl_is_ht40_tx_allowed(priv, ctx, NULL)) {
582                 /* pure ht40 */
583                 if (ctx->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
584                         rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
585                         /* Note: control channel is opposite of extension channel */
586                         switch (ctx->ht.extension_chan_offset) {
587                         case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
588                                 rxon->flags &= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
589                                 break;
590                         case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
591                                 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
592                                 break;
593                         }
594                 } else {
595                         /* Note: control channel is opposite of extension channel */
596                         switch (ctx->ht.extension_chan_offset) {
597                         case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
598                                 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
599                                 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
600                                 break;
601                         case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
602                                 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
603                                 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
604                                 break;
605                         case IEEE80211_HT_PARAM_CHA_SEC_NONE:
606                         default:
607                                 /* channel location only valid if in Mixed mode */
608                                 IWL_ERR(priv, "invalid extension channel offset\n");
609                                 break;
610                         }
611                 }
612         } else {
613                 rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
614         }
615
616         if (priv->cfg->ops->hcmd->set_rxon_chain)
617                 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
618
619         IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
620                         "extension channel offset 0x%x\n",
621                         le32_to_cpu(rxon->flags), ctx->ht.protection,
622                         ctx->ht.extension_chan_offset);
623 }
624
625 void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
626 {
627         struct iwl_rxon_context *ctx;
628
629         for_each_context(priv, ctx)
630                 _iwl_set_rxon_ht(priv, ht_conf, ctx);
631 }
632
633 /* Return valid, unused, channel for a passive scan to reset the RF */
634 u8 iwl_get_single_channel_number(struct iwl_priv *priv,
635                                  enum ieee80211_band band)
636 {
637         const struct iwl_channel_info *ch_info;
638         int i;
639         u8 channel = 0;
640         u8 min, max;
641         struct iwl_rxon_context *ctx;
642
643         if (band == IEEE80211_BAND_5GHZ) {
644                 min = 14;
645                 max = priv->channel_count;
646         } else {
647                 min = 0;
648                 max = 14;
649         }
650
651         for (i = min; i < max; i++) {
652                 bool busy = false;
653
654                 for_each_context(priv, ctx) {
655                         busy = priv->channel_info[i].channel ==
656                                 le16_to_cpu(ctx->staging.channel);
657                         if (busy)
658                                 break;
659                 }
660
661                 if (busy)
662                         continue;
663
664                 channel = priv->channel_info[i].channel;
665                 ch_info = iwl_get_channel_info(priv, band, channel);
666                 if (is_channel_valid(ch_info))
667                         break;
668         }
669
670         return channel;
671 }
672
673 /**
674  * iwl_set_rxon_channel - Set the band and channel values in staging RXON
675  * @ch: requested channel as a pointer to struct ieee80211_channel
676
677  * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
678  * in the staging RXON flag structure based on the ch->band
679  */
680 int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
681                          struct iwl_rxon_context *ctx)
682 {
683         enum ieee80211_band band = ch->band;
684         u16 channel = ch->hw_value;
685
686         if ((le16_to_cpu(ctx->staging.channel) == channel) &&
687             (priv->band == band))
688                 return 0;
689
690         ctx->staging.channel = cpu_to_le16(channel);
691         if (band == IEEE80211_BAND_5GHZ)
692                 ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
693         else
694                 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
695
696         priv->band = band;
697
698         IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
699
700         return 0;
701 }
702
703 void iwl_set_flags_for_band(struct iwl_priv *priv,
704                             struct iwl_rxon_context *ctx,
705                             enum ieee80211_band band,
706                             struct ieee80211_vif *vif)
707 {
708         if (band == IEEE80211_BAND_5GHZ) {
709                 ctx->staging.flags &=
710                     ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
711                       | RXON_FLG_CCK_MSK);
712                 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
713         } else {
714                 /* Copied from iwl_post_associate() */
715                 if (vif && vif->bss_conf.use_short_slot)
716                         ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
717                 else
718                         ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
719
720                 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
721                 ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
722                 ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
723         }
724 }
725
/*
 * iwl_connection_init_rx_config - initialize staging RXON with defaults
 *
 * Rebuilds ctx->staging from scratch: device type for the attached
 * interface (or the "unused" type when no vif is attached), default
 * filter/preamble flags, the channel taken from the currently active
 * RXON (falling back to the first known channel when that channel is
 * unknown), band-dependent flags, and the default basic-rate masks.
 *
 * NOTE: priv->band is updated here to the band of the chosen channel.
 */
void iwl_connection_init_rx_config(struct iwl_priv *priv,
				   struct iwl_rxon_context *ctx)
{
	const struct iwl_channel_info *ch_info;

	memset(&ctx->staging, 0, sizeof(ctx->staging));

	/* device type depends on the interface type bound to this context */
	if (!ctx->vif) {
		ctx->staging.dev_type = ctx->unused_devtype;
	} else switch (ctx->vif->type) {
	case NL80211_IFTYPE_AP:
		ctx->staging.dev_type = ctx->ap_devtype;
		break;

	case NL80211_IFTYPE_STATION:
		ctx->staging.dev_type = ctx->station_devtype;
		ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	case NL80211_IFTYPE_ADHOC:
		ctx->staging.dev_type = ctx->ibss_devtype;
		ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
		ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
						  RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	default:
		IWL_ERR(priv, "Unsupported interface type %d\n",
			ctx->vif->type);
		break;
	}

#if 0
	/* TODO:  Figure out when short_preamble would be set and cache from
	 * that */
	if (!hw_to_local(priv->hw)->short_preamble)
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif

	/* keep the currently active channel if it is still known */
	ch_info = iwl_get_channel_info(priv, priv->band,
				       le16_to_cpu(ctx->active.channel));

	if (!ch_info)
		ch_info = &priv->channel_info[0];

	ctx->staging.channel = cpu_to_le16(ch_info->channel);
	priv->band = ch_info->band;

	iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif);

	ctx->staging.ofdm_basic_rates =
	    (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
	ctx->staging.cck_basic_rates =
	    (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;

	/* clear both MIX and PURE40 mode flag */
	ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
					RXON_FLG_CHANNEL_MODE_PURE_40);
	if (ctx->vif)
		memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);

	/* allow all HT single/dual/triple-stream rates by default */
	ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
	ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
	ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff;
}
796
797 void iwl_set_rate(struct iwl_priv *priv)
798 {
799         const struct ieee80211_supported_band *hw = NULL;
800         struct ieee80211_rate *rate;
801         struct iwl_rxon_context *ctx;
802         int i;
803
804         hw = iwl_get_hw_mode(priv, priv->band);
805         if (!hw) {
806                 IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
807                 return;
808         }
809
810         priv->active_rate = 0;
811
812         for (i = 0; i < hw->n_bitrates; i++) {
813                 rate = &(hw->bitrates[i]);
814                 if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
815                         priv->active_rate |= (1 << rate->hw_value);
816         }
817
818         IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
819
820         for_each_context(priv, ctx) {
821                 ctx->staging.cck_basic_rates =
822                     (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
823
824                 ctx->staging.ofdm_basic_rates =
825                    (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
826         }
827 }
828
/*
 * iwl_chswitch_done - report channel-switch completion to mac80211
 * @is_success: whether the switch succeeded (passed through to mac80211)
 */
void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
{
	/*
	 * MULTI-FIXME
	 * See iwl_mac_channel_switch.
	 */
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/*
	 * NOTE(review): switch_in_progress is read here without holding
	 * priv->mutex, while the clear below is done under it -- confirm
	 * this cannot race with a concurrent switch request.
	 */
	if (priv->switch_rxon.switch_in_progress) {
		ieee80211_chswitch_done(ctx->vif, is_success);
		mutex_lock(&priv->mutex);
		priv->switch_rxon.switch_in_progress = false;
		mutex_unlock(&priv->mutex);
	}
}
847
848 #ifdef CONFIG_IWLWIFI_DEBUG
/*
 * iwl_print_rx_config_cmd - dump the staging RXON command
 *
 * Debug-build helper: hex-dumps the whole staging RXON followed by a
 * field-by-field breakdown, at the IWL_DL_RADIO debug level.
 */
void iwl_print_rx_config_cmd(struct iwl_priv *priv,
			     struct iwl_rxon_context *ctx)
{
	struct iwl_rxon_cmd *rxon = &ctx->staging;

	IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
	iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
	IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
	IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
			le32_to_cpu(rxon->filter_flags));
	IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
	IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
			rxon->ofdm_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
	IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
}
868 #endif
869
/*
 * iwlagn_abort_notification_waits - abort all pending notification waits
 *
 * Called on firmware errors (see iwlagn_fw_error): flags every entry
 * on the notif_waits list as aborted, under notif_wait_lock, and then
 * wakes all waiters so they can notice the abort instead of sleeping
 * until their timeout.
 */
static void iwlagn_abort_notification_waits(struct iwl_priv *priv)
{
	unsigned long flags;
	struct iwl_notification_wait *wait_entry;

	spin_lock_irqsave(&priv->_agn.notif_wait_lock, flags);
	list_for_each_entry(wait_entry, &priv->_agn.notif_waits, list)
		wait_entry->aborted = true;
	spin_unlock_irqrestore(&priv->_agn.notif_wait_lock, flags);

	/* wake after dropping the lock; waiters re-check ->aborted */
	wake_up_all(&priv->_agn.notif_waitq);
}
882
/*
 * iwlagn_fw_error - central handling for a uCode (firmware) error
 * @ondemand: true when the restart was requested deliberately;
 *	false for a real firmware error, which is then subject to the
 *	reload-rate throttling below.
 *
 * Marks the firmware as dead, aborts any host command or notification
 * the driver is currently waiting on, and -- unless restarts are
 * throttled or disabled -- queues the restart worker.
 */
void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
{
	unsigned int reload_msec;
	unsigned long reload_jiffies;

	/* Set the FW error flag -- cleared on iwl_down */
	set_bit(STATUS_FW_ERROR, &priv->status);

	/* Cancel currently queued command. */
	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);

	/* flag and wake all pending notification waiters */
	iwlagn_abort_notification_waits(priv);

	/* Keep the restart process from trying to send host
	 * commands by clearing the ready bit */
	clear_bit(STATUS_READY, &priv->status);

	wake_up_interruptible(&priv->wait_command_queue);

	if (!ondemand) {
		/*
		 * If the firmware keeps reloading, something is seriously
		 * wrong and it is failing to recover.  Instead of retrying
		 * forever -- which would fill the syslog and hang the
		 * system -- stop restarting after too many back-to-back
		 * reloads within IWL_MIN_RELOAD_DURATION of each other.
		 */
		reload_jiffies = jiffies;
		reload_msec = jiffies_to_msecs((long) reload_jiffies -
					(long) priv->reload_jiffies);
		priv->reload_jiffies = reload_jiffies;
		if (reload_msec <= IWL_MIN_RELOAD_DURATION) {
			priv->reload_count++;
			if (priv->reload_count >= IWL_MAX_CONTINUE_RELOAD_CNT) {
				IWL_ERR(priv, "BUG_ON, Stop restarting\n");
				return;
			}
		} else
			priv->reload_count = 0;
	}

	if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		if (priv->cfg->mod_params->restart_fw) {
			IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
				  "Restarting adapter due to uCode error.\n");
			queue_work(priv->workqueue, &priv->restart);
		} else
			IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
				  "Detected FW error, but not restarting\n");
	}
}
933
/**
 * iwl_irq_handle_error - called for HW or SW error interrupt from card
 *
 * When internal WiFi/WiMAX coexistence indicates WiMAX owns the RF,
 * only quiesce the driver (clear READY/HCMD_ACTIVE, wake command
 * waiters) and return without restarting.  Otherwise dump the firmware
 * error state (error log, CSR/FH registers, event log and -- with
 * debug enabled -- the BSS RXON) and hand off to iwlagn_fw_error()
 * for the recovery/restart decision.
 */
void iwl_irq_handle_error(struct iwl_priv *priv)
{
	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (priv->cfg->internal_wimax_coex &&
	    (!(iwl_read_prph(priv, APMG_CLK_CTRL_REG) &
			APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(priv, APMG_PS_CTRL_REG) &
			APMG_PS_CTRL_VAL_RESET_REQ))) {
		/*
		 * Keep the restart process from trying to send host
		 * commands by clearing the ready bit.
		 */
		clear_bit(STATUS_READY, &priv->status);
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		wake_up_interruptible(&priv->wait_command_queue);
		IWL_ERR(priv, "RF is used by WiMAX\n");
		return;
	}

	IWL_ERR(priv, "Loaded firmware version: %s\n",
		priv->hw->wiphy->fw_version);

	/* dump everything useful for post-mortem analysis */
	iwl_dump_nic_error_log(priv);
	iwl_dump_csr(priv);
	iwl_dump_fh(priv, NULL, false);
	iwl_dump_nic_event_log(priv, false, NULL, false);
#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)
		iwl_print_rx_config_cmd(priv,
					&priv->contexts[IWL_RXON_CTX_BSS]);
#endif

	/* not an on-demand restart -- subject to reload throttling */
	iwlagn_fw_error(priv, false);
}
971
972 static int iwl_apm_stop_master(struct iwl_priv *priv)
973 {
974         int ret = 0;
975
976         /* stop device's busmaster DMA activity */
977         iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
978
979         ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
980                         CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
981         if (ret)
982                 IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
983
984         IWL_DEBUG_INFO(priv, "stop master\n");
985
986         return ret;
987 }
988
/*
 * iwl_apm_stop - stop the device and drop it into low power state
 *
 * Counterpart of iwl_apm_init(): halts busmaster DMA, soft-resets the
 * whole device and clears "init done" so the adapter falls back from
 * the powered-up D0A state to the uninitialized D0U state.
 */
void iwl_apm_stop(struct iwl_priv *priv)
{
	IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");

	/* Stop device's DMA activity */
	iwl_apm_stop_master(priv);

	/* Reset the entire device */
	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	/* brief settle delay after the soft reset */
	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
1007
1008
/*
 * iwl_apm_init - start up NIC's basic functionality after it has been
 * reset (e.g. after platform boot, or shutdown via iwl_apm_stop())
 *
 * Applies the PCIe/clock workarounds, moves the adapter from D0U to
 * the powered-up D0A state, waits for clock stabilization and enables
 * the DMA clock.  The order of register writes below is a hardware
 * bring-up sequence and must not be changed.
 *
 * NOTE:  This does not load uCode nor start the embedded processor.
 *
 * Returns 0 on success, or the (negative) iwl_poll_bit() result if
 * the MAC clock never became ready.
 */
int iwl_apm_init(struct iwl_priv *priv)
{
	int ret = 0;
	u16 lctl;

	IWL_DEBUG_INFO(priv, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
			  CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 *  don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
			  CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	/*
	 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	lctl = iwl_pcie_link_ctl(priv);
	if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
				PCI_CFG_LINK_CTRL_VAL_L1_EN) {
		/* L1-ASPM enabled; disable(!) L0S  */
		iwl_set_bit(priv, CSR_GIO_REG,
				CSR_GIO_REG_VAL_L0S_ENABLED);
		IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
	} else {
		/* L1-ASPM disabled; enable(!) L0S */
		iwl_clear_bit(priv, CSR_GIO_REG,
				CSR_GIO_REG_VAL_L0S_ENABLED);
		IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
	}

	/* Configure analog phase-lock-loop before activating to D0A */
	if (priv->cfg->base_params->pll_cfg_val)
		iwl_set_bit(priv, CSR_ANA_PLL_CFG,
			    priv->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

out:
	return ret;
}
1110
1111
/*
 * iwl_set_tx_power - set or request a new user tx power limit
 * @tx_power: requested limit (dBm)
 * @force: apply even if the value is unchanged and even while a scan
 *	or RXON change is pending
 *
 * Validates the request against the firmware minimum and the device
 * limit, records it in tx_power_next for later commit paths, and
 * sends it to the uCode unless a scan or an uncommitted RXON change
 * makes it unsafe to do so now (in which case it is deferred).
 *
 * Returns 0 on success or defer, -EINVAL when out of range, -EIO when
 * the RF is not ready, -EOPNOTSUPP when the device has no tx-power
 * operation, or the send_tx_power() error (the previous limit is
 * restored in that case).  Caller must hold priv->mutex.
 */
int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
{
	int ret;
	s8 prev_tx_power;
	bool defer;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	lockdep_assert_held(&priv->mutex);

	if (priv->tx_power_user_lmt == tx_power && !force)
		return 0;

	if (!priv->cfg->ops->lib->send_tx_power)
		return -EOPNOTSUPP;

	if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
		IWL_WARN(priv,
			 "Requested user TXPOWER %d below lower limit %d.\n",
			 tx_power,
			 IWLAGN_TX_POWER_TARGET_POWER_MIN);
		return -EINVAL;
	}

	if (tx_power > priv->tx_power_device_lmt) {
		IWL_WARN(priv,
			"Requested user TXPOWER %d above upper limit %d.\n",
			 tx_power, priv->tx_power_device_lmt);
		return -EINVAL;
	}

	if (!iwl_is_ready_rf(priv))
		return -EIO;

	/* scan complete and commit_rxon use tx_power_next value,
	 * it always need to be updated for newest request */
	priv->tx_power_next = tx_power;

	/* do not set tx power when scanning or channel changing */
	defer = test_bit(STATUS_SCANNING, &priv->status) ||
		memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
	if (defer && !force) {
		IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
		return 0;
	}

	prev_tx_power = priv->tx_power_user_lmt;
	priv->tx_power_user_lmt = tx_power;

	ret = priv->cfg->ops->lib->send_tx_power(priv);

	/* if fail to set tx_power, restore the orig. tx power */
	if (ret) {
		priv->tx_power_user_lmt = prev_tx_power;
		priv->tx_power_next = prev_tx_power;
	}
	return ret;
}
1169
1170 void iwl_send_bt_config(struct iwl_priv *priv)
1171 {
1172         struct iwl_bt_cmd bt_cmd = {
1173                 .lead_time = BT_LEAD_TIME_DEF,
1174                 .max_kill = BT_MAX_KILL_DEF,
1175                 .kill_ack_mask = 0,
1176                 .kill_cts_mask = 0,
1177         };
1178
1179         if (!bt_coex_active)
1180                 bt_cmd.flags = BT_COEX_DISABLE;
1181         else
1182                 bt_cmd.flags = BT_COEX_ENABLE;
1183
1184         priv->bt_enable_flag = bt_cmd.flags;
1185         IWL_DEBUG_INFO(priv, "BT coex %s\n",
1186                 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
1187
1188         if (iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1189                              sizeof(struct iwl_bt_cmd), &bt_cmd))
1190                 IWL_ERR(priv, "failed to send BT Coex Config\n");
1191 }
1192
1193 int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
1194 {
1195         struct iwl_statistics_cmd statistics_cmd = {
1196                 .configuration_flags =
1197                         clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
1198         };
1199
1200         if (flags & CMD_ASYNC)
1201                 return iwl_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
1202                                                sizeof(struct iwl_statistics_cmd),
1203                                                &statistics_cmd, NULL);
1204         else
1205                 return iwl_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
1206                                         sizeof(struct iwl_statistics_cmd),
1207                                         &statistics_cmd);
1208 }
1209
/* Zero all accumulated interrupt-source statistics. */
void iwl_clear_isr_stats(struct iwl_priv *priv)
{
	memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
}
1214
/*
 * iwl_mac_conf_tx - mac80211 conf_tx callback: set per-AC QoS params
 *
 * Stores the EDCA parameters (cw_min/cw_max/aifs/txop) for the given
 * access category into every context's default QoS parameter block,
 * under priv->lock.  Queues >= AC_NUM are silently ignored.
 *
 * Returns 0, or -EIO when the RF is not ready.
 */
int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
			   const struct ieee80211_tx_queue_params *params)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx;
	unsigned long flags;
	int q;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (!iwl_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
		return -EIO;
	}

	if (queue >= AC_NUM) {
		IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
		return 0;
	}

	/* mac80211 queue index is mirrored into the uCode AC table --
	 * presumably the uCode orders ACs oppositely; TODO confirm */
	q = AC_NUM - 1 - queue;

	spin_lock_irqsave(&priv->lock, flags);

	/*
	 * MULTI-FIXME
	 * This may need to be done per interface in nl80211/cfg80211/mac80211.
	 */
	for_each_context(priv, ctx) {
		ctx->qos_data.def_qos_parm.ac[q].cw_min =
			cpu_to_le16(params->cw_min);
		ctx->qos_data.def_qos_parm.ac[q].cw_max =
			cpu_to_le16(params->cw_max);
		ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
		/* txop is given in 32-usec units to the uCode */
		ctx->qos_data.def_qos_parm.ac[q].edca_txop =
				cpu_to_le16((params->txop * 32));

		ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	IWL_DEBUG_MAC80211(priv, "leave\n");
	return 0;
}
1260
1261 int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw)
1262 {
1263         struct iwl_priv *priv = hw->priv;
1264
1265         return priv->ibss_manager == IWL_IBSS_MANAGER;
1266 }
1267
1268 static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1269 {
1270         iwl_connection_init_rx_config(priv, ctx);
1271
1272         if (priv->cfg->ops->hcmd->set_rxon_chain)
1273                 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
1274
1275         return iwlcore_commit_rxon(priv, ctx);
1276 }
1277
/*
 * iwl_setup_interface - activate a context for a newly added vif
 *
 * Marks the context active and programs RXON for it via iwl_set_mode().
 * On failure the context is deactivated again unless it is an
 * always-active one.  Caller must hold priv->mutex.
 *
 * Returns 0 on success or the iwl_set_mode() error.
 */
static int iwl_setup_interface(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	struct ieee80211_vif *vif = ctx->vif;
	int err;

	lockdep_assert_held(&priv->mutex);

	/*
	 * This variable will be correct only when there's just
	 * a single context, but all code using it is for hardware
	 * that supports only one context.
	 */
	priv->iw_mode = vif->type;

	ctx->is_active = true;

	err = iwl_set_mode(priv, ctx);
	if (err) {
		if (!ctx->always_active)
			ctx->is_active = false;
		return err;
	}

	if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist &&
	    vif->type == NL80211_IFTYPE_ADHOC) {
		/*
		 * pretend to have high BT traffic as long as we
		 * are operating in IBSS mode, as this will cause
		 * the rate scaling etc. to behave as intended.
		 */
		priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
	}

	return 0;
}
1314
/*
 * iwl_mac_add_interface - mac80211 add_interface callback
 *
 * Finds a free RXON context that supports the requested interface
 * type, binds the vif to it and brings the interface up via
 * iwl_setup_interface().  Fails with -EINVAL when the device is not
 * ready or an existing exclusive context blocks the request, and with
 * -EOPNOTSUPP when no suitable context is free.
 */
int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	struct iwl_rxon_context *tmp, *ctx = NULL;
	int err;
	enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);

	IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
			   viftype, vif->addr);

	mutex_lock(&priv->mutex);

	if (!iwl_is_ready_rf(priv)) {
		IWL_WARN(priv, "Try to add interface when device not ready\n");
		err = -EINVAL;
		goto out;
	}

	/* pick the first free context that can host this interface type */
	for_each_context(priv, tmp) {
		u32 possible_modes =
			tmp->interface_modes | tmp->exclusive_interface_modes;

		if (tmp->vif) {
			/* check if this busy context is exclusive */
			if (tmp->exclusive_interface_modes &
						BIT(tmp->vif->type)) {
				err = -EINVAL;
				goto out;
			}
			continue;
		}

		if (!(possible_modes & BIT(viftype)))
			continue;

		/* have maybe usable context w/o interface */
		ctx = tmp;
		break;
	}

	if (!ctx) {
		err = -EOPNOTSUPP;
		goto out;
	}

	vif_priv->ctx = ctx;
	ctx->vif = vif;

	err = iwl_setup_interface(priv, ctx);
	if (!err)
		goto out;

	/* setup failed: unbind the vif again */
	ctx->vif = NULL;
	priv->iw_mode = NL80211_IFTYPE_STATION;
 out:
	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
	return err;
}
1376
/*
 * iwl_teardown_interface - common cleanup when a vif goes away
 * @mode_change: true when the vif is only changing type (keep the
 *	context active and skip reprogramming RXON)
 *
 * Cancels any scan owned by this vif, optionally reprograms and
 * deactivates the context, and restores the saved BT traffic load for
 * IBSS interfaces.  Caller must hold priv->mutex.
 */
static void iwl_teardown_interface(struct iwl_priv *priv,
				   struct ieee80211_vif *vif,
				   bool mode_change)
{
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);

	lockdep_assert_held(&priv->mutex);

	if (priv->scan_vif == vif) {
		iwl_scan_cancel_timeout(priv, 200);
		iwl_force_scan_end(priv);
	}

	if (!mode_change) {
		iwl_set_mode(priv, ctx);
		if (!ctx->always_active)
			ctx->is_active = false;
	}

	/*
	 * When removing the IBSS interface, overwrite the
	 * BT traffic load with the stored one from the last
	 * notification, if any. If this is a device that
	 * doesn't implement this, this has no effect since
	 * both values are the same and zero.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC)
		priv->bt_traffic_load = priv->last_bt_traffic_load;
}
1406
/*
 * iwl_mac_remove_interface - mac80211 remove_interface callback
 *
 * Unbinds the vif from its RXON context and runs the common teardown
 * (scan cancel, RXON reprogram, context deactivation) under
 * priv->mutex.
 */
void iwl_mac_remove_interface(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);

	IWL_DEBUG_MAC80211(priv, "enter\n");

	mutex_lock(&priv->mutex);

	WARN_ON(ctx->vif != vif);
	ctx->vif = NULL;

	iwl_teardown_interface(priv, vif, false);

	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");

}
1427
1428 int iwl_alloc_txq_mem(struct iwl_priv *priv)
1429 {
1430         if (!priv->txq)
1431                 priv->txq = kzalloc(
1432                         sizeof(struct iwl_tx_queue) *
1433                                 priv->cfg->base_params->num_of_queues,
1434                         GFP_KERNEL);
1435         if (!priv->txq) {
1436                 IWL_ERR(priv, "Not enough memory for txq\n");
1437                 return -ENOMEM;
1438         }
1439         return 0;
1440 }
1441
/* Free the Tx queue array allocated by iwl_alloc_txq_mem(). */
void iwl_free_txq_mem(struct iwl_priv *priv)
{
	kfree(priv->txq);
	/* reset so a later iwl_alloc_txq_mem() will reallocate */
	priv->txq = NULL;
}
1447
1448 #ifdef CONFIG_IWLWIFI_DEBUGFS
1449
1450 #define IWL_TRAFFIC_DUMP_SIZE   (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
1451
/*
 * iwl_reset_traffic_log - rewind and clear the tx/rx traffic dumps
 *
 * Resets both ring indices and zeroes whichever dump buffers have
 * been allocated by iwl_alloc_traffic_mem().
 */
void iwl_reset_traffic_log(struct iwl_priv *priv)
{
	priv->tx_traffic_idx = 0;
	priv->rx_traffic_idx = 0;
	if (priv->tx_traffic)
		memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
	if (priv->rx_traffic)
		memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
}
1461
1462 int iwl_alloc_traffic_mem(struct iwl_priv *priv)
1463 {
1464         u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
1465
1466         if (iwl_debug_level & IWL_DL_TX) {
1467                 if (!priv->tx_traffic) {
1468                         priv->tx_traffic =
1469                                 kzalloc(traffic_size, GFP_KERNEL);
1470                         if (!priv->tx_traffic)
1471                                 return -ENOMEM;
1472                 }
1473         }
1474         if (iwl_debug_level & IWL_DL_RX) {
1475                 if (!priv->rx_traffic) {
1476                         priv->rx_traffic =
1477                                 kzalloc(traffic_size, GFP_KERNEL);
1478                         if (!priv->rx_traffic)
1479                                 return -ENOMEM;
1480                 }
1481         }
1482         iwl_reset_traffic_log(priv);
1483         return 0;
1484 }
1485
/* Free both traffic dump buffers; safe when they were never allocated. */
void iwl_free_traffic_mem(struct iwl_priv *priv)
{
	kfree(priv->tx_traffic);
	priv->tx_traffic = NULL;

	kfree(priv->rx_traffic);
	priv->rx_traffic = NULL;
}
1494
1495 void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
1496                       u16 length, struct ieee80211_hdr *header)
1497 {
1498         __le16 fc;
1499         u16 len;
1500
1501         if (likely(!(iwl_debug_level & IWL_DL_TX)))
1502                 return;
1503
1504         if (!priv->tx_traffic)
1505                 return;
1506
1507         fc = header->frame_control;
1508         if (ieee80211_is_data(fc)) {
1509                 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
1510                        ? IWL_TRAFFIC_ENTRY_SIZE : length;
1511                 memcpy((priv->tx_traffic +
1512                        (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
1513                        header, len);
1514                 priv->tx_traffic_idx =
1515                         (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1516         }
1517 }
1518
1519 void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
1520                       u16 length, struct ieee80211_hdr *header)
1521 {
1522         __le16 fc;
1523         u16 len;
1524
1525         if (likely(!(iwl_debug_level & IWL_DL_RX)))
1526                 return;
1527
1528         if (!priv->rx_traffic)
1529                 return;
1530
1531         fc = header->frame_control;
1532         if (ieee80211_is_data(fc)) {
1533                 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
1534                        ? IWL_TRAFFIC_ENTRY_SIZE : length;
1535                 memcpy((priv->rx_traffic +
1536                        (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
1537                        header, len);
1538                 priv->rx_traffic_idx =
1539                         (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1540         }
1541 }
1542
/*
 * get_mgmt_string - map a MANAGEMENT_* statistics index to its name
 *
 * The IWL_CMD() macro expands each entry to a case returning the
 * stringified constant; any other value yields "UNKNOWN".  Used for
 * debugfs traffic-statistics display.
 */
const char *get_mgmt_string(int cmd)
{
	switch (cmd) {
		IWL_CMD(MANAGEMENT_ASSOC_REQ);
		IWL_CMD(MANAGEMENT_ASSOC_RESP);
		IWL_CMD(MANAGEMENT_REASSOC_REQ);
		IWL_CMD(MANAGEMENT_REASSOC_RESP);
		IWL_CMD(MANAGEMENT_PROBE_REQ);
		IWL_CMD(MANAGEMENT_PROBE_RESP);
		IWL_CMD(MANAGEMENT_BEACON);
		IWL_CMD(MANAGEMENT_ATIM);
		IWL_CMD(MANAGEMENT_DISASSOC);
		IWL_CMD(MANAGEMENT_AUTH);
		IWL_CMD(MANAGEMENT_DEAUTH);
		IWL_CMD(MANAGEMENT_ACTION);
	default:
		return "UNKNOWN";

	}
}
1563
/*
 * get_ctrl_string - map a CONTROL_* statistics index to its name
 *
 * Companion to get_mgmt_string() for control frames; returns
 * "UNKNOWN" for values not in the IWL_CMD() table.
 */
const char *get_ctrl_string(int cmd)
{
	switch (cmd) {
		IWL_CMD(CONTROL_BACK_REQ);
		IWL_CMD(CONTROL_BACK);
		IWL_CMD(CONTROL_PSPOLL);
		IWL_CMD(CONTROL_RTS);
		IWL_CMD(CONTROL_CTS);
		IWL_CMD(CONTROL_ACK);
		IWL_CMD(CONTROL_CFEND);
		IWL_CMD(CONTROL_CFENDACK);
	default:
		return "UNKNOWN";

	}
}
1580
/* Zero both the tx and rx traffic statistics counters. */
void iwl_clear_traffic_stats(struct iwl_priv *priv)
{
	memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
	memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
}
1586
/*
 * If CONFIG_IWLWIFI_DEBUGFS is defined, iwl_update_stats() records all
 * MGMT, CTRL and DATA packets for both the TX and RX paths, and the
 * tx/rx statistics can be displayed through debugfs.
 * If CONFIG_IWLWIFI_DEBUGFS is not defined, no MGMT or CTRL
 * information is recorded, but DATA packets are still counted,
 * because iwl-led.c needs to control the LED blinking based on the
 * number of tx and rx data packets.
 */
1597 void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
1598 {
1599         struct traffic_stats    *stats;
1600
1601         if (is_tx)
1602                 stats = &priv->tx_stats;
1603         else
1604                 stats = &priv->rx_stats;
1605
1606         if (ieee80211_is_mgmt(fc)) {
1607                 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
1608                 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
1609                         stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
1610                         break;
1611                 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
1612                         stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
1613                         break;
1614                 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
1615                         stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
1616                         break;
1617                 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
1618                         stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
1619                         break;
1620                 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
1621                         stats->mgmt[MANAGEMENT_PROBE_REQ]++;
1622                         break;
1623                 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
1624                         stats->mgmt[MANAGEMENT_PROBE_RESP]++;
1625                         break;
1626                 case cpu_to_le16(IEEE80211_STYPE_BEACON):
1627                         stats->mgmt[MANAGEMENT_BEACON]++;
1628                         break;
1629                 case cpu_to_le16(IEEE80211_STYPE_ATIM):
1630                         stats->mgmt[MANAGEMENT_ATIM]++;
1631                         break;
1632                 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
1633                         stats->mgmt[MANAGEMENT_DISASSOC]++;
1634                         break;
1635                 case cpu_to_le16(IEEE80211_STYPE_AUTH):
1636                         stats->mgmt[MANAGEMENT_AUTH]++;
1637                         break;
1638                 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
1639                         stats->mgmt[MANAGEMENT_DEAUTH]++;
1640                         break;
1641                 case cpu_to_le16(IEEE80211_STYPE_ACTION):
1642                         stats->mgmt[MANAGEMENT_ACTION]++;
1643                         break;
1644                 }
1645         } else if (ieee80211_is_ctl(fc)) {
1646                 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
1647                 case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
1648                         stats->ctrl[CONTROL_BACK_REQ]++;
1649                         break;
1650                 case cpu_to_le16(IEEE80211_STYPE_BACK):
1651                         stats->ctrl[CONTROL_BACK]++;
1652                         break;
1653                 case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
1654                         stats->ctrl[CONTROL_PSPOLL]++;
1655                         break;
1656                 case cpu_to_le16(IEEE80211_STYPE_RTS):
1657                         stats->ctrl[CONTROL_RTS]++;
1658                         break;
1659                 case cpu_to_le16(IEEE80211_STYPE_CTS):
1660                         stats->ctrl[CONTROL_CTS]++;
1661                         break;
1662                 case cpu_to_le16(IEEE80211_STYPE_ACK):
1663                         stats->ctrl[CONTROL_ACK]++;
1664                         break;
1665                 case cpu_to_le16(IEEE80211_STYPE_CFEND):
1666                         stats->ctrl[CONTROL_CFEND]++;
1667                         break;
1668                 case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
1669                         stats->ctrl[CONTROL_CFENDACK]++;
1670                         break;
1671                 }
1672         } else {
1673                 /* data */
1674                 stats->data_cnt++;
1675                 stats->data_bytes += len;
1676         }
1677 }
1678 #endif
1679
/*
 * Force the RF side to retune.  Called when too many consecutive
 * beacons are missed or another uCode error condition is detected.
 */
static void iwl_force_rf_reset(struct iwl_priv *priv)
{
	/* don't bother while the driver is being torn down */
	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* retuning only makes sense with an active association */
	if (!iwl_is_any_associated(priv)) {
		IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
		return;
	}
	/*
	 * There is no direct way to reset the radio; the only known
	 * method is a channel switch, which forces a reset and retune.
	 * An internal short (single-channel) scan is used to trigger
	 * that channel switch.
	 */
	IWL_DEBUG_INFO(priv, "perform radio reset.\n");
	iwl_internal_short_hw_scan(priv);
}
1701
1702
1703 int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
1704 {
1705         struct iwl_force_reset *force_reset;
1706
1707         if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1708                 return -EINVAL;
1709
1710         if (mode >= IWL_MAX_FORCE_RESET) {
1711                 IWL_DEBUG_INFO(priv, "invalid reset request.\n");
1712                 return -EINVAL;
1713         }
1714         force_reset = &priv->force_reset[mode];
1715         force_reset->reset_request_count++;
1716         if (!external) {
1717                 if (force_reset->last_force_reset_jiffies &&
1718                     time_after(force_reset->last_force_reset_jiffies +
1719                     force_reset->reset_duration, jiffies)) {
1720                         IWL_DEBUG_INFO(priv, "force reset rejected\n");
1721                         force_reset->reset_reject_count++;
1722                         return -EAGAIN;
1723                 }
1724         }
1725         force_reset->reset_success_count++;
1726         force_reset->last_force_reset_jiffies = jiffies;
1727         IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);
1728         switch (mode) {
1729         case IWL_RF_RESET:
1730                 iwl_force_rf_reset(priv);
1731                 break;
1732         case IWL_FW_RESET:
1733                 /*
1734                  * if the request is from external(ex: debugfs),
1735                  * then always perform the request in regardless the module
1736                  * parameter setting
1737                  * if the request is from internal (uCode error or driver
1738                  * detect failure), then fw_restart module parameter
1739                  * need to be check before performing firmware reload
1740                  */
1741                 if (!external && !priv->cfg->mod_params->restart_fw) {
1742                         IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
1743                                        "module parameter setting\n");
1744                         break;
1745                 }
1746                 IWL_ERR(priv, "On demand firmware reload\n");
1747                 iwlagn_fw_error(priv, true);
1748                 break;
1749         }
1750         return 0;
1751 }
1752
/*
 * mac80211 change_interface callback: switch an existing virtual
 * interface to a new nl80211 type / P2P mode.
 *
 * Returns -EBUSY if the switch is not possible right now, otherwise 0
 * even if the device-level re-setup failed (see the comment near the
 * end for why that error is masked).
 */
int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     enum nl80211_iftype newtype, bool newp2p)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
	struct iwl_rxon_context *tmp;
	u32 interface_modes;
	int err;

	/* fold the p2p flag into the effective interface type */
	newtype = ieee80211_iftype_p2p(newtype, newp2p);

	mutex_lock(&priv->mutex);

	if (!ctx->vif || !iwl_is_ready_rf(priv)) {
		/*
		 * Huh? But wait ... this can maybe happen when
		 * we're in the middle of a firmware restart!
		 */
		err = -EBUSY;
		goto out;
	}

	interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;

	/* the target type must be supported by this context at all */
	if (!(interface_modes & BIT(newtype))) {
		err = -EBUSY;
		goto out;
	}

	if (ctx->exclusive_interface_modes & BIT(newtype)) {
		for_each_context(priv, tmp) {
			if (ctx == tmp)
				continue;

			if (!tmp->vif)
				continue;

			/*
			 * The current mode switch would be exclusive, but
			 * another context is active ... refuse the switch.
			 */
			err = -EBUSY;
			goto out;
		}
	}

	/* success */
	iwl_teardown_interface(priv, vif, true);
	vif->type = newtype;
	vif->p2p = newp2p;
	err = iwl_setup_interface(priv, ctx);
	WARN_ON(err);
	/*
	 * We've switched internally, but submitting to the
	 * device may have failed for some reason. Mask this
	 * error, because otherwise mac80211 will not switch
	 * (and set the interface type back) and we'll be
	 * out of sync with it.
	 */
	err = 0;

 out:
	mutex_unlock(&priv->mutex);
	return err;
}
1818
1819 /*
1820  * On every watchdog tick we check (latest) time stamp. If it does not
1821  * change during timeout period and queue is not empty we reset firmware.
1822  */
1823 static int iwl_check_stuck_queue(struct iwl_priv *priv, int cnt)
1824 {
1825         struct iwl_tx_queue *txq = &priv->txq[cnt];
1826         struct iwl_queue *q = &txq->q;
1827         unsigned long timeout;
1828         int ret;
1829
1830         if (q->read_ptr == q->write_ptr) {
1831                 txq->time_stamp = jiffies;
1832                 return 0;
1833         }
1834
1835         timeout = txq->time_stamp +
1836                   msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
1837
1838         if (time_after(jiffies, timeout)) {
1839                 IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
1840                                 q->id, priv->cfg->base_params->wd_timeout);
1841                 ret = iwl_force_reset(priv, IWL_FW_RESET, false);
1842                 return (ret == -EAGAIN) ? 0 : 1;
1843         }
1844
1845         return 0;
1846 }
1847
/*
 * Setting the watchdog tick to a quarter of the timeout ensures we
 * discover a hung queue between timeout and 1.25*timeout.
 */
1852 #define IWL_WD_TICK(timeout) ((timeout) / 4)
1853
/*
 * Watchdog timer callback: check each tx queue for a stall and reset
 * the firmware if one is hung. If everything is fine, just rearm the
 * timer.
 */
1858 void iwl_bg_watchdog(unsigned long data)
1859 {
1860         struct iwl_priv *priv = (struct iwl_priv *)data;
1861         int cnt;
1862         unsigned long timeout;
1863
1864         if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1865                 return;
1866
1867         timeout = priv->cfg->base_params->wd_timeout;
1868         if (timeout == 0)
1869                 return;
1870
1871         /* monitor and check for stuck cmd queue */
1872         if (iwl_check_stuck_queue(priv, priv->cmd_queue))
1873                 return;
1874
1875         /* monitor and check for other stuck queues */
1876         if (iwl_is_any_associated(priv)) {
1877                 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
1878                         /* skip as we already checked the command queue */
1879                         if (cnt == priv->cmd_queue)
1880                                 continue;
1881                         if (iwl_check_stuck_queue(priv, cnt))
1882                                 return;
1883                 }
1884         }
1885
1886         mod_timer(&priv->watchdog, jiffies +
1887                   msecs_to_jiffies(IWL_WD_TICK(timeout)));
1888 }
1889
1890 void iwl_setup_watchdog(struct iwl_priv *priv)
1891 {
1892         unsigned int timeout = priv->cfg->base_params->wd_timeout;
1893
1894         if (timeout)
1895                 mod_timer(&priv->watchdog,
1896                           jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
1897         else
1898                 del_timer(&priv->watchdog);
1899 }
1900
1901 /*
1902  * extended beacon time format
1903  * time in usec will be changed into a 32-bit value in extended:internal format
1904  * the extended part is the beacon counts
1905  * the internal part is the time in usec within one beacon interval
1906  */
/*
 * Convert a time in usec to the extended beacon time format: the high
 * bits hold the beacon count (usec / beacon interval) and the low
 * beacon_time_tsf_bits bits hold the remaining time in usec within the
 * current beacon interval.
 *
 * Returns 0 when either the interval or the time is zero.
 */
u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval)
{
	u32 quot;
	u32 rem;
	u32 interval = beacon_interval * TIME_UNIT;

	if (!interval || !usec)
		return 0;

	/*
	 * Beacon count, masked to the width of the extended part
	 * (high mask shifted down past the internal tsf bits).
	 */
	quot = (usec / interval) &
		(iwl_beacon_time_mask_high(priv,
		priv->hw_params.beacon_time_tsf_bits) >>
		priv->hw_params.beacon_time_tsf_bits);
	/* remainder within one interval, masked to the internal part */
	rem = (usec % interval) & iwl_beacon_time_mask_low(priv,
				   priv->hw_params.beacon_time_tsf_bits);

	/* pack: beacon count in the high bits, remainder in the low bits */
	return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
}
1925
1926 /* base is usually what we get from ucode with each received frame,
1927  * the same as HW timer counter counting down
1928  */
/*
 * Add @addon to @base in the extended beacon time format (see
 * iwl_usecs_to_beacons): high bits are the beacon count, low
 * beacon_time_tsf_bits bits are the time within one beacon interval.
 * Returns the result as little-endian, ready for the device.
 */
__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
			   u32 addon, u32 beacon_interval)
{
	/* low parts: time within the beacon interval */
	u32 base_low = base & iwl_beacon_time_mask_low(priv,
					priv->hw_params.beacon_time_tsf_bits);
	u32 addon_low = addon & iwl_beacon_time_mask_low(priv,
					priv->hw_params.beacon_time_tsf_bits);
	u32 interval = beacon_interval * TIME_UNIT;
	/* high parts: sum of the two beacon counters */
	u32 res = (base & iwl_beacon_time_mask_high(priv,
				priv->hw_params.beacon_time_tsf_bits)) +
				(addon & iwl_beacon_time_mask_high(priv,
				priv->hw_params.beacon_time_tsf_bits));

	/*
	 * base comes from a down-counting HW timer, so the low parts are
	 * effectively subtracted; when base's low part is smaller, borrow
	 * one beacon interval and carry into the beacon count.
	 * NOTE(review): the equal-low-parts branch adds a full beacon
	 * count with zero remainder -- presumably intentional; confirm
	 * against the uCode timer semantics.
	 */
	if (base_low > addon_low)
		res += base_low - addon_low;
	else if (base_low < addon_low) {
		res += interval + base_low - addon_low;
		res += (1 << priv->hw_params.beacon_time_tsf_bits);
	} else
		res += (1 << priv->hw_params.beacon_time_tsf_bits);

	return cpu_to_le32(res);
}
1952
1953 #ifdef CONFIG_PM
1954
/*
 * PM suspend hook.  Always returns 0.
 */
int iwl_pci_suspend(struct device *device)
{
	struct iwl_priv *priv = pci_get_drvdata(to_pci_dev(device));

	/*
	 * On system suspend, mac80211 calls iwl_mac_stop() first, but
	 * that routine has no knowledge of who the caller is and thus
	 * never invokes apm_ops.stop() to halt DMA.  Stop the APM here
	 * to make sure DMA is really quiesced.
	 */
	iwl_apm_stop(priv);

	return 0;
}
1971
1972 int iwl_pci_resume(struct device *device)
1973 {
1974         struct pci_dev *pdev = to_pci_dev(device);
1975         struct iwl_priv *priv = pci_get_drvdata(pdev);
1976         bool hw_rfkill = false;
1977
1978         /*
1979          * We disable the RETRY_TIMEOUT register (0x41) to keep
1980          * PCI Tx retries from interfering with C3 CPU state.
1981          */
1982         pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
1983
1984         iwl_enable_interrupts(priv);
1985
1986         if (!(iwl_read32(priv, CSR_GP_CNTRL) &
1987                                 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
1988                 hw_rfkill = true;
1989
1990         if (hw_rfkill)
1991                 set_bit(STATUS_RF_KILL_HW, &priv->status);
1992         else
1993                 clear_bit(STATUS_RF_KILL_HW, &priv->status);
1994
1995         wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
1996
1997         return 0;
1998 }
1999
/*
 * PM callback table: the same suspend/resume pair also services the
 * hibernation transitions (freeze/thaw, poweroff/restore).
 */
const struct dev_pm_ops iwl_pm_ops = {
	.suspend = iwl_pci_suspend,
	.resume = iwl_pci_resume,
	.freeze = iwl_pci_suspend,
	.thaw = iwl_pci_resume,
	.poweroff = iwl_pci_suspend,
	.restore = iwl_pci_resume,
};
2008
2009 #endif /* CONFIG_PM */