3 * This file is part of wl1271
5 * Copyright (C) 2008-2010 Nokia Corporation
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 #include <linux/module.h>
26 #include <linux/firmware.h>
27 #include <linux/delay.h>
28 #include <linux/spi/spi.h>
29 #include <linux/crc32.h>
30 #include <linux/etherdevice.h>
31 #include <linux/vmalloc.h>
32 #include <linux/platform_device.h>
33 #include <linux/slab.h>
34 #include <linux/wl12xx.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
40 #include "wl12xx_80211.h"
54 #define WL1271_BOOT_RETRIES 3
/*
 * Driver-wide default configuration.  Copied wholesale into wl->conf by
 * wl1271_conf_init() at probe time; fields can then be adjusted at run
 * time (changes take effect on the next interface up).
 * NOTE(review): the extraction dropped the sub-struct openers/closers
 * (.sg, .rx, .tx, .conn, ...) and some fields; values are kept verbatim.
 */
56 static struct conf_drv_settings default_conf = {
/* BT coexistence (soft-gemini) parameter table, indexed by CONF_SG_* */
59 [CONF_SG_ACL_BT_MASTER_MIN_BR] = 10,
60 [CONF_SG_ACL_BT_MASTER_MAX_BR] = 180,
61 [CONF_SG_ACL_BT_SLAVE_MIN_BR] = 10,
62 [CONF_SG_ACL_BT_SLAVE_MAX_BR] = 180,
63 [CONF_SG_ACL_BT_MASTER_MIN_EDR] = 10,
64 [CONF_SG_ACL_BT_MASTER_MAX_EDR] = 80,
65 [CONF_SG_ACL_BT_SLAVE_MIN_EDR] = 10,
66 [CONF_SG_ACL_BT_SLAVE_MAX_EDR] = 80,
67 [CONF_SG_ACL_WLAN_PS_MASTER_BR] = 8,
68 [CONF_SG_ACL_WLAN_PS_SLAVE_BR] = 8,
69 [CONF_SG_ACL_WLAN_PS_MASTER_EDR] = 20,
70 [CONF_SG_ACL_WLAN_PS_SLAVE_EDR] = 20,
71 [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR] = 20,
72 [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR] = 35,
73 [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR] = 16,
74 [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR] = 35,
75 [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR] = 32,
76 [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR] = 50,
77 [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR] = 28,
78 [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR] = 50,
79 [CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR] = 10,
80 [CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR] = 20,
81 [CONF_SG_ACL_PASSIVE_SCAN_BT_BR] = 75,
82 [CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR] = 15,
83 [CONF_SG_ACL_PASSIVE_SCAN_BT_EDR] = 27,
84 [CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR] = 17,
85 /* active scan params */
86 [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
87 [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
88 [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
89 /* passive scan params */
90 [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_BR] = 800,
91 [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_EDR] = 200,
92 [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
93 /* passive scan in dual antenna params */
94 [CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN] = 0,
95 [CONF_SG_BCN_HV3_COLLISION_THRESH_IN_PASSIVE_SCAN] = 0,
96 [CONF_SG_TX_RX_PROTECTION_BWIDTH_IN_PASSIVE_SCAN] = 0,
/* general coexistence behavior */
98 [CONF_SG_STA_FORCE_PS_IN_BT_SCO] = 1,
99 [CONF_SG_ANTENNA_CONFIGURATION] = 0,
100 [CONF_SG_BEACON_MISS_PERCENT] = 60,
101 [CONF_SG_DHCP_TIME] = 5000,
102 [CONF_SG_RXT] = 1200,
103 [CONF_SG_TXT] = 1000,
104 [CONF_SG_ADAPTIVE_RXT_TXT] = 1,
105 [CONF_SG_GENERAL_USAGE_BIT_MAP] = 3,
106 [CONF_SG_HV3_MAX_SERVED] = 6,
107 [CONF_SG_PS_POLL_TIMEOUT] = 10,
108 [CONF_SG_UPSD_TIMEOUT] = 10,
109 [CONF_SG_CONSECUTIVE_CTS_THRESHOLD] = 2,
110 [CONF_SG_STA_RX_WINDOW_AFTER_DTIM] = 5,
111 [CONF_SG_STA_CONNECTION_PROTECTION_TIME] = 30,
/* AP-mode coexistence params */
113 [CONF_AP_BEACON_MISS_TX] = 3,
114 [CONF_AP_RX_WINDOW_AFTER_BEACON] = 10,
115 [CONF_AP_BEACON_WINDOW_INTERVAL] = 2,
116 [CONF_AP_CONNECTION_PROTECTION_TIME] = 0,
117 [CONF_AP_BT_ACL_VAL_BT_SERVE_TIME] = 25,
118 [CONF_AP_BT_ACL_VAL_WL_SERVE_TIME] = 25,
119 /* CTS Diluting params */
120 [CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH] = 0,
121 [CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER] = 0,
123 .state = CONF_SG_PROTECTIVE,
/* RX path defaults */
126 .rx_msdu_life_time = 512000,
127 .packet_detection_threshold = 0,
128 .ps_poll_timeout = 15,
/* RTS disabled by default (threshold at max) */
130 .rts_threshold = IEEE80211_MAX_RTS_THRESHOLD,
131 .rx_cca_threshold = 0,
132 .irq_blk_threshold = 0xFFFF,
133 .irq_pkt_threshold = 0,
135 .queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
/* TX path defaults */
138 .tx_energy_detection = 0,
141 .short_retry_limit = 10,
142 .long_retry_limit = 10,
/* AC queues using PIFS-like AIFS (values below are the visible tail
 * of per-AC conf entries; openers were lost in extraction) */
165 .aifsn = CONF_TX_AIFS_PIFS,
172 .aifsn = CONF_TX_AIFS_PIFS,
176 .max_tx_retries = 100,
177 .ap_aging_period = 300,
/* per-TID configuration: one entry per access category (BE/BK/VI/VO) */
181 .queue_id = CONF_TX_AC_BE,
182 .channel_type = CONF_CHANNEL_TYPE_EDCF,
183 .tsid = CONF_TX_AC_BE,
184 .ps_scheme = CONF_PS_SCHEME_LEGACY,
185 .ack_policy = CONF_ACK_POLICY_LEGACY,
189 .queue_id = CONF_TX_AC_BK,
190 .channel_type = CONF_CHANNEL_TYPE_EDCF,
191 .tsid = CONF_TX_AC_BK,
192 .ps_scheme = CONF_PS_SCHEME_LEGACY,
193 .ack_policy = CONF_ACK_POLICY_LEGACY,
197 .queue_id = CONF_TX_AC_VI,
198 .channel_type = CONF_CHANNEL_TYPE_EDCF,
199 .tsid = CONF_TX_AC_VI,
200 .ps_scheme = CONF_PS_SCHEME_LEGACY,
201 .ack_policy = CONF_ACK_POLICY_LEGACY,
205 .queue_id = CONF_TX_AC_VO,
206 .channel_type = CONF_CHANNEL_TYPE_EDCF,
207 .tsid = CONF_TX_AC_VO,
208 .ps_scheme = CONF_PS_SCHEME_LEGACY,
209 .ack_policy = CONF_ACK_POLICY_LEGACY,
/* fragmentation disabled by default (threshold at max) */
213 .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
214 .tx_compl_timeout = 700,
215 .tx_compl_threshold = 4,
216 .basic_rate = CONF_HW_BIT_RATE_1MBPS,
217 .basic_rate_5 = CONF_HW_BIT_RATE_6MBPS,
218 .tmpl_short_retry_limit = 10,
219 .tmpl_long_retry_limit = 10,
/* ms without freed TX blocks before wl12xx_tx_watchdog_work recovers */
220 .tx_watchdog_timeout = 5000,
/* connection / power-save defaults */
223 .wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
224 .listen_interval = 1,
225 .suspend_wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM,
226 .suspend_listen_interval = 3,
227 .bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
228 .bcn_filt_ie_count = 2,
/* beacon filter rules: pass CSA IEs on appearance, HT info on change */
231 .ie = WLAN_EID_CHANNEL_SWITCH,
232 .rule = CONF_BCN_RULE_PASS_ON_APPEARANCE,
235 .ie = WLAN_EID_HT_INFORMATION,
236 .rule = CONF_BCN_RULE_PASS_ON_CHANGE,
239 .synch_fail_thold = 10,
240 .bss_lose_timeout = 100,
241 .beacon_rx_timeout = 10000,
242 .broadcast_timeout = 20000,
243 .rx_broadcast_in_ps = 1,
244 .ps_poll_threshold = 10,
245 .bet_enable = CONF_BET_MODE_ENABLE,
246 .bet_max_consecutive = 50,
247 .psm_entry_retries = 8,
248 .psm_exit_retries = 16,
249 .psm_entry_nullfunc_retries = 3,
250 .dynamic_ps_timeout = 200,
252 .keep_alive_interval = 55000,
253 .max_listen_interval = 20,
/* ITRIM / boot timing */
260 .host_clk_settling_time = 5000,
261 .host_fast_wakeup_support = false
/* RSSI/SNR averaging weights */
265 .avg_weight_rssi_beacon = 20,
266 .avg_weight_rssi_data = 10,
267 .avg_weight_snr_beacon = 20,
268 .avg_weight_snr_data = 10,
/* one-shot scan dwell times, in TU/1000 */
271 .min_dwell_time_active = 7500,
272 .max_dwell_time_active = 30000,
273 .min_dwell_time_passive = 100000,
274 .max_dwell_time_passive = 100000,
276 .split_scan_timeout = 50000,
/* scheduled scan */
279 /* sched_scan requires dwell times in TU instead of TU/1000 */
280 .min_dwell_time_active = 30,
281 .max_dwell_time_active = 60,
282 .dwell_time_passive = 100,
283 .dwell_time_dfs = 150,
285 .rssi_threshold = -90,
/* regulatory power-compensation tables (all zero = no compensation) */
289 .tx_per_channel_power_compensation_2 = {
290 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
292 .tx_per_channel_power_compensation_5 = {
293 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
294 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
295 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* HT / block-ack */
300 .tx_ba_win_size = 64,
301 .inactivity_timeout = 10000,
302 .tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP,
/* FW memory configuration — two variants (presumably wl127x vs wl128x;
 * the discriminating field names were lost in extraction — verify) */
308 .tx_min_block_num = 40,
310 .min_req_tx_blocks = 100,
311 .min_req_rx_blocks = 22,
318 .tx_min_block_num = 40,
320 .min_req_tx_blocks = 45,
321 .min_req_rx_blocks = 22,
/* FM coexistence: 0xff/0xffff/0xffffffff mean "use firmware default" */
327 .n_divider_fref_set_1 = 0xff, /* default */
328 .n_divider_fref_set_2 = 12,
329 .m_divider_fref_set_1 = 148,
330 .m_divider_fref_set_2 = 0xffff, /* default */
331 .coex_pll_stabilization_time = 0xffffffff, /* default */
332 .ldo_stabilization_time = 0xffff, /* default */
333 .fm_disturbed_band_margin = 0xff, /* default */
334 .swallow_clk_diff = 0xff, /* default */
/* firmware logger defaults; overridable via the fwlog_param module arg */
343 .mode = WL12XX_FWLOG_ON_DEMAND,
346 .timestamp = WL12XX_FWLOG_TIMESTAMP_DISABLED,
347 .output = WL12XX_FWLOG_OUTPUT_HOST,
350 .hci_io_ds = HCI_IO_DS_6MA,
/* rate adaptation parameters */
352 .rate_retry_score = 32000,
357 .inverse_curiosity_factor = 5,
359 .tx_fail_high_th = 10,
360 .per_alpha_shift = 4,
362 .per_beta1_shift = 10,
363 .per_beta2_shift = 8,
365 .rate_check_down = 12,
366 .rate_retry_policy = {
367 0x00, 0x00, 0x00, 0x00, 0x00,
368 0x00, 0x00, 0x00, 0x00, 0x00,
374 .hangover_period = 20,
376 .early_termination_mode = 1,
/* Module parameters: fwlog_param selects the FW logger mode (parsed by
 * wl1271_conf_init); bug_on_recovery turns HW recovery into a BUG_ON. */
387 static char *fwlog_param;
388 static bool bug_on_recovery;
/* Forward declarations for routines defined later in this file. */
390 static void __wl1271_op_remove_interface(struct wl1271 *wl,
391 struct ieee80211_vif *vif,
392 bool reset_tx_queues);
393 static void wl1271_op_stop(struct ieee80211_hw *hw);
394 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
/*
 * Tell the firmware a station interface has become authorized:
 * send the peer-state command for the STA link, end the remain-on-channel
 * period for the role, and log completion.  Returns an errno-style int
 * (early-return branches were lost in extraction).
 */
396 static int wl12xx_set_authorized(struct wl1271 *wl,
397 struct wl12xx_vif *wlvif)
/* only meaningful for a station BSS */
401 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
/* must already be associated */
404 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
/* send the state update at most once per association */
407 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
410 ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid);
/* connection established — cancel the ROC held for connecting */
414 wl12xx_croc(wl, wlvif->role_id);
416 wl1271_info("Association completed.");
/*
 * Regulatory-domain change notifier: walk the 5 GHz band and force
 * radar channels to passive scanning with IBSS disallowed, since the
 * firmware cannot do radar detection itself.
 */
420 static int wl1271_reg_notify(struct wiphy *wiphy,
421 struct regulatory_request *request)
423 struct ieee80211_supported_band *band;
424 struct ieee80211_channel *ch;
427 band = wiphy->bands[IEEE80211_BAND_5GHZ];
428 for (i = 0; i < band->n_channels; i++) {
429 ch = &band->channels[i];
/* skip channels the regdomain disabled outright */
430 if (ch->flags & IEEE80211_CHAN_DISABLED)
433 if (ch->flags & IEEE80211_CHAN_RADAR)
434 ch->flags |= IEEE80211_CHAN_NO_IBSS |
435 IEEE80211_CHAN_PASSIVE_SCAN;
/*
 * Enable/disable RX streaming in the firmware via ACX and mirror the
 * result in the wlvif flag.  Caller must hold wl->mutex.
 */
442 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
447 /* we should hold wl->mutex */
448 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
/* keep the software flag in sync with the firmware state */
453 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
455 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
/*
 * Re-evaluate RX streaming after the configured interval changed or
 * streaming should be turned off entirely.  Caller holds wl->mutex
 * (implied by calling wl1271_set_rx_streaming).
 */
461 * this function is being called when the rx_streaming interval
462 * has beed changed or rx_streaming should be disabled
464 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
467 int period = wl->conf.rx_streaming.interval;
469 /* don't reconfigure if rx_streaming is disabled */
470 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
473 /* reconfigure/disable according to new streaming_period */
/* keep streaming only while associated AND (always-on or coex active) */
475 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
476 (wl->conf.rx_streaming.always ||
477 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
478 ret = wl1271_set_rx_streaming(wl, wlvif, true);
480 ret = wl1271_set_rx_streaming(wl, wlvif, false);
481 /* don't cancel_work_sync since we might deadlock */
482 del_timer_sync(&wlvif->rx_streaming_timer);
/*
 * Deferred work: enable RX streaming, then arm the per-vif timer so it
 * is disabled again after the configured duration of inactivity.
 */
488 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
491 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
492 rx_streaming_enable_work);
493 struct wl1271 *wl = wlvif->wl;
495 mutex_lock(&wl->mutex);
/* bail if already started, not associated, or coex/always-on not set */
497 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
498 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
499 (!wl->conf.rx_streaming.always &&
500 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
/* an interval of 0 means the feature is disabled */
503 if (!wl->conf.rx_streaming.interval)
506 ret = wl1271_ps_elp_wakeup(wl);
510 ret = wl1271_set_rx_streaming(wl, wlvif, true);
514 /* stop it after some time of inactivity */
515 mod_timer(&wlvif->rx_streaming_timer,
516 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
519 wl1271_ps_elp_sleep(wl);
521 mutex_unlock(&wl->mutex);
/*
 * Deferred work: turn RX streaming off again (queued from the per-vif
 * timer once the activity window expires).
 */
524 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
527 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
528 rx_streaming_disable_work);
529 struct wl1271 *wl = wlvif->wl;
531 mutex_lock(&wl->mutex);
/* nothing to do if streaming was never started */
533 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
536 ret = wl1271_ps_elp_wakeup(wl);
540 ret = wl1271_set_rx_streaming(wl, wlvif, false);
545 wl1271_ps_elp_sleep(wl);
547 mutex_unlock(&wl->mutex);
/*
 * Timer callback (runs in softirq context, so it cannot take wl->mutex):
 * defer the actual disable to the rx_streaming_disable_work item.
 */
550 static void wl1271_rx_streaming_timer(unsigned long data)
552 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
553 struct wl1271 *wl = wlvif->wl;
554 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
/*
 * Restart the TX-stuck watchdog: (re)queue the delayed work for another
 * tx_watchdog_timeout ms.  Skipped when no TX blocks are allocated,
 * i.e. nothing is pending in the firmware to watch.
 */
557 /* wl->mutex must be taken */
558 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
560 /* if the watchdog is not armed, don't do anything */
561 if (wl->tx_allocated_blocks == 0)
/* cancel first so the delay restarts from now */
564 cancel_delayed_work(&wl->tx_watchdog_work);
565 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
566 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
/*
 * TX watchdog: fires tx_watchdog_timeout ms after blocks were allocated
 * with no TX completion.  Benign causes (ROC, scan in progress, AP with
 * sleeping stations) just rearm the timer; otherwise the firmware is
 * assumed stuck and hardware recovery is triggered.
 */
569 static void wl12xx_tx_watchdog_work(struct work_struct *work)
571 struct delayed_work *dwork;
574 dwork = container_of(work, struct delayed_work, work);
575 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
577 mutex_lock(&wl->mutex);
/* chip already down — nothing to watch */
579 if (unlikely(wl->state == WL1271_STATE_OFF))
582 /* Tx went out in the meantime - everything is ok */
583 if (unlikely(wl->tx_allocated_blocks == 0))
587 * if a ROC is in progress, we might not have any Tx for a long
588 * time (e.g. pending Tx on the non-ROC channels)
590 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
591 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
592 wl->conf.tx.tx_watchdog_timeout);
593 wl12xx_rearm_tx_watchdog_locked(wl);
598 * if a scan is in progress, we might not have any Tx for a long
601 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
602 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
603 wl->conf.tx.tx_watchdog_timeout);
604 wl12xx_rearm_tx_watchdog_locked(wl);
609 * AP might cache a frame for a long time for a sleeping station,
610 * so rearm the timer if there's an AP interface with stations. If
611 * Tx is genuinely stuck we will most hopefully discover it when all
612 * stations are removed due to inactivity.
614 if (wl->active_sta_count) {
615 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
617 wl->conf.tx.tx_watchdog_timeout,
618 wl->active_sta_count);
619 wl12xx_rearm_tx_watchdog_locked(wl);
/* no benign explanation — assume the FW is wedged and recover */
623 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
624 wl->conf.tx.tx_watchdog_timeout);
625 wl12xx_queue_recovery_work(wl);
628 mutex_unlock(&wl->mutex);
/*
 * Install the compiled-in default configuration into wl->conf, then
 * apply the optional "fwlog" module parameter on top of it.
 */
631 static void wl1271_conf_init(struct wl1271 *wl)
635 * This function applies the default configuration to the driver. This
636 * function is invoked upon driver load (spi probe.)
638 * The configuration is stored in a run-time structure in order to
639 * facilitate for run-time adjustment of any of the parameters. Making
640 * changes to the configuration structure will apply the new values on
641 * the next interface up (wl1271_op_start.)
644 /* apply driver default configuration */
645 memcpy(&wl->conf, &default_conf, sizeof(default_conf));
647 /* Adjust settings according to optional module parameters */
/* fwlog= accepts: continuous | ondemand | dbgpins | disable */
649 if (!strcmp(fwlog_param, "continuous")) {
650 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
651 } else if (!strcmp(fwlog_param, "ondemand")) {
652 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
653 } else if (!strcmp(fwlog_param, "dbgpins")) {
654 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
655 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
656 } else if (!strcmp(fwlog_param, "disable")) {
657 wl->conf.fwlog.mem_blocks = 0;
658 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
/* unrecognized value: log and keep the defaults */
660 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
/*
 * Initialize the chip for PLT (production-line testing) mode: push
 * general/radio parameters (chip-variant specific), run chip init,
 * configure FW memory, enable the data path and force CAM power mode.
 * On failure after memory config, frees the target memory map.
 */
665 static int wl1271_plt_init(struct wl1271 *wl)
/* wl128x and wl127x use different parameter commands */
669 if (wl->chip.id == CHIP_ID_1283_PG20)
670 ret = wl128x_cmd_general_parms(wl);
672 ret = wl1271_cmd_general_parms(wl);
676 if (wl->chip.id == CHIP_ID_1283_PG20)
677 ret = wl128x_cmd_radio_parms(wl);
679 ret = wl1271_cmd_radio_parms(wl);
/* extended radio parms exist only on the wl127x family */
683 if (wl->chip.id != CHIP_ID_1283_PG20) {
684 ret = wl1271_cmd_ext_radio_parms(wl);
689 /* Chip-specific initializations */
690 ret = wl1271_chip_specific_init(wl);
694 ret = wl1271_acx_init_mem_config(wl);
698 ret = wl12xx_acx_mem_cfg(wl);
700 goto out_free_memmap;
702 /* Enable data path */
703 ret = wl1271_cmd_data_path(wl, 1);
705 goto out_free_memmap;
707 /* Configure for CAM power saving (ie. always active) */
708 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
710 goto out_free_memmap;
713 ret = wl1271_acx_pm_config(wl);
715 goto out_free_memmap;
/* error path: release the memory map allocated by init_mem_config */
720 kfree(wl->target_mem_map);
721 wl->target_mem_map = NULL;
/*
 * Per-link power-save regulation (AP mode): decide, from the FW PS
 * bitmap and the number of packets pending for the link, whether to
 * start or end "high-level PS" for a station link.
 */
726 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
727 struct wl12xx_vif *wlvif,
730 bool fw_ps, single_sta;
/* is this link asleep according to the firmware? */
732 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
733 single_sta = (wl->active_sta_count == 1);
736 * Wake up from high level PS if the STA is asleep with too little
737 * packets in FW or if the STA is awake.
739 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
740 wl12xx_ps_link_end(wl, wlvif, hlid);
743 * Start high-level PS if the STA is asleep with enough blocks in FW.
744 * Make an exception if this is the only connected station. In this
745 * case FW-memory congestion is not a problem.
747 else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
748 wl12xx_ps_link_start(wl, wlvif, hlid, true);
/*
 * AP-mode per-link bookkeeping driven by the FW status block: track the
 * firmware's link PS bitmap, account freed packets per station link,
 * and feed the result into the per-link PS regulator above.
 */
751 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
752 struct wl12xx_vif *wlvif,
753 struct wl12xx_fw_status *status)
755 struct wl1271_link *lnk;
759 /* TODO: also use link_fast_bitmap here */
761 cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
762 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
763 wl1271_debug(DEBUG_PSM,
764 "link ps prev 0x%x cur 0x%x changed 0x%x",
765 wl->ap_fw_ps_map, cur_fw_ps_map,
766 wl->ap_fw_ps_map ^ cur_fw_ps_map);
768 wl->ap_fw_ps_map = cur_fw_ps_map;
771 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
772 lnk = &wl->links[hlid];
/* delta of the FW's freed-packets counter since last status read */
773 cnt = status->tx_lnk_free_pkts[hlid] - lnk->prev_freed_pkts;
775 lnk->prev_freed_pkts = status->tx_lnk_free_pkts[hlid];
776 lnk->allocated_pkts -= cnt;
778 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
779 lnk->allocated_pkts);
/*
 * Read the firmware status block from the chip and update the host-side
 * TX accounting: per-queue allocated packets, total freed/available TX
 * blocks (with wrap-around handling), the TX watchdog, AP link status,
 * and the host<->chip time offset.  Called with wl->mutex held from the
 * IRQ work path.
 */
783 static void wl12xx_fw_status(struct wl1271 *wl,
784 struct wl12xx_fw_status *status)
786 struct wl12xx_vif *wlvif;
788 u32 old_tx_blk_count = wl->tx_blocks_available;
789 int avail, freed_blocks;
792 wl1271_raw_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false);
794 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
795 "drv_rx_counter = %d, tx_results_counter = %d)",
797 status->fw_rx_counter,
798 status->drv_rx_counter,
799 status->tx_results_counter);
801 for (i = 0; i < NUM_TX_QUEUES; i++) {
802 /* prevent wrap-around in freed-packets counter */
/* counters are 8-bit in FW, hence the & 0xff on the delta */
803 wl->tx_allocated_pkts[i] -=
804 (status->tx_released_pkts[i] -
805 wl->tx_pkts_freed[i]) & 0xff;
807 wl->tx_pkts_freed[i] = status->tx_released_pkts[i];
810 /* prevent wrap-around in total blocks counter */
811 if (likely(wl->tx_blocks_freed <=
812 le32_to_cpu(status->total_released_blks)))
813 freed_blocks = le32_to_cpu(status->total_released_blks) -
/* 32-bit FW counter wrapped: add 2^32 to restore the delta */
816 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
817 le32_to_cpu(status->total_released_blks);
819 wl->tx_blocks_freed = le32_to_cpu(status->total_released_blks);
821 wl->tx_allocated_blocks -= freed_blocks;
824 * If the FW freed some blocks:
825 * If we still have allocated blocks - re-arm the timer, Tx is
826 * not stuck. Otherwise, cancel the timer (no Tx currently).
829 if (wl->tx_allocated_blocks)
830 wl12xx_rearm_tx_watchdog_locked(wl);
832 cancel_delayed_work(&wl->tx_watchdog_work);
835 avail = le32_to_cpu(status->tx_total) - wl->tx_allocated_blocks;
838 * The FW might change the total number of TX memblocks before
839 * we get a notification about blocks being released. Thus, the
840 * available blocks calculation might yield a temporary result
841 * which is lower than the actual available blocks. Keeping in
842 * mind that only blocks that were allocated can be moved from
843 * TX to RX, tx_blocks_available should never decrease here.
845 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
848 /* if more blocks are available now, tx work can be scheduled */
849 if (wl->tx_blocks_available > old_tx_blk_count)
850 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
852 /* for AP update num of allocated TX blocks per link and ps status */
853 wl12xx_for_each_wlvif_ap(wl, wlvif) {
854 wl12xx_irq_update_links_status(wl, wlvif, status);
857 /* update the host-chipset time offset */
/* host time in ~ms (ns >> 10) minus the FW's local timestamp */
859 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
860 (s64)le32_to_cpu(status->fw_localtime);
/*
 * Drain the deferred RX and TX-status queues into mac80211.  The *_ni
 * variants are used because this runs in process context.
 */
863 static void wl1271_flush_deferred_work(struct wl1271 *wl)
867 /* Pass all received frames to the network stack */
868 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
869 ieee80211_rx_ni(wl->hw, skb);
871 /* Return sent skbs to the network stack */
872 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
873 ieee80211_tx_status_ni(wl->hw, skb);
/*
 * Work item that repeatedly flushes the deferred queues until the RX
 * queue stays empty (new frames may be queued while flushing).
 */
876 static void wl1271_netstack_work(struct work_struct *work)
879 container_of(work, struct wl1271, netstack_work);
882 wl1271_flush_deferred_work(wl);
883 } while (skb_queue_len(&wl->deferred_rx_queue));
886 #define WL1271_IRQ_MAX_LOOPS 256
/*
 * Threaded IRQ handler.  Loops up to WL1271_IRQ_MAX_LOOPS (once for
 * edge-triggered platforms) reading the FW status and dispatching:
 * watchdog -> recovery, DATA -> RX + inline TX + TX-complete, EVENT_A/B
 * -> event handler.  TX work is handled inline here when possible to
 * avoid queueing redundant work.
 */
888 static irqreturn_t wl1271_irq(int irq, void *cookie)
892 int loopcount = WL1271_IRQ_MAX_LOOPS;
893 struct wl1271 *wl = (struct wl1271 *)cookie;
895 unsigned int defer_count;
898 /* TX might be handled here, avoid redundant work */
899 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
900 cancel_work_sync(&wl->tx_work);
903 * In case edge triggered interrupt must be used, we cannot iterate
904 * more than once without introducing race conditions with the hardirq.
906 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
909 mutex_lock(&wl->mutex);
911 wl1271_debug(DEBUG_IRQ, "IRQ work");
/* chip powered off — spurious interrupt, nothing to do */
913 if (unlikely(wl->state == WL1271_STATE_OFF))
916 ret = wl1271_ps_elp_wakeup(wl);
920 while (!done && loopcount--) {
922 * In order to avoid a race with the hardirq, clear the flag
923 * before acknowledging the chip. Since the mutex is held,
924 * wl1271_ps_elp_wakeup cannot be called concurrently.
926 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
927 smp_mb__after_clear_bit();
929 wl12xx_fw_status(wl, wl->fw_status);
930 intr = le32_to_cpu(wl->fw_status->intr);
931 intr &= WL1271_INTR_MASK;
937 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
938 wl1271_error("watchdog interrupt received! "
939 "starting recovery.");
940 wl12xx_queue_recovery_work(wl);
942 /* restarting the chip. ignore any other interrupt. */
946 if (likely(intr & WL1271_ACX_INTR_DATA)) {
947 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
949 wl12xx_rx(wl, wl->fw_status);
951 /* Check if any tx blocks were freed */
952 spin_lock_irqsave(&wl->wl_lock, flags);
953 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
954 wl1271_tx_total_queue_count(wl) > 0) {
955 spin_unlock_irqrestore(&wl->wl_lock, flags);
957 * In order to avoid starvation of the TX path,
958 * call the work function directly.
960 wl1271_tx_work_locked(wl);
962 spin_unlock_irqrestore(&wl->wl_lock, flags);
965 /* check for tx results */
/* FW counter is 8-bit, compare against the low byte only */
966 if (wl->fw_status->tx_results_counter !=
967 (wl->tx_results_count & 0xff))
968 wl1271_tx_complete(wl);
970 /* Make sure the deferred queues don't get too long */
971 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
972 skb_queue_len(&wl->deferred_rx_queue);
973 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
974 wl1271_flush_deferred_work(wl);
977 if (intr & WL1271_ACX_INTR_EVENT_A) {
978 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
979 wl1271_event_handle(wl, 0);
982 if (intr & WL1271_ACX_INTR_EVENT_B) {
983 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
984 wl1271_event_handle(wl, 1);
987 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
988 wl1271_debug(DEBUG_IRQ,
989 "WL1271_ACX_INTR_INIT_COMPLETE");
991 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
992 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
995 wl1271_ps_elp_sleep(wl);
998 spin_lock_irqsave(&wl->wl_lock, flags);
999 /* In case TX was not handled here, queue TX work */
1000 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
1001 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1002 wl1271_tx_total_queue_count(wl) > 0)
1003 ieee80211_queue_work(wl->hw, &wl->tx_work);
1004 spin_unlock_irqrestore(&wl->wl_lock, flags);
1006 mutex_unlock(&wl->mutex);
/* Accumulator passed to wl12xx_vif_count_iter while iterating active
 * interfaces: remembers whether cur_vif itself is among them. */
1011 struct vif_counter_data {
1014 struct ieee80211_vif *cur_vif;
1015 bool cur_vif_running;
/*
 * mac80211 interface-iterator callback: flags when the interface being
 * counted is the caller's own vif (counter increment line was lost in
 * extraction).
 */
1018 static void wl12xx_vif_count_iter(void *data, u8 *mac,
1019 struct ieee80211_vif *vif)
1021 struct vif_counter_data *counter = data;
1024 if (counter->cur_vif == vif)
1025 counter->cur_vif_running = true;
/*
 * Count active interfaces into *data via mac80211's iterator.
 * The iterator takes mac80211-internal locks, hence the no-mutex rule.
 */
1028 /* caller must not hold wl->mutex, as it might deadlock */
1029 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
1030 struct ieee80211_vif *cur_vif,
1031 struct vif_counter_data *data)
1033 memset(data, 0, sizeof(*data));
1034 data->cur_vif = cur_vif;
1036 ieee80211_iterate_active_interfaces(hw,
1037 wl12xx_vif_count_iter, data);
/*
 * Select and load the right firmware image: PLT vs normal, single- vs
 * multi-role (by cached vif count), wl127x vs wl128x (by chip id).
 * Skips the load if the needed type is already cached; otherwise copies
 * the image into vmalloc'ed wl->fw and records its type and length.
 */
1040 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
1042 const struct firmware *fw;
1043 const char *fw_name;
1044 enum wl12xx_fw_type fw_type;
1048 fw_type = WL12XX_FW_TYPE_PLT;
1049 if (wl->chip.id == CHIP_ID_1283_PG20)
1050 fw_name = WL128X_PLT_FW_NAME;
1052 fw_name = WL127X_PLT_FW_NAME;
1055 * we can't call wl12xx_get_vif_count() here because
1056 * wl->mutex is taken, so use the cached last_vif_count value
1058 if (wl->last_vif_count > 1) {
1059 fw_type = WL12XX_FW_TYPE_MULTI;
1060 if (wl->chip.id == CHIP_ID_1283_PG20)
1061 fw_name = WL128X_FW_NAME_MULTI;
1063 fw_name = WL127X_FW_NAME_MULTI;
1065 fw_type = WL12XX_FW_TYPE_NORMAL;
1066 if (wl->chip.id == CHIP_ID_1283_PG20)
1067 fw_name = WL128X_FW_NAME_SINGLE;
1069 fw_name = WL127X_FW_NAME_SINGLE;
/* correct FW already loaded — nothing to do */
1073 if (wl->fw_type == fw_type)
1076 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
1078 ret = request_firmware(&fw, fw_name, wl->dev);
1081 wl1271_error("could not get firmware %s: %d", fw_name, ret);
/* boot code downloads the image in 32-bit words */
1086 wl1271_error("firmware size is not multiple of 32 bits: %zu",
/* invalidate the old image before replacing it */
1093 wl->fw_type = WL12XX_FW_TYPE_NONE;
1094 wl->fw_len = fw->size;
1095 wl->fw = vmalloc(wl->fw_len);
1098 wl1271_error("could not allocate memory for the firmware");
1103 memcpy(wl->fw, fw->data, wl->fw_len);
1105 wl->fw_type = fw_type;
1107 release_firmware(fw);
/*
 * Load the NVS (calibration/MAC) file from the filesystem via the
 * firmware loader and duplicate it into wl->nvs / wl->nvs_len.
 */
1112 static int wl1271_fetch_nvs(struct wl1271 *wl)
1114 const struct firmware *fw;
1117 ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev);
1120 wl1271_error("could not get nvs file %s: %d", WL12XX_NVS_NAME,
/* keep our own copy; the fw buffer is released below */
1125 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
1128 wl1271_error("could not allocate memory for the nvs file");
1133 wl->nvs_len = fw->size;
1136 release_firmware(fw);
/* Schedule hardware recovery unless one is already in progress. */
1141 void wl12xx_queue_recovery_work(struct wl1271 *wl)
1143 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1144 ieee80211_queue_work(wl->hw, &wl->recovery_work);
/*
 * Append one memory block's worth of FW log data to wl->fwlog.
 * The log is a sequence of [length, bytes...] records terminated by a
 * zero length; copy up to the terminator, capped by the remaining room
 * in the single-PAGE_SIZE fwlog buffer.  Returns the number of bytes
 * copied.
 */
1147 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
1151 /* The FW log is a length-value list, find where the log end */
1152 while (len < maxlen) {
1153 if (memblock[len] == 0)
/* truncated record would overrun the block — stop here */
1155 if (len + memblock[len] + 1 > maxlen)
1157 len += memblock[len] + 1;
1160 /* Make sure we have enough room */
1161 len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));
1163 /* Fill the FW log file, consumed by the sysfs fwlog entry */
1164 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
1165 wl->fwlog_size += len;
/*
 * On FW panic, pull the on-demand FW log out of chip memory: stop the
 * logger, read the first block address from the status area, then walk
 * the linked list of HW memory blocks copying each into wl->fwlog, and
 * finally wake any reader blocked on the sysfs fwlog entry.
 */
1170 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
/* only applicable when on-demand logging is configured and supported */
1176 if ((wl->quirks & WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
1177 (wl->conf.fwlog.mode != WL12XX_FWLOG_ON_DEMAND) ||
1178 (wl->conf.fwlog.mem_blocks == 0))
1181 wl1271_info("Reading FW panic log")
1183 block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
1188 * Make sure the chip is awake and the logger isn't active.
1189 * This might fail if the firmware hanged.
1191 if (!wl1271_ps_elp_wakeup(wl))
1192 wl12xx_cmd_stop_fwlog(wl);
1194 /* Read the first memory block address */
1195 wl12xx_fw_status(wl, wl->fw_status);
1196 first_addr = le32_to_cpu(wl->fw_status->log_start_addr);
1200 /* Traverse the memory blocks linked list */
1203 memset(block, 0, WL12XX_HW_BLOCK_SIZE);
1204 wl1271_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
1208 * Memory blocks are linked to one another. The first 4 bytes
1209 * of each memory block hold the hardware address of the next
1210 * one. The last memory block points to the first one.
1212 addr = le32_to_cpup((__le32 *)block);
/* stop when the log buffer is full (copy returned 0) */
1213 if (!wl12xx_copy_fwlog(wl, block + sizeof(addr),
1214 WL12XX_HW_BLOCK_SIZE - sizeof(addr)))
1216 } while (addr && (addr != first_addr));
/* notify readers blocked on the sysfs fwlog entry */
1218 wake_up_interruptible(&wl->fwlog_waitq);
/*
 * Hardware recovery: dump the FW panic log, bump TX security sequence
 * numbers (so GTK/PTK replay counters stay ahead of any FW progress),
 * stop queues and sched-scan, tear down every vif, stop the device and
 * ask mac80211 to restart the hardware.
 */
1224 static void wl1271_recovery_work(struct work_struct *work)
1227 container_of(work, struct wl1271, recovery_work);
1228 struct wl12xx_vif *wlvif;
1229 struct ieee80211_vif *vif;
1231 mutex_lock(&wl->mutex);
/* nothing to recover if not fully up, or in PLT mode */
1233 if (wl->state != WL1271_STATE_ON || wl->plt)
1236 /* Avoid a recursive recovery */
1237 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
1239 wl12xx_read_fwlog_panic(wl);
1241 wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x",
1242 wl->chip.fw_ver_str, wl1271_read32(wl, SCR_PAD4));
/* debugging aid: crash the kernel on unintended recoveries */
1244 BUG_ON(bug_on_recovery &&
1245 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
1248 * Advance security sequence number to overcome potential progress
1249 * in the firmware during recovery. This doens't hurt if the network is
1252 wl12xx_for_each_wlvif(wl, wlvif) {
1253 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
1254 test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1255 wlvif->tx_security_seq +=
1256 WL1271_TX_SQN_POST_RECOVERY_PADDING;
1259 /* Prevent spurious TX during FW restart */
1260 ieee80211_stop_queues(wl->hw);
1262 if (wl->sched_scanning) {
1263 ieee80211_sched_scan_stopped(wl->hw);
1264 wl->sched_scanning = false;
1267 /* reboot the chipset */
1268 while (!list_empty(&wl->wlvif_list)) {
1269 wlvif = list_first_entry(&wl->wlvif_list,
1270 struct wl12xx_vif, list);
1271 vif = wl12xx_wlvif_to_vif(wlvif);
1272 __wl1271_op_remove_interface(wl, vif, false);
/* op_stop takes wl->mutex itself, so drop it around the call */
1274 mutex_unlock(&wl->mutex);
1275 wl1271_op_stop(wl->hw);
1277 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
1279 ieee80211_restart_hw(wl->hw);
1282 * Its safe to enable TX now - the queues are stopped after a request
1283 * to restart the HW.
1285 ieee80211_wake_queues(wl->hw);
1288 mutex_unlock(&wl->mutex);
/* Wake the firmware from ELP by writing WAKE_UP to the ELP control
 * register. */
1291 static void wl1271_fw_wakeup(struct wl1271 *wl)
1295 elp_reg = ELPCTRL_WAKE_UP;
1296 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
/*
 * Allocate the per-device FW status and TX result buffers.
 * On tx_res_if allocation failure, fw_status is freed again.
 * NOTE(review): fw_status is not visibly reset to NULL on that path —
 * confirm the (missing) surrounding lines handle this.
 */
1299 static int wl1271_setup(struct wl1271 *wl)
1301 wl->fw_status = kmalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1305 wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1306 if (!wl->tx_res_if) {
1307 kfree(wl->fw_status);
/*
 * Power-on sequence: delay, assert power, delay, reset the IO layer,
 * select the DOWN partition for boot, then wake the FW out of ELP.
 */
1314 static int wl12xx_set_power_on(struct wl1271 *wl)
1318 msleep(WL1271_PRE_POWER_ON_SLEEP);
1319 ret = wl1271_power_on(wl);
1322 msleep(WL1271_POWER_ON_SLEEP);
1323 wl1271_io_reset(wl);
/* boot-time register access goes through the DOWN partition */
1326 wl1271_set_partition(wl, &wl12xx_part_table[PART_DOWN]);
1328 /* ELP module wake up */
1329 wl1271_fw_wakeup(wl);
/*
 * Power the chip on, detect the chip revision, apply per-revision
 * quirks, allocate runtime buffers (wl1271_setup), fetch the firmware
 * image, and fall back to the filesystem NVS if none was supplied.
 */
1335 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1339 ret = wl12xx_set_power_on(wl);
1344 * For wl127x based devices we could use the default block
1345 * size (512 bytes), but due to a bug in the sdio driver, we
1346 * need to set it explicitly after the chip is powered on. To
1347 * simplify the code and since the performance impact is
1348 * negligible, we use the same block size for all different
1351 if (!wl1271_set_block_size(wl))
1352 wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
1354 switch (wl->chip.id) {
1355 case CHIP_ID_1271_PG10:
1356 wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
1359 ret = wl1271_setup(wl);
1362 wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
1365 case CHIP_ID_1271_PG20:
1366 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
1369 ret = wl1271_setup(wl);
1372 wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
1375 case CHIP_ID_1283_PG20:
1376 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)",
1379 ret = wl1271_setup(wl);
/* 1283 PG10 and anything else is unsupported */
1383 case CHIP_ID_1283_PG10:
1385 wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
1390 ret = wl12xx_fetch_firmware(wl, plt);
1394 /* No NVS from netlink, try to get it from the filesystem */
1395 if (wl->nvs == NULL) {
1396 ret = wl1271_fetch_nvs(wl);
/*
 * Bring the chip up in PLT (production-line test) mode: wake the chip,
 * boot the firmware and run PLT init, retrying up to
 * WL1271_BOOT_RETRIES times.  Publishes hw/fw version info to wiphy
 * on success; powers off and reports failure after the retries run out.
 */
1405 int wl1271_plt_start(struct wl1271 *wl)
1407 int retries = WL1271_BOOT_RETRIES;
1408 struct wiphy *wiphy = wl->hw->wiphy;
1411 mutex_lock(&wl->mutex);
1413 wl1271_notice("power up");
/* PLT may only be entered from the OFF state */
1415 if (wl->state != WL1271_STATE_OFF) {
1416 wl1271_error("cannot go into PLT state because not "
1417 "in off state: %d", wl->state);
1424 ret = wl12xx_chip_wakeup(wl, true);
1428 ret = wl1271_boot(wl);
1432 ret = wl1271_plt_init(wl);
1437 wl->state = WL1271_STATE_ON;
1438 wl1271_notice("firmware booted in PLT mode (%s)",
1439 wl->chip.fw_ver_str);
1441 /* update hw/fw version info in wiphy struct */
1442 wiphy->hw_version = wl->chip.id;
1443 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1444 sizeof(wiphy->fw_version));
/* retry path: quiesce IRQs/deferred work before powering off */
1449 mutex_unlock(&wl->mutex);
1450 /* Unlocking the mutex in the middle of handling is
1451 inherently unsafe. In this case we deem it safe to do,
1452 because we need to let any possibly pending IRQ out of
1453 the system (and while we are WL1271_STATE_OFF the IRQ
1454 work function will not do anything.) Also, any other
1455 possible concurrent operations will fail due to the
1456 current state, hence the wl1271 struct should be safe. */
1457 wl1271_disable_interrupts(wl);
1458 wl1271_flush_deferred_work(wl);
1459 cancel_work_sync(&wl->netstack_work);
1460 mutex_lock(&wl->mutex);
1462 wl1271_power_off(wl);
1465 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1466 WL1271_BOOT_RETRIES);
1468 mutex_unlock(&wl->mutex);
/*
 * Leave PLT mode and power the chip down.
 * Interrupts are disabled before the state check so the IRQ handler
 * cannot race with the shutdown; pending works are flushed/cancelled
 * with the mutex dropped, then the device is powered off.
 */
1473 int wl1271_plt_stop(struct wl1271 *wl)
1477 wl1271_notice("power down");
1480 * Interrupts must be disabled before setting the state to OFF.
1481 * Otherwise, the interrupt handler might be called and exit without
1482 * reading the interrupt status.
1484 wl1271_disable_interrupts(wl);
1485 mutex_lock(&wl->mutex);
/* not in PLT state: re-balance the interrupt disable and bail out */
1487 mutex_unlock(&wl->mutex);
1490 * This will not necessarily enable interrupts as interrupts
1491 * may have been disabled when op_stop was called. It will,
1492 * however, balance the above call to disable_interrupts().
1494 wl1271_enable_interrupts(wl);
1496 wl1271_error("cannot power down because not in PLT "
1497 "state: %d", wl->state);
1502 mutex_unlock(&wl->mutex);
/* cancel all deferred work outside the mutex to avoid deadlock
 * with work functions that take wl->mutex themselves */
1504 wl1271_flush_deferred_work(wl);
1505 cancel_work_sync(&wl->netstack_work);
1506 cancel_work_sync(&wl->recovery_work);
1507 cancel_delayed_work_sync(&wl->elp_work);
1508 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1510 mutex_lock(&wl->mutex);
1511 wl1271_power_off(wl);
1513 wl->state = WL1271_STATE_OFF;
1516 mutex_unlock(&wl->mutex);
/*
 * mac80211 .tx callback. Maps the skb to a driver queue and an hlid
 * (host link id), enqueues it under wl_lock, applies the high-watermark
 * backpressure by stopping the mac80211 queue, and kicks tx_work unless
 * the firmware TX path is busy or TX is already pending.
 * Runs in atomic context — only spinlocks, no sleeping.
 */
1522 static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1524 struct wl1271 *wl = hw->priv;
1525 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1526 struct ieee80211_vif *vif = info->control.vif;
1527 struct wl12xx_vif *wlvif = NULL;
1528 unsigned long flags;
1533 wlvif = wl12xx_vif_to_data(vif);
1535 mapping = skb_get_queue_mapping(skb);
1536 q = wl1271_tx_get_queue(mapping);
1538 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
1540 spin_lock_irqsave(&wl->wl_lock, flags);
1542 /* queue the packet */
/* drop frames with no valid link (invalid hlid, or a link that is
 * no longer mapped on this vif) instead of queuing them */
1543 if (hlid == WL12XX_INVALID_LINK_ID ||
1544 (wlvif && !test_bit(hlid, wlvif->links_map))) {
1545 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1546 ieee80211_free_txskb(hw, skb);
1550 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1552 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1554 wl->tx_queue_count[q]++;
1557 * The workqueue is slow to process the tx_queue and we need stop
1558 * the queue here, otherwise the queue will get too long.
1560 if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
1561 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1562 ieee80211_stop_queue(wl->hw, mapping);
1563 set_bit(q, &wl->stopped_queues_map);
1567 * The chip specific setup must run before the first TX packet -
1568 * before that, the tx_work will not be initialized!
1571 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1572 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1573 ieee80211_queue_work(wl->hw, &wl->tx_work);
1576 spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * Queue the pre-allocated dummy packet requested by the firmware
 * (used when the FW is low on RX memory blocks). Idempotent: a second
 * request while one is pending is a no-op. If the FW TX path is idle
 * the packet is sent immediately via wl1271_tx_work_locked().
 */
1579 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1581 unsigned long flags;
1584 /* no need to queue a new dummy packet if one is already pending */
1585 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1588 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
/* account for the dummy packet under wl_lock so the TX accounting
 * stays consistent with op_tx */
1590 spin_lock_irqsave(&wl->wl_lock, flags);
1591 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1592 wl->tx_queue_count[q]++;
1593 spin_unlock_irqrestore(&wl->wl_lock, flags);
1595 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1596 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1597 wl1271_tx_work_locked(wl);
1600 * If the FW TX is busy, TX work will be scheduled by the threaded
1601 * interrupt handler function
1607 * The size of the dummy packet should be at least 1400 bytes. However, in
1608 * order to minimize the number of bus transactions, aligning it to 512 bytes
1609 * boundaries could be beneficial, performance wise
/* 1400 rounded up to the next 512-byte boundary -> 1536 bytes */
1611 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
/*
 * Allocate and pre-format the dummy packet skb: a zeroed Null-Data
 * (to-DS) 802.11 frame padded to TOTAL_TX_DUMMY_PACKET_SIZE, with
 * headroom reserved for the TX HW descriptor. Returns NULL on OOM.
 */
1613 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1615 struct sk_buff *skb;
1616 struct ieee80211_hdr_3addr *hdr;
1617 unsigned int dummy_packet_size;
/* payload = total size minus HW descriptor and 802.11 header */
1619 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1620 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1622 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1624 wl1271_warning("Failed to allocate a dummy packet skb");
1628 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1630 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1631 memset(hdr, 0, sizeof(*hdr));
1632 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1633 IEEE80211_STYPE_NULLFUNC |
1634 IEEE80211_FCTL_TODS);
1636 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1638 /* Dummy packets require the TID to be management */
1639 skb->priority = WL1271_TID_MGMT;
1641 /* Initialize all fields that might be used */
1642 skb_set_queue_mapping(skb, 0);
1643 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
/*
 * Prepare a STA vif for system suspend: if associated, wake the chip
 * out of ELP and program the suspend-time wake-up conditions
 * (event type and listen interval from wl->conf.conn).
 * Unassociated vifs are skipped.
 */
1650 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1651 struct wl12xx_vif *wlvif)
1655 mutex_lock(&wl->mutex);
1657 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1660 ret = wl1271_ps_elp_wakeup(wl);
1664 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1665 wl->conf.conn.suspend_wake_up_event,
1666 wl->conf.conn.suspend_listen_interval);
1669 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1672 wl1271_ps_elp_sleep(wl);
1675 mutex_unlock(&wl->mutex);
/*
 * Prepare an AP vif for system suspend: if the AP is started, wake the
 * chip and enable beacon filtering so beacons don't wake the host.
 */
1680 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1681 struct wl12xx_vif *wlvif)
1685 mutex_lock(&wl->mutex);
1687 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1690 ret = wl1271_ps_elp_wakeup(wl);
1694 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1696 wl1271_ps_elp_sleep(wl);
1698 mutex_unlock(&wl->mutex);
/*
 * Dispatch suspend configuration by bss type; other types (e.g. IBSS)
 * need no suspend-time configuration.
 */
1703 static int wl1271_configure_suspend(struct wl1271 *wl,
1704 struct wl12xx_vif *wlvif)
1706 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1707 return wl1271_configure_suspend_sta(wl, wlvif);
1708 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1709 return wl1271_configure_suspend_ap(wl, wlvif);
/*
 * Undo the suspend-time configuration on resume: for STA, restore the
 * normal (non-suspend) wake-up conditions; for AP, turn beacon
 * filtering back off. Other bss types are ignored. Errors are logged
 * but not propagated (void return — resume is best-effort).
 */
1713 static void wl1271_configure_resume(struct wl1271 *wl,
1714 struct wl12xx_vif *wlvif)
1717 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1718 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1720 if ((!is_ap) && (!is_sta))
1723 mutex_lock(&wl->mutex);
1724 ret = wl1271_ps_elp_wakeup(wl);
1729 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1730 wl->conf.conn.wake_up_event,
1731 wl->conf.conn.listen_interval);
1734 wl1271_error("resume: wake up conditions failed: %d",
1738 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1741 wl1271_ps_elp_sleep(wl);
1743 mutex_unlock(&wl->mutex);
/*
 * mac80211 .suspend callback (WoWLAN). Flushes TX, configures every
 * vif for suspend, then flushes remaining works. Interrupts are
 * disabled/re-enabled around setting WL1271_FLAG_SUSPENDED so that no
 * new threaded-irq work can be queued while suspended.
 */
1746 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1747 struct cfg80211_wowlan *wow)
1749 struct wl1271 *wl = hw->priv;
1750 struct wl12xx_vif *wlvif;
1753 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
/* driver only supports the "any" wowlan trigger */
1754 WARN_ON(!wow || !wow->any);
1756 wl1271_tx_flush(wl);
1758 wl->wow_enabled = true;
1759 wl12xx_for_each_wlvif(wl, wlvif) {
1760 ret = wl1271_configure_suspend(wl, wlvif);
1762 wl1271_warning("couldn't prepare device to suspend");
1766 /* flush any remaining work */
1767 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1770 * disable and re-enable interrupts in order to flush
1773 wl1271_disable_interrupts(wl);
1776 * set suspended flag to avoid triggering a new threaded_irq
1777 * work. no need for spinlock as interrupts are disabled.
1779 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1781 wl1271_enable_interrupts(wl);
1782 flush_work(&wl->tx_work);
1783 flush_delayed_work(&wl->elp_work);
/*
 * mac80211 .resume callback. Clears the SUSPENDED flag under wl_lock
 * and, if irq work was postponed during suspend (PENDING_WORK set),
 * runs it directly before re-enabling interrupts. Finally restores the
 * per-vif wake-up configuration and clears wow_enabled.
 */
1788 static int wl1271_op_resume(struct ieee80211_hw *hw)
1790 struct wl1271 *wl = hw->priv;
1791 struct wl12xx_vif *wlvif;
1792 unsigned long flags;
1793 bool run_irq_work = false;
1795 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1797 WARN_ON(!wl->wow_enabled);
1800 * re-enable irq_work enqueuing, and call irq_work directly if
1801 * there is a pending work.
1803 spin_lock_irqsave(&wl->wl_lock, flags);
1804 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1805 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1806 run_irq_work = true;
1807 spin_unlock_irqrestore(&wl->wl_lock, flags);
1810 wl1271_debug(DEBUG_MAC80211,
1811 "run postponed irq_work directly");
1813 wl1271_enable_interrupts(wl);
1815 wl12xx_for_each_wlvif(wl, wlvif) {
1816 wl1271_configure_resume(wl, wlvif);
1818 wl->wow_enabled = false;
/*
 * mac80211 .start callback. Intentionally a no-op: the hardware boot is
 * deferred to add_interface because the firmware needs the MAC address
 * before download (see the comment below).
 */
1824 static int wl1271_op_start(struct ieee80211_hw *hw)
1826 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1829 * We have to delay the booting of the hardware because
1830 * we need to know the local MAC address before downloading and
1831 * initializing the firmware. The MAC address cannot be changed
1832 * after boot, and without the proper MAC address, the firmware
1833 * will not function properly.
1835 * The MAC address is first known when the corresponding interface
1836 * is added. That is where we will initialize the hardware.
/*
 * mac80211 .stop callback. Orderly shutdown:
 *  1. disable interrupts, then check/transition state to OFF so the
 *     IRQ handler and work functions become no-ops;
 *  2. cancel/flush all deferred work with the mutex dropped;
 *  3. reset TX (notifying mac80211 of pending frames), power off, and
 *     reset all per-device bookkeeping and free per-boot allocations.
 */
1842 static void wl1271_op_stop(struct ieee80211_hw *hw)
1844 struct wl1271 *wl = hw->priv;
1847 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1850 * Interrupts must be disabled before setting the state to OFF.
1851 * Otherwise, the interrupt handler might be called and exit without
1852 * reading the interrupt status.
1854 wl1271_disable_interrupts(wl);
1855 mutex_lock(&wl->mutex);
1856 if (wl->state == WL1271_STATE_OFF) {
1857 mutex_unlock(&wl->mutex);
1860 * This will not necessarily enable interrupts as interrupts
1861 * may have been disabled when op_stop was called. It will,
1862 * however, balance the above call to disable_interrupts().
1864 wl1271_enable_interrupts(wl);
1869 * this must be before the cancel_work calls below, so that the work
1870 * functions don't perform further work.
1872 wl->state = WL1271_STATE_OFF;
1873 mutex_unlock(&wl->mutex);
/* cancel works outside the mutex — work fns may take wl->mutex */
1875 wl1271_flush_deferred_work(wl);
1876 cancel_delayed_work_sync(&wl->scan_complete_work);
1877 cancel_work_sync(&wl->netstack_work);
1878 cancel_work_sync(&wl->tx_work);
1879 cancel_delayed_work_sync(&wl->elp_work);
1880 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1882 /* let's notify MAC80211 about the remaining pending TX frames */
1883 wl12xx_tx_reset(wl, true);
1884 mutex_lock(&wl->mutex);
1886 wl1271_power_off(wl);
/* reset all soft state to its boot-time defaults */
1888 wl->band = IEEE80211_BAND_2GHZ;
1891 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1892 wl->tx_blocks_available = 0;
1893 wl->tx_allocated_blocks = 0;
1894 wl->tx_results_count = 0;
1895 wl->tx_packets_count = 0;
1896 wl->time_offset = 0;
1897 wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
1898 wl->ap_fw_ps_map = 0;
1900 wl->sched_scanning = false;
1901 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1902 memset(wl->links_map, 0, sizeof(wl->links_map));
1903 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1904 wl->active_sta_count = 0;
1906 /* The system link is always allocated */
1907 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1910 * this is performed after the cancel_work calls and the associated
1911 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1912 * get executed before all these vars have been reset.
1916 wl->tx_blocks_freed = 0;
1918 for (i = 0; i < NUM_TX_QUEUES; i++) {
1919 wl->tx_pkts_freed[i] = 0;
1920 wl->tx_allocated_pkts[i] = 0;
1923 wl1271_debugfs_reset(wl);
/* free the per-boot buffers allocated in wl1271_setup() */
1925 kfree(wl->fw_status);
1926 wl->fw_status = NULL;
1927 kfree(wl->tx_res_if);
1928 wl->tx_res_if = NULL;
1929 kfree(wl->target_mem_map);
1930 wl->target_mem_map = NULL;
1932 mutex_unlock(&wl->mutex);
/*
 * Allocate a free rate-policy slot from the device-wide bitmap and
 * return its index via @idx; fails when all slots are in use.
 */
1935 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
1937 u8 policy = find_first_zero_bit(wl->rate_policies_map,
1938 WL12XX_MAX_RATE_POLICIES);
1939 if (policy >= WL12XX_MAX_RATE_POLICIES)
1942 __set_bit(policy, wl->rate_policies_map);
/*
 * Release a rate-policy slot back to the bitmap and invalidate the
 * caller's index (set to WL12XX_MAX_RATE_POLICIES). WARNs on an
 * already-invalid index instead of corrupting the bitmap.
 */
1947 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
1949 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
1952 __clear_bit(*idx, wl->rate_policies_map);
1953 *idx = WL12XX_MAX_RATE_POLICIES;
/*
 * Map a vif's bss type (and P2P flag, per the branches) to the firmware
 * role constant. Returns WL12XX_INVALID_ROLE_TYPE for unknown types.
 */
1956 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1958 switch (wlvif->bss_type) {
1959 case BSS_TYPE_AP_BSS:
1961 return WL1271_ROLE_P2P_GO;
1963 return WL1271_ROLE_AP;
1965 case BSS_TYPE_STA_BSS:
1967 return WL1271_ROLE_P2P_CL;
1969 return WL1271_ROLE_STA;
1972 return WL1271_ROLE_IBSS;
1975 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
1977 return WL12XX_INVALID_ROLE_TYPE;
/*
 * Initialize the per-vif driver data: derive the bss type from the
 * mac80211 interface type, invalidate role/link ids, allocate the rate
 * policies appropriate for STA/IBSS vs AP, seed rates/beacon interval
 * with defaults, copy the globally-configured band/channel/power level,
 * and set up the vif's works, list head and rx-streaming timer.
 */
1980 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
1982 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
1985 /* clear everything but the persistent data */
1986 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
1988 switch (ieee80211_vif_type_p2p(vif)) {
1989 case NL80211_IFTYPE_P2P_CLIENT:
1992 case NL80211_IFTYPE_STATION:
1993 wlvif->bss_type = BSS_TYPE_STA_BSS;
1995 case NL80211_IFTYPE_ADHOC:
1996 wlvif->bss_type = BSS_TYPE_IBSS;
1998 case NL80211_IFTYPE_P2P_GO:
2001 case NL80211_IFTYPE_AP:
2002 wlvif->bss_type = BSS_TYPE_AP_BSS;
2005 wlvif->bss_type = MAX_BSS_TYPE;
2009 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2010 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2011 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2013 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2014 wlvif->bss_type == BSS_TYPE_IBSS) {
2015 /* init sta/ibss data */
2016 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2017 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2018 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2019 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
/* AP vifs get mgmt/bcast policies plus one per access category */
2022 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2023 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2024 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2025 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2026 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2027 wl12xx_allocate_rate_policy(wl,
2028 &wlvif->ap.ucast_rate_idx[i]);
2031 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2032 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2033 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2034 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2035 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2036 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2039 * mac80211 configures some values globally, while we treat them
2040 * per-interface. thus, on init, we have to copy them from wl
2042 wlvif->band = wl->band;
2043 wlvif->channel = wl->channel;
2044 wlvif->power_level = wl->power_level;
2046 INIT_WORK(&wlvif->rx_streaming_enable_work,
2047 wl1271_rx_streaming_enable_work);
2048 INIT_WORK(&wlvif->rx_streaming_disable_work,
2049 wl1271_rx_streaming_disable_work);
2050 INIT_LIST_HEAD(&wlvif->list);
2052 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2053 (unsigned long) wlvif);
/*
 * Boot the firmware for normal (non-PLT) operation with up to
 * WL1271_BOOT_RETRIES attempts: chip wakeup, boot, hw init. Uses the
 * same unlock-drain-IRQs-power-off retry pattern as wl1271_plt_start.
 * On success publishes hw/fw versions in wiphy, prunes 5GHz channels
 * when the NVS says 11a is unsupported, and sets state to ON.
 * Returns true iff the firmware booted.
 */
2057 static bool wl12xx_init_fw(struct wl1271 *wl)
2059 int retries = WL1271_BOOT_RETRIES;
2060 bool booted = false;
2061 struct wiphy *wiphy = wl->hw->wiphy;
2066 ret = wl12xx_chip_wakeup(wl, false);
2070 ret = wl1271_boot(wl);
2074 ret = wl1271_hw_init(wl);
/* per-attempt failure path: drain pending IRQs with the lock dropped */
2082 mutex_unlock(&wl->mutex);
2083 /* Unlocking the mutex in the middle of handling is
2084 inherently unsafe. In this case we deem it safe to do,
2085 because we need to let any possibly pending IRQ out of
2086 the system (and while we are WL1271_STATE_OFF the IRQ
2087 work function will not do anything.) Also, any other
2088 possible concurrent operations will fail due to the
2089 current state, hence the wl1271 struct should be safe. */
2090 wl1271_disable_interrupts(wl);
2091 wl1271_flush_deferred_work(wl);
2092 cancel_work_sync(&wl->netstack_work);
2093 mutex_lock(&wl->mutex);
2095 wl1271_power_off(wl);
2099 wl1271_error("firmware boot failed despite %d retries",
2100 WL1271_BOOT_RETRIES);
2104 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2106 /* update hw/fw version info in wiphy struct */
2107 wiphy->hw_version = wl->chip.id;
/* NOTE(review): strncpy does not guarantee NUL-termination if the fw
 * version string fills the buffer — confirm fw_ver_str length bound */
2108 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2109 sizeof(wiphy->fw_version));
2112 * Now we know if 11a is supported (info from the NVS), so disable
2113 * 11a channels if not supported
2115 if (!wl->enable_11a)
2116 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2118 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2119 wl->enable_11a ? "" : "not ");
2121 wl->state = WL1271_STATE_ON;
/* True when the vif's device role has a valid (started) device hlid. */
2126 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2128 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2132 * Check whether a fw switch (i.e. moving from one loaded
2133 * fw to another) is needed. This function is also responsible
2134 * for updating wl->last_vif_count, so it must be called before
2135 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2138 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2139 struct vif_counter_data vif_counter_data,
2142 enum wl12xx_fw_type current_fw = wl->fw_type;
2143 u8 vif_count = vif_counter_data.counter;
/* never switch fw in the middle of an interface-type change */
2145 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2148 /* increase the vif count if this is a new vif */
2149 if (add && !vif_counter_data.cur_vif_running)
2152 wl->last_vif_count = vif_count;
2154 /* no need for fw change if the device is OFF */
2155 if (wl->state == WL1271_STATE_OFF)
/* switch needed when vif count and loaded fw type disagree:
 * >1 vif needs the multi-role fw, <=1 vif the single-role fw */
2158 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2160 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2167 * Enter "forced psm". Make sure the sta is in psm against the ap,
2168 * to make the fw switch a bit more disconnection-persistent.
2170 static void wl12xx_force_active_psm(struct wl1271 *wl)
2172 struct wl12xx_vif *wlvif;
/* applies to every STA vif on the device */
2174 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2175 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
/*
 * mac80211 .add_interface callback. Initializes the per-vif data,
 * resolves the firmware role type, triggers a fw switch via recovery
 * when the vif count requires a different firmware, boots the firmware
 * on first use (the MAC address becomes known here — see op_start),
 * enables the device + main roles in the fw and runs the vif-specific
 * init before adding the vif to wl->wlvif_list.
 */
2179 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2180 struct ieee80211_vif *vif)
2182 struct wl1271 *wl = hw->priv;
2183 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2184 struct vif_counter_data vif_count;
2187 bool booted = false;
2189 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2190 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2192 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2193 ieee80211_vif_type_p2p(vif), vif->addr);
2195 wl12xx_get_vif_count(hw, vif, &vif_count);
2197 mutex_lock(&wl->mutex);
2198 ret = wl1271_ps_elp_wakeup(wl);
2203 * in some very corner case HW recovery scenarios its possible to
2204 * get here before __wl1271_op_remove_interface is complete, so
2205 * opt out if that is the case.
2207 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2208 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2214 ret = wl12xx_init_vif_data(wl, vif);
2219 role_type = wl12xx_get_role_type(wl, wlvif);
2220 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
/* fw type mismatch: run a synchronous, intended recovery to reload */
2225 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2226 wl12xx_force_active_psm(wl);
2227 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2228 mutex_unlock(&wl->mutex);
2229 wl1271_recovery_work(&wl->recovery_work);
2234 * TODO: after the nvs issue will be solved, move this block
2235 * to start(), and make sure here the driver is ON.
2237 if (wl->state == WL1271_STATE_OFF) {
2239 * we still need this in order to configure the fw
2240 * while uploading the nvs
2242 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2244 booted = wl12xx_init_fw(wl);
2251 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2252 wlvif->bss_type == BSS_TYPE_IBSS) {
2254 * The device role is a special role used for
2255 * rx and tx frames prior to association (as
2256 * the STA role can get packets only from
2257 * its associated bssid)
2259 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2261 &wlvif->dev_role_id);
2266 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2267 role_type, &wlvif->role_id);
2271 ret = wl1271_init_vif_specific(wl, vif);
2275 list_add(&wlvif->list, &wl->wlvif_list);
2276 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2278 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2283 wl1271_ps_elp_sleep(wl);
2285 mutex_unlock(&wl->mutex);
/*
 * Core interface-removal path (caller holds wl->mutex). Idles any scan
 * running on this vif, disables the fw roles (unless a HW recovery is
 * in progress, in which case the fw state is gone anyway), invalidates
 * all hlids, frees the vif's rate policies, resets its TX state and
 * unlinks it from wl->wlvif_list. The rx-streaming timer/works are
 * cancelled with the mutex temporarily dropped to avoid deadlock.
 * May run twice for the same vif because of hardware recovery.
 */
2290 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2291 struct ieee80211_vif *vif,
2292 bool reset_tx_queues)
2294 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2297 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2299 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2302 /* because of hardware recovery, we may get here twice */
2303 if (wl->state != WL1271_STATE_ON)
2306 wl1271_info("down");
2308 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2309 wl->scan_vif == vif) {
2311 * Rearm the tx watchdog just before idling scan. This
2312 * prevents just-finished scans from triggering the watchdog
2314 wl12xx_rearm_tx_watchdog_locked(wl);
2316 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2317 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2318 wl->scan_vif = NULL;
2319 wl->scan.req = NULL;
/* 'true' = the scan was aborted, not completed */
2320 ieee80211_scan_completed(wl->hw, true);
2323 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2324 /* disable active roles */
2325 ret = wl1271_ps_elp_wakeup(wl);
2329 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2330 wlvif->bss_type == BSS_TYPE_IBSS) {
2331 if (wl12xx_dev_role_started(wlvif))
2332 wl12xx_stop_dev(wl, wlvif);
2334 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2339 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2343 wl1271_ps_elp_sleep(wl);
2346 /* clear all hlids (except system_hlid) */
2347 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2349 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2350 wlvif->bss_type == BSS_TYPE_IBSS) {
2351 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2352 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2353 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2354 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2356 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2357 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2358 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2359 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2360 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2361 wl12xx_free_rate_policy(wl,
2362 &wlvif->ap.ucast_rate_idx[i]);
2365 wl12xx_tx_reset_wlvif(wl, wlvif);
2366 wl1271_free_ap_keys(wl, wlvif);
/* drop the cached TX round-robin cursor if it points at this vif */
2367 if (wl->last_wlvif == wlvif)
2368 wl->last_wlvif = NULL;
2369 list_del(&wlvif->list);
2370 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2371 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2372 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2374 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
/* timer/work cancellation must not hold wl->mutex (their handlers
 * may take it), so drop and re-take it around these calls */
2379 mutex_unlock(&wl->mutex);
2381 del_timer_sync(&wlvif->rx_streaming_timer);
2382 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2383 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2385 mutex_lock(&wl->mutex);
/*
 * mac80211 .remove_interface callback. Verifies the vif is still
 * tracked (it may have vanished during hardware recovery), removes it,
 * and — if the lowered vif count requires a different firmware — queues
 * an intended recovery to switch fw instead of cancelling recovery work.
 */
2388 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2389 struct ieee80211_vif *vif)
2391 struct wl1271 *wl = hw->priv;
2392 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2393 struct wl12xx_vif *iter;
2394 struct vif_counter_data vif_count;
2395 bool cancel_recovery = true;
2397 wl12xx_get_vif_count(hw, vif, &vif_count);
2398 mutex_lock(&wl->mutex);
2400 if (wl->state == WL1271_STATE_OFF ||
2401 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2405 * wl->vif can be null here if someone shuts down the interface
2406 * just when hardware recovery has been started.
2408 wl12xx_for_each_wlvif(wl, iter) {
2412 __wl1271_op_remove_interface(wl, vif, true);
/* if the loop ended without finding the vif, something is off */
2415 WARN_ON(iter != wlvif);
2416 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2417 wl12xx_force_active_psm(wl);
2418 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2419 wl12xx_queue_recovery_work(wl);
2420 cancel_recovery = false;
2423 mutex_unlock(&wl->mutex);
2424 if (cancel_recovery)
2425 cancel_work_sync(&wl->recovery_work);
/*
 * mac80211 .change_interface callback: implemented as remove + re-add
 * with the new type. VIF_CHANGE_IN_PROGRESS is held across the pair so
 * wl12xx_need_fw_change() won't trigger a firmware switch mid-change.
 */
2428 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2429 struct ieee80211_vif *vif,
2430 enum nl80211_iftype new_type, bool p2p)
2432 struct wl1271 *wl = hw->priv;
2435 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2436 wl1271_op_remove_interface(hw, vif);
2438 vif->type = new_type;
2440 ret = wl1271_op_add_interface(hw, vif);
2442 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
/*
 * Issue a ROLE_START (STA or IBSS) "join". Note the documented side
 * effect: JOIN clears WPA/WPA2 keys in the chipset, so joining while
 * associated is only valid for roaming (new keys follow). After the
 * join, the keep-alive machinery must be fully re-armed in this exact
 * order: keep-alive mode, AID, NULL-data KLV template, template config.
 */
2446 static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2450 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2453 * One of the side effects of the JOIN command is that is clears
2454 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2455 * to a WPA/WPA2 access point will therefore kill the data-path.
2456 * Currently the only valid scenario for JOIN during association
2457 * is on roaming, in which case we will also be given new keys.
2458 * Keep the below message for now, unless it starts bothering
2459 * users who really like to roam a lot :)
2461 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2462 wl1271_info("JOIN while associated.");
2464 /* clear encryption type */
2465 wlvif->encryption_type = KEY_NONE;
2468 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2471 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2473 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2477 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2481 * The join command disable the keep-alive mode, shut down its process,
2482 * and also clear the template config, so we need to reset it all after
2483 * the join. The acx_aid starts the keep-alive process, and the order
2484 * of the commands below is relevant.
2486 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2490 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2494 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2498 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2499 CMD_TEMPL_KLV_IDX_NULL_DATA,
2500 ACX_KEEP_ALIVE_TPL_VALID);
/*
 * Leave the current BSS: abort any in-progress channel switch (telling
 * mac80211 it failed), stop the STA role, and reset the TX security
 * sequence counters so a fresh association starts from zero.
 */
2508 static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2512 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2513 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2515 wl12xx_cmd_stop_channel_switch(wl);
2516 ieee80211_chswitch_done(vif, false);
2519 /* to stop listening to a channel, we disconnect */
2520 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
2524 /* reset TX security counters on a clean disconnect */
2525 wlvif->tx_security_last_seq_lsb = 0;
2526 wlvif->tx_security_seq = 0;
/* Reset the vif's basic and full rate sets to the configured bitrate
 * mask for its current band. */
2532 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2534 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2535 wlvif->rate_set = wlvif->basic_rate_set;
/*
 * Transition a STA vif between idle and in-use. Going idle: stop the
 * device role (CROC) if started, restore minimum-rate policies,
 * invalidate the keep-alive template and clear IN_USE. Leaving idle:
 * stop any sched-scan (fw only supports it while idle), start the
 * device role and set IN_USE. No-op when already in the desired state.
 */
2538 static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2542 bool cur_idle = !test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
2544 if (idle == cur_idle)
2548 /* no need to croc if we weren't busy (e.g. during boot) */
2549 if (wl12xx_dev_role_started(wlvif)) {
2550 ret = wl12xx_stop_dev(wl, wlvif);
2555 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2556 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2559 ret = wl1271_acx_keep_alive_config(
2560 wl, wlvif, CMD_TEMPL_KLV_IDX_NULL_DATA,
2561 ACX_KEEP_ALIVE_TPL_INVALID);
2564 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
2566 /* The current firmware only supports sched_scan in idle */
2567 if (wl->sched_scanning) {
2568 wl1271_scan_sched_scan_stop(wl);
2569 ieee80211_sched_scan_stopped(wl->hw);
2572 ret = wl12xx_start_dev(wl, wlvif);
2575 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
/*
 * Apply a mac80211 config change to one vif:
 *  - CHANGE_CHANNEL: flush pending TX, adopt the new band/channel,
 *    refresh the basic-rate policy when unassociated and, if the device
 *    role is running and we're not idle, bounce it to ROC the new channel;
 *  - CHANGE_PS (non-AP only): enter forced/auto power-save when mac80211
 *    asks for PS while associated, or return to active mode;
 *  - power level: push a new TX power setting when it changed.
 */
2582 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2583 struct ieee80211_conf *conf, u32 changed)
2585 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2588 channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
2590 /* if the channel changes while joined, join again */
2591 if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
2592 ((wlvif->band != conf->channel->band) ||
2593 (wlvif->channel != channel))) {
2594 /* send all pending packets */
2595 wl1271_tx_work_locked(wl);
2596 wlvif->band = conf->channel->band;
2597 wlvif->channel = channel;
2601 * FIXME: the mac80211 should really provide a fixed
2602 * rate to use here. for now, just use the smallest
2603 * possible rate for the band as a fixed rate for
2604 * association frames and other control messages.
2606 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2607 wl1271_set_band_rate(wl, wlvif);
2610 wl1271_tx_min_rate_get(wl,
2611 wlvif->basic_rate_set);
2612 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2614 wl1271_warning("rate policy for channel "
2618 * change the ROC channel. do it only if we are
2619 * not idle. otherwise, CROC will be called
2622 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED,
2624 wl12xx_dev_role_started(wlvif) &&
2625 !(conf->flags & IEEE80211_CONF_IDLE)) {
/* stop + restart the device role to move the ROC channel */
2626 ret = wl12xx_stop_dev(wl, wlvif);
2630 ret = wl12xx_start_dev(wl, wlvif);
2637 if ((changed & IEEE80211_CONF_CHANGE_PS) && !is_ap) {
2639 if ((conf->flags & IEEE80211_CONF_PS) &&
2640 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
2641 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
/* forced_ps config selects full PSM over fw-managed auto PS */
2646 if (wl->conf.conn.forced_ps) {
2647 ps_mode = STATION_POWER_SAVE_MODE;
2648 ps_mode_str = "forced";
2650 ps_mode = STATION_AUTO_PS_MODE;
2651 ps_mode_str = "auto";
2654 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
2656 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
2659 wl1271_warning("enter %s ps failed %d",
2662 } else if (!(conf->flags & IEEE80211_CONF_PS) &&
2663 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
2665 wl1271_debug(DEBUG_PSM, "auto ps disabled");
2667 ret = wl1271_ps_set_mode(wl, wlvif,
2668 STATION_ACTIVE_MODE);
2670 wl1271_warning("exit auto ps failed %d", ret);
2674 if (conf->power_level != wlvif->power_level) {
2675 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
2679 wlvif->power_level = conf->power_level;
/*
 * mac80211 .config callback. Flushes TX before going idle (so deauth
 * frames reach the air), records band/channel/power globally even while
 * OFF, and — when the device is ON — wakes it and applies the change to
 * each vif via wl12xx_config_vif().
 */
2685 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2687 struct wl1271 *wl = hw->priv;
2688 struct wl12xx_vif *wlvif;
2689 struct ieee80211_conf *conf = &hw->conf;
2690 int channel, ret = 0;
2692 channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
2694 wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
2697 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
2699 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
2703 * mac80211 will go to idle nearly immediately after transmitting some
2704 * frames, such as the deauth. To make sure those frames reach the air,
2705 * wait here until the TX queue is fully flushed.
2707 if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
2708 (conf->flags & IEEE80211_CONF_IDLE))
2709 wl1271_tx_flush(wl);
2711 mutex_lock(&wl->mutex);
2713 /* we support configuring the channel and band even while off */
2714 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2715 wl->band = conf->channel->band;
2716 wl->channel = channel;
2719 if (changed & IEEE80211_CONF_CHANGE_POWER)
2720 wl->power_level = conf->power_level;
2722 if (unlikely(wl->state == WL1271_STATE_OFF))
2725 ret = wl1271_ps_elp_wakeup(wl);
2729 /* configure each interface */
2730 wl12xx_for_each_wlvif(wl, wlvif) {
2731 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
2737 wl1271_ps_elp_sleep(wl);
2740 mutex_unlock(&wl->mutex);
/* Multicast filter snapshot built in prepare_multicast and consumed by
 * configure_filter (passed through the u64 multicast cookie). */
2745 struct wl1271_filter_params {
2748 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
/*
 * wl1271_op_prepare_multicast - mac80211 "prepare_multicast" callback.
 * Copies the hw multicast address list into a freshly allocated
 * wl1271_filter_params (GFP_ATOMIC: called in atomic context) and returns
 * it cast to u64 for configure_filter() to consume and free.
 * If the list exceeds ACX_MC_ADDRESS_GROUP_MAX, filtering is disabled
 * (fall back to accepting all multicast).
 */
2751 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
2752 struct netdev_hw_addr_list *mc_list)
2754 struct wl1271_filter_params *fp;
2755 struct netdev_hw_addr *ha;
2756 struct wl1271 *wl = hw->priv;
2758 if (unlikely(wl->state == WL1271_STATE_OFF))
2761 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
2763 wl1271_error("Out of memory setting filters.");
2767 /* update multicast filtering parameters */
2768 fp->mc_list_length = 0;
2769 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
2770 fp->enabled = false;
2773 netdev_hw_addr_list_for_each(ha, mc_list) {
2774 memcpy(fp->mc_list[fp->mc_list_length],
2775 ha->addr, ETH_ALEN);
2776 fp->mc_list_length++;
/* pointer smuggled through mac80211's u64 multicast cookie */
2780 return (u64)(unsigned long)fp;
/* RX filter flags this driver can honor; others are masked out in
 * configure_filter(). (Macro continuation lines elided in this excerpt.) */
2783 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
2786 FIF_BCN_PRBRESP_PROMISC | \
/*
 * wl1271_op_configure_filter - mac80211 "configure_filter" callback.
 * Applies the multicast address table (built by prepare_multicast(),
 * received here via the u64 @multicast cookie) to each non-AP vif.
 * FIF_ALLMULTI takes a separate acx call path (presumably an empty/
 * pass-all table — elided lines prevent confirming the arguments).
 * The fp cookie is expected to be freed on exit (kfree elided here).
 */
2790 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
2791 unsigned int changed,
2792 unsigned int *total, u64 multicast)
2794 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
2795 struct wl1271 *wl = hw->priv;
2796 struct wl12xx_vif *wlvif;
2800 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
2801 " total %x", changed, *total);
2803 mutex_lock(&wl->mutex);
/* clamp requested filters to what the hardware supports */
2805 *total &= WL1271_SUPPORTED_FILTERS;
2806 changed &= WL1271_SUPPORTED_FILTERS;
2808 if (unlikely(wl->state == WL1271_STATE_OFF))
2811 ret = wl1271_ps_elp_wakeup(wl);
2815 wl12xx_for_each_wlvif(wl, wlvif) {
2816 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
2817 if (*total & FIF_ALLMULTI)
2818 ret = wl1271_acx_group_address_tbl(wl, wlvif,
2822 ret = wl1271_acx_group_address_tbl(wl, wlvif,
2825 fp->mc_list_length);
2832 * the fw doesn't provide an api to configure the filters. instead,
2833 * the filters configuration is based on the active roles / ROC
2838 wl1271_ps_elp_sleep(wl);
2841 mutex_unlock(&wl->mutex);
/*
 * wl1271_record_ap_key - stash an AP key until the AP role is started.
 * Keys set before WLVIF_FLAG_AP_STARTED cannot go to firmware yet, so
 * they are recorded in wlvif->ap.recorded_keys[] and replayed later by
 * wl1271_ap_init_hwenc(). Rejects oversized keys and duplicate ids.
 * Returns 0 on success or a negative errno (error-return lines elided).
 */
2845 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2846 u8 id, u8 key_type, u8 key_size,
2847 const u8 *key, u8 hlid, u32 tx_seq_32,
2850 struct wl1271_ap_key *ap_key;
2853 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
2855 if (key_size > MAX_KEY_SIZE)
2859 * Find next free entry in ap_keys. Also check we are not replacing
2862 for (i = 0; i < MAX_NUM_KEYS; i++) {
2863 if (wlvif->ap.recorded_keys[i] == NULL)
2866 if (wlvif->ap.recorded_keys[i]->id == id) {
2867 wl1271_warning("trying to record key replacement");
/* i == MAX_NUM_KEYS means the table is full */
2872 if (i == MAX_NUM_KEYS)
2875 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
2880 ap_key->key_type = key_type;
2881 ap_key->key_size = key_size;
2882 memcpy(ap_key->key, key, key_size);
2883 ap_key->hlid = hlid;
2884 ap_key->tx_seq_32 = tx_seq_32;
2885 ap_key->tx_seq_16 = tx_seq_16;
2887 wlvif->ap.recorded_keys[i] = ap_key;
/*
 * wl1271_free_ap_keys - release all recorded (not-yet-applied) AP keys
 * and NULL out the table slots so they can be reused.
 */
2891 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2895 for (i = 0; i < MAX_NUM_KEYS; i++) {
2896 kfree(wlvif->ap.recorded_keys[i]);
2897 wlvif->ap.recorded_keys[i] = NULL;
/*
 * wl1271_ap_init_hwenc - replay keys recorded before AP start-up.
 * Called once the AP role is started: pushes every recorded key to the
 * firmware (broadcast keys get the bcast hlid), then configures the
 * default WEP key if any WEP key was added, and finally frees the
 * recorded-key table regardless of outcome.
 */
2901 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2904 struct wl1271_ap_key *key;
2905 bool wep_key_added = false;
2907 for (i = 0; i < MAX_NUM_KEYS; i++) {
2909 if (wlvif->ap.recorded_keys[i] == NULL)
2912 key = wlvif->ap.recorded_keys[i];
/* keys recorded without a station link go to the broadcast link */
2914 if (hlid == WL12XX_INVALID_LINK_ID)
2915 hlid = wlvif->ap.bcast_hlid;
2917 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
2918 key->id, key->key_type,
2919 key->key_size, key->key,
2920 hlid, key->tx_seq_32,
2925 if (key->key_type == KEY_WEP)
2926 wep_key_added = true;
2929 if (wep_key_added) {
2930 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
2931 wlvif->ap.bcast_hlid);
2937 wl1271_free_ap_keys(wl, wlvif);
/*
 * wl1271_set_key - common key add/remove path for AP and STA roles.
 * AP: before the AP is started, keys are recorded for later replay
 * (removals are silently "accepted"); afterwards they go straight to
 * firmware via wl1271_cmd_set_ap_key().
 * STA: handles GEM spare-block accounting, ignores unicast-key removal
 * (the fw clears those on the next join) and removals for an already
 * deleted hlid, and makes sure a WEP default key gets configured.
 * NOTE(review): the AP/STA branch structure relies on lines elided in
 * this excerpt; some early returns are not visible.
 */
2941 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2942 u16 action, u8 id, u8 key_type,
2943 u8 key_size, const u8 *key, u32 tx_seq_32,
2944 u16 tx_seq_16, struct ieee80211_sta *sta)
2947 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2950 struct wl1271_station *wl_sta;
/* pairwise key: use the station's link id; otherwise broadcast link */
2954 wl_sta = (struct wl1271_station *)sta->drv_priv;
2955 hlid = wl_sta->hlid;
2957 hlid = wlvif->ap.bcast_hlid;
2960 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
2962 * We do not support removing keys after AP shutdown.
2963 * Pretend we do to make mac80211 happy.
2965 if (action != KEY_ADD_OR_REPLACE)
2968 ret = wl1271_record_ap_key(wl, wlvif, id,
2970 key, hlid, tx_seq_32,
2973 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
2974 id, key_type, key_size,
2975 key, hlid, tx_seq_32,
2983 static const u8 bcast_addr[ETH_ALEN] = {
2984 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2988 * A STA set to GEM cipher requires 2 tx spare blocks.
2989 * Return to default value when GEM cipher key is removed
2991 if (key_type == KEY_GEM) {
2992 if (action == KEY_ADD_OR_REPLACE)
2993 wl->tx_spare_blocks = 2;
2994 else if (action == KEY_REMOVE)
2995 wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
2998 addr = sta ? sta->addr : bcast_addr;
3000 if (is_zero_ether_addr(addr)) {
3001 /* We dont support TX only encryption */
3005 /* The wl1271 does not allow to remove unicast keys - they
3006 will be cleared automatically on next CMD_JOIN. Ignore the
3007 request silently, as we dont want the mac80211 to emit
3008 an error message. */
3009 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3012 /* don't remove key if hlid was already deleted */
3013 if (action == KEY_REMOVE &&
3014 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3017 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3018 id, key_type, key_size,
3019 key, addr, tx_seq_32,
3024 /* the default WEP key needs to be configured at least once */
3025 if (key_type == KEY_WEP) {
3026 ret = wl12xx_cmd_set_default_wep_key(wl,
/*
 * wl1271_op_set_key - mac80211 "set_key" callback.
 * Maps the mac80211 cipher suite to the firmware key type (WEP/TKIP/
 * CCMP/GEM), computes the TX security sequence counters, and dispatches
 * SET_KEY / DISABLE_KEY to wl1271_set_key(). For STA vifs, a changed
 * unicast (or WEP) key type also triggers a rebuild of the ARP-response
 * template, whose payload depends on the encryption type.
 */
3037 static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3038 struct ieee80211_vif *vif,
3039 struct ieee80211_sta *sta,
3040 struct ieee80211_key_conf *key_conf)
3042 struct wl1271 *wl = hw->priv;
3043 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3049 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3051 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3052 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3053 key_conf->cipher, key_conf->keyidx,
3054 key_conf->keylen, key_conf->flags);
3055 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3057 mutex_lock(&wl->mutex);
3059 if (unlikely(wl->state == WL1271_STATE_OFF)) {
3064 ret = wl1271_ps_elp_wakeup(wl);
/* translate mac80211 cipher -> fw key type + per-cipher bookkeeping */
3068 switch (key_conf->cipher) {
3069 case WLAN_CIPHER_SUITE_WEP40:
3070 case WLAN_CIPHER_SUITE_WEP104:
3073 key_conf->hw_key_idx = key_conf->keyidx;
3075 case WLAN_CIPHER_SUITE_TKIP:
3076 key_type = KEY_TKIP;
3078 key_conf->hw_key_idx = key_conf->keyidx;
3079 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3080 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3082 case WLAN_CIPHER_SUITE_CCMP:
/* hw inserts the CCMP IV; ask mac80211 to leave space for it */
3085 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3086 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3087 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3089 case WL1271_CIPHER_SUITE_GEM:
3091 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3092 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3095 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3103 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3104 key_conf->keyidx, key_type,
3105 key_conf->keylen, key_conf->key,
3106 tx_seq_32, tx_seq_16, sta);
3108 wl1271_error("Could not add or replace key");
3113 * reconfiguring arp response if the unicast (or common)
3114 * encryption key type was changed
3116 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3117 (sta || key_type == KEY_WEP) &&
3118 wlvif->encryption_type != key_type) {
3119 wlvif->encryption_type = key_type;
3120 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3122 wl1271_warning("build arp rsp failed: %d", ret);
3129 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3130 key_conf->keyidx, key_type,
3131 key_conf->keylen, key_conf->key,
3134 wl1271_error("Could not remove key");
3140 wl1271_error("Unsupported key cmd 0x%x", cmd);
3146 wl1271_ps_elp_sleep(wl);
3149 mutex_unlock(&wl->mutex);
/*
 * wl1271_op_hw_scan - mac80211 "hw_scan" callback.
 * Starts a firmware scan for the first requested SSID. Refuses to scan
 * while any role is in ROC (remain-on-channel). When the chip is off,
 * -EBUSY cannot be returned since cfg80211 would then wait forever for
 * ieee80211_scan_completed() (see comment at 3174-3176; the actual
 * return value for that path is elided in this excerpt).
 */
3154 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3155 struct ieee80211_vif *vif,
3156 struct cfg80211_scan_request *req)
3158 struct wl1271 *wl = hw->priv;
3163 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3166 ssid = req->ssids[0].ssid;
3167 len = req->ssids[0].ssid_len;
3170 mutex_lock(&wl->mutex);
3172 if (wl->state == WL1271_STATE_OFF) {
3174 * We cannot return -EBUSY here because cfg80211 will expect
3175 * a call to ieee80211_scan_completed if we do - in this case
3176 * there won't be any call.
3182 ret = wl1271_ps_elp_wakeup(wl);
3186 /* fail if there is any role in ROC */
3187 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3188 /* don't allow scanning right now */
3193 ret = wl1271_scan(hw->priv, vif, ssid, len, req);
3195 wl1271_ps_elp_sleep(wl);
3197 mutex_unlock(&wl->mutex);
/*
 * wl1271_op_cancel_hw_scan - mac80211 "cancel_hw_scan" callback.
 * Stops an in-progress firmware scan, resets the local scan state,
 * reports an aborted scan to mac80211 (aborted=true), and rearms the
 * TX watchdog so a just-finished scan doesn't trip it. The completion
 * work is cancelled after dropping wl->mutex to avoid deadlock with
 * the work item itself.
 */
3202 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3203 struct ieee80211_vif *vif)
3205 struct wl1271 *wl = hw->priv;
3208 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3210 mutex_lock(&wl->mutex);
3212 if (wl->state == WL1271_STATE_OFF)
3215 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3218 ret = wl1271_ps_elp_wakeup(wl);
3222 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3223 ret = wl1271_scan_stop(wl);
3229 * Rearm the tx watchdog just before idling scan. This
3230 * prevents just-finished scans from triggering the watchdog
3232 wl12xx_rearm_tx_watchdog_locked(wl);
3234 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3235 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3236 wl->scan_vif = NULL;
3237 wl->scan.req = NULL;
3238 ieee80211_scan_completed(wl->hw, true);
3241 wl1271_ps_elp_sleep(wl);
3243 mutex_unlock(&wl->mutex);
/* must run unlocked: the work takes wl->mutex itself */
3245 cancel_delayed_work_sync(&wl->scan_complete_work);
/*
 * wl1271_op_sched_scan_start - mac80211 "sched_scan_start" callback.
 * Configures and starts a firmware-driven periodic (scheduled) scan and
 * marks wl->sched_scanning on success.
 */
3248 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3249 struct ieee80211_vif *vif,
3250 struct cfg80211_sched_scan_request *req,
3251 struct ieee80211_sched_scan_ies *ies)
3253 struct wl1271 *wl = hw->priv;
3254 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3257 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3259 mutex_lock(&wl->mutex);
3261 if (wl->state == WL1271_STATE_OFF) {
3266 ret = wl1271_ps_elp_wakeup(wl);
3270 ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies);
3274 ret = wl1271_scan_sched_scan_start(wl, wlvif);
3278 wl->sched_scanning = true;
3281 wl1271_ps_elp_sleep(wl);
3283 mutex_unlock(&wl->mutex);
/*
 * wl1271_op_sched_scan_stop - mac80211 "sched_scan_stop" callback.
 * Stops the firmware scheduled scan; a no-op when the chip is off.
 */
3287 static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3288 struct ieee80211_vif *vif)
3290 struct wl1271 *wl = hw->priv;
3293 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3295 mutex_lock(&wl->mutex);
3297 if (wl->state == WL1271_STATE_OFF)
3300 ret = wl1271_ps_elp_wakeup(wl);
3304 wl1271_scan_sched_scan_stop(wl);
3306 wl1271_ps_elp_sleep(wl);
3308 mutex_unlock(&wl->mutex);
/*
 * wl1271_op_set_frag_threshold - mac80211 "set_frag_threshold" callback.
 * Pushes the fragmentation threshold to firmware via an ACX command.
 */
3311 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3313 struct wl1271 *wl = hw->priv;
3316 mutex_lock(&wl->mutex);
3318 if (unlikely(wl->state == WL1271_STATE_OFF)) {
3323 ret = wl1271_ps_elp_wakeup(wl);
3327 ret = wl1271_acx_frag_threshold(wl, value);
3329 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3331 wl1271_ps_elp_sleep(wl);
3334 mutex_unlock(&wl->mutex);
/*
 * wl1271_op_set_rts_threshold - mac80211 "set_rts_threshold" callback.
 * The RTS threshold is per-vif in firmware, so apply it to every
 * active vif.
 */
3339 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3341 struct wl1271 *wl = hw->priv;
3342 struct wl12xx_vif *wlvif;
3345 mutex_lock(&wl->mutex);
3347 if (unlikely(wl->state == WL1271_STATE_OFF)) {
3352 ret = wl1271_ps_elp_wakeup(wl);
3356 wl12xx_for_each_wlvif(wl, wlvif) {
3357 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3359 wl1271_warning("set rts threshold failed: %d", ret);
3361 wl1271_ps_elp_sleep(wl);
3364 mutex_unlock(&wl->mutex);
/*
 * wl1271_ssid_set - extract the SSID IE from a template frame (beacon /
 * probe request) starting at @offset and cache it in wlvif->ssid[].
 * Fails when no SSID IE is present or it exceeds IEEE80211_MAX_SSID_LEN.
 * (ssid_len derivation from ptr[1] is elided in this excerpt.)
 */
3369 static int wl1271_ssid_set(struct ieee80211_vif *vif, struct sk_buff *skb,
3372 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3374 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
3378 wl1271_error("No SSID in IEs!");
3383 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
3384 wl1271_error("SSID is too long!");
3388 wlvif->ssid_len = ssid_len;
/* ptr+2 skips the 2-byte IE header (id, length) */
3389 memcpy(wlvif->ssid, ptr+2, ssid_len);
/*
 * wl12xx_remove_ie - delete one information element from an skb in place.
 * Finds the IE by @eid starting at @ieoffset, moves the remaining bytes
 * over it and trims the skb by the IE's length.
 */
3393 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3396 const u8 *next, *end = skb->data + skb->len;
3397 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3398 skb->len - ieoffset);
3403 memmove(ie, next, end - next);
3404 skb_trim(skb, skb->len - len);
/*
 * wl12xx_remove_vendor_ie - like wl12xx_remove_ie(), but matches a
 * vendor-specific IE by OUI and OUI type.
 */
3407 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3408 unsigned int oui, u8 oui_type,
3412 const u8 *next, *end = skb->data + skb->len;
3413 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3414 skb->data + ieoffset,
3415 skb->len - ieoffset);
3420 memmove(ie, next, end - next);
3421 skb_trim(skb, skb->len - len);
/*
 * wl1271_ap_set_probe_resp_tmpl - upload the probe-response template
 * generated by mac80211 (ieee80211_proberesp_get) to firmware for the
 * AP role. The skb is expected to be freed after the command (the
 * free call is elided in this excerpt).
 */
3424 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3425 struct ieee80211_vif *vif)
3427 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3428 struct sk_buff *skb;
3431 skb = ieee80211_proberesp_get(wl->hw, vif);
3435 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3436 CMD_TEMPL_AP_PROBE_RESPONSE,
/*
 * wl1271_ap_set_probe_resp_tmpl_legacy - build an AP probe-response
 * template from beacon data, substituting the SSID from bss_conf for a
 * (possibly hidden/zero-length) SSID IE in the source frame.
 * If wlvif already holds a valid SSID the source data is used verbatim.
 * Layout of the rebuilt template: [hdr+IEs up to SSID IE][new SSID IE]
 * [IEs after the original SSID IE].
 */
3445 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3446 struct ieee80211_vif *vif,
3448 size_t probe_rsp_len,
3451 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3452 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3453 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3454 int ssid_ie_offset, ie_offset, templ_len;
3457 /* no need to change probe response if the SSID is set correctly */
3458 if (wlvif->ssid_len > 0)
3459 return wl1271_cmd_template_set(wl, wlvif->role_id,
3460 CMD_TEMPL_AP_PROBE_RESPONSE,
3465 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3466 wl1271_error("probe_rsp template too big");
3470 /* start searching from IE offset */
3471 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3473 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3474 probe_rsp_len - ie_offset);
3476 wl1271_error("No SSID in beacon!");
3480 ssid_ie_offset = ptr - probe_rsp_data;
/* advance past the original SSID IE (2-byte header + payload) */
3481 ptr += (ptr[1] + 2);
3483 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3485 /* insert SSID from bss_conf */
3486 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3487 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3488 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3489 bss_conf->ssid, bss_conf->ssid_len);
3490 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
/* copy the IEs that followed the original SSID IE */
3492 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3493 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3494 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3496 return wl1271_cmd_template_set(wl, wlvif->role_id,
3497 CMD_TEMPL_AP_PROBE_RESPONSE,
/*
 * wl1271_bss_erp_info_changed - apply ERP-related bss_conf changes:
 * slot time, preamble and CTS protection. Shared by STA and AP paths.
 * Note: the preamble acx call's return value is intentionally not
 * checked here, unlike slot/ctsprotect (matches the visible code).
 */
3503 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3504 struct ieee80211_vif *vif,
3505 struct ieee80211_bss_conf *bss_conf,
3508 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3511 if (changed & BSS_CHANGED_ERP_SLOT) {
3512 if (bss_conf->use_short_slot)
3513 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3515 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3517 wl1271_warning("Set slot time failed %d", ret);
3522 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3523 if (bss_conf->use_short_preamble)
3524 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3526 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3529 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3530 if (bss_conf->use_cts_prot)
3531 ret = wl1271_acx_cts_protect(wl, wlvif,
3534 ret = wl1271_acx_cts_protect(wl, wlvif,
3535 CTSPROTECT_DISABLE);
3537 wl1271_warning("Set ctsprotect failed %d", ret);
/*
 * wl1271_bss_beacon_info_changed - handle beacon-related bss changes.
 * Caches the beacon interval, refreshes the AP probe-response template
 * on BSS_CHANGED_AP_PROBE_RESP, and on BSS_CHANGED_BEACON uploads a new
 * beacon template and derives a probe-response template from the beacon
 * (TIM and P2P IEs stripped, frame_control rewritten) unless usermode
 * already set an explicit probe response.
 */
3546 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3547 struct ieee80211_vif *vif,
3548 struct ieee80211_bss_conf *bss_conf,
3551 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3552 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3555 if ((changed & BSS_CHANGED_BEACON_INT)) {
3556 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
3557 bss_conf->beacon_int);
3559 wlvif->beacon_int = bss_conf->beacon_int;
3562 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
3563 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3564 if (!wl1271_ap_set_probe_resp_tmpl(wl, rate, vif)) {
3565 wl1271_debug(DEBUG_AP, "probe response updated");
3566 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3570 if ((changed & BSS_CHANGED_BEACON)) {
3571 struct ieee80211_hdr *hdr;
3573 int ieoffset = offsetof(struct ieee80211_mgmt,
3575 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3583 wl1271_debug(DEBUG_MASTER, "beacon updated");
3585 ret = wl1271_ssid_set(vif, beacon, ieoffset);
3587 dev_kfree_skb(beacon);
3590 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3591 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3593 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3598 dev_kfree_skb(beacon);
3603 * In case we already have a probe-resp beacon set explicitly
3604 * by usermode, don't use the beacon data.
3606 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3609 /* remove TIM ie from probe response */
3610 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3613 * remove p2p ie from probe response.
3614 * the fw reponds to probe requests that don't include
3615 * the p2p ie. probe requests with p2p ie will be passed,
3616 * and will be responded by the supplicant (the spec
3617 * forbids including the p2p ie when responding to probe
3618 * requests that didn't include it).
3620 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3621 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
/* beacon frame is reused as probe response; fix the frame type */
3623 hdr = (struct ieee80211_hdr *) beacon->data;
3624 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3625 IEEE80211_STYPE_PROBE_RESP);
3627 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3632 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3633 CMD_TEMPL_PROBE_RESPONSE,
3638 dev_kfree_skb(beacon);
3645 wl1271_error("beacon info change failed: %d", ret);
3649 /* AP mode changes */
/*
 * wl1271_bss_info_changed_ap - bss_info_changed handling for AP vifs:
 * basic-rate policy (re-inits AP rates and templates), beacon updates,
 * starting/stopping the AP role on BSS_CHANGED_BEACON_ENABLED (with
 * replay of recorded keys via wl1271_ap_init_hwenc on start), ERP
 * parameters, and HT information.
 */
3650 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3651 struct ieee80211_vif *vif,
3652 struct ieee80211_bss_conf *bss_conf,
3655 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3658 if ((changed & BSS_CHANGED_BASIC_RATES)) {
3659 u32 rates = bss_conf->basic_rates;
3661 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
3663 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
3664 wlvif->basic_rate_set);
3666 ret = wl1271_init_ap_rates(wl, wlvif);
3668 wl1271_error("AP rate policy change failed %d", ret);
3672 ret = wl1271_ap_init_templates(wl, vif);
3677 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
3681 if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
3682 if (bss_conf->enable_beacon) {
3683 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3684 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
/* push keys that were recorded before the AP was started */
3688 ret = wl1271_ap_init_hwenc(wl, wlvif);
3692 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3693 wl1271_debug(DEBUG_AP, "started AP");
3696 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3697 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
3701 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3702 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
3704 wl1271_debug(DEBUG_AP, "stopped AP");
3709 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
3713 /* Handle HT information change */
3714 if ((changed & BSS_CHANGED_HT) &&
3715 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
3716 ret = wl1271_acx_set_ht_information(wl, wlvif,
3717 bss_conf->ht_operation_mode);
3719 wl1271_warning("Set ht information failed %d", ret);
3728 /* STA/IBSS mode changes */
/*
 * wl1271_bss_info_changed_sta - bss_info_changed handling for STA/IBSS.
 * Large multiplexed handler covering: IBSS join state, CQM RSSI
 * triggers, BSSID changes (null-data/qos-null templates), association
 * and disassociation (rate policies, PS-poll/probe-req templates,
 * connection monitor, keep-alive, ROC bookkeeping), join/unjoin,
 * HT capabilities/information, and ARP filtering.
 * NOTE(review): many control-flow lines (goto out paths, set_assoc/
 * do_join assignments) are elided in this excerpt; the ordering below
 * is what the firmware requires (HT and ARP config after join).
 */
3729 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3730 struct ieee80211_vif *vif,
3731 struct ieee80211_bss_conf *bss_conf,
3734 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3735 bool do_join = false, set_assoc = false;
3736 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
3737 bool ibss_joined = false;
3738 u32 sta_rate_set = 0;
3740 struct ieee80211_sta *sta;
3741 bool sta_exists = false;
3742 struct ieee80211_sta_ht_cap sta_ht_cap;
3745 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
3751 if (changed & BSS_CHANGED_IBSS) {
3752 if (bss_conf->ibss_joined) {
3753 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
3756 if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED,
3758 wl1271_unjoin(wl, wlvif);
3762 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
3765 /* Need to update the SSID (for filtering etc) */
3766 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
3769 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
3770 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
3771 bss_conf->enable_beacon ? "enabled" : "disabled");
3776 if (changed & BSS_CHANGED_IDLE && !is_ibss) {
3777 ret = wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
3779 wl1271_warning("idle mode change failed %d", ret);
3782 if ((changed & BSS_CHANGED_CQM)) {
3783 bool enable = false;
3784 if (bss_conf->cqm_rssi_thold)
3786 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
3787 bss_conf->cqm_rssi_thold,
3788 bss_conf->cqm_rssi_hyst);
3791 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
3794 if (changed & BSS_CHANGED_BSSID &&
3795 (is_ibss || bss_conf->assoc))
3796 if (!is_zero_ether_addr(bss_conf->bssid)) {
3797 ret = wl12xx_cmd_build_null_data(wl, wlvif);
3801 ret = wl1271_build_qos_null_data(wl, vif);
3805 /* Need to update the BSSID (for filtering etc) */
3809 if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) {
/* rcu-protected lookup; elided lines presumably hold rcu_read_lock */
3811 sta = ieee80211_find_sta(vif, bss_conf->bssid);
3815 /* save the supp_rates of the ap */
3816 sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band];
3817 if (sta->ht_cap.ht_supported)
3819 (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET);
3820 sta_ht_cap = sta->ht_cap;
3827 if ((changed & BSS_CHANGED_ASSOC)) {
3828 if (bss_conf->assoc) {
3831 wlvif->aid = bss_conf->aid;
3832 wlvif->beacon_int = bss_conf->beacon_int;
3836 * use basic rates from AP, and determine lowest rate
3837 * to use with control frames.
3839 rates = bss_conf->basic_rates;
3840 wlvif->basic_rate_set =
3841 wl1271_tx_enabled_rates_get(wl, rates,
3844 wl1271_tx_min_rate_get(wl,
3845 wlvif->basic_rate_set);
3848 wl1271_tx_enabled_rates_get(wl,
3851 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3856 * with wl1271, we don't need to update the
3857 * beacon_int and dtim_period, because the firmware
3858 * updates it by itself when the first beacon is
3859 * received after a join.
3861 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
3866 * Get a template for hardware connection maintenance
3868 dev_kfree_skb(wlvif->probereq);
3869 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
3872 ieoffset = offsetof(struct ieee80211_mgmt,
3873 u.probe_req.variable);
3874 wl1271_ssid_set(vif, wlvif->probereq, ieoffset);
3876 /* enable the connection monitoring feature */
3877 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
3881 /* use defaults when not associated */
3883 !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED,
3886 !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT,
3890 /* free probe-request template */
3891 dev_kfree_skb(wlvif->probereq);
3892 wlvif->probereq = NULL;
3894 /* revert back to minimum rates for the current band */
3895 wl1271_set_band_rate(wl, wlvif);
3897 wl1271_tx_min_rate_get(wl,
3898 wlvif->basic_rate_set);
3899 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3903 /* disable connection monitor features */
3904 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3906 /* Disable the keep-alive feature */
3907 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3911 /* restore the bssid filter and go to dummy bssid */
3914 * we might have to disable roc, if there was
3915 * no IF_OPER_UP notification.
3918 ret = wl12xx_croc(wl, wlvif->role_id);
3923 * (we also need to disable roc in case of
3924 * roaming on the same channel. until we will
3925 * have a better flow...)
3927 if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
3928 ret = wl12xx_croc(wl,
3929 wlvif->dev_role_id);
3934 wl1271_unjoin(wl, wlvif);
3935 if (!bss_conf->idle)
3936 wl12xx_start_dev(wl, wlvif);
3941 if (changed & BSS_CHANGED_IBSS) {
3942 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
3943 bss_conf->ibss_joined);
3945 if (bss_conf->ibss_joined) {
3946 u32 rates = bss_conf->basic_rates;
3947 wlvif->basic_rate_set =
3948 wl1271_tx_enabled_rates_get(wl, rates,
3951 wl1271_tx_min_rate_get(wl,
3952 wlvif->basic_rate_set);
3954 /* by default, use 11b + OFDM rates */
3955 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
3956 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3962 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
3967 ret = wl1271_join(wl, wlvif, set_assoc);
3969 wl1271_warning("cmd join failed %d", ret);
3973 /* ROC until connected (after EAPOL exchange) */
3975 ret = wl12xx_roc(wl, wlvif, wlvif->role_id);
3979 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
3980 wl12xx_set_authorized(wl, wlvif);
3983 * stop device role if started (we might already be in
3986 if (wl12xx_dev_role_started(wlvif)) {
3987 ret = wl12xx_stop_dev(wl, wlvif);
3993 /* Handle new association with HT. Do this after join. */
3995 if ((changed & BSS_CHANGED_HT) &&
3996 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
3997 ret = wl1271_acx_set_ht_capabilities(wl,
4002 wl1271_warning("Set ht cap true failed %d",
4007 /* handle new association without HT and disassociation */
4008 else if (changed & BSS_CHANGED_ASSOC) {
4009 ret = wl1271_acx_set_ht_capabilities(wl,
4014 wl1271_warning("Set ht cap false failed %d",
4021 /* Handle HT information change. Done after join. */
4022 if ((changed & BSS_CHANGED_HT) &&
4023 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
4024 ret = wl1271_acx_set_ht_information(wl, wlvif,
4025 bss_conf->ht_operation_mode);
4027 wl1271_warning("Set ht information failed %d", ret);
4032 /* Handle arp filtering. Done after join. */
4033 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4034 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4035 __be32 addr = bss_conf->arp_addr_list[0];
4036 wlvif->sta.qos = bss_conf->qos;
4037 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4039 if (bss_conf->arp_addr_cnt == 1 &&
4040 bss_conf->arp_filter_enabled) {
4041 wlvif->ip_addr = addr;
4043 * The template should have been configured only upon
4044 * association. however, it seems that the correct ip
4045 * isn't being set (when sending), so we have to
4046 * reconfigure the template upon every ip change.
4048 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4050 wl1271_warning("build arp rsp failed: %d", ret);
4054 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4055 (ACX_ARP_FILTER_ARP_FILTERING |
4056 ACX_ARP_FILTER_AUTO_ARP),
4060 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
/*
 * wl1271_op_bss_info_changed - mac80211 "bss_info_changed" callback.
 * Thin dispatcher: wakes the chip and forwards to the AP or STA/IBSS
 * specific handler depending on the vif's bss_type. Ignores vifs that
 * are not yet initialized and chips that are off.
 */
4071 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4072 struct ieee80211_vif *vif,
4073 struct ieee80211_bss_conf *bss_conf,
4076 struct wl1271 *wl = hw->priv;
4077 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4078 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4081 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
4084 mutex_lock(&wl->mutex);
4086 if (unlikely(wl->state == WL1271_STATE_OFF))
4089 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4092 ret = wl1271_ps_elp_wakeup(wl);
4097 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4099 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4101 wl1271_ps_elp_sleep(wl);
4104 mutex_unlock(&wl->mutex);
/*
 * wl1271_op_conf_tx - mac80211 "conf_tx" callback (EDCA/QoS params).
 * Configures AC parameters (cw_min/max, aifs, txop) and the TID config
 * for the given queue. The txop shift converts mac80211's 32us units
 * to the firmware's microsecond-based unit (<< 5 == * 32).
 */
4107 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4108 struct ieee80211_vif *vif, u16 queue,
4109 const struct ieee80211_tx_queue_params *params)
4111 struct wl1271 *wl = hw->priv;
4112 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4116 mutex_lock(&wl->mutex);
4118 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
/* UAPSD queues use the UPSD-trigger PS scheme, others legacy PS */
4121 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4123 ps_scheme = CONF_PS_SCHEME_LEGACY;
4125 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4128 ret = wl1271_ps_elp_wakeup(wl);
4133 * the txop is confed in units of 32us by the mac80211,
4136 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4137 params->cw_min, params->cw_max,
4138 params->aifs, params->txop << 5);
4142 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4143 CONF_CHANNEL_TYPE_EDCF,
4144 wl1271_tx_get_queue(queue),
4145 ps_scheme, CONF_ACK_POLICY_LEGACY,
4149 wl1271_ps_elp_sleep(wl);
4152 mutex_unlock(&wl->mutex);
/*
 * wl1271_op_get_tsf - mac80211 "get_tsf" callback.
 * Reads the TSF (mactime) from firmware; returns ULLONG_MAX when the
 * chip is off or the query fails.
 */
4157 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4158 struct ieee80211_vif *vif)
4161 struct wl1271 *wl = hw->priv;
4162 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4163 u64 mactime = ULLONG_MAX;
4166 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4168 mutex_lock(&wl->mutex);
4170 if (unlikely(wl->state == WL1271_STATE_OFF))
4173 ret = wl1271_ps_elp_wakeup(wl);
4177 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4182 wl1271_ps_elp_sleep(wl);
4185 mutex_unlock(&wl->mutex);
/*
 * wl1271_op_get_survey - mac80211 "get_survey" callback.
 * Reports only the cached noise level for the current channel
 * (single-entry survey; idx bounds check elided in this excerpt).
 */
4189 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4190 struct survey_info *survey)
4192 struct wl1271 *wl = hw->priv;
4193 struct ieee80211_conf *conf = &hw->conf;
4198 survey->channel = conf->channel;
4199 survey->filled = SURVEY_INFO_NOISE_DBM;
4200 survey->noise = wl->noise;
/*
 * wl1271_allocate_sta - allocate an hlid (link id) for a new AP-mode
 * station. Enforces the AP_MAX_STATIONS cap, records the hlid in the
 * station's drv_priv and in the vif's sta_hlid_map, caches the MAC in
 * wl->links[], and bumps the active station count.
 */
4205 static int wl1271_allocate_sta(struct wl1271 *wl,
4206 struct wl12xx_vif *wlvif,
4207 struct ieee80211_sta *sta)
4209 struct wl1271_station *wl_sta;
4213 if (wl->active_sta_count >= AP_MAX_STATIONS) {
4214 wl1271_warning("could not allocate HLID - too much stations");
4218 wl_sta = (struct wl1271_station *)sta->drv_priv;
4219 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4221 wl1271_warning("could not allocate HLID - too many links");
4225 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4226 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4227 wl->active_sta_count++;
/*
 * wl1271_free_sta - release an AP-mode station's hlid and all per-link
 * state (address, BA bitmap, PS maps). Rearms the TX watchdog when the
 * last station leaves so STA-buffered packets still in firmware don't
 * trigger a spurious recovery. Non-static: used outside this file.
 */
4231 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4233 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4236 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4237 memset(wl->links[hlid].addr, 0, ETH_ALEN);
4238 wl->links[hlid].ba_bitmap = 0;
4239 __clear_bit(hlid, &wl->ap_ps_map);
4240 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4241 wl12xx_free_link(wl, wlvif, &hlid);
4242 wl->active_sta_count--;
4245 * rearm the tx watchdog when the last STA is freed - give the FW a
4246 * chance to return STA-buffered packets before complaining.
4248 if (wl->active_sta_count == 0)
4249 wl12xx_rearm_tx_watchdog_locked(wl);
/*
 * wl12xx_sta_add - AP mode: allocate an hlid for the new station and
 * tell firmware about the peer; rolls back the hlid allocation if the
 * ADD_PEER command fails.
 */
4252 static int wl12xx_sta_add(struct wl1271 *wl,
4253 struct wl12xx_vif *wlvif,
4254 struct ieee80211_sta *sta)
4256 struct wl1271_station *wl_sta;
4260 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4262 ret = wl1271_allocate_sta(wl, wlvif, sta);
4266 wl_sta = (struct wl1271_station *)sta->drv_priv;
4267 hlid = wl_sta->hlid;
4269 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4271 wl1271_free_sta(wl, wlvif, hlid);
/*
 * wl12xx_sta_remove - AP mode: remove the peer from firmware and free
 * its hlid. WARNs (and bails) if the station's hlid is not actually
 * marked in the vif's sta_hlid_map.
 */
4276 static int wl12xx_sta_remove(struct wl1271 *wl,
4277 struct wl12xx_vif *wlvif,
4278 struct ieee80211_sta *sta)
4280 struct wl1271_station *wl_sta;
4283 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4285 wl_sta = (struct wl1271_station *)sta->drv_priv;
4287 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4290 ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
4294 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
/*
 * wl12xx_update_sta_state - translate mac80211 station state machine
 * transitions into firmware operations:
 *  AP: NOTEXIST->NONE adds the peer, NONE->NOTEXIST removes it,
 *      ->AUTHORIZED sets the peer state and its HT capabilities.
 *  STA: ->AUTHORIZED marks the vif authorized (post-EAPOL);
 *       AUTHORIZED->ASSOC clears the flag on the way down.
 * (is_ap/is_sta guards on some branches rely on elided lines.)
 */
4298 static int wl12xx_update_sta_state(struct wl1271 *wl,
4299 struct wl12xx_vif *wlvif,
4300 struct ieee80211_sta *sta,
4301 enum ieee80211_sta_state old_state,
4302 enum ieee80211_sta_state new_state)
4304 struct wl1271_station *wl_sta;
4306 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4307 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4310 wl_sta = (struct wl1271_station *)sta->drv_priv;
4311 hlid = wl_sta->hlid;
4313 /* Add station (AP mode) */
4315 old_state == IEEE80211_STA_NOTEXIST &&
4316 new_state == IEEE80211_STA_NONE)
4317 return wl12xx_sta_add(wl, wlvif, sta);
4319 /* Remove station (AP mode) */
4321 old_state == IEEE80211_STA_NONE &&
4322 new_state == IEEE80211_STA_NOTEXIST) {
4324 wl12xx_sta_remove(wl, wlvif, sta);
4328 /* Authorize station (AP mode) */
4330 new_state == IEEE80211_STA_AUTHORIZED) {
4331 ret = wl12xx_cmd_set_peer_state(wl, hlid);
4335 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4340 /* Authorize station */
4342 new_state == IEEE80211_STA_AUTHORIZED) {
4343 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4344 return wl12xx_set_authorized(wl, wlvif);
/* De-authorize (STA): clear the authorized flag on downgrade */
4348 old_state == IEEE80211_STA_AUTHORIZED &&
4349 new_state == IEEE80211_STA_ASSOC) {
4350 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
/*
 * mac80211 sta_state callback: wake the chip, apply the state
 * transition under wl->mutex, then let the chip sleep again.
 */
4357 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4358 struct ieee80211_vif *vif,
4359 struct ieee80211_sta *sta,
4360 enum ieee80211_sta_state old_state,
4361 enum ieee80211_sta_state new_state)
4363 struct wl1271 *wl = hw->priv;
4364 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4367 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4368 sta->aid, old_state, new_state);
4370 mutex_lock(&wl->mutex);
4372 if (unlikely(wl->state == WL1271_STATE_OFF)) {
4377 ret = wl1271_ps_elp_wakeup(wl);
4381 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
4383 wl1271_ps_elp_sleep(wl);
4385 mutex_unlock(&wl->mutex);
/* NOTE(review): downward transitions appear to be treated specially
 * here (mac80211 cannot fail them) — confirm against the elided body */
4386 if (new_state < old_state)
/*
 * mac80211 ampdu_action callback.  RX block-ack sessions are started and
 * stopped explicitly via ACX commands, bounded by RX_BA_MAX_SESSIONS and
 * tracked per-link in a TID bitmap; TX aggregation is left to the FW.
 */
4391 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4392 struct ieee80211_vif *vif,
4393 enum ieee80211_ampdu_mlme_action action,
4394 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4397 struct wl1271 *wl = hw->priv;
4398 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4400 u8 hlid, *ba_bitmap;
4402 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
4405 /* sanity check - the fields in FW are only 8bits wide */
4406 if (WARN_ON(tid > 0xFF))
4409 mutex_lock(&wl->mutex);
4411 if (unlikely(wl->state == WL1271_STATE_OFF)) {
/* pick the BA bitmap: per-vif for STA, per-link for AP peers */
4416 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
4417 hlid = wlvif->sta.hlid;
4418 ba_bitmap = &wlvif->sta.ba_rx_bitmap;
4419 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
4420 struct wl1271_station *wl_sta;
4422 wl_sta = (struct wl1271_station *)sta->drv_priv;
4423 hlid = wl_sta->hlid;
4424 ba_bitmap = &wl->links[hlid].ba_bitmap;
4430 ret = wl1271_ps_elp_wakeup(wl);
4434 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
4438 case IEEE80211_AMPDU_RX_START:
4439 if (!wlvif->ba_support || !wlvif->ba_allowed) {
4444 if (wl->ba_rx_session_count >= RX_BA_MAX_SESSIONS) {
4446 wl1271_error("exceeded max RX BA sessions");
4450 if (*ba_bitmap & BIT(tid)) {
4452 wl1271_error("cannot enable RX BA session on active "
4457 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
4460 *ba_bitmap |= BIT(tid);
4461 wl->ba_rx_session_count++;
4465 case IEEE80211_AMPDU_RX_STOP:
4466 if (!(*ba_bitmap & BIT(tid))) {
4468 wl1271_error("no active RX BA session on tid: %d",
4473 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
4476 *ba_bitmap &= ~BIT(tid);
4477 wl->ba_rx_session_count--;
/*
 * The BA initiator session is managed by the FW independently.
 * Falling through here on purpose for all TX AMPDU commands.
 */
4485 case IEEE80211_AMPDU_TX_START:
4486 case IEEE80211_AMPDU_TX_STOP:
4487 case IEEE80211_AMPDU_TX_OPERATIONAL:
4492 wl1271_error("Incorrect ampdu action id=%x\n", action);
4496 wl1271_ps_elp_sleep(wl);
4499 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_bitrate_mask callback: cache the per-band enabled-rate
 * masks, and for a non-associated STA interface push updated rate
 * policies to the firmware immediately.
 */
4504 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
4505 struct ieee80211_vif *vif,
4506 const struct cfg80211_bitrate_mask *mask)
4508 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4509 struct wl1271 *wl = hw->priv;
4512 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
4513 mask->control[NL80211_BAND_2GHZ].legacy,
4514 mask->control[NL80211_BAND_5GHZ].legacy);
4516 mutex_lock(&wl->mutex);
/* always cache the masks, even while the chip is off */
4518 for (i = 0; i < IEEE80211_NUM_BANDS; i++)
4519 wlvif->bitrate_masks[i] =
4520 wl1271_tx_enabled_rates_get(wl,
4521 mask->control[i].legacy,
4524 if (unlikely(wl->state == WL1271_STATE_OFF))
4527 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4528 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
4530 ret = wl1271_ps_elp_wakeup(wl);
4534 wl1271_set_band_rate(wl, wlvif);
4536 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4537 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4539 wl1271_ps_elp_sleep(wl);
4542 mutex_unlock(&wl->mutex);
/*
 * mac80211 channel_switch callback: flush pending TX, then issue the
 * channel-switch command for every STA interface.  If the chip is off,
 * report the switch as failed to mac80211 instead.
 */
4547 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
4548 struct ieee80211_channel_switch *ch_switch)
4550 struct wl1271 *wl = hw->priv;
4551 struct wl12xx_vif *wlvif;
4554 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
4556 wl1271_tx_flush(wl);
4558 mutex_lock(&wl->mutex);
4560 if (unlikely(wl->state == WL1271_STATE_OFF)) {
4561 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4562 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4563 ieee80211_chswitch_done(vif, false);
4568 ret = wl1271_ps_elp_wakeup(wl);
4572 /* TODO: change mac80211 to pass vif as param */
4573 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4574 ret = wl12xx_cmd_channel_switch(wl, wlvif, ch_switch);
/* mark the switch as in progress on success */
4577 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
4580 wl1271_ps_elp_sleep(wl);
4583 mutex_unlock(&wl->mutex);
/*
 * mac80211 tx_frames_pending callback: report whether any frames are
 * still queued in the driver or in flight in the firmware.
 */
4586 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
4588 struct wl1271 *wl = hw->priv;
4591 mutex_lock(&wl->mutex);
4593 if (unlikely(wl->state == WL1271_STATE_OFF))
4596 /* packets are considered pending if in the TX queue or the FW */
4597 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
4599 mutex_unlock(&wl->mutex);
4604 /* can't be const, mac80211 writes to this */
/* 2.4 GHz legacy rate table; short-preamble capable CCK rates carry
 * IEEE80211_RATE_SHORT_PREAMBLE */
4605 static struct ieee80211_rate wl1271_rates[] = {
4607 .hw_value = CONF_HW_BIT_RATE_1MBPS,
4608 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
4610 .hw_value = CONF_HW_BIT_RATE_2MBPS,
4611 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
4612 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
4614 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
4615 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
4616 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
4618 .hw_value = CONF_HW_BIT_RATE_11MBPS,
4619 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
4620 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
4622 .hw_value = CONF_HW_BIT_RATE_6MBPS,
4623 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
4625 .hw_value = CONF_HW_BIT_RATE_9MBPS,
4626 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
4628 .hw_value = CONF_HW_BIT_RATE_12MBPS,
4629 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
4631 .hw_value = CONF_HW_BIT_RATE_18MBPS,
4632 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
4634 .hw_value = CONF_HW_BIT_RATE_24MBPS,
4635 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
4637 .hw_value = CONF_HW_BIT_RATE_36MBPS,
4638 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
4640 .hw_value = CONF_HW_BIT_RATE_48MBPS,
4641 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
4643 .hw_value = CONF_HW_BIT_RATE_54MBPS,
4644 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
4647 /* can't be const, mac80211 writes to this */
/* 2.4 GHz channels 1-14 (2412-2484 MHz) */
4648 static struct ieee80211_channel wl1271_channels[] = {
4649 { .hw_value = 1, .center_freq = 2412, .max_power = 25 },
4650 { .hw_value = 2, .center_freq = 2417, .max_power = 25 },
4651 { .hw_value = 3, .center_freq = 2422, .max_power = 25 },
4652 { .hw_value = 4, .center_freq = 2427, .max_power = 25 },
4653 { .hw_value = 5, .center_freq = 2432, .max_power = 25 },
4654 { .hw_value = 6, .center_freq = 2437, .max_power = 25 },
4655 { .hw_value = 7, .center_freq = 2442, .max_power = 25 },
4656 { .hw_value = 8, .center_freq = 2447, .max_power = 25 },
4657 { .hw_value = 9, .center_freq = 2452, .max_power = 25 },
4658 { .hw_value = 10, .center_freq = 2457, .max_power = 25 },
4659 { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
4660 { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
4661 { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
4662 { .hw_value = 14, .center_freq = 2484, .max_power = 25 },
4665 /* mapping to indexes for wl1271_rates */
/* indexed by CONF_HW_RXTX_RATE_*, highest rate first;
 * CONF_HW_RXTX_RATE_UNSUPPORTED marks rates with no 2.4 GHz entry */
4666 static const u8 wl1271_rate_to_idx_2ghz[] = {
4667 /* MCS rates are used only with 11n */
4668 7, /* CONF_HW_RXTX_RATE_MCS7_SGI */
4669 7, /* CONF_HW_RXTX_RATE_MCS7 */
4670 6, /* CONF_HW_RXTX_RATE_MCS6 */
4671 5, /* CONF_HW_RXTX_RATE_MCS5 */
4672 4, /* CONF_HW_RXTX_RATE_MCS4 */
4673 3, /* CONF_HW_RXTX_RATE_MCS3 */
4674 2, /* CONF_HW_RXTX_RATE_MCS2 */
4675 1, /* CONF_HW_RXTX_RATE_MCS1 */
4676 0, /* CONF_HW_RXTX_RATE_MCS0 */
4678 11, /* CONF_HW_RXTX_RATE_54 */
4679 10, /* CONF_HW_RXTX_RATE_48 */
4680 9, /* CONF_HW_RXTX_RATE_36 */
4681 8, /* CONF_HW_RXTX_RATE_24 */
4683 /* TI-specific rate */
4684 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_22 */
4686 7, /* CONF_HW_RXTX_RATE_18 */
4687 6, /* CONF_HW_RXTX_RATE_12 */
4688 3, /* CONF_HW_RXTX_RATE_11 */
4689 5, /* CONF_HW_RXTX_RATE_9 */
4690 4, /* CONF_HW_RXTX_RATE_6 */
4691 2, /* CONF_HW_RXTX_RATE_5_5 */
4692 1, /* CONF_HW_RXTX_RATE_2 */
4693 0 /* CONF_HW_RXTX_RATE_1 */
4696 /* 11n STA capabilities */
4697 #define HW_RX_HIGHEST_RATE 72
/*
 * Shared HT capability initializer for both bands: greenfield, SGI-20,
 * one RX STBC stream, 8K max A-MPDU, single spatial stream (MCS 0-7).
 */
4699 #define WL12XX_HT_CAP { \
4700 .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | \
4701 (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT), \
4702 .ht_supported = true, \
4703 .ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K, \
4704 .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \
4706 .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, \
4707 .rx_highest = cpu_to_le16(HW_RX_HIGHEST_RATE), \
4708 .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
4712 /* can't be const, mac80211 writes to this */
/* 2.4 GHz supported-band descriptor, built from the tables above */
4713 static struct ieee80211_supported_band wl1271_band_2ghz = {
4714 .channels = wl1271_channels,
4715 .n_channels = ARRAY_SIZE(wl1271_channels),
4716 .bitrates = wl1271_rates,
4717 .n_bitrates = ARRAY_SIZE(wl1271_rates),
4718 .ht_cap = WL12XX_HT_CAP,
4721 /* 5 GHz data rates for WL1273 */
/* OFDM only — no CCK rates on the 5 GHz band */
4722 static struct ieee80211_rate wl1271_rates_5ghz[] = {
4724 .hw_value = CONF_HW_BIT_RATE_6MBPS,
4725 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
4727 .hw_value = CONF_HW_BIT_RATE_9MBPS,
4728 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
4730 .hw_value = CONF_HW_BIT_RATE_12MBPS,
4731 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
4733 .hw_value = CONF_HW_BIT_RATE_18MBPS,
4734 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
4736 .hw_value = CONF_HW_BIT_RATE_24MBPS,
4737 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
4739 .hw_value = CONF_HW_BIT_RATE_36MBPS,
4740 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
4742 .hw_value = CONF_HW_BIT_RATE_48MBPS,
4743 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
4745 .hw_value = CONF_HW_BIT_RATE_54MBPS,
4746 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
4749 /* 5 GHz band channels for WL1273 */
4750 static struct ieee80211_channel wl1271_channels_5ghz[] = {
4751 { .hw_value = 7, .center_freq = 5035, .max_power = 25 },
4752 { .hw_value = 8, .center_freq = 5040, .max_power = 25 },
4753 { .hw_value = 9, .center_freq = 5045, .max_power = 25 },
4754 { .hw_value = 11, .center_freq = 5055, .max_power = 25 },
4755 { .hw_value = 12, .center_freq = 5060, .max_power = 25 },
4756 { .hw_value = 16, .center_freq = 5080, .max_power = 25 },
4757 { .hw_value = 34, .center_freq = 5170, .max_power = 25 },
4758 { .hw_value = 36, .center_freq = 5180, .max_power = 25 },
4759 { .hw_value = 38, .center_freq = 5190, .max_power = 25 },
4760 { .hw_value = 40, .center_freq = 5200, .max_power = 25 },
4761 { .hw_value = 42, .center_freq = 5210, .max_power = 25 },
4762 { .hw_value = 44, .center_freq = 5220, .max_power = 25 },
4763 { .hw_value = 46, .center_freq = 5230, .max_power = 25 },
4764 { .hw_value = 48, .center_freq = 5240, .max_power = 25 },
4765 { .hw_value = 52, .center_freq = 5260, .max_power = 25 },
4766 { .hw_value = 56, .center_freq = 5280, .max_power = 25 },
4767 { .hw_value = 60, .center_freq = 5300, .max_power = 25 },
4768 { .hw_value = 64, .center_freq = 5320, .max_power = 25 },
4769 { .hw_value = 100, .center_freq = 5500, .max_power = 25 },
4770 { .hw_value = 104, .center_freq = 5520, .max_power = 25 },
4771 { .hw_value = 108, .center_freq = 5540, .max_power = 25 },
4772 { .hw_value = 112, .center_freq = 5560, .max_power = 25 },
4773 { .hw_value = 116, .center_freq = 5580, .max_power = 25 },
4774 { .hw_value = 120, .center_freq = 5600, .max_power = 25 },
4775 { .hw_value = 124, .center_freq = 5620, .max_power = 25 },
4776 { .hw_value = 128, .center_freq = 5640, .max_power = 25 },
4777 { .hw_value = 132, .center_freq = 5660, .max_power = 25 },
4778 { .hw_value = 136, .center_freq = 5680, .max_power = 25 },
4779 { .hw_value = 140, .center_freq = 5700, .max_power = 25 },
4780 { .hw_value = 149, .center_freq = 5745, .max_power = 25 },
4781 { .hw_value = 153, .center_freq = 5765, .max_power = 25 },
4782 { .hw_value = 157, .center_freq = 5785, .max_power = 25 },
4783 { .hw_value = 161, .center_freq = 5805, .max_power = 25 },
4784 { .hw_value = 165, .center_freq = 5825, .max_power = 25 },
4787 /* mapping to indexes for wl1271_rates_5ghz */
/* indexed by CONF_HW_RXTX_RATE_*; CCK rates (11/5.5/2/1) and the
 * TI 22 Mbps rate are unsupported on 5 GHz */
4788 static const u8 wl1271_rate_to_idx_5ghz[] = {
4789 /* MCS rates are used only with 11n */
4790 7, /* CONF_HW_RXTX_RATE_MCS7_SGI */
4791 7, /* CONF_HW_RXTX_RATE_MCS7 */
4792 6, /* CONF_HW_RXTX_RATE_MCS6 */
4793 5, /* CONF_HW_RXTX_RATE_MCS5 */
4794 4, /* CONF_HW_RXTX_RATE_MCS4 */
4795 3, /* CONF_HW_RXTX_RATE_MCS3 */
4796 2, /* CONF_HW_RXTX_RATE_MCS2 */
4797 1, /* CONF_HW_RXTX_RATE_MCS1 */
4798 0, /* CONF_HW_RXTX_RATE_MCS0 */
4800 7, /* CONF_HW_RXTX_RATE_54 */
4801 6, /* CONF_HW_RXTX_RATE_48 */
4802 5, /* CONF_HW_RXTX_RATE_36 */
4803 4, /* CONF_HW_RXTX_RATE_24 */
4805 /* TI-specific rate */
4806 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_22 */
4808 3, /* CONF_HW_RXTX_RATE_18 */
4809 2, /* CONF_HW_RXTX_RATE_12 */
4810 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_11 */
4811 1, /* CONF_HW_RXTX_RATE_9 */
4812 0, /* CONF_HW_RXTX_RATE_6 */
4813 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_5_5 */
4814 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_2 */
4815 CONF_HW_RXTX_RATE_UNSUPPORTED /* CONF_HW_RXTX_RATE_1 */
/* 5 GHz supported-band descriptor, built from the tables above */
4818 static struct ieee80211_supported_band wl1271_band_5ghz = {
4819 .channels = wl1271_channels_5ghz,
4820 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
4821 .bitrates = wl1271_rates_5ghz,
4822 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
4823 .ht_cap = WL12XX_HT_CAP,
/* per-band HW-rate -> rate-table-index maps, indexed by band enum */
4826 static const u8 *wl1271_band_rate_to_idx[] = {
4827 [IEEE80211_BAND_2GHZ] = wl1271_rate_to_idx_2ghz,
4828 [IEEE80211_BAND_5GHZ] = wl1271_rate_to_idx_5ghz
/* mac80211 callback table for the wl12xx driver */
4831 static const struct ieee80211_ops wl1271_ops = {
4832 .start = wl1271_op_start,
4833 .stop = wl1271_op_stop,
4834 .add_interface = wl1271_op_add_interface,
4835 .remove_interface = wl1271_op_remove_interface,
4836 .change_interface = wl12xx_op_change_interface,
4838 .suspend = wl1271_op_suspend,
4839 .resume = wl1271_op_resume,
4841 .config = wl1271_op_config,
4842 .prepare_multicast = wl1271_op_prepare_multicast,
4843 .configure_filter = wl1271_op_configure_filter,
4845 .set_key = wl1271_op_set_key,
4846 .hw_scan = wl1271_op_hw_scan,
4847 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
4848 .sched_scan_start = wl1271_op_sched_scan_start,
4849 .sched_scan_stop = wl1271_op_sched_scan_stop,
4850 .bss_info_changed = wl1271_op_bss_info_changed,
4851 .set_frag_threshold = wl1271_op_set_frag_threshold,
4852 .set_rts_threshold = wl1271_op_set_rts_threshold,
4853 .conf_tx = wl1271_op_conf_tx,
4854 .get_tsf = wl1271_op_get_tsf,
4855 .get_survey = wl1271_op_get_survey,
4856 .sta_state = wl12xx_op_sta_state,
4857 .ampdu_action = wl1271_op_ampdu_action,
4858 .tx_frames_pending = wl1271_tx_frames_pending,
4859 .set_bitrate_mask = wl12xx_set_bitrate_mask,
4860 .channel_switch = wl12xx_op_channel_switch,
4861 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
/*
 * Map a HW RX rate code to an index into the band's rate table,
 * logging an error for out-of-range or unsupported rate codes.
 */
4865 u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band)
4869 BUG_ON(band >= sizeof(wl1271_band_rate_to_idx)/sizeof(u8 *));
4871 if (unlikely(rate >= CONF_HW_RXTX_RATE_MAX)) {
4872 wl1271_error("Illegal RX rate from HW: %d", rate);
4876 idx = wl1271_band_rate_to_idx[band][rate];
4877 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
4878 wl1271_error("Unsupported RX rate from HW: %d", rate);
/* sysfs 'bt_coex_state' read: report the current BT coexistence mode */
4885 static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
4886 struct device_attribute *attr,
4889 struct wl1271 *wl = dev_get_drvdata(dev);
4894 mutex_lock(&wl->mutex);
4895 len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
4897 mutex_unlock(&wl->mutex);
/*
 * sysfs 'bt_coex_state' write: parse the requested mode and, if the
 * chip is running, push the new soft-gemini setting to the firmware.
 */
4903 static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
4904 struct device_attribute *attr,
4905 const char *buf, size_t count)
4907 struct wl1271 *wl = dev_get_drvdata(dev);
4911 ret = kstrtoul(buf, 10, &res);
4913 wl1271_warning("incorrect value written to bt_coex_mode");
4917 mutex_lock(&wl->mutex);
/* no-op if the requested state equals the current one */
4921 if (res == wl->sg_enabled)
4924 wl->sg_enabled = res;
/* only cache the value while the chip is off; applied at init */
4926 if (wl->state == WL1271_STATE_OFF)
4929 ret = wl1271_ps_elp_wakeup(wl);
4933 wl1271_acx_sg_enable(wl, wl->sg_enabled);
4934 wl1271_ps_elp_sleep(wl);
4937 mutex_unlock(&wl->mutex);
4941 static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
4942 wl1271_sysfs_show_bt_coex_state,
4943 wl1271_sysfs_store_bt_coex_state);
/* sysfs 'hw_pg_ver' read: report the hardware PG version, or "n/a"
 * if it has not been determined yet */
4945 static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
4946 struct device_attribute *attr,
4949 struct wl1271 *wl = dev_get_drvdata(dev);
4954 mutex_lock(&wl->mutex);
4955 if (wl->hw_pg_ver >= 0)
4956 len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
4958 len = snprintf(buf, len, "n/a\n");
4959 mutex_unlock(&wl->mutex);
4964 static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
4965 wl1271_sysfs_show_hw_pg_ver, NULL);
/*
 * sysfs binary 'fwlog' read: block (interruptibly) until FW log data is
 * available, copy out up to 'count' bytes, and compact the remaining
 * log to the front of the buffer.  fwlog_size < 0 means the log has
 * been invalidated (e.g. on teardown) and readers must bail out.
 */
4967 static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
4968 struct bin_attribute *bin_attr,
4969 char *buffer, loff_t pos, size_t count)
4971 struct device *dev = container_of(kobj, struct device, kobj);
4972 struct wl1271 *wl = dev_get_drvdata(dev);
4976 ret = mutex_lock_interruptible(&wl->mutex);
4978 return -ERESTARTSYS;
4980 /* Let only one thread read the log at a time, blocking others */
4981 while (wl->fwlog_size == 0) {
4984 prepare_to_wait_exclusive(&wl->fwlog_waitq,
4986 TASK_INTERRUPTIBLE);
4988 if (wl->fwlog_size != 0) {
4989 finish_wait(&wl->fwlog_waitq, &wait);
/* drop the mutex while sleeping so the log producer can run */
4993 mutex_unlock(&wl->mutex);
4996 finish_wait(&wl->fwlog_waitq, &wait);
4998 if (signal_pending(current))
4999 return -ERESTARTSYS;
5001 ret = mutex_lock_interruptible(&wl->mutex);
5003 return -ERESTARTSYS;
5006 /* Check if the fwlog is still valid */
5007 if (wl->fwlog_size < 0) {
5008 mutex_unlock(&wl->mutex);
5012 /* Seeking is not supported - old logs are not kept. Disregard pos. */
5013 len = min(count, (size_t)wl->fwlog_size);
5014 wl->fwlog_size -= len;
5015 memcpy(buffer, wl->fwlog, len);
5017 /* Make room for new messages */
5018 memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);
5020 mutex_unlock(&wl->mutex);
5025 static struct bin_attribute fwlog_attr = {
5026 .attr = {.name = "fwlog", .mode = S_IRUSR},
5027 .read = wl1271_sysfs_read_fwlog,
/*
 * Return true if this chip revision stores a MAC address in its fuse:
 * wl128x from PG (2, 1), wl127x from PG (3, 1).
 */
5030 static bool wl12xx_mac_in_fuse(struct wl1271 *wl)
5032 bool supported = false;
5035 if (wl->chip.id == CHIP_ID_1283_PG20) {
5036 major = WL128X_PG_GET_MAJOR(wl->hw_pg_ver);
5037 minor = WL128X_PG_GET_MINOR(wl->hw_pg_ver);
5039 /* in wl128x we have the MAC address if the PG is >= (2, 1) */
5040 if (major > 2 || (major == 2 && minor >= 1))
5043 major = WL127X_PG_GET_MAJOR(wl->hw_pg_ver);
5044 minor = WL127X_PG_GET_MINOR(wl->hw_pg_ver);
5046 /* in wl127x we have the MAC address if the PG is >= (3, 1) */
5047 if (major == 3 && minor >= 1)
5051 wl1271_debug(DEBUG_PROBE,
5052 "PG Ver major = %d minor = %d, MAC %s present",
5053 major, minor, supported ? "is" : "is not");
/*
 * Build 'n' consecutive MAC addresses from a 24-bit OUI and 24-bit NIC
 * base, and publish them to mac80211 via wiphy->addresses.
 */
5058 static void wl12xx_derive_mac_addresses(struct wl1271 *wl,
5059 u32 oui, u32 nic, int n)
5063 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x, n %d",
/* the NIC part is only 24 bits wide; warn if n addresses overflow it */
5066 if (nic + n - 1 > 0xffffff)
5067 wl1271_warning("NIC part of the MAC address wraps around!");
5069 for (i = 0; i < n; i++) {
5070 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5071 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5072 wl->addresses[i].addr[2] = (u8) oui;
5073 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5074 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5075 wl->addresses[i].addr[5] = (u8) nic;
5079 wl->hw->wiphy->n_addresses = n;
5080 wl->hw->wiphy->addresses = wl->addresses;
/*
 * Read the BD_ADDR out of the fuse registers (DRPW partition) and split
 * it into the OUI and NIC halves; restores the DOWN partition when done.
 */
5083 static void wl12xx_get_fuse_mac(struct wl1271 *wl)
5087 wl1271_set_partition(wl, &wl12xx_part_table[PART_DRPW]);
5089 mac1 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_1);
5090 mac2 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_2);
5092 /* these are the two parts of the BD_ADDR */
5093 wl->fuse_oui_addr = ((mac2 & 0xffff) << 8) +
5094 ((mac1 & 0xff000000) >> 24);
5095 wl->fuse_nic_addr = mac1 & 0xffffff;
5097 wl1271_set_partition(wl, &wl12xx_part_table[PART_DOWN]);
/*
 * Power the chip on briefly to read the chip ID, PG version and (when
 * fused) the MAC address, then power it back off.
 */
5100 static int wl12xx_get_hw_info(struct wl1271 *wl)
5105 ret = wl12xx_set_power_on(wl);
5109 wl->chip.id = wl1271_read32(wl, CHIP_ID_B);
/* the fuse register holding the PG version differs per chip family */
5111 if (wl->chip.id == CHIP_ID_1283_PG20)
5112 die_info = wl1271_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1);
5114 die_info = wl1271_top_reg_read(wl, WL127X_REG_FUSE_DATA_2_1);
5116 wl->hw_pg_ver = (s8) (die_info & PG_VER_MASK) >> PG_VER_OFFSET;
5118 if (!wl12xx_mac_in_fuse(wl)) {
5119 wl->fuse_oui_addr = 0;
5120 wl->fuse_nic_addr = 0;
5122 wl12xx_get_fuse_mac(wl);
5125 wl1271_power_off(wl);
/*
 * One-time mac80211 registration: read HW info, determine the MAC
 * address (NVS first, falling back to the fuse when the NVS address is
 * zeroed), derive two WLAN addresses, and register the hw with mac80211.
 */
5130 static int wl1271_register_hw(struct wl1271 *wl)
5133 u32 oui_addr = 0, nic_addr = 0;
5135 if (wl->mac80211_registered)
5138 ret = wl12xx_get_hw_info(wl);
5140 wl1271_error("couldn't get hw info");
5144 ret = wl1271_fetch_nvs(wl);
5146 /* NOTE: The wl->nvs->nvs element must be first, in
5147 * order to simplify the casting, we assume it is at
5148 * the beginning of the wl->nvs structure.
5150 u8 *nvs_ptr = (u8 *)wl->nvs;
5153 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5155 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5158 /* if the MAC address is zeroed in the NVS derive from fuse */
5159 if (oui_addr == 0 && nic_addr == 0) {
5160 oui_addr = wl->fuse_oui_addr;
5161 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5162 nic_addr = wl->fuse_nic_addr + 1;
5165 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr, 2);
5167 ret = ieee80211_register_hw(wl->hw);
5169 wl1271_error("unable to register mac80211 hw: %d", ret);
5173 wl->mac80211_registered = true;
5175 wl1271_debugfs_init(wl);
5177 wl1271_notice("loaded");
/* Tear down mac80211 registration (stopping PLT mode first if active) */
5183 static void wl1271_unregister_hw(struct wl1271 *wl)
5186 wl1271_plt_stop(wl);
5188 ieee80211_unregister_hw(wl->hw);
5189 wl->mac80211_registered = false;
/*
 * Fill in the ieee80211_hw / wiphy capabilities advertised to mac80211:
 * cipher suites, HW flags, supported interface modes, scan limits,
 * per-device band copies, and probe-response offload support.
 */
5193 static int wl1271_init_ieee80211(struct wl1271 *wl)
5195 static const u32 cipher_suites[] = {
5196 WLAN_CIPHER_SUITE_WEP40,
5197 WLAN_CIPHER_SUITE_WEP104,
5198 WLAN_CIPHER_SUITE_TKIP,
5199 WLAN_CIPHER_SUITE_CCMP,
5200 WL1271_CIPHER_SUITE_GEM,
5203 /* The tx descriptor buffer and the TKIP space. */
5204 wl->hw->extra_tx_headroom = WL1271_EXTRA_SPACE_TKIP +
5205 sizeof(struct wl1271_tx_hw_descr);
5208 /* FIXME: find a proper value */
5209 wl->hw->channel_change_time = 10000;
5210 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
5212 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
5213 IEEE80211_HW_SUPPORTS_PS |
5214 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
5215 IEEE80211_HW_SUPPORTS_UAPSD |
5216 IEEE80211_HW_HAS_RATE_CONTROL |
5217 IEEE80211_HW_CONNECTION_MONITOR |
5218 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
5219 IEEE80211_HW_SPECTRUM_MGMT |
5220 IEEE80211_HW_AP_LINK_PS |
5221 IEEE80211_HW_AMPDU_AGGREGATION |
5222 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
5223 IEEE80211_HW_SCAN_WHILE_IDLE;
5225 wl->hw->wiphy->cipher_suites = cipher_suites;
5226 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5228 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5229 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5230 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
5231 wl->hw->wiphy->max_scan_ssids = 1;
5232 wl->hw->wiphy->max_sched_scan_ssids = 16;
5233 wl->hw->wiphy->max_match_sets = 16;
5235 * Maximum length of elements in scanning probe request templates
5236 * should be the maximum length possible for a template, without
5237 * the IEEE80211 header of the template
5239 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5240 sizeof(struct ieee80211_header);
5242 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5243 sizeof(struct ieee80211_header);
5245 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
5247 /* make sure all our channels fit in the scanned_ch bitmask */
5248 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5249 ARRAY_SIZE(wl1271_channels_5ghz) >
5250 WL1271_MAX_CHANNELS);
5252 * We keep local copies of the band structs because we need to
5253 * modify them on a per-device basis.
5255 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5256 sizeof(wl1271_band_2ghz));
5257 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5258 sizeof(wl1271_band_5ghz));
5260 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5261 &wl->bands[IEEE80211_BAND_2GHZ];
5262 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5263 &wl->bands[IEEE80211_BAND_5GHZ];
5266 wl->hw->max_rates = 1;
5268 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5270 /* the FW answers probe-requests in AP-mode */
5271 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5272 wl->hw->wiphy->probe_resp_offload =
5273 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5274 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5275 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5277 SET_IEEE80211_DEV(wl->hw, wl->dev);
5279 wl->hw->sta_data_size = sizeof(struct wl1271_station);
5280 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5282 wl->hw->max_rx_aggregation_subframes = 8;
5287 #define WL1271_DEFAULT_CHANNEL 0
/*
 * Allocate the ieee80211_hw and the driver state embedded in it, and
 * initialize queues, work items, locks, default configuration, the
 * aggregation buffer, the dummy packet and the FW log page.  Unwinds
 * all allocations on failure and returns ERR_PTR(ret).
 */
5289 static struct ieee80211_hw *wl1271_alloc_hw(void)
5291 struct ieee80211_hw *hw;
5296 BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
5298 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5300 wl1271_error("could not alloc ieee80211_hw");
5306 memset(wl, 0, sizeof(*wl));
5308 INIT_LIST_HEAD(&wl->wlvif_list);
/* per-link, per-AC TX queues */
5312 for (i = 0; i < NUM_TX_QUEUES; i++)
5313 for (j = 0; j < WL12XX_MAX_LINKS; j++)
5314 skb_queue_head_init(&wl->links[j].tx_queue[i]);
5316 skb_queue_head_init(&wl->deferred_rx_queue);
5317 skb_queue_head_init(&wl->deferred_tx_queue);
5319 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
5320 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
5321 INIT_WORK(&wl->tx_work, wl1271_tx_work);
5322 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5323 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
5324 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
5326 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5327 if (!wl->freezable_wq) {
5332 wl->channel = WL1271_DEFAULT_CHANNEL;
5334 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5335 wl->band = IEEE80211_BAND_2GHZ;
5337 wl->sg_enabled = true;
5340 wl->ap_fw_ps_map = 0;
5342 wl->platform_quirks = 0;
5343 wl->sched_scanning = false;
5344 wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
5345 wl->system_hlid = WL12XX_SYSTEM_HLID;
5346 wl->active_sta_count = 0;
5348 init_waitqueue_head(&wl->fwlog_waitq);
5350 /* The system link is always allocated */
5351 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5353 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5354 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
5355 wl->tx_frames[i] = NULL;
5357 spin_lock_init(&wl->wl_lock);
5359 wl->state = WL1271_STATE_OFF;
5360 wl->fw_type = WL12XX_FW_TYPE_NONE;
5361 mutex_init(&wl->mutex);
5363 /* Apply default driver configuration. */
5364 wl1271_conf_init(wl);
5366 order = get_order(WL1271_AGGR_BUFFER_SIZE);
5367 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5368 if (!wl->aggr_buf) {
5373 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5374 if (!wl->dummy_packet) {
5379 /* Allocate one page for the FW log */
5380 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
5383 goto err_dummy_packet;
/* error unwind: free in reverse order of allocation */
5389 dev_kfree_skb(wl->dummy_packet);
5392 free_pages((unsigned long)wl->aggr_buf, order);
5395 destroy_workqueue(wl->freezable_wq);
5398 wl1271_debugfs_exit(wl);
5399 ieee80211_free_hw(hw);
5403 return ERR_PTR(ret);
/*
 * Free all resources allocated by wl1271_alloc_hw().  Invalidates the
 * FW log first (fwlog_size = -1) and wakes blocked sysfs readers so
 * they exit before the buffers disappear.
 */
5406 static int wl1271_free_hw(struct wl1271 *wl)
5408 /* Unblock any fwlog readers */
5409 mutex_lock(&wl->mutex);
5410 wl->fwlog_size = -1;
5411 wake_up_interruptible_all(&wl->fwlog_waitq);
5412 mutex_unlock(&wl->mutex);
5414 device_remove_bin_file(wl->dev, &fwlog_attr);
5416 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5418 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5419 free_page((unsigned long)wl->fwlog);
5420 dev_kfree_skb(wl->dummy_packet);
5421 free_pages((unsigned long)wl->aggr_buf,
5422 get_order(WL1271_AGGR_BUFFER_SIZE));
5424 wl1271_debugfs_exit(wl);
5428 wl->fw_type = WL12XX_FW_TYPE_NONE;
5432 kfree(wl->fw_status);
5433 kfree(wl->tx_res_if);
5434 destroy_workqueue(wl->freezable_wq);
5436 ieee80211_free_hw(wl->hw);
/*
 * Hard IRQ handler: completes any pending ELP wakeup, and either defers
 * the work (while suspended, marking it pending and disabling the IRQ)
 * or wakes the threaded handler via IRQ_WAKE_THREAD.
 */
5441 static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
5443 struct wl1271 *wl = cookie;
5444 unsigned long flags;
5446 wl1271_debug(DEBUG_IRQ, "IRQ");
5448 /* complete the ELP completion */
5449 spin_lock_irqsave(&wl->wl_lock, flags);
5450 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
5451 if (wl->elp_compl) {
5452 complete(wl->elp_compl);
5453 wl->elp_compl = NULL;
5456 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
5457 /* don't enqueue a work right now. mark it as pending */
5458 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
5459 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
5460 disable_irq_nosync(wl->irq);
5461 pm_wakeup_event(wl->dev, 0);
5462 spin_unlock_irqrestore(&wl->wl_lock, flags);
5465 spin_unlock_irqrestore(&wl->wl_lock, flags);
5467 return IRQ_WAKE_THREAD;
/*
 * Platform probe: allocate the hw, copy platform data (IRQ, clocks,
 * quirks, power callback, bus ops), request the threaded IRQ, set up
 * wakeup/WoWLAN, register with mac80211 and create the sysfs files.
 * Unwinds in reverse order on failure.
 */
5470 static int __devinit wl12xx_probe(struct platform_device *pdev)
5472 struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
5473 struct ieee80211_hw *hw;
5475 unsigned long irqflags;
5478 hw = wl1271_alloc_hw();
5480 wl1271_error("can't allocate hw");
5486 wl->irq = platform_get_irq(pdev, 0);
5487 wl->ref_clock = pdata->board_ref_clock;
5488 wl->tcxo_clock = pdata->board_tcxo_clock;
5489 wl->platform_quirks = pdata->platform_quirks;
5490 wl->set_power = pdata->set_power;
5491 wl->dev = &pdev->dev;
5492 wl->if_ops = pdata->ops;
5494 platform_set_drvdata(pdev, wl);
/* edge-triggered IRQ quirk vs. the default level-triggered oneshot */
5496 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
5497 irqflags = IRQF_TRIGGER_RISING;
5499 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
5501 ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wl1271_irq,
5505 wl1271_error("request_irq() failed: %d", ret);
5509 ret = enable_irq_wake(wl->irq);
5511 wl->irq_wake_enabled = true;
5512 device_init_wakeup(wl->dev, 1);
5513 if (pdata->pwr_in_suspend)
5514 hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
/* keep the IRQ disabled until the interface is started */
5517 disable_irq(wl->irq);
5519 ret = wl1271_init_ieee80211(wl);
5523 ret = wl1271_register_hw(wl);
5527 /* Create sysfs file to control bt coex state */
5528 ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
5530 wl1271_error("failed to create sysfs file bt_coex_state");
5534 /* Create sysfs file to get HW PG version */
5535 ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
5537 wl1271_error("failed to create sysfs file hw_pg_ver");
5538 goto out_bt_coex_state;
5541 /* Create sysfs file for the FW log */
5542 ret = device_create_bin_file(wl->dev, &fwlog_attr);
5544 wl1271_error("failed to create sysfs file fwlog");
/* error unwind labels: remove sysfs files and free the IRQ */
5551 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5554 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5557 free_irq(wl->irq, wl);
/*
 * Platform remove: disable IRQ wakeup if it was enabled, unregister
 * from mac80211 and release the IRQ.
 */
5566 static int __devexit wl12xx_remove(struct platform_device *pdev)
5568 struct wl1271 *wl = platform_get_drvdata(pdev);
5570 if (wl->irq_wake_enabled) {
5571 device_init_wakeup(wl->dev, 0);
5572 disable_irq_wake(wl->irq);
5574 wl1271_unregister_hw(wl);
5575 free_irq(wl->irq, wl);
5581 static const struct platform_device_id wl12xx_id_table[] __devinitconst = {
5583 { } /* Terminating Entry */
5585 MODULE_DEVICE_TABLE(platform, wl12xx_id_table);
5587 static struct platform_driver wl12xx_driver = {
5588 .probe = wl12xx_probe,
5589 .remove = __devexit_p(wl12xx_remove),
5590 .id_table = wl12xx_id_table,
5592 .name = "wl12xx_driver",
5593 .owner = THIS_MODULE,
5597 static int __init wl12xx_init(void)
5599 return platform_driver_register(&wl12xx_driver);
5601 module_init(wl12xx_init);
5603 static void __exit wl12xx_exit(void)
5605 platform_driver_unregister(&wl12xx_driver);
5607 module_exit(wl12xx_exit);
/* module parameters: runtime debug level, FW logger mode, recovery BUG */
5609 u32 wl12xx_debug_level = DEBUG_NONE;
5610 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
5611 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
5612 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
5614 module_param_named(fwlog, fwlog_param, charp, 0);
5615 MODULE_PARM_DESC(fwlog,
5616 "FW logger options: continuous, ondemand, dbgpins or disable");
5618 module_param(bug_on_recovery, bool, S_IRUSR | S_IWUSR);
5619 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
5621 MODULE_LICENSE("GPL");
5622 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
5623 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");