3 * This file is part of wlcore
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/wl12xx.h>
29 #include <linux/interrupt.h>
33 #include "wl12xx_80211.h"
/* How many times to retry booting the firmware before giving up. */
44 #define WL1271_BOOT_RETRIES 3
/*
 * Module parameters. -1 (or NULL for fwlog_param) means "not set on the
 * command line"; unset values leave the per-chip defaults untouched when
 * wlcore_adjust_conf() applies them.
 */
46 static char *fwlog_param;
47 static int fwlog_mem_blocks = -1;
48 static int bug_on_recovery = -1;
49 static int no_recovery = -1;
/* Forward declarations for helpers defined later in this file. */
51 static void __wl1271_op_remove_interface(struct wl1271 *wl,
52 struct ieee80211_vif *vif,
53 bool reset_tx_queues);
54 static void wlcore_op_stop_locked(struct wl1271 *wl);
55 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
/*
 * Inform the FW that the station on this vif is authorized.
 * Only meaningful for STA-type interfaces; bails out unless the vif is
 * already associated, and the set_peer_state command is issued at most
 * once per association (guarded by the STA_STATE_SENT flag).
 */
57 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
61 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
64 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
	/* test_and_set: only the first caller after association proceeds */
67 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
70 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
74 wl1271_info("Association completed.");
/*
 * cfg80211 regulatory-change notifier. Walks the 5 GHz band and marks
 * every radar channel as no-initiating-radiation (NO_IR), then pushes
 * the updated regulatory domain to the firmware.
 */
78 static void wl1271_reg_notify(struct wiphy *wiphy,
79 struct regulatory_request *request)
81 struct ieee80211_supported_band *band;
82 struct ieee80211_channel *ch;
84 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
85 struct wl1271 *wl = hw->priv;
87 band = wiphy->bands[IEEE80211_BAND_5GHZ];
88 for (i = 0; i < band->n_channels; i++) {
89 ch = &band->channels[i];
	/* disabled channels need no flag adjustment */
90 if (ch->flags & IEEE80211_CHAN_DISABLED)
93 if (ch->flags & IEEE80211_CHAN_RADAR)
94 ch->flags |= IEEE80211_CHAN_NO_IR;
	/* hand the resulting channel map to the FW */
98 wlcore_regdomain_config(wl);
/*
 * Enable or disable RX streaming for @wlvif via the PS RX-streaming ACX
 * and keep the WLVIF_FLAG_RX_STREAMING_STARTED bit in sync with the
 * requested state. Caller must hold wl->mutex.
 */
101 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
106 /* we should hold wl->mutex */
107 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
112 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
114 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
120 * this function is being called when the rx_streaming interval
121 * has beed changed or rx_streaming should be disabled
123 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
126 int period = wl->conf.rx_streaming.interval;
128 /* don't reconfigure if rx_streaming is disabled */
129 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
132 /* reconfigure/disable according to new streaming_period */
	/* keep streaming only while associated and (always-on or SG active) */
134 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
135 (wl->conf.rx_streaming.always ||
136 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
137 ret = wl1271_set_rx_streaming(wl, wlvif, true);
139 ret = wl1271_set_rx_streaming(wl, wlvif, false);
140 /* don't cancel_work_sync since we might deadlock */
141 del_timer_sync(&wlvif->rx_streaming_timer);
/*
 * Deferred work: turn RX streaming on for a vif, then arm the
 * inactivity timer that will turn it back off. Runs under wl->mutex
 * and wakes the chip from ELP for the ACX command.
 */
147 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
150 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
151 rx_streaming_enable_work);
152 struct wl1271 *wl = wlvif->wl;
154 mutex_lock(&wl->mutex);
	/* nothing to do if already started, not associated, or streaming
	 * is only wanted under Soft Gemini and SG is not active */
156 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
157 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
158 (!wl->conf.rx_streaming.always &&
159 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
162 if (!wl->conf.rx_streaming.interval)
165 ret = wl1271_ps_elp_wakeup(wl);
169 ret = wl1271_set_rx_streaming(wl, wlvif, true);
173 /* stop it after some time of inactivity */
174 mod_timer(&wlvif->rx_streaming_timer,
175 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
178 wl1271_ps_elp_sleep(wl);
180 mutex_unlock(&wl->mutex);
/*
 * Deferred work: turn RX streaming off for a vif (queued by the
 * inactivity timer below). Runs under wl->mutex, waking the chip
 * from ELP around the ACX command.
 */
183 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
186 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
187 rx_streaming_disable_work);
188 struct wl1271 *wl = wlvif->wl;
190 mutex_lock(&wl->mutex);
192 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
195 ret = wl1271_ps_elp_wakeup(wl);
199 ret = wl1271_set_rx_streaming(wl, wlvif, false);
204 wl1271_ps_elp_sleep(wl);
206 mutex_unlock(&wl->mutex);
/*
 * RX-streaming inactivity timer (timer context): just queue the
 * disable work; the actual FW command must run in process context.
 */
209 static void wl1271_rx_streaming_timer(unsigned long data)
211 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
212 struct wl1271 *wl = wlvif->wl;
213 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
216 /* wl->mutex must be taken */
217 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
219 /* if the watchdog is not armed, don't do anything */
220 if (wl->tx_allocated_blocks == 0)
	/* restart the timeout window from now */
223 cancel_delayed_work(&wl->tx_watchdog_work);
224 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
225 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
/*
 * Tx watchdog: fires when no FW Tx completions arrived for the whole
 * tx_watchdog_timeout window while blocks are still allocated. If a
 * benign explanation exists (ROC, scan, AP buffering for PS stations)
 * the timer is re-armed; otherwise FW recovery is triggered.
 */
228 static void wl12xx_tx_watchdog_work(struct work_struct *work)
230 struct delayed_work *dwork;
233 dwork = container_of(work, struct delayed_work, work);
234 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
236 mutex_lock(&wl->mutex);
238 if (unlikely(wl->state != WLCORE_STATE_ON))
241 /* Tx went out in the meantime - everything is ok */
242 if (unlikely(wl->tx_allocated_blocks == 0))
246 * if a ROC is in progress, we might not have any Tx for a long
247 * time (e.g. pending Tx on the non-ROC channels)
249 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
250 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
251 wl->conf.tx.tx_watchdog_timeout);
252 wl12xx_rearm_tx_watchdog_locked(wl);
257 * if a scan is in progress, we might not have any Tx for a long
260 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
261 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
262 wl->conf.tx.tx_watchdog_timeout);
263 wl12xx_rearm_tx_watchdog_locked(wl);
268 * AP might cache a frame for a long time for a sleeping station,
269 * so rearm the timer if there's an AP interface with stations. If
270 * Tx is genuinely stuck we will most hopefully discover it when all
271 * stations are removed due to inactivity.
273 if (wl->active_sta_count) {
274 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
276 wl->conf.tx.tx_watchdog_timeout,
277 wl->active_sta_count);
278 wl12xx_rearm_tx_watchdog_locked(wl);
	/* no benign explanation found - assume the FW is stuck */
282 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
283 wl->conf.tx.tx_watchdog_timeout);
284 wl12xx_queue_recovery_work(wl);
287 mutex_unlock(&wl->mutex);
/*
 * Apply command-line module parameters on top of the per-chip default
 * configuration. Parameters left at their "unset" sentinel (-1 / NULL)
 * leave the corresponding conf field untouched.
 */
290 static void wlcore_adjust_conf(struct wl1271 *wl)
292 /* Adjust settings according to optional module parameters */
294 /* Firmware Logger params */
295 if (fwlog_mem_blocks != -1) {
	/* clamp to the FW-supported range, warn otherwise */
296 if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
297 fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
298 wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
301 "Illegal fwlog_mem_blocks=%d using default %d",
302 fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
	/* fwlog_param selects the logger mode/output */
307 if (!strcmp(fwlog_param, "continuous")) {
308 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
309 } else if (!strcmp(fwlog_param, "ondemand")) {
310 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
311 } else if (!strcmp(fwlog_param, "dbgpins")) {
312 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
313 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
314 } else if (!strcmp(fwlog_param, "disable")) {
315 wl->conf.fwlog.mem_blocks = 0;
316 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
318 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
322 if (bug_on_recovery != -1)
323 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
325 if (no_recovery != -1)
326 wl->conf.recovery.no_recovery = (u8) no_recovery;
/*
 * Per-link host-side PS regulation (AP role): based on the FW PS bitmap
 * and the number of packets the link holds in FW, either end or start
 * "high-level PS" (host-side buffering) for the station on @hlid.
 */
329 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
330 struct wl12xx_vif *wlvif,
	/* fw_ps: is this link marked asleep in the FW PS bitmap? */
335 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
338 * Wake up from high level PS if the STA is asleep with too little
339 * packets in FW or if the STA is awake.
341 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
342 wl12xx_ps_link_end(wl, wlvif, hlid);
345 * Start high-level PS if the STA is asleep with enough blocks in FW.
346 * Make an exception if this is the only connected link. In this
347 * case FW-memory congestion is less of a problem.
348 * Note that a single connected STA means 3 active links, since we must
349 * account for the global and broadcast AP links. The "fw_ps" check
350 * assures us the third link is a STA connected to the AP. Otherwise
351 * the FW would not set the PSM bit.
353 else if (wl->active_link_count > 3 && fw_ps &&
354 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
355 wl12xx_ps_link_start(wl, wlvif, hlid, true);
/*
 * Refresh the cached FW link-PS bitmap from the FW status block and
 * re-run PS regulation for every station link of this AP vif.
 */
358 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
359 struct wl12xx_vif *wlvif,
360 struct wl_fw_status_2 *status)
365 cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
366 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
367 wl1271_debug(DEBUG_PSM,
368 "link ps prev 0x%x cur 0x%x changed 0x%x",
369 wl->ap_fw_ps_map, cur_fw_ps_map,
370 wl->ap_fw_ps_map ^ cur_fw_ps_map);
372 wl->ap_fw_ps_map = cur_fw_ps_map;
	/* regulate each connected station link individually */
375 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS)
376 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
377 wl->links[hlid].allocated_pkts);
/*
 * Read the FW status block from the chip and update all host-side
 * bookkeeping derived from it: per-queue/per-link freed-packet
 * counters, total freed Tx blocks, available Tx blocks, the Tx
 * watchdog, AP link PS status and the host/FW time offset.
 * Counters from FW are free-running, so deltas are computed with
 * wrap-around masks.
 */
380 static int wlcore_fw_status(struct wl1271 *wl,
381 struct wl_fw_status_1 *status_1,
382 struct wl_fw_status_2 *status_2)
384 struct wl12xx_vif *wlvif;
386 u32 old_tx_blk_count = wl->tx_blocks_available;
387 int avail, freed_blocks;
391 struct wl1271_link *lnk;
	/* status_1 is variable-length (depends on num_rx_desc) and is
	 * followed in memory by status_2 plus chip-private data */
393 status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
394 sizeof(*status_2) + wl->fw_status_priv_len;
396 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
401 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
402 "drv_rx_counter = %d, tx_results_counter = %d)",
404 status_1->fw_rx_counter,
405 status_1->drv_rx_counter,
406 status_1->tx_results_counter);
408 for (i = 0; i < NUM_TX_QUEUES; i++) {
409 /* prevent wrap-around in freed-packets counter */
410 wl->tx_allocated_pkts[i] -=
411 (status_2->counters.tx_released_pkts[i] -
412 wl->tx_pkts_freed[i]) & 0xff;
414 wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
	/* same accounting, per active link */
418 for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) {
422 /* prevent wrap-around in freed-packets counter */
423 diff = (status_2->counters.tx_lnk_free_pkts[i] -
424 lnk->prev_freed_pkts) & 0xff;
429 lnk->allocated_pkts -= diff;
430 lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i];
432 /* accumulate the prev_freed_pkts counter */
433 lnk->total_freed_pkts += diff;
436 /* prevent wrap-around in total blocks counter */
437 if (likely(wl->tx_blocks_freed <=
438 le32_to_cpu(status_2->total_released_blks)))
439 freed_blocks = le32_to_cpu(status_2->total_released_blks) -
	/* 32-bit counter wrapped; add the full span back in */
442 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
443 le32_to_cpu(status_2->total_released_blks);
445 wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
447 wl->tx_allocated_blocks -= freed_blocks;
450 * If the FW freed some blocks:
451 * If we still have allocated blocks - re-arm the timer, Tx is
452 * not stuck. Otherwise, cancel the timer (no Tx currently).
455 if (wl->tx_allocated_blocks)
456 wl12xx_rearm_tx_watchdog_locked(wl);
458 cancel_delayed_work(&wl->tx_watchdog_work);
461 avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
464 * The FW might change the total number of TX memblocks before
465 * we get a notification about blocks being released. Thus, the
466 * available blocks calculation might yield a temporary result
467 * which is lower than the actual available blocks. Keeping in
468 * mind that only blocks that were allocated can be moved from
469 * TX to RX, tx_blocks_available should never decrease here.
471 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
474 /* if more blocks are available now, tx work can be scheduled */
475 if (wl->tx_blocks_available > old_tx_blk_count)
476 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
478 /* for AP update num of allocated TX blocks per link and ps status */
479 wl12xx_for_each_wlvif_ap(wl, wlvif) {
480 wl12xx_irq_update_links_status(wl, wlvif, status_2);
483 /* update the host-chipset time offset */
485 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
486 (s64)le32_to_cpu(status_2->fw_localtime);
488 wl->fw_fast_lnk_map = le32_to_cpu(status_2->link_fast_bitmap);
/*
 * Drain both deferred skb queues into mac80211: received frames go up
 * the RX path, completed transmissions get their tx-status reported.
 */
493 static void wl1271_flush_deferred_work(struct wl1271 *wl)
497 /* Pass all received frames to the network stack */
498 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
499 ieee80211_rx_ni(wl->hw, skb);
501 /* Return sent skbs to the network stack */
502 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
503 ieee80211_tx_status_ni(wl->hw, skb);
/*
 * Work item that flushes the deferred RX/TX queues, looping until the
 * deferred RX queue stays empty (new frames may be queued while the
 * flush runs).
 */
506 static void wl1271_netstack_work(struct work_struct *work)
509 container_of(work, struct wl1271, netstack_work);
512 wl1271_flush_deferred_work(wl);
513 } while (skb_queue_len(&wl->deferred_rx_queue));
/* Upper bound on interrupt-servicing iterations per invocation. */
516 #define WL1271_IRQ_MAX_LOOPS 256
/*
 * Main interrupt servicing loop, called with wl->mutex held. Reads the
 * FW status, then dispatches on the pending interrupt bits: watchdog
 * (HW/SW) triggers recovery; DATA drives the RX path, opportunistic TX
 * and completion handling; EVENT_A/B run the event mailbox handlers.
 * Iterates until no work remains or WL1271_IRQ_MAX_LOOPS is reached.
 */
518 static int wlcore_irq_locked(struct wl1271 *wl)
522 int loopcount = WL1271_IRQ_MAX_LOOPS;
524 unsigned int defer_count;
528 * In case edge triggered interrupt must be used, we cannot iterate
529 * more than once without introducing race conditions with the hardirq.
531 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
534 wl1271_debug(DEBUG_IRQ, "IRQ work");
536 if (unlikely(wl->state != WLCORE_STATE_ON))
539 ret = wl1271_ps_elp_wakeup(wl);
543 while (!done && loopcount--) {
545 * In order to avoid a race with the hardirq, clear the flag
546 * before acknowledging the chip. Since the mutex is held,
547 * wl1271_ps_elp_wakeup cannot be called concurrently.
549 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
550 smp_mb__after_clear_bit();
552 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
556 wlcore_hw_tx_immediate_compl(wl);
558 intr = le32_to_cpu(wl->fw_status_1->intr);
559 intr &= WLCORE_ALL_INTR_MASK;
565 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
566 wl1271_error("HW watchdog interrupt received! starting recovery.");
567 wl->watchdog_recovery = true;
570 /* restarting the chip. ignore any other interrupt. */
574 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
575 wl1271_error("SW watchdog interrupt received! "
576 "starting recovery.");
577 wl->watchdog_recovery = true;
580 /* restarting the chip. ignore any other interrupt. */
584 if (likely(intr & WL1271_ACX_INTR_DATA)) {
585 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
587 ret = wlcore_rx(wl, wl->fw_status_1);
591 /* Check if any tx blocks were freed */
592 spin_lock_irqsave(&wl->wl_lock, flags);
593 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
594 wl1271_tx_total_queue_count(wl) > 0) {
595 spin_unlock_irqrestore(&wl->wl_lock, flags);
597 * In order to avoid starvation of the TX path,
598 * call the work function directly.
600 ret = wlcore_tx_work_locked(wl);
604 spin_unlock_irqrestore(&wl->wl_lock, flags);
607 /* check for tx results */
608 ret = wlcore_hw_tx_delayed_compl(wl);
612 /* Make sure the deferred queues don't get too long */
613 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
614 skb_queue_len(&wl->deferred_rx_queue);
615 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
616 wl1271_flush_deferred_work(wl);
619 if (intr & WL1271_ACX_INTR_EVENT_A) {
620 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
621 ret = wl1271_event_handle(wl, 0);
626 if (intr & WL1271_ACX_INTR_EVENT_B) {
627 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
628 ret = wl1271_event_handle(wl, 1);
633 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
634 wl1271_debug(DEBUG_IRQ,
635 "WL1271_ACX_INTR_INIT_COMPLETE");
637 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
638 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
641 wl1271_ps_elp_sleep(wl);
/*
 * Threaded IRQ handler. Completes a pending ELP wakeup, defers the
 * work while suspended (marking it pending and disabling the irq),
 * otherwise services interrupts via wlcore_irq_locked() under
 * wl->mutex, queueing recovery on failure and re-queueing TX work if
 * frames are still waiting.
 */
647 static irqreturn_t wlcore_irq(int irq, void *cookie)
651 struct wl1271 *wl = cookie;
653 /* complete the ELP completion */
654 spin_lock_irqsave(&wl->wl_lock, flags);
655 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
657 complete(wl->elp_compl);
658 wl->elp_compl = NULL;
661 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
662 /* don't enqueue a work right now. mark it as pending */
663 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
664 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
665 disable_irq_nosync(wl->irq);
666 pm_wakeup_event(wl->dev, 0);
667 spin_unlock_irqrestore(&wl->wl_lock, flags);
670 spin_unlock_irqrestore(&wl->wl_lock, flags);
672 /* TX might be handled here, avoid redundant work */
673 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
674 cancel_work_sync(&wl->tx_work);
676 mutex_lock(&wl->mutex);
678 ret = wlcore_irq_locked(wl);
680 wl12xx_queue_recovery_work(wl);
682 spin_lock_irqsave(&wl->wl_lock, flags);
683 /* In case TX was not handled here, queue TX work */
684 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
685 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
686 wl1271_tx_total_queue_count(wl) > 0)
687 ieee80211_queue_work(wl->hw, &wl->tx_work);
688 spin_unlock_irqrestore(&wl->wl_lock, flags);
690 mutex_unlock(&wl->mutex);
/*
 * Accumulator used when iterating active interfaces: tracks whether
 * @cur_vif was seen among the running vifs (and, presumably, a vif
 * count on an elided field - NOTE(review): count member not visible
 * in this chunk).
 */
695 struct vif_counter_data {
698 struct ieee80211_vif *cur_vif;
699 bool cur_vif_running;
/*
 * Iterator callback for ieee80211_iterate_active_interfaces(): marks
 * the tracked vif as running when encountered.
 */
702 static void wl12xx_vif_count_iter(void *data, u8 *mac,
703 struct ieee80211_vif *vif)
705 struct vif_counter_data *counter = data;
708 if (counter->cur_vif == vif)
709 counter->cur_vif_running = true;
712 /* caller must not hold wl->mutex, as it might deadlock */
713 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
714 struct ieee80211_vif *cur_vif,
715 struct vif_counter_data *data)
	/* reset the accumulator, then walk all active interfaces */
717 memset(data, 0, sizeof(*data));
718 data->cur_vif = cur_vif;
720 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
721 wl12xx_vif_count_iter, data);
/*
 * Select and load the firmware image into wl->fw: PLT firmware when
 * @plt, otherwise multi-role or single-role firmware depending on the
 * cached vif count. Skips the load entirely if the right image type is
 * already resident. The image is copied into vmalloc'ed memory and the
 * kernel firmware handle released.
 */
724 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
726 const struct firmware *fw;
728 enum wl12xx_fw_type fw_type;
732 fw_type = WL12XX_FW_TYPE_PLT;
733 fw_name = wl->plt_fw_name;
736 * we can't call wl12xx_get_vif_count() here because
737 * wl->mutex is taken, so use the cached last_vif_count value
739 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
740 fw_type = WL12XX_FW_TYPE_MULTI;
741 fw_name = wl->mr_fw_name;
743 fw_type = WL12XX_FW_TYPE_NORMAL;
744 fw_name = wl->sr_fw_name;
	/* correct image already loaded - nothing to do */
748 if (wl->fw_type == fw_type)
751 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
753 ret = request_firmware(&fw, fw_name, wl->dev);
756 wl1271_error("could not get firmware %s: %d", fw_name, ret);
761 wl1271_error("firmware size is not multiple of 32 bits: %zu",
	/* invalidate the type until the copy below fully succeeds */
768 wl->fw_type = WL12XX_FW_TYPE_NONE;
769 wl->fw_len = fw->size;
770 wl->fw = vmalloc(wl->fw_len);
773 wl1271_error("could not allocate memory for the firmware");
778 memcpy(wl->fw, fw->data, wl->fw_len);
780 wl->fw_type = fw_type;
782 release_firmware(fw);
/*
 * Kick off FW recovery: mark the device as restarting, wake it from
 * ELP, mask further interrupts and queue the recovery work. The state
 * check makes re-entry a no-op so recovery cannot recurse.
 */
787 void wl12xx_queue_recovery_work(struct wl1271 *wl)
789 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
791 /* Avoid a recursive recovery */
792 if (wl->state == WLCORE_STATE_ON) {
793 wl->state = WLCORE_STATE_RESTARTING;
794 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
795 wl1271_ps_elp_wakeup(wl);
796 wlcore_disable_interrupts_nosync(wl);
797 ieee80211_queue_work(wl->hw, &wl->recovery_work);
/*
 * Append up to @maxlen bytes from @memblock to the in-memory FW log
 * buffer (capped so wl->fwlog never exceeds PAGE_SIZE). The buffer is
 * exported through the sysfs fwlog entry.
 */
801 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
805 /* Make sure we have enough room */
806 len = min(maxlen, (size_t)(PAGE_SIZE - wl->fwlog_size));
808 /* Fill the FW log file, consumed by the sysfs fwlog entry */
809 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
810 wl->fwlog_size += len;
/*
 * On FW panic, pull the firmware log out of chip memory. Walks the
 * linked list of FW memory blocks (each block's first 4 bytes hold the
 * address of the next), switching the IO partition to each block in
 * turn, copying the payload into the host log buffer, and finally
 * restoring the original partition and waking sysfs readers.
 */
815 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
817 struct wlcore_partition_set part, old_part;
	/* bail out if the fwlog feature is unavailable or disabled */
824 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
825 (wl->conf.fwlog.mem_blocks == 0))
828 wl1271_info("Reading FW panic log");
830 block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
835 * Make sure the chip is awake and the logger isn't active.
836 * Do not send a stop fwlog command if the fw is hanged or if
837 * dbgpins are used (due to some fw bug).
839 if (wl1271_ps_elp_wakeup(wl))
841 if (!wl->watchdog_recovery &&
842 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
843 wl12xx_cmd_stop_fwlog(wl);
845 /* Read the first memory block address */
846 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
850 addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
	/* continuous mode skips the next-block pointer plus an RX
	 * descriptor; on-demand mode skips only the pointer */
854 if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
855 offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
856 end_of_log = wl->fwlog_end;
858 offset = sizeof(addr);
862 old_part = wl->curr_part;
863 memset(&part, 0, sizeof(part));
865 /* Traverse the memory blocks linked list */
867 part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
868 part.mem.size = PAGE_SIZE;
870 ret = wlcore_set_partition(wl, &part);
872 wl1271_error("%s: set_partition start=0x%X size=%d",
873 __func__, part.mem.start, part.mem.size);
877 memset(block, 0, wl->fw_mem_block_size);
878 ret = wlcore_read_hwaddr(wl, addr, block,
879 wl->fw_mem_block_size, false);
885 * Memory blocks are linked to one another. The first 4 bytes
886 * of each memory block hold the hardware address of the next
887 * one. The last memory block points to the first one in
888 * on demand mode and is equal to 0x2000000 in continuous mode.
890 addr = le32_to_cpup((__le32 *)block);
	/* stop if the host log buffer filled up */
892 if (!wl12xx_copy_fwlog(wl, block + offset,
893 wl->fw_mem_block_size - offset))
895 } while (addr && (addr != end_of_log));
	/* wake any reader blocked on the sysfs fwlog entry */
897 wake_up_interruptible(&wl->fwlog_waitq);
901 wlcore_set_partition(wl, &old_part);
/*
 * Log diagnostic state for a recovery: FW version, the FW program
 * counter and interrupt status registers (read via the BOOT partition),
 * and the running recovery count. Restores the WORK partition when
 * done.
 */
904 static void wlcore_print_recovery(struct wl1271 *wl)
910 wl1271_info("Hardware recovery in progress. FW ver: %s",
911 wl->chip.fw_ver_str);
913 /* change partitions momentarily so we can read the FW pc */
914 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
918 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
922 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
926 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
927 pc, hint_sts, ++wl->recovery_count);
929 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
/*
 * Recovery work: dump FW panic diagnostics (unless the recovery was
 * intentional), honor the bug_on_recovery/no_recovery knobs, then tear
 * down every vif, stop the hardware and ask mac80211 to restart it.
 * TX queues are stopped across the restart to suppress spurious frames.
 */
933 static void wl1271_recovery_work(struct work_struct *work)
936 container_of(work, struct wl1271, recovery_work);
937 struct wl12xx_vif *wlvif;
938 struct ieee80211_vif *vif;
940 mutex_lock(&wl->mutex);
942 if (wl->state == WLCORE_STATE_OFF || wl->plt)
	/* unintended recovery: capture FW panic log and chip state */
945 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
946 if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
947 wl12xx_read_fwlog_panic(wl);
948 wlcore_print_recovery(wl);
951 BUG_ON(wl->conf.recovery.bug_on_recovery &&
952 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
954 if (wl->conf.recovery.no_recovery) {
955 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
959 /* Prevent spurious TX during FW restart */
960 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
962 /* reboot the chipset */
963 while (!list_empty(&wl->wlvif_list)) {
964 wlvif = list_first_entry(&wl->wlvif_list,
965 struct wl12xx_vif, list);
966 vif = wl12xx_wlvif_to_vif(wlvif);
967 __wl1271_op_remove_interface(wl, vif, false);
970 wlcore_op_stop_locked(wl);
972 ieee80211_restart_hw(wl->hw);
975 * Its safe to enable TX now - the queues are stopped after a request
978 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
981 wl->watchdog_recovery = false;
982 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
983 mutex_unlock(&wl->mutex);
/* Wake the firmware from ELP by writing WAKE_UP to the ELP control reg. */
986 static int wlcore_fw_wakeup(struct wl1271 *wl)
988 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
/*
 * Allocate the FW status buffers and the TX result interface struct.
 * fw_status_1 and fw_status_2 share one allocation; fw_status_2 points
 * just past the variable-length status_1 region. On tx_res_if
 * allocation failure the status buffer is freed again.
 */
991 static int wl1271_setup(struct wl1271 *wl)
993 wl->fw_status_1 = kzalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
994 sizeof(*wl->fw_status_2) +
995 wl->fw_status_priv_len, GFP_KERNEL);
996 if (!wl->fw_status_1)
999 wl->fw_status_2 = (struct wl_fw_status_2 *)
1000 (((u8 *) wl->fw_status_1) +
1001 WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
1003 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1004 if (!wl->tx_res_if) {
1005 kfree(wl->fw_status_1);
/*
 * Power the chip on: sleep before/after the power transition per the
 * chip's timing requirements, reset the IO, select the BOOT partition
 * and wake the ELP module. Powers the chip back off on failure.
 */
1012 static int wl12xx_set_power_on(struct wl1271 *wl)
1016 msleep(WL1271_PRE_POWER_ON_SLEEP);
1017 ret = wl1271_power_on(wl);
1020 msleep(WL1271_POWER_ON_SLEEP);
1021 wl1271_io_reset(wl);
1024 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1028 /* ELP module wake up */
1029 ret = wlcore_fw_wakeup(wl);
	/* error path: undo the power-on */
1037 wl1271_power_off(wl);
/*
 * Bring the chip up to the point where firmware can be booted: power
 * on, configure the bus block size (dropping the TX blocksize-align
 * quirk when the bus can't do it), allocate host buffers and fetch the
 * appropriate firmware image (@plt selects the PLT image).
 */
1041 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1045 ret = wl12xx_set_power_on(wl);
1050 * For wl127x based devices we could use the default block
1051 * size (512 bytes), but due to a bug in the sdio driver, we
1052 * need to set it explicitly after the chip is powered on. To
1053 * simplify the code and since the performance impact is
1054 * negligible, we use the same block size for all different
1057 * Check if the bus supports blocksize alignment and, if it
1058 * doesn't, make sure we don't have the quirk.
1060 if (!wl1271_set_block_size(wl))
1061 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1063 /* TODO: make sure the lower driver has set things up correctly */
1065 ret = wl1271_setup(wl);
1069 ret = wl12xx_fetch_firmware(wl, plt);
/*
 * Enter PLT (production-line testing) mode. Only allowed from the OFF
 * state. Boots the chip (retrying up to WL1271_BOOT_RETRIES times),
 * runs the chip-specific PLT init unless only chip-awake was requested,
 * and publishes the HW/FW version in the wiphy on success.
 */
1077 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1079 int retries = WL1271_BOOT_RETRIES;
1080 struct wiphy *wiphy = wl->hw->wiphy;
	/* human-readable names for the PLT modes, used in the log below */
1082 static const char* const PLT_MODE[] = {
1091 mutex_lock(&wl->mutex);
1093 wl1271_notice("power up");
1095 if (wl->state != WLCORE_STATE_OFF) {
1096 wl1271_error("cannot go into PLT state because not "
1097 "in off state: %d", wl->state);
1102 /* Indicate to lower levels that we are now in PLT mode */
1104 wl->plt_mode = plt_mode;
1108 ret = wl12xx_chip_wakeup(wl, true);
	/* PLT_CHIP_AWAKE skips full PLT firmware initialization */
1112 if (plt_mode != PLT_CHIP_AWAKE) {
1113 ret = wl->ops->plt_init(wl);
1118 wl->state = WLCORE_STATE_ON;
1119 wl1271_notice("firmware booted in PLT mode %s (%s)",
1121 wl->chip.fw_ver_str);
1123 /* update hw/fw version info in wiphy struct */
1124 wiphy->hw_version = wl->chip.id;
1125 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1126 sizeof(wiphy->fw_version));
	/* boot attempt failed: power off and retry, or give up */
1131 wl1271_power_off(wl);
1135 wl->plt_mode = PLT_OFF;
1137 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1138 WL1271_BOOT_RETRIES);
1140 mutex_unlock(&wl->mutex);
/*
 * Leave PLT mode and power the chip down. Interrupts are disabled
 * before the state change (and re-balanced afterwards), all pending
 * works are flushed/cancelled with the mutex dropped, then the chip is
 * powered off and the driver state reset to OFF.
 */
1145 int wl1271_plt_stop(struct wl1271 *wl)
1149 wl1271_notice("power down");
1152 * Interrupts must be disabled before setting the state to OFF.
1153 * Otherwise, the interrupt handler might be called and exit without
1154 * reading the interrupt status.
1156 wlcore_disable_interrupts(wl);
1157 mutex_lock(&wl->mutex);
1159 mutex_unlock(&wl->mutex);
1162 * This will not necessarily enable interrupts as interrupts
1163 * may have been disabled when op_stop was called. It will,
1164 * however, balance the above call to disable_interrupts().
1166 wlcore_enable_interrupts(wl);
1168 wl1271_error("cannot power down because not in PLT "
1169 "state: %d", wl->state);
1174 mutex_unlock(&wl->mutex);
	/* flush/cancel all deferred work with the mutex released */
1176 wl1271_flush_deferred_work(wl);
1177 cancel_work_sync(&wl->netstack_work);
1178 cancel_work_sync(&wl->recovery_work);
1179 cancel_delayed_work_sync(&wl->elp_work);
1180 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1182 mutex_lock(&wl->mutex);
1183 wl1271_power_off(wl);
1185 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1186 wl->state = WLCORE_STATE_OFF;
1188 wl->plt_mode = PLT_OFF;
1190 mutex_unlock(&wl->mutex);
/*
 * mac80211 .tx callback. Maps the skb to a HW link id (hlid) and AC
 * queue, enqueues it on the per-link queue under wl_lock, applies the
 * high-watermark backpressure, and schedules the TX work unless TX is
 * already being handled (FW busy or TX pending in the irq thread).
 * Frames with no vif, an invalid/stale link, or a hard-stopped queue
 * are dropped.
 */
1196 static void wl1271_op_tx(struct ieee80211_hw *hw,
1197 struct ieee80211_tx_control *control,
1198 struct sk_buff *skb)
1200 struct wl1271 *wl = hw->priv;
1201 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1202 struct ieee80211_vif *vif = info->control.vif;
1203 struct wl12xx_vif *wlvif = NULL;
1204 unsigned long flags;
1209 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1210 ieee80211_free_txskb(hw, skb);
1214 wlvif = wl12xx_vif_to_data(vif);
1215 mapping = skb_get_queue_mapping(skb);
1216 q = wl1271_tx_get_queue(mapping);
1218 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1220 spin_lock_irqsave(&wl->wl_lock, flags);
1223 * drop the packet if the link is invalid or the queue is stopped
1224 * for any reason but watermark. Watermark is a "soft"-stop so we
1225 * allow these packets through.
1227 if (hlid == WL12XX_INVALID_LINK_ID ||
1228 (!test_bit(hlid, wlvif->links_map)) ||
1229 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1230 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1231 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1232 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1233 ieee80211_free_txskb(hw, skb);
1237 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1239 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
	/* per-device and per-vif queue depth accounting */
1241 wl->tx_queue_count[q]++;
1242 wlvif->tx_queue_count[q]++;
1245 * The workqueue is slow to process the tx_queue and we need stop
1246 * the queue here, otherwise the queue will get too long.
1248 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1249 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1250 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1251 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1252 wlcore_stop_queue_locked(wl, wlvif, q,
1253 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1257 * The chip specific setup must run before the first TX packet -
1258 * before that, the tx_work will not be initialized!
1261 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1262 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1263 ieee80211_queue_work(wl->hw, &wl->tx_work);
1266 spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * Queue the preallocated dummy packet (used to feed the FW when it is
 * low on RX memory blocks). Only one dummy packet may be pending at a
 * time; if the FW TX path is idle the TX work is run inline so the
 * packet goes out as soon as possible.
 */
1269 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1271 unsigned long flags;
1274 /* no need to queue a new dummy packet if one is already pending */
1275 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1278 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1280 spin_lock_irqsave(&wl->wl_lock, flags);
1281 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1282 wl->tx_queue_count[q]++;
1283 spin_unlock_irqrestore(&wl->wl_lock, flags);
1285 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1286 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1287 return wlcore_tx_work_locked(wl);
1290 * If the FW TX is busy, TX work will be scheduled by the threaded
1291 * interrupt handler function
1297 * The size of the dummy packet should be at least 1400 bytes. However, in
1298 * order to minimize the number of bus transactions, aligning it to 512 bytes
1299 * boundaries could be beneficial, performance wise
1301 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
/*
 * Build the dummy packet skb: a zeroed NULL-function data frame (ToDS)
 * padded out to TOTAL_TX_DUMMY_PACKET_SIZE, with room reserved for the
 * TX HW descriptor, management TID, and cleared CB/queue mapping.
 */
1303 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1305 struct sk_buff *skb;
1306 struct ieee80211_hdr_3addr *hdr;
1307 unsigned int dummy_packet_size;
	/* payload size = total - HW descriptor - 802.11 header */
1309 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1310 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1312 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1314 wl1271_warning("Failed to allocate a dummy packet skb");
1318 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1320 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1321 memset(hdr, 0, sizeof(*hdr));
1322 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1323 IEEE80211_STYPE_NULLFUNC |
1324 IEEE80211_FCTL_TODS);
1326 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1328 /* Dummy packets require the TID to be management */
1329 skb->priority = WL1271_TID_MGMT;
1331 /* Initialize all fields that might be used */
1332 skb_set_queue_mapping(skb, 0);
1333 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
/*
 * Validate that a WoWLAN packet pattern can be expressed within the FW
 * RX-filter limits. Consecutive masked bytes form a "field"; crossing
 * the Ethernet-header boundary forces a new field. Rejects patterns
 * with no mask, too many fields, or a total fields buffer that is too
 * large.
 */
1341 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1343 int num_fields = 0, in_field = 0, fields_size = 0;
1344 int i, pattern_len = 0;
1347 wl1271_warning("No mask in WoWLAN pattern");
1352 * The pattern is broken up into segments of bytes at different offsets
1353 * that need to be checked by the FW filter. Each segment is called
1354 * a field in the FW API. We verify that the total number of fields
1355 * required for this pattern won't exceed FW limits (8)
1356 * as well as the total fields buffer won't exceed the FW limit.
1357 * Note that if there's a pattern which crosses Ethernet/IP header
1358 * boundary a new field is required.
1360 for (i = 0; i < p->pattern_len; i++) {
1361 if (test_bit(i, (unsigned long *)p->mask)) {
	/* a field straddling the Ethernet header is split in two */
1366 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1368 fields_size += pattern_len +
1369 RX_FILTER_FIELD_OVERHEAD;
1377 fields_size += pattern_len +
1378 RX_FILTER_FIELD_OVERHEAD;
	/* account for a field still open at the end of the pattern */
1385 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1389 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1390 wl1271_warning("RX Filter too complex. Too many segments");
1394 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1395 wl1271_warning("RX filter pattern is too big");
1402 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1404 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1407 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1414 for (i = 0; i < filter->num_fields; i++)
1415 kfree(filter->fields[i].pattern);
1420 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1421 u16 offset, u8 flags,
1422 u8 *pattern, u8 len)
1424 struct wl12xx_rx_filter_field *field;
1426 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1427 wl1271_warning("Max fields per RX filter. can't alloc another");
1431 field = &filter->fields[filter->num_fields];
1433 field->pattern = kzalloc(len, GFP_KERNEL);
1434 if (!field->pattern) {
1435 wl1271_warning("Failed to allocate RX filter pattern");
1439 filter->num_fields++;
1441 field->offset = cpu_to_le16(offset);
1442 field->flags = flags;
1444 memcpy(field->pattern, pattern, len);
1449 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1451 int i, fields_size = 0;
1453 for (i = 0; i < filter->num_fields; i++)
1454 fields_size += filter->fields[i].len +
1455 sizeof(struct wl12xx_rx_filter_field) -
1461 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1465 struct wl12xx_rx_filter_field *field;
1467 for (i = 0; i < filter->num_fields; i++) {
1468 field = (struct wl12xx_rx_filter_field *)buf;
1470 field->offset = filter->fields[i].offset;
1471 field->flags = filter->fields[i].flags;
1472 field->len = filter->fields[i].len;
1474 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1475 buf += sizeof(struct wl12xx_rx_filter_field) -
1476 sizeof(u8 *) + field->len;
1481 * Allocates an RX filter returned through f
1482 * which needs to be freed using rx_filter_free()
1485 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1486 struct wl12xx_rx_filter **f)
1489 struct wl12xx_rx_filter *filter;
1493 filter = wl1271_rx_filter_alloc();
1495 wl1271_warning("Failed to alloc rx filter");
1501 while (i < p->pattern_len) {
1502 if (!test_bit(i, (unsigned long *)p->mask)) {
1507 for (j = i; j < p->pattern_len; j++) {
1508 if (!test_bit(j, (unsigned long *)p->mask))
1511 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1512 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1516 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1518 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1520 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1521 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1526 ret = wl1271_rx_filter_alloc_field(filter,
1529 &p->pattern[i], len);
1536 filter->action = FILTER_SIGNAL;
1542 wl1271_rx_filter_free(filter);
1548 static int wl1271_configure_wowlan(struct wl1271 *wl,
1549 struct cfg80211_wowlan *wow)
1553 if (!wow || wow->any || !wow->n_patterns) {
1554 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1559 ret = wl1271_rx_filter_clear_all(wl);
1566 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1569 /* Validate all incoming patterns before clearing current FW state */
1570 for (i = 0; i < wow->n_patterns; i++) {
1571 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1573 wl1271_warning("Bad wowlan pattern %d", i);
1578 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1582 ret = wl1271_rx_filter_clear_all(wl);
1586 /* Translate WoWLAN patterns into filters */
1587 for (i = 0; i < wow->n_patterns; i++) {
1588 struct cfg80211_pkt_pattern *p;
1589 struct wl12xx_rx_filter *filter = NULL;
1591 p = &wow->patterns[i];
1593 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1595 wl1271_warning("Failed to create an RX filter from "
1596 "wowlan pattern %d", i);
1600 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1602 wl1271_rx_filter_free(filter);
1607 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1613 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1614 struct wl12xx_vif *wlvif,
1615 struct cfg80211_wowlan *wow)
1619 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1622 ret = wl1271_ps_elp_wakeup(wl);
1626 ret = wl1271_configure_wowlan(wl, wow);
1630 if ((wl->conf.conn.suspend_wake_up_event ==
1631 wl->conf.conn.wake_up_event) &&
1632 (wl->conf.conn.suspend_listen_interval ==
1633 wl->conf.conn.listen_interval))
1636 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1637 wl->conf.conn.suspend_wake_up_event,
1638 wl->conf.conn.suspend_listen_interval);
1641 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1644 wl1271_ps_elp_sleep(wl);
1650 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1651 struct wl12xx_vif *wlvif)
1655 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1658 ret = wl1271_ps_elp_wakeup(wl);
1662 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1664 wl1271_ps_elp_sleep(wl);
1670 static int wl1271_configure_suspend(struct wl1271 *wl,
1671 struct wl12xx_vif *wlvif,
1672 struct cfg80211_wowlan *wow)
1674 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1675 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1676 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1677 return wl1271_configure_suspend_ap(wl, wlvif);
1681 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1684 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1685 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1687 if ((!is_ap) && (!is_sta))
1690 if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1693 ret = wl1271_ps_elp_wakeup(wl);
1698 wl1271_configure_wowlan(wl, NULL);
1700 if ((wl->conf.conn.suspend_wake_up_event ==
1701 wl->conf.conn.wake_up_event) &&
1702 (wl->conf.conn.suspend_listen_interval ==
1703 wl->conf.conn.listen_interval))
1706 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1707 wl->conf.conn.wake_up_event,
1708 wl->conf.conn.listen_interval);
1711 wl1271_error("resume: wake up conditions failed: %d",
1715 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1719 wl1271_ps_elp_sleep(wl);
1722 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1723 struct cfg80211_wowlan *wow)
1725 struct wl1271 *wl = hw->priv;
1726 struct wl12xx_vif *wlvif;
1729 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1732 /* we want to perform the recovery before suspending */
1733 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1734 wl1271_warning("postponing suspend to perform recovery");
1738 wl1271_tx_flush(wl);
1740 mutex_lock(&wl->mutex);
1741 wl->wow_enabled = true;
1742 wl12xx_for_each_wlvif(wl, wlvif) {
1743 ret = wl1271_configure_suspend(wl, wlvif, wow);
1745 mutex_unlock(&wl->mutex);
1746 wl1271_warning("couldn't prepare device to suspend");
1750 mutex_unlock(&wl->mutex);
1751 /* flush any remaining work */
1752 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1755 * disable and re-enable interrupts in order to flush
1758 wlcore_disable_interrupts(wl);
1761 * set suspended flag to avoid triggering a new threaded_irq
1762 * work. no need for spinlock as interrupts are disabled.
1764 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1766 wlcore_enable_interrupts(wl);
1767 flush_work(&wl->tx_work);
1768 flush_delayed_work(&wl->elp_work);
1773 static int wl1271_op_resume(struct ieee80211_hw *hw)
1775 struct wl1271 *wl = hw->priv;
1776 struct wl12xx_vif *wlvif;
1777 unsigned long flags;
1778 bool run_irq_work = false, pending_recovery;
1781 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1783 WARN_ON(!wl->wow_enabled);
1786 * re-enable irq_work enqueuing, and call irq_work directly if
1787 * there is a pending work.
1789 spin_lock_irqsave(&wl->wl_lock, flags);
1790 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1791 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1792 run_irq_work = true;
1793 spin_unlock_irqrestore(&wl->wl_lock, flags);
1795 mutex_lock(&wl->mutex);
1797 /* test the recovery flag before calling any SDIO functions */
1798 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1802 wl1271_debug(DEBUG_MAC80211,
1803 "run postponed irq_work directly");
1805 /* don't talk to the HW if recovery is pending */
1806 if (!pending_recovery) {
1807 ret = wlcore_irq_locked(wl);
1809 wl12xx_queue_recovery_work(wl);
1812 wlcore_enable_interrupts(wl);
1815 if (pending_recovery) {
1816 wl1271_warning("queuing forgotten recovery on resume");
1817 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1821 wl12xx_for_each_wlvif(wl, wlvif) {
1822 wl1271_configure_resume(wl, wlvif);
1826 wl->wow_enabled = false;
1827 mutex_unlock(&wl->mutex);
1833 static int wl1271_op_start(struct ieee80211_hw *hw)
1835 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1838 * We have to delay the booting of the hardware because
1839 * we need to know the local MAC address before downloading and
1840 * initializing the firmware. The MAC address cannot be changed
1841 * after boot, and without the proper MAC address, the firmware
1842 * will not function properly.
1844 * The MAC address is first known when the corresponding interface
1845 * is added. That is where we will initialize the hardware.
1851 static void wlcore_op_stop_locked(struct wl1271 *wl)
1855 if (wl->state == WLCORE_STATE_OFF) {
1856 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1858 wlcore_enable_interrupts(wl);
1864 * this must be before the cancel_work calls below, so that the work
1865 * functions don't perform further work.
1867 wl->state = WLCORE_STATE_OFF;
1870 * Use the nosync variant to disable interrupts, so the mutex could be
1871 * held while doing so without deadlocking.
1873 wlcore_disable_interrupts_nosync(wl);
1875 mutex_unlock(&wl->mutex);
1877 wlcore_synchronize_interrupts(wl);
1878 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1879 cancel_work_sync(&wl->recovery_work);
1880 wl1271_flush_deferred_work(wl);
1881 cancel_delayed_work_sync(&wl->scan_complete_work);
1882 cancel_work_sync(&wl->netstack_work);
1883 cancel_work_sync(&wl->tx_work);
1884 cancel_delayed_work_sync(&wl->elp_work);
1885 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1887 /* let's notify MAC80211 about the remaining pending TX frames */
1888 mutex_lock(&wl->mutex);
1889 wl12xx_tx_reset(wl);
1891 wl1271_power_off(wl);
1893 * In case a recovery was scheduled, interrupts were disabled to avoid
1894 * an interrupt storm. Now that the power is down, it is safe to
1895 * re-enable interrupts to balance the disable depth
1897 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1898 wlcore_enable_interrupts(wl);
1900 wl->band = IEEE80211_BAND_2GHZ;
1903 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1904 wl->channel_type = NL80211_CHAN_NO_HT;
1905 wl->tx_blocks_available = 0;
1906 wl->tx_allocated_blocks = 0;
1907 wl->tx_results_count = 0;
1908 wl->tx_packets_count = 0;
1909 wl->time_offset = 0;
1910 wl->ap_fw_ps_map = 0;
1912 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1913 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1914 memset(wl->links_map, 0, sizeof(wl->links_map));
1915 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1916 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1917 wl->active_sta_count = 0;
1918 wl->active_link_count = 0;
1920 /* The system link is always allocated */
1921 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1922 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1923 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1926 * this is performed after the cancel_work calls and the associated
1927 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1928 * get executed before all these vars have been reset.
1932 wl->tx_blocks_freed = 0;
1934 for (i = 0; i < NUM_TX_QUEUES; i++) {
1935 wl->tx_pkts_freed[i] = 0;
1936 wl->tx_allocated_pkts[i] = 0;
1939 wl1271_debugfs_reset(wl);
1941 kfree(wl->fw_status_1);
1942 wl->fw_status_1 = NULL;
1943 wl->fw_status_2 = NULL;
1944 kfree(wl->tx_res_if);
1945 wl->tx_res_if = NULL;
1946 kfree(wl->target_mem_map);
1947 wl->target_mem_map = NULL;
1950 * FW channels must be re-calibrated after recovery,
1951 * save current Reg-Domain channel configuration and clear it.
1953 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
1954 sizeof(wl->reg_ch_conf_pending));
1955 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
1958 static void wlcore_op_stop(struct ieee80211_hw *hw)
1960 struct wl1271 *wl = hw->priv;
1962 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1964 mutex_lock(&wl->mutex);
1966 wlcore_op_stop_locked(wl);
1968 mutex_unlock(&wl->mutex);
1971 static void wlcore_channel_switch_work(struct work_struct *work)
1973 struct delayed_work *dwork;
1975 struct ieee80211_vif *vif;
1976 struct wl12xx_vif *wlvif;
1979 dwork = container_of(work, struct delayed_work, work);
1980 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
1983 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
1985 mutex_lock(&wl->mutex);
1987 if (unlikely(wl->state != WLCORE_STATE_ON))
1990 /* check the channel switch is still ongoing */
1991 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
1994 vif = wl12xx_wlvif_to_vif(wlvif);
1995 ieee80211_chswitch_done(vif, false);
1997 ret = wl1271_ps_elp_wakeup(wl);
2001 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2003 wl1271_ps_elp_sleep(wl);
2005 mutex_unlock(&wl->mutex);
2008 static void wlcore_connection_loss_work(struct work_struct *work)
2010 struct delayed_work *dwork;
2012 struct ieee80211_vif *vif;
2013 struct wl12xx_vif *wlvif;
2015 dwork = container_of(work, struct delayed_work, work);
2016 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2019 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2021 mutex_lock(&wl->mutex);
2023 if (unlikely(wl->state != WLCORE_STATE_ON))
2026 /* Call mac80211 connection loss */
2027 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2030 vif = wl12xx_wlvif_to_vif(wlvif);
2031 ieee80211_connection_loss(vif);
2033 mutex_unlock(&wl->mutex);
2036 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2038 struct delayed_work *dwork;
2040 struct wl12xx_vif *wlvif;
2041 unsigned long time_spare;
2044 dwork = container_of(work, struct delayed_work, work);
2045 wlvif = container_of(dwork, struct wl12xx_vif,
2046 pending_auth_complete_work);
2049 mutex_lock(&wl->mutex);
2051 if (unlikely(wl->state != WLCORE_STATE_ON))
2055 * Make sure a second really passed since the last auth reply. Maybe
2056 * a second auth reply arrived while we were stuck on the mutex.
2057 * Check for a little less than the timeout to protect from scheduler
2060 time_spare = jiffies +
2061 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2062 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2065 ret = wl1271_ps_elp_wakeup(wl);
2069 /* cancel the ROC if active */
2070 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2072 wl1271_ps_elp_sleep(wl);
2074 mutex_unlock(&wl->mutex);
2077 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2079 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2080 WL12XX_MAX_RATE_POLICIES);
2081 if (policy >= WL12XX_MAX_RATE_POLICIES)
2084 __set_bit(policy, wl->rate_policies_map);
2089 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2091 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2094 __clear_bit(*idx, wl->rate_policies_map);
2095 *idx = WL12XX_MAX_RATE_POLICIES;
2098 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2100 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2101 WLCORE_MAX_KLV_TEMPLATES);
2102 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2105 __set_bit(policy, wl->klv_templates_map);
2110 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2112 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2115 __clear_bit(*idx, wl->klv_templates_map);
2116 *idx = WLCORE_MAX_KLV_TEMPLATES;
2119 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2121 switch (wlvif->bss_type) {
2122 case BSS_TYPE_AP_BSS:
2124 return WL1271_ROLE_P2P_GO;
2126 return WL1271_ROLE_AP;
2128 case BSS_TYPE_STA_BSS:
2130 return WL1271_ROLE_P2P_CL;
2132 return WL1271_ROLE_STA;
2135 return WL1271_ROLE_IBSS;
2138 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2140 return WL12XX_INVALID_ROLE_TYPE;
2143 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2145 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2148 /* clear everything but the persistent data */
2149 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2151 switch (ieee80211_vif_type_p2p(vif)) {
2152 case NL80211_IFTYPE_P2P_CLIENT:
2155 case NL80211_IFTYPE_STATION:
2156 wlvif->bss_type = BSS_TYPE_STA_BSS;
2158 case NL80211_IFTYPE_ADHOC:
2159 wlvif->bss_type = BSS_TYPE_IBSS;
2161 case NL80211_IFTYPE_P2P_GO:
2164 case NL80211_IFTYPE_AP:
2165 wlvif->bss_type = BSS_TYPE_AP_BSS;
2168 wlvif->bss_type = MAX_BSS_TYPE;
2172 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2173 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2174 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2176 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2177 wlvif->bss_type == BSS_TYPE_IBSS) {
2178 /* init sta/ibss data */
2179 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2180 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2181 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2182 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2183 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2184 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2185 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2186 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2189 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2190 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2191 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2192 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2193 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2194 wl12xx_allocate_rate_policy(wl,
2195 &wlvif->ap.ucast_rate_idx[i]);
2196 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2198 * TODO: check if basic_rate shouldn't be
2199 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2200 * instead (the same thing for STA above).
2202 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2203 /* TODO: this seems to be used only for STA, check it */
2204 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2207 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2208 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2209 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2212 * mac80211 configures some values globally, while we treat them
2213 * per-interface. thus, on init, we have to copy them from wl
2215 wlvif->band = wl->band;
2216 wlvif->channel = wl->channel;
2217 wlvif->power_level = wl->power_level;
2218 wlvif->channel_type = wl->channel_type;
2220 INIT_WORK(&wlvif->rx_streaming_enable_work,
2221 wl1271_rx_streaming_enable_work);
2222 INIT_WORK(&wlvif->rx_streaming_disable_work,
2223 wl1271_rx_streaming_disable_work);
2224 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2225 wlcore_channel_switch_work);
2226 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2227 wlcore_connection_loss_work);
2228 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2229 wlcore_pending_auth_complete_work);
2230 INIT_LIST_HEAD(&wlvif->list);
2232 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2233 (unsigned long) wlvif);
2237 static int wl12xx_init_fw(struct wl1271 *wl)
2239 int retries = WL1271_BOOT_RETRIES;
2240 bool booted = false;
2241 struct wiphy *wiphy = wl->hw->wiphy;
2246 ret = wl12xx_chip_wakeup(wl, false);
2250 ret = wl->ops->boot(wl);
2254 ret = wl1271_hw_init(wl);
2262 mutex_unlock(&wl->mutex);
2263 /* Unlocking the mutex in the middle of handling is
2264 inherently unsafe. In this case we deem it safe to do,
2265 because we need to let any possibly pending IRQ out of
2266 the system (and while we are WLCORE_STATE_OFF the IRQ
2267 work function will not do anything.) Also, any other
2268 possible concurrent operations will fail due to the
2269 current state, hence the wl1271 struct should be safe. */
2270 wlcore_disable_interrupts(wl);
2271 wl1271_flush_deferred_work(wl);
2272 cancel_work_sync(&wl->netstack_work);
2273 mutex_lock(&wl->mutex);
2275 wl1271_power_off(wl);
2279 wl1271_error("firmware boot failed despite %d retries",
2280 WL1271_BOOT_RETRIES);
2284 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2286 /* update hw/fw version info in wiphy struct */
2287 wiphy->hw_version = wl->chip.id;
2288 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2289 sizeof(wiphy->fw_version));
2292 * Now we know if 11a is supported (info from the NVS), so disable
2293 * 11a channels if not supported
2295 if (!wl->enable_11a)
2296 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2298 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2299 wl->enable_11a ? "" : "not ");
2301 wl->state = WLCORE_STATE_ON;
2306 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2308 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2312 * Check whether a fw switch (i.e. moving from one loaded
2313 * fw to another) is needed. This function is also responsible
2314 * for updating wl->last_vif_count, so it must be called before
2315 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2318 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2319 struct vif_counter_data vif_counter_data,
2322 enum wl12xx_fw_type current_fw = wl->fw_type;
2323 u8 vif_count = vif_counter_data.counter;
2325 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2328 /* increase the vif count if this is a new vif */
2329 if (add && !vif_counter_data.cur_vif_running)
2332 wl->last_vif_count = vif_count;
2334 /* no need for fw change if the device is OFF */
2335 if (wl->state == WLCORE_STATE_OFF)
2338 /* no need for fw change if a single fw is used */
2339 if (!wl->mr_fw_name)
2342 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2344 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2351 * Enter "forced psm". Make sure the sta is in psm against the ap,
2352 * to make the fw switch a bit more disconnection-persistent.
2354 static void wl12xx_force_active_psm(struct wl1271 *wl)
2356 struct wl12xx_vif *wlvif;
2358 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2359 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2363 struct wlcore_hw_queue_iter_data {
2364 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2366 struct ieee80211_vif *vif;
2367 /* is the current vif among those iterated */
2371 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2372 struct ieee80211_vif *vif)
2374 struct wlcore_hw_queue_iter_data *iter_data = data;
2376 if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2379 if (iter_data->cur_running || vif == iter_data->vif) {
2380 iter_data->cur_running = true;
2384 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2387 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2388 struct wl12xx_vif *wlvif)
2390 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2391 struct wlcore_hw_queue_iter_data iter_data = {};
2394 iter_data.vif = vif;
2396 /* mark all bits taken by active interfaces */
2397 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2398 IEEE80211_IFACE_ITER_RESUME_ALL,
2399 wlcore_hw_queue_iter, &iter_data);
2401 /* the current vif is already running in mac80211 (resume/recovery) */
2402 if (iter_data.cur_running) {
2403 wlvif->hw_queue_base = vif->hw_queue[0];
2404 wl1271_debug(DEBUG_MAC80211,
2405 "using pre-allocated hw queue base %d",
2406 wlvif->hw_queue_base);
2408 /* interface type might have changed type */
2409 goto adjust_cab_queue;
2412 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2413 WLCORE_NUM_MAC_ADDRESSES);
2414 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2417 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2418 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2419 wlvif->hw_queue_base);
2421 for (i = 0; i < NUM_TX_QUEUES; i++) {
2422 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2423 /* register hw queues in mac80211 */
2424 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2428 /* the last places are reserved for cab queues per interface */
2429 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2430 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2431 wlvif->hw_queue_base / NUM_TX_QUEUES;
2433 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2438 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2439 struct ieee80211_vif *vif)
2441 struct wl1271 *wl = hw->priv;
2442 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2443 struct vif_counter_data vif_count;
2448 wl1271_error("Adding Interface not allowed while in PLT mode");
2452 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2453 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2455 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2456 ieee80211_vif_type_p2p(vif), vif->addr);
2458 wl12xx_get_vif_count(hw, vif, &vif_count);
2460 mutex_lock(&wl->mutex);
2461 ret = wl1271_ps_elp_wakeup(wl);
2466 * in some very corner case HW recovery scenarios its possible to
2467 * get here before __wl1271_op_remove_interface is complete, so
2468 * opt out if that is the case.
2470 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2471 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2477 ret = wl12xx_init_vif_data(wl, vif);
2482 role_type = wl12xx_get_role_type(wl, wlvif);
2483 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2488 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2492 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2493 wl12xx_force_active_psm(wl);
2494 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2495 mutex_unlock(&wl->mutex);
2496 wl1271_recovery_work(&wl->recovery_work);
2501 * TODO: after the nvs issue will be solved, move this block
2502 * to start(), and make sure here the driver is ON.
2504 if (wl->state == WLCORE_STATE_OFF) {
2506 * we still need this in order to configure the fw
2507 * while uploading the nvs
2509 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2511 ret = wl12xx_init_fw(wl);
2516 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2517 role_type, &wlvif->role_id);
2521 ret = wl1271_init_vif_specific(wl, vif);
2525 list_add(&wlvif->list, &wl->wlvif_list);
2526 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2528 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2533 wl1271_ps_elp_sleep(wl);
2535 mutex_unlock(&wl->mutex);
2540 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2541 struct ieee80211_vif *vif,
2542 bool reset_tx_queues)
2544 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2546 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2548 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2550 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2553 /* because of hardware recovery, we may get here twice */
2554 if (wl->state == WLCORE_STATE_OFF)
2557 wl1271_info("down");
2559 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2560 wl->scan_wlvif == wlvif) {
2562 * Rearm the tx watchdog just before idling scan. This
2563 * prevents just-finished scans from triggering the watchdog
2565 wl12xx_rearm_tx_watchdog_locked(wl);
2567 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2568 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2569 wl->scan_wlvif = NULL;
2570 wl->scan.req = NULL;
2571 ieee80211_scan_completed(wl->hw, true);
2574 if (wl->sched_vif == wlvif) {
2575 ieee80211_sched_scan_stopped(wl->hw);
2576 wl->sched_vif = NULL;
2579 if (wl->roc_vif == vif) {
2581 ieee80211_remain_on_channel_expired(wl->hw);
2584 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2585 /* disable active roles */
2586 ret = wl1271_ps_elp_wakeup(wl);
2590 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2591 wlvif->bss_type == BSS_TYPE_IBSS) {
2592 if (wl12xx_dev_role_started(wlvif))
2593 wl12xx_stop_dev(wl, wlvif);
2596 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2600 wl1271_ps_elp_sleep(wl);
2603 wl12xx_tx_reset_wlvif(wl, wlvif);
2605 /* clear all hlids (except system_hlid) */
2606 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2608 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2609 wlvif->bss_type == BSS_TYPE_IBSS) {
2610 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2611 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2612 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2613 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2614 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2616 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2617 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2618 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2619 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2620 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2621 wl12xx_free_rate_policy(wl,
2622 &wlvif->ap.ucast_rate_idx[i]);
2623 wl1271_free_ap_keys(wl, wlvif);
2626 dev_kfree_skb(wlvif->probereq);
2627 wlvif->probereq = NULL;
2628 if (wl->last_wlvif == wlvif)
2629 wl->last_wlvif = NULL;
2630 list_del(&wlvif->list);
2631 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2632 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2633 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2641 * Last AP, have more stations. Configure sleep auth according to STA.
2642 * Don't do thin on unintended recovery.
2644 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2645 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2648 if (wl->ap_count == 0 && is_ap) {
2649 /* mask ap events */
2650 wl->event_mask &= ~wl->ap_event_mask;
2651 wl1271_event_unmask(wl);
2654 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2655 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2656 /* Configure for power according to debugfs */
2657 if (sta_auth != WL1271_PSM_ILLEGAL)
2658 wl1271_acx_sleep_auth(wl, sta_auth);
2659 /* Configure for ELP power saving */
2661 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2665 mutex_unlock(&wl->mutex);
2667 del_timer_sync(&wlvif->rx_streaming_timer);
2668 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2669 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2670 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2671 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2672 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2674 mutex_lock(&wl->mutex);
2677 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2678 struct ieee80211_vif *vif)
2680 struct wl1271 *wl = hw->priv;
2681 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2682 struct wl12xx_vif *iter;
2683 struct vif_counter_data vif_count;
2685 wl12xx_get_vif_count(hw, vif, &vif_count);
2686 mutex_lock(&wl->mutex);
2688 if (wl->state == WLCORE_STATE_OFF ||
2689 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2693 * wl->vif can be null here if someone shuts down the interface
2694 * just when hardware recovery has been started.
2696 wl12xx_for_each_wlvif(wl, iter) {
2700 __wl1271_op_remove_interface(wl, vif, true);
2703 WARN_ON(iter != wlvif);
2704 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2705 wl12xx_force_active_psm(wl);
2706 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2707 wl12xx_queue_recovery_work(wl);
2710 mutex_unlock(&wl->mutex);
/*
 * mac80211 .change_interface callback.
 *
 * Implemented as remove + re-add of the vif with the new nl80211 type,
 * with WL1271_FLAG_VIF_CHANGE_IN_PROGRESS set around the sequence so
 * the rest of the driver can tell this is not a real interface removal.
 */
2713 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2714 struct ieee80211_vif *vif,
2715 enum nl80211_iftype new_type, bool p2p)
2717 struct wl1271 *wl = hw->priv;
2720 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2721 wl1271_op_remove_interface(hw, vif);
2723 vif->type = new_type;
2725 ret = wl1271_op_add_interface(hw, vif);
2727 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
/*
 * Start the role for this vif: role_start_ibss for IBSS, role_start_sta
 * otherwise. Clears the recorded encryption type, since the JOIN wipes
 * the keys in the chipset (see comment below).
 */
2731 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2734 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2737 * One of the side effects of the JOIN command is that it clears
2738 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2739 * to a WPA/WPA2 access point will therefore kill the data-path.
2740 * Currently the only valid scenario for JOIN during association
2741 * is on roaming, in which case we will also be given new keys.
2742 * Keep the below message for now, unless it starts bothering
2743 * users who really like to roam a lot :)
2745 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2746 wl1271_info("JOIN while associated.");
2748 /* clear encryption type */
2749 wlvif->encryption_type = KEY_NONE;
2752 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2754 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2756 * TODO: this is an ugly workaround for wl12xx fw
2757 * bug - we are not able to tx/rx after the first
2758 * start_sta, so make dummy start+stop calls,
2759 * and then call start_sta again.
2760 * this should be fixed in the fw.
2762 wl12xx_cmd_role_start_sta(wl, wlvif);
2763 wl12xx_cmd_role_stop_sta(wl, wlvif);
2766 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
/*
 * Extract the SSID IE from the frame in @skb (IEs start at @offset)
 * and copy it into wlvif->ssid / wlvif->ssid_len.
 * Errors out if no SSID IE is present or the SSID exceeds
 * IEEE80211_MAX_SSID_LEN.
 */
2772 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2776 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2780 wl1271_error("No SSID in IEs!");
2785 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2786 wl1271_error("SSID is too long!");
2790 wlvif->ssid_len = ssid_len;
/* ptr points at the IE header; the SSID payload starts 2 bytes in */
2791 memcpy(wlvif->ssid, ptr+2, ssid_len);
/*
 * Populate wlvif->ssid from the AP probe request template that
 * mac80211 holds for this vif (STA mode only).
 */
2795 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2797 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2798 struct sk_buff *skb;
2801 /* we currently only support setting the ssid from the ap probe req */
2802 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2805 skb = ieee80211_ap_probereq_get(wl->hw, vif);
/* IEs in a probe request start after the fixed mgmt header */
2809 ieoffset = offsetof(struct ieee80211_mgmt,
2810 u.probe_req.variable);
2811 wl1271_ssid_set(wlvif, skb, ieoffset);
/*
 * Configure the firmware for an established STA association:
 * cache aid/beacon_int/wmm state, build the ps-poll and hardware
 * connection-maintenance probe-request templates, enable connection
 * monitoring and keep-alive, sync the PS mode with mac80211's default
 * (ACTIVE), and update the rate policies.
 */
2817 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2818 struct ieee80211_bss_conf *bss_conf,
2824 wlvif->aid = bss_conf->aid;
2825 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2826 wlvif->beacon_int = bss_conf->beacon_int;
2827 wlvif->wmm_enabled = bss_conf->qos;
2829 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2832 * with wl1271, we don't need to update the
2833 * beacon_int and dtim_period, because the firmware
2834 * updates it by itself when the first beacon is
2835 * received after a join.
2837 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2842 * Get a template for hardware connection maintenance
2844 dev_kfree_skb(wlvif->probereq);
2845 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2848 ieoffset = offsetof(struct ieee80211_mgmt,
2849 u.probe_req.variable);
2850 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2852 /* enable the connection monitoring feature */
2853 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2858 * The join command disables the keep-alive mode, shuts down its
2859 * process, and also clears the template config, so we need to reset it
2860 * all after the join. The acx_aid starts the keep-alive process, and
2861 * the order of the commands below is relevant.
2863 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2867 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2871 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2875 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2876 wlvif->sta.klv_template_id,
2877 ACX_KEEP_ALIVE_TPL_VALID);
2882 * The default fw psm configuration is AUTO, while mac80211 default
2883 * setting is off (ACTIVE), so sync the fw with the correct value.
2885 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2891 wl1271_tx_enabled_rates_get(wl,
2894 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/*
 * Revert the association state set up by wlcore_set_assoc():
 * free the probe-request template, disable connection monitoring and
 * keep-alive, abort any in-progress channel switch, and invalidate the
 * keep-alive template. Clears STA_ASSOCIATED / IBSS_JOINED flags and
 * bails out early if neither was set.
 */
2902 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2905 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2907 /* make sure we are connected (sta) joined */
2909 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2912 /* make sure we are joined (ibss) */
2914 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2918 /* use defaults when not associated */
2921 /* free probe-request template */
2922 dev_kfree_skb(wlvif->probereq);
2923 wlvif->probereq = NULL;
2925 /* disable connection monitor features */
2926 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
2930 /* Disable the keep-alive feature */
2931 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
/* if a channel switch was in progress, tell fw and mac80211 it failed */
2936 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2937 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2939 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2940 ieee80211_chswitch_done(vif, false);
2941 cancel_delayed_work(&wlvif->channel_switch_work);
2944 /* invalidate keep-alive template */
2945 wl1271_acx_keep_alive_config(wl, wlvif,
2946 wlvif->sta.klv_template_id,
2947 ACX_KEEP_ALIVE_TPL_INVALID);
/* Reset the vif's rate sets to the defaults for its current band. */
2952 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2954 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2955 wlvif->rate_set = wlvif->basic_rate_set;
/*
 * Track the vif's idle state via WLVIF_FLAG_ACTIVE (note: the flag is
 * the inverse of "idle"). No-op if the state did not change.
 */
2958 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2961 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2963 if (idle == cur_idle)
2967 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2969 /* The current firmware only supports sched_scan in idle */
2970 if (wl->sched_vif == wlvif)
2971 wl->ops->sched_scan_stop(wl, wlvif);
2973 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
/*
 * Apply a mac80211 config change to one vif; currently only updates
 * the TX power level when it differs from the cached value.
 */
2977 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2978 struct ieee80211_conf *conf, u32 changed)
2982 if (conf->power_level != wlvif->power_level) {
2983 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
/* cache only after the acx command succeeded */
2987 wlvif->power_level = conf->power_level;
/*
 * mac80211 .config callback: records the global power level and applies
 * the change to each vif under wl->mutex, bracketed by ELP wakeup/sleep.
 */
2993 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2995 struct wl1271 *wl = hw->priv;
2996 struct wl12xx_vif *wlvif;
2997 struct ieee80211_conf *conf = &hw->conf;
3000 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3002 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3004 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3007 mutex_lock(&wl->mutex);
/* remember the requested power level even if the hw is currently off */
3009 if (changed & IEEE80211_CONF_CHANGE_POWER)
3010 wl->power_level = conf->power_level;
3012 if (unlikely(wl->state != WLCORE_STATE_ON))
3015 ret = wl1271_ps_elp_wakeup(wl);
3019 /* configure each interface */
3020 wl12xx_for_each_wlvif(wl, wlvif) {
3021 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3027 wl1271_ps_elp_sleep(wl);
3030 mutex_unlock(&wl->mutex);
/* Multicast filter snapshot built in prepare_multicast and consumed
 * (and freed) in configure_filter. */
3035 struct wl1271_filter_params {
3038 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
/*
 * mac80211 .prepare_multicast callback.
 *
 * Copies the multicast address list into a freshly allocated
 * wl1271_filter_params (GFP_ATOMIC — may be called in atomic context)
 * and returns it squeezed into the u64 "multicast" cookie that
 * configure_filter later receives. If the list exceeds
 * ACX_MC_ADDRESS_GROUP_MAX the per-address filter is disabled.
 */
3041 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3042 struct netdev_hw_addr_list *mc_list)
3044 struct wl1271_filter_params *fp;
3045 struct netdev_hw_addr *ha;
3047 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3049 wl1271_error("Out of memory setting filters.");
3053 /* update multicast filtering parameters */
3054 fp->mc_list_length = 0;
3055 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3056 fp->enabled = false;
3059 netdev_hw_addr_list_for_each(ha, mc_list) {
3060 memcpy(fp->mc_list[fp->mc_list_length],
3061 ha->addr, ETH_ALEN);
3062 fp->mc_list_length++;
/* pointer smuggled to configure_filter via the u64 return value */
3066 return (u64)(unsigned long)fp;
/* Set of mac80211 FIF_* filter flags this driver supports. */
3069 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
3072 FIF_BCN_PRBRESP_PROMISC | \
/*
 * mac80211 .configure_filter callback.
 *
 * Consumes the wl1271_filter_params cookie built by prepare_multicast
 * (passed in the u64 "multicast" argument). For non-AP vifs, programs
 * either an empty group-address table (FIF_ALLMULTI) or the collected
 * multicast list.
 */
3076 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3077 unsigned int changed,
3078 unsigned int *total, u64 multicast)
3080 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3081 struct wl1271 *wl = hw->priv;
3082 struct wl12xx_vif *wlvif;
3086 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3087 " total %x", changed, *total);
3089 mutex_lock(&wl->mutex);
/* report back to mac80211 only the filters we actually support */
3091 *total &= WL1271_SUPPORTED_FILTERS;
3092 changed &= WL1271_SUPPORTED_FILTERS;
3094 if (unlikely(wl->state != WLCORE_STATE_ON))
3097 ret = wl1271_ps_elp_wakeup(wl);
3101 wl12xx_for_each_wlvif(wl, wlvif) {
3102 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3103 if (*total & FIF_ALLMULTI)
3104 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3108 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3111 fp->mc_list_length);
3118 * the fw doesn't provide an api to configure the filters. instead,
3119 * the filters configuration is based on the active roles / ROC
3124 wl1271_ps_elp_sleep(wl);
3127 mutex_unlock(&wl->mutex);
/*
 * Record an AP key to be pushed to the firmware later, when the AP role
 * actually starts (see wl1271_ap_init_hwenc). Finds a free slot in
 * wlvif->ap.recorded_keys; refuses replacements of an already recorded
 * key id and keys larger than MAX_KEY_SIZE.
 */
3131 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3132 u8 id, u8 key_type, u8 key_size,
3133 const u8 *key, u8 hlid, u32 tx_seq_32,
3136 struct wl1271_ap_key *ap_key;
3139 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3141 if (key_size > MAX_KEY_SIZE)
3145 * Find next free entry in ap_keys. Also check we are not replacing
3148 for (i = 0; i < MAX_NUM_KEYS; i++) {
3149 if (wlvif->ap.recorded_keys[i] == NULL)
3152 if (wlvif->ap.recorded_keys[i]->id == id) {
3153 wl1271_warning("trying to record key replacement");
3158 if (i == MAX_NUM_KEYS)
3161 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3166 ap_key->key_type = key_type;
3167 ap_key->key_size = key_size;
3168 memcpy(ap_key->key, key, key_size);
3169 ap_key->hlid = hlid;
3170 ap_key->tx_seq_32 = tx_seq_32;
3171 ap_key->tx_seq_16 = tx_seq_16;
/* ownership of ap_key passes to the recorded_keys array */
3173 wlvif->ap.recorded_keys[i] = ap_key;
/* Free all recorded AP keys and clear the slots. */
3177 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3181 for (i = 0; i < MAX_NUM_KEYS; i++) {
3182 kfree(wlvif->ap.recorded_keys[i]);
3183 wlvif->ap.recorded_keys[i] = NULL;
/*
 * Push all keys recorded before the AP role started down to the
 * firmware. Keys with an invalid hlid are applied to the broadcast
 * link. If any WEP key was added, also (re)set the default WEP key for
 * the broadcast link. The recorded keys are freed in all cases.
 */
3187 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3190 struct wl1271_ap_key *key;
3191 bool wep_key_added = false;
3193 for (i = 0; i < MAX_NUM_KEYS; i++) {
3195 if (wlvif->ap.recorded_keys[i] == NULL)
3198 key = wlvif->ap.recorded_keys[i];
3200 if (hlid == WL12XX_INVALID_LINK_ID)
3201 hlid = wlvif->ap.bcast_hlid;
3203 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3204 key->id, key->key_type,
3205 key->key_size, key->key,
3206 hlid, key->tx_seq_32,
3211 if (key->key_type == KEY_WEP)
3212 wep_key_added = true;
3215 if (wep_key_added) {
3216 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3217 wlvif->ap.bcast_hlid);
3223 wl1271_free_ap_keys(wl, wlvif);
/*
 * Low-level key programming, dispatching on role type.
 *
 * AP: per-station keys go to the station's hlid, group keys to the
 * broadcast hlid; before the AP role has started, ADD requests are only
 * recorded (wl1271_record_ap_key) and other actions are pretended to
 * succeed. STA: unicast key removal is silently ignored (JOIN clears
 * them anyway), as is removal after the hlid is gone.
 */
3227 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3228 u16 action, u8 id, u8 key_type,
3229 u8 key_size, const u8 *key, u32 tx_seq_32,
3230 u16 tx_seq_16, struct ieee80211_sta *sta)
3233 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3236 struct wl1271_station *wl_sta;
3240 wl_sta = (struct wl1271_station *)sta->drv_priv;
3241 hlid = wl_sta->hlid;
3243 hlid = wlvif->ap.bcast_hlid;
3246 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3248 * We do not support removing keys after AP shutdown.
3249 * Pretend we do to make mac80211 happy.
3251 if (action != KEY_ADD_OR_REPLACE)
3254 ret = wl1271_record_ap_key(wl, wlvif, id,
3256 key, hlid, tx_seq_32,
3259 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3260 id, key_type, key_size,
3261 key, hlid, tx_seq_32,
3269 static const u8 bcast_addr[ETH_ALEN] = {
3270 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3273 addr = sta ? sta->addr : bcast_addr;
3275 if (is_zero_ether_addr(addr)) {
3276 /* We don't support TX only encryption */
3280 /* The wl1271 does not allow removing unicast keys - they
3281 will be cleared automatically on next CMD_JOIN. Ignore the
3282 request silently, as we don't want mac80211 to emit
3283 an error message. */
3284 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3287 /* don't remove key if hlid was already deleted */
3288 if (action == KEY_REMOVE &&
3289 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3292 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3293 id, key_type, key_size,
3294 key, addr, tx_seq_32,
/*
 * mac80211 .set_key callback.
 *
 * GEM and TKIP ciphers change the firmware's spare-block accounting,
 * so the TX queues are stopped and flushed around the key operation to
 * keep queued packets consistent with the new accounting. Delegates
 * the actual work to the chip-specific hw op.
 */
3304 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3305 struct ieee80211_vif *vif,
3306 struct ieee80211_sta *sta,
3307 struct ieee80211_key_conf *key_conf)
3309 struct wl1271 *wl = hw->priv;
3311 bool might_change_spare =
3312 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3313 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3315 if (might_change_spare) {
3317 * stop the queues and flush to ensure the next packets are
3318 * in sync with FW spare block accounting
3320 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3321 wl1271_tx_flush(wl);
3324 mutex_lock(&wl->mutex);
3326 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3328 goto out_wake_queues;
3331 ret = wl1271_ps_elp_wakeup(wl);
3333 goto out_wake_queues;
3335 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3337 wl1271_ps_elp_sleep(wl);
3340 if (might_change_spare)
3341 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3343 mutex_unlock(&wl->mutex);
/*
 * Common set_key implementation shared by the chip-specific hw ops.
 *
 * Resolves the hlid (per-station for AP, broadcast for group keys,
 * sta.hlid for STA), seeds the TX sequence counters from the link's
 * total_freed_pkts, maps the cfg80211 cipher suite to a firmware key
 * type, and performs the SET_KEY/DISABLE_KEY action. On a STA vif, a
 * change of the unicast (or WEP) encryption type also rebuilds the ARP
 * response template.
 */
3348 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3349 struct ieee80211_vif *vif,
3350 struct ieee80211_sta *sta,
3351 struct ieee80211_key_conf *key_conf)
3353 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3360 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3362 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3363 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3364 key_conf->cipher, key_conf->keyidx,
3365 key_conf->keylen, key_conf->flags);
3366 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3368 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3370 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3371 hlid = wl_sta->hlid;
3373 hlid = wlvif->ap.bcast_hlid;
3376 hlid = wlvif->sta.hlid;
/* resume the TX security sequence number from the link's counter */
3378 if (hlid != WL12XX_INVALID_LINK_ID) {
3379 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3380 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3381 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3384 switch (key_conf->cipher) {
3385 case WLAN_CIPHER_SUITE_WEP40:
3386 case WLAN_CIPHER_SUITE_WEP104:
3389 key_conf->hw_key_idx = key_conf->keyidx;
3391 case WLAN_CIPHER_SUITE_TKIP:
3392 key_type = KEY_TKIP;
3393 key_conf->hw_key_idx = key_conf->keyidx;
3395 case WLAN_CIPHER_SUITE_CCMP:
/* firmware generates the IV; mac80211 must leave space for it */
3397 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3399 case WL1271_CIPHER_SUITE_GEM:
3403 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3410 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3411 key_conf->keyidx, key_type,
3412 key_conf->keylen, key_conf->key,
3413 tx_seq_32, tx_seq_16, sta);
3415 wl1271_error("Could not add or replace key");
3420 * reconfigure the arp response if the unicast (or common)
3421 * encryption key type was changed
3423 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3424 (sta || key_type == KEY_WEP) &&
3425 wlvif->encryption_type != key_type) {
3426 wlvif->encryption_type = key_type;
3427 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3429 wl1271_warning("build arp rsp failed: %d", ret);
3436 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3437 key_conf->keyidx, key_type,
3438 key_conf->keylen, key_conf->key,
3441 wl1271_error("Could not remove key");
3447 wl1271_error("Unsupported key cmd 0x%x", cmd);
3453 EXPORT_SYMBOL_GPL(wlcore_set_key);
/*
 * mac80211 .set_default_unicast_key callback: caches the default key
 * index and, when WEP is in use, programs it into the firmware.
 */
3455 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3456 struct ieee80211_vif *vif,
3459 struct wl1271 *wl = hw->priv;
3460 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3463 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3466 mutex_lock(&wl->mutex);
3468 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3473 ret = wl1271_ps_elp_wakeup(wl);
3477 wlvif->default_key = key_idx;
3479 /* the default WEP key needs to be configured at least once */
3480 if (wlvif->encryption_type == KEY_WEP) {
3481 ret = wl12xx_cmd_set_default_wep_key(wl,
3489 wl1271_ps_elp_sleep(wl);
3492 mutex_unlock(&wl->mutex);
/*
 * Push the current regulatory domain configuration to the firmware
 * (only on hardware with the REGDOMAIN_CONF quirk). A failure queues
 * recovery work, since the fw channel map would be out of sync.
 */
3495 void wlcore_regdomain_config(struct wl1271 *wl)
3499 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3502 mutex_lock(&wl->mutex);
3504 if (unlikely(wl->state != WLCORE_STATE_ON))
3507 ret = wl1271_ps_elp_wakeup(wl);
3511 ret = wlcore_cmd_regdomain_config_locked(wl);
3513 wl12xx_queue_recovery_work(wl);
3517 wl1271_ps_elp_sleep(wl);
3519 mutex_unlock(&wl->mutex);
/*
 * mac80211 .hw_scan callback: starts a scan for the first requested
 * SSID. Refused while any role is in remain-on-channel (ROC).
 */
3522 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3523 struct ieee80211_vif *vif,
3524 struct cfg80211_scan_request *req)
3526 struct wl1271 *wl = hw->priv;
3531 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3534 ssid = req->ssids[0].ssid;
3535 len = req->ssids[0].ssid_len;
3538 mutex_lock(&wl->mutex);
3540 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3542 * We cannot return -EBUSY here because cfg80211 will expect
3543 * a call to ieee80211_scan_completed if we do - in this case
3544 * there won't be any call.
3550 ret = wl1271_ps_elp_wakeup(wl);
3554 /* fail if there is any role in ROC */
3555 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3556 /* don't allow scanning right now */
3561 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3563 wl1271_ps_elp_sleep(wl);
3565 mutex_unlock(&wl->mutex);
/*
 * mac80211 .cancel_hw_scan callback: stops an in-progress scan, resets
 * the scan state machine, notifies mac80211 (aborted=true) and cancels
 * the pending scan-complete work. Rearms the TX watchdog so a scan
 * that just finished doesn't trip it.
 */
3570 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3571 struct ieee80211_vif *vif)
3573 struct wl1271 *wl = hw->priv;
3574 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3577 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3579 mutex_lock(&wl->mutex);
3581 if (unlikely(wl->state != WLCORE_STATE_ON))
3584 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3587 ret = wl1271_ps_elp_wakeup(wl);
3591 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3592 ret = wl->ops->scan_stop(wl, wlvif);
3598 * Rearm the tx watchdog just before idling scan. This
3599 * prevents just-finished scans from triggering the watchdog
3601 wl12xx_rearm_tx_watchdog_locked(wl);
3603 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3604 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3605 wl->scan_wlvif = NULL;
3606 wl->scan.req = NULL;
/* tell mac80211 the scan was aborted */
3607 ieee80211_scan_completed(wl->hw, true);
3610 wl1271_ps_elp_sleep(wl);
3612 mutex_unlock(&wl->mutex);
3614 cancel_delayed_work_sync(&wl->scan_complete_work);
/*
 * mac80211 .sched_scan_start callback: delegates to the chip-specific
 * op and, on success, records this vif as the scheduled-scan owner.
 */
3617 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3618 struct ieee80211_vif *vif,
3619 struct cfg80211_sched_scan_request *req,
3620 struct ieee80211_sched_scan_ies *ies)
3622 struct wl1271 *wl = hw->priv;
3623 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3626 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3628 mutex_lock(&wl->mutex);
3630 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3635 ret = wl1271_ps_elp_wakeup(wl);
3639 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3643 wl->sched_vif = wlvif;
3646 wl1271_ps_elp_sleep(wl);
3648 mutex_unlock(&wl->mutex);
/*
 * mac80211 .sched_scan_stop callback: stops the scheduled scan via the
 * chip-specific op, under wl->mutex and ELP wakeup.
 */
3652 static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3653 struct ieee80211_vif *vif)
3655 struct wl1271 *wl = hw->priv;
3656 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3659 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3661 mutex_lock(&wl->mutex);
3663 if (unlikely(wl->state != WLCORE_STATE_ON))
3666 ret = wl1271_ps_elp_wakeup(wl);
3670 wl->ops->sched_scan_stop(wl, wlvif);
3672 wl1271_ps_elp_sleep(wl);
3674 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_frag_threshold callback: programs the fragmentation
 * threshold via ACX (global, not per-vif).
 */
3677 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3679 struct wl1271 *wl = hw->priv;
3682 mutex_lock(&wl->mutex);
3684 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3689 ret = wl1271_ps_elp_wakeup(wl);
3693 ret = wl1271_acx_frag_threshold(wl, value);
3695 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3697 wl1271_ps_elp_sleep(wl);
3700 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_rts_threshold callback: programs the RTS threshold on
 * every vif (the ACX command is per-vif, unlike frag threshold).
 */
3705 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3707 struct wl1271 *wl = hw->priv;
3708 struct wl12xx_vif *wlvif;
3711 mutex_lock(&wl->mutex);
3713 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3718 ret = wl1271_ps_elp_wakeup(wl);
3722 wl12xx_for_each_wlvif(wl, wlvif) {
3723 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3725 wl1271_warning("set rts threshold failed: %d", ret);
3727 wl1271_ps_elp_sleep(wl);
3730 mutex_unlock(&wl->mutex);
/*
 * Remove the first IE with element id @eid from the frame in @skb
 * (IEs start at @ieoffset), compacting the remaining data and trimming
 * the skb accordingly.
 */
3735 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3738 const u8 *next, *end = skb->data + skb->len;
3739 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3740 skb->len - ieoffset);
3745 memmove(ie, next, end - next);
3746 skb_trim(skb, skb->len - len);
/*
 * Like wl12xx_remove_ie(), but removes the first vendor-specific IE
 * matching @oui/@oui_type from the frame in @skb.
 */
3749 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3750 unsigned int oui, u8 oui_type,
3754 const u8 *next, *end = skb->data + skb->len;
3755 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3756 skb->data + ieoffset,
3757 skb->len - ieoffset);
3762 memmove(ie, next, end - next);
3763 skb_trim(skb, skb->len - len);
/*
 * Set the AP probe-response template from the frame provided by
 * mac80211 (ieee80211_proberesp_get). On success, marks the vif so
 * beacon-derived probe responses won't overwrite it.
 */
3766 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3767 struct ieee80211_vif *vif)
3769 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3770 struct sk_buff *skb;
3773 skb = ieee80211_proberesp_get(wl->hw, vif);
3777 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3778 CMD_TEMPL_AP_PROBE_RESPONSE,
3787 wl1271_debug(DEBUG_AP, "probe response updated");
3788 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
/*
 * Legacy path for setting the AP probe-response template when the SSID
 * is not already known in wlvif (e.g. hidden SSID beacons): rebuilds
 * the template on the stack, splicing the SSID from bss_conf into the
 * beacon-derived data in place of the (empty) SSID IE.
 */
3794 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3795 struct ieee80211_vif *vif,
3797 size_t probe_rsp_len,
3800 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3801 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3802 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3803 int ssid_ie_offset, ie_offset, templ_len;
3806 /* no need to change probe response if the SSID is set correctly */
3807 if (wlvif->ssid_len > 0)
3808 return wl1271_cmd_template_set(wl, wlvif->role_id,
3809 CMD_TEMPL_AP_PROBE_RESPONSE,
/* bounds check before building the spliced template on the stack */
3814 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3815 wl1271_error("probe_rsp template too big");
3819 /* start searching from IE offset */
3820 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3822 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3823 probe_rsp_len - ie_offset);
3825 wl1271_error("No SSID in beacon!");
3829 ssid_ie_offset = ptr - probe_rsp_data;
/* skip past the original SSID IE (header + payload) */
3830 ptr += (ptr[1] + 2);
3832 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3834 /* insert SSID from bss_conf */
3835 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3836 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3837 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3838 bss_conf->ssid, bss_conf->ssid_len);
3839 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
/* append everything that followed the original SSID IE */
3841 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3842 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3843 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3845 return wl1271_cmd_template_set(wl, wlvif->role_id,
3846 CMD_TEMPL_AP_PROBE_RESPONSE,
/*
 * Apply ERP-related BSS changes: slot time, preamble and CTS
 * protection, each guarded by its BSS_CHANGED_* bit.
 */
3852 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3853 struct ieee80211_vif *vif,
3854 struct ieee80211_bss_conf *bss_conf,
3857 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3860 if (changed & BSS_CHANGED_ERP_SLOT) {
3861 if (bss_conf->use_short_slot)
3862 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3864 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3866 wl1271_warning("Set slot time failed %d", ret);
3871 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3872 if (bss_conf->use_short_preamble)
3873 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3875 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3878 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3879 if (bss_conf->use_cts_prot)
3880 ret = wl1271_acx_cts_protect(wl, wlvif,
3883 ret = wl1271_acx_cts_protect(wl, wlvif,
3884 CTSPROTECT_DISABLE);
3886 wl1271_warning("Set ctsprotect failed %d", ret);
/*
 * Fetch the current beacon from mac80211 and program it as the beacon
 * template; then derive a probe-response template from the same frame
 * (unless usermode already set one explicitly): strip the TIM and P2P
 * IEs, rewrite the frame control to PROBE_RESP, and set the template.
 * Also detects WMM support from the beacon's Microsoft WMM vendor IE.
 */
3895 static int wlcore_set_beacon_template(struct wl1271 *wl,
3896 struct ieee80211_vif *vif,
3899 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3900 struct ieee80211_hdr *hdr;
3903 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
3904 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3912 wl1271_debug(DEBUG_MASTER, "beacon updated");
3914 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
3916 dev_kfree_skb(beacon);
3919 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3920 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3922 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3927 dev_kfree_skb(beacon);
/* WMM is enabled iff the beacon carries the Microsoft WMM vendor IE */
3931 wlvif->wmm_enabled =
3932 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
3933 WLAN_OUI_TYPE_MICROSOFT_WMM,
3934 beacon->data + ieoffset,
3935 beacon->len - ieoffset);
3938 * In case we already have a probe-resp beacon set explicitly
3939 * by usermode, don't use the beacon data.
3941 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3944 /* remove TIM ie from probe response */
3945 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3948 * remove p2p ie from probe response.
3949 * the fw responds to probe requests that don't include
3950 * the p2p ie. probe requests with p2p ie will be passed,
3951 * and will be responded by the supplicant (the spec
3952 * forbids including the p2p ie when responding to probe
3953 * requests that didn't include it).
3955 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3956 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
3958 hdr = (struct ieee80211_hdr *) beacon->data;
3959 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3960 IEEE80211_STYPE_PROBE_RESP);
3962 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3967 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3968 CMD_TEMPL_PROBE_RESPONSE,
3973 dev_kfree_skb(beacon);
/*
 * Handle beacon-related BSS changes: cache a new beacon interval,
 * refresh the AP probe-response template (AP only), and reprogram the
 * beacon template.
 */
3981 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3982 struct ieee80211_vif *vif,
3983 struct ieee80211_bss_conf *bss_conf,
3986 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3987 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3990 if (changed & BSS_CHANGED_BEACON_INT) {
3991 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
3992 bss_conf->beacon_int);
3994 wlvif->beacon_int = bss_conf->beacon_int;
3997 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
3998 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4000 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4003 if (changed & BSS_CHANGED_BEACON) {
4004 ret = wlcore_set_beacon_template(wl, vif, is_ap);
4011 wl1271_error("beacon info change failed: %d", ret);
/* AP mode changes */
/*
 * Handle BSS changes for an AP vif: basic-rate updates (which require
 * re-initializing AP rate policies and templates), beacon changes,
 * starting/stopping the AP role on BEACON_ENABLED, ERP settings, and
 * HT operation-mode updates.
 */
4016 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4017 struct ieee80211_vif *vif,
4018 struct ieee80211_bss_conf *bss_conf,
4021 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4024 if (changed & BSS_CHANGED_BASIC_RATES) {
4025 u32 rates = bss_conf->basic_rates;
4027 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4029 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4030 wlvif->basic_rate_set);
4032 ret = wl1271_init_ap_rates(wl, wlvif);
4034 wl1271_error("AP rate policy change failed %d", ret);
/* templates depend on the rate set, so rebuild them all */
4038 ret = wl1271_ap_init_templates(wl, vif);
4042 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
4046 ret = wlcore_set_beacon_template(wl, vif, true);
4051 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4055 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4056 if (bss_conf->enable_beacon) {
4057 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4058 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
/* push keys recorded before the role started */
4062 ret = wl1271_ap_init_hwenc(wl, wlvif);
4066 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4067 wl1271_debug(DEBUG_AP, "started AP");
4070 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4072 * AP might be in ROC in case we have just
4073 * sent auth reply. handle it.
4075 if (test_bit(wlvif->role_id, wl->roc_map))
4076 wl12xx_croc(wl, wlvif->role_id);
4078 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4082 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4083 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4085 wl1271_debug(DEBUG_AP, "stopped AP");
4090 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4094 /* Handle HT information change */
4095 if ((changed & BSS_CHANGED_HT) &&
4096 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4097 ret = wl1271_acx_set_ht_information(wl, wlvif,
4098 bss_conf->ht_operation_mode);
4100 wl1271_warning("Set ht information failed %d", ret);
/*
 * Configure the vif for a newly learned BSSID: cache the beacon
 * interval, derive basic/enabled rate sets (mixing in the AP's
 * supported rates), stop any scheduled scan (not supported while
 * connected), update rate policies, rebuild null-data templates, set
 * the SSID and mark the vif in use.
 */
4109 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4110 struct ieee80211_bss_conf *bss_conf,
4116 wl1271_debug(DEBUG_MAC80211,
4117 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4118 bss_conf->bssid, bss_conf->aid,
4119 bss_conf->beacon_int,
4120 bss_conf->basic_rates, sta_rate_set);
4122 wlvif->beacon_int = bss_conf->beacon_int;
4123 rates = bss_conf->basic_rates;
4124 wlvif->basic_rate_set =
4125 wl1271_tx_enabled_rates_get(wl, rates,
4128 wl1271_tx_min_rate_get(wl,
4129 wlvif->basic_rate_set);
4133 wl1271_tx_enabled_rates_get(wl,
4137 /* we only support sched_scan while not connected */
4138 if (wl->sched_vif == wlvif)
4139 wl->ops->sched_scan_stop(wl, wlvif);
4141 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4145 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4149 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4153 wlcore_set_ssid(wl, wlvif);
4155 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
/*
 * Undo wlcore_set_bssid(): revert rate sets to the band defaults,
 * update rate policies, stop the STA role if it was in use, and clear
 * the IN_USE flag.
 */
4160 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4164 /* revert back to minimum rates for the current band */
4165 wl1271_set_band_rate(wl, wlvif);
4166 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4168 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4172 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4173 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4174 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4179 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4182 /* STA/IBSS mode changes */
/*
 * Apply mac80211 BSS configuration changes for a STA/IBSS role vif.
 *
 * Handles, in order: beacon/IBSS bookkeeping, idle state, CQM RSSI
 * triggers, caching of the AP's rates/HT caps, BSSID set/clear, IBSS
 * rate policies, ERP changes, (re)join, association state, powersave
 * mode, HT peer capabilities/information and ARP filtering.
 *
 * Called from wl1271_op_bss_info_changed() with wl->mutex held and the
 * chip awake (after wl1271_ps_elp_wakeup).
 *
 * NOTE(review): several original lines are elided in this view (line
 * number gaps), so some conditions/assignments referenced below are
 * not visible here.
 */
4183 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4184 struct ieee80211_vif *vif,
4185 struct ieee80211_bss_conf *bss_conf,
4188 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4189 bool do_join = false;
4190 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4191 bool ibss_joined = false;
4192 u32 sta_rate_set = 0;
4194 struct ieee80211_sta *sta;
4195 bool sta_exists = false;
4196 struct ieee80211_sta_ht_cap sta_ht_cap;
4199 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
/* IBSS join/leave: track the joined flag and tear down assoc state on leave */
4205 if (changed & BSS_CHANGED_IBSS) {
4206 if (bss_conf->ibss_joined) {
4207 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4210 wlcore_unset_assoc(wl, wlvif);
4211 wl12xx_cmd_role_stop_sta(wl, wlvif);
4215 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4218 /* Need to update the SSID (for filtering etc) */
4219 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4222 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4223 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4224 bss_conf->enable_beacon ? "enabled" : "disabled");
4229 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4230 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
/* Connection quality monitoring: program the RSSI/SNR trigger in FW */
4232 if (changed & BSS_CHANGED_CQM) {
4233 bool enable = false;
4234 if (bss_conf->cqm_rssi_thold)
4236 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4237 bss_conf->cqm_rssi_thold,
4238 bss_conf->cqm_rssi_hyst);
4241 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
/* Cache the AP's supported rates and HT caps for later configuration */
4244 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4245 BSS_CHANGED_ASSOC)) {
4247 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4249 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4251 /* save the supp_rates of the ap */
4252 sta_rate_set = sta->supp_rates[wlvif->band];
4253 if (sta->ht_cap.ht_supported)
4255 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4256 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4257 sta_ht_cap = sta->ht_cap;
4264 if (changed & BSS_CHANGED_BSSID) {
4265 if (!is_zero_ether_addr(bss_conf->bssid)) {
4266 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4271 /* Need to update the BSSID (for filtering etc) */
4274 ret = wlcore_clear_bssid(wl, wlvif);
4280 if (changed & BSS_CHANGED_IBSS) {
4281 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4282 bss_conf->ibss_joined);
4284 if (bss_conf->ibss_joined) {
4285 u32 rates = bss_conf->basic_rates;
4286 wlvif->basic_rate_set =
4287 wl1271_tx_enabled_rates_get(wl, rates,
4290 wl1271_tx_min_rate_get(wl,
4291 wlvif->basic_rate_set);
4293 /* by default, use 11b + OFDM rates */
4294 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4295 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4301 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4306 ret = wlcore_join(wl, wlvif);
4308 wl1271_warning("cmd join failed %d", ret);
4313 if (changed & BSS_CHANGED_ASSOC) {
4314 if (bss_conf->assoc) {
4315 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
/* mac80211 may authorize before assoc completes; replay it now */
4320 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4321 wl12xx_set_authorized(wl, wlvif);
4323 wlcore_unset_assoc(wl, wlvif);
4327 if (changed & BSS_CHANGED_PS) {
4328 if ((bss_conf->ps) &&
4329 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4330 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4334 if (wl->conf.conn.forced_ps) {
4335 ps_mode = STATION_POWER_SAVE_MODE;
4336 ps_mode_str = "forced";
4338 ps_mode = STATION_AUTO_PS_MODE;
4339 ps_mode_str = "auto";
4342 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4344 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4346 wl1271_warning("enter %s ps failed %d",
4348 } else if (!bss_conf->ps &&
4349 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4350 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4352 ret = wl1271_ps_set_mode(wl, wlvif,
4353 STATION_ACTIVE_MODE);
4355 wl1271_warning("exit auto ps failed %d", ret);
4359 /* Handle new association with HT. Do this after join. */
4362 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4364 ret = wlcore_hw_set_peer_cap(wl,
4370 wl1271_warning("Set ht cap failed %d", ret);
4376 ret = wl1271_acx_set_ht_information(wl, wlvif,
4377 bss_conf->ht_operation_mode);
4379 wl1271_warning("Set ht information failed %d",
4386 /* Handle arp filtering. Done after join. */
4387 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4388 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4389 __be32 addr = bss_conf->arp_addr_list[0];
4390 wlvif->sta.qos = bss_conf->qos;
4391 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4393 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4394 wlvif->ip_addr = addr;
4396 * The template should have been configured only upon
4397 * association. however, it seems that the correct ip
4398 * isn't being set (when sending), so we have to
4399 * reconfigure the template upon every ip change.
4401 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4403 wl1271_warning("build arp rsp failed: %d", ret);
4407 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4408 (ACX_ARP_FILTER_ARP_FILTERING |
4409 ACX_ARP_FILTER_AUTO_ARP),
4413 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
/*
 * mac80211 bss_info_changed callback.
 *
 * Cancels a pending connection-loss work when association state changes
 * (STA), flushes TX before disabling AP beacons, then dispatches to the
 * AP or STA handler under wl->mutex with the chip woken up via ELP.
 */
4424 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4425 struct ieee80211_vif *vif,
4426 struct ieee80211_bss_conf *bss_conf,
4429 struct wl1271 *wl = hw->priv;
4430 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4431 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4434 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4435 wlvif->role_id, (int)changed);
4438 * make sure to cancel pending disconnections if our association
4441 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4442 cancel_delayed_work_sync(&wlvif->connection_loss_work);
4444 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4445 !bss_conf->enable_beacon)
4446 wl1271_tx_flush(wl);
4448 mutex_lock(&wl->mutex);
/* bail out if the chip is off or this vif was never initialized */
4450 if (unlikely(wl->state != WLCORE_STATE_ON))
4453 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4456 ret = wl1271_ps_elp_wakeup(wl);
4461 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4463 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4465 wl1271_ps_elp_sleep(wl);
4468 mutex_unlock(&wl->mutex);
/*
 * mac80211 add_chanctx callback: nothing to program in FW here, the
 * channel is applied when a vif is assigned to the context. Only logs.
 */
4471 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4472 struct ieee80211_chanctx_conf *ctx)
4474 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4475 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4476 cfg80211_get_chandef_type(&ctx->def));
/* mac80211 remove_chanctx callback: debug logging only. */
4480 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4481 struct ieee80211_chanctx_conf *ctx)
4483 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4484 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4485 cfg80211_get_chandef_type(&ctx->def));
/* mac80211 change_chanctx callback: debug logging only. */
4488 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4489 struct ieee80211_chanctx_conf *ctx,
4492 wl1271_debug(DEBUG_MAC80211,
4493 "mac80211 change chanctx %d (type %d) changed 0x%x",
4494 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4495 cfg80211_get_chandef_type(&ctx->def), changed);
/*
 * mac80211 assign_vif_chanctx callback: record the context's band,
 * channel and channel type on the vif and refresh the default rates
 * for the new band. Done under wl->mutex; no FW command is issued.
 */
4498 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4499 struct ieee80211_vif *vif,
4500 struct ieee80211_chanctx_conf *ctx)
4502 struct wl1271 *wl = hw->priv;
4503 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4504 int channel = ieee80211_frequency_to_channel(
4505 ctx->def.chan->center_freq);
4507 wl1271_debug(DEBUG_MAC80211,
4508 "mac80211 assign chanctx (role %d) %d (type %d)",
4509 wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
4511 mutex_lock(&wl->mutex);
4513 wlvif->band = ctx->def.chan->band;
4514 wlvif->channel = channel;
4515 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4517 /* update default rates according to the band */
4518 wl1271_set_band_rate(wl, wlvif);
4520 mutex_unlock(&wl->mutex);
/*
 * mac80211 unassign_vif_chanctx callback: log and flush pending TX so
 * no frames remain queued for the channel being released.
 */
4525 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4526 struct ieee80211_vif *vif,
4527 struct ieee80211_chanctx_conf *ctx)
4529 struct wl1271 *wl = hw->priv;
4530 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4532 wl1271_debug(DEBUG_MAC80211,
4533 "mac80211 unassign chanctx (role %d) %d (type %d)",
4535 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4536 cfg80211_get_chandef_type(&ctx->def));
4538 wl1271_tx_flush(wl);
/*
 * mac80211 conf_tx callback: program per-queue EDCA (AC) parameters
 * and the matching TID configuration into the FW.
 *
 * The PS scheme (UPSD trigger vs. legacy) is selected before issuing
 * the ACX commands; selection condition is on an elided line here.
 */
4541 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4542 struct ieee80211_vif *vif, u16 queue,
4543 const struct ieee80211_tx_queue_params *params)
4545 struct wl1271 *wl = hw->priv;
4546 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4550 mutex_lock(&wl->mutex);
4552 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4555 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4557 ps_scheme = CONF_PS_SCHEME_LEGACY;
4559 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4562 ret = wl1271_ps_elp_wakeup(wl);
4567 * the txop is confed in units of 32us by the mac80211,
/* hence the << 5 below (txop * 32) to convert to microseconds */
4570 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4571 params->cw_min, params->cw_max,
4572 params->aifs, params->txop << 5);
4576 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4577 CONF_CHANNEL_TYPE_EDCF,
4578 wl1271_tx_get_queue(queue),
4579 ps_scheme, CONF_ACK_POLICY_LEGACY,
4583 wl1271_ps_elp_sleep(wl);
4586 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_tsf callback: read the current TSF from the FW via an
 * ACX info command. Returns ULLONG_MAX if the read could not be done
 * (chip off / wakeup failure), since mactime is only updated on success.
 */
4591 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4592 struct ieee80211_vif *vif)
4595 struct wl1271 *wl = hw->priv;
4596 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4597 u64 mactime = ULLONG_MAX;
4600 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4602 mutex_lock(&wl->mutex);
4604 if (unlikely(wl->state != WLCORE_STATE_ON))
4607 ret = wl1271_ps_elp_wakeup(wl);
4611 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4616 wl1271_ps_elp_sleep(wl);
4619 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_survey callback: report only the currently-configured
 * channel (taken from hw->conf); no per-channel statistics are kept.
 */
4623 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4624 struct survey_info *survey)
4626 struct ieee80211_conf *conf = &hw->conf;
4631 survey->channel = conf->chandef.chan;
/*
 * Allocate a host link ID (HLID) for a new AP-mode station.
 *
 * Fails when the AP already serves AP_MAX_STATIONS or when no free
 * link is available. On success the station's drv_priv hlid is set,
 * the hlid is marked in the vif's sta map, the station MAC is recorded
 * in wl->links[] and the active station count is bumped.
 */
4636 static int wl1271_allocate_sta(struct wl1271 *wl,
4637 struct wl12xx_vif *wlvif,
4638 struct ieee80211_sta *sta)
4640 struct wl1271_station *wl_sta;
4644 if (wl->active_sta_count >= AP_MAX_STATIONS) {
4645 wl1271_warning("could not allocate HLID - too much stations");
4649 wl_sta = (struct wl1271_station *)sta->drv_priv;
4650 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4652 wl1271_warning("could not allocate HLID - too many links");
4656 /* use the previous security seq, if this is a recovery/resume */
4657 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4659 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4660 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4661 wl->active_sta_count++;
/*
 * Free an AP-mode station's HLID and associated bookkeeping.
 *
 * Saves the last TX security sequence number into the station's
 * drv_priv so it survives recovery/suspend, pads it during recovery
 * to cover frames not yet reported by FW status, then releases the
 * link. Re-arms the TX watchdog when the last station goes away.
 */
4665 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4667 struct wl1271_station *wl_sta;
4668 struct ieee80211_sta *sta;
4669 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4671 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4674 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4675 __clear_bit(hlid, &wl->ap_ps_map);
4676 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4679 * save the last used PN in the private part of iee80211_sta,
4680 * in case of recovery/suspend
4683 sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
4685 wl_sta = (void *)sta->drv_priv;
4686 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
4689 * increment the initial seq number on recovery to account for
4690 * transmitted packets that we haven't yet got in the FW status
4692 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
4693 wl_sta->total_freed_pkts +=
4694 WL1271_TX_SQN_POST_RECOVERY_PADDING;
4698 wl12xx_free_link(wl, wlvif, &hlid);
4699 wl->active_sta_count--;
4702 * rearm the tx watchdog when the last STA is freed - give the FW a
4703 * chance to return STA-buffered packets before complaining.
4705 if (wl->active_sta_count == 0)
4706 wl12xx_rearm_tx_watchdog_locked(wl);
/*
 * Add an AP-mode station: allocate an HLID, then issue the FW
 * "add peer" command. On command failure the HLID is freed again so
 * no link leaks.
 */
4709 static int wl12xx_sta_add(struct wl1271 *wl,
4710 struct wl12xx_vif *wlvif,
4711 struct ieee80211_sta *sta)
4713 struct wl1271_station *wl_sta;
4717 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4719 ret = wl1271_allocate_sta(wl, wlvif, sta);
4723 wl_sta = (struct wl1271_station *)sta->drv_priv;
4724 hlid = wl_sta->hlid;
4726 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4728 wl1271_free_sta(wl, wlvif, hlid);
/*
 * Remove an AP-mode station: sanity-check that the HLID is actually
 * allocated, send the FW "remove peer" command and free the link.
 * NOTE(review): 'id' is assigned from wl_sta->hlid on a line elided
 * from this view.
 */
4733 static int wl12xx_sta_remove(struct wl1271 *wl,
4734 struct wl12xx_vif *wlvif,
4735 struct ieee80211_sta *sta)
4737 struct wl1271_station *wl_sta;
4740 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4742 wl_sta = (struct wl1271_station *)sta->drv_priv;
4744 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4747 ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
4751 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
/*
 * Start a remain-on-channel for this vif's role, but only if no other
 * role is currently ROCing (find_first_bit on roc_map finds none) and
 * the role id is valid.
 */
4755 static void wlcore_roc_if_possible(struct wl1271 *wl,
4756 struct wl12xx_vif *wlvif)
4758 if (find_first_bit(wl->roc_map,
4759 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
4762 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
4765 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
4769 * when wl_sta is NULL, we treat this call as if coming from a
4770 * pending auth reply.
4771 * wl->mutex must be taken and the FW must be awake when the call
/*
 * Track stations that are mid-connection (between add and authorize)
 * and pending auth replies, starting a ROC when the first one appears
 * and cancelling it (CROC) when the last one is gone.
 *
 * wl_sta == NULL means the call refers to a pending auth reply rather
 * than a concrete station (see comment above). Caller holds wl->mutex
 * with the FW awake.
 */
4774 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4775 struct wl1271_station *wl_sta, bool in_conn)
4778 if (WARN_ON(wl_sta && wl_sta->in_connection))
/* first in-connection entity on this vif: grab the ROC */
4781 if (!wlvif->ap_pending_auth_reply &&
4782 !wlvif->inconn_count)
4783 wlcore_roc_if_possible(wl, wlvif);
4786 wl_sta->in_connection = true;
4787 wlvif->inconn_count++;
4789 wlvif->ap_pending_auth_reply = true;
4792 if (wl_sta && !wl_sta->in_connection)
4795 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
4798 if (WARN_ON(wl_sta && !wlvif->inconn_count))
4802 wl_sta->in_connection = false;
4803 wlvif->inconn_count--;
4805 wlvif->ap_pending_auth_reply = false;
/* nothing left in connection: cancel the ROC if we own it */
4808 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
4809 test_bit(wlvif->role_id, wl->roc_map))
4810 wl12xx_croc(wl, wlvif->role_id);
/*
 * Drive the per-station state machine for mac80211 sta_state
 * transitions, for both AP roles (add/remove/authorize peers in FW,
 * in-connection ROC tracking) and STA roles (authorized flag, ROC
 * handling around association). Caller holds wl->mutex with FW awake.
 */
4814 static int wl12xx_update_sta_state(struct wl1271 *wl,
4815 struct wl12xx_vif *wlvif,
4816 struct ieee80211_sta *sta,
4817 enum ieee80211_sta_state old_state,
4818 enum ieee80211_sta_state new_state)
4820 struct wl1271_station *wl_sta;
4821 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4822 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4825 wl_sta = (struct wl1271_station *)sta->drv_priv;
4827 /* Add station (AP mode) */
4829 old_state == IEEE80211_STA_NOTEXIST &&
4830 new_state == IEEE80211_STA_NONE) {
4831 ret = wl12xx_sta_add(wl, wlvif, sta);
4835 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
4838 /* Remove station (AP mode) */
4840 old_state == IEEE80211_STA_NONE &&
4841 new_state == IEEE80211_STA_NOTEXIST) {
4843 wl12xx_sta_remove(wl, wlvif, sta);
4845 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4848 /* Authorize station (AP mode) */
4850 new_state == IEEE80211_STA_AUTHORIZED) {
4851 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
4855 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4860 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4863 /* Authorize station */
4865 new_state == IEEE80211_STA_AUTHORIZED) {
4866 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4867 ret = wl12xx_set_authorized(wl, wlvif);
/* De-authorize (STA): drop the authorized/state-sent flags */
4873 old_state == IEEE80211_STA_AUTHORIZED &&
4874 new_state == IEEE80211_STA_ASSOC) {
4875 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4876 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
4879 /* clear ROCs on failure or authorization */
4881 (new_state == IEEE80211_STA_AUTHORIZED ||
4882 new_state == IEEE80211_STA_NOTEXIST)) {
4883 if (test_bit(wlvif->role_id, wl->roc_map))
4884 wl12xx_croc(wl, wlvif->role_id);
4888 old_state == IEEE80211_STA_NOTEXIST &&
4889 new_state == IEEE80211_STA_NONE) {
4890 if (find_first_bit(wl->roc_map,
4891 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
4892 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
4893 wl12xx_roc(wl, wlvif, wlvif->role_id,
4894 wlvif->band, wlvif->channel);
/*
 * mac80211 sta_state callback: wraps wl12xx_update_sta_state() with
 * mutex/ELP handling. For downward transitions (new_state < old_state)
 * errors are not propagated, since mac80211 cannot roll those back.
 */
4900 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4901 struct ieee80211_vif *vif,
4902 struct ieee80211_sta *sta,
4903 enum ieee80211_sta_state old_state,
4904 enum ieee80211_sta_state new_state)
4906 struct wl1271 *wl = hw->priv;
4907 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4910 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4911 sta->aid, old_state, new_state);
4913 mutex_lock(&wl->mutex);
4915 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4920 ret = wl1271_ps_elp_wakeup(wl);
4924 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
4926 wl1271_ps_elp_sleep(wl);
4928 mutex_unlock(&wl->mutex);
4929 if (new_state < old_state)
/*
 * mac80211 ampdu_action callback.
 *
 * RX BA sessions are managed per-link: a per-hlid ba_bitmap tracks the
 * TIDs with an active session, bounded by ba_rx_session_count_max.
 * TX BA sessions are handled autonomously by the FW, so all TX actions
 * are accepted without issuing any command.
 */
4934 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4935 struct ieee80211_vif *vif,
4936 enum ieee80211_ampdu_mlme_action action,
4937 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4940 struct wl1271 *wl = hw->priv;
4941 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4943 u8 hlid, *ba_bitmap;
4945 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
4948 /* sanity check - the fields in FW are only 8bits wide */
4949 if (WARN_ON(tid > 0xFF))
4952 mutex_lock(&wl->mutex);
4954 if (unlikely(wl->state != WLCORE_STATE_ON)) {
/* resolve the link: own hlid for STA, per-peer hlid for AP */
4959 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
4960 hlid = wlvif->sta.hlid;
4961 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
4962 struct wl1271_station *wl_sta;
4964 wl_sta = (struct wl1271_station *)sta->drv_priv;
4965 hlid = wl_sta->hlid;
4971 ba_bitmap = &wl->links[hlid].ba_bitmap;
4973 ret = wl1271_ps_elp_wakeup(wl);
4977 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
4981 case IEEE80211_AMPDU_RX_START:
4982 if (!wlvif->ba_support || !wlvif->ba_allowed) {
4987 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
4989 wl1271_error("exceeded max RX BA sessions");
4993 if (*ba_bitmap & BIT(tid)) {
4995 wl1271_error("cannot enable RX BA session on active "
5000 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5003 *ba_bitmap |= BIT(tid);
5004 wl->ba_rx_session_count++;
5008 case IEEE80211_AMPDU_RX_STOP:
5009 if (!(*ba_bitmap & BIT(tid))) {
5011 * this happens on reconfig - so only output a debug
5012 * message for now, and don't fail the function.
5014 wl1271_debug(DEBUG_MAC80211,
5015 "no active RX BA session on tid: %d",
5021 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5024 *ba_bitmap &= ~BIT(tid);
5025 wl->ba_rx_session_count--;
5030 * The BA initiator session is managed by the FW independently.
5031 * Deliberately fall through here for all TX AMPDU commands.
5033 case IEEE80211_AMPDU_TX_START:
5034 case IEEE80211_AMPDU_TX_STOP_CONT:
5035 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5036 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5037 case IEEE80211_AMPDU_TX_OPERATIONAL:
5042 wl1271_error("Incorrect ampdu action id=%x\n", action);
5046 wl1271_ps_elp_sleep(wl);
5049 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_bitrate_mask callback: cache the per-band legacy rate
 * masks on the vif, and for a not-yet-associated STA immediately apply
 * them by refreshing the band rates and reprogramming the FW rate
 * policies. For associated vifs the cached masks take effect later.
 */
5054 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5055 struct ieee80211_vif *vif,
5056 const struct cfg80211_bitrate_mask *mask)
5058 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5059 struct wl1271 *wl = hw->priv;
5062 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5063 mask->control[NL80211_BAND_2GHZ].legacy,
5064 mask->control[NL80211_BAND_5GHZ].legacy);
5066 mutex_lock(&wl->mutex);
5068 for (i = 0; i < WLCORE_NUM_BANDS; i++)
5069 wlvif->bitrate_masks[i] =
5070 wl1271_tx_enabled_rates_get(wl,
5071 mask->control[i].legacy,
5074 if (unlikely(wl->state != WLCORE_STATE_ON))
5077 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5078 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5080 ret = wl1271_ps_elp_wakeup(wl);
5084 wl1271_set_band_rate(wl, wlvif);
5086 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5087 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5089 wl1271_ps_elp_sleep(wl);
5092 mutex_unlock(&wl->mutex);
/*
 * mac80211 channel_switch callback.
 *
 * If the chip is already off, immediately report the switch as failed
 * for every STA vif. Otherwise issue the HW-specific channel switch
 * for each STA vif and arm a delayed work that declares failure
 * ~5 seconds after the expected switch time (beacon_int based).
 */
5097 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5098 struct ieee80211_channel_switch *ch_switch)
5100 struct wl1271 *wl = hw->priv;
5101 struct wl12xx_vif *wlvif;
5104 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5106 wl1271_tx_flush(wl);
5108 mutex_lock(&wl->mutex);
5110 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5111 wl12xx_for_each_wlvif_sta(wl, wlvif) {
5112 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
5113 ieee80211_chswitch_done(vif, false);
5116 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5120 ret = wl1271_ps_elp_wakeup(wl);
5124 /* TODO: change mac80211 to pass vif as param */
5125 wl12xx_for_each_wlvif_sta(wl, wlvif) {
5126 unsigned long delay_usec;
5128 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5132 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5134 /* indicate failure 5 seconds after channel switch time */
5135 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5137 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5138 usecs_to_jiffies(delay_usec) +
5139 msecs_to_jiffies(5000));
5143 wl1271_ps_elp_sleep(wl);
5146 mutex_unlock(&wl->mutex);
/* mac80211 flush callback: drain all pending TX frames. */
5149 static void wlcore_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
5151 struct wl1271 *wl = hw->priv;
5153 wl1271_tx_flush(wl);
/*
 * mac80211 remain_on_channel callback: start the device role on the
 * requested channel and arm roc_complete_work to fire after the
 * requested duration. Refuses (with a WARN) if a ROC is already in
 * progress — only one ROC at a time is supported.
 */
5156 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5157 struct ieee80211_vif *vif,
5158 struct ieee80211_channel *chan,
5160 enum ieee80211_roc_type type)
5162 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5163 struct wl1271 *wl = hw->priv;
5164 int channel, ret = 0;
5166 channel = ieee80211_frequency_to_channel(chan->center_freq);
5168 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5169 channel, wlvif->role_id);
5171 mutex_lock(&wl->mutex);
5173 if (unlikely(wl->state != WLCORE_STATE_ON))
5176 /* return EBUSY if we can't ROC right now */
5177 if (WARN_ON(wl->roc_vif ||
5178 find_first_bit(wl->roc_map,
5179 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
5184 ret = wl1271_ps_elp_wakeup(wl);
5188 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5193 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5194 msecs_to_jiffies(duration));
5196 wl1271_ps_elp_sleep(wl);
5198 mutex_unlock(&wl->mutex);
/*
 * Finish the current remain-on-channel: stop the device role of the
 * ROCing vif. Returns early if no ROC is active or the vif is no
 * longer initialized. Caller holds wl->mutex with the FW awake.
 */
5202 static int __wlcore_roc_completed(struct wl1271 *wl)
5204 struct wl12xx_vif *wlvif;
5207 /* already completed */
5208 if (unlikely(!wl->roc_vif))
5211 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5213 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5216 ret = wl12xx_stop_dev(wl, wlvif);
/*
 * Locked wrapper around __wlcore_roc_completed(): takes wl->mutex,
 * wakes the chip, completes the ROC, then sleeps again.
 */
5225 static int wlcore_roc_completed(struct wl1271 *wl)
5229 wl1271_debug(DEBUG_MAC80211, "roc complete");
5231 mutex_lock(&wl->mutex);
5233 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5238 ret = wl1271_ps_elp_wakeup(wl);
5242 ret = __wlcore_roc_completed(wl);
5244 wl1271_ps_elp_sleep(wl);
5246 mutex_unlock(&wl->mutex);
/*
 * Delayed work fired when the ROC duration elapses: complete the ROC
 * and notify mac80211 that the remain-on-channel has expired.
 */
5251 static void wlcore_roc_complete_work(struct work_struct *work)
5253 struct delayed_work *dwork;
5257 dwork = container_of(work, struct delayed_work, work);
5258 wl = container_of(dwork, struct wl1271, roc_complete_work);
5260 ret = wlcore_roc_completed(wl);
5262 ieee80211_remain_on_channel_expired(wl->hw);
/*
 * mac80211 cancel_remain_on_channel callback: flush TX, cancel the
 * pending completion work and complete the ROC synchronously.
 */
5265 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5267 struct wl1271 *wl = hw->priv;
5269 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5272 wl1271_tx_flush(wl);
5275 * we can't just flush_work here, because it might deadlock
5276 * (as we might get called from the same workqueue)
5278 cancel_delayed_work_sync(&wl->roc_complete_work);
5279 wlcore_roc_completed(wl);
/*
 * mac80211 sta_rc_update callback: delegate rate-control updates to
 * the chip-specific handler.
 */
5284 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5285 struct ieee80211_vif *vif,
5286 struct ieee80211_sta *sta,
5289 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5290 struct wl1271 *wl = hw->priv;
5292 wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
/*
 * mac80211 get_rssi callback: query the FW for the average RSSI of
 * this vif's connection, under wl->mutex with an ELP wakeup.
 */
5295 static int wlcore_op_get_rssi(struct ieee80211_hw *hw,
5296 struct ieee80211_vif *vif,
5297 struct ieee80211_sta *sta,
5300 struct wl1271 *wl = hw->priv;
5301 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5304 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5306 mutex_lock(&wl->mutex);
5308 if (unlikely(wl->state != WLCORE_STATE_ON))
5311 ret = wl1271_ps_elp_wakeup(wl);
5315 ret = wlcore_acx_average_rssi(wl, wlvif, rssi_dbm);
5320 wl1271_ps_elp_sleep(wl);
5323 mutex_unlock(&wl->mutex);
/*
 * mac80211 tx_frames_pending callback: true if any frame is queued in
 * the driver TX queues or still owned by the FW.
 */
5328 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5330 struct wl1271 *wl = hw->priv;
5333 mutex_lock(&wl->mutex);
5335 if (unlikely(wl->state != WLCORE_STATE_ON))
5338 /* packets are considered pending if in the TX queue or the FW */
5339 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5341 mutex_unlock(&wl->mutex);
/* 2.4 GHz legacy rate table (11b + OFDM); hw_value maps to the FW's
 * CONF_HW_BIT_RATE_* bitmask. 11b rates above 1 Mbps allow short
 * preamble. */
5346 /* can't be const, mac80211 writes to this */
5347 static struct ieee80211_rate wl1271_rates[] = {
5349 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5350 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5352 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5353 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5354 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5356 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5357 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5358 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5360 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5361 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5362 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5364 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5365 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5367 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5368 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5370 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5371 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5373 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5374 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5376 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5377 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5379 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5380 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5382 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5383 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5385 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5386 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
/* 2.4 GHz channel table (channels 1-14), all at the driver's maximum
 * configurable TX power. */
5389 /* can't be const, mac80211 writes to this */
5390 static struct ieee80211_channel wl1271_channels[] = {
5391 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5392 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5393 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5394 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5395 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5396 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5397 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5398 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5399 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5400 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5401 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5402 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5403 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5404 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
/* 2.4 GHz supported-band descriptor handed to mac80211. */
5407 /* can't be const, mac80211 writes to this */
5408 static struct ieee80211_supported_band wl1271_band_2ghz = {
5409 .channels = wl1271_channels,
5410 .n_channels = ARRAY_SIZE(wl1271_channels),
5411 .bitrates = wl1271_rates,
5412 .n_bitrates = ARRAY_SIZE(wl1271_rates),
/* 5 GHz legacy (OFDM-only) rate table; hw_value maps to the FW's
 * CONF_HW_BIT_RATE_* bitmask. */
5415 /* 5 GHz data rates for WL1273 */
5416 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5418 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5419 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5421 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5422 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5424 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5425 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5427 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5428 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5430 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5431 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5433 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5434 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5436 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5437 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5439 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5440 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
/* 5 GHz channel table, all at the driver's maximum configurable TX
 * power. */
5443 /* 5 GHz band channels for WL1273 */
5444 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5445 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5446 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5447 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5448 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5449 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5450 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5451 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5452 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5453 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5454 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5455 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5456 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5457 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5458 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5459 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5460 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5461 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5462 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5463 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5464 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5465 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5466 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5467 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5468 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5469 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5470 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5471 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5472 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5473 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5474 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5475 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
/* 5 GHz supported-band descriptor handed to mac80211. */
5478 static struct ieee80211_supported_band wl1271_band_5ghz = {
5479 .channels = wl1271_channels_5ghz,
5480 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5481 .bitrates = wl1271_rates_5ghz,
5482 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
/* mac80211 callback table binding this driver's handlers. */
5485 static const struct ieee80211_ops wl1271_ops = {
5486 .start = wl1271_op_start,
5487 .stop = wlcore_op_stop,
5488 .add_interface = wl1271_op_add_interface,
5489 .remove_interface = wl1271_op_remove_interface,
5490 .change_interface = wl12xx_op_change_interface,
5492 .suspend = wl1271_op_suspend,
5493 .resume = wl1271_op_resume,
5495 .config = wl1271_op_config,
5496 .prepare_multicast = wl1271_op_prepare_multicast,
5497 .configure_filter = wl1271_op_configure_filter,
5499 .set_key = wlcore_op_set_key,
5500 .hw_scan = wl1271_op_hw_scan,
5501 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
5502 .sched_scan_start = wl1271_op_sched_scan_start,
5503 .sched_scan_stop = wl1271_op_sched_scan_stop,
5504 .bss_info_changed = wl1271_op_bss_info_changed,
5505 .set_frag_threshold = wl1271_op_set_frag_threshold,
5506 .set_rts_threshold = wl1271_op_set_rts_threshold,
5507 .conf_tx = wl1271_op_conf_tx,
5508 .get_tsf = wl1271_op_get_tsf,
5509 .get_survey = wl1271_op_get_survey,
5510 .sta_state = wl12xx_op_sta_state,
5511 .ampdu_action = wl1271_op_ampdu_action,
5512 .tx_frames_pending = wl1271_tx_frames_pending,
5513 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5514 .set_default_unicast_key = wl1271_op_set_default_key_idx,
5515 .channel_switch = wl12xx_op_channel_switch,
5516 .flush = wlcore_op_flush,
5517 .remain_on_channel = wlcore_op_remain_on_channel,
5518 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5519 .add_chanctx = wlcore_op_add_chanctx,
5520 .remove_chanctx = wlcore_op_remove_chanctx,
5521 .change_chanctx = wlcore_op_change_chanctx,
5522 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5523 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5524 .sta_rc_update = wlcore_op_sta_rc_update,
5525 .get_rssi = wlcore_op_get_rssi,
5526 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
/*
 * Translate a HW RX rate into the mac80211 rate index for the given
 * band via the chip's band_rate_to_idx table, logging (and, per the
 * elided error paths, rejecting) out-of-range or unsupported values.
 */
5530 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5536 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5537 wl1271_error("Illegal RX rate from HW: %d", rate);
5541 idx = wl->band_rate_to_idx[band][rate];
5542 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5543 wl1271_error("Unsupported RX rate from HW: %d", rate);
/*
 * Derive the wiphy's MAC addresses from a base OUI+NIC pair: each of
 * the chip's num_mac_addr addresses gets the same OUI with consecutive
 * NIC values (incremented on elided lines). If the chip provides one
 * address fewer than WLCORE_NUM_MAC_ADDRESSES, the last slot is filled
 * with a copy of the first address with the locally-administered bit
 * set.
 */
5550 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5554 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5557 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5558 wl1271_warning("NIC part of the MAC address wraps around!");
5560 for (i = 0; i < wl->num_mac_addr; i++) {
5561 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5562 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5563 wl->addresses[i].addr[2] = (u8) oui;
5564 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5565 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5566 wl->addresses[i].addr[5] = (u8) nic;
5570 /* we may be one address short at the most */
5571 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5574 * turn on the LAA bit in the first address and use it as
5577 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5578 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5579 memcpy(&wl->addresses[idx], &wl->addresses[0],
5580 sizeof(wl->addresses[0]));
/* BIT(1) of the first octet region = locally administered address */
5582 wl->addresses[idx].addr[2] |= BIT(1);
5585 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5586 wl->hw->wiphy->addresses = wl->addresses;
/*
 * wl12xx_get_hw_info - briefly power the chip on to read identification
 * data: chip ID, PG (production/version) info, and — if the chip-specific
 * ops provide it — the fused MAC address.  The chip is powered off again
 * before returning.  Returns 0 or a negative errno (error paths are not
 * fully visible in this listing).
 */
5589 static int wl12xx_get_hw_info(struct wl1271 *wl)
5593 ret = wl12xx_set_power_on(wl);
5597 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
/* clear fuse address fields; get_mac (if present) fills them below */
5601 wl->fuse_oui_addr = 0;
5602 wl->fuse_nic_addr = 0;
5604 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
/* get_mac is optional per chip family */
5608 if (wl->ops->get_mac)
5609 ret = wl->ops->get_mac(wl);
5612 wl1271_power_off(wl);
/*
 * wl1271_register_hw - derive the device MAC addresses and register the
 * hw with mac80211.  Idempotent: returns early if already registered.
 *
 * Address priority: NVS-embedded address first; if the NVS address is all
 * zero, fall back to the fused address (NIC part + 1, since the fuse holds
 * the BD_ADDR and the WLAN addresses follow it).
 */
5616 static int wl1271_register_hw(struct wl1271 *wl)
5619 u32 oui_addr = 0, nic_addr = 0;
5621 if (wl->mac80211_registered)
/* 12 bytes is the minimum NVS size that contains the MAC address bytes */
5624 if (wl->nvs_len >= 12) {
5625 /* NOTE: The wl->nvs->nvs element must be first, in
5626 * order to simplify the casting, we assume it is at
5627 * the beginning of the wl->nvs structure.
5629 u8 *nvs_ptr = (u8 *)wl->nvs;
5632 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5634 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5637 /* if the MAC address is zeroed in the NVS derive from fuse */
5638 if (oui_addr == 0 && nic_addr == 0) {
5639 oui_addr = wl->fuse_oui_addr;
5640 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5641 nic_addr = wl->fuse_nic_addr + 1;
5644 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5646 ret = ieee80211_register_hw(wl->hw);
5648 wl1271_error("unable to register mac80211 hw: %d", ret);
5652 wl->mac80211_registered = true;
5654 wl1271_debugfs_init(wl);
5656 wl1271_notice("loaded");
/*
 * wl1271_unregister_hw - tear down the mac80211 registration.  Stops PLT
 * mode first (guard condition not visible in this listing — presumably
 * only when PLT is active), then unregisters from mac80211.
 */
5662 static void wl1271_unregister_hw(struct wl1271 *wl)
5665 wl1271_plt_stop(wl);
5667 ieee80211_unregister_hw(wl->hw);
5668 wl->mac80211_registered = false;
/*
 * Per-type interface count limits advertised to cfg80211: one entry for
 * station interfaces, one for AP/P2P roles.  The max counts per entry are
 * not visible in this listing.
 */
5672 static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
5675 .types = BIT(NL80211_IFTYPE_STATION),
5679 .types = BIT(NL80211_IFTYPE_AP) |
5680 BIT(NL80211_IFTYPE_P2P_GO) |
5681 BIT(NL80211_IFTYPE_P2P_CLIENT),
/*
 * Allowed interface combinations (at most 3 concurrent interfaces under
 * wlcore_iface_limits).  Deliberately non-const: num_different_channels
 * is patched at init time in wl1271_init_ieee80211() from wl->num_channels.
 */
5685 static struct ieee80211_iface_combination
5686 wlcore_iface_combinations[] = {
5688 .max_interfaces = 3,
5689 .limits = wlcore_iface_limits,
5690 .n_limits = ARRAY_SIZE(wlcore_iface_limits),
/*
 * wl1271_init_ieee80211 - populate the ieee80211_hw / wiphy structures
 * before registration: capabilities, cipher suites, supported interface
 * types, scan limits, band tables, TX queue layout, probe-response
 * offload, and interface combinations.
 */
5694 static int wl1271_init_ieee80211(struct wl1271 *wl)
5697 static const u32 cipher_suites[] = {
5698 WLAN_CIPHER_SUITE_WEP40,
5699 WLAN_CIPHER_SUITE_WEP104,
5700 WLAN_CIPHER_SUITE_TKIP,
5701 WLAN_CIPHER_SUITE_CCMP,
/* GEM is a TI/vendor-specific cipher suite */
5702 WL1271_CIPHER_SUITE_GEM,
5705 /* The tx descriptor buffer */
5706 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
/* some chips need extra headroom for in-driver TKIP handling */
5708 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
5709 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
5712 /* FIXME: find a proper value */
5713 wl->hw->channel_change_time = 10000;
5714 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
5716 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
5717 IEEE80211_HW_SUPPORTS_PS |
5718 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
5719 IEEE80211_HW_SUPPORTS_UAPSD |
5720 IEEE80211_HW_HAS_RATE_CONTROL |
5721 IEEE80211_HW_CONNECTION_MONITOR |
5722 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
5723 IEEE80211_HW_SPECTRUM_MGMT |
5724 IEEE80211_HW_AP_LINK_PS |
5725 IEEE80211_HW_AMPDU_AGGREGATION |
5726 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
5727 IEEE80211_HW_QUEUE_CONTROL;
5729 wl->hw->wiphy->cipher_suites = cipher_suites;
5730 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5732 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5733 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5734 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
/* FW supports one SSID per normal scan, 16 for scheduled scans */
5735 wl->hw->wiphy->max_scan_ssids = 1;
5736 wl->hw->wiphy->max_sched_scan_ssids = 16;
5737 wl->hw->wiphy->max_match_sets = 16;
5739 * Maximum length of elements in scanning probe request templates
5740 * should be the maximum length possible for a template, without
5741 * the IEEE80211 header of the template
5743 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5744 sizeof(struct ieee80211_header);
5746 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5747 sizeof(struct ieee80211_header);
/* remain-on-channel limited to 5 seconds */
5749 wl->hw->wiphy->max_remain_on_channel_duration = 5000;
5751 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
5752 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
5753 WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
5755 /* make sure all our channels fit in the scanned_ch bitmask */
5756 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5757 ARRAY_SIZE(wl1271_channels_5ghz) >
5758 WL1271_MAX_CHANNELS);
5760 * clear channel flags from the previous usage
5761 * and restore max_power & max_antenna_gain values.
5763 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
5764 wl1271_band_2ghz.channels[i].flags = 0;
5765 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5766 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
5769 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
5770 wl1271_band_5ghz.channels[i].flags = 0;
5771 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5772 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
5776 * We keep local copies of the band structs because we need to
5777 * modify them on a per-device basis.
5779 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5780 sizeof(wl1271_band_2ghz));
/* overlay the chip-specific HT capabilities onto each band copy */
5781 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
5782 &wl->ht_cap[IEEE80211_BAND_2GHZ],
5783 sizeof(*wl->ht_cap));
5784 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5785 sizeof(wl1271_band_5ghz));
5786 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
5787 &wl->ht_cap[IEEE80211_BAND_5GHZ],
5788 sizeof(*wl->ht_cap));
5790 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5791 &wl->bands[IEEE80211_BAND_2GHZ];
5792 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5793 &wl->bands[IEEE80211_BAND_5GHZ];
5796 * allow 4 queues per mac address we support +
5797 * 1 cab queue per mac + one global offchannel Tx queue
5799 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
5801 /* the last queue is the offchannel queue */
5802 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
5803 wl->hw->max_rates = 1;
5805 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5807 /* the FW answers probe-requests in AP-mode */
5808 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5809 wl->hw->wiphy->probe_resp_offload =
5810 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5811 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5812 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5814 /* allowed interface combinations */
/* patch the per-device channel count into the static combination table */
5815 wlcore_iface_combinations[0].num_different_channels = wl->num_channels;
5816 wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
5817 wl->hw->wiphy->n_iface_combinations =
5818 ARRAY_SIZE(wlcore_iface_combinations);
5820 SET_IEEE80211_DEV(wl->hw, wl->dev);
5822 wl->hw->sta_data_size = sizeof(struct wl1271_station);
5823 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5825 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
/*
 * wlcore_alloc_hw - allocate the ieee80211_hw plus the wl1271 core state
 * and all driver-owned buffers (chip-private area, aggregation buffer,
 * dummy packet, FW log page, mailbox, 32-bit bounce buffer), initialize
 * queues, work items, locks, and defaults.
 *
 * Returns the ieee80211_hw on success or ERR_PTR(ret) on failure; the
 * error labels at the bottom unwind in reverse allocation order
 * (goto-based cleanup).
 */
5830 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
5833 struct ieee80211_hw *hw;
5838 BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
5840 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5842 wl1271_error("could not alloc ieee80211_hw");
5848 memset(wl, 0, sizeof(*wl));
/* chip-family private data lives behind wl->priv */
5850 wl->priv = kzalloc(priv_size, GFP_KERNEL);
5852 wl1271_error("could not alloc wl priv");
5854 goto err_priv_alloc;
5857 INIT_LIST_HEAD(&wl->wlvif_list);
/* one TX queue per AC for every possible link */
5861 for (i = 0; i < NUM_TX_QUEUES; i++)
5862 for (j = 0; j < WL12XX_MAX_LINKS; j++)
5863 skb_queue_head_init(&wl->links[j].tx_queue[i]);
5865 skb_queue_head_init(&wl->deferred_rx_queue);
5866 skb_queue_head_init(&wl->deferred_tx_queue);
5868 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
5869 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
5870 INIT_WORK(&wl->tx_work, wl1271_tx_work);
5871 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5872 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
5873 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
5874 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
/* freezable so pending work can't race suspend/resume */
5876 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5877 if (!wl->freezable_wq) {
5884 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5885 wl->band = IEEE80211_BAND_2GHZ;
5886 wl->channel_type = NL80211_CHAN_NO_HT;
5888 wl->sg_enabled = true;
5889 wl->sleep_auth = WL1271_PSM_ILLEGAL;
5890 wl->recovery_count = 0;
5893 wl->ap_fw_ps_map = 0;
5895 wl->platform_quirks = 0;
5896 wl->system_hlid = WL12XX_SYSTEM_HLID;
5897 wl->active_sta_count = 0;
5898 wl->active_link_count = 0;
5900 init_waitqueue_head(&wl->fwlog_waitq);
5902 /* The system link is always allocated */
5903 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5905 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5906 for (i = 0; i < wl->num_tx_desc; i++)
5907 wl->tx_frames[i] = NULL;
5909 spin_lock_init(&wl->wl_lock);
5911 wl->state = WLCORE_STATE_OFF;
5912 wl->fw_type = WL12XX_FW_TYPE_NONE;
5913 mutex_init(&wl->mutex);
5914 mutex_init(&wl->flush_mutex);
5915 init_completion(&wl->nvs_loading_complete);
/* aggregation buffer must be page-aligned for the bus transfers */
5917 order = get_order(aggr_buf_size);
5918 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5919 if (!wl->aggr_buf) {
5923 wl->aggr_buf_size = aggr_buf_size;
5925 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5926 if (!wl->dummy_packet) {
5931 /* Allocate one page for the FW log */
5932 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
5935 goto err_dummy_packet;
5938 wl->mbox_size = mbox_size;
/* GFP_DMA: mailbox is read by DMA-capable bus transfers */
5939 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
5945 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
5946 if (!wl->buffer_32) {
/* error unwind: reverse order of the allocations above */
5957 free_page((unsigned long)wl->fwlog);
5960 dev_kfree_skb(wl->dummy_packet);
5963 free_pages((unsigned long)wl->aggr_buf, order);
5966 destroy_workqueue(wl->freezable_wq);
5969 wl1271_debugfs_exit(wl);
5973 ieee80211_free_hw(hw);
5977 return ERR_PTR(ret);
/*
 * wlcore_free_hw - release everything wlcore_alloc_hw() created.
 *
 * First unblocks any sleeping fwlog readers (fwlog_size = -1 acts as the
 * "no more data" sentinel) under wl->mutex, then frees buffers, debugfs,
 * firmware state, the workqueue, and finally the ieee80211_hw itself.
 */
5981 int wlcore_free_hw(struct wl1271 *wl)
5983 /* Unblock any fwlog readers */
5984 mutex_lock(&wl->mutex);
5985 wl->fwlog_size = -1;
5986 wake_up_interruptible_all(&wl->fwlog_waitq);
5987 mutex_unlock(&wl->mutex);
5989 wlcore_sysfs_free(wl);
5991 kfree(wl->buffer_32);
5993 free_page((unsigned long)wl->fwlog);
5994 dev_kfree_skb(wl->dummy_packet);
5995 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
5997 wl1271_debugfs_exit(wl);
6001 wl->fw_type = WL12XX_FW_TYPE_NONE;
6005 kfree(wl->fw_status_1);
6006 kfree(wl->tx_res_if);
6007 destroy_workqueue(wl->freezable_wq);
6010 ieee80211_free_hw(wl->hw);
/*
 * Wake-on-WLAN capabilities advertised to cfg80211: wake on "any" trigger
 * plus pattern matching bounded by the RX-filter limits of the FW.
 */
6017 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6018 .flags = WIPHY_WOWLAN_ANY,
6019 .n_patterns = WL1271_MAX_RX_FILTERS,
6020 .pattern_min_len = 1,
6021 .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
/*
 * wlcore_hardirq - minimal hard-IRQ handler used only for edge-triggered
 * interrupt lines: defer all real work to the threaded handler.
 */
6025 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6027 return IRQ_WAKE_THREAD;
/*
 * wlcore_nvs_cb - async completion callback for the NVS firmware request
 * started by wlcore_probe().  Performs the remainder of device probe:
 * copies the NVS blob (if found), runs chip setup, wires up the IRQ
 * (edge-triggered quirk gets a hardirq trampoline), configures wakeup,
 * reads HW info, identifies the chip, initializes and registers mac80211,
 * and creates sysfs entries.  On any failure it unwinds via the error
 * labels; either way it releases the firmware and signals
 * nvs_loading_complete so wlcore_remove() can proceed.
 */
6030 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6032 struct wl1271 *wl = context;
6033 struct platform_device *pdev = wl->pdev;
6034 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6035 struct wl12xx_platform_data *pdata = pdev_data->pdata;
6036 unsigned long irqflags;
6038 irq_handler_t hardirq_fn = NULL;
/* NVS is optional: missing file is only a debug message, not fatal */
6041 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6043 wl1271_error("Could not allocate nvs data");
6046 wl->nvs_len = fw->size;
6048 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6054 ret = wl->ops->setup(wl);
6058 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6060 /* adjust some runtime configuration parameters */
6061 wlcore_adjust_conf(wl);
6063 wl->irq = platform_get_irq(pdev, 0);
6064 wl->platform_quirks = pdata->platform_quirks;
6065 wl->if_ops = pdev_data->if_ops;
/* edge-triggered platforms need a hardirq handler to wake the thread */
6067 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) {
6068 irqflags = IRQF_TRIGGER_RISING;
6069 hardirq_fn = wlcore_hardirq;
6071 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
6074 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6075 irqflags, pdev->name, wl);
6077 wl1271_error("request_irq() failed: %d", ret);
/* enable IRQ wakeup / WoWLAN only if the platform supports it */
6082 ret = enable_irq_wake(wl->irq);
6084 wl->irq_wake_enabled = true;
6085 device_init_wakeup(wl->dev, 1);
6086 if (pdata->pwr_in_suspend)
6087 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
/* keep the IRQ masked until the interface is started */
6090 disable_irq(wl->irq);
6092 ret = wl12xx_get_hw_info(wl);
6094 wl1271_error("couldn't get hw info");
6098 ret = wl->ops->identify_chip(wl);
6102 ret = wl1271_init_ieee80211(wl);
6106 ret = wl1271_register_hw(wl);
6110 ret = wlcore_sysfs_init(wl);
6114 wl->initialized = true;
/* error unwind labels (goto targets elided from this listing) */
6118 wl1271_unregister_hw(wl);
6121 free_irq(wl->irq, wl);
6127 release_firmware(fw);
6128 complete_all(&wl->nvs_loading_complete);
/*
 * wlcore_probe - common probe entry point for the chip-family drivers.
 * Validates that the family installed its ops/partition table, records
 * the device, and kicks off an asynchronous NVS firmware request; actual
 * probing continues in wlcore_nvs_cb().  On request failure the
 * nvs_loading_complete completion is still signaled so a subsequent
 * wlcore_remove() does not block forever.
 */
6131 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6135 if (!wl->ops || !wl->ptable)
6138 wl->dev = &pdev->dev;
6140 platform_set_drvdata(pdev, wl);
6142 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6143 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6146 wl1271_error("request_firmware_nowait failed: %d", ret);
6147 complete_all(&wl->nvs_loading_complete);
/*
 * wlcore_remove - common remove entry point.  Waits for the async probe
 * (wlcore_nvs_cb) to finish, then undoes its work: disables IRQ wakeup,
 * unregisters mac80211, and frees the IRQ.  If probe never completed
 * (wl->initialized false) there is nothing to tear down.
 */
6154 int wlcore_remove(struct platform_device *pdev)
6156 struct wl1271 *wl = platform_get_drvdata(pdev);
/* synchronize with the asynchronous NVS/probe path */
6158 wait_for_completion(&wl->nvs_loading_complete);
6159 if (!wl->initialized)
6162 if (wl->irq_wake_enabled) {
6163 device_init_wakeup(wl->dev, 0);
6164 disable_irq_wake(wl->irq);
6166 wl1271_unregister_hw(wl);
6167 free_irq(wl->irq, wl);
/*
 * Module parameters and metadata.  wl12xx_debug_level is exported so the
 * chip-family modules share the same debug mask; the remaining parameters
 * back the static variables declared near the top of this file.
 */
6174 u32 wl12xx_debug_level = DEBUG_NONE;
6175 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6176 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6177 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6179 module_param_named(fwlog, fwlog_param, charp, 0);
6180 MODULE_PARM_DESC(fwlog,
6181 "FW logger options: continuous, ondemand, dbgpins or disable");
6183 module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
6184 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6186 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6187 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6189 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6190 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6192 MODULE_LICENSE("GPL");
6193 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6194 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6195 MODULE_FIRMWARE(WL12XX_NVS_NAME);