]> Pileus Git - ~andy/linux/commitdiff
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wirel...
authorJohn W. Linville <linville@tuxdriver.com>
Mon, 25 Apr 2011 18:34:25 +0000 (14:34 -0400)
committerJohn W. Linville <linville@tuxdriver.com>
Mon, 25 Apr 2011 18:34:25 +0000 (14:34 -0400)
Conflicts:
drivers/net/wireless/iwlwifi/iwl-core.c
drivers/net/wireless/rt2x00/rt2x00queue.c
drivers/net/wireless/rt2x00/rt2x00queue.h

35 files changed:
1  2 
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath5k/desc.c
drivers/net/wireless/ath/ath5k/eeprom.c
drivers/net/wireless/ath/ath5k/pci.c
drivers/net/wireless/ath/ath5k/pcu.c
drivers/net/wireless/ath/ath9k/hif_usb.c
drivers/net/wireless/ath/ath9k/htc_hst.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/mac.h
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/rc.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/regd.c
drivers/net/wireless/iwlegacy/iwl4965-base.c
drivers/net/wireless/iwlwifi/iwl-fh.h
drivers/net/wireless/rt2x00/rt2800lib.c
drivers/net/wireless/rt2x00/rt2x00.h
drivers/net/wireless/rt2x00/rt2x00config.c
drivers/net/wireless/rt2x00/rt2x00crypto.c
drivers/net/wireless/rt2x00/rt2x00link.c
drivers/net/wireless/rt2x00/rt2x00queue.c
drivers/net/wireless/rt2x00/rt2x00queue.h
drivers/net/wireless/rt2x00/rt2x00usb.c
drivers/net/wireless/rt2x00/rt2x00usb.h
drivers/net/wireless/rtlwifi/base.c
drivers/net/wireless/rtlwifi/pci.c
include/linux/nl80211.h
include/net/mac80211.h
net/bluetooth/l2cap_sock.c
net/mac80211/ieee80211_i.h
net/mac80211/mesh_pathtbl.c
net/mac80211/rx.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h

index 349a5963931b21a60d3f361878cd18022e425b52,7583841fc29a83bf4d6f547582f30bd4fe878c0b..203243bacc8919b264b3d05fa12cf33dfeaa9570
@@@ -1443,6 -1443,21 +1443,21 @@@ ath5k_receive_frame_ok(struct ath5k_sof
        return true;
  }
  
+ static void
+ ath5k_set_current_imask(struct ath5k_softc *sc)
+ {
+       enum ath5k_int imask = sc->imask;
+       unsigned long flags;
+       spin_lock_irqsave(&sc->irqlock, flags);
+       if (sc->rx_pending)
+               imask &= ~AR5K_INT_RX_ALL;
+       if (sc->tx_pending)
+               imask &= ~AR5K_INT_TX_ALL;
+       ath5k_hw_set_imr(sc->ah, imask);
+       spin_unlock_irqrestore(&sc->irqlock, flags);
+ }
  static void
  ath5k_tasklet_rx(unsigned long data)
  {
@@@ -1506,6 -1521,8 +1521,8 @@@ next
        } while (ath5k_rxbuf_setup(sc, bf) == 0);
  unlock:
        spin_unlock(&sc->rxbuflock);
+       sc->rx_pending = false;
+       ath5k_set_current_imask(sc);
  }
  
  
@@@ -1573,28 -1590,28 +1590,28 @@@ ath5k_tx_frame_completed(struct ath5k_s
                         struct ath5k_txq *txq, struct ath5k_tx_status *ts)
  {
        struct ieee80211_tx_info *info;
+       u8 tries[3];
        int i;
  
        sc->stats.tx_all_count++;
        sc->stats.tx_bytes_count += skb->len;
        info = IEEE80211_SKB_CB(skb);
  
+       tries[0] = info->status.rates[0].count;
+       tries[1] = info->status.rates[1].count;
+       tries[2] = info->status.rates[2].count;
        ieee80211_tx_info_clear_status(info);
-       for (i = 0; i < 4; i++) {
+       for (i = 0; i < ts->ts_final_idx; i++) {
                struct ieee80211_tx_rate *r =
                        &info->status.rates[i];
  
-               if (ts->ts_rate[i]) {
-                       r->idx = ath5k_hw_to_driver_rix(sc, ts->ts_rate[i]);
-                       r->count = ts->ts_retry[i];
-               } else {
-                       r->idx = -1;
-                       r->count = 0;
-               }
+               r->count = tries[i];
        }
  
-       /* count the successful attempt as well */
-       info->status.rates[ts->ts_final_idx].count++;
+       info->status.rates[ts->ts_final_idx].count = ts->ts_final_retry;
+       info->status.rates[ts->ts_final_idx + 1].idx = -1;
  
        if (unlikely(ts->ts_status)) {
                sc->stats.ack_fail++;
        } else {
                info->flags |= IEEE80211_TX_STAT_ACK;
                info->status.ack_signal = ts->ts_rssi;
+               /* count the successful attempt as well */
+               info->status.rates[ts->ts_final_idx].count++;
        }
  
        /*
@@@ -1690,6 -1710,9 +1710,9 @@@ ath5k_tasklet_tx(unsigned long data
        for (i=0; i < AR5K_NUM_TX_QUEUES; i++)
                if (sc->txqs[i].setup && (sc->ah->ah_txq_isr & BIT(i)))
                        ath5k_tx_processq(sc, &sc->txqs[i]);
+       sc->tx_pending = false;
+       ath5k_set_current_imask(sc);
  }
  
  
@@@ -1953,7 -1976,7 +1976,7 @@@ ath5k_beacon_update_timers(struct ath5k
  
  #define FUDGE AR5K_TUNE_SW_BEACON_RESP + 3
        /* We use FUDGE to make sure the next TBTT is ahead of the current TU.
 -       * Since we later substract AR5K_TUNE_SW_BEACON_RESP (10) in the timer
 +       * Since we later subtract AR5K_TUNE_SW_BEACON_RESP (10) in the timer
         * configuration we need to make sure it is bigger than that. */
  
        if (bc_tsf == -1) {
                intval |= AR5K_BEACON_RESET_TSF;
        } else if (bc_tsf > hw_tsf) {
                /*
 -               * beacon received, SW merge happend but HW TSF not yet updated.
 +               * beacon received, SW merge happened but HW TSF not yet updated.
                 * not possible to reconfigure timers yet, but next time we
                 * receive a beacon with the same BSSID, the hardware will
                 * automatically update the TSF and then we need to reconfigure
@@@ -2119,6 -2142,20 +2142,20 @@@ ath5k_intr_calibration_poll(struct ath5
         * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
  }
  
+ static void
+ ath5k_schedule_rx(struct ath5k_softc *sc)
+ {
+       sc->rx_pending = true;
+       tasklet_schedule(&sc->rxtq);
+ }
+ static void
+ ath5k_schedule_tx(struct ath5k_softc *sc)
+ {
+       sc->tx_pending = true;
+       tasklet_schedule(&sc->txtq);
+ }
  irqreturn_t
  ath5k_intr(int irq, void *dev_id)
  {
                                ieee80211_queue_work(sc->hw, &sc->reset_work);
                        }
                        else
-                               tasklet_schedule(&sc->rxtq);
+                               ath5k_schedule_rx(sc);
                } else {
                        if (status & AR5K_INT_SWBA) {
                                tasklet_hi_schedule(&sc->beacontq);
                                ath5k_hw_update_tx_triglevel(ah, true);
                        }
                        if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
-                               tasklet_schedule(&sc->rxtq);
+                               ath5k_schedule_rx(sc);
                        if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
                                        | AR5K_INT_TXERR | AR5K_INT_TXEOL))
-                               tasklet_schedule(&sc->txtq);
+                               ath5k_schedule_tx(sc);
                        if (status & AR5K_INT_BMISS) {
                                /* TODO */
                        }
  
        } while (ath5k_hw_is_intr_pending(ah) && --counter > 0);
  
+       if (sc->rx_pending || sc->tx_pending)
+               ath5k_set_current_imask(sc);
        if (unlikely(!counter))
                ATH5K_WARN(sc, "too many interrupts, giving up for now\n");
  
@@@ -2572,6 -2612,8 +2612,8 @@@ done
  
  static void stop_tasklets(struct ath5k_softc *sc)
  {
+       sc->rx_pending = false;
+       sc->tx_pending = false;
        tasklet_kill(&sc->rxtq);
        tasklet_kill(&sc->txtq);
        tasklet_kill(&sc->calib);
@@@ -2651,7 -2693,7 +2693,7 @@@ ath5k_reset(struct ath5k_softc *sc, str
        synchronize_irq(sc->irq);
        stop_tasklets(sc);
  
 -      /* Save ani mode and disable ANI durring
 +      /* Save ani mode and disable ANI during
         * reset. If we don't we might get false
         * PHY error interrupts. */
        ani_mode = ah->ah_sc->ani_state.ani_mode;
@@@ -2838,7 -2880,7 +2880,7 @@@ ath5k_init(struct ieee80211_hw *hw
        INIT_WORK(&sc->reset_work, ath5k_reset_work);
        INIT_DELAYED_WORK(&sc->tx_complete_work, ath5k_tx_complete_poll_work);
  
-       ret = ath5k_eeprom_read_mac(ah, mac);
+       ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac);
        if (ret) {
                ATH5K_ERR(sc, "unable to read address from EEPROM\n");
                goto err_queues;
@@@ -2898,7 -2940,6 +2940,6 @@@ ath5k_deinit_softc(struct ath5k_softc *
         * XXX: ??? detach ath5k_hw ???
         * Other than that, it's straightforward...
         */
-       ath5k_debug_finish_device(sc);
        ieee80211_unregister_hw(hw);
        ath5k_desc_free(sc);
        ath5k_txq_release(sc);
index a8fcc94269f72e0e5bfdcbf9500b17b53123cb2f,dd7cd95c364aa97fa2b12029660a62445073c50a..62172d5857239549ba6fa233df721096773fd707
@@@ -51,7 -51,7 +51,7 @@@ ath5k_hw_setup_2word_tx_desc(struct ath
        /*
         * Validate input
         * - Zero retries don't make sense.
 -       * - A zero rate will put the HW into a mode where it continously sends
 +       * - A zero rate will put the HW into a mode where it continuously sends
         *   noise on the channel, so it is important to avoid this.
         */
        if (unlikely(tx_tries0 == 0)) {
@@@ -185,12 -185,18 +185,18 @@@ static int ath5k_hw_setup_4word_tx_desc
        struct ath5k_hw_4w_tx_ctl *tx_ctl;
        unsigned int frame_len;
  
+       /*
+        * Use local variables for these to reduce load/store access on
+        * uncached memory
+        */
+       u32 txctl0 = 0, txctl1 = 0, txctl2 = 0, txctl3 = 0;
        tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
  
        /*
         * Validate input
         * - Zero retries don't make sense.
 -       * - A zero rate will put the HW into a mode where it continously sends
 +       * - A zero rate will put the HW into a mode where it continuously sends
         *   noise on the channel, so it is important to avoid this.
         */
        if (unlikely(tx_tries0 == 0)) {
        if (tx_power > AR5K_TUNE_MAX_TXPOWER)
                tx_power = AR5K_TUNE_MAX_TXPOWER;
  
-       /* Clear descriptor */
-       memset(&desc->ud.ds_tx5212, 0, sizeof(struct ath5k_hw_5212_tx_desc));
+       /* Clear descriptor status area */
+       memset(&desc->ud.ds_tx5212.tx_stat, 0,
+              sizeof(desc->ud.ds_tx5212.tx_stat));
  
        /* Setup control descriptor */
  
        if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN)
                return -EINVAL;
  
-       tx_ctl->tx_control_0 = frame_len & AR5K_4W_TX_DESC_CTL0_FRAME_LEN;
+       txctl0 = frame_len & AR5K_4W_TX_DESC_CTL0_FRAME_LEN;
  
        /* Verify and set buffer length */
  
        if (pkt_len & ~AR5K_4W_TX_DESC_CTL1_BUF_LEN)
                return -EINVAL;
  
-       tx_ctl->tx_control_1 = pkt_len & AR5K_4W_TX_DESC_CTL1_BUF_LEN;
+       txctl1 = pkt_len & AR5K_4W_TX_DESC_CTL1_BUF_LEN;
  
-       tx_ctl->tx_control_0 |=
-               AR5K_REG_SM(tx_power, AR5K_4W_TX_DESC_CTL0_XMIT_POWER) |
-               AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT);
-       tx_ctl->tx_control_1 |= AR5K_REG_SM(type,
-                                       AR5K_4W_TX_DESC_CTL1_FRAME_TYPE);
-       tx_ctl->tx_control_2 = AR5K_REG_SM(tx_tries0,
-                                       AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0);
-       tx_ctl->tx_control_3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
+       txctl0 |= AR5K_REG_SM(tx_power, AR5K_4W_TX_DESC_CTL0_XMIT_POWER) |
+                 AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT);
+       txctl1 |= AR5K_REG_SM(type, AR5K_4W_TX_DESC_CTL1_FRAME_TYPE);
+       txctl2 = AR5K_REG_SM(tx_tries0, AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0);
+       txctl3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
  
  #define _TX_FLAGS(_c, _flag)                                  \
        if (flags & AR5K_TXDESC_##_flag) {                      \
-               tx_ctl->tx_control_##_c |=                      \
-                       AR5K_4W_TX_DESC_CTL##_c##_##_flag;      \
+               txctl##_c |= AR5K_4W_TX_DESC_CTL##_c##_##_flag; \
        }
  
        _TX_FLAGS(0, CLRDMASK);
         * WEP crap
         */
        if (key_index != AR5K_TXKEYIX_INVALID) {
-               tx_ctl->tx_control_0 |= AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
-               tx_ctl->tx_control_1 |= AR5K_REG_SM(key_index,
+               txctl0 |= AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
+               txctl1 |= AR5K_REG_SM(key_index,
                                AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX);
        }
  
                if ((flags & AR5K_TXDESC_RTSENA) &&
                                (flags & AR5K_TXDESC_CTSENA))
                        return -EINVAL;
-               tx_ctl->tx_control_2 |= rtscts_duration &
-                               AR5K_4W_TX_DESC_CTL2_RTS_DURATION;
-               tx_ctl->tx_control_3 |= AR5K_REG_SM(rtscts_rate,
+               txctl2 |= rtscts_duration & AR5K_4W_TX_DESC_CTL2_RTS_DURATION;
+               txctl3 |= AR5K_REG_SM(rtscts_rate,
                                AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE);
        }
  
+       tx_ctl->tx_control_0 = txctl0;
+       tx_ctl->tx_control_1 = txctl1;
+       tx_ctl->tx_control_2 = txctl2;
+       tx_ctl->tx_control_3 = txctl3;
        return 0;
  }
  
@@@ -300,7 -307,7 +307,7 @@@ ath5k_hw_setup_mrr_tx_desc(struct ath5k
        /*
         * Rates can be 0 as long as the retry count is 0 too.
         * A zero rate and nonzero retry count will put the HW into a mode where
 -       * it continously sends noise on the channel, so it is important to
 +       * it continuously sends noise on the channel, so it is important to
         * avoid this.
         */
        if (unlikely((tx_rate1 == 0 && tx_tries1 != 0) ||
  \***********************/
  
  /*
 - * Proccess the tx status descriptor on 5210/5211
 + * Process the tx status descriptor on 5210/5211
   */
  static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
                struct ath5k_desc *desc, struct ath5k_tx_status *ts)
                AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
        ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0,
                AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
-       ts->ts_longretry = AR5K_REG_MS(tx_status->tx_status_0,
+       ts->ts_final_retry = AR5K_REG_MS(tx_status->tx_status_0,
                AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
        /*TODO: ts->ts_virtcol + test*/
        ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1,
                AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
        ts->ts_antenna = 1;
        ts->ts_status = 0;
-       ts->ts_rate[0] = AR5K_REG_MS(tx_ctl->tx_control_0,
-               AR5K_2W_TX_DESC_CTL0_XMIT_RATE);
-       ts->ts_retry[0] = ts->ts_longretry;
        ts->ts_final_idx = 0;
  
        if (!(tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) {
  }
  
  /*
 - * Proccess a tx status descriptor on 5212
 + * Process a tx status descriptor on 5212
   */
  static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
                struct ath5k_desc *desc, struct ath5k_tx_status *ts)
  {
        struct ath5k_hw_4w_tx_ctl *tx_ctl;
        struct ath5k_hw_tx_status *tx_status;
+       u32 txstat0, txstat1;
  
        tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
        tx_status = &desc->ud.ds_tx5212.tx_stat;
  
+       txstat1 = ACCESS_ONCE(tx_status->tx_status_1);
        /* No frame has been send or error */
-       if (unlikely(!(tx_status->tx_status_1 & AR5K_DESC_TX_STATUS1_DONE)))
+       if (unlikely(!(txstat1 & AR5K_DESC_TX_STATUS1_DONE)))
                return -EINPROGRESS;
  
+       txstat0 = ACCESS_ONCE(tx_status->tx_status_0);
        /*
         * Get descriptor status
         */
-       ts->ts_tstamp = AR5K_REG_MS(tx_status->tx_status_0,
+       ts->ts_tstamp = AR5K_REG_MS(txstat0,
                AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
-       ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0,
+       ts->ts_shortretry = AR5K_REG_MS(txstat0,
                AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
-       ts->ts_longretry = AR5K_REG_MS(tx_status->tx_status_0,
+       ts->ts_final_retry = AR5K_REG_MS(txstat0,
                AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
-       ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1,
+       ts->ts_seqnum = AR5K_REG_MS(txstat1,
                AR5K_DESC_TX_STATUS1_SEQ_NUM);
-       ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1,
+       ts->ts_rssi = AR5K_REG_MS(txstat1,
                AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
-       ts->ts_antenna = (tx_status->tx_status_1 &
+       ts->ts_antenna = (txstat1 &
                AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212) ? 2 : 1;
        ts->ts_status = 0;
  
-       ts->ts_final_idx = AR5K_REG_MS(tx_status->tx_status_1,
+       ts->ts_final_idx = AR5K_REG_MS(txstat1,
                        AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212);
  
-       /* The longretry counter has the number of un-acked retries
-        * for the final rate. To get the total number of retries
-        * we have to add the retry counters for the other rates
-        * as well
-        */
-       ts->ts_retry[ts->ts_final_idx] = ts->ts_longretry;
-       switch (ts->ts_final_idx) {
-       case 3:
-               ts->ts_rate[3] = AR5K_REG_MS(tx_ctl->tx_control_3,
-                       AR5K_4W_TX_DESC_CTL3_XMIT_RATE3);
-               ts->ts_retry[2] = AR5K_REG_MS(tx_ctl->tx_control_2,
-                       AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2);
-               ts->ts_longretry += ts->ts_retry[2];
-               /* fall through */
-       case 2:
-               ts->ts_rate[2] = AR5K_REG_MS(tx_ctl->tx_control_3,
-                       AR5K_4W_TX_DESC_CTL3_XMIT_RATE2);
-               ts->ts_retry[1] = AR5K_REG_MS(tx_ctl->tx_control_2,
-                       AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1);
-               ts->ts_longretry += ts->ts_retry[1];
-               /* fall through */
-       case 1:
-               ts->ts_rate[1] = AR5K_REG_MS(tx_ctl->tx_control_3,
-                       AR5K_4W_TX_DESC_CTL3_XMIT_RATE1);
-               ts->ts_retry[0] = AR5K_REG_MS(tx_ctl->tx_control_2,
-                       AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1);
-               ts->ts_longretry += ts->ts_retry[0];
-               /* fall through */
-       case 0:
-               ts->ts_rate[0] = tx_ctl->tx_control_3 &
-                       AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
-               break;
-       }
        /* TX error */
-       if (!(tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) {
-               if (tx_status->tx_status_0 &
-                               AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
+       if (!(txstat0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) {
+               if (txstat0 & AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
                        ts->ts_status |= AR5K_TXERR_XRETRY;
  
-               if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
+               if (txstat0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
                        ts->ts_status |= AR5K_TXERR_FIFO;
  
-               if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FILTERED)
+               if (txstat0 & AR5K_DESC_TX_STATUS0_FILTERED)
                        ts->ts_status |= AR5K_TXERR_FILT;
        }
  
@@@ -519,7 -490,7 +490,7 @@@ int ath5k_hw_setup_rx_desc(struct ath5k
  }
  
  /*
 - * Proccess the rx status descriptor on 5210/5211
 + * Process the rx status descriptor on 5210/5211
   */
  static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
                struct ath5k_desc *desc, struct ath5k_rx_status *rs)
  }
  
  /*
 - * Proccess the rx status descriptor on 5212
 + * Process the rx status descriptor on 5212
   */
  static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
                                        struct ath5k_desc *desc,
                                        struct ath5k_rx_status *rs)
  {
        struct ath5k_hw_rx_status *rx_status;
+       u32 rxstat0, rxstat1;
  
        rx_status = &desc->ud.ds_rx.rx_stat;
+       rxstat1 = ACCESS_ONCE(rx_status->rx_status_1);
  
        /* No frame received / not ready */
-       if (unlikely(!(rx_status->rx_status_1 &
-                               AR5K_5212_RX_DESC_STATUS1_DONE)))
+       if (unlikely(!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_DONE)))
                return -EINPROGRESS;
  
        memset(rs, 0, sizeof(struct ath5k_rx_status));
+       rxstat0 = ACCESS_ONCE(rx_status->rx_status_0);
  
        /*
         * Frame receive status
         */
-       rs->rs_datalen = rx_status->rx_status_0 &
-               AR5K_5212_RX_DESC_STATUS0_DATA_LEN;
-       rs->rs_rssi = AR5K_REG_MS(rx_status->rx_status_0,
+       rs->rs_datalen = rxstat0 & AR5K_5212_RX_DESC_STATUS0_DATA_LEN;
+       rs->rs_rssi = AR5K_REG_MS(rxstat0,
                AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL);
-       rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
+       rs->rs_rate = AR5K_REG_MS(rxstat0,
                AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE);
-       rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0,
+       rs->rs_antenna = AR5K_REG_MS(rxstat0,
                AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA);
-       rs->rs_more = !!(rx_status->rx_status_0 &
-               AR5K_5212_RX_DESC_STATUS0_MORE);
-       rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
+       rs->rs_more = !!(rxstat0 & AR5K_5212_RX_DESC_STATUS0_MORE);
+       rs->rs_tstamp = AR5K_REG_MS(rxstat1,
                AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
  
        /*
         * Key table status
         */
-       if (rx_status->rx_status_1 & AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID)
-               rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1,
+       if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID)
+               rs->rs_keyix = AR5K_REG_MS(rxstat1,
                                           AR5K_5212_RX_DESC_STATUS1_KEY_INDEX);
        else
                rs->rs_keyix = AR5K_RXKEYIX_INVALID;
        /*
         * Receive/descriptor errors
         */
-       if (!(rx_status->rx_status_1 &
-           AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
-               if (rx_status->rx_status_1 &
-                               AR5K_5212_RX_DESC_STATUS1_CRC_ERROR)
+       if (!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
+               if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_CRC_ERROR)
                        rs->rs_status |= AR5K_RXERR_CRC;
  
-               if (rx_status->rx_status_1 &
-                               AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) {
+               if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) {
                        rs->rs_status |= AR5K_RXERR_PHY;
-                       rs->rs_phyerr = AR5K_REG_MS(rx_status->rx_status_1,
+                       rs->rs_phyerr = AR5K_REG_MS(rxstat1,
                                AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE);
                        if (!ah->ah_capabilities.cap_has_phyerr_counters)
                                ath5k_ani_phy_error_report(ah, rs->rs_phyerr);
                }
  
-               if (rx_status->rx_status_1 &
-                               AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
+               if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
                        rs->rs_status |= AR5K_RXERR_DECRYPT;
  
-               if (rx_status->rx_status_1 &
-                               AR5K_5212_RX_DESC_STATUS1_MIC_ERROR)
+               if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_MIC_ERROR)
                        rs->rs_status |= AR5K_RXERR_MIC;
        }
        return 0;
index efb672cb31e49521c95b2207f8f49278db142a4f,e9263e4c7f3eb8cf25244cd856bdc92e403ce976..1fef84f87c78f5196e57ff5c9a3ad78ca7fed50a
@@@ -660,6 -660,53 +660,53 @@@ ath5k_get_pcdac_intercepts(struct ath5k
                vp[i] = (ip[i] * max + (100 - ip[i]) * min) / 100;
  }
  
+ static int
+ ath5k_eeprom_free_pcal_info(struct ath5k_hw *ah, int mode)
+ {
+       struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+       struct ath5k_chan_pcal_info *chinfo;
+       u8 pier, pdg;
+       switch (mode) {
+       case AR5K_EEPROM_MODE_11A:
+               if (!AR5K_EEPROM_HDR_11A(ee->ee_header))
+                       return 0;
+               chinfo = ee->ee_pwr_cal_a;
+               break;
+       case AR5K_EEPROM_MODE_11B:
+               if (!AR5K_EEPROM_HDR_11B(ee->ee_header))
+                       return 0;
+               chinfo = ee->ee_pwr_cal_b;
+               break;
+       case AR5K_EEPROM_MODE_11G:
+               if (!AR5K_EEPROM_HDR_11G(ee->ee_header))
+                       return 0;
+               chinfo = ee->ee_pwr_cal_g;
+               break;
+       default:
+               return -EINVAL;
+       }
+       for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {
+               if (!chinfo[pier].pd_curves)
+                       continue;
+               for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {
+                       struct ath5k_pdgain_info *pd =
+                                       &chinfo[pier].pd_curves[pdg];
+                       if (pd != NULL) {
+                               kfree(pd->pd_step);
+                               kfree(pd->pd_pwr);
+                       }
+               }
+               kfree(chinfo[pier].pd_curves);
+       }
+       return 0;
+ }
  /* Convert RF5111 specific data to generic raw data
   * used by interpolation code */
  static int
@@@ -684,7 -731,7 +731,7 @@@ ath5k_eeprom_convert_pcal_info_5111(str
                                GFP_KERNEL);
  
                if (!chinfo[pier].pd_curves)
-                       return -ENOMEM;
+                       goto err_out;
  
                /* Only one curve for RF5111
                 * find out which one and place
                pd->pd_step = kcalloc(AR5K_EEPROM_N_PWR_POINTS_5111,
                                        sizeof(u8), GFP_KERNEL);
                if (!pd->pd_step)
-                       return -ENOMEM;
+                       goto err_out;
  
                pd->pd_pwr = kcalloc(AR5K_EEPROM_N_PWR_POINTS_5111,
                                        sizeof(s16), GFP_KERNEL);
                if (!pd->pd_pwr)
-                       return -ENOMEM;
+                       goto err_out;
  
                /* Fill raw dataset
                 * (convert power to 0.25dB units
        }
  
        return 0;
+ err_out:
+       ath5k_eeprom_free_pcal_info(ah, mode);
+       return -ENOMEM;
  }
  
  /* Parse EEPROM data */
@@@ -867,7 -918,7 +918,7 @@@ ath5k_eeprom_convert_pcal_info_5112(str
                                        GFP_KERNEL);
  
                if (!chinfo[pier].pd_curves)
-                       return -ENOMEM;
+                       goto err_out;
  
                /* Fill pd_curves */
                for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {
                                                sizeof(u8), GFP_KERNEL);
  
                                if (!pd->pd_step)
-                                       return -ENOMEM;
+                                       goto err_out;
  
                                pd->pd_pwr = kcalloc(pd->pd_points,
                                                sizeof(s16), GFP_KERNEL);
  
                                if (!pd->pd_pwr)
-                                       return -ENOMEM;
+                                       goto err_out;
  
                                /* Fill raw dataset
                                 * (all power levels are in 0.25dB units) */
                                                sizeof(u8), GFP_KERNEL);
  
                                if (!pd->pd_step)
-                                       return -ENOMEM;
+                                       goto err_out;
  
                                pd->pd_pwr = kcalloc(pd->pd_points,
                                                sizeof(s16), GFP_KERNEL);
  
                                if (!pd->pd_pwr)
-                                       return -ENOMEM;
+                                       goto err_out;
  
                                /* Fill raw dataset
                                 * (all power levels are in 0.25dB units) */
        }
  
        return 0;
+ err_out:
+       ath5k_eeprom_free_pcal_info(ah, mode);
+       return -ENOMEM;
  }
  
  /* Parse EEPROM data */
@@@ -1080,7 -1134,7 +1134,7 @@@ ath5k_eeprom_read_pcal_info_5112(struc
   *
   * To recreate the curves we read here the points and interpolate
   * later. Note that in most cases only 2 (higher and lower) curves are
 - * used (like RF5112) but vendors have the oportunity to include all
 + * used (like RF5112) but vendors have the opportunity to include all
   * 4 curves on eeprom. The final curve (higher power) has an extra
   * point for better accuracy like RF5112.
   */
@@@ -1156,7 -1210,7 +1210,7 @@@ ath5k_eeprom_convert_pcal_info_2413(str
                                        GFP_KERNEL);
  
                if (!chinfo[pier].pd_curves)
-                       return -ENOMEM;
+                       goto err_out;
  
                /* Fill pd_curves */
                for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {
                                        sizeof(u8), GFP_KERNEL);
  
                        if (!pd->pd_step)
-                               return -ENOMEM;
+                               goto err_out;
  
                        pd->pd_pwr = kcalloc(pd->pd_points,
                                        sizeof(s16), GFP_KERNEL);
  
                        if (!pd->pd_pwr)
-                               return -ENOMEM;
+                               goto err_out;
  
                        /* Fill raw dataset
                         * convert all pwr levels to
        }
  
        return 0;
+ err_out:
+       ath5k_eeprom_free_pcal_info(ah, mode);
+       return -ENOMEM;
  }
  
  /* Parse EEPROM data */
@@@ -1302,7 -1360,7 +1360,7 @@@ ath5k_eeprom_read_pcal_info_2413(struc
                        /*
                         * Pd gain 0 is not the last pd gain
                         * so it only has 2 pd points.
 -                       * Continue wih pd gain 1.
 +                       * Continue with pd gain 1.
                         */
                        pcinfo->pwr_i[1] = (val >> 10) & 0x1f;
  
@@@ -1534,53 -1592,6 +1592,6 @@@ ath5k_eeprom_read_pcal_info(struct ath5
        return 0;
  }
  
- static int
- ath5k_eeprom_free_pcal_info(struct ath5k_hw *ah, int mode)
- {
-       struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
-       struct ath5k_chan_pcal_info *chinfo;
-       u8 pier, pdg;
-       switch (mode) {
-       case AR5K_EEPROM_MODE_11A:
-               if (!AR5K_EEPROM_HDR_11A(ee->ee_header))
-                       return 0;
-               chinfo = ee->ee_pwr_cal_a;
-               break;
-       case AR5K_EEPROM_MODE_11B:
-               if (!AR5K_EEPROM_HDR_11B(ee->ee_header))
-                       return 0;
-               chinfo = ee->ee_pwr_cal_b;
-               break;
-       case AR5K_EEPROM_MODE_11G:
-               if (!AR5K_EEPROM_HDR_11G(ee->ee_header))
-                       return 0;
-               chinfo = ee->ee_pwr_cal_g;
-               break;
-       default:
-               return -EINVAL;
-       }
-       for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {
-               if (!chinfo[pier].pd_curves)
-                       continue;
-               for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {
-                       struct ath5k_pdgain_info *pd =
-                                       &chinfo[pier].pd_curves[pdg];
-                       if (pd != NULL) {
-                               kfree(pd->pd_step);
-                               kfree(pd->pd_pwr);
-                       }
-               }
-               kfree(chinfo[pier].pd_curves);
-       }
-       return 0;
- }
  /* Read conformance test limits used for regulatory control */
  static int
  ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah)
@@@ -1721,35 -1732,6 +1732,6 @@@ ath5k_eeprom_read_spur_chans(struct ath
        return ret;
  }
  
- /*
-  * Read the MAC address from eeprom
-  */
- int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
- {
-       u8 mac_d[ETH_ALEN] = {};
-       u32 total, offset;
-       u16 data;
-       int octet;
-       AR5K_EEPROM_READ(0x20, data);
-       for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) {
-               AR5K_EEPROM_READ(offset, data);
-               total += data;
-               mac_d[octet + 1] = data & 0xff;
-               mac_d[octet] = data >> 8;
-               octet += 2;
-       }
-       if (!total || total == 3 * 0xffff)
-               return -EINVAL;
-       memcpy(mac, mac_d, ETH_ALEN);
-       return 0;
- }
  
  /***********************\
  * Init/Detach functions *
index 3c44689a700b7b73030860e35ee941a4f66b6678,5cc4a2fe47b6ba33cf113d51636b92703f1af212..296c316a83412eaa64bd4c6bd47b264dc4910822
@@@ -17,6 -17,7 +17,7 @@@
  #include <linux/nl80211.h>
  #include <linux/pci.h>
  #include <linux/pci-aspm.h>
+ #include <linux/etherdevice.h>
  #include "../ath.h"
  #include "ath5k.h"
  #include "debug.h"
@@@ -57,7 -58,7 +58,7 @@@ static void ath5k_pci_read_cachesize(st
        *csz = (int)u8tmp;
  
        /*
 -       * This check was put in to avoid "unplesant" consequences if
 +       * This check was put in to avoid "unpleasant" consequences if
         * the bootrom has not fully initialized all PCI devices.
         * Sometimes the cache line size register is not set
         */
@@@ -108,11 -109,42 +109,42 @@@ int ath5k_hw_read_srev(struct ath5k_hw 
        return 0;
  }
  
+ /*
+  * Read the MAC address from eeprom or platform_data
+  */
+ static int ath5k_pci_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
+ {
+       u8 mac_d[ETH_ALEN] = {};
+       u32 total, offset;
+       u16 data;
+       int octet;
+       AR5K_EEPROM_READ(0x20, data);
+       for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) {
+               AR5K_EEPROM_READ(offset, data);
+               total += data;
+               mac_d[octet + 1] = data & 0xff;
+               mac_d[octet] = data >> 8;
+               octet += 2;
+       }
+       if (!total || total == 3 * 0xffff)
+               return -EINVAL;
+       memcpy(mac, mac_d, ETH_ALEN);
+       return 0;
+ }
  /* Common ath_bus_opts structure */
  static const struct ath_bus_ops ath_pci_bus_ops = {
        .ath_bus_type = ATH_PCI,
        .read_cachesize = ath5k_pci_read_cachesize,
        .eeprom_read = ath5k_pci_eeprom_read,
+       .eeprom_read_mac = ath5k_pci_eeprom_read_mac,
  };
  
  /********************\
index d9b3f828455aeaf3fd19907aed1cd9030af6f54d,71b60b7c617eaaaa32ed0032befa035191f4e7c5..712a9ac4000e47d313ad9b29296bb5c940cc98ff
@@@ -75,7 -75,7 +75,7 @@@ static const unsigned int ack_rates_hig
   * bwmodes.
   */
  int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
-               int len, struct ieee80211_rate *rate)
+               int len, struct ieee80211_rate *rate, bool shortpre)
  {
        struct ath5k_softc *sc = ah->ah_sc;
        int sifs, preamble, plcp_bits, sym_time;
  
        /* Fallback */
        if (!ah->ah_bwmode) {
-               dur = ieee80211_generic_frame_duration(sc->hw,
-                                               NULL, len, rate);
-               return le16_to_cpu(dur);
+               __le16 raw_dur = ieee80211_generic_frame_duration(sc->hw,
+                                       NULL, len, rate);
+               /* subtract difference between long and short preamble */
+               dur = le16_to_cpu(raw_dur);
+               if (shortpre)
+                       dur -= 96;
+               return dur;
        }
  
        bitrate = rate->bitrate;
@@@ -145,9 -151,9 +151,9 @@@ unsigned int ath5k_hw_get_default_slott
                slot_time = AR5K_INIT_SLOT_TIME_QUARTER_RATE;
                break;
        case AR5K_BWMODE_DEFAULT:
-               slot_time = AR5K_INIT_SLOT_TIME_DEFAULT;
        default:
-               if (channel->hw_value & CHANNEL_CCK)
+               slot_time = AR5K_INIT_SLOT_TIME_DEFAULT;
+               if ((channel->hw_value & CHANNEL_CCK) && !ah->ah_short_slot)
                        slot_time = AR5K_INIT_SLOT_TIME_B;
                break;
        }
@@@ -263,27 -269,14 +269,14 @@@ static inline void ath5k_hw_write_rate_
                 * actual rate for this rate. See mac80211 tx.c
                 * ieee80211_duration() for a brief description of
                 * what rate we should choose to TX ACKs. */
-               tx_time = ath5k_hw_get_frame_duration(ah, 10, rate);
+               tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, false);
  
                ath5k_hw_reg_write(ah, tx_time, reg);
  
                if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE))
                        continue;
  
-               /*
-                * We're not distinguishing short preamble here,
-                * This is true, all we'll get is a longer value here
-                * which is not necessarilly bad. We could use
-                * export ieee80211_frame_duration() but that needs to be
-                * fixed first to be properly used by mac802111 drivers:
-                *
-                *  - remove erp stuff and let the routine figure ofdm
-                *    erp rates
-                *  - remove passing argument ieee80211_local as
-                *    drivers don't have access to it
-                *  - move drivers using ieee80211_generic_frame_duration()
-                *    to this
-                */
+               tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, true);
                ath5k_hw_reg_write(ah, tx_time,
                        reg + (AR5K_SET_SHORT_PREAMBLE << 2));
        }
@@@ -472,7 -465,7 +465,7 @@@ void ath5k_hw_set_rx_filter(struct ath5
        }
  
        /*
 -       * The AR5210 uses promiscous mode to detect radar activity
 +       * The AR5210 uses promiscuous mode to detect radar activity
         */
        if (ah->ah_version == AR5K_AR5210 &&
                        (filter & AR5K_RX_FILTER_RADARERR)) {
@@@ -706,8 -699,8 +699,8 @@@ ath5k_check_timer_win(int a, int b, in
   * The need for this function arises from the fact that we have 4 separate
   * HW timer registers (TIMER0 - TIMER3), which are closely related to the
   * next beacon target time (NBTT), and that the HW updates these timers
 - * seperately based on the current TSF value. The hardware increments each
 - * timer by the beacon interval, when the local TSF coverted to TU is equal
 + * separately based on the current TSF value. The hardware increments each
 + * timer by the beacon interval, when the local TSF converted to TU is equal
   * to the value stored in the timer.
   *
   * The reception of a beacon with the same BSSID can update the local HW TSF
index 2d10239ce8295e2633699c6680a9f1b7072703e1,48bcc1a21076b03c4fb2899e859a4a7f4de0aa8d..2e3a33a53406a85d08c4266d90e33462a1a8208b
  #include "htc.h"
  
  /* identify firmware images */
- #define FIRMWARE_AR7010               "ar7010.fw"
- #define FIRMWARE_AR7010_1_1   "ar7010_1_1.fw"
- #define FIRMWARE_AR9271               "ar9271.fw"
+ #define FIRMWARE_AR7010_1_1     "htc_7010.fw"
+ #define FIRMWARE_AR9271         "htc_9271.fw"
  
- MODULE_FIRMWARE(FIRMWARE_AR7010);
  MODULE_FIRMWARE(FIRMWARE_AR7010_1_1);
  MODULE_FIRMWARE(FIRMWARE_AR9271);
  
@@@ -80,7 -78,7 +78,7 @@@ static void hif_usb_regout_cb(struct ur
  
        if (cmd) {
                ath9k_htc_txcompletion_cb(cmd->hif_dev->htc_handle,
-                                         cmd->skb, 1);
+                                         cmd->skb, true);
                kfree(cmd);
        }
  
@@@ -126,6 -124,90 +124,90 @@@ static int hif_usb_send_regout(struct h
        return ret;
  }
  
+ static void hif_usb_mgmt_cb(struct urb *urb)
+ {
+       struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
+       struct hif_device_usb *hif_dev = cmd->hif_dev;
+       bool txok = true;
+       if (!cmd || !cmd->skb || !cmd->hif_dev)
+               return;
+       switch (urb->status) {
+       case 0:
+               break;
+       case -ENOENT:
+       case -ECONNRESET:
+       case -ENODEV:
+       case -ESHUTDOWN:
+               txok = false;
+               /*
+                * If the URBs are being flushed, no need to complete
+                * this packet.
+                */
+               spin_lock(&hif_dev->tx.tx_lock);
+               if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) {
+                       spin_unlock(&hif_dev->tx.tx_lock);
+                       dev_kfree_skb_any(cmd->skb);
+                       kfree(cmd);
+                       return;
+               }
+               spin_unlock(&hif_dev->tx.tx_lock);
+               break;
+       default:
+               txok = false;
+               break;
+       }
+       skb_pull(cmd->skb, 4);
+       ath9k_htc_txcompletion_cb(cmd->hif_dev->htc_handle,
+                                 cmd->skb, txok);
+       kfree(cmd);
+ }
+ static int hif_usb_send_mgmt(struct hif_device_usb *hif_dev,
+                            struct sk_buff *skb)
+ {
+       struct urb *urb;
+       struct cmd_buf *cmd;
+       int ret = 0;
+       __le16 *hdr;
+       urb = usb_alloc_urb(0, GFP_ATOMIC);
+       if (urb == NULL)
+               return -ENOMEM;
+       cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+       if (cmd == NULL) {
+               usb_free_urb(urb);
+               return -ENOMEM;
+       }
+       cmd->skb = skb;
+       cmd->hif_dev = hif_dev;
+       hdr = (__le16 *) skb_push(skb, 4);
+       *hdr++ = cpu_to_le16(skb->len - 4);
+       *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG);
+       usb_fill_bulk_urb(urb, hif_dev->udev,
+                        usb_sndbulkpipe(hif_dev->udev, USB_WLAN_TX_PIPE),
+                        skb->data, skb->len,
+                        hif_usb_mgmt_cb, cmd);
+       usb_anchor_urb(urb, &hif_dev->mgmt_submitted);
+       ret = usb_submit_urb(urb, GFP_ATOMIC);
+       if (ret) {
+               usb_unanchor_urb(urb);
+               kfree(cmd);
+       }
+       usb_free_urb(urb);
+       return ret;
+ }
  static inline void ath9k_skb_queue_purge(struct hif_device_usb *hif_dev,
                                         struct sk_buff_head *list)
  {
  
        while ((skb = __skb_dequeue(list)) != NULL) {
                dev_kfree_skb_any(skb);
-               TX_STAT_INC(skb_dropped);
+       }
+ }
+ static inline void ath9k_skb_queue_complete(struct hif_device_usb *hif_dev,
+                                           struct sk_buff_head *queue,
+                                           bool txok)
+ {
+       struct sk_buff *skb;
+       while ((skb = __skb_dequeue(queue)) != NULL) {
+               ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
+                                         skb, txok);
+               if (txok)
+                       TX_STAT_INC(skb_success);
+               else
+                       TX_STAT_INC(skb_failed);
        }
  }
  
@@@ -141,7 -238,7 +238,7 @@@ static void hif_usb_tx_cb(struct urb *u
  {
        struct tx_buf *tx_buf = (struct tx_buf *) urb->context;
        struct hif_device_usb *hif_dev;
-       struct sk_buff *skb;
+       bool txok = true;
  
        if (!tx_buf || !tx_buf->hif_dev)
                return;
        case -ECONNRESET:
        case -ENODEV:
        case -ESHUTDOWN:
-               /*
-                * The URB has been killed, free the SKBs.
-                */
-               ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
+               txok = false;
  
                /*
                 * If the URBs are being flushed, no need to add this
                spin_lock(&hif_dev->tx.tx_lock);
                if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) {
                        spin_unlock(&hif_dev->tx.tx_lock);
+                       ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
                        return;
                }
                spin_unlock(&hif_dev->tx.tx_lock);
  
-               /*
-                * In the stop() case, this URB has to be added to
-                * the free list.
-                */
-               goto add_free;
+               break;
        default:
+               txok = false;
                break;
        }
  
-       /*
-        * Check if TX has been stopped, this is needed because
-        * this CB could have been invoked just after the TX lock
-        * was released in hif_stop() and kill_urb() hasn't been
-        * called yet.
-        */
-       spin_lock(&hif_dev->tx.tx_lock);
-       if (hif_dev->tx.flags & HIF_USB_TX_STOP) {
-               spin_unlock(&hif_dev->tx.tx_lock);
-               ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
-               goto add_free;
-       }
-       spin_unlock(&hif_dev->tx.tx_lock);
-       /* Complete the queued SKBs. */
-       while ((skb = __skb_dequeue(&tx_buf->skb_queue)) != NULL) {
-               ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
-                                         skb, 1);
-               TX_STAT_INC(skb_completed);
-       }
+       ath9k_skb_queue_complete(hif_dev, &tx_buf->skb_queue, txok);
  
- add_free:
        /* Re-initialize the SKB queue */
        tx_buf->len = tx_buf->offset = 0;
        __skb_queue_head_init(&tx_buf->skb_queue);
@@@ -274,7 -346,7 +346,7 @@@ static int __hif_usb_tx(struct hif_devi
        ret = usb_submit_urb(tx_buf->urb, GFP_ATOMIC);
        if (ret) {
                tx_buf->len = tx_buf->offset = 0;
-               ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
+               ath9k_skb_queue_complete(hif_dev, &tx_buf->skb_queue, false);
                __skb_queue_head_init(&tx_buf->skb_queue);
                list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
                hif_dev->tx.tx_buf_cnt++;
        return ret;
  }
  
- static int hif_usb_send_tx(struct hif_device_usb *hif_dev, struct sk_buff *skb,
-                          struct ath9k_htc_tx_ctl *tx_ctl)
+ static int hif_usb_send_tx(struct hif_device_usb *hif_dev, struct sk_buff *skb)
  {
+       struct ath9k_htc_tx_ctl *tx_ctl;
        unsigned long flags;
+       int ret = 0;
  
        spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
  
                return -ENOMEM;
        }
  
-       __skb_queue_tail(&hif_dev->tx.tx_skb_queue, skb);
-       hif_dev->tx.tx_skb_cnt++;
+       spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
  
-       /* Send normal frames immediately */
-       if (!tx_ctl || (tx_ctl && (tx_ctl->type == ATH9K_HTC_NORMAL)))
-               __hif_usb_tx(hif_dev);
+       tx_ctl = HTC_SKB_CB(skb);
+       /* Mgmt/Beacon frames don't use the TX buffer pool */
+       if ((tx_ctl->type == ATH9K_HTC_MGMT) ||
+           (tx_ctl->type == ATH9K_HTC_BEACON)) {
+               ret = hif_usb_send_mgmt(hif_dev, skb);
+       }
+       spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+       if ((tx_ctl->type == ATH9K_HTC_NORMAL) ||
+           (tx_ctl->type == ATH9K_HTC_AMPDU)) {
+               __skb_queue_tail(&hif_dev->tx.tx_skb_queue, skb);
+               hif_dev->tx.tx_skb_cnt++;
+       }
  
        /* Check if AMPDUs have to be sent immediately */
-       if (tx_ctl && (tx_ctl->type == ATH9K_HTC_AMPDU) &&
-           (hif_dev->tx.tx_buf_cnt == MAX_TX_URB_NUM) &&
+       if ((hif_dev->tx.tx_buf_cnt == MAX_TX_URB_NUM) &&
            (hif_dev->tx.tx_skb_cnt < 2)) {
                __hif_usb_tx(hif_dev);
        }
  
        spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
  
-       return 0;
+       return ret;
  }
  
- static void hif_usb_start(void *hif_handle, u8 pipe_id)
+ static void hif_usb_start(void *hif_handle)
  {
        struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
        unsigned long flags;
        spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
  }
  
- static void hif_usb_stop(void *hif_handle, u8 pipe_id)
+ static void hif_usb_stop(void *hif_handle)
  {
        struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
        struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
        unsigned long flags;
  
        spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
-       ath9k_skb_queue_purge(hif_dev, &hif_dev->tx.tx_skb_queue);
+       ath9k_skb_queue_complete(hif_dev, &hif_dev->tx.tx_skb_queue, false);
        hif_dev->tx.tx_skb_cnt = 0;
        hif_dev->tx.flags |= HIF_USB_TX_STOP;
        spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
                                 &hif_dev->tx.tx_pending, list) {
                usb_kill_urb(tx_buf->urb);
        }
+       usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
  }
  
- static int hif_usb_send(void *hif_handle, u8 pipe_id, struct sk_buff *skb,
-                       struct ath9k_htc_tx_ctl *tx_ctl)
+ static int hif_usb_send(void *hif_handle, u8 pipe_id, struct sk_buff *skb)
  {
        struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
        int ret = 0;
  
        switch (pipe_id) {
        case USB_WLAN_TX_PIPE:
-               ret = hif_usb_send_tx(hif_dev, skb, tx_ctl);
+               ret = hif_usb_send_tx(hif_dev, skb);
                break;
        case USB_REG_OUT_PIPE:
                ret = hif_usb_send_regout(hif_dev, skb);
        return ret;
  }
  
+ static inline bool check_index(struct sk_buff *skb, u8 idx)
+ {
+       struct ath9k_htc_tx_ctl *tx_ctl;
+       tx_ctl = HTC_SKB_CB(skb);
+       if ((tx_ctl->type == ATH9K_HTC_AMPDU) &&
+           (tx_ctl->sta_idx == idx))
+               return true;
+       return false;
+ }
+ static void hif_usb_sta_drain(void *hif_handle, u8 idx)
+ {
+       struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
+       struct sk_buff *skb, *tmp;
+       unsigned long flags;
+       spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+       skb_queue_walk_safe(&hif_dev->tx.tx_skb_queue, skb, tmp) {
+               if (check_index(skb, idx)) {
+                       __skb_unlink(skb, &hif_dev->tx.tx_skb_queue);
+                       ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
+                                                 skb, false);
+                       hif_dev->tx.tx_skb_cnt--;
+                       TX_STAT_INC(skb_failed);
+               }
+       }
+       spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ }
  static struct ath9k_htc_hif hif_usb = {
        .transport = ATH9K_HIF_USB,
        .name = "ath9k_hif_usb",
  
        .start = hif_usb_start,
        .stop = hif_usb_stop,
+       .sta_drain = hif_usb_sta_drain,
        .send = hif_usb_send,
  };
  
@@@ -567,6 -686,9 +686,9 @@@ static void ath9k_hif_usb_reg_in_cb(str
        case -ESHUTDOWN:
                goto free;
        default:
+               skb_reset_tail_pointer(skb);
+               skb_trim(skb, 0);
                goto resubmit;
        }
  
                                                 USB_REG_IN_PIPE),
                                 nskb->data, MAX_REG_IN_BUF_SIZE,
                                 ath9k_hif_usb_reg_in_cb, nskb);
-               ret = usb_submit_urb(urb, GFP_ATOMIC);
-               if (ret) {
-                       kfree_skb(nskb);
-                       urb->context = NULL;
-               }
-               return;
        }
  
  resubmit:
-       skb_reset_tail_pointer(skb);
-       skb_trim(skb, 0);
+       usb_anchor_urb(urb, &hif_dev->reg_in_submitted);
        ret = usb_submit_urb(urb, GFP_ATOMIC);
-       if (ret)
+       if (ret) {
+               usb_unanchor_urb(urb);
                goto free;
+       }
  
        return;
  free:
@@@ -641,6 -755,8 +755,8 @@@ static void ath9k_hif_usb_dealloc_tx_ur
                kfree(tx_buf->buf);
                kfree(tx_buf);
        }
+       usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
  }
  
  static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
        INIT_LIST_HEAD(&hif_dev->tx.tx_pending);
        spin_lock_init(&hif_dev->tx.tx_lock);
        __skb_queue_head_init(&hif_dev->tx.tx_skb_queue);
+       init_usb_anchor(&hif_dev->mgmt_submitted);
  
        for (i = 0; i < MAX_TX_URB_NUM; i++) {
                tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL);
@@@ -748,43 -865,67 +865,67 @@@ err_urb
        return ret;
  }
  
- static void ath9k_hif_usb_dealloc_reg_in_urb(struct hif_device_usb *hif_dev)
+ static void ath9k_hif_usb_dealloc_reg_in_urbs(struct hif_device_usb *hif_dev)
  {
-       if (hif_dev->reg_in_urb) {
-               usb_kill_urb(hif_dev->reg_in_urb);
-               if (hif_dev->reg_in_urb->context)
-                       kfree_skb((void *)hif_dev->reg_in_urb->context);
-               usb_free_urb(hif_dev->reg_in_urb);
-               hif_dev->reg_in_urb = NULL;
-       }
+       usb_kill_anchored_urbs(&hif_dev->reg_in_submitted);
  }
  
- static int ath9k_hif_usb_alloc_reg_in_urb(struct hif_device_usb *hif_dev)
+ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
  {
-       struct sk_buff *skb;
+       struct urb *urb = NULL;
+       struct sk_buff *skb = NULL;
+       int i, ret;
  
-       hif_dev->reg_in_urb = usb_alloc_urb(0, GFP_KERNEL);
-       if (hif_dev->reg_in_urb == NULL)
-               return -ENOMEM;
+       init_usb_anchor(&hif_dev->reg_in_submitted);
  
-       skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_KERNEL);
-       if (!skb)
-               goto err;
+       for (i = 0; i < MAX_REG_IN_URB_NUM; i++) {
  
-       usb_fill_bulk_urb(hif_dev->reg_in_urb, hif_dev->udev,
-                        usb_rcvbulkpipe(hif_dev->udev,
-                                        USB_REG_IN_PIPE),
-                        skb->data, MAX_REG_IN_BUF_SIZE,
-                        ath9k_hif_usb_reg_in_cb, skb);
+               /* Allocate URB */
+               urb = usb_alloc_urb(0, GFP_KERNEL);
+               if (urb == NULL) {
+                       ret = -ENOMEM;
+                       goto err_urb;
+               }
  
-       if (usb_submit_urb(hif_dev->reg_in_urb, GFP_KERNEL) != 0)
-               goto err;
+               /* Allocate buffer */
+               skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_KERNEL);
+               if (!skb) {
+                       ret = -ENOMEM;
+                       goto err_skb;
+               }
+               usb_fill_bulk_urb(urb, hif_dev->udev,
+                                 usb_rcvbulkpipe(hif_dev->udev,
+                                                 USB_REG_IN_PIPE),
+                                 skb->data, MAX_REG_IN_BUF_SIZE,
+                                 ath9k_hif_usb_reg_in_cb, skb);
+               /* Anchor URB */
+               usb_anchor_urb(urb, &hif_dev->reg_in_submitted);
+               /* Submit URB */
+               ret = usb_submit_urb(urb, GFP_KERNEL);
+               if (ret) {
+                       usb_unanchor_urb(urb);
+                       goto err_submit;
+               }
+               /*
+                * Drop reference count.
+                * This ensures that the URB is freed when killing them.
+                */
+               usb_free_urb(urb);
+       }
  
        return 0;
  
- err:
-       ath9k_hif_usb_dealloc_reg_in_urb(hif_dev);
-       return -ENOMEM;
+ err_submit:
+       kfree_skb(skb);
+ err_skb:
+       usb_free_urb(urb);
+ err_urb:
+       ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
+       return ret;
  }
  
  static int ath9k_hif_usb_alloc_urbs(struct hif_device_usb *hif_dev)
                goto err_rx;
  
        /* Register Read */
-       if (ath9k_hif_usb_alloc_reg_in_urb(hif_dev) < 0)
+       if (ath9k_hif_usb_alloc_reg_in_urbs(hif_dev) < 0)
                goto err_reg;
  
        return 0;
@@@ -816,7 -957,7 +957,7 @@@ err
  static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
  {
        usb_kill_anchored_urbs(&hif_dev->regout_submitted);
-       ath9k_hif_usb_dealloc_reg_in_urb(hif_dev);
+       ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
        ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
        ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
  }
@@@ -1026,10 -1167,7 +1167,7 @@@ static int ath9k_hif_usb_probe(struct u
        /* Find out which firmware to load */
  
        if (IS_AR7010_DEVICE(id->driver_info))
-               if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x0202)
-                       hif_dev->fw_name = FIRMWARE_AR7010_1_1;
-               else
-                       hif_dev->fw_name = FIRMWARE_AR7010;
+               hif_dev->fw_name = FIRMWARE_AR7010_1_1;
        else
                hif_dev->fw_name = FIRMWARE_AR9271;
  
        }
  
        ret = ath9k_htc_hw_init(hif_dev->htc_handle,
 -                              &hif_dev->udev->dev, hif_dev->device_id,
 +                              &interface->dev, hif_dev->device_id,
                                hif_dev->udev->product, id->driver_info);
        if (ret) {
                ret = -EINVAL;
@@@ -1158,7 -1296,7 +1296,7 @@@ fail_resume
  #endif
  
  static struct usb_driver ath9k_hif_usb_driver = {
 -      .name = "ath9k_hif_usb",
 +      .name = KBUILD_MODNAME,
        .probe = ath9k_hif_usb_probe,
        .disconnect = ath9k_hif_usb_disconnect,
  #ifdef CONFIG_PM
index 62e139a30a74d9b05f6ffeae825c15bd13c0ce0b,5c76352b1319adef0820a48d2878ed76b0bc592e..cee970fdf65256c5124d358b674b977d9201c9b5
@@@ -17,8 -17,8 +17,8 @@@
  #include "htc.h"
  
  static int htc_issue_send(struct htc_target *target, struct sk_buff* skb,
-                         u16 len, u8 flags, u8 epid,
-                         struct ath9k_htc_tx_ctl *tx_ctl)
+                         u16 len, u8 flags, u8 epid)
  {
        struct htc_frame_hdr *hdr;
        struct htc_endpoint *endpoint = &target->endpoint[epid];
@@@ -30,8 -30,8 +30,8 @@@
        hdr->flags = flags;
        hdr->payload_len = cpu_to_be16(len);
  
-       status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb,
-                                  tx_ctl);
+       status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb);
        return status;
  }
  
@@@ -162,7 -162,7 +162,7 @@@ static int htc_config_pipe_credits(stru
  
        target->htc_flags |= HTC_OP_CONFIG_PIPE_CREDITS;
  
-       ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0, NULL);
+       ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0);
        if (ret)
                goto err;
  
@@@ -197,7 -197,7 +197,7 @@@ static int htc_setup_complete(struct ht
  
        target->htc_flags |= HTC_OP_START_WAIT;
  
-       ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0, NULL);
+       ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0);
        if (ret)
                goto err;
  
@@@ -268,7 -268,7 +268,7 @@@ int htc_connect_service(struct htc_targ
        conn_msg->dl_pipeid = endpoint->dl_pipeid;
        conn_msg->ul_pipeid = endpoint->ul_pipeid;
  
-       ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0, NULL);
+       ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0);
        if (ret)
                goto err;
  
@@@ -286,35 -286,33 +286,33 @@@ err
        return ret;
  }
  
- int htc_send(struct htc_target *target, struct sk_buff *skb,
-            enum htc_endpoint_id epid, struct ath9k_htc_tx_ctl *tx_ctl)
+ int htc_send(struct htc_target *target, struct sk_buff *skb)
  {
-       return htc_issue_send(target, skb, skb->len, 0, epid, tx_ctl);
+       struct ath9k_htc_tx_ctl *tx_ctl;
+       tx_ctl = HTC_SKB_CB(skb);
+       return htc_issue_send(target, skb, skb->len, 0, tx_ctl->epid);
  }
  
- void htc_stop(struct htc_target *target)
+ int htc_send_epid(struct htc_target *target, struct sk_buff *skb,
+                 enum htc_endpoint_id epid)
  {
-       enum htc_endpoint_id epid;
-       struct htc_endpoint *endpoint;
+       return htc_issue_send(target, skb, skb->len, 0, epid);
+ }
  
-       for (epid = ENDPOINT0; epid < ENDPOINT_MAX; epid++) {
-               endpoint = &target->endpoint[epid];
-               if (endpoint->service_id != 0)
-                       target->hif->stop(target->hif_dev, endpoint->ul_pipeid);
-       }
+ void htc_stop(struct htc_target *target)
+ {
+       target->hif->stop(target->hif_dev);
  }
  
  void htc_start(struct htc_target *target)
  {
-       enum htc_endpoint_id epid;
-       struct htc_endpoint *endpoint;
+       target->hif->start(target->hif_dev);
+ }
  
-       for (epid = ENDPOINT0; epid < ENDPOINT_MAX; epid++) {
-               endpoint = &target->endpoint[epid];
-               if (endpoint->service_id != 0)
-                       target->hif->start(target->hif_dev,
-                                          endpoint->ul_pipeid);
-       }
+ void htc_sta_drain(struct htc_target *target, u8 idx)
+ {
+       target->hif->sta_drain(target->hif_dev, idx);
  }
  
  void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
@@@ -360,7 -358,7 +358,7 @@@ ret
   * HTC Messages are handled directly here and the obtained SKB
   * is freed.
   *
 - * Sevice messages (Data, WMI) passed to the corresponding
 + * Service messages (Data, WMI) passed to the corresponding
   * endpoint RX handlers, which have to free the SKB.
   */
  void ath9k_htc_rx_msg(struct htc_target *htc_handle,
index c8a2d0dae7964de2a50a57b8b076ad54c131ba62,3a8c41c782e9564107c3d09747c780b4866a69ae..045abd5578407b2a2cf5645ebaaa232ea2cfaed1
@@@ -676,42 -676,55 +676,55 @@@ unsigned long ar9003_get_pll_sqsum_dvc(
  }
  EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc);
  
- #define DPLL2_KD_VAL            0x3D
- #define DPLL2_KI_VAL            0x06
- #define DPLL3_PHASE_SHIFT_VAL   0x1
+ #define DPLL3_PHASE_SHIFT_VAL 0x1
  static void ath9k_hw_init_pll(struct ath_hw *ah,
                              struct ath9k_channel *chan)
  {
        u32 pll;
  
        if (AR_SREV_9485(ah)) {
-               REG_WRITE(ah, AR_RTC_PLL_CONTROL2, 0x886666);
-               REG_WRITE(ah, AR_CH0_DDR_DPLL2, 0x19e82f01);
-               REG_RMW_FIELD(ah, AR_CH0_DDR_DPLL3,
-                             AR_CH0_DPLL3_PHASE_SHIFT, DPLL3_PHASE_SHIFT_VAL);
  
-               REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c);
-               udelay(1000);
+               /* program BB PLL ki and kd value, ki=0x4, kd=0x40 */
+               REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
+                             AR_CH0_BB_DPLL2_PLL_PWD, 0x1);
+               REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
+                             AR_CH0_DPLL2_KD, 0x40);
+               REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
+                             AR_CH0_DPLL2_KI, 0x4);
  
-               REG_WRITE(ah, AR_RTC_PLL_CONTROL2, 0x886666);
+               REG_RMW_FIELD(ah, AR_CH0_BB_DPLL1,
+                             AR_CH0_BB_DPLL1_REFDIV, 0x5);
+               REG_RMW_FIELD(ah, AR_CH0_BB_DPLL1,
+                             AR_CH0_BB_DPLL1_NINI, 0x58);
+               REG_RMW_FIELD(ah, AR_CH0_BB_DPLL1,
+                             AR_CH0_BB_DPLL1_NFRAC, 0x0);
  
                REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
-                             AR_CH0_DPLL2_KD, DPLL2_KD_VAL);
+                             AR_CH0_BB_DPLL2_OUTDIV, 0x1);
+               REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
+                             AR_CH0_BB_DPLL2_LOCAL_PLL, 0x1);
                REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
-                             AR_CH0_DPLL2_KI, DPLL2_KI_VAL);
+                             AR_CH0_BB_DPLL2_EN_NEGTRIG, 0x1);
  
+               /* program BB PLL phase_shift to 0x6 */
                REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
-                             AR_CH0_DPLL3_PHASE_SHIFT, DPLL3_PHASE_SHIFT_VAL);
-               REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x142c);
+                             AR_CH0_BB_DPLL3_PHASE_SHIFT, 0x6);
+               REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
+                             AR_CH0_BB_DPLL2_PLL_PWD, 0x0);
                udelay(1000);
+               REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
+                             AR_CH0_DPLL3_PHASE_SHIFT, DPLL3_PHASE_SHIFT_VAL);
        }
  
        pll = ath9k_hw_compute_pll_control(ah, chan);
  
        REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
  
+       if (AR_SREV_9485(ah))
+               udelay(1000);
        /* Switch the core clock for ar9271 to 117Mhz */
        if (AR_SREV_9271(ah)) {
                udelay(500);
@@@ -1249,6 -1262,15 +1262,6 @@@ int ath9k_hw_reset(struct ath_hw *ah, s
        ah->txchainmask = common->tx_chainmask;
        ah->rxchainmask = common->rx_chainmask;
  
 -      if ((common->bus_ops->ath_bus_type != ATH_USB) && !ah->chip_fullsleep) {
 -              ath9k_hw_abortpcurecv(ah);
 -              if (!ath9k_hw_stopdmarecv(ah)) {
 -                      ath_dbg(common, ATH_DBG_XMIT,
 -                              "Failed to stop receive dma\n");
 -                      bChannelChange = false;
 -              }
 -      }
 -
        if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
                return -EIO;
  
index c2a59386fb9cf05a7eabb12214e613103e0c08b0,a60edb44127bef70bc6dcaf3fe49403e60dfd176..b60c130917f7e959a356381757c852934c2ada17
@@@ -239,7 -239,6 +239,6 @@@ struct ath_desc 
        void *ds_vdata;
  } __packed __aligned(4);
  
- #define ATH9K_TXDESC_CLRDMASK         0x0001
  #define ATH9K_TXDESC_NOACK            0x0002
  #define ATH9K_TXDESC_RTSENA           0x0004
  #define ATH9K_TXDESC_CTSENA           0x0008
@@@ -695,7 -694,7 +694,7 @@@ bool ath9k_hw_setrxabort(struct ath_hw 
  void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp);
  void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning);
  void ath9k_hw_abortpcurecv(struct ath_hw *ah);
 -bool ath9k_hw_stopdmarecv(struct ath_hw *ah);
 +bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset);
  int ath9k_hw_beaconq_setup(struct ath_hw *ah);
  
  /* Interrupt Handling */
index 4c5c9997dac6e18f178ba7684889f2cc1f21ddc1,e7d6d98ed1ccb5e214df503100cd80959fe3566e..a8d9009a76d5a183f4a20527c187779a859c4030
@@@ -1334,6 -1334,7 +1334,6 @@@ static void ath9k_calculate_summary_sta
  
        ath9k_calculate_iter_data(hw, vif, &iter_data);
  
 -      ath9k_ps_wakeup(sc);
        /* Set BSSID mask. */
        memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
        ath_hw_setbssidmask(common);
        }
  
        ath9k_hw_set_interrupts(ah, ah->imask);
 -      ath9k_ps_restore(sc);
  
        /* Set up ANI */
        if ((iter_data.naps + iter_data.nadhocs) > 0) {
                sc->sc_flags |= SC_OP_ANI_RUN;
                ath_start_ani(common);
+       } else {
+               sc->sc_flags &= ~SC_OP_ANI_RUN;
+               del_timer_sync(&common->ani.timer);
        }
  }
  
@@@ -1409,7 -1414,6 +1412,7 @@@ static int ath9k_add_interface(struct i
        struct ath_common *common = ath9k_hw_common(ah);
        int ret = 0;
  
 +      ath9k_ps_wakeup(sc);
        mutex_lock(&sc->mutex);
  
        switch (vif->type) {
        ath9k_do_vif_add_setup(hw, vif);
  out:
        mutex_unlock(&sc->mutex);
 +      ath9k_ps_restore(sc);
        return ret;
  }
  
@@@ -1468,7 -1471,6 +1471,7 @@@ static int ath9k_change_interface(struc
  
        ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n");
        mutex_lock(&sc->mutex);
 +      ath9k_ps_wakeup(sc);
  
        /* See if new interface type is valid. */
        if ((new_type == NL80211_IFTYPE_ADHOC) &&
  
        ath9k_do_vif_add_setup(hw, vif);
  out:
 +      ath9k_ps_restore(sc);
        mutex_unlock(&sc->mutex);
        return ret;
  }
@@@ -1511,7 -1512,6 +1514,7 @@@ static void ath9k_remove_interface(stru
  
        ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
  
 +      ath9k_ps_wakeup(sc);
        mutex_lock(&sc->mutex);
  
        sc->nvifs--;
        ath9k_calculate_summary_state(hw, NULL);
  
        mutex_unlock(&sc->mutex);
 +      ath9k_ps_restore(sc);
  }
  
  static void ath9k_enable_ps(struct ath_softc *sc)
@@@ -1733,23 -1732,63 +1736,63 @@@ static int ath9k_sta_add(struct ieee802
                         struct ieee80211_sta *sta)
  {
        struct ath_softc *sc = hw->priv;
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       struct ath_node *an = (struct ath_node *) sta->drv_priv;
+       struct ieee80211_key_conf ps_key = { };
  
        ath_node_attach(sc, sta);
+       an->ps_key = ath_key_config(common, vif, sta, &ps_key);
  
        return 0;
  }
  
+ static void ath9k_del_ps_key(struct ath_softc *sc,
+                            struct ieee80211_vif *vif,
+                            struct ieee80211_sta *sta)
+ {
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       struct ath_node *an = (struct ath_node *) sta->drv_priv;
+       struct ieee80211_key_conf ps_key = { .hw_key_idx = an->ps_key };
+       if (!an->ps_key)
+           return;
+       ath_key_delete(common, &ps_key);
+ }
  static int ath9k_sta_remove(struct ieee80211_hw *hw,
                            struct ieee80211_vif *vif,
                            struct ieee80211_sta *sta)
  {
        struct ath_softc *sc = hw->priv;
  
+       ath9k_del_ps_key(sc, vif, sta);
        ath_node_detach(sc, sta);
  
        return 0;
  }
  
+ static void ath9k_sta_notify(struct ieee80211_hw *hw,
+                        struct ieee80211_vif *vif,
+                        enum sta_notify_cmd cmd,
+                        struct ieee80211_sta *sta)
+ {
+       struct ath_softc *sc = hw->priv;
+       struct ath_node *an = (struct ath_node *) sta->drv_priv;
+       switch (cmd) {
+       case STA_NOTIFY_SLEEP:
+               an->sleeping = true;
+               if (ath_tx_aggr_sleep(sc, an))
+                       ieee80211_sta_set_tim(sta);
+               break;
+       case STA_NOTIFY_AWAKE:
+               an->sleeping = false;
+               ath_tx_aggr_wakeup(sc, an);
+               break;
+       }
+ }
  static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
                         const struct ieee80211_tx_queue_params *params)
  {
  
        txq = sc->tx.txq_map[queue];
  
 +      ath9k_ps_wakeup(sc);
        mutex_lock(&sc->mutex);
  
        memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
                        ath_beaconq_config(sc);
  
        mutex_unlock(&sc->mutex);
 +      ath9k_ps_restore(sc);
  
        return ret;
  }
@@@ -1826,6 -1863,9 +1869,9 @@@ static int ath9k_set_key(struct ieee802
  
        switch (cmd) {
        case SET_KEY:
+               if (sta)
+                       ath9k_del_ps_key(sc, vif, sta);
                ret = ath_key_config(common, vif, sta, key);
                if (ret >= 0) {
                        key->hw_key_idx = ret;
@@@ -1944,7 -1984,6 +1990,7 @@@ static void ath9k_bss_info_changed(stru
        int slottime;
        int error;
  
 +      ath9k_ps_wakeup(sc);
        mutex_lock(&sc->mutex);
  
        if (changed & BSS_CHANGED_BSSID) {
        }
  
        mutex_unlock(&sc->mutex);
 +      ath9k_ps_restore(sc);
  }
  
  static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
        ath9k_ps_restore(sc);
  }
  
+ static bool ath9k_tx_frames_pending(struct ieee80211_hw *hw)
+ {
+       struct ath_softc *sc = hw->priv;
+       int i;
+       for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+               if (!ATH_TXQ_SETUP(sc, i))
+                       continue;
+               if (ath9k_has_pending_frames(sc, &sc->tx.txq[i]))
+                       return true;
+       }
+       return false;
+ }
  struct ieee80211_ops ath9k_ops = {
        .tx                 = ath9k_tx,
        .start              = ath9k_start,
        .configure_filter   = ath9k_configure_filter,
        .sta_add            = ath9k_sta_add,
        .sta_remove         = ath9k_sta_remove,
+       .sta_notify         = ath9k_sta_notify,
        .conf_tx            = ath9k_conf_tx,
        .bss_info_changed   = ath9k_bss_info_changed,
        .set_key            = ath9k_set_key,
        .rfkill_poll        = ath9k_rfkill_poll_state,
        .set_coverage_class = ath9k_set_coverage_class,
        .flush              = ath9k_flush,
+       .tx_frames_pending  = ath9k_tx_frames_pending,
  };
index 4c0d36a6980f5e319ce2a40b9782a53a4d8c22d6,2a40532126f35c428a990f1cb08b9bb7ce6fda96..18094094b298fd9e055fa422feb331b4820ce938
@@@ -792,7 -792,7 +792,7 @@@ static void ath_get_rate(void *priv, st
  
                tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
        } else {
 -              /* Set the choosen rate. No RTS for first series entry. */
 +              /* Set the chosen rate. No RTS for first series entry. */
                ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
                                       try_per_rate, rix, 0);
        }
@@@ -1092,8 -1092,7 +1092,7 @@@ static int ath_rc_get_rateindex(const s
        if (!(rate->flags & IEEE80211_TX_RC_MCS))
                return rate->idx;
  
-       while (rate->idx > mcs_rix_off[i] &&
-              i < ARRAY_SIZE(mcs_rix_off)) {
+       while (i < ARRAY_SIZE(mcs_rix_off) && rate->idx > mcs_rix_off[i]) {
                rix++; i++;
        }
  
index cfaf0a48b93998464719cfe0c5e1ef19bf700dea,b81bfc4d66ef9b50b68b7c3868a03253a6ae146d..642504f9638ca79fd018756378a32c9d3a295c0d
@@@ -75,7 -75,6 +75,6 @@@ static void ath_rx_buf_link(struct ath_
                *sc->rx.rxlink = bf->bf_daddr;
  
        sc->rx.rxlink = &ds->ds_link;
-       ath9k_hw_rxena(ah);
  }
  
  static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
@@@ -426,9 -425,7 +425,7 @@@ u32 ath_calcrxfilter(struct ath_softc *
        else
                rfilt |= ATH9K_RX_FILTER_BEACON;
  
-       if ((AR_SREV_9280_20_OR_LATER(sc->sc_ah) ||
-           AR_SREV_9285_12_OR_LATER(sc->sc_ah)) &&
-           (sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
+       if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
            (sc->rx.rxfilter & FIF_PSPOLL))
                rfilt |= ATH9K_RX_FILTER_PSPOLL;
  
@@@ -486,12 -483,12 +483,12 @@@ start_recv
  bool ath_stoprecv(struct ath_softc *sc)
  {
        struct ath_hw *ah = sc->sc_ah;
 -      bool stopped;
 +      bool stopped, reset = false;
  
        spin_lock_bh(&sc->rx.rxbuflock);
        ath9k_hw_abortpcurecv(ah);
        ath9k_hw_setrxfilter(ah, 0);
 -      stopped = ath9k_hw_stopdmarecv(ah);
 +      stopped = ath9k_hw_stopdmarecv(ah, &reset);
  
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
                ath_edma_stop_recv(sc);
                        "confusing the DMA engine when we start RX up\n");
                ATH_DBG_WARN_ON_ONCE(!stopped);
        }
 -      return stopped;
 +      return stopped || reset;
  }
  
  void ath_flushrecv(struct ath_softc *sc)
@@@ -1767,6 -1764,7 +1764,7 @@@ requeue
                } else {
                        list_move_tail(&bf->list, &sc->rx.rxbuf);
                        ath_rx_buf_link(sc, bf);
+                       ath9k_hw_rxena(ah);
                }
        } while (1);
  
index 3cea3f76e37351d4c80e8985861391e14b50de72,65d46c6ebced44245d2071dca7b400f71b4fa70e..e9e99f730ca8ee2827f2f8e27094149bd6024065
@@@ -357,6 -357,7 +357,7 @@@ static void ath_tx_complete_aggr(struc
        struct ath_frame_info *fi;
        int nframes;
        u8 tidno;
+       bool clear_filter;
  
        skb = bf->bf_mpdu;
        hdr = (struct ieee80211_hdr *)skb->data;
                        /* transmit completion */
                        acked_cnt++;
                } else {
-                       if (!(tid->state & AGGR_CLEANUP) && retry) {
-                               if (fi->retries < ATH_MAX_SW_RETRIES) {
-                                       ath_tx_set_retry(sc, txq, bf->bf_mpdu);
-                                       txpending = 1;
-                               } else {
-                                       bf->bf_state.bf_type |= BUF_XRETRY;
-                                       txfail = 1;
-                                       sendbar = 1;
-                                       txfail_cnt++;
-                               }
-                       } else {
+                       if ((tid->state & AGGR_CLEANUP) || !retry) {
                                /*
                                 * cleanup in progress, just fail
                                 * the un-acked sub-frames
                                 */
                                txfail = 1;
+                       } else if (fi->retries < ATH_MAX_SW_RETRIES) {
+                               if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
+                                   !an->sleeping)
+                                       ath_tx_set_retry(sc, txq, bf->bf_mpdu);
+                               clear_filter = true;
+                               txpending = 1;
+                       } else {
+                               bf->bf_state.bf_type |= BUF_XRETRY;
+                               txfail = 1;
+                               sendbar = 1;
+                               txfail_cnt++;
                        }
                }
  
                                !txfail, sendbar);
                } else {
                        /* retry the un-acked ones */
+                       ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, false);
                        if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
                                if (bf->bf_next == NULL && bf_last->bf_stale) {
                                        struct ath_buf *tbf;
  
        /* prepend un-acked frames to the beginning of the pending frame queue */
        if (!list_empty(&bf_pending)) {
+               if (an->sleeping)
+                       ieee80211_sta_set_tim(sta);
                spin_lock_bh(&txq->axq_lock);
+               if (clear_filter)
+                       tid->ac->clear_ps_filter = true;
                list_splice(&bf_pending, &tid->buf_q);
                ath_tx_queue_tid(txq, tid);
                spin_unlock_bh(&txq->axq_lock);
@@@ -628,8 -637,8 +637,8 @@@ static u32 ath_lookup_rate(struct ath_s
                                 (u32)ATH_AMPDU_LIMIT_MAX);
  
        /*
 -       * h/w can accept aggregates upto 16 bit lengths (65535).
 -       * The IE, however can hold upto 65536, which shows up here
 +       * h/w can accept aggregates up to 16 bit lengths (65535).
 +       * The IE, however can hold up to 65536, which shows up here
         * as zero. Ignore 65536 since we  are constrained by hw.
         */
        if (tid->an->maxampdu)
@@@ -816,6 -825,11 +825,11 @@@ static void ath_tx_sched_aggr(struct at
                bf = list_first_entry(&bf_q, struct ath_buf, list);
                bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
  
+               if (tid->ac->clear_ps_filter) {
+                       tid->ac->clear_ps_filter = false;
+                       ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
+               }
                /* if only one frame, send as non-aggregate */
                if (bf == bf->bf_lastbf) {
                        fi = get_frame_info(bf->bf_mpdu);
@@@ -896,6 -910,67 +910,67 @@@ void ath_tx_aggr_stop(struct ath_softc 
        ath_tx_flush_tid(sc, txtid);
  }
  
+ bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
+ {
+       struct ath_atx_tid *tid;
+       struct ath_atx_ac *ac;
+       struct ath_txq *txq;
+       bool buffered = false;
+       int tidno;
+       for (tidno = 0, tid = &an->tid[tidno];
+            tidno < WME_NUM_TID; tidno++, tid++) {
+               if (!tid->sched)
+                       continue;
+               ac = tid->ac;
+               txq = ac->txq;
+               spin_lock_bh(&txq->axq_lock);
+               if (!list_empty(&tid->buf_q))
+                       buffered = true;
+               tid->sched = false;
+               list_del(&tid->list);
+               if (ac->sched) {
+                       ac->sched = false;
+                       list_del(&ac->list);
+               }
+               spin_unlock_bh(&txq->axq_lock);
+       }
+       return buffered;
+ }
+ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
+ {
+       struct ath_atx_tid *tid;
+       struct ath_atx_ac *ac;
+       struct ath_txq *txq;
+       int tidno;
+       for (tidno = 0, tid = &an->tid[tidno];
+            tidno < WME_NUM_TID; tidno++, tid++) {
+               ac = tid->ac;
+               txq = ac->txq;
+               spin_lock_bh(&txq->axq_lock);
+               ac->clear_ps_filter = true;
+               if (!list_empty(&tid->buf_q) && !tid->paused) {
+                       ath_tx_queue_tid(txq, tid);
+                       ath_txq_schedule(sc, txq);
+               }
+               spin_unlock_bh(&txq->axq_lock);
+       }
+ }
  void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
  {
        struct ath_atx_tid *txtid;
@@@ -1451,7 -1526,7 +1526,7 @@@ static void setup_frame_info(struct iee
        struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
        struct ieee80211_hdr *hdr;
        struct ath_frame_info *fi = get_frame_info(skb);
-       struct ath_node *an;
+       struct ath_node *an = NULL;
        struct ath_atx_tid *tid;
        enum ath9k_key_type keytype;
        u16 seqno = 0;
  
        keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
  
+       if (sta)
+               an = (struct ath_node *) sta->drv_priv;
        hdr = (struct ieee80211_hdr *)skb->data;
-       if (sta && ieee80211_is_data_qos(hdr->frame_control) &&
+       if (an && ieee80211_is_data_qos(hdr->frame_control) &&
                conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
  
-               an = (struct ath_node *) sta->drv_priv;
                tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
  
                /*
        memset(fi, 0, sizeof(*fi));
        if (hw_key)
                fi->keyix = hw_key->hw_key_idx;
+       else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
+               fi->keyix = an->ps_key;
        else
                fi->keyix = ATH9K_TXKEYIX_INVALID;
        fi->keytype = keytype;
@@@ -1491,7 -1570,6 +1570,6 @@@ static int setup_tx_flags(struct sk_buf
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        int flags = 0;
  
-       flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
        flags |= ATH9K_TXDESC_INTREQ;
  
        if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
@@@ -1754,6 -1832,9 +1832,9 @@@ static void ath_tx_start_dma(struct ath
                if (txctl->paprd)
                        bf->bf_state.bfs_paprd_timestamp = jiffies;
  
+               if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
+                       ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
                ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
        }
  
index 0e1b8793c8641012617070e7d97ef0bfb8060a4f,02b896208b1adc3a40a9e457ed20d14103e06e14..028310f263c81c01060f5d551496cca88519b3b9
@@@ -97,8 -97,8 +97,8 @@@ static const struct ieee80211_regdomai
        }
  };
  
- /* Can be used by 0x67, 0x6A and 0x68 */
- static const struct ieee80211_regdomain ath_world_regdom_67_68_6A = {
+ /* Can be used by 0x67, 0x68, 0x6A and 0x6C */
+ static const struct ieee80211_regdomain ath_world_regdom_67_68_6A_6C = {
        .n_reg_rules = 4,
        .alpha2 =  "99",
        .reg_rules = {
@@@ -151,7 -151,8 +151,8 @@@ ieee80211_regdomain *ath_world_regdomai
        case 0x67:
        case 0x68:
        case 0x6A:
-               return &ath_world_regdom_67_68_6A;
+       case 0x6C:
+               return &ath_world_regdom_67_68_6A_6C;
        default:
                WARN_ON(1);
                return ath_default_world_regdomain();
@@@ -268,7 -269,7 +269,7 @@@ ath_reg_apply_active_scan_flags(struct 
        }
  
        /*
 -       * If a country IE has been recieved check its rule for this
 +       * If a country IE has been received check its rule for this
         * channel first before enabling active scan. The passive scan
         * would have been enforced by the initial processing of our
         * custom regulatory domain.
@@@ -333,6 -334,7 +334,7 @@@ static void ath_reg_apply_world_flags(s
        case 0x63:
        case 0x66:
        case 0x67:
+       case 0x6C:
                ath_reg_apply_beaconing_flags(wiphy, initiator);
                break;
        case 0x68:
@@@ -476,7 -478,7 +478,7 @@@ ath_regd_init_wiphy(struct ath_regulato
                wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
        } else {
                /*
 -               * This gets applied in the case of the absense of CRDA,
 +               * This gets applied in the case of the absence of CRDA,
                 * it's our own custom world regulatory domain, similar to
                 * cfg80211's but we enable passive scanning.
                 */
index d484c3678163f9191570f59dade85c9d642e018e,dd90619fdce7f4cda4c0e987511f58660b7a1e20..f8870543d68f8555025c1a26d094dfa91d4c3a8d
@@@ -3140,6 -3140,12 +3140,6 @@@ static int iwl4965_init_drv(struct iwl_
  
        iwl_legacy_init_scan_params(priv);
  
 -      /* Set the tx_power_user_lmt to the lowest power level
 -       * this value will get overwritten by channel max power avg
 -       * from eeprom */
 -      priv->tx_power_user_lmt = IWL4965_TX_POWER_TARGET_POWER_MIN;
 -      priv->tx_power_next = IWL4965_TX_POWER_TARGET_POWER_MIN;
 -
        ret = iwl_legacy_init_channel_map(priv);
        if (ret) {
                IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
@@@ -3173,7 -3179,7 +3173,7 @@@ static void iwl4965_hw_detect(struct iw
  {
        priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV);
        priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG);
-       pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
+       priv->rev_id = priv->pci_dev->revision;
        IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
  }
  
index e7a1bc6b76fd6073e8782f9d646084ffe92569cd,b90924e890a703478d269be4406e697393e3df2d..6dfa806aefecf2695198b596c3dd49a8891fa507
  /**
   * Keep-Warm (KW) buffer base address.
   *
-  * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
+  * Driver must allocate a 4KByte buffer that is for keeping the
   * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
-  * DRAM access when 4965 is Txing or Rxing.  The dummy accesses prevent host
+  * DRAM access when doing Txing or Rxing.  The dummy accesses prevent host
   * from going into a power-savings mode that would cause higher DRAM latency,
   * and possible data over/under-runs, before all Tx/Rx is complete.
   *
   * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
-  * of the buffer, which must be 4K aligned.  Once this is set up, the 4965
+  * of the buffer, which must be 4K aligned.  Once this is set up, the device
   * automatically invokes keep-warm accesses when normal accesses might not
   * be sufficient to maintain fast DRAM response.
   *
@@@ -97,7 -97,7 +97,7 @@@
  /**
   * TFD Circular Buffers Base (CBBC) addresses
   *
-  * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
+  * Device has 16 base pointer registers, one for each of 16 host-DRAM-resident
   * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
   * (see struct iwl_tfd_frame).  These 16 pointer registers are offset by 0x04
   * bytes from one another.  Each TFD circular buffer in DRAM must be 256-byte
  /**
   * Rx SRAM Control and Status Registers (RSCSR)
   *
-  * These registers provide handshake between driver and 4965 for the Rx queue
+  * These registers provide handshake between driver and device for the Rx queue
   * (this queue handles *all* command responses, notifications, Rx data, etc.
-  * sent from 4965 uCode to host driver).  Unlike Tx, there is only one Rx
+  * sent from uCode to host driver).  Unlike Tx, there is only one Rx
   * queue, and only one Rx DMA/FIFO channel.  Also unlike Tx, which can
   * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
   * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
   * mapping between RBDs and RBs.
   *
   * Driver must allocate host DRAM memory for the following, and set the
-  * physical address of each into 4965 registers:
+  * physical address of each into device registers:
   *
   * 1)  Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
   *     entries (although any power of 2, up to 4096, is selectable by driver).
   *     Driver sets physical address [35:8] of base of RBD circular buffer
   *     into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
   *
-  * 2)  Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
+  * 2)  Rx status buffer, 8 bytes, in which uCode indicates which Rx Buffers
   *     (RBs) have been filled, via a "write pointer", actually the index of
   *     the RB's corresponding RBD within the circular buffer.  Driver sets
   *     physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
   *
   *     Bit fields in lower dword of Rx status buffer (upper dword not used
-  *     by driver; see struct iwl4965_shared, val0):
+  *     by driver:
   *     31-12:  Not used by driver
   *     11- 0:  Index of last filled Rx buffer descriptor
-  *             (4965 writes, driver reads this value)
+  *             (device writes, driver reads this value)
   *
-  * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
+  * As the driver prepares Receive Buffers (RBs) for device to fill, driver must
   * enter pointers to these RBs into contiguous RBD circular buffer entries,
-  * and update the 4965's "write" index register,
+  * and update the device's "write" index register,
   * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
   *
   * This "write" index corresponds to the *next* RBD that the driver will make
   * RBs), should be 8 after preparing the first 8 RBs (for example), and must
   * wrap back to 0 at the end of the circular buffer (but don't wrap before
   * "read" index has advanced past 1!  See below).
-  * NOTE:  4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
+  * NOTE:  DEVICE EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
   *
-  * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
+  * As the device fills RBs (referenced from contiguous RBDs within the circular
   * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
   * to tell the driver the index of the latest filled RBD.  The driver must
-  * read this "read" index from DRAM after receiving an Rx interrupt from 4965.
+  * read this "read" index from DRAM after receiving an Rx interrupt from device
   *
   * The driver must also internally keep track of a third index, which is the
   * next RBD to process.  When receiving an Rx interrupt, driver should process
   * driver may process the RB pointed to by RBD 0.  Depending on volume of
   * traffic, there may be many RBs to process.
   *
-  * If read index == write index, 4965 thinks there is no room to put new data.
+  * If read index == write index, device thinks there is no room to put new data.
   * Due to this, the maximum number of filled RBs is 255, instead of 256.  To
   * be safe, make sure that there is a gap of at least 2 RBDs between "write"
   * and "read" indexes; that is, make sure that there are no more than 254
  /**
   * Transmit DMA Channel Control/Status Registers (TCSR)
   *
-  * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
+  * Device has one configuration register for each of 8 Tx DMA/FIFO channels
   * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
   * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
   *
  #define FH_TCSR_UPPER_BOUND  (FH_MEM_LOWER_BOUND + 0xE60)
  
  /* Find Control/Status reg for given Tx DMA/FIFO channel */
- #define FH49_TCSR_CHNL_NUM                            (7)
  #define FH50_TCSR_CHNL_NUM                            (8)
  
  /* TCSR: tx_config register values */
  #define RX_LOW_WATERMARK 8
  
  /* Size of one Rx buffer in host DRAM */
- #define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
  #define IWL_RX_BUF_SIZE_4K (4 * 1024)
  #define IWL_RX_BUF_SIZE_8K (8 * 1024)
  
   * @finished_rb_num [0:11] - Indicates the index of the current RB
   *    in which the last frame was written to
   * @finished_fr_num [0:11] - Indicates the index of the RX Frame
 - *    which was transfered
 + *    which was transferred
   */
  struct iwl_rb_status {
        __le16 closed_rb_num;
        __le16 closed_fr_num;
        __le16 finished_rb_num;
        __le16 finished_fr_nam;
-       __le32 __unused; /* 3945 only */
+       __le32 __unused;
  } __packed;
  
  
index 6331c61957a36b01d8476fe937d85f25a061b585,d79c8fd41138aef72ca9bcde4601cd0d044acd7b..5cd096e2ae36a606f82220dde23d141812c28756
@@@ -730,34 -730,20 +730,20 @@@ void rt2800_txdone(struct rt2x00_dev *r
        struct data_queue *queue;
        struct queue_entry *entry;
        u32 reg;
-       u8 pid;
-       int i;
+       u8 qid;
  
-       /*
-        * TX_STA_FIFO is a stack of X entries, hence read TX_STA_FIFO
-        * at most X times and also stop processing once the TX_STA_FIFO_VALID
-        * flag is not set anymore.
-        *
-        * The legacy drivers use X=TX_RING_SIZE but state in a comment
-        * that the TX_STA_FIFO stack has a size of 16. We stick to our
-        * tx ring size for now.
-        */
-       for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) {
-               rt2800_register_read(rt2x00dev, TX_STA_FIFO, &reg);
-               if (!rt2x00_get_field32(reg, TX_STA_FIFO_VALID))
-                       break;
+       while (kfifo_get(&rt2x00dev->txstatus_fifo, &reg)) {
  
-               /*
-                * Skip this entry when it contains an invalid
-                * queue identication number.
+               /* TX_STA_FIFO_PID_QUEUE is a 2-bit field, thus
+                * qid is guaranteed to be one of the TX QIDs
                 */
-               pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_QUEUE);
-               if (pid >= QID_RX)
-                       continue;
-               queue = rt2x00queue_get_tx_queue(rt2x00dev, pid);
-               if (unlikely(!queue))
+               qid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_QUEUE);
+               queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
+               if (unlikely(!queue)) {
+                       WARNING(rt2x00dev, "Got TX status for an unavailable "
+                                          "queue %u, dropping\n", qid);
                        continue;
+               }
  
                /*
                 * Inside each queue, we process each entry in a chronological
@@@ -949,25 -935,49 +935,49 @@@ static void rt2800_brightness_set(struc
        unsigned int ledmode =
                rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
                                   EEPROM_FREQ_LED_MODE);
+       u32 reg;
  
-       if (led->type == LED_TYPE_RADIO) {
-               rt2800_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode,
-                                     enabled ? 0x20 : 0);
-       } else if (led->type == LED_TYPE_ASSOC) {
-               rt2800_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode,
-                                     enabled ? (bg_mode ? 0x60 : 0xa0) : 0x20);
-       } else if (led->type == LED_TYPE_QUALITY) {
-               /*
-                * The brightness is divided into 6 levels (0 - 5),
-                * The specs tell us the following levels:
-                *      0, 1 ,3, 7, 15, 31
-                * to determine the level in a simple way we can simply
-                * work with bitshifting:
-                *      (1 << level) - 1
-                */
-               rt2800_mcu_request(led->rt2x00dev, MCU_LED_STRENGTH, 0xff,
-                                     (1 << brightness / (LED_FULL / 6)) - 1,
-                                     polarity);
+       /* Check for SoC (SOC devices don't support MCU requests) */
+       if (rt2x00_is_soc(led->rt2x00dev)) {
+               rt2800_register_read(led->rt2x00dev, LED_CFG, &reg);
+               /* Set LED Polarity */
+               rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, polarity);
+               /* Set LED Mode */
+               if (led->type == LED_TYPE_RADIO) {
+                       rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE,
+                                          enabled ? 3 : 0);
+               } else if (led->type == LED_TYPE_ASSOC) {
+                       rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE,
+                                          enabled ? 3 : 0);
+               } else if (led->type == LED_TYPE_QUALITY) {
+                       rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE,
+                                          enabled ? 3 : 0);
+               }
+               rt2800_register_write(led->rt2x00dev, LED_CFG, reg);
+       } else {
+               if (led->type == LED_TYPE_RADIO) {
+                       rt2800_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode,
+                                             enabled ? 0x20 : 0);
+               } else if (led->type == LED_TYPE_ASSOC) {
+                       rt2800_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode,
+                                             enabled ? (bg_mode ? 0x60 : 0xa0) : 0x20);
+               } else if (led->type == LED_TYPE_QUALITY) {
+                       /*
+                        * The brightness is divided into 6 levels (0 - 5),
+                        * The specs tell us the following levels:
+                        *      0, 1, 3, 7, 15, 31
+                        * to determine the level in a simple way we can simply
+                        * work with bitshifting:
+                        *      (1 << level) - 1
+                        */
+                       rt2800_mcu_request(led->rt2x00dev, MCU_LED_STRENGTH, 0xff,
+                                             (1 << brightness / (LED_FULL / 6)) - 1,
+                                             polarity);
+               }
        }
  }
  
@@@ -1221,6 -1231,25 +1231,25 @@@ void rt2800_config_intf(struct rt2x00_d
                rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, conf->sync);
                rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+               if (conf->sync == TSF_SYNC_AP_NONE) {
+                       /*
+                        * Tune beacon queue transmit parameters for AP mode
+                        */
+                       rt2800_register_read(rt2x00dev, TBTT_SYNC_CFG, &reg);
+                       rt2x00_set_field32(&reg, TBTT_SYNC_CFG_BCN_CWMIN, 0);
+                       rt2x00_set_field32(&reg, TBTT_SYNC_CFG_BCN_AIFSN, 1);
+                       rt2x00_set_field32(&reg, TBTT_SYNC_CFG_BCN_EXP_WIN, 32);
+                       rt2x00_set_field32(&reg, TBTT_SYNC_CFG_TBTT_ADJUST, 0);
+                       rt2800_register_write(rt2x00dev, TBTT_SYNC_CFG, reg);
+               } else {
+                       rt2800_register_read(rt2x00dev, TBTT_SYNC_CFG, &reg);
+                       rt2x00_set_field32(&reg, TBTT_SYNC_CFG_BCN_CWMIN, 4);
+                       rt2x00_set_field32(&reg, TBTT_SYNC_CFG_BCN_AIFSN, 2);
+                       rt2x00_set_field32(&reg, TBTT_SYNC_CFG_BCN_EXP_WIN, 32);
+                       rt2x00_set_field32(&reg, TBTT_SYNC_CFG_TBTT_ADJUST, 16);
+                       rt2800_register_write(rt2x00dev, TBTT_SYNC_CFG, reg);
+               }
        }
  
        if (flags & CONFIG_UPDATE_MAC) {
@@@ -1521,7 -1550,7 +1550,7 @@@ static void rt2800_config_channel_rf2xx
        if (rf->channel > 14) {
                /*
                 * When TX power is below 0, we should increase it by 7 to
 -               * make it a positive value (Minumum value is -7).
 +               * make it a positive value (Minimum value is -7).
                 * However this means that values between 0 and 7 have
                 * double meaning, and we should set a 7DBm boost flag.
                 */
@@@ -1739,8 -1768,8 +1768,8 @@@ static void rt2800_config_channel(struc
  
        if (rf->channel <= 14) {
                if (!rt2x00_rt(rt2x00dev, RT5390)) {
-                       if (test_bit(CONFIG_EXTERNAL_LNA_BG,
-                                    &rt2x00dev->flags)) {
+                       if (test_bit(CAPABILITY_EXTERNAL_LNA_BG,
+                                    &rt2x00dev->cap_flags)) {
                                rt2800_bbp_write(rt2x00dev, 82, 0x62);
                                rt2800_bbp_write(rt2x00dev, 75, 0x46);
                        } else {
        } else {
                rt2800_bbp_write(rt2x00dev, 82, 0xf2);
  
-               if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags))
+               if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags))
                        rt2800_bbp_write(rt2x00dev, 75, 0x46);
                else
                        rt2800_bbp_write(rt2x00dev, 75, 0x50);
@@@ -1984,7 -2013,7 +2013,7 @@@ static u8 rt2800_compensate_txpower(str
        if (!((band == IEEE80211_BAND_5GHZ) && is_rate_b))
                return txpower;
  
-       if (test_bit(CONFIG_SUPPORT_POWER_LIMIT, &rt2x00dev->flags)) {
+       if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags)) {
                /*
                 * Check if eirp txpower exceed txpower_limit.
                 * We use OFDM 6M as criterion and its eirp txpower
@@@ -2384,7 -2413,7 +2413,7 @@@ static int rt2800_init_registers(struc
        } else if (rt2800_is_305x_soc(rt2x00dev)) {
                rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
                rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
-               rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000001f);
+               rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000030);
        } else if (rt2x00_rt(rt2x00dev, RT5390)) {
                rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
                rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
@@@ -3285,8 -3314,8 +3314,8 @@@ static int rt2800_init_rfcsr(struct rt2
                    rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
                    rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
                    rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
-                       if (!test_bit(CONFIG_EXTERNAL_LNA_BG,
-                                     &rt2x00dev->flags))
+                       if (!test_bit(CAPABILITY_EXTERNAL_LNA_BG,
+                                     &rt2x00dev->cap_flags))
                                rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
                }
                rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
@@@ -3709,15 -3738,15 +3738,15 @@@ int rt2800_init_eeprom(struct rt2x00_de
        rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
  
        if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_LNA_5G))
-               __set_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags);
+               __set_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags);
        if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_LNA_2G))
-               __set_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags);
+               __set_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags);
  
        /*
         * Detect if this device has an hardware controlled radio.
         */
        if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_HW_RADIO))
-               __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags);
+               __set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags);
  
        /*
         * Store led settings, for correct led behaviour.
  
        if (rt2x00_get_field16(eeprom, EEPROM_EIRP_MAX_TX_POWER_2GHZ) <
                                        EIRP_MAX_TX_POWER_LIMIT)
-               __set_bit(CONFIG_SUPPORT_POWER_LIMIT, &rt2x00dev->flags);
+               __set_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags);
  
        return 0;
  }
index a2bd5feb9d5ce6bb9c486595bf0c3baad8ba5ad2,acf561f7cde3ee6bcab213d32ce96b6a0cede233..9d1a158e2c333f330ba3bdf8b2a78b1879b54ec3
@@@ -37,6 -37,7 +37,7 @@@
  #include <linux/etherdevice.h>
  #include <linux/input-polldev.h>
  #include <linux/kfifo.h>
+ #include <linux/timer.h>
  
  #include <net/mac80211.h>
  
@@@ -489,13 -490,13 +490,13 @@@ struct rt2x00intf_conf 
        enum nl80211_iftype type;
  
        /*
 -       * TSF sync value, this is dependant on the operation type.
 +       * TSF sync value, this is dependent on the operation type.
         */
        enum tsf_sync sync;
  
        /*
 -       * The MAC and BSSID addressess are simple array of bytes,
 -       * these arrays are little endian, so when sending the addressess
 +       * The MAC and BSSID addresses are simple array of bytes,
 +       * these arrays are little endian, so when sending the addresses
         * to the drivers, copy the it into a endian-signed variable.
         *
         * Note that all devices (except rt2500usb) have 32 bits
@@@ -570,7 -571,8 +571,8 @@@ struct rt2x00lib_ops 
        void (*start_queue) (struct data_queue *queue);
        void (*kick_queue) (struct data_queue *queue);
        void (*stop_queue) (struct data_queue *queue);
-       void (*flush_queue) (struct data_queue *queue);
+       void (*flush_queue) (struct data_queue *queue, bool drop);
+       void (*tx_dma_done) (struct queue_entry *entry);
  
        /*
         * TX control handlers
@@@ -643,11 -645,11 +645,11 @@@ struct rt2x00_ops 
  };
  
  /*
-  * rt2x00 device flags
+  * rt2x00 state flags
   */
- enum rt2x00_flags {
+ enum rt2x00_state_flags {
        /*
-        * Device state flags
+        * Device flags
         */
        DEVICE_STATE_PRESENT,
        DEVICE_STATE_REGISTERED_HW,
        DEVICE_STATE_ENABLED_RADIO,
        DEVICE_STATE_SCANNING,
  
-       /*
-        * Driver requirements
-        */
-       DRIVER_REQUIRE_FIRMWARE,
-       DRIVER_REQUIRE_BEACON_GUARD,
-       DRIVER_REQUIRE_ATIM_QUEUE,
-       DRIVER_REQUIRE_DMA,
-       DRIVER_REQUIRE_COPY_IV,
-       DRIVER_REQUIRE_L2PAD,
-       DRIVER_REQUIRE_TXSTATUS_FIFO,
-       DRIVER_REQUIRE_TASKLET_CONTEXT,
-       DRIVER_REQUIRE_SW_SEQNO,
-       DRIVER_REQUIRE_HT_TX_DESC,
-       /*
-        * Driver features
-        */
-       CONFIG_SUPPORT_HW_BUTTON,
-       CONFIG_SUPPORT_HW_CRYPTO,
-       CONFIG_SUPPORT_POWER_LIMIT,
-       DRIVER_SUPPORT_CONTROL_FILTERS,
-       DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL,
-       DRIVER_SUPPORT_PRE_TBTT_INTERRUPT,
-       DRIVER_SUPPORT_LINK_TUNING,
        /*
         * Driver configuration
         */
-       CONFIG_FRAME_TYPE,
-       CONFIG_RF_SEQUENCE,
-       CONFIG_EXTERNAL_LNA_A,
-       CONFIG_EXTERNAL_LNA_BG,
-       CONFIG_DOUBLE_ANTENNA,
        CONFIG_CHANNEL_HT40,
  };
  
+ /*
+  * rt2x00 capability flags
+  */
+ enum rt2x00_capability_flags {
+       /*
+        * Requirements
+        */
+       REQUIRE_FIRMWARE,
+       REQUIRE_BEACON_GUARD,
+       REQUIRE_ATIM_QUEUE,
+       REQUIRE_DMA,
+       REQUIRE_COPY_IV,
+       REQUIRE_L2PAD,
+       REQUIRE_TXSTATUS_FIFO,
+       REQUIRE_TASKLET_CONTEXT,
+       REQUIRE_SW_SEQNO,
+       REQUIRE_HT_TX_DESC,
+       /*
+        * Capabilities
+        */
+       CAPABILITY_HW_BUTTON,
+       CAPABILITY_HW_CRYPTO,
+       CAPABILITY_POWER_LIMIT,
+       CAPABILITY_CONTROL_FILTERS,
+       CAPABILITY_CONTROL_FILTER_PSPOLL,
+       CAPABILITY_PRE_TBTT_INTERRUPT,
+       CAPABILITY_LINK_TUNING,
+       CAPABILITY_FRAME_TYPE,
+       CAPABILITY_RF_SEQUENCE,
+       CAPABILITY_EXTERNAL_LNA_A,
+       CAPABILITY_EXTERNAL_LNA_BG,
+       CAPABILITY_DOUBLE_ANTENNA,
+ };
  /*
   * rt2x00 device structure.
   */
@@@ -738,12 -745,19 +745,19 @@@ struct rt2x00_dev 
  #endif /* CONFIG_RT2X00_LIB_LEDS */
  
        /*
-        * Device flags.
-        * In these flags the current status and some
-        * of the device capabilities are stored.
+        * Device state flags.
+        * In these flags the current status is stored.
+        * Access to these flags should occur atomically.
         */
        unsigned long flags;
  
+       /*
+        * Device capability flags.
+        * In these flags the device/driver capabilities are stored.
+        * Access to these flags should occur non-atomically.
+        */
+       unsigned long cap_flags;
        /*
         * Device information, Bus IRQ and name (PCI, SoC)
         */
         */
        DECLARE_KFIFO_PTR(txstatus_fifo, u32);
  
+       /*
+        * Timer to ensure tx status reports are read (rt2800usb).
+        */
+       struct timer_list txstatus_timer;
        /*
         * Tasklet for processing tx status reports (rt2800pci).
         */
@@@ -1136,7 -1155,7 +1155,7 @@@ void rt2x00queue_stop_queue(struct data
   * @drop: True to drop all pending frames.
   *
   * This function will flush the queue. After this call
 - * the queue is guarenteed to be empty.
 + * the queue is guaranteed to be empty.
   */
  void rt2x00queue_flush_queue(struct data_queue *queue, bool drop);
  
@@@ -1235,6 -1254,10 +1254,10 @@@ int rt2x00mac_conf_tx(struct ieee80211_
                      const struct ieee80211_tx_queue_params *params);
  void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw);
  void rt2x00mac_flush(struct ieee80211_hw *hw, bool drop);
+ int rt2x00mac_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
+ int rt2x00mac_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
+ void rt2x00mac_get_ringparam(struct ieee80211_hw *hw,
+                            u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max);
  
  /*
   * Driver allocation handlers.
index 9416e36de29e900f253a69022ee735ea29b89052,2a313b6d378d238ac629d7fb02002ec95e825892..f7872640459278d6ec69f788fa5f2019429d891a
@@@ -60,7 -60,7 +60,7 @@@ void rt2x00lib_config_intf(struct rt2x0
         * Note that when NULL is passed as address we will send
         * 00:00:00:00:00 to the device to clear the address.
         * This will prevent the device being confused when it wants
 -       * to ACK frames or consideres itself associated.
 +       * to ACK frames or considers itself associated.
         */
        memset(conf.mac, 0, sizeof(conf.mac));
        if (mac)
@@@ -109,15 -109,6 +109,6 @@@ void rt2x00lib_config_erp(struct rt2x00
        rt2x00dev->ops->lib->config_erp(rt2x00dev, &erp, changed);
  }
  
- static inline
- enum antenna rt2x00lib_config_antenna_check(enum antenna current_ant,
-                                           enum antenna default_ant)
- {
-       if (current_ant != ANTENNA_SW_DIVERSITY)
-               return current_ant;
-       return (default_ant != ANTENNA_SW_DIVERSITY) ? default_ant : ANTENNA_B;
- }
  void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
                              struct antenna_setup config)
  {
        struct antenna_setup *active = &rt2x00dev->link.ant.active;
  
        /*
-        * Failsafe: Make sure we are not sending the
-        * ANTENNA_SW_DIVERSITY state to the driver.
-        * If that happens, fallback to hardware defaults,
-        * or our own default.
+        * When the caller tries to send the SW diversity,
+        * we must update the ANTENNA_RX_DIVERSITY flag to
+        * enable the antenna diversity in the link tuner.
+        *
+        * Secondly, we must guarantee we never send the
+        * software antenna diversity command to the driver.
         */
-       if (!(ant->flags & ANTENNA_RX_DIVERSITY))
-               config.rx = rt2x00lib_config_antenna_check(config.rx, def->rx);
-       else if (config.rx == ANTENNA_SW_DIVERSITY)
+       if (!(ant->flags & ANTENNA_RX_DIVERSITY)) {
+               if (config.rx == ANTENNA_SW_DIVERSITY) {
+                       ant->flags |= ANTENNA_RX_DIVERSITY;
+                       if (def->rx == ANTENNA_SW_DIVERSITY)
+                               config.rx = ANTENNA_B;
+                       else
+                               config.rx = def->rx;
+               }
+       } else if (config.rx == ANTENNA_SW_DIVERSITY)
                config.rx = active->rx;
  
-       if (!(ant->flags & ANTENNA_TX_DIVERSITY))
-               config.tx = rt2x00lib_config_antenna_check(config.tx, def->tx);
-       else if (config.tx == ANTENNA_SW_DIVERSITY)
+       if (!(ant->flags & ANTENNA_TX_DIVERSITY)) {
+               if (config.tx == ANTENNA_SW_DIVERSITY) {
+                       ant->flags |= ANTENNA_TX_DIVERSITY;
+                       if (def->tx == ANTENNA_SW_DIVERSITY)
+                               config.tx = ANTENNA_B;
+                       else
+                               config.tx = def->tx;
+               }
+       } else if (config.tx == ANTENNA_SW_DIVERSITY)
                config.tx = active->tx;
  
        /*
                rt2x00queue_start_queue(rt2x00dev->rx);
  }
  
+ static u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev,
+                                  struct ieee80211_conf *conf)
+ {
+       struct hw_mode_spec *spec = &rt2x00dev->spec;
+       int center_channel;
+       u16 i;
+       /*
+        * Initialize center channel to current channel.
+        */
+       center_channel = spec->channels[conf->channel->hw_value].channel;
+       /*
+        * Adjust center channel to HT40+ and HT40- operation.
+        */
+       if (conf_is_ht40_plus(conf))
+               center_channel += 2;
+       else if (conf_is_ht40_minus(conf))
+               center_channel -= (center_channel == 14) ? 1 : 2;
+       for (i = 0; i < spec->num_channels; i++)
+               if (spec->channels[i].channel == center_channel)
+                       return i;
+       WARN_ON(1);
+       return conf->channel->hw_value;
+ }
  void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
                      struct ieee80211_conf *conf,
                      unsigned int ieee80211_flags)
  
        if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL) {
                if (conf_is_ht40(conf)) {
-                       __set_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags);
+                       set_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags);
                        hw_value = rt2x00ht_center_channel(rt2x00dev, conf);
                } else {
-                       __clear_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags);
+                       clear_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags);
                        hw_value = conf->channel->hw_value;
                }
  
index 3f5688fbf3f7aa89624ba3c4b0a721a3872a795e,e1e0c51fcde82978669aac992c222b79f21c8ac6..1bb9d46077ffe88c9c9e8bd56f5b5cca0ebc299b
@@@ -52,7 -52,7 +52,7 @@@ void rt2x00crypto_create_tx_descriptor(
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
        struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
  
-       if (!test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags) || !hw_key)
+       if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags) || !hw_key)
                return;
  
        __set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags);
@@@ -80,7 -80,7 +80,7 @@@ unsigned int rt2x00crypto_tx_overhead(s
        struct ieee80211_key_conf *key = tx_info->control.hw_key;
        unsigned int overhead = 0;
  
-       if (!test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags) || !key)
+       if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags) || !key)
                return overhead;
  
        /*
@@@ -237,7 -237,7 +237,7 @@@ void rt2x00crypto_rx_insert_iv(struct s
        }
  
        /*
 -       * NOTE: Always count the payload as transfered,
 +       * NOTE: Always count the payload as transferred,
         * even when alignment was set to zero. This is required
         * for determining the correct offset for the ICV data.
         */
index 1435976b87798151708814a2f34ec740dd824bf2,fa55399be1921f4d719590beb612121b74f99a8f..ea10b0068f823f503e827f45d9e4c52dd9f8a74f
@@@ -192,17 -192,7 +192,7 @@@ static bool rt2x00lib_antenna_diversity
        /*
         * Determine if software diversity is enabled for
         * either the TX or RX antenna (or both).
-        * Always perform this check since within the link
-        * tuner interval the configuration might have changed.
         */
-       ant->flags &= ~ANTENNA_RX_DIVERSITY;
-       ant->flags &= ~ANTENNA_TX_DIVERSITY;
-       if (rt2x00dev->default_ant.rx == ANTENNA_SW_DIVERSITY)
-               ant->flags |= ANTENNA_RX_DIVERSITY;
-       if (rt2x00dev->default_ant.tx == ANTENNA_SW_DIVERSITY)
-               ant->flags |= ANTENNA_TX_DIVERSITY;
        if (!(ant->flags & ANTENNA_RX_DIVERSITY) &&
            !(ant->flags & ANTENNA_TX_DIVERSITY)) {
                ant->flags = 0;
@@@ -283,7 -273,7 +273,7 @@@ void rt2x00link_start_tuner(struct rt2x
        /**
         * While scanning, link tuning is disabled. By default
         * the most sensitive settings will be used to make sure
 -       * that all beacons and probe responses will be recieved
 +       * that all beacons and probe responses will be received
         * during the scan.
         */
        if (test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags))
@@@ -383,7 -373,7 +373,7 @@@ static void rt2x00link_tuner(struct wor
         * do not support link tuning at all, while other devices can disable
         * the feature from the EEPROM.
         */
-       if (test_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags))
+       if (test_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags))
                rt2x00dev->ops->lib->link_tuner(rt2x00dev, qual, link->count);
  
        /*
index 94b8bbb7ad80a707a3fd54544920d4121931f679,56f9d0df9c6136646e7adb0fa57a064ae1194209..ab8c16f8bcafebec3faeecb412669792eab664a0
@@@ -60,7 -60,7 +60,7 @@@ struct sk_buff *rt2x00queue_alloc_rxskb
         * at least 8 bytes bytes available in headroom for IV/EIV
         * and 8 bytes for ICV data as tailroon.
         */
-       if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
+       if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
                head_size += 8;
                tail_size += 8;
        }
@@@ -86,7 -86,7 +86,7 @@@
        memset(skbdesc, 0, sizeof(*skbdesc));
        skbdesc->entry = entry;
  
-       if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
+       if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
                skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
                                                  skb->data,
                                                  skb->len,
@@@ -213,7 -213,7 +213,7 @@@ static void rt2x00queue_create_tx_descr
  
        __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
  
-       if (!test_bit(DRIVER_REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->flags))
+       if (!test_bit(REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->cap_flags))
                return;
  
        /*
@@@ -302,6 -302,85 +302,85 @@@ static void rt2x00queue_create_tx_descr
        }
  }
  
+ static void rt2x00queue_create_tx_descriptor_ht(struct queue_entry *entry,
+                                               struct txentry_desc *txdesc,
+                                               const struct rt2x00_rate *hwrate)
+ {
+       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
+       struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
+       if (tx_info->control.sta)
+               txdesc->u.ht.mpdu_density =
+                   tx_info->control.sta->ht_cap.ampdu_density;
+       txdesc->u.ht.ba_size = 7;       /* FIXME: What value is needed? */
+       /*
+        * Only one STBC stream is supported for now.
+        */
+       if (tx_info->flags & IEEE80211_TX_CTL_STBC)
+               txdesc->u.ht.stbc = 1;
+       /*
+        * If IEEE80211_TX_RC_MCS is set txrate->idx just contains the
+        * mcs rate to be used
+        */
+       if (txrate->flags & IEEE80211_TX_RC_MCS) {
+               txdesc->u.ht.mcs = txrate->idx;
+               /*
+                * MIMO PS should be set to 1 for STA's using dynamic SM PS
+                * when using more then one tx stream (>MCS7).
+                */
+               if (tx_info->control.sta && txdesc->u.ht.mcs > 7 &&
+                   ((tx_info->control.sta->ht_cap.cap &
+                     IEEE80211_HT_CAP_SM_PS) >>
+                    IEEE80211_HT_CAP_SM_PS_SHIFT) ==
+                   WLAN_HT_CAP_SM_PS_DYNAMIC)
+                       __set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
+       } else {
+               txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
+               if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+                       txdesc->u.ht.mcs |= 0x08;
+       }
+       /*
+        * This frame is eligible for an AMPDU, however, don't aggregate
+        * frames that are intended to probe a specific tx rate.
+        */
+       if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
+           !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
+               __set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
+       /*
+        * Set 40Mhz mode if necessary (for legacy rates this will
+        * duplicate the frame to both channels).
+        */
+       if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
+           txrate->flags & IEEE80211_TX_RC_DUP_DATA)
+               __set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
+       if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
+               __set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);
+       /*
+        * Determine IFS values
+        * - Use TXOP_BACKOFF for management frames except beacons
+        * - Use TXOP_SIFS for fragment bursts
+        * - Use TXOP_HTTXOP for everything else
+        *
+        * Note: rt2800 devices won't use CTS protection (if used)
+        * for frames not transmitted with TXOP_HTTXOP
+        */
+       if (ieee80211_is_mgmt(hdr->frame_control) &&
+           !ieee80211_is_beacon(hdr->frame_control))
+               txdesc->u.ht.txop = TXOP_BACKOFF;
+       else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
+               txdesc->u.ht.txop = TXOP_SIFS;
+       else
+               txdesc->u.ht.txop = TXOP_HTTXOP;
+ }
  static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
                                             struct txentry_desc *txdesc)
  {
        rt2x00crypto_create_tx_descriptor(entry, txdesc);
        rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
  
-       if (test_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags))
-               rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
+       if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
+               rt2x00queue_create_tx_descriptor_ht(entry, txdesc, hwrate);
        else
                rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
  }
@@@ -436,7 -515,7 +515,7 @@@ static int rt2x00queue_write_tx_data(st
        /*
         * Map the skb to DMA.
         */
-       if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
+       if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags))
                rt2x00queue_map_txskb(entry);
  
        return 0;
@@@ -529,23 -608,23 +608,23 @@@ int rt2x00queue_write_tx_frame(struct d
         */
        if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
            !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
-               if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags))
+               if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
                        rt2x00crypto_tx_copy_iv(skb, &txdesc);
                else
                        rt2x00crypto_tx_remove_iv(skb, &txdesc);
        }
  
        /*
 -       * When DMA allocation is required we should guarentee to the
 +       * When DMA allocation is required we should guarantee to the
         * driver that the DMA is aligned to a 4-byte boundary.
         * However some drivers require L2 padding to pad the payload
         * rather then the header. This could be a requirement for
         * PCI and USB devices, while header alignment only is valid
         * for PCI devices.
         */
-       if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags))
+       if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
                rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
-       else if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
+       else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
                rt2x00queue_align_frame(entry->skb);
  
        /*
  
        set_bit(ENTRY_DATA_PENDING, &entry->flags);
  
-       rt2x00queue_index_inc(queue, Q_INDEX);
+       rt2x00queue_index_inc(entry, Q_INDEX);
        rt2x00queue_write_tx_descriptor(entry, &txdesc);
        rt2x00queue_kick_tx_queue(queue, &txdesc);
  
@@@ -650,10 -729,12 +729,12 @@@ int rt2x00queue_update_beacon(struct rt
        return ret;
  }
  
void rt2x00queue_for_each_entry(struct data_queue *queue,
bool rt2x00queue_for_each_entry(struct data_queue *queue,
                                enum queue_index start,
                                enum queue_index end,
-                               void (*fn)(struct queue_entry *entry))
+                               void *data,
+                               bool (*fn)(struct queue_entry *entry,
+                                          void *data))
  {
        unsigned long irqflags;
        unsigned int index_start;
                ERROR(queue->rt2x00dev,
                      "Entry requested from invalid index range (%d - %d)\n",
                      start, end);
-               return;
+               return true;
        }
  
        /*
        spin_unlock_irqrestore(&queue->index_lock, irqflags);
  
        /*
 -       * Start from the TX done pointer, this guarentees that we will
 +       * Start from the TX done pointer, this guarantees that we will
         * send out all frames in the correct order.
         */
        if (index_start < index_end) {
-               for (i = index_start; i < index_end; i++)
-                       fn(&queue->entries[i]);
+               for (i = index_start; i < index_end; i++) {
+                       if (fn(&queue->entries[i], data))
+                               return true;
+               }
        } else {
-               for (i = index_start; i < queue->limit; i++)
-                       fn(&queue->entries[i]);
+               for (i = index_start; i < queue->limit; i++) {
+                       if (fn(&queue->entries[i], data))
+                               return true;
+               }
  
-               for (i = 0; i < index_end; i++)
-                       fn(&queue->entries[i]);
+               for (i = 0; i < index_end; i++) {
+                       if (fn(&queue->entries[i], data))
+                               return true;
+               }
        }
+       return false;
  }
  EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
  
@@@ -717,8 -806,9 +806,9 @@@ struct queue_entry *rt2x00queue_get_ent
  }
  EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);
  
- void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
+ void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
  {
+       struct data_queue *queue = entry->queue;
        unsigned long irqflags;
  
        if (unlikely(index >= Q_INDEX_MAX)) {
        if (queue->index[index] >= queue->limit)
                queue->index[index] = 0;
  
-       queue->last_action[index] = jiffies;
+       entry->last_action = jiffies;
  
        if (index == Q_INDEX) {
                queue->length++;
@@@ -838,7 -928,6 +928,6 @@@ EXPORT_SYMBOL_GPL(rt2x00queue_stop_queu
  
  void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
  {
-       unsigned int i;
        bool started;
        bool tx_queue =
                (queue->qid == QID_AC_VO) ||
        }
  
        /*
-        * Check if driver supports flushing, we can only guarantee
-        * full support for flushing if the driver is able
-        * to cancel all pending frames (drop = true).
+        * Check if driver supports flushing, if that is the case we can
+        * defer the flushing to the driver. Otherwise we must use the
+        * alternative which just waits for the queue to become empty.
         */
-       if (drop && queue->rt2x00dev->ops->lib->flush_queue)
-               queue->rt2x00dev->ops->lib->flush_queue(queue);
-       /*
-        * When we don't want to drop any frames, or when
-        * the driver doesn't fully flush the queue correcly,
-        * we must wait for the queue to become empty.
-        */
-       for (i = 0; !rt2x00queue_empty(queue) && i < 100; i++)
-               msleep(10);
+       if (likely(queue->rt2x00dev->ops->lib->flush_queue))
+               queue->rt2x00dev->ops->lib->flush_queue(queue, drop);
  
        /*
         * The queue flush has failed...
@@@ -959,10 -1040,8 +1040,8 @@@ static void rt2x00queue_reset(struct da
        queue->count = 0;
        queue->length = 0;
  
-       for (i = 0; i < Q_INDEX_MAX; i++) {
+       for (i = 0; i < Q_INDEX_MAX; i++)
                queue->index[i] = 0;
-               queue->last_action[i] = jiffies;
-       }
  
        spin_unlock_irqrestore(&queue->index_lock, irqflags);
  }
@@@ -1069,7 -1148,7 +1148,7 @@@ int rt2x00queue_initialize(struct rt2x0
        if (status)
                goto exit;
  
-       if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
+       if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
                status = rt2x00queue_alloc_entries(rt2x00dev->atim,
                                                   rt2x00dev->ops->atim);
                if (status)
@@@ -1121,7 -1200,7 +1200,7 @@@ int rt2x00queue_allocate(struct rt2x00_
        struct data_queue *queue;
        enum data_queue_qid qid;
        unsigned int req_atim =
-           !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
+           !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);
  
        /*
         * We need the following queues:
index 5db6a99fce7db34d927dcfeb4cbdb18df067a6be,36f4d03eff61899c604446bd337c75979868647d..167d45873dcab4e35fa757f0ea4c70c5e13a56e7
@@@ -345,8 -345,8 +345,8 @@@ struct txentry_desc 
   *    only be touched after the device has signaled it is done with it.
   * @ENTRY_DATA_PENDING: This entry contains a valid frame and is waiting
   *    for the signal to start sending.
 - * @ENTRY_DATA_IO_FAILED: Hardware indicated that an IO error occured
 - *    while transfering the data to the hardware. No TX status report will
 + * @ENTRY_DATA_IO_FAILED: Hardware indicated that an IO error occurred
 + *    while transferring the data to the hardware. No TX status report will
   *    be expected from the hardware.
   * @ENTRY_DATA_STATUS_PENDING: The entry has been send to the device and
   *    returned. It is now waiting for the status reporting before the
@@@ -364,15 -364,17 +364,17 @@@ enum queue_entry_flags 
   * struct queue_entry: Entry inside the &struct data_queue
   *
   * @flags: Entry flags, see &enum queue_entry_flags.
+  * @last_action: Timestamp of last change.
   * @queue: The data queue (&struct data_queue) to which this entry belongs.
   * @skb: The buffer which is currently being transmitted (for TX queue),
 - *    or used to directly recieve data in (for RX queue).
 + *    or used to directly receive data in (for RX queue).
   * @entry_idx: The entry index number.
   * @priv_data: Private data belonging to this queue entry. The pointer
   *    points to data specific to a particular driver and queue type.
   */
  struct queue_entry {
        unsigned long flags;
+       unsigned long last_action;
  
        struct data_queue *queue;
  
   * @Q_INDEX: Index pointer to the current entry in the queue, if this entry is
   *    owned by the hardware then the queue is considered to be full.
   * @Q_INDEX_DMA_DONE: Index pointer for the next entry which will have been
 - *    transfered to the hardware.
 + *    transferred to the hardware.
   * @Q_INDEX_DONE: Index pointer to the next entry which will be completed by
   *    the hardware and for which we need to run the txdone handler. If this
   *    entry is not owned by the hardware the queue is considered to be empty.
@@@ -463,7 -465,6 +465,6 @@@ struct data_queue 
        unsigned short threshold;
        unsigned short length;
        unsigned short index[Q_INDEX_MAX];
-       unsigned long last_action[Q_INDEX_MAX];
  
        unsigned short txop;
        unsigned short aifs;
@@@ -580,16 -581,22 +581,22 @@@ struct data_queue_desc 
   * @queue: Pointer to @data_queue
   * @start: &enum queue_index Pointer to start index
   * @end: &enum queue_index Pointer to end index
+  * @data: Data to pass to the callback function
   * @fn: The function to call for each &struct queue_entry
   *
   * This will walk through all entries in the queue, in chronological
   * order. This means it will start at the current @start pointer
   * and will walk through the queue until it reaches the @end pointer.
+  *
+  * If fn returns true for an entry rt2x00queue_for_each_entry will stop
+  * processing and return true as well.
   */
void rt2x00queue_for_each_entry(struct data_queue *queue,
bool rt2x00queue_for_each_entry(struct data_queue *queue,
                                enum queue_index start,
                                enum queue_index end,
-                               void (*fn)(struct queue_entry *entry));
+                               void *data,
+                               bool (*fn)(struct queue_entry *entry,
+                                          void *data));
  
  /**
   * rt2x00queue_empty - Check if the queue is empty.
@@@ -628,23 -635,25 +635,25 @@@ static inline int rt2x00queue_threshold
  }
  
  /**
 - * rt2x00queue_status_timeout - Check if a timeout occured for STATUS reports
 + * rt2x00queue_status_timeout - Check if a timeout occurred for STATUS reports
-  * @queue: Queue to check.
+  * @entry: Queue entry to check.
   */
- static inline int rt2x00queue_status_timeout(struct data_queue *queue)
+ static inline int rt2x00queue_status_timeout(struct queue_entry *entry)
  {
-       return time_after(queue->last_action[Q_INDEX_DMA_DONE],
-                         queue->last_action[Q_INDEX_DONE] + (HZ / 10));
+       if (!test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
+               return false;
+       return time_after(jiffies, entry->last_action + msecs_to_jiffies(100));
  }
  
  /**
-  * rt2x00queue_timeout - Check if a timeout occurred for DMA transfers
-  * @queue: Queue to check.
 - * rt2x00queuedma__timeout - Check if a timeout occured for DMA transfers
++ * rt2x00queue_dma_timeout - Check if a timeout occurred for DMA transfers
+  * @entry: Queue entry to check.
   */
- static inline int rt2x00queue_dma_timeout(struct data_queue *queue)
+ static inline int rt2x00queue_dma_timeout(struct queue_entry *entry)
  {
-       return time_after(queue->last_action[Q_INDEX],
-                         queue->last_action[Q_INDEX_DMA_DONE] + (HZ / 10));
+       if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
+               return false;
+       return time_after(jiffies, entry->last_action + msecs_to_jiffies(100));
  }
  
  /**
index 36f388f97d658e27c40defe1a9e3a79c585d2a15,570184ee163c66fdb8231214dee700f2479288c1..cb208d589ff87d48dd8f9260dcddbf0bfa33ab6d
@@@ -165,6 -165,56 +165,56 @@@ int rt2x00usb_regbusy_read(struct rt2x0
  }
  EXPORT_SYMBOL_GPL(rt2x00usb_regbusy_read);
  
+ struct rt2x00_async_read_data {
+       __le32 reg;
+       struct usb_ctrlrequest cr;
+       struct rt2x00_dev *rt2x00dev;
+       void (*callback)(struct rt2x00_dev *,int,u32);
+ };
+ static void rt2x00usb_register_read_async_cb(struct urb *urb)
+ {
+       struct rt2x00_async_read_data *rd = urb->context;
+       rd->callback(rd->rt2x00dev, urb->status, le32_to_cpu(rd->reg));
+       kfree(urb->context);
+ }
+ void rt2x00usb_register_read_async(struct rt2x00_dev *rt2x00dev,
+                                  const unsigned int offset,
+                                  void (*callback)(struct rt2x00_dev*,int,u32))
+ {
+       struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
+       struct urb *urb;
+       struct rt2x00_async_read_data *rd;
+       rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
+       if (!rd)
+               return;
+       urb = usb_alloc_urb(0, GFP_ATOMIC);
+       if (!urb) {
+               kfree(rd);
+               return;
+       }
+       rd->rt2x00dev = rt2x00dev;
+       rd->callback = callback;
+       rd->cr.bRequestType = USB_VENDOR_REQUEST_IN;
+       rd->cr.bRequest = USB_MULTI_READ;
+       rd->cr.wValue = 0;
+       rd->cr.wIndex = cpu_to_le16(offset);
+       rd->cr.wLength = cpu_to_le16(sizeof(u32));
+       usb_fill_control_urb(urb, usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+                            (unsigned char *)(&rd->cr), &rd->reg, sizeof(rd->reg),
+                            rt2x00usb_register_read_async_cb, rd);
+       if (usb_submit_urb(urb, GFP_ATOMIC) < 0)
+               kfree(rd);
+       usb_free_urb(urb);
+ }
+ EXPORT_SYMBOL_GPL(rt2x00usb_register_read_async);
  /*
   * TX data handlers.
   */
@@@ -173,7 -223,7 +223,7 @@@ static void rt2x00usb_work_txdone_entry
        /*
         * If the transfer to hardware succeeded, it does not mean the
         * frame was send out correctly. It only means the frame
 -       * was succesfully pushed to the hardware, we have no
 +       * was successfully pushed to the hardware, we have no
         * way to determine the transmission status right now.
         * (Only indirectly by looking at the failed TX counters
         * in the register).
@@@ -212,6 -262,9 +262,9 @@@ static void rt2x00usb_interrupt_txdone(
        if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
                return;
  
+       if (rt2x00dev->ops->lib->tx_dma_done)
+               rt2x00dev->ops->lib->tx_dma_done(entry);
        /*
         * Report the frame as DMA done
         */
         * Schedule the delayed work for reading the TX status
         * from the device.
         */
-       queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
+       if (!test_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags) ||
+           !kfifo_is_empty(&rt2x00dev->txstatus_fifo))
+               queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
  }
  
- static void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
+ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void* data)
  {
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
  
        if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags) ||
            test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
-               return;
+               return true;
  
        /*
         * USB devices cannot blindly pass the skb->len as the
                set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
                rt2x00lib_dmadone(entry);
        }
+       return false;
  }
  
  /*
@@@ -323,7 -380,7 +380,7 @@@ static void rt2x00usb_interrupt_rxdone(
        queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work);
  }
  
- static void rt2x00usb_kick_rx_entry(struct queue_entry *entry)
+ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void* data)
  {
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
  
        if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
            test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
-               return;
+               return true;
  
        rt2x00lib_dmastart(entry);
  
                set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
                rt2x00lib_dmadone(entry);
        }
+       return false;
  }
  
  void rt2x00usb_kick_queue(struct data_queue *queue)
        case QID_AC_BE:
        case QID_AC_BK:
                if (!rt2x00queue_empty(queue))
-                       rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
+                       rt2x00queue_for_each_entry(queue,
+                                                  Q_INDEX_DONE,
+                                                  Q_INDEX,
+                                                  NULL,
                                                   rt2x00usb_kick_tx_entry);
                break;
        case QID_RX:
                if (!rt2x00queue_full(queue))
-                       rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
+                       rt2x00queue_for_each_entry(queue,
+                                                  Q_INDEX_DONE,
+                                                  Q_INDEX,
+                                                  NULL,
                                                   rt2x00usb_kick_rx_entry);
                break;
        default:
  }
  EXPORT_SYMBOL_GPL(rt2x00usb_kick_queue);
  
- static void rt2x00usb_flush_entry(struct queue_entry *entry)
+ static bool rt2x00usb_flush_entry(struct queue_entry *entry, void* data)
  {
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct queue_entry_priv_usb *entry_priv = entry->priv_data;
        struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;
  
        if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
-               return;
+               return true;
  
        usb_kill_urb(entry_priv->urb);
  
         * Kill guardian urb (if required by driver).
         */
        if ((entry->queue->qid == QID_BEACON) &&
-           (test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags)))
+           (test_bit(REQUIRE_BEACON_GUARD, &rt2x00dev->cap_flags)))
                usb_kill_urb(bcn_priv->guardian_urb);
+       return false;
  }
  
- void rt2x00usb_flush_queue(struct data_queue *queue)
+ void rt2x00usb_flush_queue(struct data_queue *queue, bool drop)
  {
        struct work_struct *completion;
        unsigned int i;
  
-       rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
-                                  rt2x00usb_flush_entry);
+       if (drop)
+               rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX, NULL,
+                                          rt2x00usb_flush_entry);
  
        /*
         * Obtain the queue completion handler
                return;
        }
  
-       for (i = 0; i < 20; i++) {
+       for (i = 0; i < 10; i++) {
                /*
                 * Check if the driver is already done, otherwise we
                 * have to sleep a little while to give the driver/hw
@@@ -456,15 -524,31 +524,31 @@@ static void rt2x00usb_watchdog_tx_statu
        queue_work(queue->rt2x00dev->workqueue, &queue->rt2x00dev->txdone_work);
  }
  
+ static int rt2x00usb_status_timeout(struct data_queue *queue)
+ {
+       struct queue_entry *entry;
+       entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
+       return rt2x00queue_status_timeout(entry);
+ }
+ static int rt2x00usb_dma_timeout(struct data_queue *queue)
+ {
+       struct queue_entry *entry;
+       entry = rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE);
+       return rt2x00queue_dma_timeout(entry);
+ }
  void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
  {
        struct data_queue *queue;
  
        tx_queue_for_each(rt2x00dev, queue) {
                if (!rt2x00queue_empty(queue)) {
-                       if (rt2x00queue_dma_timeout(queue))
+                       if (rt2x00usb_dma_timeout(queue))
                                rt2x00usb_watchdog_tx_dma(queue);
-                       if (rt2x00queue_status_timeout(queue))
+                       if (rt2x00usb_status_timeout(queue))
                                rt2x00usb_watchdog_tx_status(queue);
                }
        }
@@@ -489,7 -573,7 +573,7 @@@ void rt2x00usb_clear_entry(struct queue
        entry->flags = 0;
  
        if (entry->queue->qid == QID_RX)
-               rt2x00usb_kick_rx_entry(entry);
+               rt2x00usb_kick_rx_entry(entry, NULL);
  }
  EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry);
  
@@@ -583,7 -667,7 +667,7 @@@ static int rt2x00usb_alloc_entries(stru
         * then we are done.
         */
        if (queue->qid != QID_BEACON ||
-           !test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags))
+           !test_bit(REQUIRE_BEACON_GUARD, &rt2x00dev->cap_flags))
                return 0;
  
        for (i = 0; i < queue->limit; i++) {
@@@ -618,7 -702,7 +702,7 @@@ static void rt2x00usb_free_entries(stru
         * then we are done.
         */
        if (queue->qid != QID_BEACON ||
-           !test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags))
+           !test_bit(REQUIRE_BEACON_GUARD, &rt2x00dev->cap_flags))
                return;
  
        for (i = 0; i < queue->limit; i++) {
@@@ -707,10 -791,9 +791,9 @@@ exit
  }
  
  int rt2x00usb_probe(struct usb_interface *usb_intf,
-                   const struct usb_device_id *id)
+                   const struct rt2x00_ops *ops)
  {
        struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
-       struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_info;
        struct ieee80211_hw *hw;
        struct rt2x00_dev *rt2x00dev;
        int retval;
  
        INIT_WORK(&rt2x00dev->rxdone_work, rt2x00usb_work_rxdone);
        INIT_WORK(&rt2x00dev->txdone_work, rt2x00usb_work_txdone);
+       init_timer(&rt2x00dev->txstatus_timer);
  
        retval = rt2x00usb_alloc_reg(rt2x00dev);
        if (retval)
index e11c759ac9ed928ef85e6aa7a00e9aa20abc9b4b,52b09d2e11dead0c9aa3f95143c9e5af533ac8d0..64be34f612f6b10f59ee99a83383f26815260f58
        interface_to_usbdev(intf); \
  })
  
- /*
-  * This variable should be used with the
-  * usb_driver structure initialization.
-  */
- #define USB_DEVICE_DATA(__ops)        .driver_info = (kernel_ulong_t)(__ops)
  /*
   * For USB vendor requests we need to pass a timeout
   * time in ms, for this we use the REGISTER_TIMEOUT,
@@@ -345,6 -339,21 +339,21 @@@ int rt2x00usb_regbusy_read(struct rt2x0
                           const struct rt2x00_field32 field,
                           u32 *reg);
  
+ /**
+  * rt2x00usb_register_read_async - Asynchronously read 32bit register word
+  * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
+  * @offset: Register offset
+  * @callback: Functon to call when read completes.
+  *
+  * Submit a control URB to read a 32bit register. This safe to
+  * be called from atomic context.  The callback will be called
+  * when the URB completes. Otherwise the function is similar
+  * to rt2x00usb_register_read().
+  */
+ void rt2x00usb_register_read_async(struct rt2x00_dev *rt2x00dev,
+                                  const unsigned int offset,
+                                  void (*callback)(struct rt2x00_dev*,int,u32));
  /*
   * Radio handlers
   */
@@@ -389,18 -398,20 +398,20 @@@ void rt2x00usb_kick_queue(struct data_q
  /**
   * rt2x00usb_flush_queue - Flush data queue
   * @queue: Data queue to stop
+  * @drop: True to drop all pending frames.
   *
-  * This will walk through all entries of the queue and kill all
-  * URB's which were send to the device.
+  * This will walk through all entries of the queue and will optionally
+  * kill all URB's which were send to the device, or at least wait until
+  * they have been returned from the device..
   */
- void rt2x00usb_flush_queue(struct data_queue *queue);
+ void rt2x00usb_flush_queue(struct data_queue *queue, bool drop);
  
  /**
   * rt2x00usb_watchdog - Watchdog for USB communication
   * @rt2x00dev: Pointer to &struct rt2x00_dev
   *
   * Check the health of the USB communication and determine
 - * if timeouts have occured. If this is the case, this function
 + * if timeouts have occurred. If this is the case, this function
   * will reset all communication to restore functionality again.
   */
  void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev);
@@@ -416,7 -427,7 +427,7 @@@ void rt2x00usb_uninitialize(struct rt2x
   * USB driver handlers.
   */
  int rt2x00usb_probe(struct usb_interface *usb_intf,
-                   const struct usb_device_id *id);
+                   const struct rt2x00_ops *ops);
  void rt2x00usb_disconnect(struct usb_interface *usb_intf);
  #ifdef CONFIG_PM
  int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state);
index 4803f54842e4db9a36fa7e89fa4fb59be07eb006,9477785f11680e12926365e3f0fc6f2a053bfbd9..b259f807ad27b461bd7dccc75ce3b4d9c25001fd
@@@ -251,14 -251,16 +251,16 @@@ void rtl_init_rfkill(struct ieee80211_h
        bool blocked;
        u8 valid = 0;
  
-       radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid);
+       /*set init state to on */
+       rtlpriv->rfkill.rfkill_state = 1;
+       wiphy_rfkill_set_hw_state(hw->wiphy, 0);
  
-       /*set init state to that of switch */
-       rtlpriv->rfkill.rfkill_state = radio_state;
-       printk(KERN_INFO "rtlwifi: wireless switch is %s\n",
-              rtlpriv->rfkill.rfkill_state ? "on" : "off");
+       radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid);
  
        if (valid) {
+               printk(KERN_INFO "rtlwifi: wireless switch is %s\n",
+                               rtlpriv->rfkill.rfkill_state ? "on" : "off");
                rtlpriv->rfkill.rfkill_state = radio_state;
  
                blocked = (rtlpriv->rfkill.rfkill_state == 1) ? 0 : 1;
@@@ -520,7 -522,7 +522,7 @@@ void rtl_get_tcb_desc(struct ieee80211_
                         *because hw will nerver use hw_rate
                         *when tcb_desc->use_driver_rate = false
                         *so we never set highest N rate here,
 -                       *and N rate will all be controled by FW
 +                       *and N rate will all be controlled by FW
                         *when tcb_desc->use_driver_rate = false
                         */
                        if (rtlmac->ht_enable) {
index fbde52d834245dfe18bbe327db176839c57d8ce5,59a150ce3064663dd12e801fc580808d9dca885c..c3dd4cc678ba52f326263768de28e19d7c5a4647
@@@ -374,7 -374,7 +374,7 @@@ static void rtl_pci_init_aspm(struct ie
         * 0 - Disable ASPM,
         * 1 - Enable ASPM without Clock Req,
         * 2 - Enable ASPM with Clock Req,
 -       * 3 - Alwyas Enable ASPM with Clock Req,
 +       * 3 - Always Enable ASPM with Clock Req,
         * 4 - Always Enable ASPM without Clock Req.
         * set defult to RTL8192CE:3 RTL8192E:2
         * */
@@@ -1785,7 -1785,8 +1785,8 @@@ void rtl_pci_disconnect(struct pci_dev 
  
        rtl_pci_deinit(hw);
        rtl_deinit_core(hw);
-       rtlpriv->cfg->ops->deinit_sw_leds(hw);
+       if (rtlpriv->cfg->ops->deinit_sw_leds)
+               rtlpriv->cfg->ops->deinit_sw_leds(hw);
        _rtl_pci_io_handler_release(hw);
        rtlpriv->cfg->ops->deinit_sw_vars(hw);
  
diff --combined include/linux/nl80211.h
index 1832c27c520c4a865aa423195f84f0192b8abe30,be8df57b789d84571d1f1aa60d0f39fa7d310cce..216b1d8a862fcc56999cc3958144908d79c35599
   *    notification. This event is used to indicate that an unprotected
   *    disassociation frame was dropped when MFP is in use.
   *
+  * @NL80211_CMD_NEW_PEER_CANDIDATE: Notification on the reception of a
+  *      beacon or probe response from a compatible mesh peer.  This is only
+  *      sent while no station information (sta_info) exists for the new peer
+  *      candidate and when @NL80211_MESH_SETUP_USERSPACE_AUTH is set.  On
+  *      reception of this notification, userspace may decide to create a new
+  *      station (@NL80211_CMD_NEW_STATION).  To stop this notification from
+  *      reoccurring, the userspace authentication daemon may want to create the
+  *      new station with the AUTHENTICATED flag unset and maybe change it later
+  *      depending on the authentication result.
+  *
   * @NL80211_CMD_MAX: highest used command number
   * @__NL80211_CMD_AFTER_LAST: internal use
   */
  enum nl80211_commands {
 -/* don't change the order or add anything inbetween, this is ABI! */
 +/* don't change the order or add anything between, this is ABI! */
        NL80211_CMD_UNSPEC,
  
        NL80211_CMD_GET_WIPHY,          /* can dump */
        NL80211_CMD_UNPROT_DEAUTHENTICATE,
        NL80211_CMD_UNPROT_DISASSOCIATE,
  
+       NL80211_CMD_NEW_PEER_CANDIDATE,
        /* add new commands above here */
  
        /* used to define NL80211_CMD_MAX below */
  /* source-level API compatibility */
  #define NL80211_CMD_GET_MESH_PARAMS NL80211_CMD_GET_MESH_CONFIG
  #define NL80211_CMD_SET_MESH_PARAMS NL80211_CMD_SET_MESH_CONFIG
+ #define NL80211_MESH_SETUP_VENDOR_PATH_SEL_IE NL80211_MESH_SETUP_IE
  
  /**
   * enum nl80211_attrs - nl80211 netlink attributes
   *    This can be used to mask out antennas which are not attached or should
   *    not be used for receiving. If an antenna is not selected in this bitmap
   *    the hardware should not be configured to receive on this antenna.
 - *    For a more detailed descripton see @NL80211_ATTR_WIPHY_ANTENNA_TX.
 + *    For a more detailed description see @NL80211_ATTR_WIPHY_ANTENNA_TX.
   *
   * @NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX: Bitmap of antennas which are available
   *    for configuration as TX antennas via the above parameters.
   *    changed once the mesh is active.
   * @NL80211_ATTR_MESH_CONFIG: Mesh configuration parameters, a nested attribute
   *    containing attributes from &enum nl80211_meshconf_params.
+  * @NL80211_ATTR_SUPPORT_MESH_AUTH: Currently, this means the underlying driver
+  *    allows auth frames in a mesh to be passed to userspace for processing via
+  *    the @NL80211_MESH_SETUP_USERSPACE_AUTH flag.
   *
   * @NL80211_ATTR_MAX: highest attribute number currently defined
   * @__NL80211_ATTR_AFTER_LAST: internal use
   */
  enum nl80211_attrs {
 -/* don't change the order or add anything inbetween, this is ABI! */
 +/* don't change the order or add anything between, this is ABI! */
        NL80211_ATTR_UNSPEC,
  
        NL80211_ATTR_WIPHY,
        NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX,
        NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX,
  
+       NL80211_ATTR_SUPPORT_MESH_AUTH,
        /* add attributes here, update the policy in nl80211.c */
  
        __NL80211_ATTR_AFTER_LAST,
@@@ -1168,6 -1186,7 +1186,7 @@@ enum nl80211_iftype 
   *    with short barker preamble
   * @NL80211_STA_FLAG_WME: station is WME/QoS capable
   * @NL80211_STA_FLAG_MFP: station uses management frame protection
+  * @NL80211_STA_FLAG_AUTHENTICATED: station is authenticated
   * @NL80211_STA_FLAG_MAX: highest station flag number currently defined
   * @__NL80211_STA_FLAG_AFTER_LAST: internal use
   */
@@@ -1177,6 -1196,7 +1196,7 @@@ enum nl80211_sta_flags 
        NL80211_STA_FLAG_SHORT_PREAMBLE,
        NL80211_STA_FLAG_WME,
        NL80211_STA_FLAG_MFP,
+       NL80211_STA_FLAG_AUTHENTICATED,
  
        /* keep last */
        __NL80211_STA_FLAG_AFTER_LAST,
@@@ -1277,6 -1297,7 +1297,7 @@@ enum nl80211_sta_bss_param 
   *    attribute, like NL80211_STA_INFO_TX_BITRATE.
   * @NL80211_STA_INFO_BSS_PARAM: current station's view of BSS, nested attribute
   *     containing info as possible, see &enum nl80211_sta_bss_param
+  * @NL80211_STA_INFO_CONNECTED_TIME: time since the station is last connected
   * @__NL80211_STA_INFO_AFTER_LAST: internal
   * @NL80211_STA_INFO_MAX: highest possible station info attribute
   */
@@@ -1297,6 -1318,7 +1318,7 @@@ enum nl80211_sta_info 
        NL80211_STA_INFO_SIGNAL_AVG,
        NL80211_STA_INFO_RX_BITRATE,
        NL80211_STA_INFO_BSS_PARAM,
+       NL80211_STA_INFO_CONNECTED_TIME,
  
        /* keep last */
        __NL80211_STA_INFO_AFTER_LAST,
@@@ -1452,7 -1474,7 +1474,7 @@@ enum nl80211_bitrate_attr 
   *    802.11 country information element with regulatory information it
   *    thinks we should consider. cfg80211 only processes the country
   *    code from the IE, and relies on the regulatory domain information
 - *    structure pased by userspace (CRDA) from our wireless-regdb.
 + *    structure passed by userspace (CRDA) from our wireless-regdb.
   *    If a channel is enabled but the country code indicates it should
   *    be disabled we disable the channel and re-enable it upon disassociation.
   */
@@@ -1631,7 -1653,7 +1653,7 @@@ enum nl80211_mntr_flags 
   * @NL80211_MESHCONF_RETRY_TIMEOUT: specifies the initial retry timeout in
   * millisecond units, used by the Peer Link Open message
   *
 - * @NL80211_MESHCONF_CONFIRM_TIMEOUT: specifies the inital confirm timeout, in
 + * @NL80211_MESHCONF_CONFIRM_TIMEOUT: specifies the initial confirm timeout, in
   * millisecond units, used by the peer link management to close a peer link
   *
   * @NL80211_MESHCONF_HOLDING_TIMEOUT: specifies the holding timeout, in
@@@ -1719,9 -1741,12 +1741,12 @@@ enum nl80211_meshconf_params 
   * vendor specific path metric or disable it to use the default Airtime
   * metric.
   *
-  * @NL80211_MESH_SETUP_VENDOR_PATH_SEL_IE: A vendor specific information
-  * element that vendors will use to identify the path selection methods and
-  * metrics in use.
+  * @NL80211_MESH_SETUP_IE: Information elements for this mesh, for instance, a
+  * robust security network ie, or a vendor specific information element that
+  * vendors will use to identify the path selection methods and metrics in use.
+  *
+  * @NL80211_MESH_SETUP_USERSPACE_AUTH: Enable this option if an authentication
+  * daemon will be authenticating mesh candidates.
   *
   * @NL80211_MESH_SETUP_ATTR_MAX: highest possible mesh setup attribute number
   * @__NL80211_MESH_SETUP_ATTR_AFTER_LAST: Internal use
@@@ -1730,7 -1755,8 +1755,8 @@@ enum nl80211_mesh_setup_params 
        __NL80211_MESH_SETUP_INVALID,
        NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL,
        NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC,
-       NL80211_MESH_SETUP_VENDOR_PATH_SEL_IE,
+       NL80211_MESH_SETUP_IE,
+       NL80211_MESH_SETUP_USERSPACE_AUTH,
  
        /* keep last */
        __NL80211_MESH_SETUP_ATTR_AFTER_LAST,
diff --combined include/net/mac80211.h
index 025d4cc7bbf800c846113496bfec7a375afa07c4,162363b6cb622bcd334872ab19e1d4dca87fd879..d23dd6c1329cf463d5eb5126a97aae94f207bd15
@@@ -1294,7 -1294,7 +1294,7 @@@ ieee80211_get_alt_retry_rate(const stru
   * acceleration (i.e. iwlwifi). Those drivers should provide update_tkip_key
   * handler.
   * The update_tkip_key() call updates the driver with the new phase 1 key.
 - * This happens everytime the iv16 wraps around (every 65536 packets). The
 + * This happens every time the iv16 wraps around (every 65536 packets). The
   * set_key() call will happen only once for each key (unless the AP did
   * rekeying), it will not include a valid phase 1 key. The valid phase 1 key is
   * provided by update_tkip_key only. The trigger that makes mac80211 call this
@@@ -1819,6 -1819,9 +1819,9 @@@ enum ieee80211_ampdu_mlme_action 
   * @set_ringparam: Set tx and rx ring sizes.
   *
   * @get_ringparam: Get tx and rx ring current and maximum sizes.
+  *
+  * @tx_frames_pending: Check if there is any pending frame in the hardware
+  *    queues before entering power save.
   */
  struct ieee80211_ops {
        void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
        int (*set_ringparam)(struct ieee80211_hw *hw, u32 tx, u32 rx);
        void (*get_ringparam)(struct ieee80211_hw *hw,
                              u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max);
+       bool (*tx_frames_pending)(struct ieee80211_hw *hw);
  };
  
  /**
@@@ -2222,6 -2226,18 +2226,18 @@@ static inline int ieee80211_sta_ps_tran
   */
  #define IEEE80211_TX_STATUS_HEADROOM  13
  
+ /**
+  * ieee80211_sta_set_tim - set the TIM bit for a sleeping station
+  *
+  * If a driver buffers frames for a powersave station instead of passing
+  * them back to mac80211 for retransmission, the station needs to be told
+  * to wake up using the TIM bitmap in the beacon.
+  *
+  * This function sets the station's TIM bit - it will be cleared when the
+  * station wakes up.
+  */
+ void ieee80211_sta_set_tim(struct ieee80211_sta *sta);
  /**
   * ieee80211_tx_status - transmit status callback
   *
index 299fe56a9668e677457d23b691ac38521c61a1fe,473e5973d8fe8eef9ef659f69df575d49e5b341b..47394a178bd5e0198a815e1e310dae7c88305ea6
@@@ -269,7 -269,7 +269,7 @@@ static int l2cap_sock_listen(struct soc
                goto done;
        }
  
-       if (!l2cap_pi(sk)->psm && !l2cap_pi(sk)->dcid) {
+       if (!l2cap_pi(sk)->psm && !l2cap_pi(sk)->scid) {
                bdaddr_t *src = &bt_sk(sk)->src;
                u16 psm;
  
@@@ -679,7 -679,7 +679,7 @@@ static int l2cap_sock_setsockopt(struc
  
                if (opt == BT_FLUSHABLE_OFF) {
                        struct l2cap_conn *conn = l2cap_pi(sk)->conn;
 -                      /* proceed futher only when we have l2cap_conn and
 +                      /* proceed further only when we have l2cap_conn and
                           No Flush support in the LM */
                        if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {
                                err = -EINVAL;
@@@ -757,35 -757,37 +757,37 @@@ static int l2cap_sock_sendmsg(struct ki
        case L2CAP_MODE_ERTM:
        case L2CAP_MODE_STREAMING:
                /* Entire SDU fits into one PDU */
-               if (len <= pi->remote_mps) {
+               if (len <= pi->chan->remote_mps) {
                        control = L2CAP_SDU_UNSEGMENTED;
                        skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
                        if (IS_ERR(skb)) {
                                err = PTR_ERR(skb);
                                goto done;
                        }
-                       __skb_queue_tail(TX_QUEUE(sk), skb);
+                       __skb_queue_tail(&pi->chan->tx_q, skb);
  
-                       if (sk->sk_send_head == NULL)
-                               sk->sk_send_head = skb;
+                       if (pi->chan->tx_send_head == NULL)
+                               pi->chan->tx_send_head = skb;
  
                } else {
                /* Segment SDU into multiples PDUs */
-                       err = l2cap_sar_segment_sdu(sk, msg, len);
+                       err = l2cap_sar_segment_sdu(pi->chan, msg, len);
                        if (err < 0)
                                goto done;
                }
  
                if (pi->mode == L2CAP_MODE_STREAMING) {
-                       l2cap_streaming_send(sk);
-               } else {
-                       if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
-                                       (pi->conn_state & L2CAP_CONN_WAIT_F)) {
-                               err = len;
-                               break;
-                       }
-                       err = l2cap_ertm_send(sk);
+                       l2cap_streaming_send(pi->chan);
+                       err = len;
+                       break;
+               }
+               if ((pi->chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+                               (pi->chan->conn_state & L2CAP_CONN_WAIT_F)) {
+                       err = len;
+                       break;
                }
+               err = l2cap_ertm_send(pi->chan);
  
                if (err >= 0)
                        err = len;
@@@ -808,29 -810,7 +810,7 @@@ static int l2cap_sock_recvmsg(struct ki
        lock_sock(sk);
  
        if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
-               struct l2cap_conn_rsp rsp;
-               struct l2cap_conn *conn = l2cap_pi(sk)->conn;
-               u8 buf[128];
-               sk->sk_state = BT_CONFIG;
-               rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
-               rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
-               rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
-               rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
-               l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
-                                       L2CAP_CONN_RSP, sizeof(rsp), &rsp);
-               if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
-                       release_sock(sk);
-                       return 0;
-               }
-               l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
-               l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-                               l2cap_build_conf_req(sk, buf), buf);
-               l2cap_pi(sk)->num_conf_req++;
+               __l2cap_connect_rsp_defer(sk);
                release_sock(sk);
                return 0;
        }
@@@ -886,6 -866,7 +866,7 @@@ static void l2cap_sock_cleanup_listen(s
  void __l2cap_sock_close(struct sock *sk, int reason)
  {
        struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+       struct l2cap_chan *chan = l2cap_pi(sk)->chan;
  
        BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
  
                                        sk->sk_type == SOCK_STREAM) &&
                                        conn->hcon->type == ACL_LINK) {
                        l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
-                       l2cap_send_disconn_req(conn, sk, reason);
+                       l2cap_send_disconn_req(conn, chan, reason);
                } else
-                       l2cap_chan_del(sk, reason);
+                       l2cap_chan_del(chan, reason);
                break;
  
        case BT_CONNECT2:
                        rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
                        rsp.result = cpu_to_le16(result);
                        rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
-                       l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
-                                       L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+                       l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
+                                                       sizeof(rsp), &rsp);
                }
  
-               l2cap_chan_del(sk, reason);
+               l2cap_chan_del(chan, reason);
                break;
  
        case BT_CONNECT:
        case BT_DISCONN:
-               l2cap_chan_del(sk, reason);
+               l2cap_chan_del(chan, reason);
                break;
  
        default:
@@@ -1035,12 -1016,7 +1016,7 @@@ void l2cap_sock_init(struct sock *sk, s
        }
  
        /* Default config options */
-       pi->conf_len = 0;
        pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
-       skb_queue_head_init(TX_QUEUE(sk));
-       skb_queue_head_init(SREJ_QUEUE(sk));
-       skb_queue_head_init(BUSY_QUEUE(sk));
-       INIT_LIST_HEAD(SREJ_LIST(sk));
  }
  
  static struct proto l2cap_proto = {
index 89ce1e329b5d64fe0430e0b4cf744c852ffa295d,8d6d6e3d95da409e6c72f286338411e38d548893..a77849970914eb15634b8d23d8b30f6f75e8a2ef
@@@ -97,7 -97,7 +97,7 @@@ struct ieee80211_bss 
        size_t supp_rates_len;
  
        /*
 -       * During assocation, we save an ERP value from a probe response so
 +       * During association, we save an ERP value from a probe response so
         * that we can feed ERP info to the driver when handling the
         * association completes. these fields probably won't be up-to-date
         * otherwise, you probably don't want to use them.
@@@ -488,8 -488,9 +488,9 @@@ struct ieee80211_if_mesh 
        struct mesh_config mshcfg;
        u32 mesh_seqnum;
        bool accepting_plinks;
-       const u8 *vendor_ie;
-       u8 vendor_ie_len;
+       const u8 *ie;
+       u8 ie_len;
+       bool is_secure;
  };
  
  #ifdef CONFIG_MAC80211_MESH
index 336ca9d0c5c44aac5c7f7eb88b42c18cfefaa5b8,7776ae5a8f15776d32699348d345c749b8d1b928..35c715adaae2031ed9536e6ae257c1023f9c5d89
@@@ -65,42 -65,37 +65,37 @@@ void mesh_table_free(struct mesh_table 
        __mesh_table_free(tbl);
  }
  
- static struct mesh_table *mesh_table_grow(struct mesh_table *tbl)
+ static int mesh_table_grow(struct mesh_table *oldtbl,
+               struct mesh_table *newtbl)
  {
-       struct mesh_table *newtbl;
        struct hlist_head *oldhash;
        struct hlist_node *p, *q;
        int i;
  
-       if (atomic_read(&tbl->entries)
-                       < tbl->mean_chain_len * (tbl->hash_mask + 1))
-               goto endgrow;
+       if (atomic_read(&oldtbl->entries)
+                       < oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
+               return -EAGAIN;
  
-       newtbl = mesh_table_alloc(tbl->size_order + 1);
-       if (!newtbl)
-               goto endgrow;
  
-       newtbl->free_node = tbl->free_node;
-       newtbl->mean_chain_len = tbl->mean_chain_len;
-       newtbl->copy_node = tbl->copy_node;
-       atomic_set(&newtbl->entries, atomic_read(&tbl->entries));
+       newtbl->free_node = oldtbl->free_node;
+       newtbl->mean_chain_len = oldtbl->mean_chain_len;
+       newtbl->copy_node = oldtbl->copy_node;
+       atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));
  
-       oldhash = tbl->hash_buckets;
-       for (i = 0; i <= tbl->hash_mask; i++)
+       oldhash = oldtbl->hash_buckets;
+       for (i = 0; i <= oldtbl->hash_mask; i++)
                hlist_for_each(p, &oldhash[i])
-                       if (tbl->copy_node(p, newtbl) < 0)
+                       if (oldtbl->copy_node(p, newtbl) < 0)
                                goto errcopy;
  
-       return newtbl;
+       return 0;
  
  errcopy:
        for (i = 0; i <= newtbl->hash_mask; i++) {
                hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
-                       tbl->free_node(p, 0);
+                       oldtbl->free_node(p, 0);
        }
-       __mesh_table_free(newtbl);
- endgrow:
-       return NULL;
+       return -ENOMEM;
  }
  
  
@@@ -334,10 -329,13 +329,13 @@@ void mesh_mpath_table_grow(void
  {
        struct mesh_table *oldtbl, *newtbl;
  
+       newtbl = mesh_table_alloc(mesh_paths->size_order + 1);
+       if (!newtbl)
+               return;
        write_lock(&pathtbl_resize_lock);
        oldtbl = mesh_paths;
-       newtbl = mesh_table_grow(mesh_paths);
-       if (!newtbl) {
+       if (mesh_table_grow(mesh_paths, newtbl) < 0) {
+               __mesh_table_free(newtbl);
                write_unlock(&pathtbl_resize_lock);
                return;
        }
@@@ -352,10 -350,13 +350,13 @@@ void mesh_mpp_table_grow(void
  {
        struct mesh_table *oldtbl, *newtbl;
  
+       newtbl = mesh_table_alloc(mpp_paths->size_order + 1);
+       if (!newtbl)
+               return;
        write_lock(&pathtbl_resize_lock);
        oldtbl = mpp_paths;
-       newtbl = mesh_table_grow(mpp_paths);
-       if (!newtbl) {
+       if (mesh_table_grow(mpp_paths, newtbl) < 0) {
+               __mesh_table_free(newtbl);
                write_unlock(&pathtbl_resize_lock);
                return;
        }
@@@ -628,7 -629,7 +629,7 @@@ void mesh_path_discard_frame(struct sk_
   *
   * @mpath: mesh path whose queue has to be freed
   *
 - * Locking: the function must me called withing a rcu_read_lock region
 + * Locking: the function must me called within a rcu_read_lock region
   */
  void mesh_path_flush_pending(struct mesh_path *mpath)
  {
diff --combined net/mac80211/rx.c
index 1f0b010904b85b66db447d8dacd6173fb95c0308,1f06b31e21c15f1ebb5bf5836cc5107899b19639..a864890e4d03753c3c50f7a4e8035ea8bdb035d4
@@@ -143,7 -143,8 +143,8 @@@ ieee80211_add_rx_radiotap_header(struc
        if (status->flag & RX_FLAG_HT) {
                /*
                 * MCS information is a separate field in radiotap,
-                * added below.
+                * added below. The byte here is needed as padding
+                * for the channel though, so initialise it to 0.
                 */
                *pos = 0;
        } else {
@@@ -381,7 -382,7 +382,7 @@@ static void ieee80211_parse_qos(struct 
   * specs were sane enough this time around to require padding each A-MSDU
   * subframe to a length that is a multiple of four.
   *
 - * Padding like Atheros hardware adds which is inbetween the 802.11 header and
 + * Padding like Atheros hardware adds which is between the 802.11 header and
   * the payload is not supported, the driver is required to move the 802.11
   * header to be directly in front of the payload in that case.
   */
@@@ -502,7 -503,8 +503,8 @@@ ieee80211_rx_mesh_check(struct ieee8021
  
                if (ieee80211_is_probe_req(hdr->frame_control) ||
                    ieee80211_is_probe_resp(hdr->frame_control) ||
-                   ieee80211_is_beacon(hdr->frame_control))
+                   ieee80211_is_beacon(hdr->frame_control) ||
+                   ieee80211_is_auth(hdr->frame_control))
                        return RX_CONTINUE;
  
                return RX_DROP_MONITOR;
@@@ -1585,7 -1587,7 +1587,7 @@@ ieee80211_drop_unencrypted_mgmt(struct 
  }
  
  static int
- __ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
+ __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
  {
        struct ieee80211_sub_if_data *sdata = rx->sdata;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
        struct ethhdr *ehdr;
        int ret;
  
+       *port_control = false;
        if (ieee80211_has_a4(hdr->frame_control) &&
            sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
                return -1;
                return -1;
  
        ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
-       if (ret < 0 || !check_port_control)
+       if (ret < 0)
                return ret;
  
        ehdr = (struct ethhdr *) rx->skb->data;
-       if (ehdr->h_proto != rx->sdata->control_port_protocol)
+       if (ehdr->h_proto == rx->sdata->control_port_protocol)
+               *port_control = true;
+       else if (check_port_control)
                return -1;
  
        return 0;
@@@ -1916,6 -1921,7 +1921,7 @@@ ieee80211_rx_h_data(struct ieee80211_rx
        struct net_device *dev = sdata->dev;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
        __le16 fc = hdr->frame_control;
+       bool port_control;
        int err;
  
        if (unlikely(!ieee80211_is_data(hdr->frame_control)))
            sdata->vif.type == NL80211_IFTYPE_AP)
                return RX_DROP_MONITOR;
  
-       err = __ieee80211_data_to_8023(rx);
+       err = __ieee80211_data_to_8023(rx, &port_control);
        if (unlikely(err))
                return RX_DROP_UNUSABLE;
  
        if (!ieee80211_frame_allowed(rx, fc))
                return RX_DROP_MONITOR;
  
+       if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+           unlikely(port_control) && sdata->bss) {
+               sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
+                                    u.ap);
+               dev = sdata->dev;
+               rx->sdata = sdata;
+       }
        rx->skb->dev = dev;
  
        dev->stats.rx_packets++;
diff --combined net/mac80211/sta_info.c
index 52d4b1a695c93d5078ac15e06f604ad107908d61,7c5c6da01beaee000277f8b81ef53afe74a47312..a03d8a31287550d85e96467bc385e3eb291c35be
@@@ -47,9 -47,9 +47,9 @@@
   * Station entries are added by mac80211 when you establish a link with a
   * peer. This means different things for the different type of interfaces
   * we support. For a regular station this mean we add the AP sta when we
 - * receive an assocation response from the AP. For IBSS this occurs when
 + * receive an association response from the AP. For IBSS this occurs when
   * get to know about a peer on the same IBSS. For WDS we add the sta for
 - * the peer imediately upon device open. When using AP mode we add stations
 + * the peer immediately upon device open. When using AP mode we add stations
   * for each respective station upon request from userspace through nl80211.
   *
   * In order to remove a STA info structure, various sta_info_destroy_*()
@@@ -228,6 -228,7 +228,7 @@@ struct sta_info *sta_info_alloc(struct 
  {
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
+       struct timespec uptime;
        int i;
  
        sta = kzalloc(sizeof(*sta) + local->hw.sta_data_size, gfp);
        sta->sdata = sdata;
        sta->last_rx = jiffies;
  
+       do_posix_clock_monotonic_gettime(&uptime);
+       sta->last_connected = uptime.tv_sec;
        ewma_init(&sta->avg_signal, 1024, 8);
  
        if (sta_prepare_rate_control(local, sta, gfp)) {
@@@ -609,7 -612,8 +612,8 @@@ static bool sta_info_cleanup_expire_buf
  #endif
                dev_kfree_skb(skb);
  
-               if (skb_queue_empty(&sta->ps_tx_buf))
+               if (skb_queue_empty(&sta->ps_tx_buf) &&
+                   !test_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF))
                        sta_info_clear_tim_bit(sta);
        }
  
@@@ -893,6 -897,7 +897,7 @@@ void ieee80211_sta_ps_deliver_wakeup(st
        struct ieee80211_local *local = sdata->local;
        int sent, buffered;
  
+       clear_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF);
        if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
                drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
  
@@@ -985,3 -990,12 +990,12 @@@ void ieee80211_sta_block_awake(struct i
                ieee80211_queue_work(hw, &sta->drv_unblock_wk);
  }
  EXPORT_SYMBOL(ieee80211_sta_block_awake);
+ void ieee80211_sta_set_tim(struct ieee80211_sta *pubsta)
+ {
+       struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+       set_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF);
+       sta_info_set_tim_bit(sta);
+ }
+ EXPORT_SYMBOL(ieee80211_sta_set_tim);
diff --combined net/mac80211/sta_info.h
index 87b18ba1e0e92483345f0e32c31a674829ef9eee,af1a7f8c86757174c37a138b99916ee644825135..aa0adcbf3a938f50729c7cd969ad5358eb5e24c7
@@@ -43,6 -43,8 +43,8 @@@
   *    be in the queues
   * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping
   *    station in power-save mode, reply when the driver unblocks.
+  * @WLAN_STA_PS_DRIVER_BUF: Station has frames pending in driver internal
+  *    buffers. Automatically cleared on station wake-up.
   */
  enum ieee80211_sta_info_flags {
        WLAN_STA_AUTH           = 1<<0,
@@@ -58,6 -60,7 +60,7 @@@
        WLAN_STA_BLOCK_BA       = 1<<11,
        WLAN_STA_PS_DRIVER      = 1<<12,
        WLAN_STA_PSPOLL         = 1<<13,
+       WLAN_STA_PS_DRIVER_BUF  = 1<<14,
  };
  
  #define STA_TID_NUM 16
@@@ -173,7 -176,7 +176,7 @@@ struct sta_ampdu_mlme 
  /**
   * enum plink_state - state of a mesh peer link finite state machine
   *
 - * @PLINK_LISTEN: initial state, considered the implicit state of non existant
 + * @PLINK_LISTEN: initial state, considered the implicit state of non existent
   *    mesh peer links
   * @PLINK_OPN_SNT: mesh plink open frame has been sent to this mesh peer
   * @PLINK_OPN_RCVD: mesh plink open frame has been received from this mesh peer
@@@ -226,6 -229,7 +229,7 @@@ enum plink_state 
   * @rx_bytes: Number of bytes received from this STA
   * @wep_weak_iv_count: number of weak WEP IVs received from this station
   * @last_rx: time (in jiffies) when last frame was received from this STA
+  * @last_connected: time (in seconds) when a station got connected
   * @num_duplicates: number of duplicate frames received from this STA
   * @rx_fragments: number of received MPDUs
   * @rx_dropped: number of dropped MPDUs from this STA
@@@ -295,6 -299,7 +299,7 @@@ struct sta_info 
        unsigned long rx_packets, rx_bytes;
        unsigned long wep_weak_iv_count;
        unsigned long last_rx;
+       long last_connected;
        unsigned long num_duplicates;
        unsigned long rx_fragments;
        unsigned long rx_dropped;