X-Git-Url: http://pileus.org/git/?a=blobdiff_plain;f=drivers%2Fnet%2Ftg3.c;h=53896541f7d2ba5e681832ca7573ff70afb93d38;hb=89aeb3bceaa1a02651206a76a7b9dcb8f3884702;hp=9ae33208358517b37fcc2268ea22c80f16c33ece;hpb=5e7ccf2003e6a9c35b5aa24953ba5009a1a8b653;p=~andy%2Flinux diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 9ae33208358..53896541f7d 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -68,8 +68,8 @@ #define DRV_MODULE_NAME "tg3" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "3.99" -#define DRV_MODULE_RELDATE "April 20, 2009" +#define DRV_MODULE_VERSION "3.101" +#define DRV_MODULE_RELDATE "August 28, 2009" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -92,7 +92,7 @@ /* hardware minimum and maximum for a single frame's data payload */ #define TG3_MIN_MTU 60 #define TG3_MAX_MTU(tp) \ - ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500) + ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500) /* These numbers seem to be hard coded in the NIC firmware somehow. * You can't change the ring sizes, but you can change where you place @@ -117,19 +117,26 @@ #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \ TG3_RX_RING_SIZE) -#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \ - TG3_RX_JUMBO_RING_SIZE) +#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_ext_rx_buffer_desc) * \ + TG3_RX_JUMBO_RING_SIZE) #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \ - TG3_RX_RCB_RING_SIZE(tp)) + TG3_RX_RCB_RING_SIZE(tp)) #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ TG3_TX_RING_SIZE) #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) -#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64) -#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64) +#define TG3_DMA_BYTE_ENAB 64 + +#define TG3_RX_STD_DMA_SZ 1536 +#define TG3_RX_JMB_DMA_SZ 9046 + +#define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB) + +#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ) +#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ) /* minimum number of free TX descriptors required to wake up TX process */ -#define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4) +#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) #define TG3_RAW_IP_ALIGN 2 @@ -153,6 +160,7 @@ MODULE_FIRMWARE(FIRMWARE_TG3); MODULE_FIRMWARE(FIRMWARE_TG3TSO); MODULE_FIRMWARE(FIRMWARE_TG3TSO5); +#define TG3_RSS_MIN_NUM_MSIX_VECS 2 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */ module_param(tg3_debug, int, 0); @@ -219,7 +227,8 @@ static struct pci_device_id tg3_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)}, @@ -605,39 +614,47 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum) static void tg3_disable_ints(struct tg3 *tp) { + int i; + tw32(TG3PCI_MISC_HOST_CTRL, (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT)); - tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); -} - -static inline void tg3_cond_int(struct tg3 
*tp) -{ - if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) && - (tp->hw_status->status & SD_STATUS_UPDATED)) - tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); - else - tw32(HOSTCC_MODE, tp->coalesce_mode | - (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW)); + for (i = 0; i < tp->irq_max; i++) + tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001); } static void tg3_enable_ints(struct tg3 *tp) { + int i; + u32 coal_now = 0; + tp->irq_sync = 0; wmb(); tw32(TG3PCI_MISC_HOST_CTRL, (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); - tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, - (tp->last_tag << 24)); - if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) - tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, - (tp->last_tag << 24)); - tg3_cond_int(tp); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); + if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) + tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); + + coal_now |= tnapi->coal_now; + } + + /* Force an initial interrupt */ + if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) && + (tp->napi[0].hw_status->status & SD_STATUS_UPDATED)) + tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); + else + tw32(HOSTCC_MODE, tp->coalesce_mode | + HOSTCC_MODE_ENABLE | coal_now); } -static inline unsigned int tg3_has_work(struct tg3 *tp) +static inline unsigned int tg3_has_work(struct tg3_napi *tnapi) { - struct tg3_hw_status *sblk = tp->hw_status; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; unsigned int work_exists = 0; /* check for phy events */ @@ -648,22 +665,23 @@ static inline unsigned int tg3_has_work(struct tg3 *tp) work_exists = 1; } /* check for RX/TX work to do */ - if (sblk->idx[0].tx_consumer != tp->tx_cons || - sblk->idx[0].rx_producer != tp->rx_rcb_ptr) + if (sblk->idx[0].tx_consumer != tnapi->tx_cons || + sblk->idx[0].rx_producer != tnapi->rx_rcb_ptr) work_exists = 1; return work_exists; } -/* tg3_restart_ints +/* tg3_int_reenable * similar to tg3_enable_ints, but it accurately determines whether there * is new work pending and can return without flushing the PIO write * which reenables interrupts */ -static void tg3_restart_ints(struct tg3 *tp) +static void tg3_int_reenable(struct tg3_napi *tnapi) { - tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, - tp->last_tag << 24); + struct tg3 *tp = tnapi->tp; + + tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); mmiowb(); /* When doing tagged status, this work check is unnecessary. @@ -671,27 +689,28 @@ static void tg3_restart_ints(struct tg3 *tp) * work we've completed. 
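 *
 * (Sketch of the tagged-status handshake, using the names this
 * patch introduces: the chip compares the tag written to int_mbox
 * against the tag of the last status block it delivered, and
 * reasserts the interrupt itself if they differ, so the whole path
 * reduces to the single unflushed write
 *
 *   tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
 *
 * in that mode.)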
*/ if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) && - tg3_has_work(tp)) + tg3_has_work(tnapi)) tw32(HOSTCC_MODE, tp->coalesce_mode | - (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW)); + HOSTCC_MODE_ENABLE | tnapi->coal_now); } static inline void tg3_netif_stop(struct tg3 *tp) { tp->dev->trans_start = jiffies; /* prevent tx timeout */ - napi_disable(&tp->napi); + napi_disable(&tp->napi[0].napi); netif_tx_disable(tp->dev); } static inline void tg3_netif_start(struct tg3 *tp) { + struct tg3_napi *tnapi = &tp->napi[0]; netif_wake_queue(tp->dev); /* NOTE: unconditional netif_wake_queue is only appropriate * so long as all callers are assured to have free tx slots * (such as after tg3_init_hw) */ - napi_enable(&tp->napi); - tp->hw_status->status |= SD_STATUS_UPDATED; + napi_enable(&tnapi->napi); + tnapi->hw_status->status |= SD_STATUS_UPDATED; tg3_enable_ints(tp); } @@ -784,7 +803,7 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val) unsigned int loops; int ret; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 && + if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) && (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL)) return 0; @@ -917,7 +936,9 @@ static void tg3_mdio_config_5785(struct tg3 *tp) tw32(MAC_PHYCFG2, val); val = tr32(MAC_PHYCFG1); - val &= ~MAC_PHYCFG1_RGMII_INT; + val &= ~(MAC_PHYCFG1_RGMII_INT | + MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK); + val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT; tw32(MAC_PHYCFG1, val); return; @@ -933,15 +954,18 @@ static void tg3_mdio_config_5785(struct tg3 *tp) tw32(MAC_PHYCFG2, val); - val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC | - MAC_PHYCFG1_RGMII_SND_STAT_EN); - if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) { + val = tr32(MAC_PHYCFG1); + val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK | + MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN); + if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) { if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC; if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) val |= MAC_PHYCFG1_RGMII_SND_STAT_EN; } - tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV); + val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT | + MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV; + tw32(MAC_PHYCFG1, val); val = tr32(MAC_EXT_RGMII_MODE); val &= ~(MAC_RGMII_MODE_RX_INT_B | @@ -1064,6 +1088,7 @@ static int tg3_mdio_init(struct tg3 *tp) case TG3_PHY_ID_RTL8201E: case TG3_PHY_ID_BCMAC131: phydev->interface = PHY_INTERFACE_MODE_MII; + tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET; break; } @@ -1469,14 +1494,38 @@ static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val) tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val); } +static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable) +{ + u32 phytest; + + if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { + u32 phy; + + tg3_writephy(tp, MII_TG3_FET_TEST, + phytest | MII_TG3_FET_SHADOW_EN); + if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) { + if (enable) + phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD; + else + phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD; + tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy); + } + tg3_writephy(tp, MII_TG3_FET_TEST, phytest); + } +} + static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) { u32 reg; - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) return; + if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { + tg3_phy_fet_toggle_apd(tp, 
enable); + return; + } + reg = MII_TG3_MISC_SHDW_WREN | MII_TG3_MISC_SHDW_SCR5_SEL | MII_TG3_MISC_SHDW_SCR5_LPED | @@ -1506,20 +1555,22 @@ static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable) (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) return; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { u32 ephy; - if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) { - tg3_writephy(tp, MII_TG3_EPHY_TEST, - ephy | MII_TG3_EPHY_SHADOW_EN); - if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) { + if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) { + u32 reg = MII_TG3_FET_SHDW_MISCCTRL; + + tg3_writephy(tp, MII_TG3_FET_TEST, + ephy | MII_TG3_FET_SHADOW_EN); + if (!tg3_readphy(tp, reg, &phy)) { if (enable) - phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX; + phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX; else - phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX; - tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy); + phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX; + tg3_writephy(tp, reg, phy); } - tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy); + tg3_writephy(tp, MII_TG3_FET_TEST, ephy); } } else { phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC | @@ -1888,7 +1939,7 @@ out: if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { /* Cannot do read-modify-write on 5401 */ tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20); - } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) { + } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { u32 phy_reg; /* Set bit 14 with read-modify-write to preserve other bits */ @@ -1900,7 +1951,7 @@ out: /* Set phy register 0x10 bit 0 to high fifo elasticity to support * jumbo frames transmission. */ - if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) { + if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { u32 phy_reg; if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg)) @@ -1910,7 +1961,7 @@ out: if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { /* adjust output voltage */ - tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12); + tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12); } tg3_phy_toggle_automdix(tp, 1); @@ -2655,7 +2706,7 @@ static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 break; default: - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : SPEED_10; *duplex = (val & MII_TG3_AUX_STAT_FULL) ? 
DUPLEX_FULL : @@ -2990,7 +3041,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) + else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) tg3_writephy(tp, MII_TG3_IMASK, ~0); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || @@ -3100,7 +3151,9 @@ relink: tp->mac_mode |= MAC_MODE_PORT_MODE_MII; else tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; - } else + } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) + tp->mac_mode |= MAC_MODE_PORT_MODE_MII; + else tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; @@ -3857,9 +3910,9 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset) else current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); - tp->hw_status->status = + tp->napi[0].hw_status->status = (SD_STATUS_UPDATED | - (tp->hw_status->status & ~SD_STATUS_LINK_CHG)); + (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG)); for (i = 0; i < 100; i++) { tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | @@ -4225,24 +4278,25 @@ static void tg3_tx_recover(struct tg3 *tp) spin_unlock(&tp->lock); } -static inline u32 tg3_tx_avail(struct tg3 *tp) +static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) { smp_mb(); - return (tp->tx_pending - - ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1))); + return tnapi->tx_pending - + ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1)); } /* Tigon3 never reports partial packet sends. So we do not * need special logic to handle SKBs that have not had all * of their frags sent yet, like SunGEM does. */ -static void tg3_tx(struct tg3 *tp) +static void tg3_tx(struct tg3_napi *tnapi) { - u32 hw_idx = tp->hw_status->idx[0].tx_consumer; - u32 sw_idx = tp->tx_cons; + struct tg3 *tp = tnapi->tp; + u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer; + u32 sw_idx = tnapi->tx_cons; while (sw_idx != hw_idx) { - struct tx_ring_info *ri = &tp->tx_buffers[sw_idx]; + struct tx_ring_info *ri = &tnapi->tx_buffers[sw_idx]; struct sk_buff *skb = ri->skb; int i, tx_bug = 0; @@ -4258,7 +4312,7 @@ static void tg3_tx(struct tg3 *tp) sw_idx = NEXT_TX(sw_idx); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - ri = &tp->tx_buffers[sw_idx]; + ri = &tnapi->tx_buffers[sw_idx]; if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) tx_bug = 1; sw_idx = NEXT_TX(sw_idx); @@ -4272,7 +4326,7 @@ static void tg3_tx(struct tg3 *tp) } } - tp->tx_cons = sw_idx; + tnapi->tx_cons = sw_idx; /* Need to make the tx_cons update visible to tg3_start_xmit() * before checking for netif_queue_stopped(). Without the @@ -4282,10 +4336,10 @@ static void tg3_tx(struct tg3 *tp) smp_mb(); if (unlikely(netif_queue_stopped(tp->dev) && - (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) { + (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) { netif_tx_lock(tp->dev); if (netif_queue_stopped(tp->dev) && - (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))) + (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))) netif_wake_queue(tp->dev); netif_tx_unlock(tp->dev); } @@ -4302,33 +4356,35 @@ static void tg3_tx(struct tg3 *tp) * buffers the cpu only reads the last cacheline of the RX descriptor * (to fetch the error flags, vlan tag, checksum, and opaque cookie). 
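 *
 * Buffer sizing note, derived from the macros added at the top of
 * this patch: the DMA payload size and the pci_map_single() size
 * are now related by
 *
 *   TG3_RX_STD_MAP_SZ == TG3_RX_STD_DMA_SZ + TG3_DMA_BYTE_ENAB
 *                     == 1536 + 64
 *   TG3_RX_JMB_MAP_SZ == TG3_RX_JMB_DMA_SZ + TG3_DMA_BYTE_ENAB
 *                     == 9046 + 64
 *
 * replacing the old RX_PKT_BUF_SZ expressions that folded in
 * tp->rx_offset.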
*/ -static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key, +static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key, int src_idx, u32 dest_idx_unmasked) { + struct tg3 *tp = tnapi->tp; struct tg3_rx_buffer_desc *desc; struct ring_info *map, *src_map; struct sk_buff *skb; dma_addr_t mapping; int skb_size, dest_idx; + struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; src_map = NULL; switch (opaque_key) { case RXD_OPAQUE_RING_STD: dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; - desc = &tp->rx_std[dest_idx]; - map = &tp->rx_std_buffers[dest_idx]; + desc = &tpr->rx_std[dest_idx]; + map = &tpr->rx_std_buffers[dest_idx]; if (src_idx >= 0) - src_map = &tp->rx_std_buffers[src_idx]; - skb_size = tp->rx_pkt_buf_sz; + src_map = &tpr->rx_std_buffers[src_idx]; + skb_size = tp->rx_pkt_map_sz; break; case RXD_OPAQUE_RING_JUMBO: dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; - desc = &tp->rx_jumbo[dest_idx]; - map = &tp->rx_jumbo_buffers[dest_idx]; + desc = &tpr->rx_jmb[dest_idx].std; + map = &tpr->rx_jmb_buffers[dest_idx]; if (src_idx >= 0) - src_map = &tp->rx_jumbo_buffers[src_idx]; - skb_size = RX_JUMBO_PKT_BUF_SZ; + src_map = &tpr->rx_jmb_buffers[src_idx]; + skb_size = TG3_RX_JMB_MAP_SZ; break; default: @@ -4341,14 +4397,13 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key, * Callers depend upon this behavior and assume that * we leave everything unchanged if we fail. */ - skb = netdev_alloc_skb(tp->dev, skb_size); + skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset); if (skb == NULL) return -ENOMEM; skb_reserve(skb, tp->rx_offset); - mapping = pci_map_single(tp->pdev, skb->data, - skb_size - tp->rx_offset, + mapping = pci_map_single(tp->pdev, skb->data, skb_size, PCI_DMA_FROMDEVICE); map->skb = skb; @@ -4367,28 +4422,30 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key, * members of the RX descriptor are invariant. See notes above * tg3_alloc_rx_skb for full details. 
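 *
 * In essence the recycle is just a pointer swap (a sketch; the real
 * body below also copies the pci_unmap_addr bookkeeping):
 *
 *   dest_desc->addr_hi = src_desc->addr_hi;
 *   dest_desc->addr_lo = src_desc->addr_lo;
 *   dest_map->skb      = src_map->skb;
 *   src_map->skb       = NULL;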
*/ -static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key, +static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key, int src_idx, u32 dest_idx_unmasked) { + struct tg3 *tp = tnapi->tp; struct tg3_rx_buffer_desc *src_desc, *dest_desc; struct ring_info *src_map, *dest_map; int dest_idx; + struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; switch (opaque_key) { case RXD_OPAQUE_RING_STD: dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; - dest_desc = &tp->rx_std[dest_idx]; - dest_map = &tp->rx_std_buffers[dest_idx]; - src_desc = &tp->rx_std[src_idx]; - src_map = &tp->rx_std_buffers[src_idx]; + dest_desc = &tpr->rx_std[dest_idx]; + dest_map = &tpr->rx_std_buffers[dest_idx]; + src_desc = &tpr->rx_std[src_idx]; + src_map = &tpr->rx_std_buffers[src_idx]; break; case RXD_OPAQUE_RING_JUMBO: dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; - dest_desc = &tp->rx_jumbo[dest_idx]; - dest_map = &tp->rx_jumbo_buffers[dest_idx]; - src_desc = &tp->rx_jumbo[src_idx]; - src_map = &tp->rx_jumbo_buffers[src_idx]; + dest_desc = &tpr->rx_jmb[dest_idx].std; + dest_map = &tpr->rx_jmb_buffers[dest_idx]; + src_desc = &tpr->rx_jmb[src_idx].std; + src_map = &tpr->rx_jmb_buffers[src_idx]; break; default: @@ -4404,13 +4461,6 @@ static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key, src_map->skb = NULL; } -#if TG3_VLAN_TAG_USED -static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag) -{ - return vlan_gro_receive(&tp->napi, tp->vlgrp, vlan_tag, skb); -} -#endif - /* The RX ring scheme is composed of multiple rings which post fresh * buffers to the chip, and one special ring the chip uses to report * status back to the host. @@ -4435,14 +4485,16 @@ static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag) * If both the host and chip were to write into the same ring, cache line * eviction could occur since both entities want it in an exclusive state. */ -static int tg3_rx(struct tg3 *tp, int budget) +static int tg3_rx(struct tg3_napi *tnapi, int budget) { + struct tg3 *tp = tnapi->tp; u32 work_mask, rx_std_posted = 0; - u32 sw_idx = tp->rx_rcb_ptr; + u32 sw_idx = tnapi->rx_rcb_ptr; u16 hw_idx; int received; + struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; - hw_idx = tp->hw_status->idx[0].rx_producer; + hw_idx = tnapi->hw_status->idx[0].rx_producer; /* * We need to order the read of hw_idx and the read of * the opaque cookie. 
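 *
 * (Sketch of the pairing: the producer index is read first, then an
 * rmb() orders it against any descriptor dereference:
 *
 *   hw_idx = tnapi->hw_status->idx[0].rx_producer;
 *   rmb();
 *   desc = &tnapi->rx_rcb[sw_idx];
 * )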
@@ -4451,7 +4503,7 @@ static int tg3_rx(struct tg3 *tp, int budget) work_mask = 0; received = 0; while (sw_idx != hw_idx && budget > 0) { - struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx]; + struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; unsigned int len; struct sk_buff *skb; dma_addr_t dma_addr; @@ -4460,27 +4512,25 @@ static int tg3_rx(struct tg3 *tp, int budget) desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; if (opaque_key == RXD_OPAQUE_RING_STD) { - dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], - mapping); - skb = tp->rx_std_buffers[desc_idx].skb; - post_ptr = &tp->rx_std_ptr; + struct ring_info *ri = &tpr->rx_std_buffers[desc_idx]; + dma_addr = pci_unmap_addr(ri, mapping); + skb = ri->skb; + post_ptr = &tpr->rx_std_ptr; rx_std_posted++; } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { - dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx], - mapping); - skb = tp->rx_jumbo_buffers[desc_idx].skb; - post_ptr = &tp->rx_jumbo_ptr; - } - else { + struct ring_info *ri = &tpr->rx_jmb_buffers[desc_idx]; + dma_addr = pci_unmap_addr(ri, mapping); + skb = ri->skb; + post_ptr = &tpr->rx_jmb_ptr; + } else goto next_pkt_nopost; - } work_mask |= opaque_key; if ((desc->err_vlan & RXD_ERR_MASK) != 0 && (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { drop_it: - tg3_recycle_rx(tp, opaque_key, + tg3_recycle_rx(tnapi, opaque_key, desc_idx, *post_ptr); drop_it_no_recycle: /* Other statistics kept track of by card. */ @@ -4500,20 +4550,19 @@ static int tg3_rx(struct tg3 *tp, int budget) ) { int skb_size; - skb_size = tg3_alloc_rx_skb(tp, opaque_key, + skb_size = tg3_alloc_rx_skb(tnapi, opaque_key, desc_idx, *post_ptr); if (skb_size < 0) goto drop_it; - pci_unmap_single(tp->pdev, dma_addr, - skb_size - tp->rx_offset, + pci_unmap_single(tp->pdev, dma_addr, skb_size, PCI_DMA_FROMDEVICE); skb_put(skb, len); } else { struct sk_buff *copy_skb; - tg3_recycle_rx(tp, opaque_key, + tg3_recycle_rx(tnapi, opaque_key, desc_idx, *post_ptr); copy_skb = netdev_alloc_skb(tp->dev, @@ -4550,11 +4599,11 @@ static int tg3_rx(struct tg3 *tp, int budget) #if TG3_VLAN_TAG_USED if (tp->vlgrp != NULL && desc->type_flags & RXD_FLAG_VLAN) { - tg3_vlan_rx(tp, skb, - desc->err_vlan & RXD_VLAN_MASK); + vlan_gro_receive(&tnapi->napi, tp->vlgrp, + desc->err_vlan & RXD_VLAN_MASK, skb); } else #endif - napi_gro_receive(&tp->napi, skb); + napi_gro_receive(&tnapi->napi, skb); received++; budget--; @@ -4576,23 +4625,23 @@ next_pkt_nopost: /* Refresh hw_idx to see if there is new work */ if (sw_idx == hw_idx) { - hw_idx = tp->hw_status->idx[0].rx_producer; + hw_idx = tnapi->hw_status->idx[0].rx_producer; rmb(); } } /* ACK the status ring. */ - tp->rx_rcb_ptr = sw_idx; - tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx); + tnapi->rx_rcb_ptr = sw_idx; + tw32_rx_mbox(tnapi->consmbox, sw_idx); /* Refill RX ring(s). 
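 *
 * Note the producer indices now live in the tg3_rx_prodring_set
 * (tpr->rx_std_ptr, tpr->rx_jmb_ptr) instead of struct tg3, and the
 * tp->prodring[0] indexing leaves room for more producer-ring sets
 * later.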
*/ if (work_mask & RXD_OPAQUE_RING_STD) { - sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE; + sw_idx = tpr->rx_std_ptr % TG3_RX_RING_SIZE; tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, sw_idx); } if (work_mask & RXD_OPAQUE_RING_JUMBO) { - sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE; + sw_idx = tpr->rx_jmb_ptr % TG3_RX_JUMBO_RING_SIZE; tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, sw_idx); } @@ -4601,9 +4650,10 @@ next_pkt_nopost: return received; } -static int tg3_poll_work(struct tg3 *tp, int work_done, int budget) +static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) { - struct tg3_hw_status *sblk = tp->hw_status; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; /* handle link change and other phy events */ if (!(tp->tg3_flags & @@ -4627,8 +4677,8 @@ static int tg3_poll_work(struct tg3 *tp, int work_done, int budget) } /* run TX completion thread */ - if (sblk->idx[0].tx_consumer != tp->tx_cons) { - tg3_tx(tp); + if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { + tg3_tx(tnapi); if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) return work_done; } @@ -4637,20 +4687,21 @@ static int tg3_poll_work(struct tg3 *tp, int work_done, int budget) * All RX "locking" is done by ensuring outside * code synchronizes with tg3->napi.poll() */ - if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) - work_done += tg3_rx(tp, budget - work_done); + if (sblk->idx[0].rx_producer != tnapi->rx_rcb_ptr) + work_done += tg3_rx(tnapi, budget - work_done); return work_done; } static int tg3_poll(struct napi_struct *napi, int budget) { - struct tg3 *tp = container_of(napi, struct tg3, napi); + struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); + struct tg3 *tp = tnapi->tp; int work_done = 0; - struct tg3_hw_status *sblk = tp->hw_status; + struct tg3_hw_status *sblk = tnapi->hw_status; while (1) { - work_done = tg3_poll_work(tp, work_done, budget); + work_done = tg3_poll_work(tnapi, work_done, budget); if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) goto tx_recovery; @@ -4659,19 +4710,19 @@ static int tg3_poll(struct napi_struct *napi, int budget) break; if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { - /* tp->last_tag is used in tg3_restart_ints() below + /* tp->last_tag is used in tg3_int_reenable() below * to tell the hw how much work has been processed, * so we must read it before checking for more work. 
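 *
 * i.e. the tail of the poll loop must run in this order (sketch):
 *
 *   tnapi->last_tag = sblk->status_tag;
 *   rmb();
 *   if (!tg3_has_work(tnapi)) {
 *           napi_complete(napi);
 *           tg3_int_reenable(tnapi);
 *   }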
*/ - tp->last_tag = sblk->status_tag; - tp->last_irq_tag = tp->last_tag; + tnapi->last_tag = sblk->status_tag; + tnapi->last_irq_tag = tnapi->last_tag; rmb(); } else sblk->status &= ~SD_STATUS_UPDATED; - if (likely(!tg3_has_work(tp))) { + if (likely(!tg3_has_work(tnapi))) { napi_complete(napi); - tg3_restart_ints(tp); + tg3_int_reenable(tnapi); break; } } @@ -4687,12 +4738,15 @@ tx_recovery: static void tg3_irq_quiesce(struct tg3 *tp) { + int i; + BUG_ON(tp->irq_sync); tp->irq_sync = 1; smp_mb(); - synchronize_irq(tp->pdev->irq); + for (i = 0; i < tp->irq_cnt; i++) + synchronize_irq(tp->napi[i].irq_vec); } static inline int tg3_irq_sync(struct tg3 *tp) @@ -4722,14 +4776,14 @@ static inline void tg3_full_unlock(struct tg3 *tp) */ static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) { - struct net_device *dev = dev_id; - struct tg3 *tp = netdev_priv(dev); + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; - prefetch(tp->hw_status); - prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); + prefetch(tnapi->hw_status); + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); if (likely(!tg3_irq_sync(tp))) - napi_schedule(&tp->napi); + napi_schedule(&tnapi->napi); return IRQ_HANDLED; } @@ -4740,11 +4794,11 @@ static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) */ static irqreturn_t tg3_msi(int irq, void *dev_id) { - struct net_device *dev = dev_id; - struct tg3 *tp = netdev_priv(dev); + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; - prefetch(tp->hw_status); - prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); + prefetch(tnapi->hw_status); + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); /* * Writing any value to intr-mbox-0 clears PCI INTA# and * chip-internal interrupt pending events. @@ -4754,16 +4808,16 @@ static irqreturn_t tg3_msi(int irq, void *dev_id) */ tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); if (likely(!tg3_irq_sync(tp))) - napi_schedule(&tp->napi); + napi_schedule(&tnapi->napi); return IRQ_RETVAL(1); } static irqreturn_t tg3_interrupt(int irq, void *dev_id) { - struct net_device *dev = dev_id; - struct tg3 *tp = netdev_priv(dev); - struct tg3_hw_status *sblk = tp->hw_status; + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; unsigned int handled = 1; /* In INTx mode, it is possible for the interrupt to arrive at @@ -4794,9 +4848,9 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id) if (tg3_irq_sync(tp)) goto out; sblk->status &= ~SD_STATUS_UPDATED; - if (likely(tg3_has_work(tp))) { - prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); - napi_schedule(&tp->napi); + if (likely(tg3_has_work(tnapi))) { + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + napi_schedule(&tnapi->napi); } else { /* No work, shared interrupt perhaps? re-enable * interrupts, and flush that PCI write @@ -4810,9 +4864,9 @@ out: static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) { - struct net_device *dev = dev_id; - struct tg3 *tp = netdev_priv(dev); - struct tg3_hw_status *sblk = tp->hw_status; + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; unsigned int handled = 1; /* In INTx mode, it is possible for the interrupt to arrive at @@ -4820,7 +4874,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) * Reading the PCI State register will confirm whether the * interrupt is ours and will flush the status block. 
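 *
 * Concretely (a sketch of the test below): the interrupt is ours
 * only if the status tag advanced; a stale tag plus an inactive
 * INTx line marks a shared or screaming interrupt as unhandled:
 *
 *   if (sblk->status_tag == tnapi->last_irq_tag &&
 *       (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE))
 *           handled = 0;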
*/ - if (unlikely(sblk->status_tag == tp->last_irq_tag)) { + if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) { if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) || (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { handled = 0; @@ -4847,14 +4901,14 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) * so that the above check can report that the screaming interrupts * are unhandled. Eventually they will be silenced. */ - tp->last_irq_tag = sblk->status_tag; + tnapi->last_irq_tag = sblk->status_tag; if (tg3_irq_sync(tp)) goto out; - prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); - napi_schedule(&tp->napi); + napi_schedule(&tnapi->napi); out: return IRQ_RETVAL(handled); @@ -4863,9 +4917,9 @@ out: /* ISR for interrupt test */ static irqreturn_t tg3_test_isr(int irq, void *dev_id) { - struct net_device *dev = dev_id; - struct tg3 *tp = netdev_priv(dev); - struct tg3_hw_status *sblk = tp->hw_status; + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; if ((sblk->status & SD_STATUS_UPDATED) || !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { @@ -4895,7 +4949,7 @@ static int tg3_restart_hw(struct tg3 *tp, int reset_phy) tg3_full_unlock(tp); del_timer_sync(&tp->timer); tp->irq_sync = 0; - napi_enable(&tp->napi); + napi_enable(&tp->napi[0].napi); dev_close(tp->dev); tg3_full_lock(tp, 0); } @@ -4905,9 +4959,11 @@ static int tg3_restart_hw(struct tg3 *tp, int reset_phy) #ifdef CONFIG_NET_POLL_CONTROLLER static void tg3_poll_controller(struct net_device *dev) { + int i; struct tg3 *tp = netdev_priv(dev); - tg3_interrupt(tp->pdev->irq, dev); + for (i = 0; i < tp->irq_cnt; i++) + tg3_interrupt(tp->napi[i].irq_vec, dev); } #endif @@ -5002,13 +5058,14 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, #endif } -static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32); +static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32); /* Workaround 4GB and 40-bit hardware DMA bugs. */ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb, u32 last_plus_one, u32 *start, u32 base_flags, u32 mss) { + struct tg3_napi *tnapi = &tp->napi[0]; struct sk_buff *new_skb; dma_addr_t new_addr = 0; u32 entry = *start; @@ -5043,7 +5100,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb, dev_kfree_skb(new_skb); new_skb = NULL; } else { - tg3_set_txd(tp, entry, new_addr, new_skb->len, + tg3_set_txd(tnapi, entry, new_addr, new_skb->len, base_flags, 1 | (mss << 1)); *start = NEXT_TX(entry); } @@ -5052,11 +5109,10 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb, /* Now clean up the sw ring entries. 
*/ i = 0; while (entry != last_plus_one) { - if (i == 0) { - tp->tx_buffers[entry].skb = new_skb; - } else { - tp->tx_buffers[entry].skb = NULL; - } + if (i == 0) + tnapi->tx_buffers[entry].skb = new_skb; + else + tnapi->tx_buffers[entry].skb = NULL; entry = NEXT_TX(entry); i++; } @@ -5067,11 +5123,11 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb, return ret; } -static void tg3_set_txd(struct tg3 *tp, int entry, +static void tg3_set_txd(struct tg3_napi *tnapi, int entry, dma_addr_t mapping, int len, u32 flags, u32 mss_and_is_end) { - struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry]; + struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry]; int is_end = (mss_and_is_end & 0x1); u32 mss = (mss_and_is_end >> 1); u32 vlan_tag = 0; @@ -5093,12 +5149,14 @@ static void tg3_set_txd(struct tg3 *tp, int entry, /* hard_start_xmit for devices that don't have any bugs and * support TG3_FLG2_HW_TSO_2 only. */ -static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, + struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); u32 len, entry, base_flags, mss; struct skb_shared_info *sp; dma_addr_t mapping; + struct tg3_napi *tnapi = &tp->napi[0]; len = skb_headlen(skb); @@ -5107,7 +5165,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) * interrupt. Furthermore, IRQ processing runs lockless so we have * no IRQ context deadlocks to worry about either. Rejoice! */ - if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { + if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) { if (!netif_queue_stopped(dev)) { netif_stop_queue(dev); @@ -5118,7 +5176,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } - entry = tp->tx_prod; + entry = tnapi->tx_prod; base_flags = 0; mss = 0; if ((mss = skb_shinfo(skb)->gso_size) != 0) { @@ -5166,9 +5224,9 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) mapping = sp->dma_head; - tp->tx_buffers[entry].skb = skb; + tnapi->tx_buffers[entry].skb = skb; - tg3_set_txd(tp, entry, mapping, len, base_flags, + tg3_set_txd(tnapi, entry, mapping, len, base_flags, (skb_shinfo(skb)->nr_frags == 0) | (mss << 1)); entry = NEXT_TX(entry); @@ -5183,9 +5241,9 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) len = frag->size; mapping = sp->dma_maps[i]; - tp->tx_buffers[entry].skb = NULL; + tnapi->tx_buffers[entry].skb = NULL; - tg3_set_txd(tp, entry, mapping, len, + tg3_set_txd(tnapi, entry, mapping, len, base_flags, (i == last) | (mss << 1)); entry = NEXT_TX(entry); @@ -5193,12 +5251,12 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) } /* Packets are ready, update Tx producer idx local and on card. 
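 *
 * Note the doorbell is now the per-vector tnapi->prodmbox rather
 * than the hardcoded MAILBOX_SNDHOST_PROD_IDX_0 register, paving
 * the way for multiple transmit queues.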
*/ - tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); + tw32_tx_mbox(tnapi->prodmbox, entry); - tp->tx_prod = entry; - if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) { + tnapi->tx_prod = entry; + if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { netif_stop_queue(dev); - if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)) + if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) netif_wake_queue(tp->dev); } @@ -5208,7 +5266,8 @@ out_unlock: return NETDEV_TX_OK; } -static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *); +static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *, + struct net_device *); /* Use GSO to workaround a rare TSO bug that may be triggered when the * TSO header is greater than 80 bytes. @@ -5216,11 +5275,12 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *); static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) { struct sk_buff *segs, *nskb; + u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3; /* Estimate the number of fragments in the worst case */ - if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) { + if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) { netif_stop_queue(tp->dev); - if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3)) + if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est) return NETDEV_TX_BUSY; netif_wake_queue(tp->dev); @@ -5246,13 +5306,15 @@ tg3_tso_bug_end: /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and * support TG3_FLG2_HW_TSO_1 or firmware TSO only. */ -static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, + struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); u32 len, entry, base_flags, mss; struct skb_shared_info *sp; int would_hit_hwbug; dma_addr_t mapping; + struct tg3_napi *tnapi = &tp->napi[0]; len = skb_headlen(skb); @@ -5261,7 +5323,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) * interrupt. Furthermore, IRQ processing runs lockless so we have * no IRQ context deadlocks to worry about either. Rejoice! 
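 *
 * For reference, tg3_tx_avail(), converted earlier in this patch to
 * take a tg3_napi, computes:
 *
 *   tnapi->tx_pending -
 *       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1))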
*/ - if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { + if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) { if (!netif_queue_stopped(dev)) { netif_stop_queue(dev); @@ -5272,7 +5334,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } - entry = tp->tx_prod; + entry = tnapi->tx_prod; base_flags = 0; if (skb->ip_summed == CHECKSUM_PARTIAL) base_flags |= TXD_FLAG_TCPUDP_CSUM; @@ -5342,7 +5404,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) mapping = sp->dma_head; - tp->tx_buffers[entry].skb = skb; + tnapi->tx_buffers[entry].skb = skb; would_hit_hwbug = 0; @@ -5351,7 +5413,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) else if (tg3_4g_overflow_test(mapping, len)) would_hit_hwbug = 1; - tg3_set_txd(tp, entry, mapping, len, base_flags, + tg3_set_txd(tnapi, entry, mapping, len, base_flags, (skb_shinfo(skb)->nr_frags == 0) | (mss << 1)); entry = NEXT_TX(entry); @@ -5367,7 +5429,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) len = frag->size; mapping = sp->dma_maps[i]; - tp->tx_buffers[entry].skb = NULL; + tnapi->tx_buffers[entry].skb = NULL; if (tg3_4g_overflow_test(mapping, len)) would_hit_hwbug = 1; @@ -5376,10 +5438,10 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) would_hit_hwbug = 1; if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) - tg3_set_txd(tp, entry, mapping, len, + tg3_set_txd(tnapi, entry, mapping, len, base_flags, (i == last)|(mss << 1)); else - tg3_set_txd(tp, entry, mapping, len, + tg3_set_txd(tnapi, entry, mapping, len, base_flags, (i == last)); entry = NEXT_TX(entry); @@ -5404,12 +5466,12 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) } /* Packets are ready, update Tx producer idx local and on card. */ - tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); + tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, entry); - tp->tx_prod = entry; - if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) { + tnapi->tx_prod = entry; + if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { netif_stop_queue(dev); - if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)) + if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) netif_wake_queue(tp->dev); } @@ -5477,63 +5539,40 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu) return err; } -/* Free up pending packets in all rx/tx rings. - * - * The chip has been shut down and the driver detached from - * the networking, so no interrupts or new tx packets will - * end up in the driver. tp->{tx,}lock is not held and we are not - * in an interrupt context and thus may sleep. 
- */ -static void tg3_free_rings(struct tg3 *tp) +static void tg3_rx_prodring_free(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) { - struct ring_info *rxp; int i; + struct ring_info *rxp; for (i = 0; i < TG3_RX_RING_SIZE; i++) { - rxp = &tp->rx_std_buffers[i]; + rxp = &tpr->rx_std_buffers[i]; if (rxp->skb == NULL) continue; - pci_unmap_single(tp->pdev, - pci_unmap_addr(rxp, mapping), - tp->rx_pkt_buf_sz - tp->rx_offset, - PCI_DMA_FROMDEVICE); - dev_kfree_skb_any(rxp->skb); - rxp->skb = NULL; - } - for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { - rxp = &tp->rx_jumbo_buffers[i]; - - if (rxp->skb == NULL) - continue; pci_unmap_single(tp->pdev, pci_unmap_addr(rxp, mapping), - RX_JUMBO_PKT_BUF_SZ - tp->rx_offset, + tp->rx_pkt_map_sz, PCI_DMA_FROMDEVICE); dev_kfree_skb_any(rxp->skb); rxp->skb = NULL; } - for (i = 0; i < TG3_TX_RING_SIZE; ) { - struct tx_ring_info *txp; - struct sk_buff *skb; + if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { + for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { + rxp = &tpr->rx_jmb_buffers[i]; - txp = &tp->tx_buffers[i]; - skb = txp->skb; + if (rxp->skb == NULL) + continue; - if (skb == NULL) { - i++; - continue; + pci_unmap_single(tp->pdev, + pci_unmap_addr(rxp, mapping), + TG3_RX_JMB_MAP_SZ, + PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(rxp->skb); + rxp->skb = NULL; } - - skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE); - - txp->skb = NULL; - - i += skb_shinfo(skb)->nr_frags + 1; - - dev_kfree_skb_any(skb); } } @@ -5544,23 +5583,20 @@ static void tg3_free_rings(struct tg3 *tp) * end up in the driver. tp->{tx,}lock are held and thus * we may not sleep. */ -static int tg3_init_rings(struct tg3 *tp) +static int tg3_rx_prodring_alloc(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) { - u32 i; - - /* Free up all the SKBs. */ - tg3_free_rings(tp); + u32 i, rx_pkt_dma_sz; + struct tg3_napi *tnapi = &tp->napi[0]; /* Zero out all descriptors. */ - memset(tp->rx_std, 0, TG3_RX_RING_BYTES); - memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES); - memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); - memset(tp->tx_ring, 0, TG3_TX_RING_BYTES); + memset(tpr->rx_std, 0, TG3_RX_RING_BYTES); - tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ; + rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) && - (tp->dev->mtu > ETH_DATA_LEN)) - tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ; + tp->dev->mtu > ETH_DATA_LEN) + rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ; + tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); /* Initialize invariants of the rings, we only set this * stuff once. This works because the card does not @@ -5569,102 +5605,237 @@ static int tg3_init_rings(struct tg3 *tp) for (i = 0; i < TG3_RX_RING_SIZE; i++) { struct tg3_rx_buffer_desc *rxd; - rxd = &tp->rx_std[i]; - rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64) - << RXD_LEN_SHIFT; + rxd = &tpr->rx_std[i]; + rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT; rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); rxd->opaque = (RXD_OPAQUE_RING_STD | (i << RXD_OPAQUE_INDEX_SHIFT)); } - if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { - for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { - struct tg3_rx_buffer_desc *rxd; - - rxd = &tp->rx_jumbo[i]; - rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64) - << RXD_LEN_SHIFT; - rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | - RXD_FLAG_JUMBO; - rxd->opaque = (RXD_OPAQUE_RING_JUMBO | - (i << RXD_OPAQUE_INDEX_SHIFT)); - } - } - /* Now allocate fresh SKBs for each rx ring. 
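 *
 * Failure policy (sketch of the loop below): a partial allocation
 * only shrinks the ring; the ring must be completely empty before
 * the function gives up:
 *
 *   if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_STD, -1, i) < 0) {
 *           if (i == 0)
 *                   goto initfail;
 *           tp->rx_pending = i;
 *           break;
 *   }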
*/ for (i = 0; i < tp->rx_pending; i++) { - if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) { + if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_STD, -1, i) < 0) { printk(KERN_WARNING PFX "%s: Using a smaller RX standard ring, " "only %d out of %d buffers were allocated " "successfully.\n", tp->dev->name, i, tp->rx_pending); if (i == 0) - return -ENOMEM; + goto initfail; tp->rx_pending = i; break; } } + if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)) + goto done; + + memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES); + if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { + for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { + struct tg3_rx_buffer_desc *rxd; + + rxd = &tpr->rx_jmb[i].std; + rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT; + rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | + RXD_FLAG_JUMBO; + rxd->opaque = (RXD_OPAQUE_RING_JUMBO | + (i << RXD_OPAQUE_INDEX_SHIFT)); + } + for (i = 0; i < tp->rx_jumbo_pending; i++) { - if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO, + if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_JUMBO, -1, i) < 0) { printk(KERN_WARNING PFX "%s: Using a smaller RX jumbo ring, " "only %d out of %d buffers were " "allocated successfully.\n", tp->dev->name, i, tp->rx_jumbo_pending); - if (i == 0) { - tg3_free_rings(tp); - return -ENOMEM; - } + if (i == 0) + goto initfail; tp->rx_jumbo_pending = i; break; } } } + +done: return 0; + +initfail: + tg3_rx_prodring_free(tp, tpr); + return -ENOMEM; } -/* - * Must not be invoked with interrupt sources disabled and - * the hardware shutdown down. - */ -static void tg3_free_consistent(struct tg3 *tp) +static void tg3_rx_prodring_fini(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) { - kfree(tp->rx_std_buffers); - tp->rx_std_buffers = NULL; - if (tp->rx_std) { + kfree(tpr->rx_std_buffers); + tpr->rx_std_buffers = NULL; + kfree(tpr->rx_jmb_buffers); + tpr->rx_jmb_buffers = NULL; + if (tpr->rx_std) { pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES, - tp->rx_std, tp->rx_std_mapping); - tp->rx_std = NULL; + tpr->rx_std, tpr->rx_std_mapping); + tpr->rx_std = NULL; } - if (tp->rx_jumbo) { + if (tpr->rx_jmb) { pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES, - tp->rx_jumbo, tp->rx_jumbo_mapping); - tp->rx_jumbo = NULL; + tpr->rx_jmb, tpr->rx_jmb_mapping); + tpr->rx_jmb = NULL; + } +} + +static int tg3_rx_prodring_init(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) +{ + tpr->rx_std_buffers = kzalloc(sizeof(struct ring_info) * + TG3_RX_RING_SIZE, GFP_KERNEL); + if (!tpr->rx_std_buffers) + return -ENOMEM; + + tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES, + &tpr->rx_std_mapping); + if (!tpr->rx_std) + goto err_out; + + if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { + tpr->rx_jmb_buffers = kzalloc(sizeof(struct ring_info) * + TG3_RX_JUMBO_RING_SIZE, + GFP_KERNEL); + if (!tpr->rx_jmb_buffers) + goto err_out; + + tpr->rx_jmb = pci_alloc_consistent(tp->pdev, + TG3_RX_JUMBO_RING_BYTES, + &tpr->rx_jmb_mapping); + if (!tpr->rx_jmb) + goto err_out; } - if (tp->rx_rcb) { - pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp), - tp->rx_rcb, tp->rx_rcb_mapping); - tp->rx_rcb = NULL; + + return 0; + +err_out: + tg3_rx_prodring_fini(tp, tpr); + return -ENOMEM; +} + +/* Free up pending packets in all rx/tx rings. + * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will + * end up in the driver. tp->{tx,}lock is not held and we are not + * in an interrupt context and thus may sleep. 
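+ *
+ * The tx cleanup below steps by nr_frags + 1 because only the slot
+ * holding the head of each packet stores the skb pointer; the frag
+ * slots are left NULL at transmit time (sketch):
+ *
+ *   skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
+ *   txp->skb = NULL;
+ *   i += skb_shinfo(skb)->nr_frags + 1;
+ *   dev_kfree_skb_any(skb);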
+ */ +static void tg3_free_rings(struct tg3 *tp) +{ + int i, j; + + for (j = 0; j < tp->irq_cnt; j++) { + struct tg3_napi *tnapi = &tp->napi[j]; + + for (i = 0; i < TG3_TX_RING_SIZE; ) { + struct tx_ring_info *txp; + struct sk_buff *skb; + + txp = &tnapi->tx_buffers[i]; + skb = txp->skb; + + if (skb == NULL) { + i++; + continue; + } + + skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE); + + txp->skb = NULL; + + i += skb_shinfo(skb)->nr_frags + 1; + + dev_kfree_skb_any(skb); + } } - if (tp->tx_ring) { - pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES, - tp->tx_ring, tp->tx_desc_mapping); - tp->tx_ring = NULL; + + tg3_rx_prodring_free(tp, &tp->prodring[0]); +} + +/* Initialize tx/rx rings for packet processing. + * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will + * end up in the driver. tp->{tx,}lock are held and thus + * we may not sleep. + */ +static int tg3_init_rings(struct tg3 *tp) +{ + int i; + + /* Free up all the SKBs. */ + tg3_free_rings(tp); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + tnapi->last_tag = 0; + tnapi->last_irq_tag = 0; + tnapi->hw_status->status = 0; + tnapi->hw_status->status_tag = 0; + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + + tnapi->tx_prod = 0; + tnapi->tx_cons = 0; + memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES); + + tnapi->rx_rcb_ptr = 0; + memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); } - if (tp->hw_status) { - pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE, - tp->hw_status, tp->status_mapping); - tp->hw_status = NULL; + + return tg3_rx_prodring_alloc(tp, &tp->prodring[0]); +} + +/* + * Must not be invoked with interrupt sources disabled and + * the hardware shutdown down. + */ +static void tg3_free_consistent(struct tg3 *tp) +{ + int i; + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (tnapi->tx_ring) { + pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES, + tnapi->tx_ring, tnapi->tx_desc_mapping); + tnapi->tx_ring = NULL; + } + + kfree(tnapi->tx_buffers); + tnapi->tx_buffers = NULL; + + if (tnapi->rx_rcb) { + pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp), + tnapi->rx_rcb, + tnapi->rx_rcb_mapping); + tnapi->rx_rcb = NULL; + } + + if (tnapi->hw_status) { + pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE, + tnapi->hw_status, + tnapi->status_mapping); + tnapi->hw_status = NULL; + } } + if (tp->hw_stats) { pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats), tp->hw_stats, tp->stats_mapping); tp->hw_stats = NULL; } + + tg3_rx_prodring_fini(tp, &tp->prodring[0]); } /* @@ -5673,54 +5844,49 @@ static void tg3_free_consistent(struct tg3 *tp) */ static int tg3_alloc_consistent(struct tg3 *tp) { - tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) * - (TG3_RX_RING_SIZE + - TG3_RX_JUMBO_RING_SIZE)) + - (sizeof(struct tx_ring_info) * - TG3_TX_RING_SIZE), - GFP_KERNEL); - if (!tp->rx_std_buffers) - return -ENOMEM; + int i; - tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE]; - tp->tx_buffers = (struct tx_ring_info *) - &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE]; + if (tg3_rx_prodring_init(tp, &tp->prodring[0])) + return -ENOMEM; - tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES, - &tp->rx_std_mapping); - if (!tp->rx_std) + tp->hw_stats = pci_alloc_consistent(tp->pdev, + sizeof(struct tg3_hw_stats), + &tp->stats_mapping); + if (!tp->hw_stats) goto err_out; - tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES, - &tp->rx_jumbo_mapping); + 
memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); - if (!tp->rx_jumbo) - goto err_out; + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; - tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp), - &tp->rx_rcb_mapping); - if (!tp->rx_rcb) - goto err_out; + tnapi->hw_status = pci_alloc_consistent(tp->pdev, + TG3_HW_STATUS_SIZE, + &tnapi->status_mapping); + if (!tnapi->hw_status) + goto err_out; - tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES, - &tp->tx_desc_mapping); - if (!tp->tx_ring) - goto err_out; + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); - tp->hw_status = pci_alloc_consistent(tp->pdev, - TG3_HW_STATUS_SIZE, - &tp->status_mapping); - if (!tp->hw_status) - goto err_out; + tnapi->rx_rcb = pci_alloc_consistent(tp->pdev, + TG3_RX_RCB_RING_BYTES(tp), + &tnapi->rx_rcb_mapping); + if (!tnapi->rx_rcb) + goto err_out; - tp->hw_stats = pci_alloc_consistent(tp->pdev, - sizeof(struct tg3_hw_stats), - &tp->stats_mapping); - if (!tp->hw_stats) - goto err_out; + memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); - memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE); - memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); + tnapi->tx_buffers = kzalloc(sizeof(struct tx_ring_info) * + TG3_TX_RING_SIZE, GFP_KERNEL); + if (!tnapi->tx_buffers) + goto err_out; + + tnapi->tx_ring = pci_alloc_consistent(tp->pdev, + TG3_TX_RING_BYTES, + &tnapi->tx_desc_mapping); + if (!tnapi->tx_ring) + goto err_out; + } return 0; @@ -5832,8 +5998,11 @@ static int tg3_abort_hw(struct tg3 *tp, int silent) err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); - if (tp->hw_status) - memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE); + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + if (tnapi->hw_status) + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + } if (tp->hw_stats) memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); @@ -6120,7 +6289,7 @@ static int tg3_chip_reset(struct tg3 *tp) { u32 val; void (*write_op)(struct tg3 *, u32, u32); - int err; + int i, err; tg3_nvram_lock(tp); @@ -6160,14 +6329,19 @@ static int tg3_chip_reset(struct tg3 *tp) * sharing or irqpoll. */ tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING; - if (tp->hw_status) { - tp->hw_status->status = 0; - tp->hw_status->status_tag = 0; + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + if (tnapi->hw_status) { + tnapi->hw_status->status = 0; + tnapi->hw_status->status_tag = 0; + } + tnapi->last_tag = 0; + tnapi->last_irq_tag = 0; } - tp->last_tag = 0; - tp->last_irq_tag = 0; smp_mb(); - synchronize_irq(tp->pdev->irq); + + for (i = 0; i < tp->irq_cnt; i++) + synchronize_irq(tp->napi[i].irq_vec); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; @@ -6314,14 +6488,14 @@ static int tg3_chip_reset(struct tg3 *tp) tw32_f(MAC_MODE, 0); udelay(40); - tg3_mdio_start(tp); - tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); err = tg3_poll_fw(tp); if (err) return err; + tg3_mdio_start(tp); + if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) { val = tr32(0x7c00); @@ -6694,23 +6868,134 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) } /* tp->lock is held. 
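 *
 * tg3_rings_reset() below disables every send and receive-return
 * ring beyond the first, zeroes the per-vector mailboxes, clears
 * each status block, and reprograms one BDINFO control block per
 * tg3_napi vector.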
*/ -static int tg3_reset_hw(struct tg3 *tp, int reset_phy) +static void tg3_rings_reset(struct tg3 *tp) { - u32 val, rdmac_mode; - int i, err, limit; + int i; + u32 stblk, txrcb, rxrcb, limit; + struct tg3_napi *tnapi = &tp->napi[0]; - tg3_disable_ints(tp); + /* Disable all transmit rings but the first. */ + if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; + else + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; - tg3_stop_fw(tp); + for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; + txrcb < limit; txrcb += TG3_BDINFO_SIZE) + tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, + BDINFO_FLAGS_DISABLED); - tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); - if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) { - tg3_abort_hw(tp, 1); - } + /* Disable all receive return rings but the first. */ + if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; + else + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; - if (reset_phy && - !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) + for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; + rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) + tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, + BDINFO_FLAGS_DISABLED); + + /* Disable interrupts */ + tw32_mailbox_f(tp->napi[0].int_mbox, 1); + + /* Zero mailbox registers. */ + if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) { + for (i = 1; i < TG3_IRQ_MAX_VECS; i++) { + tp->napi[i].tx_prod = 0; + tp->napi[i].tx_cons = 0; + tw32_mailbox(tp->napi[i].prodmbox, 0); + tw32_rx_mbox(tp->napi[i].consmbox, 0); + tw32_mailbox_f(tp->napi[i].int_mbox, 1); + } + } else { + tp->napi[0].tx_prod = 0; + tp->napi[0].tx_cons = 0; + tw32_mailbox(tp->napi[0].prodmbox, 0); + tw32_rx_mbox(tp->napi[0].consmbox, 0); + } + + /* Make sure the NIC-based send BD rings are disabled. */ + if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { + u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW; + for (i = 0; i < 16; i++) + tw32_tx_mbox(mbox + i * 8, 0); + } + + txrcb = NIC_SRAM_SEND_RCB; + rxrcb = NIC_SRAM_RCV_RET_RCB; + + /* Clear status block in ram. */ + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + + /* Set status block DMA address */ + tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, + ((u64) tnapi->status_mapping >> 32)); + tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, + ((u64) tnapi->status_mapping & 0xffffffff)); + + if (tnapi->tx_ring) { + tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, + (TG3_TX_RING_SIZE << + BDINFO_FLAGS_MAXLEN_SHIFT), + NIC_SRAM_TX_BUFFER_DESC); + txrcb += TG3_BDINFO_SIZE; + } + + if (tnapi->rx_rcb) { + tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, + (TG3_RX_RCB_RING_SIZE(tp) << + BDINFO_FLAGS_MAXLEN_SHIFT), 0); + rxrcb += TG3_BDINFO_SIZE; + } + + stblk = HOSTCC_STATBLCK_RING1; + + for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) { + u64 mapping = (u64)tnapi->status_mapping; + tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32); + tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff); + + /* Clear status block in ram. */ + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + + tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, + (TG3_TX_RING_SIZE << + BDINFO_FLAGS_MAXLEN_SHIFT), + NIC_SRAM_TX_BUFFER_DESC); + + tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, + (TG3_RX_RCB_RING_SIZE(tp) << + BDINFO_FLAGS_MAXLEN_SHIFT), 0); + + stblk += 8; + txrcb += TG3_BDINFO_SIZE; + rxrcb += TG3_BDINFO_SIZE; + } +} + +/* tp->lock is held. 
*/ +static int tg3_reset_hw(struct tg3 *tp, int reset_phy) +{ + u32 val, rdmac_mode; + int i, err, limit; + struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; + + tg3_disable_ints(tp); + + tg3_stop_fw(tp); + + tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); + + if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) { + tg3_abort_hw(tp, 1); + } + + if (reset_phy && + !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) tg3_phy_reset(tp); err = tg3_chip_reset(tp); @@ -6926,35 +7211,33 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) * configurable. */ tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, - ((u64) tp->rx_std_mapping >> 32)); + ((u64) tpr->rx_std_mapping >> 32)); tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, - ((u64) tp->rx_std_mapping & 0xffffffff)); + ((u64) tpr->rx_std_mapping & 0xffffffff)); tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, NIC_SRAM_RX_BUFFER_DESC); - /* Don't even try to program the JUMBO/MINI buffer descriptor - * configs on 5705. - */ - if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { - tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, - RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT); - } else { - tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, - RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT); - + /* Disable the mini ring */ + if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED); + /* Program the jumbo buffer descriptor ring control + * blocks on those devices that have them. + */ + if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && + !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { /* Setup replenish threshold. */ tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8); if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, - ((u64) tp->rx_jumbo_mapping >> 32)); + ((u64) tpr->rx_jmb_mapping >> 32)); tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, - ((u64) tp->rx_jumbo_mapping & 0xffffffff)); + ((u64) tpr->rx_jmb_mapping & 0xffffffff)); tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, - RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT); + (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) | + BDINFO_FLAGS_USE_EXT_RECV); tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, NIC_SRAM_RX_JUMBO_BUFFER_DESC); } else { @@ -6962,57 +7245,22 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) BDINFO_FLAGS_DISABLED); } - } - - /* There is only one send ring on 5705/5750, no need to explicitly - * disable the others. - */ - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { - /* Clear out send RCB ring in SRAM. */ - for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE) - tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS, - BDINFO_FLAGS_DISABLED); - } - - tp->tx_prod = 0; - tp->tx_cons = 0; - tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0); - tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0); - - tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB, - tp->tx_desc_mapping, - (TG3_TX_RING_SIZE << - BDINFO_FLAGS_MAXLEN_SHIFT), - NIC_SRAM_TX_BUFFER_DESC); - - /* There is only one receive return ring on 5705/5750, no need - * to explicitly disable the others. 
- */ - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { - for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK; - i += TG3_BDINFO_SIZE) { - tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS, - BDINFO_FLAGS_DISABLED); - } - } - - tp->rx_rcb_ptr = 0; - tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0); + val = RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT; + } else + val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT; - tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB, - tp->rx_rcb_mapping, - (TG3_RX_RCB_RING_SIZE(tp) << - BDINFO_FLAGS_MAXLEN_SHIFT), - 0); + tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); - tp->rx_std_ptr = tp->rx_pending; + tpr->rx_std_ptr = tp->rx_pending; tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, - tp->rx_std_ptr); + tpr->rx_std_ptr); - tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ? - tp->rx_jumbo_pending : 0; + tpr->rx_jmb_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ? + tp->rx_jumbo_pending : 0; tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, - tp->rx_jumbo_ptr); + tpr->rx_jmb_ptr); + + tg3_rings_reset(tp); /* Initialize MAC address and backoff seed. */ __tg3_set_mac_addr(tp, 0); @@ -7101,12 +7349,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) __tg3_set_coalesce(tp, &tp->coal); - /* set status block DMA address */ - tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, - ((u64) tp->status_mapping >> 32)); - tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, - ((u64) tp->status_mapping & 0xffffffff)); - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { /* Status/statistics block address. See tg3_timer, * the tg3_periodic_fetch_stats call there, and @@ -7117,7 +7359,16 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, ((u64) tp->stats_mapping & 0xffffffff)); tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK); + tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK); + + /* Clear statistics and status block memory areas */ + for (i = NIC_SRAM_STATS_BLK; + i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; + i += sizeof(u32)) { + tg3_write_mem(tp, i, 0); + udelay(40); + } } tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode); @@ -7127,15 +7378,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); - /* Clear statistics/status block in chip, and status block in ram. */ - for (i = NIC_SRAM_STATS_BLK; - i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; - i += sizeof(u32)) { - tg3_write_mem(tp, i, 0); - udelay(40); - } - memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE); - if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; /* reset to prevent losing 1st rx packet intermittently */ @@ -7187,8 +7429,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); udelay(100); - tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0); - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); udelay(40); @@ -7342,7 +7582,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) return err; if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) { + !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) { u32 tmp; /* Clear CRC stats. */ @@ -7489,12 +7729,12 @@ static void tg3_timer(unsigned long __opaque) * IRQ status the mailbox/status_block protocol the chip * uses with the cpu is race prone. 
*/ - if (tp->hw_status->status & SD_STATUS_UPDATED) { + if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) { tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); } else { tw32(HOSTCC_MODE, tp->coalesce_mode | - (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW)); + HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW); } if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { @@ -7595,13 +7835,22 @@ restart_timer: add_timer(&tp->timer); } -static int tg3_request_irq(struct tg3 *tp) +static int tg3_request_irq(struct tg3 *tp, int irq_num) { irq_handler_t fn; unsigned long flags; - struct net_device *dev = tp->dev; + char *name; + struct tg3_napi *tnapi = &tp->napi[irq_num]; - if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { + if (tp->irq_cnt == 1) + name = tp->dev->name; + else { + name = &tnapi->irq_lbl[0]; + snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num); + name[IFNAMSIZ-1] = 0; + } + + if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) { fn = tg3_msi; if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) fn = tg3_msi_1shot; @@ -7612,11 +7861,13 @@ static int tg3_request_irq(struct tg3 *tp) fn = tg3_interrupt_tagged; flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM; } - return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev)); + + return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); } static int tg3_test_interrupt(struct tg3 *tp) { + struct tg3_napi *tnapi = &tp->napi[0]; struct net_device *dev = tp->dev; int err, i, intr_ok = 0; @@ -7625,24 +7876,23 @@ static int tg3_test_interrupt(struct tg3 *tp) tg3_disable_ints(tp); - free_irq(tp->pdev->irq, dev); + free_irq(tnapi->irq_vec, tnapi); - err = request_irq(tp->pdev->irq, tg3_test_isr, - IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev); + err = request_irq(tnapi->irq_vec, tg3_test_isr, + IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi); if (err) return err; - tp->hw_status->status &= ~SD_STATUS_UPDATED; + tnapi->hw_status->status &= ~SD_STATUS_UPDATED; tg3_enable_ints(tp); tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | - HOSTCC_MODE_NOW); + tnapi->coal_now); for (i = 0; i < 5; i++) { u32 int_mbox, misc_host_ctrl; - int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 + - TG3_64BIT_REG_LOW); + int_mbox = tr32_mailbox(tnapi->int_mbox); misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); if ((int_mbox != 0) || @@ -7656,9 +7906,9 @@ static int tg3_test_interrupt(struct tg3 *tp) tg3_disable_ints(tp); - free_irq(tp->pdev->irq, dev); + free_irq(tnapi->irq_vec, tnapi); - err = tg3_request_irq(tp); + err = tg3_request_irq(tp, 0); if (err) return err; @@ -7674,7 +7924,6 @@ static int tg3_test_interrupt(struct tg3 *tp) */ static int tg3_test_msi(struct tg3 *tp) { - struct net_device *dev = tp->dev; int err; u16 pci_cmd; @@ -7705,12 +7954,13 @@ static int tg3_test_msi(struct tg3 *tp) "the PCI maintainer and include system chipset information.\n", tp->dev->name); - free_irq(tp->pdev->irq, dev); + free_irq(tp->napi[0].irq_vec, &tp->napi[0]); + pci_disable_msi(tp->pdev); tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; - err = tg3_request_irq(tp); + err = tg3_request_irq(tp, 0); if (err) return err; @@ -7725,7 +7975,7 @@ static int tg3_test_msi(struct tg3 *tp) tg3_full_unlock(tp); if (err) - free_irq(tp->pdev->irq, dev); + free_irq(tp->napi[0].irq_vec, &tp->napi[0]); return err; } @@ -7761,10 +8011,87 @@ static int tg3_request_firmware(struct tg3 *tp) return 0; } +static bool tg3_enable_msix(struct tg3 *tp) +{ + int i, rc, cpus = num_online_cpus(); + struct msix_entry msix_ent[tp->irq_max]; + + if (cpus == 1) + /* Just fallback to the simpler MSI mode. 
*/ + return false; + + /* + * We want as many rx rings enabled as there are cpus. + * The first MSIX vector only deals with link interrupts, etc, + * so we add one to the number of vectors we are requesting. + */ + tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max); + + for (i = 0; i < tp->irq_max; i++) { + msix_ent[i].entry = i; + msix_ent[i].vector = 0; + } + + rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt); + if (rc != 0) { + if (rc < TG3_RSS_MIN_NUM_MSIX_VECS) + return false; + if (pci_enable_msix(tp->pdev, msix_ent, rc)) + return false; + printk(KERN_NOTICE + "%s: Requested %d MSI-X vectors, received %d\n", + tp->dev->name, tp->irq_cnt, rc); + tp->irq_cnt = rc; + } + + for (i = 0; i < tp->irq_max; i++) + tp->napi[i].irq_vec = msix_ent[i].vector; + + return true; +} + +static void tg3_ints_init(struct tg3 *tp) +{ + if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) && + !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) { + /* All MSI supporting chips should support tagged + * status. Assert that this is the case. + */ + printk(KERN_WARNING PFX "%s: MSI without TAGGED? " + "Not using MSI.\n", tp->dev->name); + goto defcfg; + } + + if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp)) + tp->tg3_flags2 |= TG3_FLG2_USING_MSIX; + else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) && + pci_enable_msi(tp->pdev) == 0) + tp->tg3_flags2 |= TG3_FLG2_USING_MSI; + + if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) { + u32 msi_mode = tr32(MSGINT_MODE); + tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); + } +defcfg: + if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) { + tp->irq_cnt = 1; + tp->napi[0].irq_vec = tp->pdev->irq; + } +} + +static void tg3_ints_fini(struct tg3 *tp) +{ + if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) + pci_disable_msix(tp->pdev); + else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) + pci_disable_msi(tp->pdev); + tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX; +} + static int tg3_open(struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); - int err; + int i, err; if (tp->fw_needed) { err = tg3_request_firmware(tp); @@ -7795,40 +8122,33 @@ static int tg3_open(struct net_device *dev) tg3_full_unlock(tp); + /* + * Setup interrupts first so we know how + * many NAPI resources to allocate + */ + tg3_ints_init(tp); + /* The placement of this call is tied * to the setup and use of Host TX descriptors. */ err = tg3_alloc_consistent(tp); if (err) - return err; + goto err_out1; - if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) { - /* All MSI supporting chips should support tagged - * status. Assert that this is the case. - */ - if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) { - printk(KERN_WARNING PFX "%s: MSI without TAGGED? 
" - "Not using MSI.\n", tp->dev->name); - } else if (pci_enable_msi(tp->pdev) == 0) { - u32 msi_mode; - - msi_mode = tr32(MSGINT_MODE); - tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); - tp->tg3_flags2 |= TG3_FLG2_USING_MSI; - } - } - err = tg3_request_irq(tp); + napi_enable(&tp->napi[0].napi); - if (err) { - if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { - pci_disable_msi(tp->pdev); - tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + err = tg3_request_irq(tp, i); + if (err) { + for (i--; i >= 0; i--) + free_irq(tnapi->irq_vec, tnapi); + break; } - tg3_free_consistent(tp); - return err; } - napi_enable(&tp->napi); + if (err) + goto err_out2; tg3_full_lock(tp, 0); @@ -7856,36 +8176,19 @@ static int tg3_open(struct net_device *dev) tg3_full_unlock(tp); - if (err) { - napi_disable(&tp->napi); - free_irq(tp->pdev->irq, dev); - if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { - pci_disable_msi(tp->pdev); - tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; - } - tg3_free_consistent(tp); - return err; - } + if (err) + goto err_out3; if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { err = tg3_test_msi(tp); if (err) { tg3_full_lock(tp, 0); - - if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { - pci_disable_msi(tp->pdev); - tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; - } tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); tg3_free_rings(tp); - tg3_free_consistent(tp); - tg3_full_unlock(tp); - napi_disable(&tp->napi); - - return err; + goto err_out2; } if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { @@ -7911,6 +8214,20 @@ static int tg3_open(struct net_device *dev) netif_start_queue(dev); return 0; + +err_out3: + for (i = tp->irq_cnt - 1; i >= 0; i--) { + struct tg3_napi *tnapi = &tp->napi[i]; + free_irq(tnapi->irq_vec, tnapi); + } + +err_out2: + napi_disable(&tp->napi[0].napi); + tg3_free_consistent(tp); + +err_out1: + tg3_ints_fini(tp); + return err; } #if 0 @@ -7919,6 +8236,7 @@ static int tg3_open(struct net_device *dev) u32 val32, val32_2, val32_3, val32_4, val32_5; u16 val16; int i; + struct tg3_hw_status *sblk = tp->napi[0]->hw_status; pci_read_config_word(tp->pdev, PCI_STATUS, &val16); pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32); @@ -8071,14 +8389,15 @@ static int tg3_open(struct net_device *dev) val32, val32_2, val32_3, val32_4, val32_5); /* SW status block */ - printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", - tp->hw_status->status, - tp->hw_status->status_tag, - tp->hw_status->rx_jumbo_consumer, - tp->hw_status->rx_consumer, - tp->hw_status->rx_mini_consumer, - tp->hw_status->idx[0].rx_producer, - tp->hw_status->idx[0].tx_consumer); + printk(KERN_DEBUG + "Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", + sblk->status, + sblk->status_tag, + sblk->rx_jumbo_consumer, + sblk->rx_consumer, + sblk->rx_mini_consumer, + sblk->idx[0].rx_producer, + sblk->idx[0].tx_consumer); /* SW statistics block */ printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n", @@ -8146,9 +8465,10 @@ static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *); static int tg3_close(struct net_device *dev) { + int i; struct tg3 *tp = netdev_priv(dev); - napi_disable(&tp->napi); + napi_disable(&tp->napi[0].napi); cancel_work_sync(&tp->reset_task); netif_stop_queue(dev); @@ -8168,12 +8488,13 @@ static int tg3_close(struct net_device *dev) tg3_full_unlock(tp); - free_irq(tp->pdev->irq, dev); - if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { - pci_disable_msi(tp->pdev); - tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; + for (i = tp->irq_cnt - 1; i >= 0; i--) { 
+ struct tg3_napi *tnapi = &tp->napi[i]; + free_irq(tnapi->irq_vec, tnapi); } + tg3_ints_fini(tp); + memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev), sizeof(tp->net_stats_prev)); memcpy(&tp->estats_prev, tg3_get_estats(tp), @@ -8974,13 +9295,13 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam * else ering->rx_jumbo_pending = 0; - ering->tx_pending = tp->tx_pending; + ering->tx_pending = tp->napi[0].tx_pending; } static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) { struct tg3 *tp = netdev_priv(dev); - int irq_sync = 0, err = 0; + int i, irq_sync = 0, err = 0; if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) || (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) || @@ -9004,7 +9325,9 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e tp->rx_pending > 63) tp->rx_pending = 63; tp->rx_jumbo_pending = ering->rx_jumbo_pending; - tp->tx_pending = ering->tx_pending; + + for (i = 0; i < TG3_IRQ_MAX_VECS; i++) + tp->napi[i].tx_pending = ering->tx_pending; if (netif_running(dev)) { tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); @@ -9712,12 +10035,18 @@ static int tg3_test_memory(struct tg3 *tp) static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) { u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key; - u32 desc_idx; + u32 desc_idx, coal_now; struct sk_buff *skb, *rx_skb; u8 *tx_data; dma_addr_t map; int num_pkts, tx_len, rx_len, i, err; struct tg3_rx_buffer_desc *desc; + struct tg3_napi *tnapi, *rnapi; + struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; + + tnapi = &tp->napi[0]; + rnapi = &tp->napi[0]; + coal_now = tnapi->coal_now | rnapi->coal_now; if (loopback_mode == TG3_MAC_LOOPBACK) { /* HW errata - mac loopback fails in some cases on 5780. @@ -9739,18 +10068,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) } else if (loopback_mode == TG3_PHY_LOOPBACK) { u32 val; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - u32 phytest; - - if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) { - u32 phy; - - tg3_writephy(tp, MII_TG3_EPHY_TEST, - phytest | MII_TG3_EPHY_SHADOW_EN); - if (!tg3_readphy(tp, 0x1b, &phy)) - tg3_writephy(tp, 0x1b, phy & ~0x20); - tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest); - } + if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { + tg3_phy_fet_toggle_apd(tp, false); val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100; } else val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000; @@ -9761,8 +10080,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) udelay(40); mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800); + if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tg3_writephy(tp, MII_TG3_FET_PTEST, 0x1800); mac_mode |= MAC_MODE_PORT_MODE_MII; } else mac_mode |= MAC_MODE_PORT_MODE_GMII; @@ -9805,35 +10125,34 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | - HOSTCC_MODE_NOW); + rnapi->coal_now); udelay(10); - rx_start_idx = tp->hw_status->idx[0].rx_producer; + rx_start_idx = rnapi->hw_status->idx[0].rx_producer; num_pkts = 0; - tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1); + tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1); - tp->tx_prod++; + tnapi->tx_prod++; num_pkts++; - tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + 
TG3_64BIT_REG_LOW, - tp->tx_prod); - tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW); + tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); + tr32_mailbox(tnapi->prodmbox); udelay(10); /* 250 usec to allow enough time on some 10/100 Mbps devices. */ for (i = 0; i < 25; i++) { tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | - HOSTCC_MODE_NOW); + coal_now); udelay(10); - tx_idx = tp->hw_status->idx[0].tx_consumer; - rx_idx = tp->hw_status->idx[0].rx_producer; - if ((tx_idx == tp->tx_prod) && + tx_idx = tnapi->hw_status->idx[0].tx_consumer; + rx_idx = rnapi->hw_status->idx[0].rx_producer; + if ((tx_idx == tnapi->tx_prod) && (rx_idx == (rx_start_idx + num_pkts))) break; } @@ -9841,13 +10160,13 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE); dev_kfree_skb(skb); - if (tx_idx != tp->tx_prod) + if (tx_idx != tnapi->tx_prod) goto out; if (rx_idx != rx_start_idx + num_pkts) goto out; - desc = &tp->rx_rcb[rx_start_idx]; + desc = &rnapi->rx_rcb[rx_start_idx]; desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; if (opaque_key != RXD_OPAQUE_RING_STD) @@ -9861,9 +10180,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) if (rx_len != tx_len) goto out; - rx_skb = tp->rx_std_buffers[desc_idx].skb; + rx_skb = tpr->rx_std_buffers[desc_idx].skb; - map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping); + map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping); pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE); for (i = 14; i < tx_len; i++) { @@ -10276,8 +10595,7 @@ static void __devinit tg3_get_nvram_info(struct tg3 *tp) nvcfg1 = tr32(NVRAM_CFG1); if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { tp->tg3_flags2 |= TG3_FLG2_FLASH; - } - else { + } else { nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; tw32(NVRAM_CFG1, nvcfg1); } @@ -10285,37 +10603,36 @@ static void __devinit tg3_get_nvram_info(struct tg3 *tp) if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) || (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { - case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - break; - case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE; - break; - case FLASH_VENDOR_ATMEL_EEPROM: - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - break; - case FLASH_VENDOR_ST: - tp->nvram_jedecnum = JEDEC_ST; - tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - break; - case FLASH_VENDOR_SAIFUN: - tp->nvram_jedecnum = JEDEC_SAIFUN; - tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE; - break; - case FLASH_VENDOR_SST_SMALL: - case FLASH_VENDOR_SST_LARGE: - tp->nvram_jedecnum = JEDEC_SST; - tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE; - break; + case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + break; + case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE; + break; + case FLASH_VENDOR_ATMEL_EEPROM: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + break; + 
case FLASH_VENDOR_ST: + tp->nvram_jedecnum = JEDEC_ST; + tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + break; + case FLASH_VENDOR_SAIFUN: + tp->nvram_jedecnum = JEDEC_SAIFUN; + tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE; + break; + case FLASH_VENDOR_SST_SMALL: + case FLASH_VENDOR_SST_LARGE: + tp->nvram_jedecnum = JEDEC_SST; + tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE; + break; } - } - else { + } else { tp->nvram_jedecnum = JEDEC_ATMEL; tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; @@ -10333,48 +10650,47 @@ static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp) tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { - case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: - case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - break; - case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; - break; - case FLASH_5752VENDOR_ST_M45PE10: - case FLASH_5752VENDOR_ST_M45PE20: - case FLASH_5752VENDOR_ST_M45PE40: - tp->nvram_jedecnum = JEDEC_ST; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; - break; + case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: + case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + break; + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + break; } if (tp->tg3_flags2 & TG3_FLG2_FLASH) { switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) { - case FLASH_5752PAGE_SIZE_256: - tp->nvram_pagesize = 256; - break; - case FLASH_5752PAGE_SIZE_512: - tp->nvram_pagesize = 512; - break; - case FLASH_5752PAGE_SIZE_1K: - tp->nvram_pagesize = 1024; - break; - case FLASH_5752PAGE_SIZE_2K: - tp->nvram_pagesize = 2048; - break; - case FLASH_5752PAGE_SIZE_4K: - tp->nvram_pagesize = 4096; - break; - case FLASH_5752PAGE_SIZE_264: - tp->nvram_pagesize = 264; - break; + case FLASH_5752PAGE_SIZE_256: + tp->nvram_pagesize = 256; + break; + case FLASH_5752PAGE_SIZE_512: + tp->nvram_pagesize = 512; + break; + case FLASH_5752PAGE_SIZE_1K: + tp->nvram_pagesize = 1024; + break; + case FLASH_5752PAGE_SIZE_2K: + tp->nvram_pagesize = 2048; + break; + case FLASH_5752PAGE_SIZE_4K: + tp->nvram_pagesize = 4096; + break; + case FLASH_5752PAGE_SIZE_264: + tp->nvram_pagesize = 264; + break; } - } - else { + } else { /* For eeprom, set pagesize to maximum eeprom size */ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; @@ -10397,45 +10713,45 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; switch (nvcfg1) { - case FLASH_5755VENDOR_ATMEL_FLASH_1: - case FLASH_5755VENDOR_ATMEL_FLASH_2: - case FLASH_5755VENDOR_ATMEL_FLASH_3: - case FLASH_5755VENDOR_ATMEL_FLASH_5: - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; - tp->nvram_pagesize = 264; - if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || - nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) - tp->nvram_size = (protect ? 
0x3e200 : - TG3_NVRAM_SIZE_512KB); - else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) - tp->nvram_size = (protect ? 0x1f200 : - TG3_NVRAM_SIZE_256KB); - else - tp->nvram_size = (protect ? 0x1f200 : - TG3_NVRAM_SIZE_128KB); - break; - case FLASH_5752VENDOR_ST_M45PE10: - case FLASH_5752VENDOR_ST_M45PE20: - case FLASH_5752VENDOR_ST_M45PE40: - tp->nvram_jedecnum = JEDEC_ST; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; - tp->nvram_pagesize = 256; - if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) - tp->nvram_size = (protect ? - TG3_NVRAM_SIZE_64KB : - TG3_NVRAM_SIZE_128KB); - else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20) - tp->nvram_size = (protect ? - TG3_NVRAM_SIZE_64KB : - TG3_NVRAM_SIZE_256KB); - else - tp->nvram_size = (protect ? - TG3_NVRAM_SIZE_128KB : - TG3_NVRAM_SIZE_512KB); - break; + case FLASH_5755VENDOR_ATMEL_FLASH_1: + case FLASH_5755VENDOR_ATMEL_FLASH_2: + case FLASH_5755VENDOR_ATMEL_FLASH_3: + case FLASH_5755VENDOR_ATMEL_FLASH_5: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + tp->nvram_pagesize = 264; + if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || + nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) + tp->nvram_size = (protect ? 0x3e200 : + TG3_NVRAM_SIZE_512KB); + else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) + tp->nvram_size = (protect ? 0x1f200 : + TG3_NVRAM_SIZE_256KB); + else + tp->nvram_size = (protect ? 0x1f200 : + TG3_NVRAM_SIZE_128KB); + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + tp->nvram_pagesize = 256; + if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) + tp->nvram_size = (protect ? + TG3_NVRAM_SIZE_64KB : + TG3_NVRAM_SIZE_128KB); + else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20) + tp->nvram_size = (protect ? + TG3_NVRAM_SIZE_64KB : + TG3_NVRAM_SIZE_256KB); + else + tp->nvram_size = (protect ? 
+ TG3_NVRAM_SIZE_128KB : + TG3_NVRAM_SIZE_512KB); + break; } } @@ -10446,34 +10762,34 @@ static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp) nvcfg1 = tr32(NVRAM_CFG1); switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { - case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ: - case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: - case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: - case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ: + case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: + case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: + case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; - nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; - tw32(NVRAM_CFG1, nvcfg1); - break; - case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: - case FLASH_5755VENDOR_ATMEL_FLASH_1: - case FLASH_5755VENDOR_ATMEL_FLASH_2: - case FLASH_5755VENDOR_ATMEL_FLASH_3: - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; - tp->nvram_pagesize = 264; - break; - case FLASH_5752VENDOR_ST_M45PE10: - case FLASH_5752VENDOR_ST_M45PE20: - case FLASH_5752VENDOR_ST_M45PE40: - tp->nvram_jedecnum = JEDEC_ST; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; - tp->nvram_pagesize = 256; - break; + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + break; + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + case FLASH_5755VENDOR_ATMEL_FLASH_1: + case FLASH_5755VENDOR_ATMEL_FLASH_2: + case FLASH_5755VENDOR_ATMEL_FLASH_3: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + tp->nvram_pagesize = 264; + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + tp->nvram_pagesize = 256; + break; } } @@ -10491,63 +10807,63 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; switch (nvcfg1) { - case FLASH_5761VENDOR_ATMEL_ADB021D: - case FLASH_5761VENDOR_ATMEL_ADB041D: - case FLASH_5761VENDOR_ATMEL_ADB081D: - case FLASH_5761VENDOR_ATMEL_ADB161D: - case FLASH_5761VENDOR_ATMEL_MDB021D: - case FLASH_5761VENDOR_ATMEL_MDB041D: - case FLASH_5761VENDOR_ATMEL_MDB081D: - case FLASH_5761VENDOR_ATMEL_MDB161D: - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; - tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; - tp->nvram_pagesize = 256; - break; - case FLASH_5761VENDOR_ST_A_M45PE20: - case FLASH_5761VENDOR_ST_A_M45PE40: - case FLASH_5761VENDOR_ST_A_M45PE80: - case FLASH_5761VENDOR_ST_A_M45PE16: - case FLASH_5761VENDOR_ST_M_M45PE20: - case FLASH_5761VENDOR_ST_M_M45PE40: - case FLASH_5761VENDOR_ST_M_M45PE80: - case FLASH_5761VENDOR_ST_M_M45PE16: - tp->nvram_jedecnum = JEDEC_ST; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; - tp->nvram_pagesize = 256; - break; + case FLASH_5761VENDOR_ATMEL_ADB021D: + case FLASH_5761VENDOR_ATMEL_ADB041D: + case FLASH_5761VENDOR_ATMEL_ADB081D: + case FLASH_5761VENDOR_ATMEL_ADB161D: + case FLASH_5761VENDOR_ATMEL_MDB021D: + case FLASH_5761VENDOR_ATMEL_MDB041D: + case FLASH_5761VENDOR_ATMEL_MDB081D: + case 
FLASH_5761VENDOR_ATMEL_MDB161D: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; + tp->nvram_pagesize = 256; + break; + case FLASH_5761VENDOR_ST_A_M45PE20: + case FLASH_5761VENDOR_ST_A_M45PE40: + case FLASH_5761VENDOR_ST_A_M45PE80: + case FLASH_5761VENDOR_ST_A_M45PE16: + case FLASH_5761VENDOR_ST_M_M45PE20: + case FLASH_5761VENDOR_ST_M_M45PE40: + case FLASH_5761VENDOR_ST_M_M45PE80: + case FLASH_5761VENDOR_ST_M_M45PE16: + tp->nvram_jedecnum = JEDEC_ST; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + tp->nvram_pagesize = 256; + break; } if (protect) { tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT); } else { switch (nvcfg1) { - case FLASH_5761VENDOR_ATMEL_ADB161D: - case FLASH_5761VENDOR_ATMEL_MDB161D: - case FLASH_5761VENDOR_ST_A_M45PE16: - case FLASH_5761VENDOR_ST_M_M45PE16: - tp->nvram_size = TG3_NVRAM_SIZE_2MB; - break; - case FLASH_5761VENDOR_ATMEL_ADB081D: - case FLASH_5761VENDOR_ATMEL_MDB081D: - case FLASH_5761VENDOR_ST_A_M45PE80: - case FLASH_5761VENDOR_ST_M_M45PE80: - tp->nvram_size = TG3_NVRAM_SIZE_1MB; - break; - case FLASH_5761VENDOR_ATMEL_ADB041D: - case FLASH_5761VENDOR_ATMEL_MDB041D: - case FLASH_5761VENDOR_ST_A_M45PE40: - case FLASH_5761VENDOR_ST_M_M45PE40: - tp->nvram_size = TG3_NVRAM_SIZE_512KB; - break; - case FLASH_5761VENDOR_ATMEL_ADB021D: - case FLASH_5761VENDOR_ATMEL_MDB021D: - case FLASH_5761VENDOR_ST_A_M45PE20: - case FLASH_5761VENDOR_ST_M_M45PE20: - tp->nvram_size = TG3_NVRAM_SIZE_256KB; - break; + case FLASH_5761VENDOR_ATMEL_ADB161D: + case FLASH_5761VENDOR_ATMEL_MDB161D: + case FLASH_5761VENDOR_ST_A_M45PE16: + case FLASH_5761VENDOR_ST_M_M45PE16: + tp->nvram_size = TG3_NVRAM_SIZE_2MB; + break; + case FLASH_5761VENDOR_ATMEL_ADB081D: + case FLASH_5761VENDOR_ATMEL_MDB081D: + case FLASH_5761VENDOR_ST_A_M45PE80: + case FLASH_5761VENDOR_ST_M_M45PE80: + tp->nvram_size = TG3_NVRAM_SIZE_1MB; + break; + case FLASH_5761VENDOR_ATMEL_ADB041D: + case FLASH_5761VENDOR_ATMEL_MDB041D: + case FLASH_5761VENDOR_ST_A_M45PE40: + case FLASH_5761VENDOR_ST_M_M45PE40: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + case FLASH_5761VENDOR_ATMEL_ADB021D: + case FLASH_5761VENDOR_ATMEL_MDB021D: + case FLASH_5761VENDOR_ST_A_M45PE20: + case FLASH_5761VENDOR_ST_M_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; } } } @@ -12008,9 +12324,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) } } + tp->irq_max = 1; + if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) - tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE; + tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE; pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &pci_state_reg); @@ -12259,12 +12577,15 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB; } + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET; + /* A few boards don't want Ethernet@WireSpeed phy feature */ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) || ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) && (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) && (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) || + (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) || (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED; @@ -12275,7 +12596,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->tg3_flags2 |= 
TG3_FLG2_PHY_5704_A0_BUG; if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 && + !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780) { if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || @@ -12400,7 +12721,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F || tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) || tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) tp->tg3_flags |= TG3_FLAG_10_100_ONLY; err = tg3_phy_probe(tp); @@ -13206,7 +13527,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, static int tg3_version_printed = 0; struct net_device *dev; struct tg3 *tp; - int err, pm_cap; + int i, err, pm_cap; + u32 sndmbx, rcvmbx, intmbx; char str[40]; u64 dma_mask, persist_dma_mask; @@ -13300,9 +13622,52 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, tp->rx_pending = TG3_DEF_RX_RING_PENDING; tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; - tp->tx_pending = TG3_DEF_TX_RING_PENDING; - netif_napi_add(dev, &tp->napi, tg3_poll, 64); + intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; + rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; + sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; + for (i = 0; i < TG3_IRQ_MAX_VECS; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + tnapi->tp = tp; + tnapi->tx_pending = TG3_DEF_TX_RING_PENDING; + + tnapi->int_mbox = intmbx; + if (i < 4) + intmbx += 0x8; + else + intmbx += 0x4; + + tnapi->consmbox = rcvmbx; + tnapi->prodmbox = sndmbx; + + if (i) + tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1); + else + tnapi->coal_now = HOSTCC_MODE_NOW; + + if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX)) + break; + + /* + * If we support MSIX, we'll be using RSS. If we're using + * RSS, the first vector only handles link interrupts and the + * remaining vectors handle rx and tx interrupts. Reuse the + * mailbox values for the next iteration. The values we setup + * above are still useful for the single vectored mode. + */ + if (!i) + continue; + + rcvmbx += 0x8; + + if (sndmbx & 0x4) + sndmbx -= 0x4; + else + sndmbx += 0xc; + } + + netif_napi_add(dev, &tp->napi[0].napi, tg3_poll, 64); dev->ethtool_ops = &tg3_ethtool_ops; dev->watchdog_timeo = TG3_TX_TIMEOUT; dev->irq = pdev->irq;
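
Editor's note on the mailbox setup loop in tg3_init_one() above: the
intmbx/rcvmbx/sndmbx stride arithmetic is easier to follow in isolation. The
standalone sketch below copies that arithmetic verbatim and prints the offsets
each vector ends up with. It is illustrative only: the three base offsets are
placeholders standing in for the MAILBOX_INTERRUPT_0, MAILBOX_RCVRET_CON_IDX_0
and MAILBOX_SNDHOST_PROD_IDX_0 (+ TG3_64BIT_REG_LOW) values defined in tg3.h,
and TG3_IRQ_MAX_VECS is assumed to be 5.

#include <stdio.h>

#define TG3_IRQ_MAX_VECS	5	/* assumed vector count */
#define INTMBX_BASE		0x0200	/* placeholder base offsets, not tg3.h */
#define RCVMBX_BASE		0x0284
#define SNDMBX_BASE		0x0304

int main(void)
{
	unsigned int intmbx = INTMBX_BASE;
	unsigned int rcvmbx = RCVMBX_BASE;
	unsigned int sndmbx = SNDMBX_BASE;
	int i;

	for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
		printf("vec %d: int_mbox 0x%03x consmbox 0x%03x prodmbox 0x%03x\n",
		       i, intmbx, rcvmbx, sndmbx);

		/* Interrupt mailboxes sit 8 bytes apart for the first
		 * four vectors and 4 bytes apart afterwards. */
		if (i < 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		/* Vector 0 handles link interrupts only; vector 1
		 * reuses the default rx/tx mailboxes, so no rcv/snd
		 * adjustment is made after the first pass. */
		if (!i)
			continue;

		rcvmbx += 0x8;

		/* The -0x4/+0xc alternation visits both 32-bit halves
		 * of each successive 64-bit send mailbox slot. */
		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}
	return 0;
}

With these placeholder bases the send mailbox sequence comes out as 0x304,
0x304, 0x300, 0x30c, 0x308: vectors 0 and 1 deliberately share the default
mailbox, and the remaining vectors then alternate across the halves of the
following 64-bit slots.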
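
A second sketch, for tg3_enable_msix() earlier in this patch, models the
vector-count negotiation: request one vector per CPU plus one (vector 0
carries only link interrupts under RSS), and if the platform grants fewer,
retry with the granted count as long as it meets TG3_RSS_MIN_NUM_MSIX_VECS.
fake_pci_enable_msix() and avail_vecs are hypothetical stand-ins for
pci_enable_msix(), which returns 0 on success, a negative errno on hard
failure, or the number of vectors actually available; only the policy is
taken from the patch.

#include <stdbool.h>
#include <stdio.h>

#define TG3_RSS_MIN_NUM_MSIX_VECS 2

static int avail_vecs = 3;	/* pretend the platform grants only 3 */

/* Stand-in for pci_enable_msix(): 0 on success, else vectors available. */
static int fake_pci_enable_msix(int nvecs)
{
	return nvecs <= avail_vecs ? 0 : avail_vecs;
}

static bool enable_msix(int cpus, int irq_max, int *irq_cnt)
{
	int rc;

	if (cpus == 1)
		/* A single CPU gains nothing from MSI-X; fall back to MSI. */
		return false;

	/* One rx ring per CPU, plus the link-only vector 0. */
	*irq_cnt = (cpus + 1 < irq_max) ? cpus + 1 : irq_max;

	rc = fake_pci_enable_msix(*irq_cnt);
	if (rc != 0) {
		/* Negative errno, or fewer vectors than RSS needs: give up. */
		if (rc < TG3_RSS_MIN_NUM_MSIX_VECS)
			return false;
		/* Retry with the number of vectors actually on offer. */
		if (fake_pci_enable_msix(rc))
			return false;
		*irq_cnt = rc;
	}
	return true;
}

int main(void)
{
	int irq_cnt;

	if (enable_msix(4, 5, &irq_cnt))
		printf("MSI-X enabled with %d vectors\n", irq_cnt);
	return 0;
}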